From eb87d48bb7fef47adc2dbb48044de7a439e105d4 Mon Sep 17 00:00:00 2001 From: Wei Liu Date: Tue, 5 Sep 2023 16:43:48 +0800 Subject: [PATCH] integration test for cloudevents client Signed-off-by: Wei Liu --- cloudevents/README.md | 147 + cloudevents/generic/agentclient.go | 2 + cloudevents/generic/sourceclient.go | 2 + go.mod | 6 + go.sum | 17 + test/integration/cloudevents/agent/agent.go | 23 + test/integration/cloudevents/source/client.go | 138 + .../cloudevents/source/resource.go | 62 + test/integration/cloudevents/source/store.go | 95 + test/integration/cloudevents_test.go | 315 ++ test/integration/suite_test.go | 40 +- .../github.com/gorilla/websocket/.gitignore | 25 + vendor/github.com/gorilla/websocket/AUTHORS | 9 + vendor/github.com/gorilla/websocket/LICENSE | 22 + vendor/github.com/gorilla/websocket/README.md | 39 + vendor/github.com/gorilla/websocket/client.go | 422 ++ .../gorilla/websocket/compression.go | 148 + vendor/github.com/gorilla/websocket/conn.go | 1230 +++++ vendor/github.com/gorilla/websocket/doc.go | 227 + vendor/github.com/gorilla/websocket/join.go | 42 + vendor/github.com/gorilla/websocket/json.go | 60 + vendor/github.com/gorilla/websocket/mask.go | 55 + .../github.com/gorilla/websocket/mask_safe.go | 16 + .../github.com/gorilla/websocket/prepared.go | 102 + vendor/github.com/gorilla/websocket/proxy.go | 77 + vendor/github.com/gorilla/websocket/server.go | 365 ++ .../gorilla/websocket/tls_handshake.go | 21 + .../gorilla/websocket/tls_handshake_116.go | 21 + vendor/github.com/gorilla/websocket/util.go | 283 ++ .../gorilla/websocket/x_net_proxy.go | 473 ++ vendor/github.com/mattn/go-colorable/LICENSE | 21 + .../github.com/mattn/go-colorable/README.md | 48 + .../mattn/go-colorable/colorable_appengine.go | 38 + .../mattn/go-colorable/colorable_others.go | 38 + .../mattn/go-colorable/colorable_windows.go | 1047 +++++ .../github.com/mattn/go-colorable/go.test.sh | 12 + .../mattn/go-colorable/noncolorable.go | 57 + vendor/github.com/mattn/go-isatty/LICENSE | 9 + vendor/github.com/mattn/go-isatty/README.md | 50 + vendor/github.com/mattn/go-isatty/doc.go | 2 + vendor/github.com/mattn/go-isatty/go.test.sh | 12 + .../github.com/mattn/go-isatty/isatty_bsd.go | 19 + .../mattn/go-isatty/isatty_others.go | 16 + .../mattn/go-isatty/isatty_plan9.go | 23 + .../mattn/go-isatty/isatty_solaris.go | 21 + .../mattn/go-isatty/isatty_tcgets.go | 19 + .../mattn/go-isatty/isatty_windows.go | 125 + .../mochi-mqtt/server/v2/.gitignore | 4 + .../mochi-mqtt/server/v2/.golangci.yml | 103 + .../mochi-mqtt/server/v2/Dockerfile | 31 + .../mochi-mqtt/server/v2/LICENSE.md | 23 + .../github.com/mochi-mqtt/server/v2/README.md | 424 ++ .../mochi-mqtt/server/v2/clients.go | 574 +++ .../github.com/mochi-mqtt/server/v2/hooks.go | 846 ++++ .../server/v2/hooks/auth/allow_all.go | 41 + .../mochi-mqtt/server/v2/hooks/auth/auth.go | 107 + .../mochi-mqtt/server/v2/hooks/auth/ledger.go | 246 + .../server/v2/hooks/storage/storage.go | 194 + .../mochi-mqtt/server/v2/inflight.go | 156 + .../server/v2/listeners/http_healthcheck.go | 104 + .../server/v2/listeners/http_sysinfo.go | 118 + .../server/v2/listeners/listeners.go | 135 + .../mochi-mqtt/server/v2/listeners/mock.go | 103 + .../mochi-mqtt/server/v2/listeners/net.go | 92 + .../mochi-mqtt/server/v2/listeners/tcp.go | 108 + .../server/v2/listeners/unixsock.go | 98 + .../server/v2/listeners/websocket.go | 194 + .../mochi-mqtt/server/v2/packets/codec.go | 172 + .../mochi-mqtt/server/v2/packets/codes.go | 148 + .../server/v2/packets/fixedheader.go | 63 + 
.../mochi-mqtt/server/v2/packets/packets.go | 1148 +++++ .../server/v2/packets/properties.go | 477 ++ .../mochi-mqtt/server/v2/packets/tpackets.go | 3939 +++++++++++++++++ .../github.com/mochi-mqtt/server/v2/server.go | 1533 +++++++ .../mochi-mqtt/server/v2/system/system.go | 61 + .../github.com/mochi-mqtt/server/v2/topics.go | 707 +++ vendor/github.com/rs/xid/.appveyor.yml | 27 + vendor/github.com/rs/xid/.travis.yml | 8 + vendor/github.com/rs/xid/LICENSE | 19 + vendor/github.com/rs/xid/README.md | 116 + vendor/github.com/rs/xid/error.go | 11 + vendor/github.com/rs/xid/hostid_darwin.go | 9 + vendor/github.com/rs/xid/hostid_fallback.go | 9 + vendor/github.com/rs/xid/hostid_freebsd.go | 9 + vendor/github.com/rs/xid/hostid_linux.go | 13 + vendor/github.com/rs/xid/hostid_windows.go | 38 + vendor/github.com/rs/xid/id.go | 392 ++ vendor/github.com/rs/zerolog/.gitignore | 25 + vendor/github.com/rs/zerolog/CNAME | 1 + vendor/github.com/rs/zerolog/LICENSE | 21 + vendor/github.com/rs/zerolog/README.md | 716 +++ vendor/github.com/rs/zerolog/_config.yml | 1 + vendor/github.com/rs/zerolog/array.go | 240 + vendor/github.com/rs/zerolog/console.go | 446 ++ vendor/github.com/rs/zerolog/context.go | 433 ++ vendor/github.com/rs/zerolog/ctx.go | 51 + vendor/github.com/rs/zerolog/encoder.go | 56 + vendor/github.com/rs/zerolog/encoder_cbor.go | 42 + vendor/github.com/rs/zerolog/encoder_json.go | 39 + vendor/github.com/rs/zerolog/event.go | 780 ++++ vendor/github.com/rs/zerolog/fields.go | 277 ++ vendor/github.com/rs/zerolog/globals.go | 142 + vendor/github.com/rs/zerolog/go112.go | 7 + vendor/github.com/rs/zerolog/hook.go | 64 + .../rs/zerolog/internal/cbor/README.md | 56 + .../rs/zerolog/internal/cbor/base.go | 19 + .../rs/zerolog/internal/cbor/cbor.go | 101 + .../rs/zerolog/internal/cbor/decode_stream.go | 614 +++ .../rs/zerolog/internal/cbor/string.go | 95 + .../rs/zerolog/internal/cbor/time.go | 93 + .../rs/zerolog/internal/cbor/types.go | 477 ++ .../rs/zerolog/internal/json/base.go | 19 + .../rs/zerolog/internal/json/bytes.go | 85 + .../rs/zerolog/internal/json/string.go | 149 + .../rs/zerolog/internal/json/time.go | 113 + .../rs/zerolog/internal/json/types.go | 405 ++ vendor/github.com/rs/zerolog/log.go | 476 ++ vendor/github.com/rs/zerolog/not_go112.go | 5 + vendor/github.com/rs/zerolog/pretty.png | Bin 0 -> 84064 bytes vendor/github.com/rs/zerolog/sampler.go | 134 + vendor/github.com/rs/zerolog/syslog.go | 80 + vendor/github.com/rs/zerolog/writer.go | 154 + vendor/modules.txt | 25 + 123 files changed, 25078 insertions(+), 4 deletions(-) create mode 100644 cloudevents/README.md create mode 100644 test/integration/cloudevents/agent/agent.go create mode 100644 test/integration/cloudevents/source/client.go create mode 100644 test/integration/cloudevents/source/resource.go create mode 100644 test/integration/cloudevents/source/store.go create mode 100644 test/integration/cloudevents_test.go create mode 100644 vendor/github.com/gorilla/websocket/.gitignore create mode 100644 vendor/github.com/gorilla/websocket/AUTHORS create mode 100644 vendor/github.com/gorilla/websocket/LICENSE create mode 100644 vendor/github.com/gorilla/websocket/README.md create mode 100644 vendor/github.com/gorilla/websocket/client.go create mode 100644 vendor/github.com/gorilla/websocket/compression.go create mode 100644 vendor/github.com/gorilla/websocket/conn.go create mode 100644 vendor/github.com/gorilla/websocket/doc.go create mode 100644 vendor/github.com/gorilla/websocket/join.go create mode 100644 
vendor/github.com/gorilla/websocket/json.go create mode 100644 vendor/github.com/gorilla/websocket/mask.go create mode 100644 vendor/github.com/gorilla/websocket/mask_safe.go create mode 100644 vendor/github.com/gorilla/websocket/prepared.go create mode 100644 vendor/github.com/gorilla/websocket/proxy.go create mode 100644 vendor/github.com/gorilla/websocket/server.go create mode 100644 vendor/github.com/gorilla/websocket/tls_handshake.go create mode 100644 vendor/github.com/gorilla/websocket/tls_handshake_116.go create mode 100644 vendor/github.com/gorilla/websocket/util.go create mode 100644 vendor/github.com/gorilla/websocket/x_net_proxy.go create mode 100644 vendor/github.com/mattn/go-colorable/LICENSE create mode 100644 vendor/github.com/mattn/go-colorable/README.md create mode 100644 vendor/github.com/mattn/go-colorable/colorable_appengine.go create mode 100644 vendor/github.com/mattn/go-colorable/colorable_others.go create mode 100644 vendor/github.com/mattn/go-colorable/colorable_windows.go create mode 100644 vendor/github.com/mattn/go-colorable/go.test.sh create mode 100644 vendor/github.com/mattn/go-colorable/noncolorable.go create mode 100644 vendor/github.com/mattn/go-isatty/LICENSE create mode 100644 vendor/github.com/mattn/go-isatty/README.md create mode 100644 vendor/github.com/mattn/go-isatty/doc.go create mode 100644 vendor/github.com/mattn/go-isatty/go.test.sh create mode 100644 vendor/github.com/mattn/go-isatty/isatty_bsd.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_others.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_plan9.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_solaris.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_tcgets.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_windows.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/.gitignore create mode 100644 vendor/github.com/mochi-mqtt/server/v2/.golangci.yml create mode 100644 vendor/github.com/mochi-mqtt/server/v2/Dockerfile create mode 100644 vendor/github.com/mochi-mqtt/server/v2/LICENSE.md create mode 100644 vendor/github.com/mochi-mqtt/server/v2/README.md create mode 100644 vendor/github.com/mochi-mqtt/server/v2/clients.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/hooks.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/hooks/auth/allow_all.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/hooks/auth/auth.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/hooks/auth/ledger.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/hooks/storage/storage.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/inflight.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/http_healthcheck.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/http_sysinfo.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/listeners.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/mock.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/net.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/tcp.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/unixsock.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/listeners/websocket.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/packets/codec.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/packets/codes.go create mode 100644 
vendor/github.com/mochi-mqtt/server/v2/packets/fixedheader.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/packets/packets.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/packets/properties.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/packets/tpackets.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/server.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/system/system.go create mode 100644 vendor/github.com/mochi-mqtt/server/v2/topics.go create mode 100644 vendor/github.com/rs/xid/.appveyor.yml create mode 100644 vendor/github.com/rs/xid/.travis.yml create mode 100644 vendor/github.com/rs/xid/LICENSE create mode 100644 vendor/github.com/rs/xid/README.md create mode 100644 vendor/github.com/rs/xid/error.go create mode 100644 vendor/github.com/rs/xid/hostid_darwin.go create mode 100644 vendor/github.com/rs/xid/hostid_fallback.go create mode 100644 vendor/github.com/rs/xid/hostid_freebsd.go create mode 100644 vendor/github.com/rs/xid/hostid_linux.go create mode 100644 vendor/github.com/rs/xid/hostid_windows.go create mode 100644 vendor/github.com/rs/xid/id.go create mode 100644 vendor/github.com/rs/zerolog/.gitignore create mode 100644 vendor/github.com/rs/zerolog/CNAME create mode 100644 vendor/github.com/rs/zerolog/LICENSE create mode 100644 vendor/github.com/rs/zerolog/README.md create mode 100644 vendor/github.com/rs/zerolog/_config.yml create mode 100644 vendor/github.com/rs/zerolog/array.go create mode 100644 vendor/github.com/rs/zerolog/console.go create mode 100644 vendor/github.com/rs/zerolog/context.go create mode 100644 vendor/github.com/rs/zerolog/ctx.go create mode 100644 vendor/github.com/rs/zerolog/encoder.go create mode 100644 vendor/github.com/rs/zerolog/encoder_cbor.go create mode 100644 vendor/github.com/rs/zerolog/encoder_json.go create mode 100644 vendor/github.com/rs/zerolog/event.go create mode 100644 vendor/github.com/rs/zerolog/fields.go create mode 100644 vendor/github.com/rs/zerolog/globals.go create mode 100644 vendor/github.com/rs/zerolog/go112.go create mode 100644 vendor/github.com/rs/zerolog/hook.go create mode 100644 vendor/github.com/rs/zerolog/internal/cbor/README.md create mode 100644 vendor/github.com/rs/zerolog/internal/cbor/base.go create mode 100644 vendor/github.com/rs/zerolog/internal/cbor/cbor.go create mode 100644 vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go create mode 100644 vendor/github.com/rs/zerolog/internal/cbor/string.go create mode 100644 vendor/github.com/rs/zerolog/internal/cbor/time.go create mode 100644 vendor/github.com/rs/zerolog/internal/cbor/types.go create mode 100644 vendor/github.com/rs/zerolog/internal/json/base.go create mode 100644 vendor/github.com/rs/zerolog/internal/json/bytes.go create mode 100644 vendor/github.com/rs/zerolog/internal/json/string.go create mode 100644 vendor/github.com/rs/zerolog/internal/json/time.go create mode 100644 vendor/github.com/rs/zerolog/internal/json/types.go create mode 100644 vendor/github.com/rs/zerolog/log.go create mode 100644 vendor/github.com/rs/zerolog/not_go112.go create mode 100644 vendor/github.com/rs/zerolog/pretty.png create mode 100644 vendor/github.com/rs/zerolog/sampler.go create mode 100644 vendor/github.com/rs/zerolog/syslog.go create mode 100644 vendor/github.com/rs/zerolog/writer.go diff --git a/cloudevents/README.md b/cloudevents/README.md new file mode 100644 index 00000000..76581b47 --- /dev/null +++ b/cloudevents/README.md @@ -0,0 +1,147 @@ +# Cloudevents Clients + +We have implemented the 
[cloudevents](https://cloudevents.io/)-based clients in this package to assist developers in +easily implementing the [Event Based Manifestwork](https://github.com/open-cluster-management-io/enhancements/tree/main/enhancements/sig-architecture/224-event-based-manifestwork) +proposal. + +## Generic Clients + +The generic client (`generic.CloudEventsClient`) is used to resync/publish/subscribe resource objects between sources +and agents with cloudevents. + +A resource object can be any object that implements the `generic.ResourceObject` interface. + +### Building a generic client on a source + +Developers can use the `generic.NewCloudEventSourceClient` method to build a generic client on the source. To build this +client, developers need to provide: + +1. Cloudevents source options (`options.CloudEventsSourceOptions`), which have two parts + - `sourceID`: a unique identifier for a source; for example, a hub controller can generate a source ID by hashing the hub + cluster URL and appending the controller name. Similarly, a RESTful service can select a unique name or generate a + unique ID in the associated database for its source identification. + - `CloudEventsOptions`: provides the cloudevents clients that send/receive cloudevents over a given event + protocol. The MQTT protocol (`mqtt.NewSourceOptions`) is currently supported and can be used directly. + +2. A resource lister (`generic.Lister`), which lists the resource objects on the source when resyncing the +resources between sources and agents; for example, a hub controller can list resources from its resource informers, +and a RESTful service can list its resources from a database. + +3. A resource status hash getter method (`generic.StatusHashGetter`), which calculates the resource +status hash when resyncing the resource status between sources and agents. + +4. Codecs (`generic.Codec`), which encode a resource object into a cloudevent and decode a cloudevent into a +resource object for a given cloudevent data type. Two data types are provided for `ManifestWork` in the `work/payload` +package: `io.open-cluster-management.works.v1alpha1.manifests`, which carries a single resource object in the +cloudevent payload, and `io.open-cluster-management.works.v1alpha1.manifestbundles`, which carries a list of resource +objects in the cloudevent payload. + +5. Resource handler methods (`generic.ResourceHandler`), which handle the resource status after the client +receives it from agents. + +For example: + +```golang +// build a client for source1 +client, err := generic.NewCloudEventSourceClient[*CustomerResource]( + ctx, + mqtt.NewSourceOptions(mqtt.NewMQTTOptions(), "source1"), + customerResourceLister, + customerResourceStatusHashGetter, + customerResourceCodec, + ) + +// start a goroutine to receive the resource status from agents +go func() { + if err := client.Subscribe(ctx, customerResourceHandler); err != nil { + // TODO: handle the error when subscribing to the cloudevents fails + } +}() +``` + +You may refer to the [cloudevents client integration test](../test/integration/cloudevents/source) as an example. + +### Building a generic client on a managed cluster + +Developers can use the `generic.NewCloudEventAgentClient` method to build a generic client on a managed cluster. To build +this client, developers need to provide: + +1.
Cloudevents agent options (`options.CloudEventsAgentOptions`), which have three parts + - `agentID`: a unique identifier for an agent; for example, it can consist of a managed cluster name and an + agent name. + - `clusterName`: the name of the managed cluster on which the agent runs. + - `CloudEventsOptions`: provides the cloudevents clients that send/receive cloudevents over a given event + protocol. The MQTT protocol (`mqtt.NewAgentOptions`) is currently supported and can be used directly. + +2. A resource lister (`generic.Lister`), which lists the resource objects on a managed cluster when resyncing the +resources between sources and agents; for example, a work agent can list its works from its work informers. + +3. A resource status hash getter method (`generic.StatusHashGetter`), which calculates the resource +status hash when resyncing the resource status between sources and agents. + +4. Codecs (`generic.Codec`), which encode a resource object into a cloudevent and decode a cloudevent into a +resource object for a given cloudevent data type. Two data types are provided for `ManifestWork` in the `work/payload` +package: `io.open-cluster-management.works.v1alpha1.manifests`, which carries a single resource object in the +cloudevent payload, and `io.open-cluster-management.works.v1alpha1.manifestbundles`, which carries a list of resource +objects in the cloudevent payload. + +5. Resource handler methods (`generic.ResourceHandler`), which handle the resources after the client receives +them from sources. + +For example: + +```golang +// build a client for a work agent on cluster1 +client, err := generic.NewCloudEventAgentClient[*CustomerResource]( + ctx, + mqtt.NewAgentOptions(mqtt.NewMQTTOptions(), "cluster1", "cluster1-work-agent"), + &ManifestWorkLister{}, + ManifestWorkStatusHash, + &ManifestBundleCodec{}, + ) + +// start a goroutine to receive the resources from sources +go func() { + if err := client.Subscribe(ctx, NewManifestWorkAgentHandler()); err != nil { + // TODO: handle the error when subscribing to the cloudevents fails + } +}() +``` + +## Work Clients + +We provide a builder to build the `ManifestWork` client (`ManifestWorkInterface`) and informer (`ManifestWorkInformer`) +on top of the generic client. + +### Building work client for work controllers on the hub cluster + +TODO + +### Building work client for work agent on the managed cluster + +Developers can use the builder to build the `ManifestWork` client and informer with the cluster name. + +```golang + +clusterName := "cluster1" +// Build the clients based on cloudevents with MQTT +config := mqtt.NewMQTTOptions() + +clientHolder, err := work.NewClientHolderBuilder(fmt.Sprintf("%s-work-agent", clusterName), config). + WithClusterName(clusterName). + // Supports two event data types for ManifestWork + WithCodecs(codec.NewManifestBundleCodec(), codec.NewManifestCodec(restMapper)). + NewClientHolder(ctx) +if err != nil { + return err +} + +manifestWorkClient := clientHolder.ManifestWorks(clusterName) +manifestWorkInformer := clientHolder.ManifestWorkInformer() + +// Building controllers with ManifestWork client and informer ...
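+// Illustrative sketch only (not part of the original example): a controller +// would typically register event handlers on the informer before starting it. +// `cache` refers to k8s.io/client-go/tools/cache, and the handler bodies below +// are hypothetical placeholders for enqueueing work into a reconcile loop. +manifestWorkInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { /* enqueue the new work for reconciliation */ }, + UpdateFunc: func(oldObj, newObj interface{}) { /* enqueue the updated work */ }, +})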
+ +// Start the ManifestWork informer +go manifestWorkInformer.Informer().Run(ctx.Done()) + +``` \ No newline at end of file diff --git a/cloudevents/generic/agentclient.go b/cloudevents/generic/agentclient.go index 35868b0e..24142b92 100644 --- a/cloudevents/generic/agentclient.go +++ b/cloudevents/generic/agentclient.go @@ -18,6 +18,8 @@ import ( // // An agent is a component that handles the deployment of requested resources on the managed cluster and status report // to the source. +// +// TODO support limiting the message sending rate with a configuration. type CloudEventAgentClient[T ResourceObject] struct { cloudEventsOptions options.CloudEventsOptions sender cloudevents.Client diff --git a/cloudevents/generic/sourceclient.go b/cloudevents/generic/sourceclient.go index 7fa42af8..4085894a 100644 --- a/cloudevents/generic/sourceclient.go +++ b/cloudevents/generic/sourceclient.go @@ -19,6 +19,8 @@ import ( // // A source is a component that runs on a server, it can be a controller on the hub cluster or a RESTful service // handling resource requests. +// +// TODO support limiting the message sending rate with a configuration. type CloudEventSourceClient[T ResourceObject] struct { cloudEventsOptions options.CloudEventsOptions sender cloudevents.Client diff --git a/go.mod b/go.mod index ea2d4f0e..b6c31a30 100644 --- a/go.mod +++ b/go.mod @@ -9,6 +9,7 @@ require ( github.com/evanphx/json-patch v4.12.0+incompatible github.com/gogo/protobuf v1.3.2 github.com/google/uuid v1.2.0 + github.com/mochi-mqtt/server/v2 v2.3.0 github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.24.1 github.com/openshift/build-machinery-go v0.0.0-20230306181456-d321ffa04533 @@ -40,10 +41,13 @@ require ( github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/gofuzz v1.1.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.6 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -55,6 +59,8 @@ require ( github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect + github.com/rs/xid v1.4.0 // indirect + github.com/rs/zerolog v1.28.0 // indirect go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.24.0 // indirect diff --git a/go.sum b/go.sum index 2777058e..5c78931b 100644 --- a/go.sum +++ b/go.sum @@ -58,6 +58,7 @@ github.com/cloudevents/sdk-go/protocol/mqtt_paho/v2 v2.0.0-20230807084042-7f5ef3 github.com/cloudevents/sdk-go/v2 v2.14.0 h1:Nrob4FwVgi5L4tV9lhjzZcjYqFVyJzsA56CwPaPfv6s= github.com/cloudevents/sdk-go/v2 v2.14.0/go.mod h1:xDmKfzNjM8gBvjaF8ijFjM1VYOVUEeUfapHMUX1T5To= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -105,6 +106,7 @@ github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5F github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -170,6 +172,8 @@ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -177,6 +181,7 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -204,9 +209,15 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM= github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mochi-mqtt/server/v2 v2.3.0 h1:vcFb7X7ANH1Qy2yGHMvp86N9VxjoUkZpr5mkIbfMLfw= +github.com/mochi-mqtt/server/v2 
v2.3.0/go.mod h1:47GGVR0/5gbM1DzsI0f1yo25jcR1aaUIgj4dzmP5MNY= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -267,6 +278,10 @@ github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY= +github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.28.0 h1:MirSo27VyNi7RJYP3078AA1+Cyzd2GB66qy3aUHvsWY= +github.com/rs/zerolog v1.28.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -439,6 +454,8 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/test/integration/cloudevents/agent/agent.go b/test/integration/cloudevents/agent/agent.go new file mode 100644 index 00000000..6d372cbd --- /dev/null +++ b/test/integration/cloudevents/agent/agent.go @@ -0,0 +1,23 @@ +package agent + +import ( + "context" + + "open-cluster-management.io/api/cloudevents/generic/options/mqtt" + "open-cluster-management.io/api/cloudevents/work" + "open-cluster-management.io/api/cloudevents/work/agent/codec" +) + +func StartWorkAgent(ctx context.Context, clusterName string, config *mqtt.MQTTOptions) (*work.ClientHolder, error) { + clientHolder, err := work.NewClientHolderBuilder(clusterName, config). + WithClusterName(clusterName). + WithCodecs(codec.NewManifestCodec(nil)). 
+ NewClientHolder(ctx) + if err != nil { + return nil, err + } + + go clientHolder.ManifestWorkInformer().Informer().Run(ctx.Done()) + + return clientHolder, nil +} diff --git a/test/integration/cloudevents/source/client.go b/test/integration/cloudevents/source/client.go new file mode 100644 index 00000000..c2099a4d --- /dev/null +++ b/test/integration/cloudevents/source/client.go @@ -0,0 +1,138 @@ +package source + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "strconv" + + cloudevents "github.com/cloudevents/sdk-go/v2" + cloudeventstypes "github.com/cloudevents/sdk-go/v2/types" + "k8s.io/klog/v2" + + "open-cluster-management.io/api/cloudevents/generic" + "open-cluster-management.io/api/cloudevents/generic/options/mqtt" + "open-cluster-management.io/api/cloudevents/generic/types" + "open-cluster-management.io/api/cloudevents/work/payload" + workv1 "open-cluster-management.io/api/work/v1" +) + +type resourceCodec struct{} + +var _ generic.Codec[*Resource] = &resourceCodec{} + +func (c *resourceCodec) EventDataType() types.CloudEventsDataType { + return payload.ManifestEventDataType +} + +func (c *resourceCodec) Encode(source string, eventType types.CloudEventsType, resource *Resource) (*cloudevents.Event, error) { + if eventType.CloudEventsDataType != payload.ManifestEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + eventBuilder := types.NewEventBuilder(source, eventType). + WithResourceID(resource.ResourceID). + WithResourceVersion(resource.ResourceVersion). + WithClusterName(resource.Namespace) + + if !resource.GetDeletionTimestamp().IsZero() { + evt := eventBuilder.WithDeletionTimestamp(resource.GetDeletionTimestamp().Time).NewEvent() + return &evt, nil + } + + evt := eventBuilder.NewEvent() + + if err := evt.SetData(cloudevents.ApplicationJSON, &payload.Manifest{Manifest: resource.Spec}); err != nil { + return nil, fmt.Errorf("failed to encode manifests to cloud event: %v", err) + } + + return &evt, nil +} + +func (c *resourceCodec) Decode(evt *cloudevents.Event) (*Resource, error) { + eventType, err := types.ParseCloudEventsType(evt.Type()) + if err != nil { + return nil, fmt.Errorf("failed to parse cloud event type %s, %v", evt.Type(), err) + } + + if eventType.CloudEventsDataType != payload.ManifestEventDataType { + return nil, fmt.Errorf("unsupported cloudevents data type %s", eventType.CloudEventsDataType) + } + + evtExtensions := evt.Context.GetExtensions() + + resourceID, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceID]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceid extension: %v", err) + } + + resourceVersion, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionResourceVersion]) + if err != nil { + return nil, fmt.Errorf("failed to get resourceversion extension: %v", err) + } + + resourceVersionInt, err := strconv.ParseInt(resourceVersion, 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to convert resourceversion - %v to int64", resourceVersion) + } + + clusterName, err := cloudeventstypes.ToString(evtExtensions[types.ExtensionClusterName]) + if err != nil { + return nil, fmt.Errorf("failed to get clustername extension: %v", err) + } + + manifestStatus := &payload.ManifestStatus{} + if err := evt.DataAs(manifestStatus); err != nil { + return nil, fmt.Errorf("failed to unmarshal event data %s, %v", string(evt.Data()), err) + } + + resource := &Resource{ + ResourceID: resourceID, + ResourceVersion: resourceVersionInt, + 
Namespace: clusterName, + Status: ResourceStatus{ + Conditions: manifestStatus.Conditions, + }, + } + + return resource, nil +} + +type resourceLister struct{} + +var _ generic.Lister[*Resource] = &resourceLister{} + +func (resLister *resourceLister) List(listOpts types.ListOptions) ([]*Resource, error) { + return GetStore().List(listOpts.ClusterName), nil +} + +func StartResourceSourceClient(ctx context.Context, config *mqtt.MQTTOptions) (generic.CloudEventsClient[*Resource], error) { + client, err := generic.NewCloudEventSourceClient[*Resource]( + ctx, + mqtt.NewSourceOptions(config, "integration-test"), + &resourceLister{}, + func(obj *Resource) (string, error) { + statusBytes, err := json.Marshal(&workv1.ManifestWorkStatus{Conditions: obj.Status.Conditions}) + if err != nil { + return "", fmt.Errorf("failed to marshal resource status, %v", err) + } + return fmt.Sprintf("%x", sha256.Sum256(statusBytes)), nil + }, + &resourceCodec{}, + ) + + if err != nil { + return nil, err + } + + go func() { + if err := client.Subscribe(ctx, func(action types.ResourceAction, resource *Resource) error { + return GetStore().UpdateStatus(resource) + }); err != nil { + klog.Fatalf("failed to subscribe to mqtt broker, %v", err) + } + }() + + return client, nil +} diff --git a/test/integration/cloudevents/source/resource.go b/test/integration/cloudevents/source/resource.go new file mode 100644 index 00000000..85feb69b --- /dev/null +++ b/test/integration/cloudevents/source/resource.go @@ -0,0 +1,62 @@ +package source + +import ( + "fmt" + + "github.com/google/uuid" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + kubetypes "k8s.io/apimachinery/pkg/types" + + "open-cluster-management.io/api/cloudevents/generic" +) + +type ResourceStatus struct { + Conditions []metav1.Condition +} + +type Resource struct { + ResourceID string + ResourceVersion int64 + Namespace string + DeletionTimestamp *metav1.Time + Spec unstructured.Unstructured + Status ResourceStatus +} + +var _ generic.ResourceObject = &Resource{} + +func NewResource(namespace, name string) *Resource { + return &Resource{ + ResourceID: ResourceID(namespace, name), + ResourceVersion: 1, + Namespace: namespace, + Spec: unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "namespace": namespace, + "name": name, + }, + }, + }, + } +} + +func (r *Resource) GetUID() kubetypes.UID { + return kubetypes.UID(r.ResourceID) +} + +func (r *Resource) GetResourceVersion() string { + return fmt.Sprintf("%d", r.ResourceVersion) +} + +func (r *Resource) GetDeletionTimestamp() *metav1.Time { + return r.DeletionTimestamp +} + +func ResourceID(namespace, name string) string { + return uuid.NewSHA1(uuid.NameSpaceOID, []byte(fmt.Sprintf("resource-%s-%s", namespace, name))).String() +} diff --git a/test/integration/cloudevents/source/store.go b/test/integration/cloudevents/source/store.go new file mode 100644 index 00000000..ef137e95 --- /dev/null +++ b/test/integration/cloudevents/source/store.go @@ -0,0 +1,95 @@ +package source + +import ( + "fmt" + "sync" +) + +type memoryStore struct { + sync.RWMutex + resources map[string]*Resource +} + +var store *memoryStore +var once sync.Once + +func GetStore() *memoryStore { + once.Do(func() { + store = &memoryStore{ + resources: make(map[string]*Resource), + } + }) + + return store +} + +func (s *memoryStore) Add(resource *Resource) { + s.Lock() + defer s.Unlock() + + _, ok := 
s.resources[resource.ResourceID] + if !ok { + s.resources[resource.ResourceID] = resource + } +} + +func (s *memoryStore) Update(resource *Resource) error { + s.Lock() + defer s.Unlock() + + _, ok := s.resources[resource.ResourceID] + if !ok { + return fmt.Errorf("the resource %s does not exist", resource.ResourceID) + } + + s.resources[resource.ResourceID] = resource + return nil +} + +func (s *memoryStore) UpdateStatus(resource *Resource) error { + s.Lock() + defer s.Unlock() + + last, ok := s.resources[resource.ResourceID] + if !ok { + return fmt.Errorf("the resource %s does not exist", resource.ResourceID) + } + + last.Status = resource.Status + s.resources[resource.ResourceID] = last + return nil +} + +func (s *memoryStore) Delete(resourceID string) { + s.Lock() + defer s.Unlock() + + delete(s.resources, resourceID) +} + +func (s *memoryStore) Get(resourceID string) (*Resource, error) { + s.RLock() + defer s.RUnlock() + + resource, ok := s.resources[resourceID] + if !ok { + return nil, fmt.Errorf("failed to find resource %s", resourceID) + } + + return resource, nil +} + +func (s *memoryStore) List(namespace string) []*Resource { + s.RLock() + defer s.RUnlock() + + resources := []*Resource{} + for _, res := range s.resources { + if res.Namespace != namespace { + continue + } + + resources = append(resources, res) + } + return resources +} diff --git a/test/integration/cloudevents_test.go b/test/integration/cloudevents_test.go new file mode 100644 index 00000000..92e53f3f --- /dev/null +++ b/test/integration/cloudevents_test.go @@ -0,0 +1,315 @@ +package integration + +import ( + "context" + "encoding/json" + "fmt" + "time" + + jsonpatch "github.com/evanphx/json-patch" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + apitypes "k8s.io/apimachinery/pkg/types" + + "open-cluster-management.io/api/cloudevents/generic/types" + "open-cluster-management.io/api/cloudevents/work/payload" + "open-cluster-management.io/api/test/integration/cloudevents/agent" + "open-cluster-management.io/api/test/integration/cloudevents/source" + workv1 "open-cluster-management.io/api/work/v1" +) + +var _ = ginkgo.Describe("Cloudevents clients test", func() { + ginkgo.BeforeEach(func() { + ginkgo.By("init resource source store", func() { + source.GetStore().Add(source.NewResource("cluster1", "resource1")) + source.GetStore().Add(source.NewResource("cluster2", "resource1")) + }) + }) + + ginkgo.Context("Resync resources", func() { + ginkgo.It("resync resources between source and agent", func() { + ginkgo.By("start an agent on cluster1") + clusterName := "cluster1" + + clientHolder, err := agent.StartWorkAgent(context.TODO(), clusterName, mqttOptions) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + informer := clientHolder.ManifestWorkInformer() + lister := informer.Lister().ManifestWorks(clusterName) + agentWorkClient := clientHolder.ManifestWorks(clusterName) + + gomega.Eventually(func() error { + list, err := lister.List(labels.Everything()) + if err != nil { + return err + } + + // ensure there is only one work was synced on the cluster1 + if len(list) != 1 { + return fmt.Errorf("unexpected work list %v", list) + } + + // ensure the work can be get by work client + workName := source.ResourceID(clusterName, "resource1") + work, err := agentWorkClient.Get(context.TODO(), workName, metav1.GetOptions{}) + if err != nil { + return err + } + + newWork := work.DeepCopy() + 
newWork.Status = workv1.ManifestWorkStatus{Conditions: []metav1.Condition{{Type: "Created", Status: metav1.ConditionTrue}}} + + // only update the status on the agent local part + store := informer.Informer().GetStore() + if err := store.Update(newWork); err != nil { + return err + } + + return nil + }, 10*time.Second, 1*time.Second).Should(gomega.Succeed()) + + // resync the status from source + err = sourceCloudEventsClient.Resync(context.TODO()) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + resourceID := source.ResourceID(clusterName, "resource1") + resource, err := source.GetStore().Get(resourceID) + if err != nil { + return err + } + + // ensure the resource status is synced + if !meta.IsStatusConditionTrue(resource.Status.Conditions, "Created") { + return fmt.Errorf("unexpected status %v", resource.Status.Conditions) + } + + return nil + }, 10*time.Second, 1*time.Second).Should(gomega.Succeed()) + }) + }) + + ginkgo.Context("Publish a resource", func() { + ginkgo.It("send a resource to a cluster", func() { + ginkgo.By("start an agent on cluster2") + clusterName := "cluster2" + + clientHolder, err := agent.StartWorkAgent(context.TODO(), clusterName, mqttOptions) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + lister := clientHolder.ManifestWorkInformer().Lister().ManifestWorks(clusterName) + agentWorkClient := clientHolder.ManifestWorks(clusterName) + + gomega.Eventually(func() error { + list, err := lister.List(labels.Everything()) + if err != nil { + return err + } + + // ensure there is only one work was synced on the cluster2 + if len(list) != 1 { + return fmt.Errorf("unexpected work list %v", list) + } + + // ensure the work can be get by work client + workName := source.ResourceID(clusterName, "resource1") + _, err = agentWorkClient.Get(context.TODO(), workName, metav1.GetOptions{}) + if err != nil { + return err + } + + return nil + }, 10*time.Second, 1*time.Second).Should(gomega.Succeed()) + + newResourceName := "resource2" + ginkgo.By("create a new resource on the source and send it to the cluster2", func() { + newResource := source.NewResource(clusterName, newResourceName) + source.GetStore().Add(newResource) + + err := sourceCloudEventsClient.Publish(context.TODO(), types.CloudEventsType{ + CloudEventsDataType: payload.ManifestEventDataType, + SubResource: types.SubResourceSpec, + Action: "test_create_request", + }, newResource) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) + + ginkgo.By("receive the new resource on the cluster2", func() { + gomega.Eventually(func() error { + workName := source.ResourceID(clusterName, newResourceName) + work, err := agentWorkClient.Get(context.TODO(), workName, metav1.GetOptions{}) + if err != nil { + return err + } + + // add finalizers firstly + patchBytes, err := json.Marshal(map[string]interface{}{ + "metadata": map[string]interface{}{ + "uid": work.GetUID(), + "resourceVersion": work.GetResourceVersion(), + "finalizers": []string{"work-test-finalizer"}, + }, + }) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = agentWorkClient.Patch(context.TODO(), work.Name, apitypes.MergePatchType, patchBytes, metav1.PatchOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + work, err = agentWorkClient.Get(context.TODO(), workName, metav1.GetOptions{}) + if err != nil { + return err + } + + if len(work.Finalizers) != 1 { + return fmt.Errorf("expected finalizers on the work, but got %v", work.Finalizers) + } + + // update the work status + newWork := work.DeepCopy() + 
newWork.Status = workv1.ManifestWorkStatus{Conditions: []metav1.Condition{{Type: "Created", Status: metav1.ConditionTrue}}} + + oldData, err := json.Marshal(work) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + newData, err := json.Marshal(newWork) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + patchBytes, err = jsonpatch.CreateMergePatch(oldData, newData) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = agentWorkClient.Patch(context.TODO(), work.Name, apitypes.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + return nil + }, 10*time.Second, 1*time.Second).Should(gomega.Succeed()) + }) + + ginkgo.By("update the resource on the source and send it to the cluster2", func() { + var resource *source.Resource + var err error + + // ensure the resource is created on the cluster + resourceID := source.ResourceID(clusterName, newResourceName) + gomega.Eventually(func() error { + resource, err = source.GetStore().Get(resourceID) + if err != nil { + return err + } + + if !meta.IsStatusConditionTrue(resource.Status.Conditions, "Created") { + return fmt.Errorf("unexpected status %v", resource.Status.Conditions) + } + + return nil + }, 10*time.Second, 1*time.Second).Should(gomega.Succeed()) + + resource.ResourceVersion = resource.ResourceVersion + 1 + resource.Spec.Object["data"] = "test" + + err = source.GetStore().Update(resource) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + err = sourceCloudEventsClient.Publish(context.TODO(), types.CloudEventsType{ + CloudEventsDataType: payload.ManifestEventDataType, + SubResource: types.SubResourceSpec, + Action: "test_update_request", + }, resource) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) + + ginkgo.By("receive the updated resource on the cluster2", func() { + gomega.Eventually(func() error { + workName := source.ResourceID(clusterName, newResourceName) + work, err := agentWorkClient.Get(context.TODO(), workName, metav1.GetOptions{}) + if err != nil { + return err + } + + if len(work.Spec.Workload.Manifests) != 1 { + return fmt.Errorf("expected manifests in the work, but got %v", work) + } + + workload := map[string]any{} + if err := json.Unmarshal(work.Spec.Workload.Manifests[0].Raw, &workload); err != nil { + return err + } + + if workload["data"] != "test" { + return fmt.Errorf("unexpected workload %v", workload) + } + + return nil + }, 10*time.Second, 1*time.Second).Should(gomega.Succeed()) + }) + + ginkgo.By("mark the resource to deleting on the source and send it to cluster2", func() { + resourceID := source.ResourceID(clusterName, newResourceName) + resource, err := source.GetStore().Get(resourceID) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + resource.DeletionTimestamp = &metav1.Time{Time: time.Now()} + + err = source.GetStore().Update(resource) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + err = sourceCloudEventsClient.Publish(context.TODO(), types.CloudEventsType{ + CloudEventsDataType: payload.ManifestEventDataType, + SubResource: types.SubResourceSpec, + Action: "test_delete_request", + }, resource) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) + + ginkgo.By("receive the deleting resource on the cluster2", func() { + gomega.Eventually(func() error { + workName := source.ResourceID(clusterName, newResourceName) + work, err := agentWorkClient.Get(context.TODO(), workName, metav1.GetOptions{}) + if err != nil { + return err + } + + if work.DeletionTimestamp.IsZero() { + return fmt.Errorf("expected work is deleting, 
but got %v", work) + } + + // remove the finalizers + patchBytes, err := json.Marshal(map[string]interface{}{ + "metadata": map[string]interface{}{ + "uid": work.GetUID(), + "resourceVersion": work.GetResourceVersion(), + "finalizers": []string{}, + }, + }) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + _, err = agentWorkClient.Patch(context.TODO(), work.Name, apitypes.MergePatchType, patchBytes, metav1.PatchOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + return nil + }, 10*time.Second, 1*time.Second).Should(gomega.Succeed()) + }) + + ginkgo.By("delete the resource from the source", func() { + gomega.Eventually(func() error { + resourceID := source.ResourceID(clusterName, newResourceName) + resource, err := source.GetStore().Get(resourceID) + if err != nil { + return err + } + + if !meta.IsStatusConditionTrue(resource.Status.Conditions, "Deleted") { + return fmt.Errorf("unexpected status %v", resource.Status.Conditions) + } + + source.GetStore().Delete(resourceID) + + return nil + }, 10*time.Second, 1*time.Second).Should(gomega.Succeed()) + }) + }) + }) +}) diff --git a/test/integration/suite_test.go b/test/integration/suite_test.go index 1f6d9e3f..1dd76159 100644 --- a/test/integration/suite_test.go +++ b/test/integration/suite_test.go @@ -5,13 +5,15 @@ import ( "path/filepath" "testing" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/rand" - + mochimqtt "github.com/mochi-mqtt/server/v2" + "github.com/mochi-mqtt/server/v2/hooks/auth" + "github.com/mochi-mqtt/server/v2/listeners" "github.com/onsi/ginkgo" "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/rand" "k8s.io/client-go/kubernetes" "sigs.k8s.io/controller-runtime/pkg/envtest" @@ -19,15 +21,23 @@ import ( clusterv1client "open-cluster-management.io/api/client/cluster/clientset/versioned" operatorclientset "open-cluster-management.io/api/client/operator/clientset/versioned" workclientset "open-cluster-management.io/api/client/work/clientset/versioned" + "open-cluster-management.io/api/cloudevents/generic" + "open-cluster-management.io/api/cloudevents/generic/options/mqtt" + "open-cluster-management.io/api/test/integration/cloudevents/source" ) +const mqttBrokerHost = "127.0.0.1:1883" + var testEnv *envtest.Environment +var mqttBroker *mochimqtt.Server +var mqttOptions *mqtt.MQTTOptions var testNamespace string var kubernetesClient kubernetes.Interface var hubWorkClient workclientset.Interface var hubClusterClient clusterv1client.Interface var hubAddonClient addonv1alpha1client.Interface var operatorClient operatorclientset.Interface +var sourceCloudEventsClient generic.CloudEventsClient[*source.Resource] func TestIntegration(t *testing.T) { gomega.RegisterFailHandler(ginkgo.Fail) @@ -37,6 +47,20 @@ func TestIntegration(t *testing.T) { var _ = ginkgo.BeforeSuite(func(done ginkgo.Done) { ginkgo.By("bootstrapping test environment") + // start a MQTT broker + mqttBroker = mochimqtt.New(&mochimqtt.Options{}) + // allow all connections. 
+ err := mqttBroker.AddHook(new(auth.AllowHook), nil) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + err = mqttBroker.AddListener(listeners.NewTCP("mqtt-test-broker", mqttBrokerHost, nil)) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + go func() { + err := mqttBroker.Serve() + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }() + // start a kube-apiserver testEnv = &envtest.Environment{ ErrorIfCRDPathMissing: true, @@ -81,6 +105,11 @@ var _ = ginkgo.BeforeSuite(func(done ginkgo.Done) { }, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) + mqttOptions = mqtt.NewMQTTOptions() + mqttOptions.BrokerHost = mqttBrokerHost + sourceCloudEventsClient, err = source.StartResourceSourceClient(context.TODO(), mqttOptions) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + close(done) }, 300) @@ -91,6 +120,9 @@ var _ = ginkgo.AfterSuite(func() { Delete(context.TODO(), testNamespace, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = mqttBroker.Close() + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + err = testEnv.Stop() gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore new file mode 100644 index 00000000..cd3fcd1e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +.idea/ +*.iml diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS new file mode 100644 index 00000000..1931f400 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/AUTHORS @@ -0,0 +1,9 @@ +# This is the official list of Gorilla WebSocket authors for copyright +# purposes. +# +# Please keep the list sorted. + +Gary Burd +Google LLC (https://opensource.google.com/) +Joachim Bauch + diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE new file mode 100644 index 00000000..9171c972 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md new file mode 100644 index 00000000..2517a287 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/README.md @@ -0,0 +1,39 @@ +# Gorilla WebSocket + +[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) +[![CircleCI](https://circleci.com/gh/gorilla/websocket.svg?style=svg)](https://circleci.com/gh/gorilla/websocket) + +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the +[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. + + +--- + +⚠️ **[The Gorilla WebSocket Package is looking for a new maintainer](https://github.com/gorilla/websocket/issues/370)** + +--- + +### Documentation + +* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc) +* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) +* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) +* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) +* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) + +### Status + +The Gorilla WebSocket package provides a complete and tested implementation of +the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The +package API is stable. + +### Installation + + go get github.com/gorilla/websocket + +### Protocol Compliance + +The Gorilla WebSocket package passes the server tests in the [Autobahn Test +Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn +subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). + diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go new file mode 100644 index 00000000..2efd8355 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client.go @@ -0,0 +1,422 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "context" + "crypto/tls" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptrace" + "net/url" + "strings" + "time" +) + +// ErrBadHandshake is returned when the server response to opening handshake is +// invalid. +var ErrBadHandshake = errors.New("websocket: bad handshake") + +var errInvalidCompression = errors.New("websocket: invalid compression negotiation") + +// NewClient creates a new client connection using the given net connection. +// The URL u specifies the host and request URI. Use requestHeader to specify +// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies +// (Cookie). Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). 
+// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etc. +// +// Deprecated: Use Dialer instead. +func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { + d := Dialer{ + ReadBufferSize: readBufSize, + WriteBufferSize: writeBufSize, + NetDial: func(net, addr string) (net.Conn, error) { + return netConn, nil + }, + } + return d.Dial(u.String(), requestHeader) +} + +// A Dialer contains options for connecting to WebSocket server. +// +// It is safe to call Dialer's methods concurrently. +type Dialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // NetDialContext specifies the dial function for creating TCP connections. If + // NetDialContext is nil, NetDial is used. + NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // NetDialTLSContext specifies the dial function for creating TLS/TCP connections. If + // NetDialTLSContext is nil, NetDialContext is used. + // If NetDialTLSContext is set, Dial assumes the TLS handshake is done there and + // TLSClientConfig is ignored. + NetDialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + + // TLSClientConfig specifies the TLS configuration to use with tls.Client. + // If nil, the default configuration is used. + // If either NetDialTLS or NetDialTLSContext are set, Dial assumes the TLS handshake + // is done there and TLSClientConfig is ignored. + TLSClientConfig *tls.Config + + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer + // size is zero, then a useful default size is used. The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. + // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. + WriteBufferPool BufferPool + + // Subprotocols specifies the client's requested subprotocols. + Subprotocols []string + + // EnableCompression specifies if the client should attempt to negotiate + // per message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool + + // Jar specifies the cookie jar. + // If Jar is nil, cookies are not sent in requests and ignored + // in responses. + Jar http.CookieJar +} + +// Dial creates a new client connection by calling DialContext with a background context. 
+func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + return d.DialContext(context.Background(), urlStr, requestHeader) +} + +var errMalformedURL = errors.New("malformed ws or wss URL") + +func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { + hostPort = u.Host + hostNoPort = u.Host + if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { + hostNoPort = hostNoPort[:i] + } else { + switch u.Scheme { + case "wss": + hostPort += ":443" + case "https": + hostPort += ":443" + default: + hostPort += ":80" + } + } + return hostPort, hostNoPort +} + +// DefaultDialer is a dialer with all fields set to the default values. +var DefaultDialer = &Dialer{ + Proxy: http.ProxyFromEnvironment, + HandshakeTimeout: 45 * time.Second, +} + +// nilDialer is dialer to use when receiver is nil. +var nilDialer = *DefaultDialer + +// DialContext creates a new client connection. Use requestHeader to specify the +// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). +// Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// The context will be used in the request and in the Dialer. +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etcetera. The response body may not contain the entire response and does not +// need to be closed by the application. +func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + if d == nil { + d = &nilDialer + } + + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + + u, err := url.Parse(urlStr) + if err != nil { + return nil, nil, err + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + default: + return nil, nil, errMalformedURL + } + + if u.User != nil { + // User name and password are not allowed in websocket URIs. + return nil, nil, errMalformedURL + } + + req := &http.Request{ + Method: http.MethodGet, + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: u.Host, + } + req = req.WithContext(ctx) + + // Set the cookies present in the cookie jar of the dialer + if d.Jar != nil { + for _, cookie := range d.Jar.Cookies(u) { + req.AddCookie(cookie) + } + } + + // Set the request headers using the capitalization for names and values in + // RFC examples. Although the capitalization shouldn't matter, there are + // servers that depend on it. The Header.Set method is not used because the + // method canonicalizes the header names. 
+ req.Header["Upgrade"] = []string{"websocket"} + req.Header["Connection"] = []string{"Upgrade"} + req.Header["Sec-WebSocket-Key"] = []string{challengeKey} + req.Header["Sec-WebSocket-Version"] = []string{"13"} + if len(d.Subprotocols) > 0 { + req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} + } + for k, vs := range requestHeader { + switch { + case k == "Host": + if len(vs) > 0 { + req.Host = vs[0] + } + case k == "Upgrade" || + k == "Connection" || + k == "Sec-Websocket-Key" || + k == "Sec-Websocket-Version" || + k == "Sec-Websocket-Extensions" || + (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): + return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) + case k == "Sec-Websocket-Protocol": + req.Header["Sec-WebSocket-Protocol"] = vs + default: + req.Header[k] = vs + } + } + + if d.EnableCompression { + req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"} + } + + if d.HandshakeTimeout != 0 { + var cancel func() + ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout) + defer cancel() + } + + // Get network dial function. + var netDial func(network, add string) (net.Conn, error) + + switch u.Scheme { + case "http": + if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial + } + case "https": + if d.NetDialTLSContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialTLSContext(ctx, network, addr) + } + } else if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial + } + default: + return nil, nil, errMalformedURL + } + + if netDial == nil { + netDialer := &net.Dialer{} + netDial = func(network, addr string) (net.Conn, error) { + return netDialer.DialContext(ctx, network, addr) + } + } + + // If needed, wrap the dial function to set the connection deadline. + if deadline, ok := ctx.Deadline(); ok { + forwardDial := netDial + netDial = func(network, addr string) (net.Conn, error) { + c, err := forwardDial(network, addr) + if err != nil { + return nil, err + } + err = c.SetDeadline(deadline) + if err != nil { + c.Close() + return nil, err + } + return c, nil + } + } + + // If needed, wrap the dial function to connect through a proxy. 
+ if d.Proxy != nil { + proxyURL, err := d.Proxy(req) + if err != nil { + return nil, nil, err + } + if proxyURL != nil { + dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) + if err != nil { + return nil, nil, err + } + netDial = dialer.Dial + } + } + + hostPort, hostNoPort := hostPortNoPort(u) + trace := httptrace.ContextClientTrace(ctx) + if trace != nil && trace.GetConn != nil { + trace.GetConn(hostPort) + } + + netConn, err := netDial("tcp", hostPort) + if trace != nil && trace.GotConn != nil { + trace.GotConn(httptrace.GotConnInfo{ + Conn: netConn, + }) + } + if err != nil { + return nil, nil, err + } + + defer func() { + if netConn != nil { + netConn.Close() + } + }() + + if u.Scheme == "https" && d.NetDialTLSContext == nil { + // If NetDialTLSContext is set, assume that the TLS handshake has already been done + + cfg := cloneTLSConfig(d.TLSClientConfig) + if cfg.ServerName == "" { + cfg.ServerName = hostNoPort + } + tlsConn := tls.Client(netConn, cfg) + netConn = tlsConn + + if trace != nil && trace.TLSHandshakeStart != nil { + trace.TLSHandshakeStart() + } + err := doHandshake(ctx, tlsConn, cfg) + if trace != nil && trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) + } + + if err != nil { + return nil, nil, err + } + } + + conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil) + + if err := req.Write(netConn); err != nil { + return nil, nil, err + } + + if trace != nil && trace.GotFirstResponseByte != nil { + if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 { + trace.GotFirstResponseByte() + } + } + + resp, err := http.ReadResponse(conn.br, req) + if err != nil { + return nil, nil, err + } + + if d.Jar != nil { + if rc := resp.Cookies(); len(rc) > 0 { + d.Jar.SetCookies(u, rc) + } + } + + if resp.StatusCode != 101 || + !tokenListContainsValue(resp.Header, "Upgrade", "websocket") || + !tokenListContainsValue(resp.Header, "Connection", "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { + // Before closing the network connection on return from this + // function, slurp up some of the response to aid application + // debugging. + buf := make([]byte, 1024) + n, _ := io.ReadFull(resp.Body, buf) + resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + return nil, resp, ErrBadHandshake + } + + for _, ext := range parseExtensions(resp.Header) { + if ext[""] != "permessage-deflate" { + continue + } + _, snct := ext["server_no_context_takeover"] + _, cnct := ext["client_no_context_takeover"] + if !snct || !cnct { + return nil, resp, errInvalidCompression + } + conn.newCompressionWriter = compressNoContextTakeover + conn.newDecompressionReader = decompressNoContextTakeover + break + } + + resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") + + netConn.SetDeadline(time.Time{}) + netConn = nil // to avoid close in defer. + return conn, resp, nil +} + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return cfg.Clone() +} diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go new file mode 100644 index 00000000..813ffb1e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -0,0 +1,148 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "compress/flate" + "errors" + "io" + "strings" + "sync" +) + +const ( + minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 + maxCompressionLevel = flate.BestCompression + defaultCompressionLevel = 1 +) + +var ( + flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool + flateReaderPool = sync.Pool{New: func() interface{} { + return flate.NewReader(nil) + }} +) + +func decompressNoContextTakeover(r io.Reader) io.ReadCloser { + const tail = + // Add four bytes as specified in RFC + "\x00\x00\xff\xff" + + // Add final block to squelch unexpected EOF error from flate reader. + "\x01\x00\x00\xff\xff" + + fr, _ := flateReaderPool.Get().(io.ReadCloser) + fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) + return &flateReadWrapper{fr} +} + +func isValidCompressionLevel(level int) bool { + return minCompressionLevel <= level && level <= maxCompressionLevel +} + +func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { + p := &flateWriterPools[level-minCompressionLevel] + tw := &truncWriter{w: w} + fw, _ := p.Get().(*flate.Writer) + if fw == nil { + fw, _ = flate.NewWriter(tw, level) + } else { + fw.Reset(tw) + } + return &flateWriteWrapper{fw: fw, tw: tw, p: p} +} + +// truncWriter is an io.Writer that writes all but the last four bytes of the +// stream to another io.Writer. +type truncWriter struct { + w io.WriteCloser + n int + p [4]byte +} + +func (w *truncWriter) Write(p []byte) (int, error) { + n := 0 + + // fill buffer first for simplicity. + if w.n < len(w.p) { + n = copy(w.p[w.n:], p) + p = p[n:] + w.n += n + if len(p) == 0 { + return n, nil + } + } + + m := len(p) + if m > len(w.p) { + m = len(w.p) + } + + if nn, err := w.w.Write(w.p[:m]); err != nil { + return n + nn, err + } + + copy(w.p[:], w.p[m:]) + copy(w.p[len(w.p)-m:], p[len(p)-m:]) + nn, err := w.w.Write(p[:len(p)-m]) + return n + nn, err +} + +type flateWriteWrapper struct { + fw *flate.Writer + tw *truncWriter + p *sync.Pool +} + +func (w *flateWriteWrapper) Write(p []byte) (int, error) { + if w.fw == nil { + return 0, errWriteClosed + } + return w.fw.Write(p) +} + +func (w *flateWriteWrapper) Close() error { + if w.fw == nil { + return errWriteClosed + } + err1 := w.fw.Flush() + w.p.Put(w.fw) + w.fw = nil + if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { + return errors.New("websocket: internal error, unexpected bytes at end of flate stream") + } + err2 := w.tw.w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +type flateReadWrapper struct { + fr io.ReadCloser +} + +func (r *flateReadWrapper) Read(p []byte) (int, error) { + if r.fr == nil { + return 0, io.ErrClosedPipe + } + n, err := r.fr.Read(p) + if err == io.EOF { + // Preemptively place the reader back in the pool. This helps with + // scenarios where the application does not call NextReader() soon after + // this final read. + r.Close() + } + return n, err +} + +func (r *flateReadWrapper) Close() error { + if r.fr == nil { + return io.ErrClosedPipe + } + err := r.fr.Close() + flateReaderPool.Put(r.fr) + r.fr = nil + return err +} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go new file mode 100644 index 00000000..331eebc8 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -0,0 +1,1230 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "encoding/binary" + "errors" + "io" + "io/ioutil" + "math/rand" + "net" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" +) + +const ( + // Frame header byte 0 bits from Section 5.2 of RFC 6455 + finalBit = 1 << 7 + rsv1Bit = 1 << 6 + rsv2Bit = 1 << 5 + rsv3Bit = 1 << 4 + + // Frame header byte 1 bits from Section 5.2 of RFC 6455 + maskBit = 1 << 7 + + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask + maxControlFramePayloadSize = 125 + + writeWait = time.Second + + defaultReadBufferSize = 4096 + defaultWriteBufferSize = 4096 + + continuationFrame = 0 + noFrame = -1 +) + +// Close codes defined in RFC 6455, section 11.7. +const ( + CloseNormalClosure = 1000 + CloseGoingAway = 1001 + CloseProtocolError = 1002 + CloseUnsupportedData = 1003 + CloseNoStatusReceived = 1005 + CloseAbnormalClosure = 1006 + CloseInvalidFramePayloadData = 1007 + ClosePolicyViolation = 1008 + CloseMessageTooBig = 1009 + CloseMandatoryExtension = 1010 + CloseInternalServerErr = 1011 + CloseServiceRestart = 1012 + CloseTryAgainLater = 1013 + CloseTLSHandshake = 1015 +) + +// The message types are defined in RFC 6455, section 11.8. +const ( + // TextMessage denotes a text data message. The text message payload is + // interpreted as UTF-8 encoded text data. + TextMessage = 1 + + // BinaryMessage denotes a binary data message. + BinaryMessage = 2 + + // CloseMessage denotes a close control message. The optional message + // payload contains a numeric code and text. Use the FormatCloseMessage + // function to format a close message payload. + CloseMessage = 8 + + // PingMessage denotes a ping control message. The optional message payload + // is UTF-8 encoded text. + PingMessage = 9 + + // PongMessage denotes a pong control message. The optional message payload + // is UTF-8 encoded text. + PongMessage = 10 +) + +// ErrCloseSent is returned when the application writes a message to the +// connection after sending a close message. +var ErrCloseSent = errors.New("websocket: close sent") + +// ErrReadLimit is returned when reading a message that is larger than the +// read limit set for the connection. +var ErrReadLimit = errors.New("websocket: read limit exceeded") + +// netError satisfies the net Error interface. +type netError struct { + msg string + temporary bool + timeout bool +} + +func (e *netError) Error() string { return e.msg } +func (e *netError) Temporary() bool { return e.temporary } +func (e *netError) Timeout() bool { return e.timeout } + +// CloseError represents a close message. +type CloseError struct { + // Code is defined in RFC 6455, section 11.7. + Code int + + // Text is the optional text payload. + Text string +} + +func (e *CloseError) Error() string { + s := []byte("websocket: close ") + s = strconv.AppendInt(s, int64(e.Code), 10) + switch e.Code { + case CloseNormalClosure: + s = append(s, " (normal)"...) + case CloseGoingAway: + s = append(s, " (going away)"...) + case CloseProtocolError: + s = append(s, " (protocol error)"...) + case CloseUnsupportedData: + s = append(s, " (unsupported data)"...) + case CloseNoStatusReceived: + s = append(s, " (no status)"...) + case CloseAbnormalClosure: + s = append(s, " (abnormal closure)"...) + case CloseInvalidFramePayloadData: + s = append(s, " (invalid payload data)"...) + case ClosePolicyViolation: + s = append(s, " (policy violation)"...) 
+ case CloseMessageTooBig: + s = append(s, " (message too big)"...) + case CloseMandatoryExtension: + s = append(s, " (mandatory extension missing)"...) + case CloseInternalServerErr: + s = append(s, " (internal server error)"...) + case CloseTLSHandshake: + s = append(s, " (TLS handshake error)"...) + } + if e.Text != "" { + s = append(s, ": "...) + s = append(s, e.Text...) + } + return string(s) +} + +// IsCloseError returns boolean indicating whether the error is a *CloseError +// with one of the specified codes. +func IsCloseError(err error, codes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range codes { + if e.Code == code { + return true + } + } + } + return false +} + +// IsUnexpectedCloseError returns boolean indicating whether the error is a +// *CloseError with a code not in the list of expected codes. +func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range expectedCodes { + if e.Code == code { + return false + } + } + return true + } + return false +} + +var ( + errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} + errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} + errBadWriteOpCode = errors.New("websocket: bad write message type") + errWriteClosed = errors.New("websocket: write closed") + errInvalidControlFrame = errors.New("websocket: invalid control frame") +) + +func newMaskKey() [4]byte { + n := rand.Uint32() + return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} +} + +func hideTempErr(err error) error { + if e, ok := err.(net.Error); ok && e.Temporary() { + err = &netError{msg: e.Error(), timeout: e.Timeout()} + } + return err +} + +func isControl(frameType int) bool { + return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage +} + +func isData(frameType int) bool { + return frameType == TextMessage || frameType == BinaryMessage +} + +var validReceivedCloseCodes = map[int]bool{ + // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number + + CloseNormalClosure: true, + CloseGoingAway: true, + CloseProtocolError: true, + CloseUnsupportedData: true, + CloseNoStatusReceived: false, + CloseAbnormalClosure: false, + CloseInvalidFramePayloadData: true, + ClosePolicyViolation: true, + CloseMessageTooBig: true, + CloseMandatoryExtension: true, + CloseInternalServerErr: true, + CloseServiceRestart: true, + CloseTryAgainLater: true, + CloseTLSHandshake: false, +} + +func isValidReceivedCloseCode(code int) bool { + return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) +} + +// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this +// interface. The type of the value stored in a pool is not specified. +type BufferPool interface { + // Get gets a value from the pool or returns nil if the pool is empty. + Get() interface{} + // Put adds a value to the pool. + Put(interface{}) +} + +// writePoolData is the type added to the write buffer pool. This wrapper is +// used to prevent applications from peeking at and depending on the values +// added to the pool. +type writePoolData struct{ buf []byte } + +// The Conn type represents a WebSocket connection. +type Conn struct { + conn net.Conn + isServer bool + subprotocol string + + // Write fields + mu chan struct{} // used as mutex to protect write to conn + writeBuf []byte // frame is constructed in this buffer. 
+ writePool BufferPool + writeBufSize int + writeDeadline time.Time + writer io.WriteCloser // the current writer returned to the application + isWriting bool // for best-effort concurrent write detection + + writeErrMu sync.Mutex + writeErr error + + enableWriteCompression bool + compressionLevel int + newCompressionWriter func(io.WriteCloser, int) io.WriteCloser + + // Read fields + reader io.ReadCloser // the current reader returned to the application + readErr error + br *bufio.Reader + // bytes remaining in current frame. + // set setReadRemaining to safely update this value and prevent overflow + readRemaining int64 + readFinal bool // true the current message has more frames. + readLength int64 // Message size. + readLimit int64 // Maximum message size. + readMaskPos int + readMaskKey [4]byte + handlePong func(string) error + handlePing func(string) error + handleClose func(int, string) error + readErrCount int + messageReader *messageReader // the current low-level reader + + readDecompress bool // whether last read frame had RSV1 set + newDecompressionReader func(io.Reader) io.ReadCloser +} + +func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn { + + if br == nil { + if readBufferSize == 0 { + readBufferSize = defaultReadBufferSize + } else if readBufferSize < maxControlFramePayloadSize { + // must be large enough for control frame + readBufferSize = maxControlFramePayloadSize + } + br = bufio.NewReaderSize(conn, readBufferSize) + } + + if writeBufferSize <= 0 { + writeBufferSize = defaultWriteBufferSize + } + writeBufferSize += maxFrameHeaderSize + + if writeBuf == nil && writeBufferPool == nil { + writeBuf = make([]byte, writeBufferSize) + } + + mu := make(chan struct{}, 1) + mu <- struct{}{} + c := &Conn{ + isServer: isServer, + br: br, + conn: conn, + mu: mu, + readFinal: true, + writeBuf: writeBuf, + writePool: writeBufferPool, + writeBufSize: writeBufferSize, + enableWriteCompression: true, + compressionLevel: defaultCompressionLevel, + } + c.SetCloseHandler(nil) + c.SetPingHandler(nil) + c.SetPongHandler(nil) + return c +} + +// setReadRemaining tracks the number of bytes remaining on the connection. If n +// overflows, an ErrReadLimit is returned. +func (c *Conn) setReadRemaining(n int64) error { + if n < 0 { + return ErrReadLimit + } + + c.readRemaining = n + return nil +} + +// Subprotocol returns the negotiated protocol for the connection. +func (c *Conn) Subprotocol() string { + return c.subprotocol +} + +// Close closes the underlying network connection without sending or waiting +// for a close message. +func (c *Conn) Close() error { + return c.conn.Close() +} + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. 
+func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +// Write methods + +func (c *Conn) writeFatal(err error) error { + err = hideTempErr(err) + c.writeErrMu.Lock() + if c.writeErr == nil { + c.writeErr = err + } + c.writeErrMu.Unlock() + return err +} + +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + c.br.Discard(len(p)) + return p, err +} + +func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error { + <-c.mu + defer func() { c.mu <- struct{}{} }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + if len(buf1) == 0 { + _, err = c.conn.Write(buf0) + } else { + err = c.writeBufs(buf0, buf1) + } + if err != nil { + return c.writeFatal(err) + } + if frameType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return nil +} + +func (c *Conn) writeBufs(bufs ...[]byte) error { + b := net.Buffers(bufs) + _, err := b.WriteTo(c.conn) + return err +} + +// WriteControl writes a control message with the given deadline. The allowed +// message types are CloseMessage, PingMessage and PongMessage. +func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { + if !isControl(messageType) { + return errBadWriteOpCode + } + if len(data) > maxControlFramePayloadSize { + return errInvalidControlFrame + } + + b0 := byte(messageType) | finalBit + b1 := byte(len(data)) + if !c.isServer { + b1 |= maskBit + } + + buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) + buf = append(buf, b0, b1) + + if c.isServer { + buf = append(buf, data...) + } else { + key := newMaskKey() + buf = append(buf, key[:]...) + buf = append(buf, data...) + maskBytes(key, 0, buf[6:]) + } + + d := 1000 * time.Hour + if !deadline.IsZero() { + d = deadline.Sub(time.Now()) + if d < 0 { + return errWriteTimeout + } + } + + timer := time.NewTimer(d) + select { + case <-c.mu: + timer.Stop() + case <-timer.C: + return errWriteTimeout + } + defer func() { c.mu <- struct{}{} }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + _, err = c.conn.Write(buf) + if err != nil { + return c.writeFatal(err) + } + if messageType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return err +} + +// beginMessage prepares a connection and message writer for a new message. +func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { + // Close previous writer if not already closed by the application. It's + // probably better to return an error in this situation, but we cannot + // change this without breaking existing applications. + if c.writer != nil { + c.writer.Close() + c.writer = nil + } + + if !isControl(messageType) && !isData(messageType) { + return errBadWriteOpCode + } + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + mw.c = c + mw.frameType = messageType + mw.pos = maxFrameHeaderSize + + if c.writeBuf == nil { + wpd, ok := c.writePool.Get().(writePoolData) + if ok { + c.writeBuf = wpd.buf + } else { + c.writeBuf = make([]byte, c.writeBufSize) + } + } + return nil +} + +// NextWriter returns a writer for the next message to send. The writer's Close +// method flushes the complete message to the network. +// +// There can be at most one open writer on a connection. 
NextWriter closes the +// previous writer if the application has not already done so. +// +// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and +// PongMessage) are supported. +func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { + return nil, err + } + c.writer = &mw + if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { + w := c.newCompressionWriter(c.writer, c.compressionLevel) + mw.compress = true + c.writer = w + } + return c.writer, nil +} + +type messageWriter struct { + c *Conn + compress bool // whether next call to flushFrame should set RSV1 + pos int // end of data in writeBuf. + frameType int // type of the current frame. + err error +} + +func (w *messageWriter) endMessage(err error) error { + if w.err != nil { + return err + } + c := w.c + w.err = err + c.writer = nil + if c.writePool != nil { + c.writePool.Put(writePoolData{buf: c.writeBuf}) + c.writeBuf = nil + } + return err +} + +// flushFrame writes buffered data and extra as a frame to the network. The +// final argument indicates that this is the last frame in the message. +func (w *messageWriter) flushFrame(final bool, extra []byte) error { + c := w.c + length := w.pos - maxFrameHeaderSize + len(extra) + + // Check for invalid control frames. + if isControl(w.frameType) && + (!final || length > maxControlFramePayloadSize) { + return w.endMessage(errInvalidControlFrame) + } + + b0 := byte(w.frameType) + if final { + b0 |= finalBit + } + if w.compress { + b0 |= rsv1Bit + } + w.compress = false + + b1 := byte(0) + if !c.isServer { + b1 |= maskBit + } + + // Assume that the frame starts at beginning of c.writeBuf. + framePos := 0 + if c.isServer { + // Adjust up if mask not included in the header. + framePos = 4 + } + + switch { + case length >= 65536: + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 127 + binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) + case length > 125: + framePos += 6 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 126 + binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) + default: + framePos += 8 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | byte(length) + } + + if !c.isServer { + key := newMaskKey() + copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) + if len(extra) > 0 { + return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))) + } + } + + // Write the buffers to the connection with best-effort detection of + // concurrent writes. See the concurrency section in the package + // documentation for more info. + + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + + err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) + + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + + if err != nil { + return w.endMessage(err) + } + + if final { + w.endMessage(errWriteClosed) + return nil + } + + // Setup for next frame. 
+ w.pos = maxFrameHeaderSize + w.frameType = continuationFrame + return nil +} + +func (w *messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.pos + if n <= 0 { + if err := w.flushFrame(false, nil); err != nil { + return 0, err + } + n = len(w.c.writeBuf) - w.pos + } + if n > max { + n = max + } + return n, nil +} + +func (w *messageWriter) Write(p []byte) (int, error) { + if w.err != nil { + return 0, w.err + } + + if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { + // Don't buffer large messages. + err := w.flushFrame(false, p) + if err != nil { + return 0, err + } + return len(p), nil + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) WriteString(p string) (int, error) { + if w.err != nil { + return 0, w.err + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if w.err != nil { + return 0, w.err + } + for { + if w.pos == len(w.c.writeBuf) { + err = w.flushFrame(false, nil) + if err != nil { + break + } + } + var n int + n, err = r.Read(w.c.writeBuf[w.pos:]) + w.pos += n + nn += int64(n) + if err != nil { + if err == io.EOF { + err = nil + } + break + } + } + return nn, err +} + +func (w *messageWriter) Close() error { + if w.err != nil { + return w.err + } + return w.flushFrame(true, nil) +} + +// WritePreparedMessage writes prepared message into connection. +func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { + frameType, frameData, err := pm.frame(prepareKey{ + isServer: c.isServer, + compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), + compressionLevel: c.compressionLevel, + }) + if err != nil { + return err + } + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + err = c.write(frameType, c.writeDeadline, frameData, nil) + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + return err +} + +// WriteMessage is a helper method for getting a writer using NextWriter, +// writing the message and closing the writer. +func (c *Conn) WriteMessage(messageType int, data []byte) error { + + if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { + // Fast path with no allocations and single frame. + + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { + return err + } + n := copy(c.writeBuf[mw.pos:], data) + mw.pos += n + data = data[n:] + return mw.flushFrame(true, data) + } + + w, err := c.NextWriter(messageType) + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + return w.Close() +} + +// SetWriteDeadline sets the write deadline on the underlying network +// connection. After a write has timed out, the websocket state is corrupt and +// all future writes will return an error. A zero value for t means writes will +// not time out. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// Read methods + +func (c *Conn) advanceFrame() (int, error) { + // 1. Skip remainder of previous frame. 
+ + if c.readRemaining > 0 { + if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + return noFrame, err + } + } + + // 2. Read and parse first two bytes of frame header. + // To aid debugging, collect and report all errors in the first two bytes + // of the header. + + var errors []string + + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + frameType := int(p[0] & 0xf) + final := p[0]&finalBit != 0 + rsv1 := p[0]&rsv1Bit != 0 + rsv2 := p[0]&rsv2Bit != 0 + rsv3 := p[0]&rsv3Bit != 0 + mask := p[1]&maskBit != 0 + c.setReadRemaining(int64(p[1] & 0x7f)) + + c.readDecompress = false + if rsv1 { + if c.newDecompressionReader != nil { + c.readDecompress = true + } else { + errors = append(errors, "RSV1 set") + } + } + + if rsv2 { + errors = append(errors, "RSV2 set") + } + + if rsv3 { + errors = append(errors, "RSV3 set") + } + + switch frameType { + case CloseMessage, PingMessage, PongMessage: + if c.readRemaining > maxControlFramePayloadSize { + errors = append(errors, "len > 125 for control") + } + if !final { + errors = append(errors, "FIN not set on control") + } + case TextMessage, BinaryMessage: + if !c.readFinal { + errors = append(errors, "data before FIN") + } + c.readFinal = final + case continuationFrame: + if c.readFinal { + errors = append(errors, "continuation after FIN") + } + c.readFinal = final + default: + errors = append(errors, "bad opcode "+strconv.Itoa(frameType)) + } + + if mask != c.isServer { + errors = append(errors, "bad MASK") + } + + if len(errors) > 0 { + return noFrame, c.handleProtocolError(strings.Join(errors, ", ")) + } + + // 3. Read and parse frame length as per + // https://tools.ietf.org/html/rfc6455#section-5.2 + // + // The length of the "Payload data", in bytes: if 0-125, that is the payload + // length. + // - If 126, the following 2 bytes interpreted as a 16-bit unsigned + // integer are the payload length. + // - If 127, the following 8 bytes interpreted as + // a 64-bit unsigned integer (the most significant bit MUST be 0) are the + // payload length. Multibyte length quantities are expressed in network byte + // order. + + switch c.readRemaining { + case 126: + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil { + return noFrame, err + } + case 127: + p, err := c.read(8) + if err != nil { + return noFrame, err + } + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil { + return noFrame, err + } + } + + // 4. Handle frame masking. + + if mask { + c.readMaskPos = 0 + p, err := c.read(len(c.readMaskKey)) + if err != nil { + return noFrame, err + } + copy(c.readMaskKey[:], p) + } + + // 5. For text and binary messages, enforce read limit and return. + + if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { + + c.readLength += c.readRemaining + // Don't allow readLength to overflow in the presence of a large readRemaining + // counter. + if c.readLength < 0 { + return noFrame, ErrReadLimit + } + + if c.readLimit > 0 && c.readLength > c.readLimit { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + return noFrame, ErrReadLimit + } + + return frameType, nil + } + + // 6. Read control frame payload. 
+ + var payload []byte + if c.readRemaining > 0 { + payload, err = c.read(int(c.readRemaining)) + c.setReadRemaining(0) + if err != nil { + return noFrame, err + } + if c.isServer { + maskBytes(c.readMaskKey, 0, payload) + } + } + + // 7. Process control frame payload. + + switch frameType { + case PongMessage: + if err := c.handlePong(string(payload)); err != nil { + return noFrame, err + } + case PingMessage: + if err := c.handlePing(string(payload)); err != nil { + return noFrame, err + } + case CloseMessage: + closeCode := CloseNoStatusReceived + closeText := "" + if len(payload) >= 2 { + closeCode = int(binary.BigEndian.Uint16(payload)) + if !isValidReceivedCloseCode(closeCode) { + return noFrame, c.handleProtocolError("bad close code " + strconv.Itoa(closeCode)) + } + closeText = string(payload[2:]) + if !utf8.ValidString(closeText) { + return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") + } + } + if err := c.handleClose(closeCode, closeText); err != nil { + return noFrame, err + } + return noFrame, &CloseError{Code: closeCode, Text: closeText} + } + + return frameType, nil +} + +func (c *Conn) handleProtocolError(message string) error { + data := FormatCloseMessage(CloseProtocolError, message) + if len(data) > maxControlFramePayloadSize { + data = data[:maxControlFramePayloadSize] + } + c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)) + return errors.New("websocket: " + message) +} + +// NextReader returns the next data message received from the peer. The +// returned messageType is either TextMessage or BinaryMessage. +// +// There can be at most one open reader on a connection. NextReader discards +// the previous message if the application has not already consumed it. +// +// Applications must break out of the application's read loop when this method +// returns a non-nil error value. Errors returned from this method are +// permanent. Once this method returns a non-nil error, all subsequent calls to +// this method return the same error. +func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { + // Close previous reader, only relevant for decompression. + if c.reader != nil { + c.reader.Close() + c.reader = nil + } + + c.messageReader = nil + c.readLength = 0 + + for c.readErr == nil { + frameType, err := c.advanceFrame() + if err != nil { + c.readErr = hideTempErr(err) + break + } + + if frameType == TextMessage || frameType == BinaryMessage { + c.messageReader = &messageReader{c} + c.reader = c.messageReader + if c.readDecompress { + c.reader = c.newDecompressionReader(c.reader) + } + return frameType, c.reader, nil + } + } + + // Applications that do handle the error returned from this method spin in + // tight loop on connection failure. To help application developers detect + // this error, panic on repeated reads to the failed connection. 
+ c.readErrCount++ + if c.readErrCount >= 1000 { + panic("repeated read on failed websocket connection") + } + + return noFrame, nil, c.readErr +} + +type messageReader struct{ c *Conn } + +func (r *messageReader) Read(b []byte) (int, error) { + c := r.c + if c.messageReader != r { + return 0, io.EOF + } + + for c.readErr == nil { + + if c.readRemaining > 0 { + if int64(len(b)) > c.readRemaining { + b = b[:c.readRemaining] + } + n, err := c.br.Read(b) + c.readErr = hideTempErr(err) + if c.isServer { + c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) + } + rem := c.readRemaining + rem -= int64(n) + c.setReadRemaining(rem) + if c.readRemaining > 0 && c.readErr == io.EOF { + c.readErr = errUnexpectedEOF + } + return n, c.readErr + } + + if c.readFinal { + c.messageReader = nil + return 0, io.EOF + } + + frameType, err := c.advanceFrame() + switch { + case err != nil: + c.readErr = hideTempErr(err) + case frameType == TextMessage || frameType == BinaryMessage: + c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") + } + } + + err := c.readErr + if err == io.EOF && c.messageReader == r { + err = errUnexpectedEOF + } + return 0, err +} + +func (r *messageReader) Close() error { + return nil +} + +// ReadMessage is a helper method for getting a reader using NextReader and +// reading from that reader to a buffer. +func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { + var r io.Reader + messageType, r, err = c.NextReader() + if err != nil { + return messageType, nil, err + } + p, err = ioutil.ReadAll(r) + return messageType, p, err +} + +// SetReadDeadline sets the read deadline on the underlying network connection. +// After a read has timed out, the websocket connection state is corrupt and +// all future reads will return an error. A zero value for t means reads will +// not time out. +func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a +// message exceeds the limit, the connection sends a close message to the peer +// and returns ErrReadLimit to the application. +func (c *Conn) SetReadLimit(limit int64) { + c.readLimit = limit +} + +// CloseHandler returns the current close handler +func (c *Conn) CloseHandler() func(code int, text string) error { + return c.handleClose +} + +// SetCloseHandler sets the handler for close messages received from the peer. +// The code argument to h is the received close code or CloseNoStatusReceived +// if the close message is empty. The default close handler sends a close +// message back to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// close messages as described in the section on Control Messages above. +// +// The connection read methods return a CloseError when a close message is +// received. Most applications should handle close messages as part of their +// normal error handling. Applications should only set a close handler when the +// application must perform some action before sending a close message back to +// the peer. 
+func (c *Conn) SetCloseHandler(h func(code int, text string) error) { + if h == nil { + h = func(code int, text string) error { + message := FormatCloseMessage(code, "") + c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) + return nil + } + } + c.handleClose = h +} + +// PingHandler returns the current ping handler +func (c *Conn) PingHandler() func(appData string) error { + return c.handlePing +} + +// SetPingHandler sets the handler for ping messages received from the peer. +// The appData argument to h is the PING message application data. The default +// ping handler sends a pong to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// ping messages as described in the section on Control Messages above. +func (c *Conn) SetPingHandler(h func(appData string) error) { + if h == nil { + h = func(message string) error { + err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) + if err == ErrCloseSent { + return nil + } else if e, ok := err.(net.Error); ok && e.Temporary() { + return nil + } + return err + } + } + c.handlePing = h +} + +// PongHandler returns the current pong handler +func (c *Conn) PongHandler() func(appData string) error { + return c.handlePong +} + +// SetPongHandler sets the handler for pong messages received from the peer. +// The appData argument to h is the PONG message application data. The default +// pong handler does nothing. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// pong messages as described in the section on Control Messages above. +func (c *Conn) SetPongHandler(h func(appData string) error) { + if h == nil { + h = func(string) error { return nil } + } + c.handlePong = h +} + +// UnderlyingConn returns the internal net.Conn. This can be used to further +// modifications to connection specific flags. +func (c *Conn) UnderlyingConn() net.Conn { + return c.conn +} + +// EnableWriteCompression enables and disables write compression of +// subsequent text and binary messages. This function is a noop if +// compression was not negotiated with the peer. +func (c *Conn) EnableWriteCompression(enable bool) { + c.enableWriteCompression = enable +} + +// SetCompressionLevel sets the flate compression level for subsequent text and +// binary messages. This function is a noop if compression was not negotiated +// with the peer. See the compress/flate package for a description of +// compression levels. +func (c *Conn) SetCompressionLevel(level int) error { + if !isValidCompressionLevel(level) { + return errors.New("websocket: invalid compression level") + } + c.compressionLevel = level + return nil +} + +// FormatCloseMessage formats closeCode and text as a WebSocket close message. +// An empty message is returned for code CloseNoStatusReceived. +func FormatCloseMessage(closeCode int, text string) []byte { + if closeCode == CloseNoStatusReceived { + // Return empty message because it's illegal to send + // CloseNoStatusReceived. Return non-nil value in case application + // checks for nil. 
+ return []byte{} + } + buf := make([]byte, 2+len(text)) + binary.BigEndian.PutUint16(buf, uint16(closeCode)) + copy(buf[2:], text) + return buf +} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go new file mode 100644 index 00000000..8db0cef9 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/doc.go @@ -0,0 +1,227 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements the WebSocket protocol defined in RFC 6455. +// +// Overview +// +// The Conn type represents a WebSocket connection. A server application calls +// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: +// +// var upgrader = websocket.Upgrader{ +// ReadBufferSize: 1024, +// WriteBufferSize: 1024, +// } +// +// func handler(w http.ResponseWriter, r *http.Request) { +// conn, err := upgrader.Upgrade(w, r, nil) +// if err != nil { +// log.Println(err) +// return +// } +// ... Use conn to send and receive messages. +// } +// +// Call the connection's WriteMessage and ReadMessage methods to send and +// receive messages as a slice of bytes. This snippet of code shows how to echo +// messages using these methods: +// +// for { +// messageType, p, err := conn.ReadMessage() +// if err != nil { +// log.Println(err) +// return +// } +// if err := conn.WriteMessage(messageType, p); err != nil { +// log.Println(err) +// return +// } +// } +// +// In above snippet of code, p is a []byte and messageType is an int with value +// websocket.BinaryMessage or websocket.TextMessage. +// +// An application can also send and receive messages using the io.WriteCloser +// and io.Reader interfaces. To send a message, call the connection NextWriter +// method to get an io.WriteCloser, write the message to the writer and close +// the writer when done. To receive a message, call the connection NextReader +// method to get an io.Reader and read until io.EOF is returned. This snippet +// shows how to echo messages using the NextWriter and NextReader methods: +// +// for { +// messageType, r, err := conn.NextReader() +// if err != nil { +// return +// } +// w, err := conn.NextWriter(messageType) +// if err != nil { +// return err +// } +// if _, err := io.Copy(w, r); err != nil { +// return err +// } +// if err := w.Close(); err != nil { +// return err +// } +// } +// +// Data Messages +// +// The WebSocket protocol distinguishes between text and binary data messages. +// Text messages are interpreted as UTF-8 encoded text. The interpretation of +// binary messages is left to the application. +// +// This package uses the TextMessage and BinaryMessage integer constants to +// identify the two data message types. The ReadMessage and NextReader methods +// return the type of the received message. The messageType argument to the +// WriteMessage and NextWriter methods specifies the type of a sent message. +// +// It is the application's responsibility to ensure that text messages are +// valid UTF-8 encoded text. +// +// Control Messages +// +// The WebSocket protocol defines three types of control messages: close, ping +// and pong. Call the connection WriteControl, WriteMessage or NextWriter +// methods to send a control message to the peer. 
+// +// Connections handle received close messages by calling the handler function +// set with the SetCloseHandler method and by returning a *CloseError from the +// NextReader, ReadMessage or the message Read method. The default close +// handler sends a close message to the peer. +// +// Connections handle received ping messages by calling the handler function +// set with the SetPingHandler method. The default ping handler sends a pong +// message to the peer. +// +// Connections handle received pong messages by calling the handler function +// set with the SetPongHandler method. The default pong handler does nothing. +// If an application sends ping messages, then the application should set a +// pong handler to receive the corresponding pong. +// +// The control message handler functions are called from the NextReader, +// ReadMessage and message reader Read methods. The default close and ping +// handlers can block these methods for a short time when the handler writes to +// the connection. +// +// The application must read the connection to process close, ping and pong +// messages sent from the peer. If the application is not otherwise interested +// in messages from the peer, then the application should start a goroutine to +// read and discard messages from the peer. A simple example is: +// +// func readLoop(c *websocket.Conn) { +// for { +// if _, _, err := c.NextReader(); err != nil { +// c.Close() +// break +// } +// } +// } +// +// Concurrency +// +// Connections support one concurrent reader and one concurrent writer. +// +// Applications are responsible for ensuring that no more than one goroutine +// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, +// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and +// that no more than one goroutine calls the read methods (NextReader, +// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) +// concurrently. +// +// The Close and WriteControl methods can be called concurrently with all other +// methods. +// +// Origin Considerations +// +// Web browsers allow Javascript applications to open a WebSocket connection to +// any host. It's up to the server to enforce an origin policy using the Origin +// request header sent by the browser. +// +// The Upgrader calls the function specified in the CheckOrigin field to check +// the origin. If the CheckOrigin function returns false, then the Upgrade +// method fails the WebSocket handshake with HTTP status 403. +// +// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail +// the handshake if the Origin request header is present and the Origin host is +// not equal to the Host request header. +// +// The deprecated package-level Upgrade function does not perform origin +// checking. The application is responsible for checking the Origin header +// before calling the Upgrade function. +// +// Buffers +// +// Connections buffer network input and output to reduce the number +// of system calls when reading or writing messages. +// +// Write buffers are also used for constructing WebSocket frames. See RFC 6455, +// Section 5 for a discussion of message framing. A WebSocket frame header is +// written to the network each time a write buffer is flushed to the network. +// Decreasing the size of the write buffer can increase the amount of framing +// overhead on the connection. +// +// The buffer sizes in bytes are specified by the ReadBufferSize and +// WriteBufferSize fields in the Dialer and Upgrader. 
The Dialer uses a default +// size of 4096 when a buffer size field is set to zero. The Upgrader reuses +// buffers created by the HTTP server when a buffer size field is set to zero. +// The HTTP server buffers have a size of 4096 at the time of this writing. +// +// The buffer sizes do not limit the size of a message that can be read or +// written by a connection. +// +// Buffers are held for the lifetime of the connection by default. If the +// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the +// write buffer only when writing a message. +// +// Applications should tune the buffer sizes to balance memory use and +// performance. Increasing the buffer size uses more memory, but can reduce the +// number of system calls to read or write the network. In the case of writing, +// increasing the buffer size can reduce the number of frame headers written to +// the network. +// +// Some guidelines for setting buffer parameters are: +// +// Limit the buffer sizes to the maximum expected message size. Buffers larger +// than the largest message do not provide any benefit. +// +// Depending on the distribution of message sizes, setting the buffer size to +// a value less than the maximum expected message size can greatly reduce memory +// use with a small impact on performance. Here's an example: If 99% of the +// messages are smaller than 256 bytes and the maximum message size is 512 +// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls +// than a buffer size of 512 bytes. The memory savings is 50%. +// +// A write buffer pool is useful when the application has a modest number +// writes over a large number of connections. when buffers are pooled, a larger +// buffer size has a reduced impact on total memory use and has the benefit of +// reducing system calls and frame overhead. +// +// Compression EXPERIMENTAL +// +// Per message compression extensions (RFC 7692) are experimentally supported +// by this package in a limited capacity. Setting the EnableCompression option +// to true in Dialer or Upgrader will attempt to negotiate per message deflate +// support. +// +// var upgrader = websocket.Upgrader{ +// EnableCompression: true, +// } +// +// If compression was successfully negotiated with the connection's peer, any +// message received in compressed form will be automatically decompressed. +// All Read methods will return uncompressed bytes. +// +// Per message compression of messages written to a connection can be enabled +// or disabled by calling the corresponding Conn method: +// +// conn.EnableWriteCompression(false) +// +// Currently this package does not support compression with "context takeover". +// This means that messages must be compressed and decompressed in isolation, +// without retaining sliding window or dictionary state across messages. For +// more details refer to RFC 7692. +// +// Use of compression is experimental and may result in decreased performance. +package websocket diff --git a/vendor/github.com/gorilla/websocket/join.go b/vendor/github.com/gorilla/websocket/join.go new file mode 100644 index 00000000..c64f8c82 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/join.go @@ -0,0 +1,42 @@ +// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "io" + "strings" +) + +// JoinMessages concatenates received messages to create a single io.Reader. 
+// The string term is appended to each message. The returned reader does not +// support concurrent calls to the Read method. +func JoinMessages(c *Conn, term string) io.Reader { + return &joinReader{c: c, term: term} +} + +type joinReader struct { + c *Conn + term string + r io.Reader +} + +func (r *joinReader) Read(p []byte) (int, error) { + if r.r == nil { + var err error + _, r.r, err = r.c.NextReader() + if err != nil { + return 0, err + } + if r.term != "" { + r.r = io.MultiReader(r.r, strings.NewReader(r.term)) + } + } + n, err := r.r.Read(p) + if err == io.EOF { + err = nil + r.r = nil + } + return n, err +} diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go new file mode 100644 index 00000000..dc2c1f64 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/json.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "encoding/json" + "io" +) + +// WriteJSON writes the JSON encoding of v as a message. +// +// Deprecated: Use c.WriteJSON instead. +func WriteJSON(c *Conn, v interface{}) error { + return c.WriteJSON(v) +} + +// WriteJSON writes the JSON encoding of v as a message. +// +// See the documentation for encoding/json Marshal for details about the +// conversion of Go values to JSON. +func (c *Conn) WriteJSON(v interface{}) error { + w, err := c.NextWriter(TextMessage) + if err != nil { + return err + } + err1 := json.NewEncoder(w).Encode(v) + err2 := w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// Deprecated: Use c.ReadJSON instead. +func ReadJSON(c *Conn, v interface{}) error { + return c.ReadJSON(v) +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// See the documentation for the encoding/json Unmarshal function for details +// about the conversion of JSON to a Go value. +func (c *Conn) ReadJSON(v interface{}) error { + _, r, err := c.NextReader() + if err != nil { + return err + } + err = json.NewDecoder(r).Decode(v) + if err == io.EOF { + // One value is expected in the message. + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go new file mode 100644 index 00000000..d0742bf2 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -0,0 +1,55 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +//go:build !appengine +// +build !appengine + +package websocket + +import "unsafe" + +const wordSize = int(unsafe.Sizeof(uintptr(0))) + +func maskBytes(key [4]byte, pos int, b []byte) int { + // Mask one byte at a time for small buffers. + if len(b) < 2*wordSize { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 + } + + // Mask one byte at a time to word boundary. + if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { + n = wordSize - n + for i := range b[:n] { + b[i] ^= key[pos&3] + pos++ + } + b = b[n:] + } + + // Create aligned word size key. 
+ var k [wordSize]byte + for i := range k { + k[i] = key[(pos+i)&3] + } + kw := *(*uintptr)(unsafe.Pointer(&k)) + + // Mask one word at a time. + n := (len(b) / wordSize) * wordSize + for i := 0; i < n; i += wordSize { + *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw + } + + // Mask one byte at a time for remaining bytes. + b = b[n:] + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go new file mode 100644 index 00000000..36250ca7 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask_safe.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +//go:build appengine +// +build appengine + +package websocket + +func maskBytes(key [4]byte, pos int, b []byte) int { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go new file mode 100644 index 00000000..c854225e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/prepared.go @@ -0,0 +1,102 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "net" + "sync" + "time" +) + +// PreparedMessage caches on the wire representations of a message payload. +// Use PreparedMessage to efficiently send a message payload to multiple +// connections. PreparedMessage is especially useful when compression is used +// because the CPU and memory expensive compression operation can be executed +// once for a given set of compression options. +type PreparedMessage struct { + messageType int + data []byte + mu sync.Mutex + frames map[prepareKey]*preparedFrame +} + +// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. +type prepareKey struct { + isServer bool + compress bool + compressionLevel int +} + +// preparedFrame contains data in wire representation. +type preparedFrame struct { + once sync.Once + data []byte +} + +// NewPreparedMessage returns an initialized PreparedMessage. You can then send +// it to connection using WritePreparedMessage method. Valid wire +// representation will be calculated lazily only once for a set of current +// connection options. +func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { + pm := &PreparedMessage{ + messageType: messageType, + frames: make(map[prepareKey]*preparedFrame), + data: data, + } + + // Prepare a plain server frame. + _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) + if err != nil { + return nil, err + } + + // To protect against caller modifying the data argument, remember the data + // copied to the plain server frame. + pm.data = frameData[len(frameData)-len(data):] + return pm, nil +} + +func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { + pm.mu.Lock() + frame, ok := pm.frames[key] + if !ok { + frame = &preparedFrame{} + pm.frames[key] = frame + } + pm.mu.Unlock() + + var err error + frame.once.Do(func() { + // Prepare a frame using a 'fake' connection. + // TODO: Refactor code in conn.go to allow more direct construction of + // the frame. 
+ mu := make(chan struct{}, 1) + mu <- struct{}{} + var nc prepareConn + c := &Conn{ + conn: &nc, + mu: mu, + isServer: key.isServer, + compressionLevel: key.compressionLevel, + enableWriteCompression: true, + writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize), + } + if key.compress { + c.newCompressionWriter = compressNoContextTakeover + } + err = c.WriteMessage(pm.messageType, pm.data) + frame.data = nc.buf.Bytes() + }) + return pm.messageType, frame.data, err +} + +type prepareConn struct { + buf bytes.Buffer + net.Conn +} + +func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) } +func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go new file mode 100644 index 00000000..e0f466b7 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/proxy.go @@ -0,0 +1,77 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "encoding/base64" + "errors" + "net" + "net/http" + "net/url" + "strings" +) + +type netDialerFunc func(network, addr string) (net.Conn, error) + +func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { + return fn(network, addr) +} + +func init() { + proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { + return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil + }) +} + +type httpProxyDialer struct { + proxyURL *url.URL + forwardDial func(network, addr string) (net.Conn, error) +} + +func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) { + hostPort, _ := hostPortNoPort(hpd.proxyURL) + conn, err := hpd.forwardDial(network, hostPort) + if err != nil { + return nil, err + } + + connectHeader := make(http.Header) + if user := hpd.proxyURL.User; user != nil { + proxyUser := user.Username() + if proxyPassword, passwordSet := user.Password(); passwordSet { + credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) + connectHeader.Set("Proxy-Authorization", "Basic "+credential) + } + } + + connectReq := &http.Request{ + Method: http.MethodConnect, + URL: &url.URL{Opaque: addr}, + Host: addr, + Header: connectHeader, + } + + if err := connectReq.Write(conn); err != nil { + conn.Close() + return nil, err + } + + // Read response. It's OK to use and discard buffered reader here becaue + // the remote server does not speak until spoken to. + br := bufio.NewReader(conn) + resp, err := http.ReadResponse(br, connectReq) + if err != nil { + conn.Close() + return nil, err + } + + if resp.StatusCode != 200 { + conn.Close() + f := strings.SplitN(resp.Status, " ", 2) + return nil, errors.New(f[1]) + } + return conn, nil +} diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go new file mode 100644 index 00000000..24d53b38 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/server.go @@ -0,0 +1,365 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "errors" + "io" + "net/http" + "net/url" + "strings" + "time" +) + +// HandshakeError describes an error with the handshake from the peer. 
+type HandshakeError struct { + message string +} + +func (e HandshakeError) Error() string { return e.message } + +// Upgrader specifies parameters for upgrading an HTTP connection to a +// WebSocket connection. +// +// It is safe to call Upgrader's methods concurrently. +type Upgrader struct { + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer + // size is zero, then buffers allocated by the HTTP server are used. The + // I/O buffer sizes do not limit the size of the messages that can be sent + // or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. + // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. + WriteBufferPool BufferPool + + // Subprotocols specifies the server's supported protocols in order of + // preference. If this field is not nil, then the Upgrade method negotiates a + // subprotocol by selecting the first match in this list with a protocol + // requested by the client. If there's no match, then no protocol is + // negotiated (the Sec-Websocket-Protocol header is not included in the + // handshake response). + Subprotocols []string + + // Error specifies the function for generating HTTP error responses. If Error + // is nil, then http.Error is used to generate the HTTP response. + Error func(w http.ResponseWriter, r *http.Request, status int, reason error) + + // CheckOrigin returns true if the request Origin header is acceptable. If + // CheckOrigin is nil, then a safe default is used: return false if the + // Origin request header is present and the origin host is not equal to + // request Host header. + // + // A CheckOrigin function should carefully validate the request origin to + // prevent cross-site request forgery. + CheckOrigin func(r *http.Request) bool + + // EnableCompression specify if the server should attempt to negotiate per + // message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool +} + +func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { + err := HandshakeError{reason} + if u.Error != nil { + u.Error(w, r, status, err) + } else { + w.Header().Set("Sec-Websocket-Version", "13") + http.Error(w, http.StatusText(status), status) + } + return nil, err +} + +// checkSameOrigin returns true if the origin is not set or is equal to the request host. 
+func checkSameOrigin(r *http.Request) bool { + origin := r.Header["Origin"] + if len(origin) == 0 { + return true + } + u, err := url.Parse(origin[0]) + if err != nil { + return false + } + return equalASCIIFold(u.Host, r.Host) +} + +func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { + if u.Subprotocols != nil { + clientProtocols := Subprotocols(r) + for _, serverProtocol := range u.Subprotocols { + for _, clientProtocol := range clientProtocols { + if clientProtocol == serverProtocol { + return clientProtocol + } + } + } + } else if responseHeader != nil { + return responseHeader.Get("Sec-Websocket-Protocol") + } + return "" +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie). To specify +// subprotocols supported by the server, set Upgrader.Subprotocols directly. +// +// If the upgrade fails, then Upgrade replies to the client with an HTTP error +// response. +func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { + const badHandshake = "websocket: the client is not using the websocket protocol: " + + if !tokenListContainsValue(r.Header, "Connection", "upgrade") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header") + } + + if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") + } + + if r.Method != http.MethodGet { + return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET") + } + + if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") + } + + if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported") + } + + checkOrigin := u.CheckOrigin + if checkOrigin == nil { + checkOrigin = checkSameOrigin + } + if !checkOrigin(r) { + return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin") + } + + challengeKey := r.Header.Get("Sec-Websocket-Key") + if challengeKey == "" { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank") + } + + subprotocol := u.selectSubprotocol(r, responseHeader) + + // Negotiate PMCE + var compress bool + if u.EnableCompression { + for _, ext := range parseExtensions(r.Header) { + if ext[""] != "permessage-deflate" { + continue + } + compress = true + break + } + } + + h, ok := w.(http.Hijacker) + if !ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") + } + var brw *bufio.ReadWriter + netConn, brw, err := h.Hijack() + if err != nil { + return u.returnError(w, r, http.StatusInternalServerError, err.Error()) + } + + if brw.Reader.Buffered() > 0 { + netConn.Close() + return nil, errors.New("websocket: client sent data before handshake is complete") + } + + var br *bufio.Reader + if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 { + // Reuse hijacked buffered 
reader as connection reader. + br = brw.Reader + } + + buf := bufioWriterBuffer(netConn, brw.Writer) + + var writeBuf []byte + if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 { + // Reuse hijacked write buffer as connection buffer. + writeBuf = buf + } + + c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf) + c.subprotocol = subprotocol + + if compress { + c.newCompressionWriter = compressNoContextTakeover + c.newDecompressionReader = decompressNoContextTakeover + } + + // Use larger of hijacked buffer and connection write buffer for header. + p := buf + if len(c.writeBuf) > len(p) { + p = c.writeBuf + } + p = p[:0] + + p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) + p = append(p, computeAcceptKey(challengeKey)...) + p = append(p, "\r\n"...) + if c.subprotocol != "" { + p = append(p, "Sec-WebSocket-Protocol: "...) + p = append(p, c.subprotocol...) + p = append(p, "\r\n"...) + } + if compress { + p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) + } + for k, vs := range responseHeader { + if k == "Sec-Websocket-Protocol" { + continue + } + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + for i := 0; i < len(v); i++ { + b := v[i] + if b <= 31 { + // prevent response splitting. + b = ' ' + } + p = append(p, b) + } + p = append(p, "\r\n"...) + } + } + p = append(p, "\r\n"...) + + // Clear deadlines set by HTTP server. + netConn.SetDeadline(time.Time{}) + + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) + } + if _, err = netConn.Write(p); err != nil { + netConn.Close() + return nil, err + } + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Time{}) + } + + return c, nil +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// Deprecated: Use websocket.Upgrader instead. +// +// Upgrade does not perform origin checking. The application is responsible for +// checking the Origin header before calling Upgrade. An example implementation +// of the same origin policy check is: +// +// if req.Header.Get("Origin") != "http://"+req.Host { +// http.Error(w, "Origin not allowed", http.StatusForbidden) +// return +// } +// +// If the endpoint supports subprotocols, then the application is responsible +// for negotiating the protocol used on the connection. Use the Subprotocols() +// function to get the subprotocols requested by the client. Use the +// Sec-Websocket-Protocol response header to specify the subprotocol selected +// by the application. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// negotiated subprotocol (Sec-Websocket-Protocol). +// +// The connection buffers IO to the underlying network connection. The +// readBufSize and writeBufSize parameters specify the size of the buffers to +// use. Messages can be larger than the buffers. +// +// If the request is not a valid WebSocket handshake, then Upgrade returns an +// error of type HandshakeError. Applications should handle this error by +// replying to the client with an HTTP error response. 
+func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
+	u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
+	u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
+		// don't return errors to maintain backwards compatibility
+	}
+	u.CheckOrigin = func(r *http.Request) bool {
+		// allow all connections by default
+		return true
+	}
+	return u.Upgrade(w, r, responseHeader)
+}
+
+// Subprotocols returns the subprotocols requested by the client in the
+// Sec-Websocket-Protocol header.
+func Subprotocols(r *http.Request) []string {
+	h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
+	if h == "" {
+		return nil
+	}
+	protocols := strings.Split(h, ",")
+	for i := range protocols {
+		protocols[i] = strings.TrimSpace(protocols[i])
+	}
+	return protocols
+}
+
+// IsWebSocketUpgrade returns true if the client requested upgrade to the
+// WebSocket protocol.
+func IsWebSocketUpgrade(r *http.Request) bool {
+	return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
+		tokenListContainsValue(r.Header, "Upgrade", "websocket")
+}
+
+// bufioReaderSize returns the size of a bufio.Reader.
+func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
+	// This code assumes that peek on a reset reader returns
+	// bufio.Reader.buf[:0].
+	// TODO: Use bufio.Reader.Size() after Go 1.10
+	br.Reset(originalReader)
+	if p, err := br.Peek(0); err == nil {
+		return cap(p)
+	}
+	return 0
+}
+
+// writeHook is an io.Writer that records the last slice passed to it via
+// io.Writer.Write.
+type writeHook struct {
+	p []byte
+}
+
+func (wh *writeHook) Write(p []byte) (int, error) {
+	wh.p = p
+	return len(p), nil
+}
+
+// bufioWriterBuffer grabs the buffer from a bufio.Writer.
+func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
+	// This code assumes that bufio.Writer.buf[:1] is passed to the
+	// bufio.Writer's underlying writer.
+ var wh writeHook + bw.Reset(&wh) + bw.WriteByte(0) + bw.Flush() + + bw.Reset(originalWriter) + + return wh.p[:cap(wh.p)] +} diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go new file mode 100644 index 00000000..a62b68cc --- /dev/null +++ b/vendor/github.com/gorilla/websocket/tls_handshake.go @@ -0,0 +1,21 @@ +//go:build go1.17 +// +build go1.17 + +package websocket + +import ( + "context" + "crypto/tls" +) + +func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.HandshakeContext(ctx); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/tls_handshake_116.go b/vendor/github.com/gorilla/websocket/tls_handshake_116.go new file mode 100644 index 00000000..e1b2b44f --- /dev/null +++ b/vendor/github.com/gorilla/websocket/tls_handshake_116.go @@ -0,0 +1,21 @@ +//go:build !go1.17 +// +build !go1.17 + +package websocket + +import ( + "context" + "crypto/tls" +) + +func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.Handshake(); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go new file mode 100644 index 00000000..7bf2f66c --- /dev/null +++ b/vendor/github.com/gorilla/websocket/util.go @@ -0,0 +1,283 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "io" + "net/http" + "strings" + "unicode/utf8" +) + +var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +func computeAcceptKey(challengeKey string) string { + h := sha1.New() + h.Write([]byte(challengeKey)) + h.Write(keyGUID) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func generateChallengeKey() (string, error) { + p := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, p); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(p), nil +} + +// Token octets per RFC 2616. 
+var isTokenOctet = [256]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +// skipSpace returns a slice of the string s with all leading RFC 2616 linear +// whitespace removed. +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if b := s[i]; b != ' ' && b != '\t' { + break + } + } + return s[i:] +} + +// nextToken returns the leading RFC 2616 token of s and the string following +// the token. +func nextToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if !isTokenOctet[s[i]] { + break + } + } + return s[:i], s[i:] +} + +// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616 +// and the string following the token or quoted string. +func nextTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return nextToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} + +// equalASCIIFold returns true if s is equal to t with ASCII case folding as +// defined in RFC 4790. +func equalASCIIFold(s, t string) bool { + for s != "" && t != "" { + sr, size := utf8.DecodeRuneInString(s) + s = s[size:] + tr, size := utf8.DecodeRuneInString(t) + t = t[size:] + if sr == tr { + continue + } + if 'A' <= sr && sr <= 'Z' { + sr = sr + 'a' - 'A' + } + if 'A' <= tr && tr <= 'Z' { + tr = tr + 'a' - 'A' + } + if sr != tr { + return false + } + } + return s == t +} + +// tokenListContainsValue returns true if the 1#token header with the given +// name contains a token equal to value with ASCII case folding. +func tokenListContainsValue(header http.Header, name string, value string) bool { +headers: + for _, s := range header[name] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + s = skipSpace(s) + if s != "" && s[0] != ',' { + continue headers + } + if equalASCIIFold(t, value) { + return true + } + if s == "" { + continue headers + } + s = s[1:] + } + } + return false +} + +// parseExtensions parses WebSocket extensions from a header. 
+func parseExtensions(header http.Header) []map[string]string { + // From RFC 6455: + // + // Sec-WebSocket-Extensions = extension-list + // extension-list = 1#extension + // extension = extension-token *( ";" extension-param ) + // extension-token = registered-token + // registered-token = token + // extension-param = token [ "=" (token | quoted-string) ] + // ;When using the quoted-string syntax variant, the value + // ;after quoted-string unescaping MUST conform to the + // ;'token' ABNF. + + var result []map[string]string +headers: + for _, s := range header["Sec-Websocket-Extensions"] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + ext := map[string]string{"": t} + for { + s = skipSpace(s) + if !strings.HasPrefix(s, ";") { + break + } + var k string + k, s = nextToken(skipSpace(s[1:])) + if k == "" { + continue headers + } + s = skipSpace(s) + var v string + if strings.HasPrefix(s, "=") { + v, s = nextTokenOrQuoted(skipSpace(s[1:])) + s = skipSpace(s) + } + if s != "" && s[0] != ',' && s[0] != ';' { + continue headers + } + ext[k] = v + } + if s != "" && s[0] != ',' { + continue headers + } + result = append(result, ext) + if s == "" { + continue headers + } + s = s[1:] + } + } + return result +} diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go new file mode 100644 index 00000000..2e668f6b --- /dev/null +++ b/vendor/github.com/gorilla/websocket/x_net_proxy.go @@ -0,0 +1,473 @@ +// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. +//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy + +// Package proxy provides support for a variety of protocols to proxy network +// data. +// + +package websocket + +import ( + "errors" + "io" + "net" + "net/url" + "os" + "strconv" + "strings" + "sync" +) + +type proxy_direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var proxy_Direct = proxy_direct{} + +func (proxy_direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type proxy_PerHost struct { + def, bypass proxy_Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { + return &proxy_PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. 
+func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *proxy_PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *proxy_PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *proxy_PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *proxy_PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} + +// A Dialer is a means to establish a connection. +type proxy_Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type proxy_Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. 
+func proxy_FromEnvironment() proxy_Dialer { + allProxy := proxy_allProxyEnv.Get() + if len(allProxy) == 0 { + return proxy_Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return proxy_Direct + } + proxy, err := proxy_FromURL(proxyURL, proxy_Direct) + if err != nil { + return proxy_Direct + } + + noProxy := proxy_noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := proxy_NewPerHost(proxy, proxy_Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { + if proxy_proxySchemes == nil { + proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) + } + proxy_proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. +func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { + var auth *proxy_Auth + if u.User != nil { + auth = new(proxy_Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return proxy_SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. + if proxy_proxySchemes != nil { + if f, ok := proxy_proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + proxy_allProxyEnv = &proxy_envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + proxy_noProxyEnv = &proxy_envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type proxy_envOnce struct { + names []string + once sync.Once + val string +} + +func (e *proxy_envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *proxy_envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928 and RFC 1929. 
+func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { + s := &proxy_socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type proxy_socks5 struct { + user, password string + network, addr string + forward proxy_Dialer +} + +const proxy_socks5Version = 5 + +const ( + proxy_socks5AuthNone = 0 + proxy_socks5AuthPassword = 2 +) + +const proxy_socks5Connect = 1 + +const ( + proxy_socks5IP4 = 1 + proxy_socks5Domain = 3 + proxy_socks5IP6 = 4 +) + +var proxy_socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the given network via the SOCKS5 proxy. +func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + if err := s.connect(conn, addr); err != nil { + conn.Close() + return nil, err + } + return conn, nil +} + +// connect takes an existing connection to a socks5 proxy server, +// and commands the server to extend that connection to target, +// which must be a canonical address with a host and port. +func (s *proxy_socks5) connect(conn net.Conn, target string) error { + host, portStr, err := net.SplitHostPort(target) + if err != nil { + return err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, proxy_socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + // See RFC 1929 + if buf[1] == proxy_socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) 
+ + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, proxy_socks5IP4) + ip = ip4 + } else { + buf = append(buf, proxy_socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return errors.New("proxy: destination host name too long: " + host) + } + buf = append(buf, proxy_socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) + } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(proxy_socks5Errors) { + failure = proxy_socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case proxy_socks5IP4: + bytesToDiscard = net.IPv4len + case proxy_socks5IP6: + bytesToDiscard = net.IPv6len + case proxy_socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + default: + return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + return nil +} diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/mattn/go-colorable/LICENSE new file mode 100644 index 00000000..91b5cef3 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md
new file mode 100644
index 00000000..ca048371
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/README.md
@@ -0,0 +1,48 @@
+# go-colorable
+
+[![Build Status](https://github.com/mattn/go-colorable/workflows/test/badge.svg)](https://github.com/mattn/go-colorable/actions?query=workflow%3Atest)
+[![Codecov](https://codecov.io/gh/mattn/go-colorable/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-colorable)
+[![GoDoc](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable)
+[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable)
+
+Colorable writer for Windows.
+
+For example, most logger packages don't show colors on Windows. (I know it can be done with ansicon, but I don't want to.)
+This package makes it possible to handle ANSI color escape sequences on Windows.
+
+## Too Bad!
+
+![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
+
+
+## So Good!
+
+![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)
+
+## Usage
+
+```go
+logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
+logrus.SetOutput(colorable.NewColorableStdout())
+
+logrus.Info("succeeded")
+logrus.Warn("not correct")
+logrus.Error("something error")
+logrus.Fatal("panic")
+```
+
+You can compile the above code on non-Windows OSes.
+
+## Installation
+
+```
+$ go get github.com/mattn/go-colorable
+```
+
+# License
+
+MIT
+
+# Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go
new file mode 100644
index 00000000..416d1bbb
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go
@@ -0,0 +1,38 @@
+//go:build appengine
+// +build appengine
+
+package colorable
+
+import (
+	"io"
+	"os"
+
+	_ "github.com/mattn/go-isatty"
+)
+
+// NewColorable returns new instance of Writer which handles escape sequence.
+func NewColorable(file *os.File) io.Writer {
+	if file == nil {
+		panic("nil passed instead of *os.File to NewColorable()")
+	}
+
+	return file
+}
+
+// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout.
+func NewColorableStdout() io.Writer {
+	return os.Stdout
+}
+
+// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr.
+func NewColorableStderr() io.Writer {
+	return os.Stderr
+}
+
+// EnableColorsStdout enables colors if possible.
+func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go new file mode 100644 index 00000000..766d9460 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -0,0 +1,38 @@ +//go:build !windows && !appengine +// +build !windows,!appengine + +package colorable + +import ( + "io" + "os" + + _ "github.com/mattn/go-isatty" +) + +// NewColorable returns new instance of Writer which handles escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} + +// EnableColorsStdout enable colors if possible. +func EnableColorsStdout(enabled *bool) func() { + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go new file mode 100644 index 00000000..1846ad5a --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -0,0 +1,1047 @@ +//go:build windows && !appengine +// +build windows,!appengine + +package colorable + +import ( + "bytes" + "io" + "math" + "os" + "strconv" + "strings" + "sync" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) + commonLvbUnderscore = 0x8000 + + cENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 +) + +const ( + genericRead = 0x80000000 + genericWrite = 0x40000000 +) + +const ( + consoleTextmodeBuffer = 0x1 +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type consoleCursorInfo struct { + size dword + visible int32 +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") + procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") + procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") + procCreateConsoleScreenBuffer 
= kernel32.NewProc("CreateConsoleScreenBuffer") +) + +// Writer provides colorable Writer to the console +type Writer struct { + out io.Writer + handle syscall.Handle + althandle syscall.Handle + oldattr word + oldpos coord + rest bytes.Buffer + mutex sync.Mutex +} + +// NewColorable returns new instance of Writer which handles escape sequence from File. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var mode uint32 + if r, _, _ := procGetConsoleMode.Call(file.Fd(), uintptr(unsafe.Pointer(&mode))); r != 0 && mode&cENABLE_VIRTUAL_TERMINAL_PROCESSING != 0 { + return file + } + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} + } + return file +} + +// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout. +func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr. +func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 
136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +// `\033]0;TITLESTR\007` +func doTitleSequence(er *bytes.Reader) error { + var c byte + var err error + + c, err = er.ReadByte() + if err != nil { + return err + } + if c != '0' && c != '2' { + return nil + } + c, err = er.ReadByte() + if err != nil { + return err + } + if c != ';' { + return nil + } + title := make([]byte, 0, 80) + for { + c, err = er.ReadByte() + if err != nil { + return err + } + if c == 0x07 || c == '\n' { + break + } + title = append(title, c) + } + if len(title) > 0 { + title8, err := syscall.UTF16PtrFromString(string(title)) + if err == nil { + procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) + } + } + return nil +} + +// returns Atoi(s) unless s == "" in which case it returns def +func atoiWithDefault(s string, def int) (int, error) { + if s == "" { + return def, nil + } + return strconv.Atoi(s) +} + +// Write writes data on console +func (w *Writer) Write(data []byte) (n int, err error) { + w.mutex.Lock() + defer w.mutex.Unlock() + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + handle := w.handle + + var er *bytes.Reader + if w.rest.Len() > 0 { + var rest bytes.Buffer + w.rest.WriteTo(&rest) + w.rest.Reset() + rest.Write(data) + er = bytes.NewReader(rest.Bytes()) + } else { + er = bytes.NewReader(data) + } + var plaintext bytes.Buffer +loop: + for { + c1, err := er.ReadByte() + if err != nil { + plaintext.WriteTo(w.out) + break loop + } + if c1 != 0x1b { + 
plaintext.WriteByte(c1) + continue + } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + + switch c2 { + case '>': + continue + case ']': + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { + break loop + } + er = bytes.NewReader(w.rest.Bytes()[2:]) + err := doTitleSequence(er) + if err != nil { + break loop + } + w.rest.Reset() + continue + // https://github.com/mattn/go-colorable/issues/27 + case '7': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + continue + case '8': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + continue + case 0x5b: + // execute part after switch + default: + continue + } + + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + + var buf bytes.Buffer + var m byte + for i, c := range w.rest.Bytes()[2:] { + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) + w.rest.Reset() + break + } + buf.Write([]byte(string(c))) + } + if m == 0 { + break loop + } + + switch m { + case 'A': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = atoiWithDefault(buf.String(), 1) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + if csbi.cursorPosition.x < 0 { + csbi.cursorPosition.x = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n < 1 { + n = 1 + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n - 1) + procSetConsoleCursorPosition.Call(uintptr(handle), 
*(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H', 'f': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + if buf.Len() > 0 { + token := strings.Split(buf.String(), ";") + switch len(token) { + case 1: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + csbi.cursorPosition.y = short(n1 - 1) + case 2: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2 - 1) + csbi.cursorPosition.y = short(n1 - 1) + } + } else { + csbi.cursorPosition.y = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + var count, written dword + var cursor coord + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var count, written dword + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'X': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var written dword + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + 
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i++ { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case n == 4: + attr |= commonLvbUnderscore + case (1 <= n && n <= 3) || n == 5: + attr |= foregroundIntensity + case n == 7 || n == 27: + attr = + (attr &^ (foregroundMask | backgroundMask)) | + ((attr & foregroundMask) << 4) | + ((attr & backgroundMask) >> 4) + case n == 22: + attr &^= foregroundIntensity + case n == 24: + attr &^= commonLvbUnderscore + case 30 <= n && n <= 37: + attr &= backgroundMask + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256%len(n256foreAttr)] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= foregroundRed + } + if g > 127 { + attr |= foregroundGreen + } + if b > 127 { + attr |= foregroundBlue + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr &= foregroundMask + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256%len(n256backAttr)] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= backgroundRed + } + if g > 127 { + attr |= backgroundGreen + } + if b > 127 { + attr |= backgroundBlue + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. 
+ attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) + } + } + case 'h': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle == 0 { + h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) + w.althandle = syscall.Handle(h) + if w.althandle != 0 { + handle = w.althandle + } + } + } + case 'l': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle != 0 { + syscall.CloseHandle(w.althandle) + w.althandle = 0 + handle = w.handle + } + } + case 's': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + case 'u': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + } + } + + return len(data), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + {0x000000, false, false, false, false}, + {0x000080, false, false, true, false}, + {0x008000, false, true, false, false}, + {0x008080, false, true, true, false}, + {0x800000, true, false, false, false}, + {0x800080, true, false, true, false}, + {0x808000, true, true, false, false}, + {0xc0c0c0, true, true, true, false}, + {0x808080, false, false, false, true}, + {0x0000ff, false, false, true, true}, + {0x00ff00, false, true, false, true}, + {0x00ffff, false, true, true, true}, + {0xff0000, true, false, false, true}, + {0xff00ff, true, false, true, true}, + {0xffff00, true, true, false, true}, + {0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) 
float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} + +// EnableColorsStdout enable colors if possible. +func EnableColorsStdout(enabled *bool) func() { + var mode uint32 + h := os.Stdout.Fd() + if r, _, _ := procGetConsoleMode.Call(h, uintptr(unsafe.Pointer(&mode))); r != 0 { + if r, _, _ = procSetConsoleMode.Call(h, uintptr(mode|cENABLE_VIRTUAL_TERMINAL_PROCESSING)); r != 0 { + if enabled != nil { + *enabled = true + } + return func() { + procSetConsoleMode.Call(h, uintptr(mode)) + } + } + } + if enabled != nil { + *enabled = true + } + return func() {} +} diff --git a/vendor/github.com/mattn/go-colorable/go.test.sh b/vendor/github.com/mattn/go-colorable/go.test.sh new file mode 100644 index 00000000..012162b0 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go new file mode 100644 index 00000000..05d6f74b --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -0,0 +1,57 @@ +package colorable + +import ( + "bytes" + "io" +) + +// NonColorable holds writer but removes escape sequence. +type NonColorable struct { + out io.Writer +} + +// NewNonColorable returns new instance of Writer which removes escape sequence from Writer. 
+func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +// Write writes data on console +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewReader(data) + var plaintext bytes.Buffer +loop: + for { + c1, err := er.ReadByte() + if err != nil { + plaintext.WriteTo(w.out) + break loop + } + if c1 != 0x1b { + plaintext.WriteByte(c1) + continue + } + _, err = plaintext.WriteTo(w.out) + if err != nil { + break loop + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + if c2 != 0x5b { + continue + } + + for { + c, err := er.ReadByte() + if err != nil { + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + } + } + + return len(data), nil +} diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 00000000..65dc692b --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
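The two `mattn` packages vendored above are small building blocks: `go-isatty` answers "is this file descriptor a terminal?", while `go-colorable` either interprets ANSI escape sequences for legacy Windows consoles (`NewColorable`/`NewColorableStdout`) or strips them entirely (`NewNonColorable`). They are pulled in here indirectly, most likely via zerolog's console writer, which the mochi-mqtt broker (vendored below) uses for logging. The following is a minimal, illustrative sketch of how a caller typically combines the two packages; it is not part of this patch, and the program and message strings are made up:

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/mattn/go-colorable"
	"github.com/mattn/go-isatty"
)

func main() {
	// Pick an output writer based on where stdout points:
	// - on a real (or Cygwin/MSYS2) terminal, NewColorableStdout returns a writer
	//   that understands ANSI escapes (translating them into console API calls on
	//   legacy Windows consoles, and passing them through elsewhere);
	// - otherwise NewNonColorable strips the escapes so redirected output stays clean.
	var out io.Writer
	if isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) {
		out = colorable.NewColorableStdout()
	} else {
		out = colorable.NewNonColorable(os.Stdout)
	}

	fmt.Fprintln(out, "\x1b[32mgreen on a TTY, plain text elsewhere\x1b[0m")
}
```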
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md new file mode 100644 index 00000000..38418353 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -0,0 +1,50 @@ +# go-isatty + +[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) +[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty) +[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) { + fmt.Println("Is Cygwin/MSYS2 Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +## License + +MIT + +## Author + +Yasuhiro Matsumoto (a.k.a mattn) + +## Thanks + +* k-takata: base idea for IsCygwinTerminal + + https://github.com/k-takata/go-iscygpty diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 00000000..17d4f90e --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff --git a/vendor/github.com/mattn/go-isatty/go.test.sh b/vendor/github.com/mattn/go-isatty/go.test.sh new file mode 100644 index 00000000..012162b0 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 00000000..39bbcf00 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,19 @@ +//go:build (darwin || freebsd || openbsd || netbsd || dragonfly) && !appengine +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go new file mode 100644 index 00000000..31503226 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -0,0 +1,16 @@ +//go:build appengine || js || nacl || wasm +// +build appengine js nacl wasm + +package isatty + +// IsTerminal returns true if the file descriptor is terminal which +// is always false on js and appengine classic which is a sandboxed PaaS. 
+func IsTerminal(fd uintptr) bool { + return false +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_plan9.go b/vendor/github.com/mattn/go-isatty/isatty_plan9.go new file mode 100644 index 00000000..bae7f9bb --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_plan9.go @@ -0,0 +1,23 @@ +//go:build plan9 +// +build plan9 + +package isatty + +import ( + "syscall" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + path, err := syscall.Fd2path(int(fd)) + if err != nil { + return false + } + return path == "/dev/cons" || path == "/mnt/term/dev/cons" +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 00000000..0c3acf2d --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,21 @@ +//go:build solaris && !appengine +// +build solaris,!appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: https://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libc/port/gen/isatty.c +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermio(int(fd), unix.TCGETA) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go new file mode 100644 index 00000000..67787657 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -0,0 +1,19 @@ +//go:build (linux || aix || zos) && !appengine +// +build linux aix zos +// +build !appengine + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 00000000..8e3c9917 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,125 @@ +//go:build windows && !appengine +// +build windows,!appengine + +package isatty + +import ( + "errors" + "strings" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + objectNameInfo uintptr = 1 + fileNameInfo = 2 + fileTypePipe = 3 +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + ntdll = syscall.NewLazyDLL("ntdll.dll") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = kernel32.NewProc("GetFileType") + procNtQueryObject = ntdll.NewProc("NtQueryObject") +) + +func init() { + // Check if GetFileInformationByHandleEx is available. + if procGetFileInformationByHandleEx.Find() != nil { + procGetFileInformationByHandleEx = nil + } +} + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// Check pipe name is used for cygwin/msys2 pty. +// Cygwin/MSYS2 PTY has a name like: +// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master +func isCygwinPipeName(name string) bool { + token := strings.Split(name, "-") + if len(token) < 5 { + return false + } + + if token[0] != `\msys` && + token[0] != `\cygwin` && + token[0] != `\Device\NamedPipe\msys` && + token[0] != `\Device\NamedPipe\cygwin` { + return false + } + + if token[1] == "" { + return false + } + + if !strings.HasPrefix(token[2], "pty") { + return false + } + + if token[3] != `from` && token[3] != `to` { + return false + } + + if token[4] != "master" { + return false + } + + return true +} + +// getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler +// since GetFileInformationByHandleEx is not available under windows Vista and still some old fashion +// guys are using Windows XP, this is a workaround for those guys, it will also work on system from +// Windows vista to 10 +// see https://stackoverflow.com/a/18792477 for details +func getFileNameByHandle(fd uintptr) (string, error) { + if procNtQueryObject == nil { + return "", errors.New("ntdll.dll: NtQueryObject not supported") + } + + var buf [4 + syscall.MAX_PATH]uint16 + var result int + r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5, + fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0) + if r != 0 { + return "", e + } + return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. +func IsCygwinTerminal(fd uintptr) bool { + if procGetFileInformationByHandleEx == nil { + name, err := getFileNameByHandle(fd) + if err != nil { + return false + } + return isCygwinPipeName(name) + } + + // Cygwin/msys's pty is a pipe. 
+ ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if ft != fileTypePipe || e != 0 { + return false + } + + var buf [2 + syscall.MAX_PATH]uint16 + r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), + 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), + uintptr(len(buf)*2), 0, 0) + if r == 0 || e != 0 { + return false + } + + l := *(*uint32)(unsafe.Pointer(&buf)) + return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/.gitignore b/vendor/github.com/mochi-mqtt/server/v2/.gitignore new file mode 100644 index 00000000..21b4810e --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/.gitignore @@ -0,0 +1,4 @@ +cmd/mqtt +.DS_Store +*.db +.idea \ No newline at end of file diff --git a/vendor/github.com/mochi-mqtt/server/v2/.golangci.yml b/vendor/github.com/mochi-mqtt/server/v2/.golangci.yml new file mode 100644 index 00000000..90839366 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/.golangci.yml @@ -0,0 +1,103 @@ +linters: + disable-all: false + fix: false # Fix found issues (if it's supported by the linter). + enable: + # - asasalint + # - asciicheck + # - bidichk + # - bodyclose + # - containedctx + # - contextcheck + #- cyclop + # - deadcode + - decorder + # - depguard + # - dogsled + # - dupl + - durationcheck + # - errchkjson + # - errname + - errorlint + # - execinquery + # - exhaustive + # - exhaustruct + # - exportloopref + #- forcetypeassert + #- forbidigo + #- funlen + #- gci + # - gochecknoglobals + # - gochecknoinits + # - gocognit + # - goconst + # - gocritic + - gocyclo + - godot + # - godox + # - goerr113 + # - gofmt + # - gofumpt + # - goheader + - goimports + # - golint + # - gomnd + # - gomoddirectives + # - gomodguard + # - goprintffuncname + - gosec + - gosimple + - govet + # - grouper + # - ifshort + - importas + - ineffassign + # - interfacebloat + # - interfacer + # - ireturn + # - lll + # - maintidx + # - makezero + - maligned + - misspell + # - nakedret + # - nestif + # - nilerr + # - nilnil + # - nlreturn + # - noctx + # - nolintlint + # - nonamedreturns + # - nosnakecase + # - nosprintfhostport + # - paralleltest + # - prealloc + # - predeclared + # - promlinter + - reassign + # - revive + # - rowserrcheck + # - scopelint + # - sqlclosecheck + # - staticcheck + # - structcheck + # - stylecheck + # - tagliatelle + # - tenv + # - testpackage + # - thelper + - tparallel + # - typecheck + - unconvert + - unparam + - unused + - usestdlibvars + # - varcheck + # - varnamelen + - wastedassign + - whitespace + # - wrapcheck + # - wsl + disable: + - errcheck + + diff --git a/vendor/github.com/mochi-mqtt/server/v2/Dockerfile b/vendor/github.com/mochi-mqtt/server/v2/Dockerfile new file mode 100644 index 00000000..de71c8ba --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/Dockerfile @@ -0,0 +1,31 @@ +FROM golang:1.19.0-alpine3.15 AS builder + +RUN apk update +RUN apk add git + +WORKDIR /app + +COPY go.mod ./ +COPY go.sum ./ +RUN go mod download + +COPY . ./ + +RUN go build -o /app/mochi ./cmd + + +FROM alpine + +WORKDIR / +COPY --from=builder /app/mochi . 
+ +# tcp +EXPOSE 1883 + +# websockets +EXPOSE 1882 + +# dashboard +EXPOSE 8080 + +ENTRYPOINT [ "/mochi" ] diff --git a/vendor/github.com/mochi-mqtt/server/v2/LICENSE.md b/vendor/github.com/mochi-mqtt/server/v2/LICENSE.md new file mode 100644 index 00000000..25718ace --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/LICENSE.md @@ -0,0 +1,23 @@ + +The MIT License (MIT) + +Copyright (c) 2023 Mochi-MQTT Organisation +Copyright (c) 2019, 2022, 2023 Jonathan Blake (mochi-co) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mochi-mqtt/server/v2/README.md b/vendor/github.com/mochi-mqtt/server/v2/README.md new file mode 100644 index 00000000..dcce56cb --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/README.md @@ -0,0 +1,424 @@ + +

+ +![build status](https://github.com/mochi-mqtt/server/actions/workflows/build.yml/badge.svg) +[![Coverage Status](https://coveralls.io/repos/github/mochi-mqtt/server/badge.svg?branch=master&v2)](https://coveralls.io/github/mochi-mqtt/server?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/mochi-mqtt/server)](https://goreportcard.com/report/github.com/mochi-mqtt/server/v2) +[![Go Reference](https://pkg.go.dev/badge/github.com/mochi-mqtt/server.svg)](https://pkg.go.dev/github.com/mochi-mqtt/server/v2) +[![contributions welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/mochi-mqtt/server/issues) + +

+ +# Mochi MQTT Broker +## The fully compliant, embeddable high-performance Go MQTT v5 (and v3.1.1) broker server +Mochi MQTT is an embeddable [fully compliant](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html) MQTT v5 broker server written in Go, designed for the development of telemetry and internet-of-things projects. The server can be used either as a standalone binary or embedded as a library in your own applications, and has been designed to be as lightweight and fast as possible, with great care taken to ensure the quality and maintainability of the project. + +### What is MQTT? +MQTT stands for [MQ Telemetry Transport](https://en.wikipedia.org/wiki/MQTT). It is a publish/subscribe, extremely simple and lightweight messaging protocol, designed for constrained devices and low-bandwidth, high-latency or unreliable networks ([Learn more](https://mqtt.org/faq)). Mochi MQTT fully implements version 5.0.0 of the MQTT protocol. + +### When is this repo updated? +Unless it's a critical issue, new releases typically go out over the weekend. + +## What's new in Version 2? +Version 2.0.0 takes all the great things we loved about Mochi MQTT v1.0.0, learns from the mistakes, and improves on the things we wished we'd had. It's a total from-scratch rewrite, designed to fully implement MQTT v5 as a first-class feature. + +Don't forget to use the new v2 import paths: +```go +import "github.com/mochi-mqtt/server/v2" +``` + +- Full MQTTv5 Feature Compliance, compatibility for MQTT v3.1.1 and v3.0.0: + - User and MQTTv5 Packet Properties + - Topic Aliases + - Shared Subscriptions + - Subscription Options and Subscription Identifiers + - Message Expiry + - Client Session Expiry + - Send and Receive QoS Flow Control Quotas + - Server-side Disconnect and Auth Packets + - Will Delay Intervals + - Plus all the original MQTT features of Mochi MQTT v1, such as Full QoS(0,1,2), $SYS topics, retained messages, etc. +- Developer-centric: + - Most core broker code is now exported and accessible, for total developer control. + - Full-featured and flexible Hook-based interfacing system to provide easy 'plugin' development. + - Direct Packet Injection using special inline client, or masquerade as existing clients. +- Performant and Stable: + - Our classic trie-based Topic-Subscription model. + - Client-specific write buffers to avoid issues with slow-reading or irregular client behaviour. + - Passes all [Paho Interoperability Tests](https://github.com/eclipse/paho.mqtt.testing/tree/master/interoperability) for MQTT v5 and MQTT v3. + - Over a thousand carefully considered unit test scenarios. +- TCP, Websocket (including SSL/TLS), and $SYS Dashboard listeners. +- Built-in Redis, Badger, and Bolt Persistence using Hooks (but you can also make your own). +- Built-in Rule-based Authentication and ACL Ledger using Hooks (also make your own). + +> There is no upgrade path from v1.0.0. Please review the documentation and this readme to get a sense of the changes required (e.g. the v1 events system, auth, and persistence have all been replaced with the new hooks system). + +### Compatibility Notes +Because of the overlap between the v5 specification and previous versions of mqtt, the server can accept both v5 and v3 clients, but note that in cases where both v5 an v3 clients are connected, properties and features provided for v5 clients will be downgraded for v3 clients (such as user properties). + +Support for MQTT v3.0.0 and v3.1.1 is considered hybrid-compatibility. 
Where not specifically restricted in the v3 specification, more modern and safety-first v5 behaviours are used instead - such as expiry for inflight and retained messages, and clients - and quality-of-service flow control limits. + +## Roadmap +- Please [open an issue](https://github.com/mochi-mqtt/server/issues) to request new features or event hooks! +- Cluster support. +- Enhanced Metrics support. +- File-based server configuration (supporting docker). + +## Quick Start +### Running the Broker with Go +Mochi MQTT can be used as a standalone broker. Simply checkout this repository and run the [cmd/main.go](cmd/main.go) entrypoint in the [cmd](cmd) folder which will expose tcp (:1883), websocket (:1882), and dashboard (:8080) listeners. + +``` +cd cmd +go build -o mqtt && ./mqtt +``` + +### Using Docker +A simple Dockerfile is provided for running the [cmd/main.go](cmd/main.go) Websocket, TCP, and Stats server: + +```sh +docker build -t mochi:latest . +docker run -p 1883:1883 -p 1882:1882 -p 8080:8080 mochi:latest +``` + +## Developing with Mochi MQTT +### Importing as a package +Importing Mochi MQTT as a package requires just a few lines of code to get started. +``` go +import ( + "log" + + "github.com/mochi-mqtt/server/v2" + "github.com/mochi-mqtt/server/v2/hooks/auth" + "github.com/mochi-mqtt/server/v2/listeners" +) + +func main() { + // Create the new MQTT Server. + server := mqtt.New(nil) + + // Allow all connections. + _ = server.AddHook(new(auth.AllowHook), nil) + + // Create a TCP listener on a standard port. + tcp := listeners.NewTCP("t1", ":1883", nil) + err := server.AddListener(tcp) + if err != nil { + log.Fatal(err) + } + + err = server.Serve() + if err != nil { + log.Fatal(err) + } +} +``` + +Examples of running the broker with various configurations can be found in the [examples](examples) folder. + +#### Network Listeners +The server comes with a variety of pre-packaged network listeners which allow the broker to accept connections on different protocols. The current listeners are: + +| Listener | Usage | +|------------------------------|----------------------------------------------------------------------------------------------| +| listeners.NewTCP | A TCP listener | +| listeners.NewUnixSock | A Unix Socket listener | +| listeners.NewNet | A net.Listener listener | +| listeners.NewWebsocket | A Websocket listener | +| listeners.NewHTTPStats | An HTTP $SYS info dashboard | +| listeners.NewHTTPHealthCheck | An HTTP healthcheck listener to provide health check responses for e.g. cloud infrastructure | + +> Use the `listeners.Listener` interface to develop new listeners. If you do, please let us know! + +A `*listeners.Config` may be passed to configure TLS. + +Examples of usage can be found in the [examples](examples) folder or [cmd/main.go](cmd/main.go). + +### Server Options and Capabilities +A number of configurable options are available which can be used to alter the behaviour or restrict access to certain features in the server. + +```go +server := mqtt.New(&mqtt.Options{ + Capabilities: mqtt.Capabilities{ + MaximumSessionExpiryInterval: 3600, + Compatibilities: mqtt.Compatibilities{ + ObscureNotAuthorized: true, + }, + }, + ClientNetWriteBufferSize: 4096, + ClientNetReadBufferSize: 4096, + SysTopicResendInterval: 10, +}) +``` + +Review the mqtt.Options, mqtt.Capabilities, and mqtt.Compatibilities structs for a comprehensive list of options. 
`ClientNetWriteBufferSize` and `ClientNetReadBufferSize` can be configured to adjust memory usage per client, based on your needs. + + +## Event Hooks +A universal event hooks system allows developers to hook into various parts of the server and client life cycle to add and modify functionality of the broker. These universal hooks are used to provide everything from authentication, persistent storage, to debugging tools. + +Hooks are stackable - you can add multiple hooks to a server, and they will be run in the order they were added. Some hooks modify values, and these modified values will be passed to the subsequent hooks before being returned to the runtime code. + +| Type | Import | Info | +| -- | -- | -- | +| Access Control | [mochi-mqtt/server/hooks/auth . AllowHook](hooks/auth/allow_all.go) | Allow access to all connecting clients and read/write to all topics. | +| Access Control | [mochi-mqtt/server/hooks/auth . Auth](hooks/auth/auth.go) | Rule-based access control ledger. | +| Persistence | [mochi-mqtt/server/hooks/storage/bolt](hooks/storage/bolt/bolt.go) | Persistent storage using [BoltDB](https://dbdb.io/db/boltdb) (deprecated). | +| Persistence | [mochi-mqtt/server/hooks/storage/badger](hooks/storage/badger/badger.go) | Persistent storage using [BadgerDB](https://github.com/dgraph-io/badger). | +| Persistence | [mochi-mqtt/server/hooks/storage/redis](hooks/storage/redis/redis.go) | Persistent storage using [Redis](https://redis.io). | +| Debugging | [mochi-mqtt/server/hooks/debug](hooks/debug/debug.go) | Additional debugging output to visualise packet flow. | + +Many of the internal server functions are now exposed to developers, so you can make your own Hooks by using the above as examples. If you do, please [Open an issue](https://github.com/mochi-mqtt/server/issues) and let everyone know! + +### Access Control +#### Allow Hook +By default, Mochi MQTT uses a DENY-ALL access control rule. To allow connections, this must overwritten using an Access Control hook. The simplest of these hooks is the `auth.AllowAll` hook, which provides ALLOW-ALL rules to all connections, subscriptions, and publishing. It's also the simplest hook to use: + +```go +server := mqtt.New(nil) +_ = server.AddHook(new(auth.AllowHook), nil) +``` + +> Don't do this if you are exposing your server to the internet or untrusted networks - it should really be used for development, testing, and debugging only. + +#### Auth Ledger +The Auth Ledger hook provides a sophisticated mechanism for defining access rules in a struct format. Auth ledger rules come in two forms: Auth rules (connection), and ACL rules (publish subscribe). + +Auth rules have 4 optional criteria and an assertion flag: +| Criteria | Usage | +| -- | -- | +| Client | client id of the connecting client | +| Username | username of the connecting client | +| Password | password of the connecting client | +| Remote | the remote address or ip of the client | +| Allow | true (allow this user) or false (deny this user) | + +ACL rules have 3 optional criteria and an filter match: +| Criteria | Usage | +| -- | -- | +| Client | client id of the connecting client | +| Username | username of the connecting client | +| Remote | the remote address or ip of the client | +| Filters | an array of filters to match | + +Rules are processed in index order (0,1,2,3), returning on the first matching rule. See [hooks/auth/ledger.go](hooks/auth/ledger.go) to review the structs. 
+ +```go +server := mqtt.New(nil) +err := server.AddHook(new(auth.Hook), &auth.Options{ + Ledger: &auth.Ledger{ + Auth: auth.AuthRules{ // Auth disallows all by default + {Username: "peach", Password: "password1", Allow: true}, + {Username: "melon", Password: "password2", Allow: true}, + {Remote: "127.0.0.1:*", Allow: true}, + {Remote: "localhost:*", Allow: true}, + }, + ACL: auth.ACLRules{ // ACL allows all by default + {Remote: "127.0.0.1:*"}, // local superuser allow all + { + // user melon can read and write to their own topic + Username: "melon", Filters: auth.Filters{ + "melon/#": auth.ReadWrite, + "updates/#": auth.WriteOnly, // can write to updates, but can't read updates from others + }, + }, + { + // Otherwise, no clients have publishing permissions + Filters: auth.Filters{ + "#": auth.ReadOnly, + "updates/#": auth.Deny, + }, + }, + }, + } +}) +``` + +The ledger can also be stored as JSON or YAML and loaded using the Data field: +```go +err = server.AddHook(new(auth.Hook), &auth.Options{ + Data: data, // build ledger from byte slice: yaml or json +}) +``` +See [examples/auth/encoded/main.go](examples/auth/encoded/main.go) for more information. + +### Persistent Storage +#### Redis +A basic Redis storage hook is available which provides persistence for the broker. It can be added to the server in the same fashion as any other hook, with several options. It uses github.com/go-redis/redis/v8 under the hook, and is completely configurable through the Options value. +```go +err := server.AddHook(new(redis.Hook), &redis.Options{ + Options: &rv8.Options{ + Addr: "localhost:6379", // default redis address + Password: "", // your password + DB: 0, // your redis db + }, +}) +if err != nil { + log.Fatal(err) +} +``` +For more information on how the redis hook works, or how to use it, see the [examples/persistence/redis/main.go](examples/persistence/redis/main.go) or [hooks/storage/redis](hooks/storage/redis) code. + +#### Badger DB +There's also a BadgerDB storage hook if you prefer file based storage. It can be added and configured in much the same way as the other hooks (with somewhat less options). +```go +err := server.AddHook(new(badger.Hook), &badger.Options{ + Path: badgerPath, +}) +if err != nil { + log.Fatal(err) +} +``` +For more information on how the badger hook works, or how to use it, see the [examples/persistence/badger/main.go](examples/persistence/badger/main.go) or [hooks/storage/badger](hooks/storage/badger) code. + +There is also a BoltDB hook which has been deprecated in favour of Badger, but if you need it, check [examples/persistence/bolt/main.go](examples/persistence/bolt/main.go). + +## Developing with Event Hooks +Many hooks are available for interacting with the broker and client lifecycle. +The function signatures for all the hooks and `mqtt.Hook` interface can be found in [hooks.go](hooks.go). + +> The most flexible event hooks are OnPacketRead, OnPacketEncode, and OnPacketSent - these hooks be used to control and modify all incoming and outgoing packets. + +| Function | Usage | +|------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| OnStarted | Called when the server has successfully started. | +| OnStopped | Called when the server has successfully stopped. 
| + OnConnectAuthenticate | Called when a user attempts to authenticate with the server. An implementation of this method MUST be used to allow or deny access to the server (see hooks/auth/allow_all or basic). It can be used in custom hooks to check connecting users against an existing user database. Returns true if allowed. | + OnACLCheck | Called when a user attempts to publish or subscribe to a topic filter. As above. | + OnSysInfoTick | Called when the $SYS topic values are published out. | + OnConnect | Called when a new client connects, may return an error or packet code to halt the client connection process. | + OnSessionEstablish | Called immediately after a new client connects and authenticates and immediately before the session is established and CONNACK is sent. | + OnSessionEstablished | Called when a new client successfully establishes a session (after OnConnect). | + OnDisconnect | Called when a client is disconnected for any reason. | + OnAuthPacket | Called when an auth packet is received. It is intended to allow developers to create their own mqtt v5 Auth Packet handling mechanisms. Allows packet modification. | + OnPacketRead | Called when a packet is received from a client. Allows packet modification. | + OnPacketEncode | Called immediately before a packet is encoded to be sent to a client. Allows packet modification. | + OnPacketSent | Called when a packet has been sent to a client. | + OnPacketProcessed | Called when a packet has been received and successfully handled by the broker. | + OnSubscribe | Called when a client subscribes to one or more filters. Allows packet modification. | + OnSubscribed | Called when a client successfully subscribes to one or more filters. | + OnSelectSubscribers | Called when subscribers have been collected for a topic, but before shared subscription subscribers have been selected. Allows recipient modification. | + OnUnsubscribe | Called when a client unsubscribes from one or more filters. Allows packet modification. | + OnUnsubscribed | Called when a client successfully unsubscribes from one or more filters. | + OnPublish | Called when a client publishes a message. Allows packet modification. | + OnPublished | Called when a client has published a message to subscribers. | + OnPublishDropped | Called when a message to a client is dropped before delivery, such as if the client is taking too long to respond. | + OnRetainMessage | Called when a published message is retained. | + OnRetainPublished | Called when a retained message is published to a client. | + OnQosPublish | Called when a publish packet with Qos >= 1 is issued to a subscriber. | + OnQosComplete | Called when the Qos flow for a message has been completed. | + OnQosDropped | Called when an inflight message expires before completion. | + OnPacketIDExhausted | Called when a client runs out of unused packet ids to assign. | + OnWill | Called when a client disconnects and intends to issue a will message. Allows packet modification. | + OnWillSent | Called when an LWT message has been issued from a disconnecting client. | + OnClientExpired | Called when a client session has expired and should be deleted. | + OnRetainedExpired | Called when a retained message has expired and should be deleted. | + StoredClients | Returns clients, eg. from a persistent store. | + StoredSubscriptions | Returns client subscriptions, eg. from a persistent store. | + StoredInflightMessages | Returns inflight messages, eg. from a persistent store. 
| + StoredRetainedMessages | Returns retained messages, eg. from a persistent store. | + StoredSysInfo | Returns stored system info values, eg. from a persistent store. | + +If you are building a persistent storage hook, see the existing persistent hooks for inspiration and patterns. If you are building an auth hook, you will need `OnACLCheck` and `OnConnectAuthenticate`. + + +### Direct Publish +To publish a basic message to a topic from within the embedding application, you can use the `server.Publish(topic string, payload []byte, retain bool, qos byte) error` method. + +```go +err := server.Publish("direct/publish", []byte("packet scheduled message"), false, 0) +``` +> The Qos byte in this case is only used to set the upper qos limit available for subscribers, as per MQTT v5 spec. + +### Packet Injection +If you want more control, or want to set specific MQTT v5 properties and other values, you can create your own publish packets from a client of your choice. This method allows you to inject MQTT packets (not just publish) directly into the runtime as though they had been received by a specific client. Most of the time you'll want to use the special client flag `inline=true`, as it has unique privileges: it bypasses all ACL and topic validation checks, meaning it can even publish to $SYS topics. + +Packet injection can be used for any MQTT packet, including ping requests, subscriptions, etc. And because the Clients structs and methods are now exported, you can even inject packets on behalf of a connected client (if you have very custom requirements). + +```go +cl := server.NewClient(nil, "local", "inline", true) +server.InjectPacket(cl, packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: packets.Publish, + }, + TopicName: "direct/publish", + Payload: []byte("scheduled message"), +}) +``` + +> MQTT packets still need to be correctly formed, so refer to [the test packets catalogue](packets/tpackets.go) and the [MQTTv5 Specification](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html) for inspiration. + +See the [hooks example](examples/hooks/main.go) to see this feature in action. + + + +### Testing +#### Unit Tests +Mochi MQTT tests over a thousand scenarios with thoughtfully hand-written unit tests to ensure each function does exactly what we expect. You can run the tests using go: +``` +go test --cover ./... +``` + +#### Paho Interoperability Test +You can check the broker against the [Paho Interoperability Test](https://github.com/eclipse/paho.mqtt.testing/tree/master/interoperability) by starting the broker using `examples/paho/main.go`, and then running the mqtt v5 and v3 tests with `python3 client_test5.py` from the _interoperability_ folder. + +> Note that there are currently a number of outstanding issues regarding false negatives in the paho suite, and as such, certain compatibility modes are enabled in the `paho/main.go` example. + + +## Performance Benchmarks +Mochi MQTT performance is comparable with popular brokers such as Mosquitto, EMQX, and others. + +Performance benchmarks were tested using [MQTT-Stresser](https://github.com/inovex/mqtt-stresser) on an Apple MacBook Air M2, using `cmd/main.go` default settings. Taking into account bursts of high and low throughput, the median scores are the most useful. Higher is better. + +> The values presented in the benchmark are not representative of true messages per second throughput. They rely on an unusual calculation by mqtt-stresser, but are usable as they are consistent across all brokers.
+> Benchmarks are provided as a general performance expectation guideline only. Comparisons are performed using out-of-the-box default configurations. + +`mqtt-stresser -broker tcp://localhost:1883 -num-clients=2 -num-messages=10000` +| Broker | publish fastest | median | slowest | receive fastest | median | slowest | +| -- | -- | -- | -- | -- | -- | -- | +| Mochi v2.2.10 | 124,772 | 125,456 | 124,614 | 314,461 | 313,186 | 311,910 | +| [Mosquitto v2.0.15](https://github.com/eclipse/mosquitto) | 155,920 | 155,919 | 155,918 | 185,485 | 185,097 | 184,709 | +| [EMQX v5.0.11](https://github.com/emqx/emqx) | 156,945 | 156,257 | 155,568 | 17,918 | 17,783 | 17,649 | +| [Rumqtt v0.21.0](https://github.com/bytebeamio/rumqtt) | 112,208 | 108,480 | 104,753 | 135,784 | 126,446 | 117,108 | + +`mqtt-stresser -broker tcp://localhost:1883 -num-clients=10 -num-messages=10000` +| Broker | publish fastest | median | slowest | receive fastest | median | slowest | +| -- | -- | -- | -- | -- | -- | -- | +| Mochi v2.2.10 | 41,825 | 31,663| 23,008 | 144,058 | 65,903 | 37,618 | +| Mosquitto v2.0.15 | 42,729 | 38,633 | 29,879 | 23,241 | 19,714 | 18,806 | +| EMQX v5.0.11 | 21,553 | 17,418 | 14,356 | 4,257 | 3,980 | 3,756 | +| Rumqtt v0.21.0 | 42,213 | 23,153 | 20,814 | 49,465 | 36,626 | 19,283 | + +Million Message Challenge (hit the server with 1 million messages immediately): + +`mqtt-stresser -broker tcp://localhost:1883 -num-clients=100 -num-messages=10000` +| Broker | publish fastest | median | slowest | receive fastest | median | slowest | +| -- | -- | -- | -- | -- | -- | -- | +| Mochi v2.2.10 | 13,532 | 4,425 | 2,344 | 52,120 | 7,274 | 2,701 | +| Mosquitto v2.0.15 | 3,826 | 3,395 | 3,032 | 1,200 | 1,150 | 1,118 | +| EMQX v5.0.11 | 4,086 | 2,432 | 2,274 | 434 | 333 | 311 | +| Rumqtt v0.21.0 | 78,972 | 5,047 | 3,804 | 4,286 | 3,249 | 2,027 | + +> Not sure what's going on with EMQX here, perhaps the docker out-of-the-box settings are not optimal, so take it with a pinch of salt as we know for a fact it's a solid piece of software. + +## Contribution Guidelines +Contributions and feedback are both welcomed and encouraged! [Open an issue](https://github.com/mochi-mqtt/server/issues) to report a bug, ask a question, or make a feature request. If you open a pull request, please try to follow the following guidelines: +- Try to maintain test coverage where reasonably possible. +- Clearly state what the PR does and why. +- Remember to add your SPDX FileContributor tag to files where you have made a meaningful contribution. + +[SPDX Annotations](https://spdx.dev) are used to clearly indicate the license, copyright, and contributions of each file in a machine-readable format. If you are adding a new file to the repository, please ensure it has the following SPDX header: +```go +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2023 mochi-mqtt +// SPDX-FileContributor: Your name or alias + +package name +``` + +Please ensure to add a new `SPDX-FileContributor` line for each contributor to the file. Refer to other files for examples. Please remember to do this, your contributions to this project are valuable and appreciated - it's important to receive credit! + +## Stargazers over time 🥰 +[![Stargazers over time](https://starchart.cc/mochi-mqtt/server.svg)](https://starchart.cc/mochi-mqtt/server) +Are you using Mochi MQTT in a project? 
[Let us know!](https://github.com/mochi-mqtt/server/issues) + diff --git a/vendor/github.com/mochi-mqtt/server/v2/clients.go b/vendor/github.com/mochi-mqtt/server/v2/clients.go new file mode 100644 index 00000000..75fe8685 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/clients.go @@ -0,0 +1,574 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2023 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package mqtt + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/rs/xid" + + "github.com/mochi-mqtt/server/v2/packets" +) + +const ( + defaultKeepalive uint16 = 10 // the default connection keepalive value in seconds + defaultClientProtocolVersion byte = 4 // the default mqtt protocol version of connecting clients (if somehow unspecified). +) + +// ReadFn is the function signature for the function used for reading and processing new packets. +type ReadFn func(*Client, packets.Packet) error + +// Clients contains a map of the clients known by the broker. +type Clients struct { + internal map[string]*Client // clients known by the broker, keyed on client id. + sync.RWMutex +} + +// NewClients returns an instance of Clients. +func NewClients() *Clients { + return &Clients{ + internal: make(map[string]*Client), + } +} + +// Add adds a new client to the clients map, keyed on client id. +func (cl *Clients) Add(val *Client) { + cl.Lock() + defer cl.Unlock() + cl.internal[val.ID] = val +} + +// GetAll returns all the clients. +func (cl *Clients) GetAll() map[string]*Client { + cl.RLock() + defer cl.RUnlock() + m := map[string]*Client{} + for k, v := range cl.internal { + m[k] = v + } + return m +} + +// Get returns the value of a client if it exists. +func (cl *Clients) Get(id string) (*Client, bool) { + cl.RLock() + defer cl.RUnlock() + val, ok := cl.internal[id] + return val, ok +} + +// Len returns the length of the clients map. +func (cl *Clients) Len() int { + cl.RLock() + defer cl.RUnlock() + val := len(cl.internal) + return val +} + +// Delete removes a client from the internal map. +func (cl *Clients) Delete(id string) { + cl.Lock() + defer cl.Unlock() + delete(cl.internal, id) +} + +// GetByListener returns clients matching a listener id. +func (cl *Clients) GetByListener(id string) []*Client { + cl.RLock() + defer cl.RUnlock() + clients := make([]*Client, 0, cl.Len()) + for _, client := range cl.internal { + if client.Net.Listener == id && !client.Closed() { + clients = append(clients, client) + } + } + return clients +} + +// Client contains information about a client known by the broker. +type Client struct { + Properties ClientProperties // client properties + State ClientState // the operational state of the client. + Net ClientConnection // network connection state of the clinet + ID string // the client id. + ops *ops // ops provides a reference to server ops. + sync.RWMutex // mutex +} + +// ClientConnection contains the connection transport and metadata for the client. +type ClientConnection struct { + Conn net.Conn // the net.Conn used to establish the connection + bconn *bufio.ReadWriter // a buffered net.Conn for reading packets + Remote string // the remote address of the client + Listener string // listener id of the client + Inline bool // client is an inline programmetic client +} + +// ClientProperties contains the properties which define the client behaviour. 
+type ClientProperties struct { + Props packets.Properties + Will Will + Username []byte + ProtocolVersion byte + Clean bool +} + +// Will contains the last will and testament details for a client connection. +type Will struct { + Payload []byte // - + User []packets.UserProperty // - + TopicName string // - + Flag uint32 // 0,1 + WillDelayInterval uint32 // - + Qos byte // - + Retain bool // - +} + +// State tracks the state of the client. +type ClientState struct { + TopicAliases TopicAliases // a map of topic aliases + stopCause atomic.Value // reason for stopping + Inflight *Inflight // a map of in-flight qos messages + Subscriptions *Subscriptions // a map of the subscription filters a client maintains + disconnected int64 // the time the client disconnected in unix time, for calculating expiry + outbound chan *packets.Packet // queue for pending outbound packets + endOnce sync.Once // only end once + isTakenOver uint32 // used to identify orphaned clients + packetID uint32 // the current highest packetID + open context.Context // indicate that the client is open for packet exchange + cancelOpen context.CancelFunc // cancel function for open context + outboundQty int32 // number of messages currently in the outbound queue + Keepalive uint16 // the number of seconds the connection can wait + ServerKeepalive bool // keepalive was set by the server +} + +// newClient returns a new instance of Client. This is almost exclusively used by Server +// for creating new clients, but it lives here because it's not dependent. +func newClient(c net.Conn, o *ops) *Client { + ctx, cancel := context.WithCancel(context.Background()) + cl := &Client{ + State: ClientState{ + Inflight: NewInflights(), + Subscriptions: NewSubscriptions(), + TopicAliases: NewTopicAliases(o.options.Capabilities.TopicAliasMaximum), + open: ctx, + cancelOpen: cancel, + Keepalive: defaultKeepalive, + outbound: make(chan *packets.Packet, o.options.Capabilities.MaximumClientWritesPending), + }, + Properties: ClientProperties{ + ProtocolVersion: defaultClientProtocolVersion, // default protocol version + }, + ops: o, + } + + if c != nil { + cl.Net = ClientConnection{ + Conn: c, + bconn: bufio.NewReadWriter( + bufio.NewReaderSize(c, o.options.ClientNetReadBufferSize), + bufio.NewWriterSize(c, o.options.ClientNetWriteBufferSize), + ), + Remote: c.RemoteAddr().String(), + } + } + + return cl +} + +// WriteLoop ranges over pending outbound messages and writes them to the client connection. +func (cl *Client) WriteLoop() { + for { + select { + case pk := <-cl.State.outbound: + if err := cl.WritePacket(*pk); err != nil { + cl.ops.log.Debug().Err(err).Str("client", cl.ID).Interface("packet", pk).Msg("failed publishing packet") + } + atomic.AddInt32(&cl.State.outboundQty, -1) + case <-cl.State.open.Done(): + return + } + } +} + +// ParseConnect parses the connect parameters and properties for a client. 
+func (cl *Client) ParseConnect(lid string, pk packets.Packet) { + cl.Net.Listener = lid + + cl.Properties.ProtocolVersion = pk.ProtocolVersion + cl.Properties.Username = pk.Connect.Username + cl.Properties.Clean = pk.Connect.Clean + cl.Properties.Props = pk.Properties.Copy(false) + + cl.State.Keepalive = pk.Connect.Keepalive // [MQTT-3.2.2-22] + cl.State.Inflight.ResetReceiveQuota(int32(cl.ops.options.Capabilities.ReceiveMaximum)) // server receive max per client + cl.State.Inflight.ResetSendQuota(int32(cl.Properties.Props.ReceiveMaximum)) // client receive max + cl.State.TopicAliases.Outbound = NewOutboundTopicAliases(cl.Properties.Props.TopicAliasMaximum) + + cl.ID = pk.Connect.ClientIdentifier + if cl.ID == "" { + cl.ID = xid.New().String() // [MQTT-3.1.3-6] [MQTT-3.1.3-7] + cl.Properties.Props.AssignedClientID = cl.ID + } + + if pk.Connect.WillFlag { + cl.Properties.Will = Will{ + Qos: pk.Connect.WillQos, + Retain: pk.Connect.WillRetain, + Payload: pk.Connect.WillPayload, + TopicName: pk.Connect.WillTopic, + WillDelayInterval: pk.Connect.WillProperties.WillDelayInterval, + User: pk.Connect.WillProperties.User, + } + if pk.Properties.SessionExpiryIntervalFlag && + pk.Properties.SessionExpiryInterval < pk.Connect.WillProperties.WillDelayInterval { + cl.Properties.Will.WillDelayInterval = pk.Properties.SessionExpiryInterval + } + if pk.Connect.WillFlag { + cl.Properties.Will.Flag = 1 // atomic for checking + } + } +} + +// refreshDeadline refreshes the read/write deadline for the net.Conn connection. +func (cl *Client) refreshDeadline(keepalive uint16) { + var expiry time.Time // nil time can be used to disable deadline if keepalive = 0 + if keepalive > 0 { + expiry = time.Now().Add(time.Duration(keepalive+(keepalive/2)) * time.Second) // [MQTT-3.1.2-22] + } + + if cl.Net.Conn != nil { + _ = cl.Net.Conn.SetDeadline(expiry) // [MQTT-3.1.2-22] + } +} + +// NextPacketID returns the next available (unused) packet id for the client. +// If no unused packet ids are available, an error is returned and the client +// should be disconnected. +func (cl *Client) NextPacketID() (i uint32, err error) { + cl.Lock() + defer cl.Unlock() + + i = atomic.LoadUint32(&cl.State.packetID) + started := i + overflowed := false + for { + if overflowed && i == started { + return 0, packets.ErrQuotaExceeded + } + + if i >= cl.ops.options.Capabilities.maximumPacketID { + overflowed = true + i = 0 + continue + } + + i++ + + if _, ok := cl.State.Inflight.Get(uint16(i)); !ok { + atomic.StoreUint32(&cl.State.packetID, i) + return i, nil + } + } +} + +// ResendInflightMessages attempts to resend any pending inflight messages to connected clients. +func (cl *Client) ResendInflightMessages(force bool) error { + if cl.State.Inflight.Len() == 0 { + return nil + } + + for _, tk := range cl.State.Inflight.GetAll(false) { + if tk.FixedHeader.Type == packets.Publish { + tk.FixedHeader.Dup = true // [MQTT-3.3.1-1] [MQTT-3.3.1-3] + } + + cl.ops.hooks.OnQosPublish(cl, tk, tk.Created, 0) + err := cl.WritePacket(tk) + if err != nil { + return err + } + + if tk.FixedHeader.Type == packets.Puback || tk.FixedHeader.Type == packets.Pubcomp { + if ok := cl.State.Inflight.Delete(tk.PacketID); ok { + cl.ops.hooks.OnQosComplete(cl, tk) + atomic.AddInt64(&cl.ops.info.Inflight, -1) + } + } + } + + return nil +} + +// ClearInflights deletes all inflight messages for the client, eg. for a disconnected user with a clean session. 
+func (cl *Client) ClearInflights(now, maximumExpiry int64) []uint16 { + deleted := []uint16{} + for _, tk := range cl.State.Inflight.GetAll(false) { + if (tk.Expiry > 0 && tk.Expiry < now) || tk.Created+maximumExpiry < now { + if ok := cl.State.Inflight.Delete(tk.PacketID); ok { + cl.ops.hooks.OnQosDropped(cl, tk) + atomic.AddInt64(&cl.ops.info.Inflight, -1) + deleted = append(deleted, tk.PacketID) + } + } + } + + return deleted +} + +// Read reads incoming packets from the connected client and transforms them into +// packets to be handled by the packetHandler. +func (cl *Client) Read(packetHandler ReadFn) error { + var err error + + for { + if cl.Closed() { + return nil + } + + cl.refreshDeadline(cl.State.Keepalive) + fh := new(packets.FixedHeader) + err = cl.ReadFixedHeader(fh) + if err != nil { + return err + } + + pk, err := cl.ReadPacket(fh) + if err != nil { + return err + } + + err = packetHandler(cl, pk) // Process inbound packet. + if err != nil { + return err + } + } +} + +// Stop instructs the client to shut down all processing goroutines and disconnect. +func (cl *Client) Stop(err error) { + cl.State.endOnce.Do(func() { + + if cl.Net.Conn != nil { + _ = cl.Net.Conn.Close() // omit close error + } + + if err != nil { + cl.State.stopCause.Store(err) + } + + if cl.State.cancelOpen != nil { + cl.State.cancelOpen() + } + + atomic.StoreInt64(&cl.State.disconnected, time.Now().Unix()) + }) +} + +// StopCause returns the reason the client connection was stopped, if any. +func (cl *Client) StopCause() error { + if cl.State.stopCause.Load() == nil { + return nil + } + return cl.State.stopCause.Load().(error) +} + +// Closed returns true if client connection is closed. +func (cl *Client) Closed() bool { + return cl.State.open == nil || cl.State.open.Err() != nil +} + +// ReadFixedHeader reads in the values of the next packet's fixed header. +func (cl *Client) ReadFixedHeader(fh *packets.FixedHeader) error { + if cl.Net.bconn == nil { + return ErrConnectionClosed + } + + b, err := cl.Net.bconn.ReadByte() + if err != nil { + return err + } + + err = fh.Decode(b) + if err != nil { + return err + } + + var bu int + fh.Remaining, bu, err = packets.DecodeLength(cl.Net.bconn) + if err != nil { + return err + } + + if cl.ops.options.Capabilities.MaximumPacketSize > 0 && uint32(fh.Remaining+1) > cl.ops.options.Capabilities.MaximumPacketSize { + return packets.ErrPacketTooLarge // [MQTT-3.2.2-15] + } + + atomic.AddInt64(&cl.ops.info.BytesReceived, int64(bu+1)) + return nil +} + +// ReadPacket reads the remaining buffer into an MQTT packet. +func (cl *Client) ReadPacket(fh *packets.FixedHeader) (pk packets.Packet, err error) { + atomic.AddInt64(&cl.ops.info.PacketsReceived, 1) + + pk.ProtocolVersion = cl.Properties.ProtocolVersion // inherit client protocol version for decoding + pk.FixedHeader = *fh + p := make([]byte, pk.FixedHeader.Remaining) + n, err := io.ReadFull(cl.Net.bconn, p) + if err != nil { + return pk, err + } + + atomic.AddInt64(&cl.ops.info.BytesReceived, int64(n)) + + // Decode the remaining packet values using a fresh copy of the bytes, + // otherwise the next packet will change the data of this one. + px := append([]byte{}, p[:]...) 
+ switch pk.FixedHeader.Type { + case packets.Connect: + err = pk.ConnectDecode(px) + case packets.Disconnect: + err = pk.DisconnectDecode(px) + case packets.Connack: + err = pk.ConnackDecode(px) + case packets.Publish: + err = pk.PublishDecode(px) + if err == nil { + atomic.AddInt64(&cl.ops.info.MessagesReceived, 1) + } + case packets.Puback: + err = pk.PubackDecode(px) + case packets.Pubrec: + err = pk.PubrecDecode(px) + case packets.Pubrel: + err = pk.PubrelDecode(px) + case packets.Pubcomp: + err = pk.PubcompDecode(px) + case packets.Subscribe: + err = pk.SubscribeDecode(px) + case packets.Suback: + err = pk.SubackDecode(px) + case packets.Unsubscribe: + err = pk.UnsubscribeDecode(px) + case packets.Unsuback: + err = pk.UnsubackDecode(px) + case packets.Pingreq: + case packets.Pingresp: + case packets.Auth: + err = pk.AuthDecode(px) + default: + err = fmt.Errorf("invalid packet type; %v", pk.FixedHeader.Type) + } + + if err != nil { + return pk, err + } + + pk, err = cl.ops.hooks.OnPacketRead(cl, pk) + return +} + +// WritePacket encodes and writes a packet to the client. +func (cl *Client) WritePacket(pk packets.Packet) error { + if cl.Closed() { + return ErrConnectionClosed + } + + if cl.Net.Conn == nil { + return nil + } + + if pk.Expiry > 0 { + pk.Properties.MessageExpiryInterval = uint32(pk.Expiry - time.Now().Unix()) // [MQTT-3.3.2-6] + } + + pk.ProtocolVersion = cl.Properties.ProtocolVersion + if pk.Mods.MaxSize == 0 { // NB we use this statement to embed client packet sizes in tests + pk.Mods.MaxSize = cl.Properties.Props.MaximumPacketSize + } + + if cl.Properties.Props.RequestProblemInfoFlag && cl.Properties.Props.RequestProblemInfo == 0x0 { + pk.Mods.DisallowProblemInfo = true // [MQTT-3.1.2-29] strict, no problem info on any packet if set + } + + if pk.FixedHeader.Type != packets.Connack || cl.Properties.Props.RequestResponseInfo == 0x1 || cl.ops.options.Capabilities.Compatibilities.AlwaysReturnResponseInfo { + pk.Mods.AllowResponseInfo = true // [MQTT-3.1.2-28] we need to know which properties we can encode + } + + pk = cl.ops.hooks.OnPacketEncode(cl, pk) + + var err error + buf := new(bytes.Buffer) + switch pk.FixedHeader.Type { + case packets.Connect: + err = pk.ConnectEncode(buf) + case packets.Connack: + err = pk.ConnackEncode(buf) + case packets.Publish: + err = pk.PublishEncode(buf) + case packets.Puback: + err = pk.PubackEncode(buf) + case packets.Pubrec: + err = pk.PubrecEncode(buf) + case packets.Pubrel: + err = pk.PubrelEncode(buf) + case packets.Pubcomp: + err = pk.PubcompEncode(buf) + case packets.Subscribe: + err = pk.SubscribeEncode(buf) + case packets.Suback: + err = pk.SubackEncode(buf) + case packets.Unsubscribe: + err = pk.UnsubscribeEncode(buf) + case packets.Unsuback: + err = pk.UnsubackEncode(buf) + case packets.Pingreq: + err = pk.PingreqEncode(buf) + case packets.Pingresp: + err = pk.PingrespEncode(buf) + case packets.Disconnect: + err = pk.DisconnectEncode(buf) + case packets.Auth: + err = pk.AuthEncode(buf) + default: + err = fmt.Errorf("%w: %v", packets.ErrNoValidPacketAvailable, pk.FixedHeader.Type) + } + if err != nil { + return err + } + + if pk.Mods.MaxSize > 0 && uint32(buf.Len()) > pk.Mods.MaxSize { + return packets.ErrPacketTooLarge // [MQTT-3.1.2-24] [MQTT-3.1.2-25] + } + + nb := net.Buffers{buf.Bytes()} + n, err := func() (int64, error) { + cl.Lock() + defer cl.Unlock() + return nb.WriteTo(cl.Net.Conn) + }() + if err != nil { + return err + } + + atomic.AddInt64(&cl.ops.info.BytesSent, n) + atomic.AddInt64(&cl.ops.info.PacketsSent, 1) + if 
pk.FixedHeader.Type == packets.Publish { + atomic.AddInt64(&cl.ops.info.MessagesSent, 1) + } + + cl.ops.hooks.OnPacketSent(cl, pk, buf.Bytes()) + + return err +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/hooks.go b/vendor/github.com/mochi-mqtt/server/v2/hooks.go new file mode 100644 index 00000000..1af3c6df --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/hooks.go @@ -0,0 +1,846 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co, thedevop + +package mqtt + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + + "github.com/mochi-mqtt/server/v2/hooks/storage" + "github.com/mochi-mqtt/server/v2/packets" + "github.com/mochi-mqtt/server/v2/system" + + "github.com/rs/zerolog" +) + +const ( + SetOptions byte = iota + OnSysInfoTick + OnStarted + OnStopped + OnConnectAuthenticate + OnACLCheck + OnConnect + OnSessionEstablish + OnSessionEstablished + OnDisconnect + OnAuthPacket + OnPacketRead + OnPacketEncode + OnPacketSent + OnPacketProcessed + OnSubscribe + OnSubscribed + OnSelectSubscribers + OnUnsubscribe + OnUnsubscribed + OnPublish + OnPublished + OnPublishDropped + OnRetainMessage + OnRetainPublished + OnQosPublish + OnQosComplete + OnQosDropped + OnPacketIDExhausted + OnWill + OnWillSent + OnClientExpired + OnRetainedExpired + StoredClients + StoredSubscriptions + StoredInflightMessages + StoredRetainedMessages + StoredSysInfo +) + +var ( + // ErrInvalidConfigType indicates a different Type of config value was expected to what was received. + ErrInvalidConfigType = errors.New("invalid config type provided") +) + +// Hook provides an interface of handlers for different events which occur +// during the lifecycle of the broker. +type Hook interface { + ID() string + Provides(b byte) bool + Init(config any) error + Stop() error + SetOpts(l *zerolog.Logger, o *HookOptions) + OnStarted() + OnStopped() + OnConnectAuthenticate(cl *Client, pk packets.Packet) bool + OnACLCheck(cl *Client, topic string, write bool) bool + OnSysInfoTick(*system.Info) + OnConnect(cl *Client, pk packets.Packet) error + OnSessionEstablish(cl *Client, pk packets.Packet) + OnSessionEstablished(cl *Client, pk packets.Packet) + OnDisconnect(cl *Client, err error, expire bool) + OnAuthPacket(cl *Client, pk packets.Packet) (packets.Packet, error) + OnPacketRead(cl *Client, pk packets.Packet) (packets.Packet, error) // triggers when a new packet is received by a client, but before packet validation + OnPacketEncode(cl *Client, pk packets.Packet) packets.Packet // modify a packet before it is byte-encoded and written to the client + OnPacketSent(cl *Client, pk packets.Packet, b []byte) // triggers when packet bytes have been written to the client + OnPacketProcessed(cl *Client, pk packets.Packet, err error) // triggers after a packet from the client been processed (handled) + OnSubscribe(cl *Client, pk packets.Packet) packets.Packet + OnSubscribed(cl *Client, pk packets.Packet, reasonCodes []byte) + OnSelectSubscribers(subs *Subscribers, pk packets.Packet) *Subscribers + OnUnsubscribe(cl *Client, pk packets.Packet) packets.Packet + OnUnsubscribed(cl *Client, pk packets.Packet) + OnPublish(cl *Client, pk packets.Packet) (packets.Packet, error) + OnPublished(cl *Client, pk packets.Packet) + OnPublishDropped(cl *Client, pk packets.Packet) + OnRetainMessage(cl *Client, pk packets.Packet, r int64) + OnRetainPublished(cl *Client, pk packets.Packet) + OnQosPublish(cl *Client, pk packets.Packet, sent int64, resends int) + OnQosComplete(cl 
*Client, pk packets.Packet) + OnQosDropped(cl *Client, pk packets.Packet) + OnPacketIDExhausted(cl *Client, pk packets.Packet) + OnWill(cl *Client, will Will) (Will, error) + OnWillSent(cl *Client, pk packets.Packet) + OnClientExpired(cl *Client) + OnRetainedExpired(filter string) + StoredClients() ([]storage.Client, error) + StoredSubscriptions() ([]storage.Subscription, error) + StoredInflightMessages() ([]storage.Message, error) + StoredRetainedMessages() ([]storage.Message, error) + StoredSysInfo() (storage.SystemInfo, error) +} + +// HookOptions contains values which are inherited from the server on initialisation. +type HookOptions struct { + Capabilities *Capabilities +} + +// Hooks is a slice of Hook interfaces to be called in sequence. +type Hooks struct { + Log *zerolog.Logger // a logger for the hook (from the server) + internal atomic.Value // a slice of []Hook + wg sync.WaitGroup // a waitgroup for syncing hook shutdown + qty int64 // the number of hooks in use + sync.Mutex // a mutex for locking when adding hooks +} + +// Len returns the number of hooks added. +func (h *Hooks) Len() int64 { + return atomic.LoadInt64(&h.qty) +} + +// Provides returns true if any one hook provides any of the requested hook methods. +func (h *Hooks) Provides(b ...byte) bool { + for _, hook := range h.GetAll() { + for _, hb := range b { + if hook.Provides(hb) { + return true + } + } + } + + return false +} + +// Add adds and initializes a new hook. +func (h *Hooks) Add(hook Hook, config any) error { + h.Lock() + defer h.Unlock() + + err := hook.Init(config) + if err != nil { + return fmt.Errorf("failed initialising %s hook: %w", hook.ID(), err) + } + + i, ok := h.internal.Load().([]Hook) + if !ok { + i = []Hook{} + } + + i = append(i, hook) + h.internal.Store(i) + atomic.AddInt64(&h.qty, 1) + h.wg.Add(1) + + return nil +} + +// GetAll returns a slice of all the hooks. +func (h *Hooks) GetAll() []Hook { + i, ok := h.internal.Load().([]Hook) + if !ok { + return []Hook{} + } + + return i +} + +// Stop indicates all attached hooks to gracefully end. +func (h *Hooks) Stop() { + go func() { + for _, hook := range h.GetAll() { + h.Log.Info().Str("hook", hook.ID()).Msg("stopping hook") + if err := hook.Stop(); err != nil { + h.Log.Debug().Err(err).Str("hook", hook.ID()).Msg("problem stopping hook") + } + + h.wg.Done() + } + }() + + h.wg.Wait() +} + +// OnSysInfoTick is called when the $SYS topic values are published out. +func (h *Hooks) OnSysInfoTick(sys *system.Info) { + for _, hook := range h.GetAll() { + if hook.Provides(OnSysInfoTick) { + hook.OnSysInfoTick(sys) + } + } +} + +// OnStarted is called when the server has successfully started. +func (h *Hooks) OnStarted() { + for _, hook := range h.GetAll() { + if hook.Provides(OnStarted) { + hook.OnStarted() + } + } +} + +// OnStopped is called when the server has successfully stopped. +func (h *Hooks) OnStopped() { + for _, hook := range h.GetAll() { + if hook.Provides(OnStopped) { + hook.OnStopped() + } + } +} + +// OnConnect is called when a new client connects, and may return a packets.Code as an error to halt the connection. +func (h *Hooks) OnConnect(cl *Client, pk packets.Packet) error { + for _, hook := range h.GetAll() { + if hook.Provides(OnConnect) { + err := hook.OnConnect(cl, pk) + if err != nil { + return err + } + } + } + return nil +} + +// OnSessionEstablish is called right after a new client connects and authenticates and right before +// the session is established and CONNACK is sent. 
+func (h *Hooks) OnSessionEstablish(cl *Client, pk packets.Packet) { + for _, hook := range h.GetAll() { + if hook.Provides(OnSessionEstablish) { + hook.OnSessionEstablish(cl, pk) + } + } +} + +// OnSessionEstablished is called when a new client establishes a session (after OnConnect). +func (h *Hooks) OnSessionEstablished(cl *Client, pk packets.Packet) { + for _, hook := range h.GetAll() { + if hook.Provides(OnSessionEstablished) { + hook.OnSessionEstablished(cl, pk) + } + } +} + +// OnDisconnect is called when a client is disconnected for any reason. +func (h *Hooks) OnDisconnect(cl *Client, err error, expire bool) { + for _, hook := range h.GetAll() { + if hook.Provides(OnDisconnect) { + hook.OnDisconnect(cl, err, expire) + } + } +} + +// OnPacketRead is called when a packet is received from a client. +func (h *Hooks) OnPacketRead(cl *Client, pk packets.Packet) (pkx packets.Packet, err error) { + pkx = pk + for _, hook := range h.GetAll() { + if hook.Provides(OnPacketRead) { + npk, err := hook.OnPacketRead(cl, pkx) + if err != nil && errors.Is(err, packets.ErrRejectPacket) { + h.Log.Debug().Err(err).Str("hook", hook.ID()).Interface("packet", pkx).Msg("packet rejected") + return pk, err + } else if err != nil { + continue + } + + pkx = npk + } + } + + return +} + +// OnAuthPacket is called when an auth packet is received. It is intended to allow developers +// to create their own auth packet handling mechanisms. +func (h *Hooks) OnAuthPacket(cl *Client, pk packets.Packet) (pkx packets.Packet, err error) { + pkx = pk + for _, hook := range h.GetAll() { + if hook.Provides(OnAuthPacket) { + npk, err := hook.OnAuthPacket(cl, pkx) + if err != nil { + return pk, err + } + + pkx = npk + } + } + + return +} + +// OnPacketEncode is called immediately before a packet is encoded to be sent to a client. +func (h *Hooks) OnPacketEncode(cl *Client, pk packets.Packet) packets.Packet { + for _, hook := range h.GetAll() { + if hook.Provides(OnPacketEncode) { + pk = hook.OnPacketEncode(cl, pk) + } + } + + return pk +} + +// OnPacketProcessed is called when a packet has been received and successfully handled by the broker. +func (h *Hooks) OnPacketProcessed(cl *Client, pk packets.Packet, err error) { + for _, hook := range h.GetAll() { + if hook.Provides(OnPacketProcessed) { + hook.OnPacketProcessed(cl, pk, err) + } + } +} + +// OnPacketSent is called when a packet has been sent to a client. It takes a bytes parameter +// containing the bytes sent. +func (h *Hooks) OnPacketSent(cl *Client, pk packets.Packet, b []byte) { + for _, hook := range h.GetAll() { + if hook.Provides(OnPacketSent) { + hook.OnPacketSent(cl, pk, b) + } + } +} + +// OnSubscribe is called when a client subscribes to one or more filters. This method +// differs from OnSubscribed in that it allows you to modify the subscription values +// before the packet is processed. The return values of the hook methods are passed-through +// in the order the hooks were attached. +func (h *Hooks) OnSubscribe(cl *Client, pk packets.Packet) packets.Packet { + for _, hook := range h.GetAll() { + if hook.Provides(OnSubscribe) { + pk = hook.OnSubscribe(cl, pk) + } + } + return pk +} + +// OnSubscribed is called when a client subscribes to one or more filters. 
+func (h *Hooks) OnSubscribed(cl *Client, pk packets.Packet, reasonCodes []byte) { + for _, hook := range h.GetAll() { + if hook.Provides(OnSubscribed) { + hook.OnSubscribed(cl, pk, reasonCodes) + } + } +} + +// OnSelectSubscribers is called when subscribers have been collected for a topic, but before +// shared subscription subscribers have been selected. This hook can be used to programmatically +// remove or add clients to a publish to subscribers process, or to select the subscriber for a shared +// group in a custom manner (such as based on client id, ip, etc). +func (h *Hooks) OnSelectSubscribers(subs *Subscribers, pk packets.Packet) *Subscribers { + for _, hook := range h.GetAll() { + if hook.Provides(OnSelectSubscribers) { + subs = hook.OnSelectSubscribers(subs, pk) + } + } + return subs +} + +// OnUnsubscribe is called when a client unsubscribes from one or more filters. This method +// differs from OnUnsubscribed in that it allows you to modify the unsubscription values +// before the packet is processed. The return values of the hook methods are passed-through +// in the order the hooks were attached. +func (h *Hooks) OnUnsubscribe(cl *Client, pk packets.Packet) packets.Packet { + for _, hook := range h.GetAll() { + if hook.Provides(OnUnsubscribe) { + pk = hook.OnUnsubscribe(cl, pk) + } + } + return pk +} + +// OnUnsubscribed is called when a client unsubscribes from one or more filters. +func (h *Hooks) OnUnsubscribed(cl *Client, pk packets.Packet) { + for _, hook := range h.GetAll() { + if hook.Provides(OnUnsubscribed) { + hook.OnUnsubscribed(cl, pk) + } + } +} + +// OnPublish is called when a client publishes a message. This method differs from OnPublished +// in that it allows you to modify you to modify the incoming packet before it is processed. +// The return values of the hook methods are passed-through in the order the hooks were attached. +func (h *Hooks) OnPublish(cl *Client, pk packets.Packet) (pkx packets.Packet, err error) { + pkx = pk + for _, hook := range h.GetAll() { + if hook.Provides(OnPublish) { + npk, err := hook.OnPublish(cl, pkx) + if err != nil { + if errors.Is(err, packets.ErrRejectPacket) { + h.Log.Debug().Err(err).Str("hook", hook.ID()).Interface("packet", pkx).Msg("publish packet rejected") + return pk, err + } + h.Log.Error().Err(err).Str("hook", hook.ID()).Interface("packet", pkx).Msg("publish packet error") + return pk, err + } + pkx = npk + } + } + + return +} + +// OnPublished is called when a client has published a message to subscribers. +func (h *Hooks) OnPublished(cl *Client, pk packets.Packet) { + for _, hook := range h.GetAll() { + if hook.Provides(OnPublished) { + hook.OnPublished(cl, pk) + } + } +} + +// OnPublishDropped is called when a message to a client was dropped instead of delivered +// such as when a client is too slow to respond. +func (h *Hooks) OnPublishDropped(cl *Client, pk packets.Packet) { + for _, hook := range h.GetAll() { + if hook.Provides(OnPublishDropped) { + hook.OnPublishDropped(cl, pk) + } + } +} + +// OnRetainMessage is called then a published message is retained. +func (h *Hooks) OnRetainMessage(cl *Client, pk packets.Packet, r int64) { + for _, hook := range h.GetAll() { + if hook.Provides(OnRetainMessage) { + hook.OnRetainMessage(cl, pk, r) + } + } +} + +// OnRetainPublished is called when a retained message is published. 
+func (h *Hooks) OnRetainPublished(cl *Client, pk packets.Packet) { + for _, hook := range h.GetAll() { + if hook.Provides(OnRetainPublished) { + hook.OnRetainPublished(cl, pk) + } + } +} + +// OnQosPublish is called when a publish packet with Qos >= 1 is issued to a subscriber. +// In other words, this method is called when a new inflight message is created or resent. +// It is typically used to store a new inflight message. +func (h *Hooks) OnQosPublish(cl *Client, pk packets.Packet, sent int64, resends int) { + for _, hook := range h.GetAll() { + if hook.Provides(OnQosPublish) { + hook.OnQosPublish(cl, pk, sent, resends) + } + } +} + +// OnQosComplete is called when the Qos flow for a message has been completed. +// In other words, when an inflight message is resolved. +// It is typically used to delete an inflight message from a store. +func (h *Hooks) OnQosComplete(cl *Client, pk packets.Packet) { + for _, hook := range h.GetAll() { + if hook.Provides(OnQosComplete) { + hook.OnQosComplete(cl, pk) + } + } +} + +// OnQosDropped is called the Qos flow for a message expires. In other words, when +// an inflight message expires or is abandoned. It is typically used to delete an +// inflight message from a store. +func (h *Hooks) OnQosDropped(cl *Client, pk packets.Packet) { + for _, hook := range h.GetAll() { + if hook.Provides(OnQosDropped) { + hook.OnQosDropped(cl, pk) + } + } +} + +// OnPacketIDExhausted is called when the client runs out of unused packet ids to +// assign to a packet. +func (h *Hooks) OnPacketIDExhausted(cl *Client, pk packets.Packet) { + for _, hook := range h.GetAll() { + if hook.Provides(OnPacketIDExhausted) { + hook.OnPacketIDExhausted(cl, pk) + } + } +} + +// OnWill is called when a client disconnects and publishes an LWT message. This method +// differs from OnWillSent in that it allows you to modify the LWT message before it is +// published. The return values of the hook methods are passed-through in the order +// the hooks were attached. +func (h *Hooks) OnWill(cl *Client, will Will) Will { + for _, hook := range h.GetAll() { + if hook.Provides(OnWill) { + mlwt, err := hook.OnWill(cl, will) + if err != nil { + h.Log.Error().Err(err).Str("hook", hook.ID()).Interface("will", will).Msg("parse will error") + continue + } + will = mlwt + } + } + + return will +} + +// OnWillSent is called when an LWT message has been issued from a disconnecting client. +func (h *Hooks) OnWillSent(cl *Client, pk packets.Packet) { + for _, hook := range h.GetAll() { + if hook.Provides(OnWillSent) { + hook.OnWillSent(cl, pk) + } + } +} + +// OnClientExpired is called when a client session has expired and should be deleted. +func (h *Hooks) OnClientExpired(cl *Client) { + for _, hook := range h.GetAll() { + if hook.Provides(OnClientExpired) { + hook.OnClientExpired(cl) + } + } +} + +// OnRetainedExpired is called when a retained message has expired and should be deleted. +func (h *Hooks) OnRetainedExpired(filter string) { + for _, hook := range h.GetAll() { + if hook.Provides(OnRetainedExpired) { + hook.OnRetainedExpired(filter) + } + } +} + +// StoredClients returns all clients, e.g. from a persistent store, is used to +// populate the server clients list before start. 
+func (h *Hooks) StoredClients() (v []storage.Client, err error) { + for _, hook := range h.GetAll() { + if hook.Provides(StoredClients) { + v, err := hook.StoredClients() + if err != nil { + h.Log.Error().Err(err).Str("hook", hook.ID()).Msg("failed to load clients") + return v, err + } + + if len(v) > 0 { + return v, nil + } + } + } + + return +} + +// StoredSubscriptions returns all subcriptions, e.g. from a persistent store, and is +// used to populate the server subscriptions list before start. +func (h *Hooks) StoredSubscriptions() (v []storage.Subscription, err error) { + for _, hook := range h.GetAll() { + if hook.Provides(StoredSubscriptions) { + v, err := hook.StoredSubscriptions() + if err != nil { + h.Log.Error().Err(err).Str("hook", hook.ID()).Msg("failed to load subscriptions") + return v, err + } + + if len(v) > 0 { + return v, nil + } + } + } + + return +} + +// StoredInflightMessages returns all inflight messages, e.g. from a persistent store, +// and is used to populate the restored clients with inflight messages before start. +func (h *Hooks) StoredInflightMessages() (v []storage.Message, err error) { + for _, hook := range h.GetAll() { + if hook.Provides(StoredInflightMessages) { + v, err := hook.StoredInflightMessages() + if err != nil { + h.Log.Error().Err(err).Str("hook", hook.ID()).Msg("failed to load inflight messages") + return v, err + } + + if len(v) > 0 { + return v, nil + } + } + } + + return +} + +// StoredRetainedMessages returns all retained messages, e.g. from a persistent store, +// and is used to populate the server topics with retained messages before start. +func (h *Hooks) StoredRetainedMessages() (v []storage.Message, err error) { + for _, hook := range h.GetAll() { + if hook.Provides(StoredRetainedMessages) { + v, err := hook.StoredRetainedMessages() + if err != nil { + h.Log.Error().Err(err).Str("hook", hook.ID()).Msg("failed to load retained messages") + return v, err + } + + if len(v) > 0 { + return v, nil + } + } + } + + return +} + +// StoredSysInfo returns a set of system info values. +func (h *Hooks) StoredSysInfo() (v storage.SystemInfo, err error) { + for _, hook := range h.GetAll() { + if hook.Provides(StoredSysInfo) { + v, err := hook.StoredSysInfo() + if err != nil { + h.Log.Error().Err(err).Str("hook", hook.ID()).Msg("failed to load $SYS info") + return v, err + } + + if v.Version != "" { + return v, nil + } + } + } + + return +} + +// OnConnectAuthenticate is called when a user attempts to authenticate with the server. +// An implementation of this method MUST be used to allow or deny access to the +// server (see hooks/auth/allow_all or basic). It can be used in custom hooks to +// check connecting users against an existing user database. +func (h *Hooks) OnConnectAuthenticate(cl *Client, pk packets.Packet) bool { + for _, hook := range h.GetAll() { + if hook.Provides(OnConnectAuthenticate) { + if ok := hook.OnConnectAuthenticate(cl, pk); ok { + return true + } + } + } + + return false +} + +// OnACLCheck is called when a user attempts to publish or subscribe to a topic filter. +// An implementation of this method MUST be used to allow or deny access to the +// (see hooks/auth/allow_all or basic). It can be used in custom hooks to +// check publishing and subscribing users against an existing permissions or roles database. 
+func (h *Hooks) OnACLCheck(cl *Client, topic string, write bool) bool { + for _, hook := range h.GetAll() { + if hook.Provides(OnACLCheck) { + if ok := hook.OnACLCheck(cl, topic, write); ok { + return true + } + } + } + + return false +} + +// HookBase provides a set of default methods for each hook. It should be embedded in +// all hooks. +type HookBase struct { + Hook + Log *zerolog.Logger + Opts *HookOptions +} + +// ID returns the ID of the hook. +func (h *HookBase) ID() string { + return "base" +} + +// Provides indicates which methods a hook provides. The default is none - this method +// should be overridden by the embedding hook. +func (h *HookBase) Provides(b byte) bool { + return false +} + +// Init performs any pre-start initializations for the hook, such as connecting to databases +// or opening files. +func (h *HookBase) Init(config any) error { + return nil +} + +// SetOpts is called by the server to propagate internal values and generally should +// not be called manually. +func (h *HookBase) SetOpts(l *zerolog.Logger, opts *HookOptions) { + h.Log = l + h.Opts = opts +} + +// Stop is called to gracefully shut down the hook. +func (h *HookBase) Stop() error { + return nil +} + +// OnStarted is called when the server starts. +func (h *HookBase) OnStarted() {} + +// OnStopped is called when the server stops. +func (h *HookBase) OnStopped() {} + +// OnSysInfoTick is called when the server publishes system info. +func (h *HookBase) OnSysInfoTick(*system.Info) {} + +// OnConnectAuthenticate is called when a user attempts to authenticate with the server. +func (h *HookBase) OnConnectAuthenticate(cl *Client, pk packets.Packet) bool { + return false +} + +// OnACLCheck is called when a user attempts to subscribe or publish to a topic. +func (h *HookBase) OnACLCheck(cl *Client, topic string, write bool) bool { + return false +} + +// OnConnect is called when a new client connects. +func (h *HookBase) OnConnect(cl *Client, pk packets.Packet) error { + return nil +} + +// OnSessionEstablish is called right after a new client connects and authenticates and right before +// the session is established and CONNACK is sent. +func (h *HookBase) OnSessionEstablish(cl *Client, pk packets.Packet) {} + +// OnSessionEstablished is called when a new client establishes a session (after OnConnect). +func (h *HookBase) OnSessionEstablished(cl *Client, pk packets.Packet) {} + +// OnDisconnect is called when a client is disconnected for any reason. +func (h *HookBase) OnDisconnect(cl *Client, err error, expire bool) {} + +// OnAuthPacket is called when an auth packet is received from the client. +func (h *HookBase) OnAuthPacket(cl *Client, pk packets.Packet) (packets.Packet, error) { + return pk, nil +} + +// OnPacketRead is called when a packet is received. +func (h *HookBase) OnPacketRead(cl *Client, pk packets.Packet) (packets.Packet, error) { + return pk, nil +} + +// OnPacketEncode is called before a packet is byte-encoded and written to the client. +func (h *HookBase) OnPacketEncode(cl *Client, pk packets.Packet) packets.Packet { + return pk +} + +// OnPacketSent is called immediately after a packet is written to a client. +func (h *HookBase) OnPacketSent(cl *Client, pk packets.Packet, b []byte) {} + +// OnPacketProcessed is called immediately after a packet from a client is processed. +func (h *HookBase) OnPacketProcessed(cl *Client, pk packets.Packet, err error) {} + +// OnSubscribe is called when a client subscribes to one or more filters. 
+func (h *HookBase) OnSubscribe(cl *Client, pk packets.Packet) packets.Packet { + return pk +} + +// OnSubscribed is called when a client subscribes to one or more filters. +func (h *HookBase) OnSubscribed(cl *Client, pk packets.Packet, reasonCodes []byte) {} + +// OnSelectSubscribers is called when selecting subscribers to receive a message. +func (h *HookBase) OnSelectSubscribers(subs *Subscribers, pk packets.Packet) *Subscribers { + return subs +} + +// OnUnsubscribe is called when a client unsubscribes from one or more filters. +func (h *HookBase) OnUnsubscribe(cl *Client, pk packets.Packet) packets.Packet { + return pk +} + +// OnUnsubscribed is called when a client unsubscribes from one or more filters. +func (h *HookBase) OnUnsubscribed(cl *Client, pk packets.Packet) {} + +// OnPublish is called when a client publishes a message. +func (h *HookBase) OnPublish(cl *Client, pk packets.Packet) (packets.Packet, error) { + return pk, nil +} + +// OnPublished is called when a client has published a message to subscribers. +func (h *HookBase) OnPublished(cl *Client, pk packets.Packet) {} + +// OnPublishDropped is called when a message to a client is dropped instead of being delivered. +func (h *HookBase) OnPublishDropped(cl *Client, pk packets.Packet) {} + +// OnRetainMessage is called then a published message is retained. +func (h *HookBase) OnRetainMessage(cl *Client, pk packets.Packet, r int64) {} + +// OnRetainPublished is called when a retained message is published. +func (h *HookBase) OnRetainPublished(cl *Client, pk packets.Packet) {} + +// OnQosPublish is called when a publish packet with Qos > 1 is issued to a subscriber. +func (h *HookBase) OnQosPublish(cl *Client, pk packets.Packet, sent int64, resends int) {} + +// OnQosComplete is called when the Qos flow for a message has been completed. +func (h *HookBase) OnQosComplete(cl *Client, pk packets.Packet) {} + +// OnQosDropped is called the Qos flow for a message expires. +func (h *HookBase) OnQosDropped(cl *Client, pk packets.Packet) {} + +// OnPacketIDExhausted is called when the client runs out of unused packet ids to assign to a packet. +func (h *HookBase) OnPacketIDExhausted(cl *Client, pk packets.Packet) {} + +// OnWill is called when a client disconnects and publishes an LWT message. +func (h *HookBase) OnWill(cl *Client, will Will) (Will, error) { + return will, nil +} + +// OnWillSent is called when an LWT message has been issued from a disconnecting client. +func (h *HookBase) OnWillSent(cl *Client, pk packets.Packet) {} + +// OnClientExpired is called when a client session has expired. +func (h *HookBase) OnClientExpired(cl *Client) {} + +// OnRetainedExpired is called when a retained message for a topic has expired. +func (h *HookBase) OnRetainedExpired(topic string) {} + +// StoredClients returns all clients from a store. +func (h *HookBase) StoredClients() (v []storage.Client, err error) { + return +} + +// StoredSubscriptions returns all subcriptions from a store. +func (h *HookBase) StoredSubscriptions() (v []storage.Subscription, err error) { + return +} + +// StoredInflightMessages returns all inflight messages from a store. +func (h *HookBase) StoredInflightMessages() (v []storage.Message, err error) { + return +} + +// StoredRetainedMessages returns all retained messages from a store. +func (h *HookBase) StoredRetainedMessages() (v []storage.Message, err error) { + return +} + +// StoredSysInfo returns a set of system info values. 
+func (h *HookBase) StoredSysInfo() (v storage.SystemInfo, err error) { + return +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/allow_all.go b/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/allow_all.go new file mode 100644 index 00000000..e05a0de3 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/allow_all.go @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package auth + +import ( + "bytes" + + "github.com/mochi-mqtt/server/v2" + "github.com/mochi-mqtt/server/v2/packets" +) + +// AllowHook is an authentication hook which allows connection access +// for all users and read and write access to all topics. +type AllowHook struct { + mqtt.HookBase +} + +// ID returns the ID of the hook. +func (h *AllowHook) ID() string { + return "allow-all-auth" +} + +// Provides indicates which hook methods this hook provides. +func (h *AllowHook) Provides(b byte) bool { + return bytes.Contains([]byte{ + mqtt.OnConnectAuthenticate, + mqtt.OnACLCheck, + }, []byte{b}) +} + +// OnConnectAuthenticate returns true/allowed for all requests. +func (h *AllowHook) OnConnectAuthenticate(cl *mqtt.Client, pk packets.Packet) bool { + return true +} + +// OnACLCheck returns true/allowed for all checks. +func (h *AllowHook) OnACLCheck(cl *mqtt.Client, topic string, write bool) bool { + return true +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/auth.go b/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/auth.go new file mode 100644 index 00000000..ed914613 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/auth.go @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package auth + +import ( + "bytes" + + "github.com/mochi-mqtt/server/v2" + "github.com/mochi-mqtt/server/v2/packets" +) + +// Options contains the configuration/rules data for the auth ledger. +type Options struct { + Data []byte + Ledger *Ledger +} + +// Hook is an authentication hook which implements an auth ledger. +type Hook struct { + mqtt.HookBase + config *Options + ledger *Ledger +} + +// ID returns the ID of the hook. +func (h *Hook) ID() string { + return "auth-ledger" +} + +// Provides indicates which hook methods this hook provides. +func (h *Hook) Provides(b byte) bool { + return bytes.Contains([]byte{ + mqtt.OnConnectAuthenticate, + mqtt.OnACLCheck, + }, []byte{b}) +} + +// Init configures the hook with the auth ledger to be used for checking. +func (h *Hook) Init(config any) error { + if _, ok := config.(*Options); !ok && config != nil { + return mqtt.ErrInvalidConfigType + } + + if config == nil { + config = new(Options) + } + + h.config = config.(*Options) + + var err error + if h.config.Ledger != nil { + h.ledger = h.config.Ledger + } else if len(h.config.Data) > 0 { + h.ledger = new(Ledger) + err = h.ledger.Unmarshal(h.config.Data) + } + if err != nil { + return err + } + + if h.ledger == nil { + h.ledger = &Ledger{ + Auth: AuthRules{}, + ACL: ACLRules{}, + } + } + + h.Log.Info(). + Int("authentication", len(h.ledger.Auth)). + Int("acl", len(h.ledger.ACL)). + Msg("loaded auth rules") + + return nil +} + +// OnConnectAuthenticate returns true if the connecting client has rules which provide access +// in the auth ledger. +func (h *Hook) OnConnectAuthenticate(cl *mqtt.Client, pk packets.Packet) bool { + if _, ok := h.ledger.AuthOk(cl, pk); ok { + return true + } + + h.Log.Info(). 
+ Str("username", string(pk.Connect.Username)). + Str("remote", cl.Net.Remote). + Msg("client failed authentication check") + + return false +} + +// OnACLCheck returns true if the connecting client has matching read or write access to subscribe +// or publish to a given topic. +func (h *Hook) OnACLCheck(cl *mqtt.Client, topic string, write bool) bool { + if _, ok := h.ledger.ACLOk(cl, topic, write); ok { + return true + } + + h.Log.Debug(). + Str("client", cl.ID). + Str("username", string(cl.Properties.Username)). + Str("topic", topic). + Msg("client failed allowed ACL check") + + return false +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/ledger.go b/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/ledger.go new file mode 100644 index 00000000..9e5e2e67 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/hooks/auth/ledger.go @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package auth + +import ( + "encoding/json" + "strings" + "sync" + + "gopkg.in/yaml.v3" + + "github.com/mochi-mqtt/server/v2" + "github.com/mochi-mqtt/server/v2/packets" +) + +const ( + Deny Access = iota // user cannot access the topic + ReadOnly // user can only subscribe to the topic + WriteOnly // user can only publish to the topic + ReadWrite // user can both publish and subscribe to the topic +) + +// Access determines the read/write privileges for an ACL rule. +type Access byte + +// Users contains a map of access rules for specific users, keyed on username. +type Users map[string]UserRule + +// UserRule defines a set of access rules for a specific user. +type UserRule struct { + Username RString `json:"username,omitempty" yaml:"username,omitempty"` // the username of a user + Password RString `json:"password,omitempty" yaml:"password,omitempty"` // the password of a user + ACL Filters `json:"acl,omitempty" yaml:"acl,omitempty"` // filters to match, if desired + Disallow bool `json:"disallow,omitempty" yaml:"disallow,omitempty"` // allow or disallow the user +} + +// AuthRules defines generic access rules applicable to all users. +type AuthRules []AuthRule + +type AuthRule struct { + Client RString `json:"client,omitempty" yaml:"client,omitempty"` // the id of a connecting client + Username RString `json:"username,omitempty" yaml:"username,omitempty"` // the username of a user + Remote RString `json:"remote,omitempty" yaml:"remote,omitempty"` // remote address or + Password RString `json:"password,omitempty" yaml:"password,omitempty"` // the password of a user + Allow bool `json:"allow,omitempty" yaml:"allow,omitempty"` // allow or disallow the users +} + +// ACLRules defines generic topic or filter access rules applicable to all users. +type ACLRules []ACLRule + +// ACLRule defines access rules for a specific topic or filter. +type ACLRule struct { + Client RString `json:"client,omitempty" yaml:"client,omitempty"` // the id of a connecting client + Username RString `json:"username,omitempty" yaml:"username,omitempty"` // the username of a user + Remote RString `json:"remote,omitempty" yaml:"remote,omitempty"` // remote address or + Filters Filters `json:"filters,omitempty" yaml:"filters,omitempty"` // filters to match +} + +// Filters is a map of Access rules keyed on filter. +type Filters map[RString]Access + +// RString is a rule value string. +type RString string + +// Matches returns true if the rule matches a given string. 
+func (r RString) Matches(a string) bool { + rr := string(r) + if r == "" || r == "*" || a == rr { + return true + } + + i := strings.Index(rr, "*") + if i > 0 && len(a) > i && strings.Compare(rr[:i], a[:i]) == 0 { + return true + } + + return false +} + +// FilterMatches returns true if a filter matches a topic rule. +func (f RString) FilterMatches(a string) bool { + _, ok := MatchTopic(string(f), a) + return ok +} + +// MatchTopic checks if a given topic matches a filter, accounting for filter +// wildcards. Eg. filter /a/b/+/c == topic a/b/d/c. +func MatchTopic(filter string, topic string) (elements []string, matched bool) { + filterParts := strings.Split(filter, "/") + topicParts := strings.Split(topic, "/") + + elements = make([]string, 0) + for i := 0; i < len(filterParts); i++ { + if i >= len(topicParts) { + matched = false + return + } + + if filterParts[i] == "+" { + elements = append(elements, topicParts[i]) + continue + } + + if filterParts[i] == "#" { + matched = true + elements = append(elements, strings.Join(topicParts[i:], "/")) + return + } + + if filterParts[i] != topicParts[i] { + matched = false + return + } + } + + return elements, true +} + +// Ledger is an auth ledger containing access rules for users and topics. +type Ledger struct { + sync.Mutex `json:"-" yaml:"-"` + Users Users `json:"users" yaml:"users"` + Auth AuthRules `json:"auth" yaml:"auth"` + ACL ACLRules `json:"acl" yaml:"acl"` +} + +// Update updates the internal values of the ledger. +func (l *Ledger) Update(ln *Ledger) { + l.Lock() + defer l.Unlock() + l.Auth = ln.Auth + l.ACL = ln.ACL +} + +// AuthOk returns true if the rules indicate the user is allowed to authenticate. +func (l *Ledger) AuthOk(cl *mqtt.Client, pk packets.Packet) (n int, ok bool) { + // If the users map is set, always check for a predefined user first instead + // of iterating through global rules. + if l.Users != nil { + if u, ok := l.Users[string(cl.Properties.Username)]; ok && + u.Password != "" && + u.Password == RString(pk.Connect.Password) { + return 0, !u.Disallow + } + } + + // If there's no users map, or no user was found, attempt to find a matching + // rule (which may also contain a user). + for n, rule := range l.Auth { + if rule.Client.Matches(cl.ID) && + rule.Username.Matches(string(cl.Properties.Username)) && + rule.Password.Matches(string(pk.Connect.Password)) && + rule.Remote.Matches(cl.Net.Remote) { + return n, rule.Allow + } + } + + return 0, false +} + +// ACLOk returns true if the rules indicate the user is allowed to read or write to +// a specific filter or topic respectively, based on the write bool. +func (l *Ledger) ACLOk(cl *mqtt.Client, topic string, write bool) (n int, ok bool) { + // If the users map is set, always check for a predefined user first instead + // of iterating through global rules. 
+ if l.Users != nil { + if u, ok := l.Users[string(cl.Properties.Username)]; ok && len(u.ACL) > 0 { + for filter, access := range u.ACL { + if filter.FilterMatches(topic) { + if !write && (access == ReadOnly || access == ReadWrite) { + return n, true + } else if write && (access == WriteOnly || access == ReadWrite) { + return n, true + } else { + return n, false + } + } + } + } + } + + for n, rule := range l.ACL { + if rule.Client.Matches(cl.ID) && + rule.Username.Matches(string(cl.Properties.Username)) && + rule.Remote.Matches(cl.Net.Remote) { + if len(rule.Filters) == 0 { + return n, true + } + + if write { + for filter, access := range rule.Filters { + if access == WriteOnly || access == ReadWrite { + if filter.FilterMatches(topic) { + return n, true + } + } + } + } + + if !write { + for filter, access := range rule.Filters { + if access == ReadOnly || access == ReadWrite { + if filter.FilterMatches(topic) { + return n, true + } + } + } + } + + for filter, _ := range rule.Filters { + if filter.FilterMatches(topic) { + return n, false + } + } + } + } + + return 0, true +} + +// ToJSON encodes the values into a JSON string. +func (l *Ledger) ToJSON() (data []byte, err error) { + return json.Marshal(l) +} + +// ToYAML encodes the values into a YAML string. +func (l *Ledger) ToYAML() (data []byte, err error) { + return yaml.Marshal(l) +} + +// Unmarshal decodes a JSON or YAML string (such as a rule config from a file) into a struct. +func (l *Ledger) Unmarshal(data []byte) error { + l.Lock() + defer l.Unlock() + if len(data) == 0 { + return nil + } + + if data[0] == '{' { + return json.Unmarshal(data, l) + } + + return yaml.Unmarshal(data, &l) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/hooks/storage/storage.go b/vendor/github.com/mochi-mqtt/server/v2/hooks/storage/storage.go new file mode 100644 index 00000000..12ade7b2 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/hooks/storage/storage.go @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package storage + +import ( + "encoding/json" + "errors" + + "github.com/mochi-mqtt/server/v2/packets" + "github.com/mochi-mqtt/server/v2/system" +) + +const ( + SubscriptionKey = "SUB" // unique key to denote Subscriptions in a store + SysInfoKey = "SYS" // unique key to denote server system information in a store + RetainedKey = "RET" // unique key to denote retained messages in a store + InflightKey = "IFM" // unique key to denote inflight messages in a store + ClientKey = "CL" // unique key to denote clients in a store +) + +var ( + // ErrDBFileNotOpen indicates that the file database (e.g. bolt/badger) wasn't open for reading. + ErrDBFileNotOpen = errors.New("db file not open") +) + +// Client is a storable representation of an mqtt client. 
+type Client struct { + Will ClientWill `json:"will"` // will topic and payload data if applicable + Properties ClientProperties `json:"properties"` // the connect properties for the client + Username []byte `json:"username"` // the username of the client + ID string `json:"id" storm:"id"` // the client id / storage key + T string `json:"t"` // the data type (client) + Remote string `json:"remote"` // the remote address of the client + Listener string `json:"listener"` // the listener the client connected on + ProtocolVersion byte `json:"protocolVersion"` // mqtt protocol version of the client + Clean bool `json:"clean"` // if the client requested a clean start/session +} + +// ClientProperties contains a limited set of the mqtt v5 properties specific to a client connection. +type ClientProperties struct { + AuthenticationData []byte `json:"authenticationData"` + User []packets.UserProperty `json:"user"` + AuthenticationMethod string `json:"authenticationMethod"` + SessionExpiryInterval uint32 `json:"sessionExpiryInterval"` + MaximumPacketSize uint32 `json:"maximumPacketSize"` + ReceiveMaximum uint16 `json:"receiveMaximum"` + TopicAliasMaximum uint16 `json:"topicAliasMaximum"` + SessionExpiryIntervalFlag bool `json:"sessionExpiryIntervalFlag"` + RequestProblemInfo byte `json:"requestProblemInfo"` + RequestProblemInfoFlag bool `json:"requestProblemInfoFlag"` + RequestResponseInfo byte `json:"requestResponseInfo"` +} + +// ClientWill contains a will message for a client, and limited mqtt v5 properties. +type ClientWill struct { + Payload []byte `json:"payload"` + User []packets.UserProperty `json:"user"` + TopicName string `json:"topicName"` + Flag uint32 `json:"flag"` + WillDelayInterval uint32 `json:"willDelayInterval"` + Qos byte `json:"qos"` + Retain bool `json:"retain"` +} + +// MarshalBinary encodes the values into a json string. +func (d Client) MarshalBinary() (data []byte, err error) { + return json.Marshal(d) +} + +// UnmarshalBinary decodes a json string into a struct. +func (d *Client) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return nil + } + return json.Unmarshal(data, d) +} + +// Message is a storable representation of an MQTT message (specifically publish). +type Message struct { + Properties MessageProperties `json:"properties"` // - + Payload []byte `json:"payload"` // the message payload (if retained) + T string `json:"t"` // the data type + ID string `json:"id" storm:"id"` // the storage key + Origin string `json:"origin"` // the id of the client who sent the message + TopicName string `json:"topic_name"` // the topic the message was sent to (if retained) + FixedHeader packets.FixedHeader `json:"fixedheader"` // the header properties of the message + Created int64 `json:"created"` // the time the message was created in unixtime + Sent int64 `json:"sent"` // the last time the message was sent (for retries) in unixtime (if inflight) + PacketID uint16 `json:"packet_id"` // the unique id of the packet (if inflight) +} + +// MessageProperties contains a limited subset of mqtt v5 properties specific to publish messages. 
+type MessageProperties struct { + CorrelationData []byte `json:"correlationData"` + SubscriptionIdentifier []int `json:"subscriptionIdentifier"` + User []packets.UserProperty `json:"user"` + ContentType string `json:"contentType"` + ResponseTopic string `json:"responseTopic"` + MessageExpiryInterval uint32 `json:"messageExpiry"` + TopicAlias uint16 `json:"topicAlias"` + PayloadFormat byte `json:"payloadFormat"` + PayloadFormatFlag bool `json:"payloadFormatFlag"` +} + +// MarshalBinary encodes the values into a json string. +func (d Message) MarshalBinary() (data []byte, err error) { + return json.Marshal(d) +} + +// UnmarshalBinary decodes a json string into a struct. +func (d *Message) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return nil + } + return json.Unmarshal(data, d) +} + +// ToPacket converts a storage.Message to a standard packet. +func (d *Message) ToPacket() packets.Packet { + pk := packets.Packet{ + FixedHeader: d.FixedHeader, + PacketID: d.PacketID, + TopicName: d.TopicName, + Payload: d.Payload, + Origin: d.Origin, + Created: d.Created, + Properties: packets.Properties{ + PayloadFormat: d.Properties.PayloadFormat, + PayloadFormatFlag: d.Properties.PayloadFormatFlag, + MessageExpiryInterval: d.Properties.MessageExpiryInterval, + ContentType: d.Properties.ContentType, + ResponseTopic: d.Properties.ResponseTopic, + CorrelationData: d.Properties.CorrelationData, + SubscriptionIdentifier: d.Properties.SubscriptionIdentifier, + TopicAlias: d.Properties.TopicAlias, + User: d.Properties.User, + }, + } + + // Return a deep copy of the packet data otherwise the slices will + // continue pointing at the values from the storage packet. + pk = pk.Copy(true) + pk.FixedHeader.Dup = d.FixedHeader.Dup + + return pk +} + +// Subscription is a storable representation of an mqtt subscription. +type Subscription struct { + T string `json:"t"` + ID string `json:"id" storm:"id"` + Client string `json:"client"` + Filter string `json:"filter"` + Identifier int `json:"identifier"` + RetainHandling byte `json:"retain_handling"` + Qos byte `json:"qos"` + RetainAsPublished bool `json:"retain_as_pub"` + NoLocal bool `json:"no_local"` +} + +// MarshalBinary encodes the values into a json string. +func (d Subscription) MarshalBinary() (data []byte, err error) { + return json.Marshal(d) +} + +// UnmarshalBinary decodes a json string into a struct. +func (d *Subscription) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return nil + } + return json.Unmarshal(data, d) +} + +// SystemInfo is a storable representation of the system information values. +type SystemInfo struct { + system.Info // embed the system info struct + T string `json:"t"` // the data type + ID string `json:"id" storm:"id"` // the storage key +} + +// MarshalBinary encodes the values into a json string. +func (d SystemInfo) MarshalBinary() (data []byte, err error) { + return json.Marshal(d) +} + +// UnmarshalBinary decodes a json string into a struct. 
+func (d *SystemInfo) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return nil + } + return json.Unmarshal(data, d) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/inflight.go b/vendor/github.com/mochi-mqtt/server/v2/inflight.go new file mode 100644 index 00000000..9d949584 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/inflight.go @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2023 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package mqtt + +import ( + "sort" + "sync" + "sync/atomic" + + "github.com/mochi-mqtt/server/v2/packets" +) + +// Inflight is a map of InflightMessage keyed on packet id. +type Inflight struct { + sync.RWMutex + internal map[uint16]packets.Packet // internal contains the inflight packets + receiveQuota int32 // remaining inbound qos quota for flow control + sendQuota int32 // remaining outbound qos quota for flow control + maximumReceiveQuota int32 // maximum allowed receive quota + maximumSendQuota int32 // maximum allowed send quota +} + +// NewInflights returns a new instance of an Inflight packets map. +func NewInflights() *Inflight { + return &Inflight{ + internal: map[uint16]packets.Packet{}, + } +} + +// Set adds or updates an inflight packet by packet id. +func (i *Inflight) Set(m packets.Packet) bool { + i.Lock() + defer i.Unlock() + + _, ok := i.internal[m.PacketID] + i.internal[m.PacketID] = m + return !ok +} + +// Get returns an inflight packet by packet id. +func (i *Inflight) Get(id uint16) (packets.Packet, bool) { + i.RLock() + defer i.RUnlock() + + if m, ok := i.internal[id]; ok { + return m, true + } + + return packets.Packet{}, false +} + +// Len returns the size of the inflight messages map. +func (i *Inflight) Len() int { + i.RLock() + defer i.RUnlock() + return len(i.internal) +} + +// Clone returns a new instance of Inflight with the same message data. +// This is used when transferring inflights from a taken-over session. +func (i *Inflight) Clone() *Inflight { + c := NewInflights() + i.RLock() + defer i.RUnlock() + for k, v := range i.internal { + c.internal[k] = v + } + return c +} + +// GetAll returns all the inflight messages. +func (i *Inflight) GetAll(immediate bool) []packets.Packet { + i.RLock() + defer i.RUnlock() + + m := []packets.Packet{} + for _, v := range i.internal { + if !immediate || (immediate && v.Expiry < 0) { + m = append(m, v) + } + } + + sort.Slice(m, func(i, j int) bool { + return uint16(m[i].Created) < uint16(m[j].Created) + }) + + return m +} + +// NextImmediate returns the next inflight packet which is indicated to be sent immediately. +// This typically occurs when the quota has been exhausted, and we need to wait until new quota +// is free to continue sending. +func (i *Inflight) NextImmediate() (packets.Packet, bool) { + i.RLock() + defer i.RUnlock() + + m := i.GetAll(true) + if len(m) > 0 { + return m[0], true + } + + return packets.Packet{}, false +} + +// Delete removes an in-flight message from the map. Returns true if the message existed. +func (i *Inflight) Delete(id uint16) bool { + i.Lock() + defer i.Unlock() + + _, ok := i.internal[id] + delete(i.internal, id) + + return ok +} + +// TakeRecieveQuota reduces the receive quota by 1. +func (i *Inflight) DecreaseReceiveQuota() { + if atomic.LoadInt32(&i.receiveQuota) > 0 { + atomic.AddInt32(&i.receiveQuota, -1) + } +} + +// TakeRecieveQuota increases the receive quota by 1. 
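+// The quota never exceeds maximumReceiveQuota. A rough sketch of the
+// flow-control lifecycle (the value 10 is illustrative):
+//
+//	i := NewInflights()
+//	i.ResetReceiveQuota(10)  // receive maximum granted for the session
+//	i.DecreaseReceiveQuota() // an inbound qos>0 message consumes quota
+//	i.IncreaseReceiveQuota() // completing the exchange releases it again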
+func (i *Inflight) IncreaseReceiveQuota() { + if atomic.LoadInt32(&i.receiveQuota) < atomic.LoadInt32(&i.maximumReceiveQuota) { + atomic.AddInt32(&i.receiveQuota, 1) + } +} + +// ResetReceiveQuota resets the receive quota to the maximum allowed value. +func (i *Inflight) ResetReceiveQuota(n int32) { + atomic.StoreInt32(&i.receiveQuota, n) + atomic.StoreInt32(&i.maximumReceiveQuota, n) +} + +// DecreaseSendQuota reduces the send quota by 1. +func (i *Inflight) DecreaseSendQuota() { + if atomic.LoadInt32(&i.sendQuota) > 0 { + atomic.AddInt32(&i.sendQuota, -1) + } +} + +// IncreaseSendQuota increases the send quota by 1. +func (i *Inflight) IncreaseSendQuota() { + if atomic.LoadInt32(&i.sendQuota) < atomic.LoadInt32(&i.maximumSendQuota) { + atomic.AddInt32(&i.sendQuota, 1) + } +} + +// ResetSendQuota resets the send quota to the maximum allowed value. +func (i *Inflight) ResetSendQuota(n int32) { + atomic.StoreInt32(&i.sendQuota, n) + atomic.StoreInt32(&i.maximumSendQuota, n) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/http_healthcheck.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/http_healthcheck.go new file mode 100644 index 00000000..e8fc0274 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/http_healthcheck.go @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2023 mochi-mqtt, mochi-co +// SPDX-FileContributor: Derek Duncan + +package listeners + +import ( + "context" + "net/http" + "sync" + "sync/atomic" + "time" + + "github.com/rs/zerolog" +) + +// HTTPHealthCheck is a listener for providing an HTTP healthcheck endpoint. +type HTTPHealthCheck struct { + sync.RWMutex + id string // the internal id of the listener + address string // the network address to bind to + config *Config // configuration values for the listener + listen *http.Server // the http server + log *zerolog.Logger // server logger + end uint32 // ensure the close methods are only called once +} + +// NewHTTPHealthCheck initialises and returns a new HTTP listener, listening on an address. +func NewHTTPHealthCheck(id, address string, config *Config) *HTTPHealthCheck { + if config == nil { + config = new(Config) + } + return &HTTPHealthCheck{ + id: id, + address: address, + config: config, + } +} + +// ID returns the id of the listener. +func (l *HTTPHealthCheck) ID() string { + return l.id +} + +// Address returns the address of the listener. +func (l *HTTPHealthCheck) Address() string { + return l.address +} + +// Protocol returns the address of the listener. +func (l *HTTPHealthCheck) Protocol() string { + if l.listen != nil && l.listen.TLSConfig != nil { + return "https" + } + + return "http" +} + +// Init initializes the listener. +func (l *HTTPHealthCheck) Init(log *zerolog.Logger) error { + l.log = log + + mux := http.NewServeMux() + mux.HandleFunc("/healthcheck", func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + w.WriteHeader(http.StatusMethodNotAllowed) + } + }) + l.listen = &http.Server{ + ReadTimeout: 5 * time.Second, + WriteTimeout: 5 * time.Second, + Addr: l.address, + Handler: mux, + } + + if l.config.TLSConfig != nil { + l.listen.TLSConfig = l.config.TLSConfig + } + + return nil +} + +// Serve starts listening for new connections and serving responses. +func (l *HTTPHealthCheck) Serve(establish EstablishFn) { + if l.listen.TLSConfig != nil { + l.listen.ListenAndServeTLS("", "") + } else { + l.listen.ListenAndServe() + } +} + +// Close closes the listener and any client connections. 
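+// The listener is normally registered with the broker, which calls Close on
+// shutdown. A minimal sketch (the id and address are arbitrary, and server is
+// assumed to be a *mqtt.Server created elsewhere):
+//
+//	hc := NewHTTPHealthCheck("hc1", ":8080", nil)
+//	_ = server.AddListener(hc) // GET /healthcheck then returns 200 OK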
+func (l *HTTPHealthCheck) Close(closeClients CloseFn) { + l.Lock() + defer l.Unlock() + + if atomic.CompareAndSwapUint32(&l.end, 0, 1) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + l.listen.Shutdown(ctx) + } + + closeClients(l.id) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/http_sysinfo.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/http_sysinfo.go new file mode 100644 index 00000000..aa32c91c --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/http_sysinfo.go @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package listeners + +import ( + "context" + "encoding/json" + "io" + "net/http" + "sync" + "sync/atomic" + "time" + + "github.com/mochi-mqtt/server/v2/system" + + "github.com/rs/zerolog" +) + +// HTTPStats is a listener for presenting the server $SYS stats on a JSON http endpoint. +type HTTPStats struct { + sync.RWMutex + id string // the internal id of the listener + address string // the network address to bind to + config *Config // configuration values for the listener + listen *http.Server // the http server + log *zerolog.Logger // server logger + sysInfo *system.Info // pointers to the server data + end uint32 // ensure the close methods are only called once +} + +// NewHTTPStats initialises and returns a new HTTP listener, listening on an address. +func NewHTTPStats(id, address string, config *Config, sysInfo *system.Info) *HTTPStats { + if config == nil { + config = new(Config) + } + return &HTTPStats{ + id: id, + address: address, + sysInfo: sysInfo, + config: config, + } +} + +// ID returns the id of the listener. +func (l *HTTPStats) ID() string { + return l.id +} + +// Address returns the address of the listener. +func (l *HTTPStats) Address() string { + return l.address +} + +// Protocol returns the address of the listener. +func (l *HTTPStats) Protocol() string { + if l.listen != nil && l.listen.TLSConfig != nil { + return "https" + } + + return "http" +} + +// Init initializes the listener. +func (l *HTTPStats) Init(log *zerolog.Logger) error { + l.log = log + + mux := http.NewServeMux() + mux.HandleFunc("/", l.jsonHandler) + l.listen = &http.Server{ + ReadTimeout: 5 * time.Second, + WriteTimeout: 5 * time.Second, + Addr: l.address, + Handler: mux, + } + + if l.config.TLSConfig != nil { + l.listen.TLSConfig = l.config.TLSConfig + } + + return nil +} + +// Serve starts listening for new connections and serving responses. +func (l *HTTPStats) Serve(establish EstablishFn) { + if l.listen.TLSConfig != nil { + l.listen.ListenAndServeTLS("", "") + } else { + l.listen.ListenAndServe() + } +} + +// Close closes the listener and any client connections. +func (l *HTTPStats) Close(closeClients CloseFn) { + l.Lock() + defer l.Unlock() + + if atomic.CompareAndSwapUint32(&l.end, 0, 1) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + l.listen.Shutdown(ctx) + } + + closeClients(l.id) +} + +// jsonHandler is an HTTP handler which outputs the $SYS stats as JSON. 
+func (l *HTTPStats) jsonHandler(w http.ResponseWriter, req *http.Request) { + info := *l.sysInfo.Clone() + + out, err := json.MarshalIndent(info, "", "\t") + if err != nil { + io.WriteString(w, err.Error()) + } + + w.Write(out) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/listeners.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/listeners.go new file mode 100644 index 00000000..24031e00 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/listeners.go @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package listeners + +import ( + "crypto/tls" + "net" + "sync" + + "github.com/rs/zerolog" +) + +// Config contains configuration values for a listener. +type Config struct { + // TLSConfig is a tls.Config configuration to be used with the listener. + // See examples folder for basic and mutual-tls use. + TLSConfig *tls.Config +} + +// EstablishFn is a callback function for establishing new clients. +type EstablishFn func(id string, c net.Conn) error + +// CloseFunc is a callback function for closing all listener clients. +type CloseFn func(id string) + +// Listener is an interface for network listeners. A network listener listens +// for incoming client connections and adds them to the server. +type Listener interface { + Init(*zerolog.Logger) error // open the network address + Serve(EstablishFn) // starting actively listening for new connections + ID() string // return the id of the listener + Address() string // the address of the listener + Protocol() string // the protocol in use by the listener + Close(CloseFn) // stop and close the listener +} + +// Listeners contains the network listeners for the broker. +type Listeners struct { + wg sync.WaitGroup // a waitgroup that waits for all listeners to finish. + internal map[string]Listener // a map of active listeners. + sync.RWMutex +} + +// New returns a new instance of Listeners. +func New() *Listeners { + return &Listeners{ + internal: map[string]Listener{}, + } +} + +// Add adds a new listener to the listeners map, keyed on id. +func (l *Listeners) Add(val Listener) { + l.Lock() + defer l.Unlock() + l.internal[val.ID()] = val +} + +// Get returns the value of a listener if it exists. +func (l *Listeners) Get(id string) (Listener, bool) { + l.RLock() + defer l.RUnlock() + val, ok := l.internal[id] + return val, ok +} + +// Len returns the length of the listeners map. +func (l *Listeners) Len() int { + l.RLock() + defer l.RUnlock() + return len(l.internal) +} + +// Delete removes a listener from the internal map. +func (l *Listeners) Delete(id string) { + l.Lock() + defer l.Unlock() + delete(l.internal, id) +} + +// Serve starts a listener serving from the internal map. +func (l *Listeners) Serve(id string, establisher EstablishFn) { + l.RLock() + defer l.RUnlock() + listener := l.internal[id] + + go func(e EstablishFn) { + defer l.wg.Done() + l.wg.Add(1) + listener.Serve(e) + }(establisher) +} + +// ServeAll starts all listeners serving from the internal map. +func (l *Listeners) ServeAll(establisher EstablishFn) { + l.RLock() + i := 0 + ids := make([]string, len(l.internal)) + for id := range l.internal { + ids[i] = id + i++ + } + l.RUnlock() + + for _, id := range ids { + l.Serve(id, establisher) + } +} + +// Close stops a listener from the internal map. 
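+// Listeners are usually managed as a set; a sketch using the mock listener:
+//
+//	ls := New()
+//	ls.Add(NewMockListener("t1", ":1882"))
+//	ls.ServeAll(MockEstablisher)
+//	ls.CloseAll(MockCloser) // closes every listener, including "t1"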
+func (l *Listeners) Close(id string, closer CloseFn) { + l.RLock() + defer l.RUnlock() + if listener, ok := l.internal[id]; ok { + listener.Close(closer) + } +} + +// CloseAll iterates and closes all registered listeners. +func (l *Listeners) CloseAll(closer CloseFn) { + l.RLock() + i := 0 + ids := make([]string, len(l.internal)) + for id := range l.internal { + ids[i] = id + i++ + } + l.RUnlock() + + for _, id := range ids { + l.Close(id, closer) + } + l.wg.Wait() +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/mock.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/mock.go new file mode 100644 index 00000000..8847af64 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/mock.go @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package listeners + +import ( + "fmt" + "net" + "sync" + + "github.com/rs/zerolog" +) + +// MockEstablisher is a function signature which can be used in testing. +func MockEstablisher(id string, c net.Conn) error { + return nil +} + +// MockCloser is a function signature which can be used in testing. +func MockCloser(id string) {} + +// MockListener is a mock listener for establishing client connections. +type MockListener struct { + sync.RWMutex + id string // the id of the listener + address string // the network address the listener binds to + Config *Config // configuration for the listener + done chan bool // indicate the listener is done + Serving bool // indicate the listener is serving + Listening bool // indiciate the listener is listening + ErrListen bool // throw an error on listen +} + +// NewMockListener returns a new instance of MockListener. +func NewMockListener(id, address string) *MockListener { + return &MockListener{ + id: id, + address: address, + done: make(chan bool), + } +} + +// Serve serves the mock listener. +func (l *MockListener) Serve(establisher EstablishFn) { + l.Lock() + l.Serving = true + l.Unlock() + + for range l.done { + return + } +} + +// Init initializes the listener. +func (l *MockListener) Init(log *zerolog.Logger) error { + if l.ErrListen { + return fmt.Errorf("listen failure") + } + + l.Lock() + defer l.Unlock() + l.Listening = true + return nil +} + +// ID returns the id of the mock listener. +func (l *MockListener) ID() string { + return l.id +} + +// Address returns the address of the listener. +func (l *MockListener) Address() string { + return l.address +} + +// Protocol returns the address of the listener. +func (l *MockListener) Protocol() string { + return "mock" +} + +// Close closes the mock listener. +func (l *MockListener) Close(closer CloseFn) { + l.Lock() + defer l.Unlock() + l.Serving = false + closer(l.id) + close(l.done) +} + +// IsServing indicates whether the mock listener is serving. +func (l *MockListener) IsServing() bool { + l.Lock() + defer l.Unlock() + return l.Serving +} + +// IsListening indicates whether the mock listener is listening. 
+func (l *MockListener) IsListening() bool { + l.Lock() + defer l.Unlock() + return l.Listening +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/net.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/net.go new file mode 100644 index 00000000..662b9ec0 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/net.go @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2023 mochi-mqtt, mochi-co +// SPDX-FileContributor: Jeroen Rinzema + +package listeners + +import ( + "net" + "sync" + "sync/atomic" + + "github.com/rs/zerolog" +) + +// Net is a listener for establishing client connections on basic TCP protocol. +type Net struct { // [MQTT-4.2.0-1] + mu sync.Mutex + listener net.Listener // a net.Listener which will listen for new clients + id string // the internal id of the listener + log *zerolog.Logger // server logger + end uint32 // ensure the close methods are only called once +} + +// NewNet initialises and returns a listener serving incoming connections on the given net.Listener +func NewNet(id string, listener net.Listener) *Net { + return &Net{ + id: id, + listener: listener, + } +} + +// ID returns the id of the listener. +func (l *Net) ID() string { + return l.id +} + +// Address returns the address of the listener. +func (l *Net) Address() string { + return l.listener.Addr().String() +} + +// Protocol returns the network of the listener. +func (l *Net) Protocol() string { + return l.listener.Addr().Network() +} + +// Init initializes the listener. +func (l *Net) Init(log *zerolog.Logger) error { + l.log = log + return nil +} + +// Serve starts waiting for new TCP connections, and calls the establish +// connection callback for any received. +func (l *Net) Serve(establish EstablishFn) { + for { + if atomic.LoadUint32(&l.end) == 1 { + return + } + + conn, err := l.listener.Accept() + if err != nil { + return + } + + if atomic.LoadUint32(&l.end) == 0 { + go func() { + err = establish(l.id, conn) + if err != nil { + l.log.Warn().Err(err).Send() + } + }() + } + } +} + +// Close closes the listener and any client connections. +func (l *Net) Close(closeClients CloseFn) { + l.mu.Lock() + defer l.mu.Unlock() + + if atomic.CompareAndSwapUint32(&l.end, 0, 1) { + closeClients(l.id) + } + + if l.listener != nil { + err := l.listener.Close() + if err != nil { + return + } + } +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/tcp.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/tcp.go new file mode 100644 index 00000000..1fc34da8 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/tcp.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package listeners + +import ( + "crypto/tls" + "net" + "sync" + "sync/atomic" + + "github.com/rs/zerolog" +) + +// TCP is a listener for establishing client connections on basic TCP protocol. +type TCP struct { // [MQTT-4.2.0-1] + sync.RWMutex + id string // the internal id of the listener + address string // the network address to bind to + listen net.Listener // a net.Listener which will listen for new clients + config *Config // configuration values for the listener + log *zerolog.Logger // server logger + end uint32 // ensure the close methods are only called once +} + +// NewTCP initialises and returns a new TCP listener, listening on an address. 
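+// Passing a nil *Config is allowed; supply Config.TLSConfig to serve TLS. A
+// minimal sketch (the id, address and server value are assumptions):
+//
+//	tcp := NewTCP("t1", ":1883", nil)
+//	_ = server.AddListener(tcp) // server is a *mqtt.Server created elsewhere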
+func NewTCP(id, address string, config *Config) *TCP { + if config == nil { + config = new(Config) + } + + return &TCP{ + id: id, + address: address, + config: config, + } +} + +// ID returns the id of the listener. +func (l *TCP) ID() string { + return l.id +} + +// Address returns the address of the listener. +func (l *TCP) Address() string { + return l.address +} + +// Protocol returns the address of the listener. +func (l *TCP) Protocol() string { + return "tcp" +} + +// Init initializes the listener. +func (l *TCP) Init(log *zerolog.Logger) error { + l.log = log + + var err error + if l.config.TLSConfig != nil { + l.listen, err = tls.Listen("tcp", l.address, l.config.TLSConfig) + } else { + l.listen, err = net.Listen("tcp", l.address) + } + + return err +} + +// Serve starts waiting for new TCP connections, and calls the establish +// connection callback for any received. +func (l *TCP) Serve(establish EstablishFn) { + for { + if atomic.LoadUint32(&l.end) == 1 { + return + } + + conn, err := l.listen.Accept() + if err != nil { + return + } + + if atomic.LoadUint32(&l.end) == 0 { + go func() { + err = establish(l.id, conn) + if err != nil { + l.log.Warn().Err(err).Send() + } + }() + } + } +} + +// Close closes the listener and any client connections. +func (l *TCP) Close(closeClients CloseFn) { + l.Lock() + defer l.Unlock() + + if atomic.CompareAndSwapUint32(&l.end, 0, 1) { + closeClients(l.id) + } + + if l.listen != nil { + err := l.listen.Close() + if err != nil { + return + } + } +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/unixsock.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/unixsock.go new file mode 100644 index 00000000..ebe54c3f --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/unixsock.go @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: jason@zgwit.com + +package listeners + +import ( + "net" + "os" + "sync" + "sync/atomic" + + "github.com/rs/zerolog" +) + +// UnixSock is a listener for establishing client connections on basic UnixSock protocol. +type UnixSock struct { + sync.RWMutex + id string // the internal id of the listener. + address string // the network address to bind to. + listen net.Listener // a net.Listener which will listen for new clients. + log *zerolog.Logger // server logger + end uint32 // ensure the close methods are only called once. +} + +// NewUnixSock initialises and returns a new UnixSock listener, listening on an address. +func NewUnixSock(id, address string) *UnixSock { + return &UnixSock{ + id: id, + address: address, + } +} + +// ID returns the id of the listener. +func (l *UnixSock) ID() string { + return l.id +} + +// Address returns the address of the listener. +func (l *UnixSock) Address() string { + return l.address +} + +// Protocol returns the address of the listener. +func (l *UnixSock) Protocol() string { + return "unix" +} + +// Init initializes the listener. +func (l *UnixSock) Init(log *zerolog.Logger) error { + l.log = log + + var err error + _ = os.Remove(l.address) + l.listen, err = net.Listen("unix", l.address) + return err +} + +// Serve starts waiting for new UnixSock connections, and calls the establish +// connection callback for any received. 
+func (l *UnixSock) Serve(establish EstablishFn) { + for { + if atomic.LoadUint32(&l.end) == 1 { + return + } + + conn, err := l.listen.Accept() + if err != nil { + return + } + + if atomic.LoadUint32(&l.end) == 0 { + go func() { + err = establish(l.id, conn) + if err != nil { + l.log.Warn().Err(err).Send() + } + }() + } + } +} + +// Close closes the listener and any client connections. +func (l *UnixSock) Close(closeClients CloseFn) { + l.Lock() + defer l.Unlock() + + if atomic.CompareAndSwapUint32(&l.end, 0, 1) { + closeClients(l.id) + } + + if l.listen != nil { + err := l.listen.Close() + if err != nil { + return + } + } +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/listeners/websocket.go b/vendor/github.com/mochi-mqtt/server/v2/listeners/websocket.go new file mode 100644 index 00000000..0b06c86f --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/listeners/websocket.go @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package listeners + +import ( + "context" + "errors" + "io" + "net" + "net/http" + "sync" + "sync/atomic" + "time" + + "github.com/gorilla/websocket" + "github.com/rs/zerolog" +) + +var ( + // ErrInvalidMessage indicates that a message payload was not valid. + ErrInvalidMessage = errors.New("message type not binary") +) + +// Websocket is a listener for establishing websocket connections. +type Websocket struct { // [MQTT-4.2.0-1] + sync.RWMutex + id string // the internal id of the listener + address string // the network address to bind to + config *Config // configuration values for the listener + listen *http.Server // an http server for serving websocket connections + log *zerolog.Logger // server logger + establish EstablishFn // the server's establish connection handler + upgrader *websocket.Upgrader // upgrade the incoming http/tcp connection to a websocket compliant connection. + end uint32 // ensure the close methods are only called once +} + +// NewWebsocket initialises and returns a new Websocket listener, listening on an address. +func NewWebsocket(id, address string, config *Config) *Websocket { + if config == nil { + config = new(Config) + } + + return &Websocket{ + id: id, + address: address, + config: config, + upgrader: &websocket.Upgrader{ + Subprotocols: []string{"mqtt"}, + CheckOrigin: func(r *http.Request) bool { + return true + }, + }, + } +} + +// ID returns the id of the listener. +func (l *Websocket) ID() string { + return l.id +} + +// Address returns the address of the listener. +func (l *Websocket) Address() string { + return l.address +} + +// Protocol returns the address of the listener. +func (l *Websocket) Protocol() string { + if l.config.TLSConfig != nil { + return "wss" + } + + return "ws" +} + +// Init initializes the listener. +func (l *Websocket) Init(log *zerolog.Logger) error { + l.log = log + + mux := http.NewServeMux() + mux.HandleFunc("/", l.handler) + l.listen = &http.Server{ + Addr: l.address, + Handler: mux, + TLSConfig: l.config.TLSConfig, + ReadTimeout: 60 * time.Second, + WriteTimeout: 60 * time.Second, + } + + return nil +} + +// handler upgrades and handles an incoming websocket connection. 
+func (l *Websocket) handler(w http.ResponseWriter, r *http.Request) { + c, err := l.upgrader.Upgrade(w, r, nil) + if err != nil { + return + } + defer c.Close() + + err = l.establish(l.id, &wsConn{Conn: c.UnderlyingConn(), c: c}) + if err != nil { + l.log.Warn().Err(err).Send() + } +} + +// Serve starts waiting for new Websocket connections, and calls the connection +// establishment callback for any received. +func (l *Websocket) Serve(establish EstablishFn) { + l.establish = establish + + if l.listen.TLSConfig != nil { + l.listen.ListenAndServeTLS("", "") + } else { + l.listen.ListenAndServe() + } +} + +// Close closes the listener and any client connections. +func (l *Websocket) Close(closeClients CloseFn) { + l.Lock() + defer l.Unlock() + + if atomic.CompareAndSwapUint32(&l.end, 0, 1) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + l.listen.Shutdown(ctx) + } + + closeClients(l.id) +} + +// wsConn is a websocket connection which satisfies the net.Conn interface. +type wsConn struct { + net.Conn + c *websocket.Conn + + // reader for the current message (may be nil) + r io.Reader +} + +// Read reads the next span of bytes from the websocket connection and returns the number of bytes read. +func (ws *wsConn) Read(p []byte) (int, error) { + if ws.r == nil { + op, r, err := ws.c.NextReader() + if err != nil { + return 0, err + } + + if op != websocket.BinaryMessage { + err = ErrInvalidMessage + return 0, err + } + + ws.r = r + } + + var n int + for { + // buffer is full, return what we've read so far + if n == len(p) { + return n, nil + } + + br, err := ws.r.Read(p[n:]) + n += br + if err != nil { + // when ANY error occurs, we consider this the end of the current message (either because it really is, via + // io.EOF, or because something bad happened, in which case we want to drop the remainder) + ws.r = nil + + if errors.Is(err, io.EOF) { + err = nil + } + return n, err + } + } +} + +// Write writes bytes to the websocket connection. +func (ws *wsConn) Write(p []byte) (int, error) { + err := ws.c.WriteMessage(websocket.BinaryMessage, p) + if err != nil { + return 0, err + } + + return len(p), nil +} + +// Close signals the underlying websocket conn to close. +func (ws *wsConn) Close() error { + return ws.Conn.Close() +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/packets/codec.go b/vendor/github.com/mochi-mqtt/server/v2/packets/codec.go new file mode 100644 index 00000000..152d777e --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/packets/codec.go @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package packets + +import ( + "bytes" + "encoding/binary" + "io" + "unicode/utf8" + "unsafe" +) + +// bytesToString provides a zero-alloc no-copy byte to string conversion. +// via https://github.com/golang/go/issues/25484#issuecomment-391415660 +func bytesToString(bs []byte) string { + return *(*string)(unsafe.Pointer(&bs)) +} + +// decodeUint16 extracts the value of two bytes from a byte array. +func decodeUint16(buf []byte, offset int) (uint16, int, error) { + if len(buf) < offset+2 { + return 0, 0, ErrMalformedOffsetUintOutOfRange + } + + return binary.BigEndian.Uint16(buf[offset : offset+2]), offset + 2, nil +} + +// decodeUint32 extracts the value of four bytes from a byte array. 
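+// Like decodeUint16 it also returns the offset of the next unread byte, so
+// calls can be chained while walking a packet body, e.g.:
+//
+//	val, next, err := decodeUint32(buf, offset) // err is ErrMalformedOffsetUintOutOfRange if buf is too short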
+func decodeUint32(buf []byte, offset int) (uint32, int, error) { + if len(buf) < offset+4 { + return 0, 0, ErrMalformedOffsetUintOutOfRange + } + + return binary.BigEndian.Uint32(buf[offset : offset+4]), offset + 4, nil +} + +// decodeString extracts a string from a byte array, beginning at an offset. +func decodeString(buf []byte, offset int) (string, int, error) { + b, n, err := decodeBytes(buf, offset) + if err != nil { + return "", 0, err + } + + if !validUTF8(b) { // [MQTT-1.5.4-1] [MQTT-3.1.3-5] + return "", 0, ErrMalformedInvalidUTF8 + } + + return bytesToString(b), n, nil +} + +// validUTF8 checks if the byte array contains valid UTF-8 characters. +func validUTF8(b []byte) bool { + return utf8.Valid(b) && bytes.IndexByte(b, 0x00) == -1 // [MQTT-1.5.4-1] [MQTT-1.5.4-2] +} + +// decodeBytes extracts a byte array from a byte array, beginning at an offset. Used primarily for message payloads. +func decodeBytes(buf []byte, offset int) ([]byte, int, error) { + length, next, err := decodeUint16(buf, offset) + if err != nil { + return make([]byte, 0), 0, err + } + + if next+int(length) > len(buf) { + return make([]byte, 0), 0, ErrMalformedOffsetBytesOutOfRange + } + + return buf[next : next+int(length)], next + int(length), nil +} + +// decodeByte extracts the value of a byte from a byte array. +func decodeByte(buf []byte, offset int) (byte, int, error) { + if len(buf) <= offset { + return 0, 0, ErrMalformedOffsetByteOutOfRange + } + return buf[offset], offset + 1, nil +} + +// decodeByteBool extracts the value of a byte from a byte array and returns a bool. +func decodeByteBool(buf []byte, offset int) (bool, int, error) { + if len(buf) <= offset { + return false, 0, ErrMalformedOffsetBoolOutOfRange + } + return 1&buf[offset] > 0, offset + 1, nil +} + +// encodeBool returns a byte instead of a bool. +func encodeBool(b bool) byte { + if b { + return 1 + } + return 0 +} + +// encodeBytes encodes a byte array to a byte array. Used primarily for message payloads. +func encodeBytes(val []byte) []byte { + // In most circumstances the number of bytes being encoded is small. + // Setting the cap to a low amount allows us to account for those without + // triggering allocation growth on append unless we need to. + buf := make([]byte, 2, 32) + binary.BigEndian.PutUint16(buf, uint16(len(val))) + return append(buf, val...) +} + +// encodeUint16 encodes a uint16 value to a byte array. +func encodeUint16(val uint16) []byte { + buf := make([]byte, 2) + binary.BigEndian.PutUint16(buf, val) + return buf +} + +// encodeUint32 encodes a uint16 value to a byte array. +func encodeUint32(val uint32) []byte { + buf := make([]byte, 4) + binary.BigEndian.PutUint32(buf, val) + return buf +} + +// encodeString encodes a string to a byte array. +func encodeString(val string) []byte { + // Like encodeBytes, we set the cap to a small number to avoid + // triggering allocation growth on append unless we absolutely need to. + buf := make([]byte, 2, 32) + binary.BigEndian.PutUint16(buf, uint16(len(val))) + return append(buf, []byte(val)...) +} + +// encodeLength writes length bits for the header. 
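+// Lengths are written as MQTT variable byte integers; for example 321 encodes
+// to the two bytes 0xC1, 0x02 (0x41 with the continuation bit set, then 0x02):
+//
+//	var b bytes.Buffer
+//	encodeLength(&b, 321) // b.Bytes() == []byte{0xC1, 0x02}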
+func encodeLength(b *bytes.Buffer, length int64) { + // 1.5.5 Variable Byte Integer encode non-normative + // https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901027 + for { + eb := byte(length % 128) + length /= 128 + if length > 0 { + eb |= 0x80 + } + b.WriteByte(eb) + if length == 0 { + break // [MQTT-1.5.5-1] + } + } +} + +func DecodeLength(b io.ByteReader) (n, bu int, err error) { + // see 1.5.5 Variable Byte Integer decode non-normative + // https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901027 + var multiplier uint32 + var value uint32 + bu = 1 + for { + eb, err := b.ReadByte() + if err != nil { + return 0, bu, err + } + + value |= uint32(eb&127) << multiplier + if value > 268435455 { + return 0, bu, ErrMalformedVariableByteInteger + } + + if (eb & 128) == 0 { + break + } + + multiplier += 7 + bu++ + } + + return int(value), bu, nil +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/packets/codes.go b/vendor/github.com/mochi-mqtt/server/v2/packets/codes.go new file mode 100644 index 00000000..154d7ae5 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/packets/codes.go @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package packets + +// Code contains a reason code and reason string for a response. +type Code struct { + Reason string + Code byte +} + +// String returns the readable reason for a code. +func (c Code) String() string { + return c.Reason +} + +// Error returns the readable reason for a code. +func (c Code) Error() string { + return c.Reason +} + +var ( + // QosCodes indicates the reason codes for each Qos byte. + QosCodes = map[byte]Code{ + 0: CodeGrantedQos0, + 1: CodeGrantedQos1, + 2: CodeGrantedQos2, + } + + CodeSuccessIgnore = Code{Code: 0x00, Reason: "ignore packet"} + CodeSuccess = Code{Code: 0x00, Reason: "success"} + CodeDisconnect = Code{Code: 0x00, Reason: "disconnected"} + CodeGrantedQos0 = Code{Code: 0x00, Reason: "granted qos 0"} + CodeGrantedQos1 = Code{Code: 0x01, Reason: "granted qos 1"} + CodeGrantedQos2 = Code{Code: 0x02, Reason: "granted qos 2"} + CodeDisconnectWillMessage = Code{Code: 0x04, Reason: "disconnect with will message"} + CodeNoMatchingSubscribers = Code{Code: 0x10, Reason: "no matching subscribers"} + CodeNoSubscriptionExisted = Code{Code: 0x11, Reason: "no subscription existed"} + CodeContinueAuthentication = Code{Code: 0x18, Reason: "continue authentication"} + CodeReAuthenticate = Code{Code: 0x19, Reason: "re-authenticate"} + ErrUnspecifiedError = Code{Code: 0x80, Reason: "unspecified error"} + ErrMalformedPacket = Code{Code: 0x81, Reason: "malformed packet"} + ErrMalformedProtocolName = Code{Code: 0x81, Reason: "malformed packet: protocol name"} + ErrMalformedProtocolVersion = Code{Code: 0x81, Reason: "malformed packet: protocol version"} + ErrMalformedFlags = Code{Code: 0x81, Reason: "malformed packet: flags"} + ErrMalformedKeepalive = Code{Code: 0x81, Reason: "malformed packet: keepalive"} + ErrMalformedPacketID = Code{Code: 0x81, Reason: "malformed packet: packet identifier"} + ErrMalformedTopic = Code{Code: 0x81, Reason: "malformed packet: topic"} + ErrMalformedWillTopic = Code{Code: 0x81, Reason: "malformed packet: will topic"} + ErrMalformedWillPayload = Code{Code: 0x81, Reason: "malformed packet: will message"} + ErrMalformedUsername = Code{Code: 0x81, Reason: "malformed packet: username"} + ErrMalformedPassword = Code{Code: 0x81, Reason: "malformed packet: password"} + 
ErrMalformedQos = Code{Code: 0x81, Reason: "malformed packet: qos"} + ErrMalformedOffsetUintOutOfRange = Code{Code: 0x81, Reason: "malformed packet: offset uint out of range"} + ErrMalformedOffsetBytesOutOfRange = Code{Code: 0x81, Reason: "malformed packet: offset bytes out of range"} + ErrMalformedOffsetByteOutOfRange = Code{Code: 0x81, Reason: "malformed packet: offset byte out of range"} + ErrMalformedOffsetBoolOutOfRange = Code{Code: 0x81, Reason: "malformed packet: offset boolean out of range"} + ErrMalformedInvalidUTF8 = Code{Code: 0x81, Reason: "malformed packet: invalid utf-8 string"} + ErrMalformedVariableByteInteger = Code{Code: 0x81, Reason: "malformed packet: variable byte integer out of range"} + ErrMalformedBadProperty = Code{Code: 0x81, Reason: "malformed packet: unknown property"} + ErrMalformedProperties = Code{Code: 0x81, Reason: "malformed packet: properties"} + ErrMalformedWillProperties = Code{Code: 0x81, Reason: "malformed packet: will properties"} + ErrMalformedSessionPresent = Code{Code: 0x81, Reason: "malformed packet: session present"} + ErrMalformedReasonCode = Code{Code: 0x81, Reason: "malformed packet: reason code"} + ErrProtocolViolation = Code{Code: 0x82, Reason: "protocol violation"} + ErrProtocolViolationProtocolName = Code{Code: 0x82, Reason: "protocol violation: protocol name"} + ErrProtocolViolationProtocolVersion = Code{Code: 0x82, Reason: "protocol violation: protocol version"} + ErrProtocolViolationReservedBit = Code{Code: 0x82, Reason: "protocol violation: reserved bit not 0"} + ErrProtocolViolationFlagNoUsername = Code{Code: 0x82, Reason: "protocol violation: username flag set but no value"} + ErrProtocolViolationFlagNoPassword = Code{Code: 0x82, Reason: "protocol violation: password flag set but no value"} + ErrProtocolViolationUsernameNoFlag = Code{Code: 0x82, Reason: "protocol violation: username set but no flag"} + ErrProtocolViolationPasswordNoFlag = Code{Code: 0x82, Reason: "protocol violation: username set but no flag"} + ErrProtocolViolationPasswordTooLong = Code{Code: 0x82, Reason: "protocol violation: password too long"} + ErrProtocolViolationUsernameTooLong = Code{Code: 0x82, Reason: "protocol violation: username too long"} + ErrProtocolViolationNoPacketID = Code{Code: 0x82, Reason: "protocol violation: missing packet id"} + ErrProtocolViolationSurplusPacketID = Code{Code: 0x82, Reason: "protocol violation: surplus packet id"} + ErrProtocolViolationQosOutOfRange = Code{Code: 0x82, Reason: "protocol violation: qos out of range"} + ErrProtocolViolationSecondConnect = Code{Code: 0x82, Reason: "protocol violation: second connect packet"} + ErrProtocolViolationZeroNonZeroExpiry = Code{Code: 0x82, Reason: "protocol violation: non-zero expiry"} + ErrProtocolViolationRequireFirstConnect = Code{Code: 0x82, Reason: "protocol violation: first packet must be connect"} + ErrProtocolViolationWillFlagNoPayload = Code{Code: 0x82, Reason: "protocol violation: will flag no payload"} + ErrProtocolViolationWillFlagSurplusRetain = Code{Code: 0x82, Reason: "protocol violation: will flag surplus retain"} + ErrProtocolViolationSurplusWildcard = Code{Code: 0x82, Reason: "protocol violation: topic contains wildcards"} + ErrProtocolViolationSurplusSubID = Code{Code: 0x82, Reason: "protocol violation: contained subscription identifier"} + ErrProtocolViolationInvalidTopic = Code{Code: 0x82, Reason: "protocol violation: invalid topic"} + ErrProtocolViolationInvalidSharedNoLocal = Code{Code: 0x82, Reason: "protocol violation: invalid shared no local"} + 
ErrProtocolViolationNoFilters = Code{Code: 0x82, Reason: "protocol violation: must contain at least one filter"} + ErrProtocolViolationInvalidReason = Code{Code: 0x82, Reason: "protocol violation: invalid reason"} + ErrProtocolViolationOversizeSubID = Code{Code: 0x82, Reason: "protocol violation: oversize subscription id"} + ErrProtocolViolationDupNoQos = Code{Code: 0x82, Reason: "protocol violation: dup true with no qos"} + ErrProtocolViolationUnsupportedProperty = Code{Code: 0x82, Reason: "protocol violation: unsupported property"} + ErrProtocolViolationNoTopic = Code{Code: 0x82, Reason: "protocol violation: no topic or alias"} + ErrImplementationSpecificError = Code{Code: 0x83, Reason: "implementation specific error"} + ErrRejectPacket = Code{Code: 0x83, Reason: "packet rejected"} + ErrUnsupportedProtocolVersion = Code{Code: 0x84, Reason: "unsupported protocol version"} + ErrClientIdentifierNotValid = Code{Code: 0x85, Reason: "client identifier not valid"} + ErrClientIdentifierTooLong = Code{Code: 0x85, Reason: "client identifier too long"} + ErrBadUsernameOrPassword = Code{Code: 0x86, Reason: "bad username or password"} + ErrNotAuthorized = Code{Code: 0x87, Reason: "not authorized"} + ErrServerUnavailable = Code{Code: 0x88, Reason: "server unavailable"} + ErrServerBusy = Code{Code: 0x89, Reason: "server busy"} + ErrBanned = Code{Code: 0x8A, Reason: "banned"} + ErrServerShuttingDown = Code{Code: 0x8B, Reason: "server shutting down"} + ErrBadAuthenticationMethod = Code{Code: 0x8C, Reason: "bad authentication method"} + ErrKeepAliveTimeout = Code{Code: 0x8D, Reason: "keep alive timeout"} + ErrSessionTakenOver = Code{Code: 0x8E, Reason: "session takeover"} + ErrTopicFilterInvalid = Code{Code: 0x8F, Reason: "topic filter invalid"} + ErrTopicNameInvalid = Code{Code: 0x90, Reason: "topic name invalid"} + ErrPacketIdentifierInUse = Code{Code: 0x91, Reason: "packet identifier in use"} + ErrPacketIdentifierNotFound = Code{Code: 0x92, Reason: "packet identifier not found"} + ErrReceiveMaximum = Code{Code: 0x93, Reason: "receive maximum exceeded"} + ErrTopicAliasInvalid = Code{Code: 0x94, Reason: "topic alias invalid"} + ErrPacketTooLarge = Code{Code: 0x95, Reason: "packet too large"} + ErrMessageRateTooHigh = Code{Code: 0x96, Reason: "message rate too high"} + ErrQuotaExceeded = Code{Code: 0x97, Reason: "quota exceeded"} + ErrPendingClientWritesExceeded = Code{Code: 0x97, Reason: "too many pending writes"} + ErrAdministrativeAction = Code{Code: 0x98, Reason: "administrative action"} + ErrPayloadFormatInvalid = Code{Code: 0x99, Reason: "payload format invalid"} + ErrRetainNotSupported = Code{Code: 0x9A, Reason: "retain not supported"} + ErrQosNotSupported = Code{Code: 0x9B, Reason: "qos not supported"} + ErrUseAnotherServer = Code{Code: 0x9C, Reason: "use another server"} + ErrServerMoved = Code{Code: 0x9D, Reason: "server moved"} + ErrSharedSubscriptionsNotSupported = Code{Code: 0x9E, Reason: "shared subscriptions not supported"} + ErrConnectionRateExceeded = Code{Code: 0x9F, Reason: "connection rate exceeded"} + ErrMaxConnectTime = Code{Code: 0xA0, Reason: "maximum connect time"} + ErrSubscriptionIdentifiersNotSupported = Code{Code: 0xA1, Reason: "subscription identifiers not supported"} + ErrWildcardSubscriptionsNotSupported = Code{Code: 0xA2, Reason: "wildcard subscriptions not supported"} + + // MQTTv3 specific bytes. 
+ Err3UnsupportedProtocolVersion = Code{Code: 0x01} + Err3ClientIdentifierNotValid = Code{Code: 0x02} + Err3ServerUnavailable = Code{Code: 0x03} + ErrMalformedUsernameOrPassword = Code{Code: 0x04} + Err3NotAuthorized = Code{Code: 0x05} + + // V5CodesToV3 maps MQTTv5 Connack reason codes to MQTTv3 return codes. + // This is required because MQTTv3 has different return byte specification. + // See http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc385349257 + V5CodesToV3 = map[Code]Code{ + ErrUnsupportedProtocolVersion: Err3UnsupportedProtocolVersion, + ErrClientIdentifierNotValid: Err3ClientIdentifierNotValid, + ErrServerUnavailable: Err3ServerUnavailable, + ErrMalformedUsername: ErrMalformedUsernameOrPassword, + ErrMalformedPassword: ErrMalformedUsernameOrPassword, + ErrBadUsernameOrPassword: Err3NotAuthorized, + } +) diff --git a/vendor/github.com/mochi-mqtt/server/v2/packets/fixedheader.go b/vendor/github.com/mochi-mqtt/server/v2/packets/fixedheader.go new file mode 100644 index 00000000..eb20451b --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/packets/fixedheader.go @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package packets + +import ( + "bytes" +) + +// FixedHeader contains the values of the fixed header portion of the MQTT packet. +type FixedHeader struct { + Remaining int `json:"remaining"` // the number of remaining bytes in the payload. + Type byte `json:"type"` // the type of the packet (PUBLISH, SUBSCRIBE, etc) from bits 7 - 4 (byte 1). + Qos byte `json:"qos"` // indicates the quality of service expected. + Dup bool `json:"dup"` // indicates if the packet was already sent at an earlier time. + Retain bool `json:"retain"` // whether the message should be retained. +} + +// Encode encodes the FixedHeader and returns a bytes buffer. +func (fh *FixedHeader) Encode(buf *bytes.Buffer) { + buf.WriteByte(fh.Type<<4 | encodeBool(fh.Dup)<<3 | fh.Qos<<1 | encodeBool(fh.Retain)) + encodeLength(buf, int64(fh.Remaining)) +} + +// Decode extracts the specification bits from the header byte. +func (fh *FixedHeader) Decode(hb byte) error { + fh.Type = hb >> 4 // Get the message type from the first 4 bytes. 
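+	// (The packet type occupies the upper four bits of the header byte; the
+	// lower four bits carry the per-type flags handled below.)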
+ + switch fh.Type { + case Publish: + if (hb>>1)&0x01 > 0 && (hb>>1)&0x02 > 0 { + return ErrProtocolViolationQosOutOfRange // [MQTT-3.3.1-4] + } + + fh.Dup = (hb>>3)&0x01 > 0 // is duplicate + fh.Qos = (hb >> 1) & 0x03 // qos flag + fh.Retain = hb&0x01 > 0 // is retain flag + case Pubrel: + fallthrough + case Subscribe: + fallthrough + case Unsubscribe: + if (hb>>0)&0x01 != 0 || (hb>>1)&0x01 != 1 || (hb>>2)&0x01 != 0 || (hb>>3)&0x01 != 0 { // [MQTT-3.8.1-1] [MQTT-3.10.1-1] + return ErrMalformedFlags + } + + fh.Qos = (hb >> 1) & 0x03 + default: + if (hb>>0)&0x01 != 0 || + (hb>>1)&0x01 != 0 || + (hb>>2)&0x01 != 0 || + (hb>>3)&0x01 != 0 { // [MQTT-3.8.3-5] [MQTT-3.14.1-1] [MQTT-3.15.1-1] + return ErrMalformedFlags + } + } + + if fh.Qos == 0 && fh.Dup { + return ErrProtocolViolationDupNoQos // [MQTT-3.3.1-2] + } + + return nil +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/packets/packets.go b/vendor/github.com/mochi-mqtt/server/v2/packets/packets.go new file mode 100644 index 00000000..2611bcb4 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/packets/packets.go @@ -0,0 +1,1148 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package packets + +import ( + "bytes" + "errors" + "fmt" + "math" + "strconv" + "strings" + "sync" +) + +// All of the valid packet types and their packet identifier. +const ( + Reserved byte = iota // 0 - we use this in packet tests to indicate special-test or all packets. + Connect // 1 + Connack // 2 + Publish // 3 + Puback // 4 + Pubrec // 5 + Pubrel // 6 + Pubcomp // 7 + Subscribe // 8 + Suback // 9 + Unsubscribe // 10 + Unsuback // 11 + Pingreq // 12 + Pingresp // 13 + Disconnect // 14 + Auth // 15 + WillProperties byte = 99 // Special byte for validating Will Properties. +) + +var ( + // ErrNoValidPacketAvailable indicates the packet type byte provided does not exist in the mqtt specification. + ErrNoValidPacketAvailable error = errors.New("no valid packet available") + + // PacketNames is a map of packet bytes to human readable names, for easier debugging. + PacketNames = map[byte]string{ + 0: "Reserved", + 1: "Connect", + 2: "Connack", + 3: "Publish", + 4: "Puback", + 5: "Pubrec", + 6: "Pubrel", + 7: "Pubcomp", + 8: "Subscribe", + 9: "Suback", + 10: "Unsubscribe", + 11: "Unsuback", + 12: "Pingreq", + 13: "Pingresp", + 14: "Disconnect", + 15: "Auth", + } +) + +// Packets is a concurrency safe map of packets. +type Packets struct { + internal map[string]Packet + sync.RWMutex +} + +// NewPackets returns a new instance of Packets. +func NewPackets() *Packets { + return &Packets{ + internal: map[string]Packet{}, + } +} + +// Add adds a new packet to the map. +func (p *Packets) Add(id string, val Packet) { + p.Lock() + defer p.Unlock() + p.internal[id] = val +} + +// GetAll returns all packets in the map. +func (p *Packets) GetAll() map[string]Packet { + p.RLock() + defer p.RUnlock() + m := map[string]Packet{} + for k, v := range p.internal { + m[k] = v + } + return m +} + +// Get returns a specific packet in the map by packet id. +func (p *Packets) Get(id string) (val Packet, ok bool) { + p.RLock() + defer p.RUnlock() + val, ok = p.internal[id] + return val, ok +} + +// Len returns the number of packets in the map. +func (p *Packets) Len() int { + p.RLock() + defer p.RUnlock() + val := len(p.internal) + return val +} + +// Delete removes a packet from the map by packet id. 
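+// Packets are keyed on caller-chosen string ids; a short sketch (the id and
+// pk value are illustrative):
+//
+//	ps := NewPackets()
+//	ps.Add("inflight-1", pk) // pk is a Packet built elsewhere
+//	stored, ok := ps.Get("inflight-1")
+//	ps.Delete("inflight-1")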
+func (p *Packets) Delete(id string) { + p.Lock() + defer p.Unlock() + delete(p.internal, id) +} + +// Packet represents an MQTT packet. Instead of providing a packet interface +// variant packet structs, this is a single concrete packet type to cover all packet +// types, which allows us to take advantage of various compiler optimizations. It +// contains a combination of mqtt spec values and internal broker control codes. +type Packet struct { + Connect ConnectParams // parameters for connect packets (just for organisation) + Properties Properties // all mqtt v5 packet properties + Payload []byte // a message/payload for publish packets + ReasonCodes []byte // one or more reason codes for multi-reason responses (suback, etc) + Filters Subscriptions // a list of subscription filters and their properties (subscribe, unsubscribe) + TopicName string // the topic a payload is being published to + Origin string // client id of the client who is issuing the packet (mostly internal use) + FixedHeader FixedHeader // - + Created int64 // unix timestamp indicating time packet was created/received on the server + Expiry int64 // unix timestamp indicating when the packet will expire and should be deleted + Mods Mods // internal broker control values for controlling certain mqtt v5 compliance + PacketID uint16 // packet id for the packet (publish, qos, etc) + ProtocolVersion byte // protocol version of the client the packet belongs to + SessionPresent bool // session existed for connack + ReasonCode byte // reason code for a packet response (acks, etc) + ReservedBit byte // reserved, do not use (except in testing) + Ignore bool // if true, do not perform any message forwarding operations +} + +// Mods specifies certain values required for certain mqtt v5 compliance within packet encoding/decoding. +type Mods struct { + MaxSize uint32 // the maximum packet size specified by the client / server + DisallowProblemInfo bool // if problem info is disallowed + AllowResponseInfo bool // if response info is disallowed +} + +// ConnectParams contains packet values which are specifically related to connect packets. +type ConnectParams struct { + WillProperties Properties `json:"willProperties"` // - + Password []byte `json:"password"` // - + Username []byte `json:"username"` // - + ProtocolName []byte `json:"protocolName"` // - + WillPayload []byte `json:"willPayload"` // - + ClientIdentifier string `json:"clientId"` // - + WillTopic string `json:"willTopic"` // - + Keepalive uint16 `json:"keepalive"` // - + PasswordFlag bool `json:"passwordFlag"` // - + UsernameFlag bool `json:"usernameFlag"` // - + WillQos byte `json:"willQos"` // - + WillFlag bool `json:"willFlag"` // - + WillRetain bool `json:"willRetain"` // - + Clean bool `json:"clean"` // CleanSession in v3.1.1, CleanStart in v5 +} + +// Subscriptions is a slice of Subscription. +type Subscriptions []Subscription // must be a slice to retain order. + +// Subscription contains details about a client subscription to a topic filter. +type Subscription struct { + ShareName []string + Filter string + Identifier int + Identifiers map[string]int + RetainHandling byte + Qos byte + RetainAsPublished bool + NoLocal bool + FwdRetainedFlag bool // true if the subscription forms part of a publish response to a client subscription and packet is retained. +} + +// Copy creates a new instance of a packet, but with an empty header for inheriting new QoS flags, etc. 
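+// When allowTransfer is true the PacketID is carried over (as when moving
+// inflight messages to a taken-over session); otherwise the copy gets a zero
+// packet id, e.g.:
+//
+//	out := pk.Copy(false) // deep copy for re-publishing; out.PacketID == 0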
+func (pk *Packet) Copy(allowTransfer bool) Packet { + p := Packet{ + FixedHeader: FixedHeader{ + Remaining: pk.FixedHeader.Remaining, + Type: pk.FixedHeader.Type, + Retain: pk.FixedHeader.Retain, + Dup: false, // [MQTT-4.3.1-1] [MQTT-4.3.2-2] + Qos: pk.FixedHeader.Qos, + }, + Mods: Mods{ + MaxSize: pk.Mods.MaxSize, + }, + ReservedBit: pk.ReservedBit, + ProtocolVersion: pk.ProtocolVersion, + Connect: ConnectParams{ + ClientIdentifier: pk.Connect.ClientIdentifier, + Keepalive: pk.Connect.Keepalive, + WillQos: pk.Connect.WillQos, + WillTopic: pk.Connect.WillTopic, + WillFlag: pk.Connect.WillFlag, + WillRetain: pk.Connect.WillRetain, + WillProperties: pk.Connect.WillProperties.Copy(allowTransfer), + Clean: pk.Connect.Clean, + }, + TopicName: pk.TopicName, + Properties: pk.Properties.Copy(allowTransfer), + SessionPresent: pk.SessionPresent, + ReasonCode: pk.ReasonCode, + Filters: pk.Filters, + Created: pk.Created, + Expiry: pk.Expiry, + Origin: pk.Origin, + } + + if allowTransfer { + p.PacketID = pk.PacketID + } + + if len(pk.Connect.ProtocolName) > 0 { + p.Connect.ProtocolName = append([]byte{}, pk.Connect.ProtocolName...) + } + + if len(pk.Connect.Password) > 0 { + p.Connect.PasswordFlag = true + p.Connect.Password = append([]byte{}, pk.Connect.Password...) + } + + if len(pk.Connect.Username) > 0 { + p.Connect.UsernameFlag = true + p.Connect.Username = append([]byte{}, pk.Connect.Username...) + } + + if len(pk.Connect.WillPayload) > 0 { + p.Connect.WillPayload = append([]byte{}, pk.Connect.WillPayload...) + } + + if len(pk.Payload) > 0 { + p.Payload = append([]byte{}, pk.Payload...) + } + + if len(pk.ReasonCodes) > 0 { + p.ReasonCodes = append([]byte{}, pk.ReasonCodes...) + } + + return p +} + +// Merge merges a new subscription with a base subscription, preserving the highest +// qos value, matched identifiers and any special properties. +func (s Subscription) Merge(n Subscription) Subscription { + if s.Identifiers == nil { + s.Identifiers = map[string]int{ + s.Filter: s.Identifier, + } + } + + if n.Identifier > 0 { + s.Identifiers[n.Filter] = n.Identifier + } + + if n.Qos > s.Qos { + s.Qos = n.Qos // [MQTT-3.3.4-2] + } + + if n.NoLocal { + s.NoLocal = true // [MQTT-3.8.3-3] + } + + return s +} + +// encode encodes a subscription and properties into bytes. +func (p Subscription) encode() byte { + var flag byte + flag |= p.Qos + + if p.NoLocal { + flag |= 1 << 2 + } + + if p.RetainAsPublished { + flag |= 1 << 3 + } + + flag |= p.RetainHandling << 4 + return flag +} + +// decode decodes subscription bytes into a subscription struct. +func (p *Subscription) decode(b byte) { + p.Qos = b & 3 // byte + p.NoLocal = 1&(b>>2) > 0 // bool + p.RetainAsPublished = 1&(b>>3) > 0 // bool + p.RetainHandling = 3 & (b >> 4) // byte +} + +// ConnectEncode encodes a connect packet. 
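+// The encoded bytes (fixed header first) are appended to buf. A minimal
+// sketch for an MQTT v3.1.1 connect (field values are illustrative):
+//
+//	pk := Packet{
+//		FixedHeader:     FixedHeader{Type: Connect},
+//		ProtocolVersion: 4,
+//		Connect: ConnectParams{
+//			ProtocolName:     []byte("MQTT"),
+//			ClientIdentifier: "client-1",
+//			Clean:            true,
+//			Keepalive:        30,
+//		},
+//	}
+//	var buf bytes.Buffer
+//	_ = pk.ConnectEncode(&buf)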
+func (pk *Packet) ConnectEncode(buf *bytes.Buffer) error { + nb := bytes.NewBuffer([]byte{}) + nb.Write(encodeBytes(pk.Connect.ProtocolName)) + nb.WriteByte(pk.ProtocolVersion) + + nb.WriteByte( + encodeBool(pk.Connect.Clean)<<1 | + encodeBool(pk.Connect.WillFlag)<<2 | + pk.Connect.WillQos<<3 | + encodeBool(pk.Connect.WillRetain)<<5 | + encodeBool(pk.Connect.PasswordFlag)<<6 | + encodeBool(pk.Connect.UsernameFlag)<<7 | + 0, // [MQTT-2.1.3-1] + ) + + nb.Write(encodeUint16(pk.Connect.Keepalive)) + + if pk.ProtocolVersion == 5 { + pb := bytes.NewBuffer([]byte{}) + (&pk.Properties).Encode(pk.FixedHeader.Type, pk.Mods, pb, 0) + nb.Write(pb.Bytes()) + } + + nb.Write(encodeString(pk.Connect.ClientIdentifier)) + + if pk.Connect.WillFlag { + if pk.ProtocolVersion == 5 { + pb := bytes.NewBuffer([]byte{}) + (&pk.Connect).WillProperties.Encode(WillProperties, pk.Mods, pb, 0) + nb.Write(pb.Bytes()) + } + + nb.Write(encodeString(pk.Connect.WillTopic)) + nb.Write(encodeBytes(pk.Connect.WillPayload)) + } + + if pk.Connect.UsernameFlag { + nb.Write(encodeBytes(pk.Connect.Username)) + } + + if pk.Connect.PasswordFlag { + nb.Write(encodeBytes(pk.Connect.Password)) + } + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + nb.WriteTo(buf) + + return nil +} + +// ConnectDecode decodes a connect packet. +func (pk *Packet) ConnectDecode(buf []byte) error { + var offset int + var err error + + pk.Connect.ProtocolName, offset, err = decodeBytes(buf, 0) + if err != nil { + return ErrMalformedProtocolName + } + + pk.ProtocolVersion, offset, err = decodeByte(buf, offset) + if err != nil { + return ErrMalformedProtocolVersion + } + + flags, offset, err := decodeByte(buf, offset) + if err != nil { + return ErrMalformedFlags + } + + pk.ReservedBit = 1 & flags + pk.Connect.Clean = 1&(flags>>1) > 0 + pk.Connect.WillFlag = 1&(flags>>2) > 0 + pk.Connect.WillQos = 3 & (flags >> 3) // this one is not a bool + pk.Connect.WillRetain = 1&(flags>>5) > 0 + pk.Connect.PasswordFlag = 1&(flags>>6) > 0 + pk.Connect.UsernameFlag = 1&(flags>>7) > 0 + + pk.Connect.Keepalive, offset, err = decodeUint16(buf, offset) + if err != nil { + return ErrMalformedKeepalive + } + + if pk.ProtocolVersion == 5 { + n, err := pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + offset += n + } + + pk.Connect.ClientIdentifier, offset, err = decodeString(buf, offset) // [MQTT-3.1.3-1] [MQTT-3.1.3-2] [MQTT-3.1.3-3] [MQTT-3.1.3-4] + if err != nil { + return ErrClientIdentifierNotValid // [MQTT-3.1.3-8] + } + + if pk.Connect.WillFlag { // [MQTT-3.1.2-7] + if pk.ProtocolVersion == 5 { + n, err := pk.Connect.WillProperties.Decode(WillProperties, bytes.NewBuffer(buf[offset:])) + if err != nil { + return ErrMalformedWillProperties + } + offset += n + } + + pk.Connect.WillTopic, offset, err = decodeString(buf, offset) + if err != nil { + return ErrMalformedWillTopic + } + + pk.Connect.WillPayload, offset, err = decodeBytes(buf, offset) + if err != nil { + return ErrMalformedWillPayload + } + } + + if pk.Connect.UsernameFlag { // [MQTT-3.1.3-12] + if offset >= len(buf) { // we are at the end of the packet + return ErrProtocolViolationFlagNoUsername // [MQTT-3.1.2-17] + } + + pk.Connect.Username, offset, err = decodeBytes(buf, offset) + if err != nil { + return ErrMalformedUsername + } + } + + if pk.Connect.PasswordFlag { + pk.Connect.Password, _, err = decodeBytes(buf, offset) + if err != nil { + return ErrMalformedPassword + } + } + + return nil 
+} + +// ConnectValidate ensures the connect packet is compliant. +func (pk *Packet) ConnectValidate() Code { + if !bytes.Equal(pk.Connect.ProtocolName, []byte{'M', 'Q', 'I', 's', 'd', 'p'}) && !bytes.Equal(pk.Connect.ProtocolName, []byte{'M', 'Q', 'T', 'T'}) { + return ErrProtocolViolationProtocolName // [MQTT-3.1.2-1] + } + + if (bytes.Equal(pk.Connect.ProtocolName, []byte{'M', 'Q', 'I', 's', 'd', 'p'}) && pk.ProtocolVersion != 3) || + (bytes.Equal(pk.Connect.ProtocolName, []byte{'M', 'Q', 'T', 'T'}) && pk.ProtocolVersion != 4 && pk.ProtocolVersion != 5) { + return ErrProtocolViolationProtocolVersion // [MQTT-3.1.2-2] + } + + if pk.ReservedBit != 0 { + return ErrProtocolViolationReservedBit // [MQTT-3.1.2-3] + } + + if len(pk.Connect.Password) > math.MaxUint16 { + return ErrProtocolViolationPasswordTooLong + } + + if len(pk.Connect.Username) > math.MaxUint16 { + return ErrProtocolViolationUsernameTooLong + } + + if !pk.Connect.UsernameFlag && len(pk.Connect.Username) > 0 { + return ErrProtocolViolationUsernameNoFlag // [MQTT-3.1.2-16] + } + + if pk.Connect.PasswordFlag && len(pk.Connect.Password) == 0 { + return ErrProtocolViolationFlagNoPassword // [MQTT-3.1.2-19] + } + + if !pk.Connect.PasswordFlag && len(pk.Connect.Password) > 0 { + return ErrProtocolViolationPasswordNoFlag // [MQTT-3.1.2-18] + } + + if len(pk.Connect.ClientIdentifier) > math.MaxUint16 { + return ErrClientIdentifierNotValid + } + + if pk.Connect.WillFlag { + if len(pk.Connect.WillPayload) == 0 || pk.Connect.WillTopic == "" { + return ErrProtocolViolationWillFlagNoPayload // [MQTT-3.1.2-9] + } + + if pk.Connect.WillQos > 2 { + return ErrProtocolViolationQosOutOfRange // [MQTT-3.1.2-12] + } + } + + if !pk.Connect.WillFlag && pk.Connect.WillRetain { + return ErrProtocolViolationWillFlagSurplusRetain // [MQTT-3.1.2-13] + } + + return CodeSuccess +} + +// ConnackEncode encodes a Connack packet. +func (pk *Packet) ConnackEncode(buf *bytes.Buffer) error { + nb := bytes.NewBuffer([]byte{}) + nb.WriteByte(encodeBool(pk.SessionPresent)) + nb.WriteByte(pk.ReasonCode) + + if pk.ProtocolVersion == 5 { + pb := bytes.NewBuffer([]byte{}) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()+2) // +SessionPresent +ReasonCode + nb.Write(pb.Bytes()) + } + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + nb.WriteTo(buf) + return nil +} + +// ConnackDecode decodes a Connack packet. +func (pk *Packet) ConnackDecode(buf []byte) error { + var offset int + var err error + + pk.SessionPresent, offset, err = decodeByteBool(buf, 0) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedSessionPresent) + } + + pk.ReasonCode, offset, err = decodeByte(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedReasonCode) + } + + if pk.ProtocolVersion == 5 { + _, err := pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + } + + return nil +} + +// DisconnectEncode encodes a Disconnect packet. +func (pk *Packet) DisconnectEncode(buf *bytes.Buffer) error { + nb := bytes.NewBuffer([]byte{}) + + if pk.ProtocolVersion == 5 { + nb.WriteByte(pk.ReasonCode) + + pb := bytes.NewBuffer([]byte{}) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()) + nb.Write(pb.Bytes()) + } + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + nb.WriteTo(buf) + + return nil +} + +// DisconnectDecode decodes a Disconnect packet. 
+func (pk *Packet) DisconnectDecode(buf []byte) error { + if pk.ProtocolVersion == 5 && pk.FixedHeader.Remaining > 1 { + var err error + var offset int + pk.ReasonCode, offset, err = decodeByte(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedReasonCode) + } + + if pk.FixedHeader.Remaining > 2 { + _, err = pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + } + } + + return nil +} + +// PingreqEncode encodes a Pingreq packet. +func (pk *Packet) PingreqEncode(buf *bytes.Buffer) error { + pk.FixedHeader.Encode(buf) + return nil +} + +// PingreqDecode decodes a Pingreq packet. +func (pk *Packet) PingreqDecode(buf []byte) error { + return nil +} + +// PingrespEncode encodes a Pingresp packet. +func (pk *Packet) PingrespEncode(buf *bytes.Buffer) error { + pk.FixedHeader.Encode(buf) + return nil +} + +// PingrespDecode decodes a Pingres packet. +func (pk *Packet) PingrespDecode(buf []byte) error { + return nil +} + +// PublishEncode encodes a Publish packet. +func (pk *Packet) PublishEncode(buf *bytes.Buffer) error { + nb := bytes.NewBuffer([]byte{}) + + nb.Write(encodeString(pk.TopicName)) // [MQTT-3.3.2-1] + + if pk.FixedHeader.Qos > 0 { + if pk.PacketID == 0 { + return ErrProtocolViolationNoPacketID // [MQTT-2.2.1-2] + } + nb.Write(encodeUint16(pk.PacketID)) + } + + if pk.ProtocolVersion == 5 { + pb := bytes.NewBuffer([]byte{}) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()+len(pk.Payload)) + nb.Write(pb.Bytes()) + } + + nb.Write(pk.Payload) + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + nb.WriteTo(buf) + + return nil +} + +// PublishDecode extracts the data values from the packet. +func (pk *Packet) PublishDecode(buf []byte) error { + var offset int + var err error + + pk.TopicName, offset, err = decodeString(buf, 0) // [MQTT-3.3.2-1] + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedTopic) + } + + if pk.FixedHeader.Qos > 0 { + pk.PacketID, offset, err = decodeUint16(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedPacketID) + } + } + + if pk.ProtocolVersion == 5 { + n, err := pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + + offset += n + } + + pk.Payload = buf[offset:] + + return nil +} + +// PublishValidate validates a publish packet. 
+func (pk *Packet) PublishValidate(topicAliasMaximum uint16) Code { + if pk.FixedHeader.Qos > 0 && pk.PacketID == 0 { + return ErrProtocolViolationNoPacketID // [MQTT-2.2.1-3] [MQTT-2.2.1-4] + } + + if pk.FixedHeader.Qos == 0 && pk.PacketID > 0 { + return ErrProtocolViolationSurplusPacketID // [MQTT-2.2.1-2] + } + + if strings.ContainsAny(pk.TopicName, "+#") { + return ErrProtocolViolationSurplusWildcard // [MQTT-3.3.2-2] + } + + if pk.Properties.TopicAlias > topicAliasMaximum { + return ErrTopicAliasInvalid // [MQTT-3.2.2-17] [MQTT-3.3.2-9] ~[MQTT-3.3.2-10] [MQTT-3.3.2-12] + } + + if pk.TopicName == "" && pk.Properties.TopicAlias == 0 { + return ErrProtocolViolationNoTopic // ~[MQTT-3.3.2-8] + } + + if pk.Properties.TopicAliasFlag && pk.Properties.TopicAlias == 0 { + return ErrTopicAliasInvalid // [MQTT-3.3.2-8] + } + + if len(pk.Properties.SubscriptionIdentifier) > 0 { + return ErrProtocolViolationSurplusSubID // [MQTT-3.3.4-6] + } + + return CodeSuccess +} + +// encodePubAckRelRecComp encodes a Puback, Pubrel, Pubrec, or Pubcomp packet. +func (pk *Packet) encodePubAckRelRecComp(buf *bytes.Buffer) error { + nb := bytes.NewBuffer([]byte{}) + nb.Write(encodeUint16(pk.PacketID)) + + if pk.ProtocolVersion == 5 { + pb := bytes.NewBuffer([]byte{}) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()) + if pk.ReasonCode >= ErrUnspecifiedError.Code || pb.Len() > 1 { + nb.WriteByte(pk.ReasonCode) + } + + if pb.Len() > 1 { + nb.Write(pb.Bytes()) + } + } + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + nb.WriteTo(buf) + return nil +} + +// decode extracts the data values from a Puback, Pubrel, Pubrec, or Pubcomp packet. +func (pk *Packet) decodePubAckRelRecComp(buf []byte) error { + var offset int + var err error + pk.PacketID, offset, err = decodeUint16(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedPacketID) + } + + if pk.ProtocolVersion == 5 && pk.FixedHeader.Remaining > 2 { + pk.ReasonCode, offset, err = decodeByte(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedReasonCode) + } + + if pk.FixedHeader.Remaining > 3 { + _, err = pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + } + } + + return nil +} + +// PubackEncode encodes a Puback packet. +func (pk *Packet) PubackEncode(buf *bytes.Buffer) error { + return pk.encodePubAckRelRecComp(buf) +} + +// PubackDecode decodes a Puback packet. +func (pk *Packet) PubackDecode(buf []byte) error { + return pk.decodePubAckRelRecComp(buf) +} + +// PubcompEncode encodes a Pubcomp packet. +func (pk *Packet) PubcompEncode(buf *bytes.Buffer) error { + return pk.encodePubAckRelRecComp(buf) +} + +// PubcompDecode decodes a Pubcomp packet. +func (pk *Packet) PubcompDecode(buf []byte) error { + return pk.decodePubAckRelRecComp(buf) +} + +// PubrecEncode encodes a Pubrec packet. +func (pk *Packet) PubrecEncode(buf *bytes.Buffer) error { + return pk.encodePubAckRelRecComp(buf) +} + +// PubrecDecode decodes a Pubrec packet. +func (pk *Packet) PubrecDecode(buf []byte) error { + return pk.decodePubAckRelRecComp(buf) +} + +// PubrelEncode encodes a Pubrel packet. +func (pk *Packet) PubrelEncode(buf *bytes.Buffer) error { + return pk.encodePubAckRelRecComp(buf) +} + +// PubrelDecode decodes a Pubrel packet. 
+func (pk *Packet) PubrelDecode(buf []byte) error { + return pk.decodePubAckRelRecComp(buf) +} + +// ReasonCodeValid returns true if the provided reason code is valid for the packet type. +func (pk *Packet) ReasonCodeValid() bool { + switch pk.FixedHeader.Type { + case Pubrec: + return bytes.Contains([]byte{ + CodeSuccess.Code, + CodeNoMatchingSubscribers.Code, + ErrUnspecifiedError.Code, + ErrImplementationSpecificError.Code, + ErrNotAuthorized.Code, + ErrTopicNameInvalid.Code, + ErrPacketIdentifierInUse.Code, + ErrQuotaExceeded.Code, + ErrPayloadFormatInvalid.Code, + }, []byte{pk.ReasonCode}) + case Pubrel: + fallthrough + case Pubcomp: + return bytes.Contains([]byte{ + CodeSuccess.Code, + ErrPacketIdentifierNotFound.Code, + }, []byte{pk.ReasonCode}) + case Suback: + return bytes.Contains([]byte{ + CodeGrantedQos0.Code, + CodeGrantedQos1.Code, + CodeGrantedQos2.Code, + ErrUnspecifiedError.Code, + ErrImplementationSpecificError.Code, + ErrNotAuthorized.Code, + ErrTopicFilterInvalid.Code, + ErrPacketIdentifierInUse.Code, + ErrQuotaExceeded.Code, + ErrSharedSubscriptionsNotSupported.Code, + ErrSubscriptionIdentifiersNotSupported.Code, + ErrWildcardSubscriptionsNotSupported.Code, + }, []byte{pk.ReasonCode}) + case Unsuback: + return bytes.Contains([]byte{ + CodeSuccess.Code, + CodeNoSubscriptionExisted.Code, + ErrUnspecifiedError.Code, + ErrImplementationSpecificError.Code, + ErrNotAuthorized.Code, + ErrTopicFilterInvalid.Code, + ErrPacketIdentifierInUse.Code, + }, []byte{pk.ReasonCode}) + } + + return true +} + +// SubackEncode encodes a Suback packet. +func (pk *Packet) SubackEncode(buf *bytes.Buffer) error { + nb := bytes.NewBuffer([]byte{}) + nb.Write(encodeUint16(pk.PacketID)) + + if pk.ProtocolVersion == 5 { + pb := bytes.NewBuffer([]byte{}) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()+len(pk.ReasonCodes)) + nb.Write(pb.Bytes()) + } + + nb.Write(pk.ReasonCodes) + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + nb.WriteTo(buf) + + return nil +} + +// SubackDecode decodes a Suback packet. +func (pk *Packet) SubackDecode(buf []byte) error { + var offset int + var err error + + pk.PacketID, offset, err = decodeUint16(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedPacketID) + } + + if pk.ProtocolVersion == 5 { + n, err := pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + offset += n + } + + pk.ReasonCodes = buf[offset:] + + return nil +} + +// SubscribeEncode encodes a Subscribe packet. +func (pk *Packet) SubscribeEncode(buf *bytes.Buffer) error { + if pk.PacketID == 0 { + return ErrProtocolViolationNoPacketID + } + + nb := bytes.NewBuffer([]byte{}) + nb.Write(encodeUint16(pk.PacketID)) + + xb := bytes.NewBuffer([]byte{}) // capture and write filters after length checks + for _, opts := range pk.Filters { + xb.Write(encodeString(opts.Filter)) // [MQTT-3.8.3-1] + if pk.ProtocolVersion == 5 { + xb.WriteByte(opts.encode()) + } else { + xb.WriteByte(opts.Qos) + } + } + + if pk.ProtocolVersion == 5 { + pb := bytes.NewBuffer([]byte{}) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()+xb.Len()) + nb.Write(pb.Bytes()) + } + + nb.Write(xb.Bytes()) + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + nb.WriteTo(buf) + + return nil +} + +// SubscribeDecode decodes a Subscribe packet. 
+func (pk *Packet) SubscribeDecode(buf []byte) error { + var offset int + var err error + + pk.PacketID, offset, err = decodeUint16(buf, offset) + if err != nil { + return ErrMalformedPacketID + } + + if pk.ProtocolVersion == 5 { + n, err := pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + offset += n + } + + var filter string + pk.Filters = Subscriptions{} + for offset < len(buf) { + filter, offset, err = decodeString(buf, offset) // [MQTT-3.8.3-1] + if err != nil { + return ErrMalformedTopic + } + + var option byte + sub := &Subscription{ + Filter: filter, + } + + if pk.ProtocolVersion == 5 { + sub.decode(buf[offset]) + offset += 1 + } else { + option, offset, err = decodeByte(buf, offset) + if err != nil { + return ErrMalformedQos + } + sub.Qos = option + } + + if len(pk.Properties.SubscriptionIdentifier) > 0 { + sub.Identifier = pk.Properties.SubscriptionIdentifier[0] + } + + if sub.Qos > 2 { + return ErrProtocolViolationQosOutOfRange + } + + pk.Filters = append(pk.Filters, *sub) + } + + return nil +} + +// SubscribeValidate ensures the packet is compliant. +func (pk *Packet) SubscribeValidate() Code { + if pk.FixedHeader.Qos > 0 && pk.PacketID == 0 { + return ErrProtocolViolationNoPacketID // [MQTT-2.2.1-3] [MQTT-2.2.1-4] + } + + if len(pk.Filters) == 0 { + return ErrProtocolViolationNoFilters // [MQTT-3.10.3-2] + } + + for _, v := range pk.Filters { + if v.Identifier > 268435455 { // 3.3.2.3.8 The Subscription Identifier can have the value of 1 to 268,435,455. + return ErrProtocolViolationOversizeSubID // + } + } + + return CodeSuccess +} + +// UnsubackEncode encodes an Unsuback packet. +func (pk *Packet) UnsubackEncode(buf *bytes.Buffer) error { + nb := bytes.NewBuffer([]byte{}) + nb.Write(encodeUint16(pk.PacketID)) + + if pk.ProtocolVersion == 5 { + pb := bytes.NewBuffer([]byte{}) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()) + nb.Write(pb.Bytes()) + } + + nb.Write(pk.ReasonCodes) + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + nb.WriteTo(buf) + + return nil +} + +// UnsubackDecode decodes an Unsuback packet. +func (pk *Packet) UnsubackDecode(buf []byte) error { + var offset int + var err error + + pk.PacketID, offset, err = decodeUint16(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedPacketID) + } + + if pk.ProtocolVersion == 5 { + n, err := pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + + offset += n + + pk.ReasonCodes = buf[offset:] + } + + return nil +} + +// UnsubscribeEncode encodes an Unsubscribe packet. +func (pk *Packet) UnsubscribeEncode(buf *bytes.Buffer) error { + if pk.PacketID == 0 { + return ErrProtocolViolationNoPacketID + } + + nb := bytes.NewBuffer([]byte{}) + nb.Write(encodeUint16(pk.PacketID)) + + xb := bytes.NewBuffer([]byte{}) // capture filters and write after length checks + for _, sub := range pk.Filters { + xb.Write(encodeString(sub.Filter)) // [MQTT-3.10.3-1] + } + + if pk.ProtocolVersion == 5 { + pb := bytes.NewBuffer([]byte{}) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()+xb.Len()) + nb.Write(pb.Bytes()) + } + + nb.Write(xb.Bytes()) + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + nb.WriteTo(buf) + + return nil +} + +// UnsubscribeDecode decodes an Unsubscribe packet. 
+func (pk *Packet) UnsubscribeDecode(buf []byte) error { + var offset int + var err error + + pk.PacketID, offset, err = decodeUint16(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedPacketID) + } + + if pk.ProtocolVersion == 5 { + n, err := pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + offset += n + } + + var filter string + pk.Filters = Subscriptions{} + for offset < len(buf) { + filter, offset, err = decodeString(buf, offset) // [MQTT-3.10.3-1] + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedTopic) + } + pk.Filters = append(pk.Filters, Subscription{Filter: filter}) + } + + return nil +} + +// UnsubscribeValidate validates an Unsubscribe packet. +func (pk *Packet) UnsubscribeValidate() Code { + if pk.FixedHeader.Qos > 0 && pk.PacketID == 0 { + return ErrProtocolViolationNoPacketID // [MQTT-2.2.1-3] [MQTT-2.2.1-4] + } + + if len(pk.Filters) == 0 { + return ErrProtocolViolationNoFilters // [MQTT-3.10.3-2] + } + + return CodeSuccess +} + +// AuthEncode encodes an Auth packet. +func (pk *Packet) AuthEncode(buf *bytes.Buffer) error { + nb := bytes.NewBuffer([]byte{}) + nb.WriteByte(pk.ReasonCode) + + pb := bytes.NewBuffer([]byte{}) + pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()) + nb.Write(pb.Bytes()) + + pk.FixedHeader.Remaining = nb.Len() + pk.FixedHeader.Encode(buf) + nb.WriteTo(buf) + return nil +} + +// AuthDecode decodes an Auth packet. +func (pk *Packet) AuthDecode(buf []byte) error { + var offset int + var err error + + pk.ReasonCode, offset, err = decodeByte(buf, offset) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedReasonCode) + } + + _, err = pk.Properties.Decode(pk.FixedHeader.Type, bytes.NewBuffer(buf[offset:])) + if err != nil { + return fmt.Errorf("%s: %w", err, ErrMalformedProperties) + } + + return nil +} + +// AuthValidate returns success if the auth packet is valid. +func (pk *Packet) AuthValidate() Code { + if pk.ReasonCode != CodeSuccess.Code && + pk.ReasonCode != CodeContinueAuthentication.Code && + pk.ReasonCode != CodeReAuthenticate.Code { + return ErrProtocolViolationInvalidReason // [MQTT-3.15.2-1] + } + + return CodeSuccess +} + +// FormatID returns the PacketID field as a decimal integer. 
+func (pk *Packet) FormatID() string { + return strconv.FormatUint(uint64(pk.PacketID), 10) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/packets/properties.go b/vendor/github.com/mochi-mqtt/server/v2/packets/properties.go new file mode 100644 index 00000000..c5eefc1a --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/packets/properties.go @@ -0,0 +1,477 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package packets + +import ( + "bytes" + "fmt" + "strings" +) + +const ( + PropPayloadFormat byte = 1 + PropMessageExpiryInterval byte = 2 + PropContentType byte = 3 + PropResponseTopic byte = 8 + PropCorrelationData byte = 9 + PropSubscriptionIdentifier byte = 11 + PropSessionExpiryInterval byte = 17 + PropAssignedClientID byte = 18 + PropServerKeepAlive byte = 19 + PropAuthenticationMethod byte = 21 + PropAuthenticationData byte = 22 + PropRequestProblemInfo byte = 23 + PropWillDelayInterval byte = 24 + PropRequestResponseInfo byte = 25 + PropResponseInfo byte = 26 + PropServerReference byte = 28 + PropReasonString byte = 31 + PropReceiveMaximum byte = 33 + PropTopicAliasMaximum byte = 34 + PropTopicAlias byte = 35 + PropMaximumQos byte = 36 + PropRetainAvailable byte = 37 + PropUser byte = 38 + PropMaximumPacketSize byte = 39 + PropWildcardSubAvailable byte = 40 + PropSubIDAvailable byte = 41 + PropSharedSubAvailable byte = 42 +) + +// validPacketProperties indicates which properties are valid for which packet types. +var validPacketProperties = map[byte]map[byte]byte{ + PropPayloadFormat: {Publish: 1, WillProperties: 1}, + PropMessageExpiryInterval: {Publish: 1, WillProperties: 1}, + PropContentType: {Publish: 1, WillProperties: 1}, + PropResponseTopic: {Publish: 1, WillProperties: 1}, + PropCorrelationData: {Publish: 1, WillProperties: 1}, + PropSubscriptionIdentifier: {Publish: 1, Subscribe: 1}, + PropSessionExpiryInterval: {Connect: 1, Connack: 1, Disconnect: 1}, + PropAssignedClientID: {Connack: 1}, + PropServerKeepAlive: {Connack: 1}, + PropAuthenticationMethod: {Connect: 1, Connack: 1, Auth: 1}, + PropAuthenticationData: {Connect: 1, Connack: 1, Auth: 1}, + PropRequestProblemInfo: {Connect: 1}, + PropWillDelayInterval: {WillProperties: 1}, + PropRequestResponseInfo: {Connect: 1}, + PropResponseInfo: {Connack: 1}, + PropServerReference: {Connack: 1, Disconnect: 1}, + PropReasonString: {Connack: 1, Puback: 1, Pubrec: 1, Pubrel: 1, Pubcomp: 1, Suback: 1, Unsuback: 1, Disconnect: 1, Auth: 1}, + PropReceiveMaximum: {Connect: 1, Connack: 1}, + PropTopicAliasMaximum: {Connect: 1, Connack: 1}, + PropTopicAlias: {Publish: 1}, + PropMaximumQos: {Connack: 1}, + PropRetainAvailable: {Connack: 1}, + PropUser: {Connect: 1, Connack: 1, Publish: 1, Puback: 1, Pubrec: 1, Pubrel: 1, Pubcomp: 1, Subscribe: 1, Suback: 1, Unsubscribe: 1, Unsuback: 1, Disconnect: 1, Auth: 1, WillProperties: 1}, + PropMaximumPacketSize: {Connect: 1, Connack: 1}, + PropWildcardSubAvailable: {Connack: 1}, + PropSubIDAvailable: {Connack: 1}, + PropSharedSubAvailable: {Connack: 1}, +} + +// UserProperty is an arbitrary key-value pair for a packet user properties array. +type UserProperty struct { // [MQTT-1.5.7-1] + Key string `json:"k"` + Val string `json:"v"` +} + +// Properties contains all of the mqtt v5 properties available for a packet. +// Some properties have valid values of 0 or not-present. In this case, we opt for +// property flags to indicate the usage of property. 
+// Refer to mqtt v5 2.2.2.2 Property spec for more information. +type Properties struct { + CorrelationData []byte `json:"cd"` + SubscriptionIdentifier []int `json:"si"` + AuthenticationData []byte `json:"ad"` + User []UserProperty `json:"user"` + ContentType string `json:"ct"` + ResponseTopic string `json:"rt"` + AssignedClientID string `json:"aci"` + AuthenticationMethod string `json:"am"` + ResponseInfo string `json:"ri"` + ServerReference string `json:"sr"` + ReasonString string `json:"rs"` + MessageExpiryInterval uint32 `json:"me"` + SessionExpiryInterval uint32 `json:"sei"` + WillDelayInterval uint32 `json:"wdi"` + MaximumPacketSize uint32 `json:"mps"` + ServerKeepAlive uint16 `json:"ska"` + ReceiveMaximum uint16 `json:"rm"` + TopicAliasMaximum uint16 `json:"tam"` + TopicAlias uint16 `json:"ta"` + PayloadFormat byte `json:"pf"` + PayloadFormatFlag bool `json:"fpf"` + SessionExpiryIntervalFlag bool `json:"fsei"` + ServerKeepAliveFlag bool `json:"fska"` + RequestProblemInfo byte `json:"rpi"` + RequestProblemInfoFlag bool `json:"frpi"` + RequestResponseInfo byte `json:"rri"` + TopicAliasFlag bool `json:"fta"` + MaximumQos byte `json:"mqos"` + MaximumQosFlag bool `json:"fmqos"` + RetainAvailable byte `json:"ra"` + RetainAvailableFlag bool `json:"fra"` + WildcardSubAvailable byte `json:"wsa"` + WildcardSubAvailableFlag bool `json:"fwsa"` + SubIDAvailable byte `json:"sida"` + SubIDAvailableFlag bool `json:"fsida"` + SharedSubAvailable byte `json:"ssa"` + SharedSubAvailableFlag bool `json:"fssa"` +} + +// Copy creates a new Properties struct with copies of the values. +func (p *Properties) Copy(allowTransfer bool) Properties { + pr := Properties{ + PayloadFormat: p.PayloadFormat, // [MQTT-3.3.2-4] + PayloadFormatFlag: p.PayloadFormatFlag, + MessageExpiryInterval: p.MessageExpiryInterval, + ContentType: p.ContentType, // [MQTT-3.3.2-20] + ResponseTopic: p.ResponseTopic, // [MQTT-3.3.2-15] + SessionExpiryInterval: p.SessionExpiryInterval, + SessionExpiryIntervalFlag: p.SessionExpiryIntervalFlag, + AssignedClientID: p.AssignedClientID, + ServerKeepAlive: p.ServerKeepAlive, + ServerKeepAliveFlag: p.ServerKeepAliveFlag, + AuthenticationMethod: p.AuthenticationMethod, + RequestProblemInfo: p.RequestProblemInfo, + RequestProblemInfoFlag: p.RequestProblemInfoFlag, + WillDelayInterval: p.WillDelayInterval, + RequestResponseInfo: p.RequestResponseInfo, + ResponseInfo: p.ResponseInfo, + ServerReference: p.ServerReference, + ReasonString: p.ReasonString, + ReceiveMaximum: p.ReceiveMaximum, + TopicAliasMaximum: p.TopicAliasMaximum, + TopicAlias: 0, // NB; do not copy topic alias [MQTT-3.3.2-7] + we do not send to clients (currently) [MQTT-3.1.2-26] [MQTT-3.1.2-27] + MaximumQos: p.MaximumQos, + MaximumQosFlag: p.MaximumQosFlag, + RetainAvailable: p.RetainAvailable, + RetainAvailableFlag: p.RetainAvailableFlag, + MaximumPacketSize: p.MaximumPacketSize, + WildcardSubAvailable: p.WildcardSubAvailable, + WildcardSubAvailableFlag: p.WildcardSubAvailableFlag, + SubIDAvailable: p.SubIDAvailable, + SubIDAvailableFlag: p.SubIDAvailableFlag, + SharedSubAvailable: p.SharedSubAvailable, + SharedSubAvailableFlag: p.SharedSubAvailableFlag, + } + + if allowTransfer { + pr.TopicAlias = p.TopicAlias + pr.TopicAliasFlag = p.TopicAliasFlag + } + + if len(p.CorrelationData) > 0 { + pr.CorrelationData = append([]byte{}, p.CorrelationData...) // [MQTT-3.3.2-16] + } + + if len(p.SubscriptionIdentifier) > 0 { + pr.SubscriptionIdentifier = append([]int{}, p.SubscriptionIdentifier...) 
+ } + + if len(p.AuthenticationData) > 0 { + pr.AuthenticationData = append([]byte{}, p.AuthenticationData...) + } + + if len(p.User) > 0 { + pr.User = []UserProperty{} + for _, v := range p.User { + pr.User = append(pr.User, UserProperty{ // [MQTT-3.3.2-17] + Key: v.Key, + Val: v.Val, + }) + } + } + + return pr +} + +// canEncode returns true if the property type is valid for the packet type. +func (p *Properties) canEncode(pkt byte, k byte) bool { + return validPacketProperties[k][pkt] == 1 +} + +// Encode encodes properties into a bytes buffer. +func (p *Properties) Encode(pkt byte, mods Mods, b *bytes.Buffer, n int) { + if p == nil { + return + } + + var buf bytes.Buffer + if p.canEncode(pkt, PropPayloadFormat) && p.PayloadFormatFlag { + buf.WriteByte(PropPayloadFormat) + buf.WriteByte(p.PayloadFormat) + } + + if p.canEncode(pkt, PropMessageExpiryInterval) && p.MessageExpiryInterval > 0 { + buf.WriteByte(PropMessageExpiryInterval) + buf.Write(encodeUint32(p.MessageExpiryInterval)) + } + + if p.canEncode(pkt, PropContentType) && p.ContentType != "" { + buf.WriteByte(PropContentType) + buf.Write(encodeString(p.ContentType)) // [MQTT-3.3.2-19] + } + + if mods.AllowResponseInfo && p.canEncode(pkt, PropResponseTopic) && // [MQTT-3.3.2-14] + p.ResponseTopic != "" && !strings.ContainsAny(p.ResponseTopic, "+#") { // [MQTT-3.1.2-28] + buf.WriteByte(PropResponseTopic) + buf.Write(encodeString(p.ResponseTopic)) // [MQTT-3.3.2-13] + } + + if mods.AllowResponseInfo && p.canEncode(pkt, PropCorrelationData) && len(p.CorrelationData) > 0 { // [MQTT-3.1.2-28] + buf.WriteByte(PropCorrelationData) + buf.Write(encodeBytes(p.CorrelationData)) + } + + if p.canEncode(pkt, PropSubscriptionIdentifier) && len(p.SubscriptionIdentifier) > 0 { + for _, v := range p.SubscriptionIdentifier { + if v > 0 { + buf.WriteByte(PropSubscriptionIdentifier) + encodeLength(&buf, int64(v)) + } + } + } + + if p.canEncode(pkt, PropSessionExpiryInterval) && p.SessionExpiryIntervalFlag { // [MQTT-3.14.2-2] + buf.WriteByte(PropSessionExpiryInterval) + buf.Write(encodeUint32(p.SessionExpiryInterval)) + } + + if p.canEncode(pkt, PropAssignedClientID) && p.AssignedClientID != "" { + buf.WriteByte(PropAssignedClientID) + buf.Write(encodeString(p.AssignedClientID)) + } + + if p.canEncode(pkt, PropServerKeepAlive) && p.ServerKeepAliveFlag { + buf.WriteByte(PropServerKeepAlive) + buf.Write(encodeUint16(p.ServerKeepAlive)) + } + + if p.canEncode(pkt, PropAuthenticationMethod) && p.AuthenticationMethod != "" { + buf.WriteByte(PropAuthenticationMethod) + buf.Write(encodeString(p.AuthenticationMethod)) + } + + if p.canEncode(pkt, PropAuthenticationData) && len(p.AuthenticationData) > 0 { + buf.WriteByte(PropAuthenticationData) + buf.Write(encodeBytes(p.AuthenticationData)) + } + + if p.canEncode(pkt, PropRequestProblemInfo) && p.RequestProblemInfoFlag { + buf.WriteByte(PropRequestProblemInfo) + buf.WriteByte(p.RequestProblemInfo) + } + + if p.canEncode(pkt, PropWillDelayInterval) && p.WillDelayInterval > 0 { + buf.WriteByte(PropWillDelayInterval) + buf.Write(encodeUint32(p.WillDelayInterval)) + } + + if p.canEncode(pkt, PropRequestResponseInfo) && p.RequestResponseInfo > 0 { + buf.WriteByte(PropRequestResponseInfo) + buf.WriteByte(p.RequestResponseInfo) + } + + if mods.AllowResponseInfo && p.canEncode(pkt, PropResponseInfo) && len(p.ResponseInfo) > 0 { // [MQTT-3.1.2-28] + buf.WriteByte(PropResponseInfo) + buf.Write(encodeString(p.ResponseInfo)) + } + + if p.canEncode(pkt, PropServerReference) && len(p.ServerReference) > 0 { + 
buf.WriteByte(PropServerReference) + buf.Write(encodeString(p.ServerReference)) + } + + // [MQTT-3.2.2-19] [MQTT-3.14.2-3] [MQTT-3.4.2-2] [MQTT-3.5.2-2] + // [MQTT-3.6.2-2] [MQTT-3.9.2-1] [MQTT-3.11.2-1] [MQTT-3.15.2-2] + if !mods.DisallowProblemInfo && p.canEncode(pkt, PropReasonString) && p.ReasonString != "" { + b := encodeString(p.ReasonString) + if mods.MaxSize == 0 || uint32(n+len(b)+1) < mods.MaxSize { + buf.WriteByte(PropReasonString) + buf.Write(b) + } + } + + if p.canEncode(pkt, PropReceiveMaximum) && p.ReceiveMaximum > 0 { + buf.WriteByte(PropReceiveMaximum) + buf.Write(encodeUint16(p.ReceiveMaximum)) + } + + if p.canEncode(pkt, PropTopicAliasMaximum) && p.TopicAliasMaximum > 0 { + buf.WriteByte(PropTopicAliasMaximum) + buf.Write(encodeUint16(p.TopicAliasMaximum)) + } + + if p.canEncode(pkt, PropTopicAlias) && p.TopicAliasFlag && p.TopicAlias > 0 { // [MQTT-3.3.2-8] + buf.WriteByte(PropTopicAlias) + buf.Write(encodeUint16(p.TopicAlias)) + } + + if p.canEncode(pkt, PropMaximumQos) && p.MaximumQosFlag && p.MaximumQos < 2 { + buf.WriteByte(PropMaximumQos) + buf.WriteByte(p.MaximumQos) + } + + if p.canEncode(pkt, PropRetainAvailable) && p.RetainAvailableFlag { + buf.WriteByte(PropRetainAvailable) + buf.WriteByte(p.RetainAvailable) + } + + if !mods.DisallowProblemInfo && p.canEncode(pkt, PropUser) { + pb := bytes.NewBuffer([]byte{}) + for _, v := range p.User { + pb.WriteByte(PropUser) + pb.Write(encodeString(v.Key)) + pb.Write(encodeString(v.Val)) + } + // [MQTT-3.2.2-20] [MQTT-3.14.2-4] [MQTT-3.4.2-3] [MQTT-3.5.2-3] + // [MQTT-3.6.2-3] [MQTT-3.9.2-2] [MQTT-3.11.2-2] [MQTT-3.15.2-3] + if mods.MaxSize == 0 || uint32(n+pb.Len()+1) < mods.MaxSize { + buf.Write(pb.Bytes()) + } + } + + if p.canEncode(pkt, PropMaximumPacketSize) && p.MaximumPacketSize > 0 { + buf.WriteByte(PropMaximumPacketSize) + buf.Write(encodeUint32(p.MaximumPacketSize)) + } + + if p.canEncode(pkt, PropWildcardSubAvailable) && p.WildcardSubAvailableFlag { + buf.WriteByte(PropWildcardSubAvailable) + buf.WriteByte(p.WildcardSubAvailable) + } + + if p.canEncode(pkt, PropSubIDAvailable) && p.SubIDAvailableFlag { + buf.WriteByte(PropSubIDAvailable) + buf.WriteByte(p.SubIDAvailable) + } + + if p.canEncode(pkt, PropSharedSubAvailable) && p.SharedSubAvailableFlag { + buf.WriteByte(PropSharedSubAvailable) + buf.WriteByte(p.SharedSubAvailable) + } + + encodeLength(b, int64(buf.Len())) + buf.WriteTo(b) // [MQTT-3.1.3-10] +} + +// Decode decodes property bytes into a properties struct. 
+func (p *Properties) Decode(pkt byte, b *bytes.Buffer) (n int, err error) { + if p == nil { + return 0, nil + } + + var bu int + n, bu, err = DecodeLength(b) + if err != nil { + return n + bu, err + } + + if n == 0 { + return n + bu, nil + } + + bt := b.Bytes() + var k byte + for offset := 0; offset < n; { + k, offset, err = decodeByte(bt, offset) + if err != nil { + return n + bu, err + } + + if _, ok := validPacketProperties[k][pkt]; !ok { + return n + bu, fmt.Errorf("property type %v not valid for packet type %v: %w", k, pkt, ErrProtocolViolationUnsupportedProperty) + } + + switch k { + case PropPayloadFormat: + p.PayloadFormat, offset, err = decodeByte(bt, offset) + p.PayloadFormatFlag = true + case PropMessageExpiryInterval: + p.MessageExpiryInterval, offset, err = decodeUint32(bt, offset) + case PropContentType: + p.ContentType, offset, err = decodeString(bt, offset) + case PropResponseTopic: + p.ResponseTopic, offset, err = decodeString(bt, offset) + case PropCorrelationData: + p.CorrelationData, offset, err = decodeBytes(bt, offset) + case PropSubscriptionIdentifier: + if p.SubscriptionIdentifier == nil { + p.SubscriptionIdentifier = []int{} + } + + n, bu, err := DecodeLength(bytes.NewBuffer(bt[offset:])) + if err != nil { + return n + bu, err + } + p.SubscriptionIdentifier = append(p.SubscriptionIdentifier, n) + offset += bu + case PropSessionExpiryInterval: + p.SessionExpiryInterval, offset, err = decodeUint32(bt, offset) + p.SessionExpiryIntervalFlag = true + case PropAssignedClientID: + p.AssignedClientID, offset, err = decodeString(bt, offset) + case PropServerKeepAlive: + p.ServerKeepAlive, offset, err = decodeUint16(bt, offset) + p.ServerKeepAliveFlag = true + case PropAuthenticationMethod: + p.AuthenticationMethod, offset, err = decodeString(bt, offset) + case PropAuthenticationData: + p.AuthenticationData, offset, err = decodeBytes(bt, offset) + case PropRequestProblemInfo: + p.RequestProblemInfo, offset, err = decodeByte(bt, offset) + p.RequestProblemInfoFlag = true + case PropWillDelayInterval: + p.WillDelayInterval, offset, err = decodeUint32(bt, offset) + case PropRequestResponseInfo: + p.RequestResponseInfo, offset, err = decodeByte(bt, offset) + case PropResponseInfo: + p.ResponseInfo, offset, err = decodeString(bt, offset) + case PropServerReference: + p.ServerReference, offset, err = decodeString(bt, offset) + case PropReasonString: + p.ReasonString, offset, err = decodeString(bt, offset) + case PropReceiveMaximum: + p.ReceiveMaximum, offset, err = decodeUint16(bt, offset) + case PropTopicAliasMaximum: + p.TopicAliasMaximum, offset, err = decodeUint16(bt, offset) + case PropTopicAlias: + p.TopicAlias, offset, err = decodeUint16(bt, offset) + p.TopicAliasFlag = true + case PropMaximumQos: + p.MaximumQos, offset, err = decodeByte(bt, offset) + p.MaximumQosFlag = true + case PropRetainAvailable: + p.RetainAvailable, offset, err = decodeByte(bt, offset) + p.RetainAvailableFlag = true + case PropUser: + var k, v string + k, offset, err = decodeString(bt, offset) + if err != nil { + return n + bu, err + } + v, offset, err = decodeString(bt, offset) + p.User = append(p.User, UserProperty{Key: k, Val: v}) + case PropMaximumPacketSize: + p.MaximumPacketSize, offset, err = decodeUint32(bt, offset) + case PropWildcardSubAvailable: + p.WildcardSubAvailable, offset, err = decodeByte(bt, offset) + p.WildcardSubAvailableFlag = true + case PropSubIDAvailable: + p.SubIDAvailable, offset, err = decodeByte(bt, offset) + p.SubIDAvailableFlag = true + case PropSharedSubAvailable: + 
p.SharedSubAvailable, offset, err = decodeByte(bt, offset) + p.SharedSubAvailableFlag = true + } + + if err != nil { + return n + bu, err + } + } + + return n + bu, nil +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/packets/tpackets.go b/vendor/github.com/mochi-mqtt/server/v2/packets/tpackets.go new file mode 100644 index 00000000..9e44c12e --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/packets/tpackets.go @@ -0,0 +1,3939 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package packets + +// TPacketCase contains data for cross-checking the encoding and decoding +// of packets and expected scenarios. +type TPacketCase struct { + RawBytes []byte // the bytes that make the packet + ActualBytes []byte // the actual byte array that is created in the event of a byte mutation + Group string // a group that should run the test, blank for all + Desc string // a description of the test + FailFirst error // expected fail result to be run immediately after the method is called + Packet *Packet // the packet that is Expected + ActualPacket *Packet // the actual packet after mutations + Expect error // generic Expected fail result to be checked + Isolate bool // isolate can be used to isolate a test + Primary bool // primary is a test that should be run using readPackets + Case byte // the identifying byte of the case +} + +// TPacketCases is a slice of TPacketCase. +type TPacketCases []TPacketCase + +// Get returns a case matching a given T byte. +func (f TPacketCases) Get(b byte) TPacketCase { + for _, v := range f { + if v.Case == b { + return v + } + } + + return TPacketCase{} +} + +const ( + TConnectMqtt31 byte = iota + TConnectMqtt311 + TConnectMqtt5 + TConnectMqtt5LWT + TConnectClean + TConnectCleanLWT + TConnectUserPass + TConnectUserPassLWT + TConnectMalProtocolName + TConnectMalProtocolVersion + TConnectMalFlags + TConnectMalKeepalive + TConnectMalClientID + TConnectMalWillTopic + TConnectMalWillFlag + TConnectMalUsername + TConnectMalPassword + TConnectMalFixedHeader + TConnectMalReservedBit + TConnectMalProperties + TConnectMalWillProperties + TConnectInvalidProtocolName + TConnectInvalidProtocolVersion + TConnectInvalidProtocolVersion2 + TConnectInvalidReservedBit + TConnectInvalidClientIDTooLong + TConnectInvalidPasswordNoUsername + TConnectInvalidFlagNoUsername + TConnectInvalidFlagNoPassword + TConnectInvalidUsernameNoFlag + TConnectInvalidPasswordNoFlag + TConnectInvalidUsernameTooLong + TConnectInvalidPasswordTooLong + TConnectInvalidWillFlagNoPayload + TConnectInvalidWillFlagQosOutOfRange + TConnectInvalidWillSurplusRetain + TConnectZeroByteUsername + TConnectSpecInvalidUTF8D800 + TConnectSpecInvalidUTF8DFFF + TConnectSpecInvalidUTF80000 + TConnectSpecInvalidUTF8NoSkip + TConnackAcceptedNoSession + TConnackAcceptedSessionExists + TConnackAcceptedMqtt5 + TConnackAcceptedAdjustedExpiryInterval + TConnackMinMqtt5 + TConnackMinCleanMqtt5 + TConnackServerKeepalive + TConnackInvalidMinMqtt5 + TConnackBadProtocolVersion + TConnackProtocolViolationNoSession + TConnackBadClientID + TConnackServerUnavailable + TConnackBadUsernamePassword + TConnackBadUsernamePasswordNoSession + TConnackMqtt5BadUsernamePasswordNoSession + TConnackNotAuthorised + TConnackMalSessionPresent + TConnackMalReturnCode + TConnackMalProperties + TConnackDropProperties + TConnackDropPropertiesPartial + TPublishNoPayload + TPublishBasic + TPublishBasicTopicAliasOnly + TPublishBasicMqtt5 + TPublishMqtt5 + TPublishQos1 + 
TPublishQos1Mqtt5 + TPublishQos1NoPayload + TPublishQos1Dup + TPublishQos2 + TPublishQos2Mqtt5 + TPublishQos2Upgraded + TPublishSubscriberIdentifier + TPublishRetain + TPublishRetainMqtt5 + TPublishDup + TPublishMalTopicName + TPublishMalPacketID + TPublishMalProperties + TPublishCopyBasic + TPublishSpecQos0NoPacketID + TPublishSpecQosMustPacketID + TPublishDropOversize + TPublishInvalidQos0NoPacketID + TPublishInvalidQosMustPacketID + TPublishInvalidSurplusSubID + TPublishInvalidSurplusWildcard + TPublishInvalidSurplusWildcard2 + TPublishInvalidNoTopic + TPublishInvalidTopicAlias + TPublishInvalidExcessTopicAlias + TPublishSpecDenySysTopic + TPuback + TPubackMqtt5 + TPubackMalPacketID + TPubackMalProperties + TPubackUnexpectedError + TPubrec + TPubrecMqtt5 + TPubrecMqtt5IDInUse + TPubrecMalPacketID + TPubrecMalProperties + TPubrecMalReasonCode + TPubrecInvalidReason + TPubrel + TPubrelMqtt5 + TPubrelMqtt5AckNoPacket + TPubrelMalPacketID + TPubrelMalProperties + TPubrelInvalidReason + TPubcomp + TPubcompMqtt5 + TPubcompMqtt5AckNoPacket + TPubcompMalPacketID + TPubcompMalProperties + TPubcompInvalidReason + TSubscribe + TSubscribeMany + TSubscribeMqtt5 + TSubscribeRetainHandling1 + TSubscribeRetainHandling2 + TSubscribeRetainAsPublished + TSubscribeMalPacketID + TSubscribeMalTopic + TSubscribeMalQos + TSubscribeMalQosRange + TSubscribeMalProperties + TSubscribeInvalidQosMustPacketID + TSubscribeSpecQosMustPacketID + TSubscribeInvalidNoFilters + TSubscribeInvalidSharedNoLocal + TSubscribeInvalidFilter + TSubscribeInvalidIdentifierOversize + TSuback + TSubackMany + TSubackDeny + TSubackUnspecifiedError + TSubackUnspecifiedErrorMqtt5 + TSubackMqtt5 + TSubackPacketIDInUse + TSubackInvalidFilter + TSubackInvalidSharedNoLocal + TSubackMalPacketID + TSubackMalProperties + TUnsubscribe + TUnsubscribeMany + TUnsubscribeMqtt5 + TUnsubscribeDropProperties + TUnsubscribeMalPacketID + TUnsubscribeMalTopicName + TUnsubscribeMalProperties + TUnsubscribeInvalidQosMustPacketID + TUnsubscribeSpecQosMustPacketID + TUnsubscribeInvalidNoFilters + TUnsuback + TUnsubackMany + TUnsubackMqtt5 + TUnsubackPacketIDInUse + TUnsubackMalPacketID + TUnsubackMalProperties + TPingreq + TPingresp + TDisconnect + TDisconnectTakeover + TDisconnectMqtt5 + TDisconnectNormalMqtt5 + TDisconnectSecondConnect + TDisconnectReceiveMaximum + TDisconnectDropProperties + TDisconnectShuttingDown + TDisconnectMalProperties + TDisconnectMalReasonCode + TDisconnectZeroNonZeroExpiry + TAuth + TAuthMalReasonCode + TAuthMalProperties + TAuthInvalidReason + TAuthInvalidReason2 +) + +// TPacketData contains individual encoding and decoding scenarios for each packet type. 
+var TPacketData = map[byte]TPacketCases{ + Connect: { + { + Case: TConnectMqtt31, + Desc: "mqtt v3.1", + Primary: true, + RawBytes: []byte{ + Connect << 4, 17, // Fixed header + 0, 6, // Protocol Name - MSB+LSB + 'M', 'Q', 'I', 's', 'd', 'p', // Protocol Name + 3, // Protocol Version + 0, // Packet Flags + 0, 30, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 17, + }, + ProtocolVersion: 3, + Connect: ConnectParams{ + ProtocolName: []byte("MQIsdp"), + Clean: false, + Keepalive: 30, + ClientIdentifier: "zen", + }, + }, + }, + { + Case: TConnectMqtt311, + Desc: "mqtt v3.1.1", + Primary: true, + RawBytes: []byte{ + Connect << 4, 15, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0, // Packet Flags + 0, 60, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 15, + }, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Clean: false, + Keepalive: 60, + ClientIdentifier: "zen", + }, + }, + }, + { + Case: TConnectMqtt5, + Desc: "mqtt v5", + Primary: true, + RawBytes: []byte{ + Connect << 4, 87, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 5, // Protocol Version + 0, // Packet Flags + 0, 30, // Keepalive + + // Properties + 71, // length + 17, 0, 0, 0, 120, // Session Expiry Interval (17) + 21, 0, 5, 'S', 'H', 'A', '-', '1', // Authentication Method (21) + 22, 0, 9, 'a', 'u', 't', 'h', '-', 'd', 'a', 't', 'a', // Authentication Data (22) + 23, 1, // Request Problem Info (23) + 25, 1, // Request Response Info (25) + 33, 1, 244, // Receive Maximum (33) + 34, 3, 231, // Topic Alias Maximum (34) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + 38, // User Properties (38) + 0, 4, 'k', 'e', 'y', '2', + 0, 6, 'v', 'a', 'l', 'u', 'e', '2', + 39, 0, 0, 125, 0, // Maximum Packet Size (39) + + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 87, + }, + ProtocolVersion: 5, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Clean: false, + Keepalive: 30, + ClientIdentifier: "zen", + }, + Properties: Properties{ + SessionExpiryInterval: uint32(120), + SessionExpiryIntervalFlag: true, + AuthenticationMethod: "SHA-1", + AuthenticationData: []byte("auth-data"), + RequestProblemInfo: byte(1), + RequestProblemInfoFlag: true, + RequestResponseInfo: byte(1), + ReceiveMaximum: uint16(500), + TopicAliasMaximum: uint16(999), + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + { + Key: "key2", + Val: "value2", + }, + }, + MaximumPacketSize: uint32(32000), + }, + }, + }, + { + Case: TConnectClean, + Desc: "mqtt 3.1.1, clean session", + RawBytes: []byte{ + Connect << 4, 15, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 2, // Packet Flags + 0, 45, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 15, + }, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Clean: true, + Keepalive: 45, + ClientIdentifier: "zen", + }, + }, + }, + { + Case: TConnectMqtt5LWT, + Desc: "mqtt 5 clean 
session, lwt", + RawBytes: []byte{ + Connect << 4, 47, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 5, // Protocol Version + 14, // Packet Flags + 0, 30, // Keepalive + + // Properties + 10, // length + 17, 0, 0, 0, 120, // Session Expiry Interval (17) + 39, 0, 0, 125, 0, // Maximum Packet Size (39) + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 5, // will properties length + 24, 0, 0, 2, 88, // will delay interval (24) + + 0, 3, // Will Topic - MSB+LSB + 'l', 'w', 't', + 0, 8, // Will Message MSB+LSB + 'n', 'o', 't', 'a', 'g', 'a', 'i', 'n', + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 42, + }, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Clean: true, + Keepalive: 30, + ClientIdentifier: "zen", + WillFlag: true, + WillTopic: "lwt", + WillPayload: []byte("notagain"), + WillQos: 1, + WillProperties: Properties{ + WillDelayInterval: uint32(600), + }, + }, + Properties: Properties{ + SessionExpiryInterval: uint32(120), + SessionExpiryIntervalFlag: true, + MaximumPacketSize: uint32(32000), + }, + }, + }, + { + Case: TConnectUserPass, + Desc: "mqtt 3.1.1, username, password", + RawBytes: []byte{ + Connect << 4, 28, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0 | 1<<6 | 1<<7, // Packet Flags + 0, 20, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 0, 5, // Username MSB+LSB + 'm', 'o', 'c', 'h', 'i', + 0, 4, // Password MSB+LSB + ',', '.', '/', ';', + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 28, + }, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Clean: false, + Keepalive: 20, + ClientIdentifier: "zen", + UsernameFlag: true, + PasswordFlag: true, + Username: []byte("mochi"), + Password: []byte(",./;"), + }, + }, + }, + { + Case: TConnectUserPassLWT, + Desc: "mqtt 3.1.1, username, password, lwt", + Primary: true, + RawBytes: []byte{ + Connect << 4, 44, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 206, // Packet Flags + 0, 120, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 0, 3, // Will Topic - MSB+LSB + 'l', 'w', 't', + 0, 9, // Will Message MSB+LSB + 'n', 'o', 't', ' ', 'a', 'g', 'a', 'i', 'n', + 0, 5, // Username MSB+LSB + 'm', 'o', 'c', 'h', 'i', + 0, 4, // Password MSB+LSB + ',', '.', '/', ';', + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 44, + }, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Clean: true, + Keepalive: 120, + ClientIdentifier: "zen", + UsernameFlag: true, + PasswordFlag: true, + Username: []byte("mochi"), + Password: []byte(",./;"), + WillFlag: true, + WillTopic: "lwt", + WillPayload: []byte("not again"), + WillQos: 1, + }, + }, + }, + { + Case: TConnectZeroByteUsername, + Desc: "username flag but 0 byte username", + Group: "decode", + RawBytes: []byte{ + Connect << 4, 23, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 5, // Protocol Version + 130, // Packet Flags + 0, 30, // Keepalive + 5, // length + 17, 0, 0, 0, 120, // Session Expiry Interval (17) + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 0, 0, // Username MSB+LSB + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 23, + }, + 
ProtocolVersion: 5, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Clean: true, + Keepalive: 30, + ClientIdentifier: "zen", + Username: []byte{}, + UsernameFlag: true, + }, + Properties: Properties{ + SessionExpiryInterval: uint32(120), + SessionExpiryIntervalFlag: true, + }, + }, + }, + + // Fail States + { + Case: TConnectMalProtocolName, + Desc: "malformed protocol name", + Group: "decode", + FailFirst: ErrMalformedProtocolName, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 7, // Protocol Name - MSB+LSB + 'M', 'Q', 'I', 's', 'd', // Protocol Name + }, + }, + { + Case: TConnectMalProtocolVersion, + Desc: "malformed protocol version", + Group: "decode", + FailFirst: ErrMalformedProtocolVersion, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + }, + }, + { + Case: TConnectMalFlags, + Desc: "malformed flags", + Group: "decode", + FailFirst: ErrMalformedFlags, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + + }, + }, + { + Case: TConnectMalKeepalive, + Desc: "malformed keepalive", + Group: "decode", + FailFirst: ErrMalformedKeepalive, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0, // Flags + }, + }, + { + Case: TConnectMalClientID, + Desc: "malformed client id", + Group: "decode", + FailFirst: ErrClientIdentifierNotValid, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0, // Flags + 0, 20, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', // Client ID "zen" + }, + }, + { + Case: TConnectMalWillTopic, + Desc: "malformed will topic", + Group: "decode", + FailFirst: ErrMalformedWillTopic, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 14, // Flags + 0, 20, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 0, 6, // Will Topic - MSB+LSB + 'l', + }, + }, + { + Case: TConnectMalWillFlag, + Desc: "malformed will flag", + Group: "decode", + FailFirst: ErrMalformedWillPayload, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 14, // Flags + 0, 20, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 0, 3, // Will Topic - MSB+LSB + 'l', 'w', 't', + 0, 9, // Will Message MSB+LSB + 'n', 'o', 't', ' ', 'a', + }, + }, + { + Case: TConnectMalUsername, + Desc: "malformed username", + Group: "decode", + FailFirst: ErrMalformedUsername, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 206, // Flags + 0, 20, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 0, 3, // Will Topic - MSB+LSB + 'l', 'w', 't', + 0, 9, // Will Message MSB+LSB + 'n', 'o', 't', ' ', 'a', 'g', 'a', 'i', 'n', + 0, 5, // Username MSB+LSB + 'm', 'o', 'c', + }, + }, + + { + Case: TConnectInvalidFlagNoUsername, + Desc: "username flag with no username bytes", + Group: "decode", + FailFirst: ErrProtocolViolationFlagNoUsername, + RawBytes: []byte{ + Connect << 4, 17, // Fixed header + 0, 4, // Protocol 
Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 5, // Protocol Version + 130, // Flags + 0, 20, // Keepalive + 0, + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + }, + }, + { + Case: TConnectMalPassword, + Desc: "malformed password", + Group: "decode", + FailFirst: ErrMalformedPassword, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 206, // Flags + 0, 20, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 0, 3, // Will Topic - MSB+LSB + 'l', 'w', 't', + 0, 9, // Will Message MSB+LSB + 'n', 'o', 't', ' ', 'a', 'g', 'a', 'i', 'n', + 0, 5, // Username MSB+LSB + 'm', 'o', 'c', 'h', 'i', + 0, 4, // Password MSB+LSB + ',', '.', + }, + }, + { + Case: TConnectMalFixedHeader, + Desc: "malformed fixedheader oversize", + Group: "decode", + FailFirst: ErrMalformedProtocolName, // packet test doesn't test fixedheader oversize + RawBytes: []byte{ + Connect << 4, 255, 255, 255, 255, 255, // Fixed header + }, + }, + { + Case: TConnectMalReservedBit, + Desc: "reserved bit not 0", + Group: "nodecode", + FailFirst: ErrProtocolViolation, + RawBytes: []byte{ + Connect << 4, 15, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 1, // Packet Flags + 0, 45, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', + }, + }, + { + Case: TConnectMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Connect << 4, 47, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 5, // Protocol Version + 14, // Packet Flags + 0, 30, // Keepalive + 10, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + { + Case: TConnectMalWillProperties, + Desc: "malformed will properties", + Group: "decode", + FailFirst: ErrMalformedWillProperties, + RawBytes: []byte{ + Connect << 4, 47, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 5, // Protocol Version + 14, // Packet Flags + 0, 30, // Keepalive + 10, // length + 17, 0, 0, 0, 120, // Session Expiry Interval (17) + 39, 0, 0, 125, 0, // Maximum Packet Size (39) + 0, 3, // Client ID - MSB+LSB + 'z', 'e', 'n', // Client ID "zen" + 5, // will properties length + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + + // Validation Tests + { + Case: TConnectInvalidProtocolName, + Desc: "invalid protocol name", + Group: "validate", + Expect: ErrProtocolViolationProtocolName, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + Connect: ConnectParams{ + ProtocolName: []byte("stuff"), + }, + }, + }, + { + Case: TConnectInvalidProtocolVersion, + Desc: "invalid protocol version", + Group: "validate", + Expect: ErrProtocolViolationProtocolVersion, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 2, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + }, + }, + }, + { + Case: TConnectInvalidProtocolVersion2, + Desc: "invalid protocol version", + Group: "validate", + Expect: ErrProtocolViolationProtocolVersion, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 2, + Connect: ConnectParams{ + ProtocolName: []byte("MQIsdp"), + }, + }, + }, + { + Case: TConnectInvalidReservedBit, + Desc: "reserved bit not 0", + Group: "validate", + Expect: ErrProtocolViolationReservedBit, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, 
+ ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + }, + ReservedBit: 1, + }, + }, + { + Case: TConnectInvalidClientIDTooLong, + Desc: "client id too long", + Group: "validate", + Expect: ErrClientIdentifierNotValid, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + ClientIdentifier: func() string { + return string(make([]byte, 65536)) + }(), + }, + }, + }, + { + Case: TConnectInvalidUsernameNoFlag, + Desc: "has username but no flag", + Group: "validate", + Expect: ErrProtocolViolationUsernameNoFlag, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Username: []byte("username"), + }, + }, + }, + { + Case: TConnectInvalidFlagNoPassword, + Desc: "has password flag but no password", + Group: "validate", + Expect: ErrProtocolViolationFlagNoPassword, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + PasswordFlag: true, + }, + }, + }, + { + Case: TConnectInvalidPasswordNoFlag, + Desc: "has password flag but no password", + Group: "validate", + Expect: ErrProtocolViolationPasswordNoFlag, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Password: []byte("password"), + }, + }, + }, + { + Case: TConnectInvalidUsernameTooLong, + Desc: "username too long", + Group: "validate", + Expect: ErrProtocolViolationUsernameTooLong, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + UsernameFlag: true, + Username: func() []byte { + return make([]byte, 65536) + }(), + }, + }, + }, + { + Case: TConnectInvalidPasswordTooLong, + Desc: "password too long", + Group: "validate", + Expect: ErrProtocolViolationPasswordTooLong, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + UsernameFlag: true, + Username: []byte{}, + PasswordFlag: true, + Password: func() []byte { + return make([]byte, 65536) + }(), + }, + }, + }, + { + Case: TConnectInvalidWillFlagNoPayload, + Desc: "will flag no payload", + Group: "validate", + Expect: ErrProtocolViolationWillFlagNoPayload, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + WillFlag: true, + }, + }, + }, + { + Case: TConnectInvalidWillFlagQosOutOfRange, + Desc: "will flag no payload", + Group: "validate", + Expect: ErrProtocolViolationQosOutOfRange, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + WillFlag: true, + WillTopic: "a/b/c", + WillPayload: []byte{'b'}, + WillQos: 4, + }, + }, + }, + { + Case: TConnectInvalidWillSurplusRetain, + Desc: "no will flag surplus retain", + Group: "validate", + Expect: ErrProtocolViolationWillFlagSurplusRetain, + Packet: &Packet{ + FixedHeader: FixedHeader{Type: Connect}, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + WillRetain: true, + }, + }, + }, + + // Spec Tests + { + Case: TConnectSpecInvalidUTF8D800, + Desc: "invalid utf8 string (a) - code point U+D800", + Group: "decode", + FailFirst: ErrClientIdentifierNotValid, + RawBytes: []byte{ + 
Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0, // Flags + 0, 20, // Keepalive + 0, 4, // Client ID - MSB+LSB + 'e', 0xed, 0xa0, 0x80, // Client id bearing U+D800 + }, + }, + { + Case: TConnectSpecInvalidUTF8DFFF, + Desc: "invalid utf8 string (b) - code point U+DFFF", + Group: "decode", + FailFirst: ErrClientIdentifierNotValid, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0, // Flags + 0, 20, // Keepalive + 0, 4, // Client ID - MSB+LSB + 'e', 0xed, 0xa3, 0xbf, // Client id bearing U+D8FF + }, + }, + + { + Case: TConnectSpecInvalidUTF80000, + Desc: "invalid utf8 string (c) - code point U+0000", + Group: "decode", + FailFirst: ErrClientIdentifierNotValid, + RawBytes: []byte{ + Connect << 4, 0, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0, // Flags + 0, 20, // Keepalive + 0, 3, // Client ID - MSB+LSB + 'e', 0xc0, 0x80, // Client id bearing U+0000 + }, + }, + + { + Case: TConnectSpecInvalidUTF8NoSkip, + Desc: "utf8 string must not skip or strip code point U+FEFF", + //Group: "decode", + //FailFirst: ErrMalformedClientID, + RawBytes: []byte{ + Connect << 4, 18, // Fixed header + 0, 4, // Protocol Name - MSB+LSB + 'M', 'Q', 'T', 'T', // Protocol Name + 4, // Protocol Version + 0, // Flags + 0, 20, // Keepalive + 0, 6, // Client ID - MSB+LSB + 'e', 'b', 0xEF, 0xBB, 0xBF, 'd', // Client id bearing U+FEFF + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connect, + Remaining: 16, + }, + ProtocolVersion: 4, + Connect: ConnectParams{ + ProtocolName: []byte("MQTT"), + Keepalive: 20, + ClientIdentifier: string([]byte{'e', 'b', 0xEF, 0xBB, 0xBF, 'd'}), + }, + }, + }, + }, + Connack: { + { + Case: TConnackAcceptedNoSession, + Desc: "accepted, no session", + Primary: true, + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 0, // No existing session + CodeSuccess.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + SessionPresent: false, + ReasonCode: CodeSuccess.Code, + }, + }, + { + Case: TConnackAcceptedSessionExists, + Desc: "accepted, session exists", + Primary: true, + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 1, // Session present + CodeSuccess.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + SessionPresent: true, + ReasonCode: CodeSuccess.Code, + }, + }, + { + Case: TConnackAcceptedAdjustedExpiryInterval, + Desc: "accepted, no session, adjusted expiry interval mqtt5", + Primary: true, + RawBytes: []byte{ + Connack << 4, 8, // fixed header + 0, // Session present + CodeSuccess.Code, + 5, // length + 17, 0, 0, 0, 120, // Session Expiry Interval (17) + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 8, + }, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + SessionExpiryInterval: uint32(120), + SessionExpiryIntervalFlag: true, + }, + }, + }, + { + Case: TConnackAcceptedMqtt5, + Desc: "accepted no session mqtt5", + Primary: true, + RawBytes: []byte{ + Connack << 4, 124, // fixed header + 0, // No existing session + CodeSuccess.Code, + // Properties + 121, // length + 17, 0, 0, 0, 120, // Session Expiry Interval (17) + 18, 0, 8, 'm', 'o', 'c', 'h', 'i', '-', 'v', '5', // Assigned Client ID (18) + 19, 0, 20, // Server Keep Alive (19) + 21, 0, 5, 'S', 
'H', 'A', '-', '1', // Authentication Method (21) + 22, 0, 9, 'a', 'u', 't', 'h', '-', 'd', 'a', 't', 'a', // Authentication Data (22) + 26, 0, 8, 'r', 'e', 's', 'p', 'o', 'n', 's', 'e', // Response Info (26) + 28, 0, 7, 'm', 'o', 'c', 'h', 'i', '-', '2', // Server Reference (28) + 31, 0, 6, 'r', 'e', 'a', 's', 'o', 'n', // Reason String (31) + 33, 1, 244, // Receive Maximum (33) + 34, 3, 231, // Topic Alias Maximum (34) + 36, 1, // Maximum Qos (36) + 37, 1, // Retain Available (37) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + 38, // User Properties (38) + 0, 4, 'k', 'e', 'y', '2', + 0, 6, 'v', 'a', 'l', 'u', 'e', '2', + 39, 0, 0, 125, 0, // Maximum Packet Size (39) + 40, 1, // Wildcard Subscriptions Available (40) + 41, 1, // Subscription ID Available (41) + 42, 1, // Shared Subscriptions Available (42) + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 124, + }, + SessionPresent: false, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + SessionExpiryInterval: uint32(120), + SessionExpiryIntervalFlag: true, + AssignedClientID: "mochi-v5", + ServerKeepAlive: uint16(20), + ServerKeepAliveFlag: true, + AuthenticationMethod: "SHA-1", + AuthenticationData: []byte("auth-data"), + ResponseInfo: "response", + ServerReference: "mochi-2", + ReasonString: "reason", + ReceiveMaximum: uint16(500), + TopicAliasMaximum: uint16(999), + MaximumQos: byte(1), + MaximumQosFlag: true, + RetainAvailable: byte(1), + RetainAvailableFlag: true, + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + { + Key: "key2", + Val: "value2", + }, + }, + MaximumPacketSize: uint32(32000), + WildcardSubAvailable: byte(1), + WildcardSubAvailableFlag: true, + SubIDAvailable: byte(1), + SubIDAvailableFlag: true, + SharedSubAvailable: byte(1), + SharedSubAvailableFlag: true, + }, + }, + }, + { + Case: TConnackMinMqtt5, + Desc: "accepted min properties mqtt5", + Primary: true, + RawBytes: []byte{ + Connack << 4, 13, // fixed header + 1, // existing session + CodeSuccess.Code, + 10, // Properties length + 18, 0, 5, 'm', 'o', 'c', 'h', 'i', // Assigned Client ID (18) + 36, 1, // Maximum Qos (36) + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 13, + }, + SessionPresent: true, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + AssignedClientID: "mochi", + MaximumQos: byte(1), + MaximumQosFlag: true, + }, + }, + }, + { + Case: TConnackMinCleanMqtt5, + Desc: "accepted min properties mqtt5b", + Primary: true, + RawBytes: []byte{ + Connack << 4, 3, // fixed header + 0, // existing session + CodeSuccess.Code, + 0, // Properties length + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 16, + }, + SessionPresent: false, + ReasonCode: CodeSuccess.Code, + }, + }, + { + Case: TConnackServerKeepalive, + Desc: "server set keepalive", + Primary: true, + RawBytes: []byte{ + Connack << 4, 6, // fixed header + 1, // existing session + CodeSuccess.Code, + 3, // Properties length + 19, 0, 10, // server keepalive + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 6, + }, + SessionPresent: true, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + ServerKeepAlive: uint16(10), + ServerKeepAliveFlag: true, + }, + }, + }, + { + Case: TConnackInvalidMinMqtt5, + Desc: "failure min properties mqtt5", + Primary: true, + RawBytes: append([]byte{ + Connack << 4, 
23, // fixed header + 0, // No existing session + ErrUnspecifiedError.Code, + // Properties + 20, // length + 31, 0, 17, // Reason String (31) + }, []byte(ErrUnspecifiedError.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 23, + }, + SessionPresent: false, + ReasonCode: ErrUnspecifiedError.Code, + Properties: Properties{ + ReasonString: ErrUnspecifiedError.Reason, + }, + }, + }, + + { + Case: TConnackProtocolViolationNoSession, + Desc: "miscellaneous protocol violation", + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 0, // Session present + ErrProtocolViolation.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + ReasonCode: ErrProtocolViolation.Code, + }, + }, + { + Case: TConnackBadProtocolVersion, + Desc: "bad protocol version", + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 1, // Session present + ErrProtocolViolationProtocolVersion.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + SessionPresent: true, + ReasonCode: ErrProtocolViolationProtocolVersion.Code, + }, + }, + { + Case: TConnackBadClientID, + Desc: "bad client id", + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 1, // Session present + ErrClientIdentifierNotValid.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + SessionPresent: true, + ReasonCode: ErrClientIdentifierNotValid.Code, + }, + }, + { + Case: TConnackServerUnavailable, + Desc: "server unavailable", + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 1, // Session present + ErrServerUnavailable.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + SessionPresent: true, + ReasonCode: ErrServerUnavailable.Code, + }, + }, + { + Case: TConnackBadUsernamePassword, + Desc: "bad username or password", + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 1, // Session present + ErrBadUsernameOrPassword.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + SessionPresent: true, + ReasonCode: ErrBadUsernameOrPassword.Code, + }, + }, + { + Case: TConnackBadUsernamePasswordNoSession, + Desc: "bad username or password no session", + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 0, // No session present + Err3NotAuthorized.Code, // use v3 remapping + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + ReasonCode: Err3NotAuthorized.Code, + }, + }, + { + Case: TConnackMqtt5BadUsernamePasswordNoSession, + Desc: "mqtt5 bad username or password no session", + RawBytes: []byte{ + Connack << 4, 3, // fixed header + 0, // No session present + ErrBadUsernameOrPassword.Code, + 0, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + ReasonCode: ErrBadUsernameOrPassword.Code, + }, + }, + + { + Case: TConnackNotAuthorised, + Desc: "not authorised", + RawBytes: []byte{ + Connack << 4, 2, // fixed header + 1, // Session present + ErrNotAuthorized.Code, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 2, + }, + SessionPresent: true, + ReasonCode: ErrNotAuthorized.Code, + }, + }, + { + Case: TConnackDropProperties, + Desc: "drop oversize properties", + Group: "encode", + RawBytes: []byte{ + Connack << 4, 40, // fixed header + 0, // No existing session + CodeSuccess.Code, + 19, // length + 28, 0, 7, 'm', 'o', 'c', 'h', 'i', '-', '2', 
// Server Reference (28) + 31, 0, 6, 'r', 'e', 'a', 's', 'o', 'n', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + ActualBytes: []byte{ + Connack << 4, 13, // fixed header + 0, // No existing session + CodeSuccess.Code, + 10, // length + 28, 0, 7, 'm', 'o', 'c', 'h', 'i', '-', '2', // Server Reference (28) + }, + Packet: &Packet{ + Mods: Mods{ + MaxSize: 5, + }, + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 40, + }, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + ReasonString: "reason", + ServerReference: "mochi-2", + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TConnackDropPropertiesPartial, + Desc: "drop oversize properties partial", + Group: "encode", + RawBytes: []byte{ + Connack << 4, 40, // fixed header + 0, // No existing session + CodeSuccess.Code, + 19, // length + 28, 0, 7, 'm', 'o', 'c', 'h', 'i', '-', '2', // Server Reference (28) + 31, 0, 6, 'r', 'e', 'a', 's', 'o', 'n', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + ActualBytes: []byte{ + Connack << 4, 22, // fixed header + 0, // No existing session + CodeSuccess.Code, + 19, // length + 28, 0, 7, 'm', 'o', 'c', 'h', 'i', '-', '2', // Server Reference (28) + 31, 0, 6, 'r', 'e', 'a', 's', 'o', 'n', // Reason String (31) + }, + Packet: &Packet{ + Mods: Mods{ + MaxSize: 18, + }, + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Connack, + Remaining: 40, + }, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + ReasonString: "reason", + ServerReference: "mochi-2", + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + // Fail States + { + Case: TConnackMalSessionPresent, + Desc: "malformed session present", + Group: "decode", + FailFirst: ErrMalformedSessionPresent, + RawBytes: []byte{ + Connect << 4, 2, // Fixed header + }, + }, + { + Case: TConnackMalReturnCode, + Desc: "malformed bad return Code", + Group: "decode", + //Primary: true, + FailFirst: ErrMalformedReasonCode, + RawBytes: []byte{ + Connect << 4, 2, // Fixed header + 0, + }, + }, + { + Case: TConnackMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Connack << 4, 40, // fixed header + 0, // No existing session + CodeSuccess.Code, + 19, // length + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + + Publish: { + { + Case: TPublishNoPayload, + Desc: "no payload", + Primary: true, + RawBytes: []byte{ + Publish << 4, 7, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 7, + }, + TopicName: "a/b/c", + Payload: []byte{}, + }, + }, + { + Case: TPublishBasic, + Desc: "basic", + Primary: true, + RawBytes: []byte{ + Publish << 4, 18, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 18, + }, + TopicName: "a/b/c", + Payload: []byte("hello mochi"), + }, + }, + + { + Case: TPublishMqtt5, + Desc: "mqtt v5", + Primary: true, + RawBytes: []byte{ + Publish << 4, 77, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 58, // length + 1, 1, // Payload Format (1) + 
2, 0, 0, 0, 2, // Message Expiry (2) + 3, 0, 10, 't', 'e', 'x', 't', '/', 'p', 'l', 'a', 'i', 'n', // Content Type (3) + 8, 0, 5, 'a', '/', 'b', '/', 'c', // Response Topic (8) + 9, 0, 4, 'd', 'a', 't', 'a', // Correlations Data (9) + 11, 202, 212, 19, // Subscription Identifier (11) + 35, 0, 3, // Topic Alias (35) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 77, + }, + TopicName: "a/b/c", + Properties: Properties{ + PayloadFormat: byte(1), // UTF-8 Format + PayloadFormatFlag: true, + MessageExpiryInterval: uint32(2), + ContentType: "text/plain", + ResponseTopic: "a/b/c", + CorrelationData: []byte("data"), + SubscriptionIdentifier: []int{322122}, + TopicAlias: uint16(3), + TopicAliasFlag: true, + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + Payload: []byte("hello mochi"), + }, + }, + { + Case: TPublishBasicTopicAliasOnly, + Desc: "mqtt v5 topic alias only", + Primary: true, + RawBytes: []byte{ + Publish << 4, 17, // Fixed header + 0, 0, // Topic Name - LSB+MSB + 3, // length + 35, 0, 1, // Topic Alias (35) + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 17, + }, + Properties: Properties{ + TopicAlias: 1, + TopicAliasFlag: true, + }, + Payload: []byte("hello mochi"), + }, + }, + { + Case: TPublishBasicMqtt5, + Desc: "mqtt basic v5", + Primary: true, + RawBytes: []byte{ + Publish << 4, 22, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 3, // length + 35, 0, 1, // Topic Alias (35) + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 22, + }, + TopicName: "a/b/c", + Properties: Properties{ + TopicAlias: uint16(1), + TopicAliasFlag: true, + }, + Payload: []byte("hello mochi"), + }, + }, + + { + Case: TPublishQos1, + Desc: "qos:1, packet id", + Primary: true, + RawBytes: []byte{ + Publish<<4 | 1<<1, 20, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 7, // Packet ID - LSB+MSB + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Qos: 1, + Remaining: 20, + }, + TopicName: "a/b/c", + Payload: []byte("hello mochi"), + PacketID: 7, + }, + }, + { + Case: TPublishQos1Mqtt5, + Desc: "mqtt v5", + Primary: true, + RawBytes: []byte{ + Publish<<4 | 1<<1, 37, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 7, // Packet ID - LSB+MSB + // Properties + 16, // length + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 37, + Qos: 1, + }, + PacketID: 7, + TopicName: "a/b/c", + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + Payload: []byte("hello mochi"), + }, + }, + + { + Case: TPublishQos1Dup, + Desc: "qos:1, dup:true, packet id", + Primary: true, + RawBytes: []byte{ + Publish<<4 | 2 | 8, 20, // Fixed header + 0, 5, // Topic Name - LSB+MSB 
+ 'a', '/', 'b', '/', 'c', // Topic Name + 0, 7, // Packet ID - LSB+MSB + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Qos: 1, + Remaining: 20, + Dup: true, + }, + TopicName: "a/b/c", + Payload: []byte("hello mochi"), + PacketID: 7, + }, + }, + { + Case: TPublishQos1NoPayload, + Desc: "qos:1, packet id, no payload", + Primary: true, + RawBytes: []byte{ + Publish<<4 | 2, 9, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'y', '/', 'u', '/', 'i', // Topic Name + 0, 7, // Packet ID - LSB+MSB + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Qos: 1, + Remaining: 9, + }, + TopicName: "y/u/i", + PacketID: 7, + Payload: []byte{}, + }, + }, + { + Case: TPublishQos2, + Desc: "qos:2, packet id", + Primary: true, + RawBytes: []byte{ + Publish<<4 | 2<<1, 14, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 7, // Packet ID - LSB+MSB + 'h', 'e', 'l', 'l', 'o', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Qos: 2, + Remaining: 14, + }, + TopicName: "a/b/c", + Payload: []byte("hello"), + PacketID: 7, + }, + }, + { + Case: TPublishQos2Mqtt5, + Desc: "mqtt v5", + Primary: true, + RawBytes: []byte{ + Publish<<4 | 2<<1, 37, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 7, // Packet ID - LSB+MSB + // Properties + 16, // length + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 37, + Qos: 2, + }, + PacketID: 7, + TopicName: "a/b/c", + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + Payload: []byte("hello mochi"), + }, + }, + { + Case: TPublishSubscriberIdentifier, + Desc: "subscription identifiers", + Primary: true, + RawBytes: []byte{ + Publish << 4, 23, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 4, // properties length + 11, 2, // Subscription Identifier (11) + 11, 3, // Subscription Identifier (11) + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 23, + }, + TopicName: "a/b/c", + Properties: Properties{ + SubscriptionIdentifier: []int{2, 3}, + }, + Payload: []byte("hello mochi"), + }, + }, + + { + Case: TPublishQos2Upgraded, + Desc: "qos:2, upgraded from publish to qos2 sub", + Primary: true, + RawBytes: []byte{ + Publish<<4 | 2<<1, 20, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 1, // Packet ID - LSB+MSB + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Qos: 2, + Remaining: 18, + }, + TopicName: "a/b/c", + Payload: []byte("hello mochi"), + PacketID: 1, + }, + }, + { + Case: TPublishRetain, + Desc: "retain", + RawBytes: []byte{ + Publish<<4 | 1<<0, 18, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Retain: true, + }, + TopicName: "a/b/c", + Payload: []byte("hello mochi"), + }, + }, + { + Case: TPublishRetainMqtt5, + Desc: 
"retain mqtt5", + RawBytes: []byte{ + Publish<<4 | 1<<0, 19, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, // properties length + 'h', 'e', 'l', 'l', 'o', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Publish, + Retain: true, + Remaining: 19, + }, + TopicName: "a/b/c", + Properties: Properties{}, + Payload: []byte("hello mochi"), + }, + }, + { + Case: TPublishDup, + Desc: "dup", + RawBytes: []byte{ + Publish<<4 | 8, 10, // Fixed header + 0, 3, // Topic Name - LSB+MSB + 'a', '/', 'b', // Topic Name + 'h', 'e', 'l', 'l', 'o', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Dup: true, + }, + TopicName: "a/b", + Payload: []byte("hello"), + }, + }, + + // Fail States + { + Case: TPublishMalTopicName, + Desc: "malformed topic name", + Group: "decode", + FailFirst: ErrMalformedTopic, + RawBytes: []byte{ + Publish << 4, 7, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', + 0, 11, // Packet ID - LSB+MSB + }, + }, + { + Case: TPublishMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Publish<<4 | 2, 7, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'x', '/', 'y', '/', 'z', // Topic Name + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TPublishMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Publish << 4, 35, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 16, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + + // Copy tests + { + Case: TPublishCopyBasic, + Desc: "basic copyable", + Group: "copy", + RawBytes: []byte{ + Publish << 4, 18, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'z', '/', 'e', '/', 'n', // Topic Name + 'm', 'o', 'c', 'h', 'i', ' ', 'm', 'o', 'c', 'h', 'i', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Dup: true, + Retain: true, + Qos: 1, + }, + TopicName: "z/e/n", + Payload: []byte("mochi mochi"), + }, + }, + + // Spec tests + { + Case: TPublishSpecQos0NoPacketID, + Desc: "packet id must be 0 if qos is 0 (a)", + Group: "encode", + // this version tests for correct byte array mutuation. + // this does not check if -incoming- Packets are parsed as correct, + // it is impossible for the parser to determine if the payload start is incorrect. + RawBytes: []byte{ + Publish << 4, 12, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 3, // Packet ID - LSB+MSB + 'h', 'e', 'l', 'l', 'o', // Payload + }, + ActualBytes: []byte{ + Publish << 4, 12, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + // Packet ID is removed. 
+ 'h', 'e', 'l', 'l', 'o', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 12, + }, + TopicName: "a/b/c", + Payload: []byte("hello"), + }, + }, + { + Case: TPublishSpecQosMustPacketID, + Desc: "no packet id with qos > 0", + Group: "encode", + Expect: ErrProtocolViolationNoPacketID, + RawBytes: []byte{ + Publish<<4 | 2, 14, // Fixed header + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, 0, // Packet ID - LSB+MSB + 'h', 'e', 'l', 'l', 'o', // Payload + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Qos: 1, + }, + TopicName: "a/b/c", + Payload: []byte("hello"), + PacketID: 0, + }, + }, + { + Case: TPublishDropOversize, + Desc: "drop oversized publish packet", + Group: "encode", + FailFirst: ErrPacketTooLarge, + RawBytes: []byte{ + Publish << 4, 10, // Fixed header + 0, 3, // Topic Name - LSB+MSB + 'a', '/', 'b', // Topic Name + 'h', 'e', 'l', 'l', 'o', // Payload + }, + Packet: &Packet{ + Mods: Mods{ + MaxSize: 2, + }, + FixedHeader: FixedHeader{ + Type: Publish, + }, + TopicName: "a/b", + Payload: []byte("hello"), + }, + }, + + // Validation Tests + { + Case: TPublishInvalidQos0NoPacketID, + Desc: "packet id must be 0 if qos is 0 (b)", + Group: "validate", + Expect: ErrProtocolViolationSurplusPacketID, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Remaining: 12, + Qos: 0, + }, + TopicName: "a/b/c", + Payload: []byte("hello"), + PacketID: 3, + }, + }, + { + Case: TPublishInvalidQosMustPacketID, + Desc: "no packet id with qos > 0", + Group: "validate", + Expect: ErrProtocolViolationNoPacketID, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + Qos: 1, + }, + PacketID: 0, + }, + }, + { + Case: TPublishInvalidSurplusSubID, + Desc: "surplus subscription identifier", + Group: "validate", + Expect: ErrProtocolViolationSurplusSubID, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + }, + Properties: Properties{ + SubscriptionIdentifier: []int{1}, + }, + TopicName: "a/b", + }, + }, + { + Case: TPublishInvalidSurplusWildcard, + Desc: "topic contains wildcards", + Group: "validate", + Expect: ErrProtocolViolationSurplusWildcard, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + }, + TopicName: "a/+", + }, + }, + { + Case: TPublishInvalidSurplusWildcard2, + Desc: "topic contains wildcards 2", + Group: "validate", + Expect: ErrProtocolViolationSurplusWildcard, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + }, + TopicName: "a/#", + }, + }, + { + Case: TPublishInvalidNoTopic, + Desc: "no topic or alias specified", + Group: "validate", + Expect: ErrProtocolViolationNoTopic, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + }, + }, + }, + { + Case: TPublishInvalidExcessTopicAlias, + Desc: "topic alias over maximum", + Group: "validate", + Expect: ErrTopicAliasInvalid, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + }, + Properties: Properties{ + TopicAlias: 1025, + }, + TopicName: "a/b", + }, + }, + { + Case: TPublishInvalidTopicAlias, + Desc: "topic alias flag and no alias", + Group: "validate", + Expect: ErrTopicAliasInvalid, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + }, + Properties: Properties{ + TopicAliasFlag: true, + TopicAlias: 0, + }, + TopicName: "a/b/", + }, + }, + { + Case: TPublishSpecDenySysTopic, + Desc: "deny publishing to $SYS topics", + Group: "validate", + Expect: CodeSuccess, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Publish, + }, + 
TopicName: "$SYS/any", + Payload: []byte("y"), + }, + RawBytes: []byte{ + Publish << 4, 11, // Fixed header + 0, 5, // Topic Name - LSB+MSB + '$', 'S', 'Y', 'S', '/', 'a', 'n', 'y', // Topic Name + 'y', // Payload + }, + }, + }, + + Puback: { + { + Case: TPuback, + Desc: "puback", + Primary: true, + RawBytes: []byte{ + Puback << 4, 2, // Fixed header + 0, 7, // Packet ID - LSB+MSB + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Puback, + Remaining: 2, + }, + PacketID: 7, + }, + }, + { + Case: TPubackMqtt5, + Desc: "mqtt5", + Primary: true, + RawBytes: []byte{ + Puback << 4, 20, // Fixed header + 0, 7, // Packet ID - LSB+MSB + CodeGrantedQos0.Code, // Reason Code + 16, // Properties Length + // 31, 0, 6, 'r', 'e', 'a', 's', 'o', 'n', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Puback, + Remaining: 20, + }, + PacketID: 7, + ReasonCode: CodeGrantedQos0.Code, + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TPubackUnexpectedError, + Desc: "unexpected error", + Group: "decode", + RawBytes: []byte{ + Puback << 4, 29, // Fixed header + 0, 7, // Packet ID - LSB+MSB + ErrPayloadFormatInvalid.Code, // Reason Code + 25, // Properties Length + 31, 0, 22, 'p', 'a', 'y', 'l', 'o', 'a', 'd', + ' ', 'f', 'o', 'r', 'm', 'a', 't', + ' ', 'i', 'n', 'v', 'a', 'l', 'i', 'd', // Reason String (31) + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Puback, + Remaining: 28, + }, + PacketID: 7, + ReasonCode: ErrPayloadFormatInvalid.Code, + Properties: Properties{ + ReasonString: ErrPayloadFormatInvalid.Reason, + }, + }, + }, + + // Fail states + { + Case: TPubackMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Puback << 4, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TPubackMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Puback << 4, 20, // Fixed header + 0, 7, // Packet ID - LSB+MSB + CodeGrantedQos0.Code, // Reason Code + 16, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + Pubrec: { + { + Case: TPubrec, + Desc: "pubrec", + Primary: true, + RawBytes: []byte{ + Pubrec << 4, 2, // Fixed header + 0, 7, // Packet ID - LSB+MSB + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pubrec, + Remaining: 2, + }, + PacketID: 7, + }, + }, + { + Case: TPubrecMqtt5, + Desc: "pubrec mqtt5", + Primary: true, + RawBytes: []byte{ + Pubrec << 4, 20, // Fixed header + 0, 7, // Packet ID - LSB+MSB + CodeSuccess.Code, // Reason Code + 16, // Properties Length + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Pubrec, + Remaining: 20, + }, + PacketID: 7, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TPubrecMqtt5IDInUse, + Desc: "packet id in use mqtt5", + Primary: true, + RawBytes: []byte{ + Pubrec << 4, 47, // Fixed header + 0, 7, // Packet ID - LSB+MSB + ErrPacketIdentifierInUse.Code, // Reason Code + 43, // Properties Length + 31, 0, 24, 'p', 'a', 'c', 'k', 'e', 't', + ' ', 'i', 'd', 'e', 'n', 't', 'i', 'f', 'i', 'e', 'r', + ' ', 'i', 'n', 
+ ' ', 'u', 's', 'e', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Pubrec, + Remaining: 31, + }, + PacketID: 7, + ReasonCode: ErrPacketIdentifierInUse.Code, + Properties: Properties{ + ReasonString: ErrPacketIdentifierInUse.Reason, + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TPubrecMalReasonCode, + Desc: "malformed reason code", + Group: "decode", + FailFirst: ErrMalformedReasonCode, + RawBytes: []byte{ + Pubrec << 4, 31, // Fixed header + 0, 7, // Packet ID - LSB+MSB + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + // Validation + { + Case: TPubrecInvalidReason, + Desc: "invalid reason code", + Group: "validate", + FailFirst: ErrProtocolViolationInvalidReason, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pubrec, + }, + PacketID: 7, + ReasonCode: ErrConnectionRateExceeded.Code, + }, + }, + // Fail states + { + Case: TPubrecMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Pubrec << 4, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TPubrecMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Pubrec << 4, 31, // Fixed header + 0, 7, // Packet ID - LSB+MSB + ErrPacketIdentifierInUse.Code, // Reason Code + 27, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + Pubrel: { + { + Case: TPubrel, + Desc: "pubrel", + Primary: true, + RawBytes: []byte{ + Pubrel<<4 | 1<<1, 2, // Fixed header + 0, 7, // Packet ID - LSB+MSB + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pubrel, + Remaining: 2, + Qos: 1, + }, + PacketID: 7, + }, + }, + { + Case: TPubrelMqtt5, + Desc: "pubrel mqtt5", + Primary: true, + RawBytes: []byte{ + Pubrel<<4 | 1<<1, 20, // Fixed header + 0, 7, // Packet ID - LSB+MSB + CodeSuccess.Code, // Reason Code + 16, // Properties Length + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Pubrel, + Remaining: 20, + Qos: 1, + }, + PacketID: 7, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TPubrelMqtt5AckNoPacket, + Desc: "mqtt5 no packet id ack", + Primary: true, + RawBytes: append([]byte{ + Pubrel<<4 | 1<<1, 34, // Fixed header + 0, 7, // Packet ID - LSB+MSB + ErrPacketIdentifierNotFound.Code, // Reason Code + 30, // Properties Length + 31, 0, byte(len(ErrPacketIdentifierNotFound.Reason)), + }, []byte(ErrPacketIdentifierNotFound.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Pubrel, + Remaining: 34, + Qos: 1, + }, + PacketID: 7, + ReasonCode: ErrPacketIdentifierNotFound.Code, + Properties: Properties{ + ReasonString: ErrPacketIdentifierNotFound.Reason, + }, + }, + }, + // Validation + { + Case: TPubrelInvalidReason, + Desc: "invalid reason code", + Group: "validate", + FailFirst: ErrProtocolViolationInvalidReason, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pubrel, + }, + PacketID: 7, + ReasonCode: ErrConnectionRateExceeded.Code, + }, + }, + // Fail states + { + Case: TPubrelMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Pubrel 
<< 4, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TPubrelMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Pubrel<<4 | 1<<1, 20, // Fixed header + 0, 7, // Packet ID - LSB+MSB + CodeSuccess.Code, // Reason Code + 16, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + Pubcomp: { + { + Case: TPubcomp, + Desc: "pubcomp", + Primary: true, + RawBytes: []byte{ + Pubcomp << 4, 2, // Fixed header + 0, 7, // Packet ID - LSB+MSB + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pubcomp, + Remaining: 2, + }, + PacketID: 7, + }, + }, + { + Case: TPubcompMqtt5, + Desc: "mqtt5", + Primary: true, + RawBytes: []byte{ + Pubcomp << 4, 20, // Fixed header + 0, 7, // Packet ID - LSB+MSB + CodeSuccess.Code, // Reason Code + 16, // Properties Length + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Pubcomp, + Remaining: 20, + }, + PacketID: 7, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TPubcompMqtt5AckNoPacket, + Desc: "mqtt5 no packet id ack", + Primary: true, + RawBytes: append([]byte{ + Pubcomp << 4, 34, // Fixed header + 0, 7, // Packet ID - LSB+MSB + ErrPacketIdentifierNotFound.Code, // Reason Code + 30, // Properties Length + 31, 0, byte(len(ErrPacketIdentifierNotFound.Reason)), + }, []byte(ErrPacketIdentifierNotFound.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Pubcomp, + Remaining: 34, + }, + PacketID: 7, + ReasonCode: ErrPacketIdentifierNotFound.Code, + Properties: Properties{ + ReasonString: ErrPacketIdentifierNotFound.Reason, + }, + }, + }, + // Validation + { + Case: TPubcompInvalidReason, + Desc: "invalid reason code", + Group: "validate", + FailFirst: ErrProtocolViolationInvalidReason, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pubcomp, + }, + ReasonCode: ErrConnectionRateExceeded.Code, + }, + }, + // Fail states + { + Case: TPubcompMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Pubcomp << 4, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TPubcompMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Pubcomp << 4, 34, // Fixed header + 0, 7, // Packet ID - LSB+MSB + ErrPacketIdentifierNotFound.Code, // Reason Code + 22, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + Subscribe: { + { + Case: TSubscribe, + Desc: "subscribe", + Primary: true, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 10, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0, // QoS + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Remaining: 10, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + {Filter: "a/b/c"}, + }, + }, + }, + { + Case: TSubscribeMany, + Desc: "many", + Primary: true, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 30, // Fixed header + 0, 15, // Packet ID - LSB+MSB + + 0, 3, // Topic Name - LSB+MSB + 'a', '/', 'b', // Topic Name + 0, // QoS + + 0, 11, // Topic Name - LSB+MSB + 'd', '/', 'e', '/', 'f', '/', 'g', '/', 'h', '/', 'i', // Topic Name + 1, // QoS + + 0, 5, // Topic Name - LSB+MSB + 'x', '/', 'y', '/', 'z', // Topic 
Name + 2, // QoS + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Remaining: 30, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + {Filter: "a/b", Qos: 0}, + {Filter: "d/e/f/g/h/i", Qos: 1}, + {Filter: "x/y/z", Qos: 2}, + }, + }, + }, + { + Case: TSubscribeMqtt5, + Desc: "mqtt5", + Primary: true, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 31, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 20, + 11, 202, 212, 19, // Subscription Identifier (11) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + + 0, 5, 'a', '/', 'b', '/', 'c', // Topic Name + 46, // subscription options + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Subscribe, + Remaining: 31, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + { + Filter: "a/b/c", + Qos: 2, + NoLocal: true, + RetainAsPublished: true, + RetainHandling: 2, + Identifier: 322122, + }, + }, + Properties: Properties{ + SubscriptionIdentifier: []int{322122}, + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TSubscribeRetainHandling1, + Desc: "retain handling 1", + Primary: true, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 11, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, // no properties + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0 | 1<<4, // subscription options + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Subscribe, + Remaining: 11, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + { + Filter: "a/b/c", + RetainHandling: 1, + }, + }, + }, + }, + { + Case: TSubscribeRetainHandling2, + Desc: "retain handling 2", + Primary: true, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 11, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, // no properties + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0 | 2<<4, // subscription options + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Subscribe, + Remaining: 11, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + { + Filter: "a/b/c", + RetainHandling: 2, + }, + }, + }, + }, + { + Case: TSubscribeRetainAsPublished, + Desc: "retain as published", + Primary: true, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 11, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, // no properties + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 0 | 1<<3, // subscription options + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Subscribe, + Remaining: 11, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + { + Filter: "a/b/c", + RetainAsPublished: true, + }, + }, + }, + }, + { + Case: TSubscribeInvalidFilter, + Desc: "invalid filter", + Group: "reference", + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + {Filter: "$SHARE/#", Identifier: 5}, + }, + }, + }, + { + Case: TSubscribeInvalidSharedNoLocal, + Desc: "shared and no local", + Group: "reference", + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + {Filter: "$SHARE/tmp/a/b/c", Identifier: 5, NoLocal: true}, + }, + }, + }, + + // Fail states + { + Case: TSubscribeMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + 
Case: TSubscribeMalTopic, + Desc: "malformed topic", + Group: "decode", + FailFirst: ErrMalformedTopic, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 2, // Fixed header + 0, 21, // Packet ID - LSB+MSB + + 0, 3, // Topic Name - LSB+MSB + 'a', '/', + }, + }, + { + Case: TSubscribeMalQos, + Desc: "malformed subscribe - qos", + Group: "decode", + FailFirst: ErrMalformedQos, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 2, // Fixed header + 0, 22, // Packet ID - LSB+MSB + + 0, 3, // Topic Name - LSB+MSB + 'j', '/', 'b', // Topic Name + + }, + }, + { + Case: TSubscribeMalQosRange, + Desc: "malformed qos out of range", + Group: "decode", + FailFirst: ErrProtocolViolationQosOutOfRange, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 2, // Fixed header + 0, 22, // Packet ID - LSB+MSB + + 0, 3, // Topic Name - LSB+MSB + 'c', '/', 'd', // Topic Name + 5, // QoS + + }, + }, + { + Case: TSubscribeMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Subscribe << 4, 11, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 4, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + + // Validation + { + Case: TSubscribeInvalidQosMustPacketID, + Desc: "no packet id with qos > 0", + Group: "validate", + Expect: ErrProtocolViolationNoPacketID, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Qos: 1, + }, + PacketID: 0, + Filters: Subscriptions{ + {Filter: "a/b"}, + }, + }, + }, + { + Case: TSubscribeInvalidNoFilters, + Desc: "no filters", + Group: "validate", + Expect: ErrProtocolViolationNoFilters, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Qos: 1, + }, + PacketID: 2, + }, + }, + + { + Case: TSubscribeInvalidIdentifierOversize, + Desc: "oversize identifier", + Group: "validate", + Expect: ErrProtocolViolationOversizeSubID, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Qos: 1, + }, + PacketID: 2, + Filters: Subscriptions{ + {Filter: "a/b", Identifier: 5}, + {Filter: "d/f", Identifier: 268435456}, + }, + }, + }, + + // Spec tests + { + Case: TSubscribeSpecQosMustPacketID, + Desc: "no packet id with qos > 0", + Group: "encode", + Expect: ErrProtocolViolationNoPacketID, + RawBytes: []byte{ + Subscribe<<4 | 1<<1, 10, // Fixed header + 0, 0, // Packet ID - LSB+MSB + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + 1, // QoS + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Subscribe, + Qos: 1, + Remaining: 10, + }, + Filters: Subscriptions{ + {Filter: "a/b/c", Qos: 1}, + }, + PacketID: 0, + }, + }, + }, + Suback: { + { + Case: TSuback, + Desc: "suback", + Primary: true, + RawBytes: []byte{ + Suback << 4, 3, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, // Return Code QoS 0 + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Suback, + Remaining: 3, + }, + PacketID: 15, + ReasonCodes: []byte{0}, + }, + }, + { + Case: TSubackMany, + Desc: "many", + Primary: true, + RawBytes: []byte{ + Suback << 4, 6, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, // Return Code QoS 0 + 1, // Return Code QoS 1 + 2, // Return Code QoS 2 + 0x80, // Return Code fail + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Suback, + Remaining: 6, + }, + PacketID: 15, + ReasonCodes: []byte{0, 1, 2, 0x80}, + }, + }, + { + Case: TSubackDeny, + Desc: "deny mqtt5", + Primary: true, + RawBytes: []byte{ + Suback << 4, 4, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, // no properties + ErrNotAuthorized.Code, // Return Code QoS 0 + }, + Packet: &Packet{ + 
ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Suback, + Remaining: 4, + }, + PacketID: 15, + ReasonCodes: []byte{ErrNotAuthorized.Code}, + }, + }, + { + Case: TSubackUnspecifiedError, + Desc: "unspecified error", + Primary: true, + RawBytes: []byte{ + Suback << 4, 3, // Fixed header + 0, 15, // Packet ID - LSB+MSB + ErrUnspecifiedError.Code, // Return Code QoS 0 + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Suback, + Remaining: 3, + }, + PacketID: 15, + ReasonCodes: []byte{ErrUnspecifiedError.Code}, + }, + }, + { + Case: TSubackUnspecifiedErrorMqtt5, + Desc: "unspecified error mqtt5", + Primary: true, + RawBytes: []byte{ + Suback << 4, 4, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, // no properties + ErrUnspecifiedError.Code, // Return Code QoS 0 + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Suback, + Remaining: 4, + }, + PacketID: 15, + ReasonCodes: []byte{ErrUnspecifiedError.Code}, + }, + }, + { + Case: TSubackMqtt5, + Desc: "mqtt5", + Primary: true, + RawBytes: []byte{ + Suback << 4, 20, // Fixed header + 0, 15, // Packet ID + 16, // Properties Length + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + CodeGrantedQos2.Code, // Return Code QoS 0 + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Suback, + Remaining: 20, + }, + PacketID: 15, + ReasonCodes: []byte{CodeGrantedQos2.Code}, + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TSubackPacketIDInUse, + Desc: "packet id in use", + Primary: true, + RawBytes: []byte{ + Suback << 4, 47, // Fixed header + 0, 15, // Packet ID + 43, // Properties Length + 31, 0, 24, 'p', 'a', 'c', 'k', 'e', 't', + ' ', 'i', 'd', 'e', 'n', 't', 'i', 'f', 'i', 'e', 'r', + ' ', 'i', 'n', + ' ', 'u', 's', 'e', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + ErrPacketIdentifierInUse.Code, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Suback, + Remaining: 47, + }, + PacketID: 15, + ReasonCodes: []byte{ErrPacketIdentifierInUse.Code}, + Properties: Properties{ + ReasonString: ErrPacketIdentifierInUse.Reason, + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + + // Fail states + { + Case: TSubackMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Suback << 4, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TSubackMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Suback << 4, 47, + 0, 15, + 43, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + + { + Case: TSubackInvalidFilter, + Desc: "malformed packet id", + Group: "reference", + RawBytes: []byte{ + Suback << 4, 4, + 0, 15, + 0, // no properties + ErrTopicFilterInvalid.Code, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + { + Case: TSubackInvalidSharedNoLocal, + Desc: "invalid shared no local", + Group: "reference", + RawBytes: []byte{ + Suback << 4, 4, + 0, 15, + 0, // no properties + ErrProtocolViolationInvalidSharedNoLocal.Code, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + + Unsubscribe: { + { + Case: TUnsubscribe, + Desc: "unsubscribe", + Primary: true, + RawBytes: []byte{ + Unsubscribe<<4 | 1<<1, 9, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, 5, 
// Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Unsubscribe, + Remaining: 9, + Qos: 1, + }, + PacketID: 15, + Filters: Subscriptions{ + {Filter: "a/b/c"}, + }, + }, + }, + { + Case: TUnsubscribeMany, + Desc: "unsubscribe many", + Primary: true, + RawBytes: []byte{ + Unsubscribe<<4 | 1<<1, 27, // Fixed header + 0, 35, // Packet ID - LSB+MSB + + 0, 3, // Topic Name - LSB+MSB + 'a', '/', 'b', // Topic Name + + 0, 11, // Topic Name - LSB+MSB + 'd', '/', 'e', '/', 'f', '/', 'g', '/', 'h', '/', 'i', // Topic Name + + 0, 5, // Topic Name - LSB+MSB + 'x', '/', 'y', '/', 'z', // Topic Name + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Unsubscribe, + Remaining: 27, + Qos: 1, + }, + PacketID: 35, + Filters: Subscriptions{ + {Filter: "a/b"}, + {Filter: "d/e/f/g/h/i"}, + {Filter: "x/y/z"}, + }, + }, + }, + { + Case: TUnsubscribeMqtt5, + Desc: "mqtt5", + Primary: true, + RawBytes: []byte{ + Unsubscribe<<4 | 1<<1, 31, // Fixed header + 0, 15, // Packet ID - LSB+MSB + + 16, + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + + 0, 3, // Topic Name - LSB+MSB + 'a', '/', 'b', + + 0, 5, // Topic Name - LSB+MSB + 'x', '/', 'y', '/', 'w', + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Unsubscribe, + Remaining: 31, + Qos: 1, + }, + PacketID: 15, + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + Filters: Subscriptions{ + {Filter: "a/b"}, + {Filter: "x/y/w"}, + }, + }, + }, + + // Fail states + { + Case: TUnsubscribeMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Unsubscribe << 4, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TUnsubscribeMalTopicName, + Desc: "malformed topic", + Group: "decode", + FailFirst: ErrMalformedTopic, + RawBytes: []byte{ + Unsubscribe << 4, 2, // Fixed header + 0, 21, // Packet ID - LSB+MSB + 0, 3, // Topic Name - LSB+MSB + 'a', '/', + }, + }, + { + Case: TUnsubscribeMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Unsubscribe<<4 | 1<<1, 31, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 16, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + + { + Case: TUnsubscribeInvalidQosMustPacketID, + Desc: "no packet id with qos > 0", + Group: "validate", + Expect: ErrProtocolViolationNoPacketID, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Unsubscribe, + Qos: 1, + }, + PacketID: 0, + Filters: Subscriptions{ + Subscription{Filter: "a/b"}, + }, + }, + }, + { + Case: TUnsubscribeInvalidNoFilters, + Desc: "no filters", + Group: "validate", + Expect: ErrProtocolViolationNoFilters, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Unsubscribe, + Qos: 1, + }, + PacketID: 2, + }, + }, + + { + Case: TUnsubscribeSpecQosMustPacketID, + Desc: "no packet id with qos > 0", + Group: "encode", + Expect: ErrProtocolViolationNoPacketID, + RawBytes: []byte{ + Unsubscribe<<4 | 1<<1, 9, // Fixed header + 0, 0, // Packet ID - LSB+MSB + 0, 5, // Topic Name - LSB+MSB + 'a', '/', 'b', '/', 'c', // Topic Name + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Unsubscribe, + Qos: 1, + Remaining: 9, + }, + PacketID: 0, + Filters: Subscriptions{ + {Filter: "a/b/c"}, + }, + }, + }, + }, + Unsuback: { + { + Case: TUnsuback, + Desc: "unsuback", + Primary: true, + RawBytes: []byte{ + Unsuback << 4, 2, 
// Fixed header + 0, 15, // Packet ID - LSB+MSB + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Unsuback, + Remaining: 2, + }, + PacketID: 15, + }, + }, + { + Case: TUnsubackMany, + Desc: "unsuback many", + Primary: true, + RawBytes: []byte{ + Unsuback << 4, 5, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 0, + CodeSuccess.Code, CodeSuccess.Code, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Unsuback, + Remaining: 5, + }, + PacketID: 15, + ReasonCodes: []byte{CodeSuccess.Code, CodeSuccess.Code}, + }, + }, + { + Case: TUnsubackMqtt5, + Desc: "mqtt5", + Primary: true, + RawBytes: []byte{ + Unsuback << 4, 21, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 16, // Properties Length + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + CodeSuccess.Code, CodeNoSubscriptionExisted.Code, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Unsuback, + Remaining: 21, + }, + PacketID: 15, + ReasonCodes: []byte{CodeSuccess.Code, CodeNoSubscriptionExisted.Code}, + Properties: Properties{ + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TUnsubackPacketIDInUse, + Desc: "packet id in use", + Primary: true, + RawBytes: []byte{ + Unsuback << 4, 48, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 43, // Properties Length + 31, 0, 24, 'p', 'a', 'c', 'k', 'e', 't', + ' ', 'i', 'd', 'e', 'n', 't', 'i', 'f', 'i', 'e', 'r', + ' ', 'i', 'n', + ' ', 'u', 's', 'e', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + ErrPacketIdentifierInUse.Code, ErrPacketIdentifierInUse.Code, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Unsuback, + Remaining: 48, + }, + PacketID: 15, + ReasonCodes: []byte{ErrPacketIdentifierInUse.Code, ErrPacketIdentifierInUse.Code}, + Properties: Properties{ + ReasonString: ErrPacketIdentifierInUse.Reason, + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + + // Fail states + { + Case: TUnsubackMalPacketID, + Desc: "malformed packet id", + Group: "decode", + FailFirst: ErrMalformedPacketID, + RawBytes: []byte{ + Unsuback << 4, 2, // Fixed header + 0, // Packet ID - LSB+MSB + }, + }, + { + Case: TUnsubackMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Unsuback << 4, 48, // Fixed header + 0, 15, // Packet ID - LSB+MSB + 43, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + + Pingreq: { + { + Case: TPingreq, + Desc: "ping request", + Primary: true, + RawBytes: []byte{ + Pingreq << 4, 0, // fixed header + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pingreq, + Remaining: 0, + }, + }, + }, + }, + Pingresp: { + { + Case: TPingresp, + Desc: "ping response", + Primary: true, + RawBytes: []byte{ + Pingresp << 4, 0, // fixed header + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Pingresp, + Remaining: 0, + }, + }, + }, + }, + + Disconnect: { + { + Case: TDisconnect, + Desc: "disconnect", + Primary: true, + RawBytes: []byte{ + Disconnect << 4, 0, // fixed header + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 0, + }, + }, + }, + { + Case: TDisconnectTakeover, + Desc: "takeover", + Primary: true, + RawBytes: append([]byte{ + Disconnect << 4, 21, // fixed header + ErrSessionTakenOver.Code, // Reason Code + 19, // Properties Length + 31, 0, 16, 
// Reason String (31) + }, []byte(ErrSessionTakenOver.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 0, + }, + ReasonCode: ErrSessionTakenOver.Code, + Properties: Properties{ + ReasonString: ErrSessionTakenOver.Reason, + }, + }, + }, + { + Case: TDisconnectShuttingDown, + Desc: "shutting down", + Primary: true, + RawBytes: append([]byte{ + Disconnect << 4, 25, // fixed header + ErrServerShuttingDown.Code, // Reason Code + 23, // Properties Length + 31, 0, 20, // Reason String (31) + }, []byte(ErrServerShuttingDown.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 0, + }, + ReasonCode: ErrServerShuttingDown.Code, + Properties: Properties{ + ReasonString: ErrServerShuttingDown.Reason, + }, + }, + }, + { + Case: TDisconnectMqtt5, + Desc: "mqtt5", + Primary: true, + RawBytes: append([]byte{ + Disconnect << 4, 22, // fixed header + CodeDisconnect.Code, // Reason Code + 20, // Properties Length + 17, 0, 0, 0, 120, // Session Expiry Interval (17) + 31, 0, 12, // Reason String (31) + }, []byte(CodeDisconnect.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 22, + }, + ReasonCode: CodeDisconnect.Code, + Properties: Properties{ + ReasonString: CodeDisconnect.Reason, + SessionExpiryInterval: 120, + SessionExpiryIntervalFlag: true, + }, + }, + }, + { + Case: TDisconnectSecondConnect, + Desc: "second connect packet mqtt5", + RawBytes: append([]byte{ + Disconnect << 4, 46, // fixed header + ErrProtocolViolationSecondConnect.Code, + 44, + 31, 0, 41, // Reason String (31) + }, []byte(ErrProtocolViolationSecondConnect.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 45, + }, + ReasonCode: ErrProtocolViolationSecondConnect.Code, + Properties: Properties{ + ReasonString: ErrProtocolViolationSecondConnect.Reason, + }, + }, + }, + { + Case: TDisconnectZeroNonZeroExpiry, + Desc: "zero non zero expiry", + RawBytes: []byte{ + Disconnect << 4, 2, // fixed header + ErrProtocolViolationZeroNonZeroExpiry.Code, + 0, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 2, + }, + ReasonCode: ErrProtocolViolationZeroNonZeroExpiry.Code, + }, + }, + { + Case: TDisconnectReceiveMaximum, + Desc: "receive maximum mqtt5", + RawBytes: append([]byte{ + Disconnect << 4, 29, // fixed header + ErrReceiveMaximum.Code, + 27, // Properties Length + 31, 0, 24, // Reason String (31) + }, []byte(ErrReceiveMaximum.Reason)...), + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 29, + }, + ReasonCode: ErrReceiveMaximum.Code, + Properties: Properties{ + ReasonString: ErrReceiveMaximum.Reason, + }, + }, + }, + { + Case: TDisconnectDropProperties, + Desc: "drop oversize properties partial", + Group: "encode", + RawBytes: []byte{ + Disconnect << 4, 39, // fixed header + CodeDisconnect.Code, + 19, // length + 28, 0, 7, 'm', 'o', 'c', 'h', 'i', '-', '2', // Server Reference (28) + 31, 0, 6, 'r', 'e', 'a', 's', 'o', 'n', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + ActualBytes: []byte{ + Disconnect << 4, 12, // fixed header + CodeDisconnect.Code, + 10, // length + 28, 0, 7, 'm', 'o', 'c', 'h', 'i', '-', '2', // Server Reference (28) + }, + Packet: &Packet{ + Mods: Mods{ + MaxSize: 3, + }, + 
ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Disconnect, + Remaining: 40, + }, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + ReasonString: "reason", + ServerReference: "mochi-2", + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + // fail states + { + Case: TDisconnectMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Disconnect << 4, 48, // fixed header + CodeDisconnect.Code, // Reason Code + 46, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + { + Case: TDisconnectMalReasonCode, + Desc: "malformed reason code", + Group: "decode", + FailFirst: ErrMalformedReasonCode, + RawBytes: []byte{ + Disconnect << 4, 48, // fixed header + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + }, + Auth: { + { + Case: TAuth, + Desc: "auth", + Primary: true, + RawBytes: []byte{ + Auth << 4, 47, + CodeSuccess.Code, // reason code + 45, + 21, 0, 5, 'S', 'H', 'A', '-', '1', // Authentication Method (21) + 22, 0, 9, 'a', 'u', 't', 'h', '-', 'd', 'a', 't', 'a', // Authentication Data (22) + 31, 0, 6, 'r', 'e', 'a', 's', 'o', 'n', // Reason String (31) + 38, // User Properties (38) + 0, 5, 'h', 'e', 'l', 'l', 'o', + 0, 6, 228, 184, 150, 231, 149, 140, + }, + Packet: &Packet{ + ProtocolVersion: 5, + FixedHeader: FixedHeader{ + Type: Auth, + Remaining: 47, + }, + ReasonCode: CodeSuccess.Code, + Properties: Properties{ + AuthenticationMethod: "SHA-1", + AuthenticationData: []byte("auth-data"), + ReasonString: "reason", + User: []UserProperty{ + { + Key: "hello", + Val: "世界", + }, + }, + }, + }, + }, + { + Case: TAuthMalReasonCode, + Desc: "malformed reason code", + Group: "decode", + FailFirst: ErrMalformedReasonCode, + RawBytes: []byte{ + Auth << 4, 47, + }, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Auth, + }, + ReasonCode: CodeNoMatchingSubscribers.Code, + }, + }, + // fail states + { + Case: TAuthMalProperties, + Desc: "malformed properties", + Group: "decode", + FailFirst: ErrMalformedProperties, + RawBytes: []byte{ + Auth << 4, 3, + CodeSuccess.Code, + 12, + }, + Packet: &Packet{ + ProtocolVersion: 5, + }, + }, + // Validation + { + Case: TAuthInvalidReason, + Desc: "invalid reason code", + Group: "validate", + Expect: ErrProtocolViolationInvalidReason, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Auth, + }, + ReasonCode: CodeNoMatchingSubscribers.Code, + }, + }, + { + Case: TAuthInvalidReason2, + Desc: "invalid reason code", + Group: "validate", + Expect: ErrProtocolViolationInvalidReason, + Packet: &Packet{ + FixedHeader: FixedHeader{ + Type: Auth, + }, + ReasonCode: CodeNoMatchingSubscribers.Code, + }, + }, + }, +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/server.go b/vendor/github.com/mochi-mqtt/server/v2/server.go new file mode 100644 index 00000000..0c50b6aa --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/server.go @@ -0,0 +1,1533 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +// package mqtt provides a high performance, fully compliant MQTT v5 broker server with v3.1.1 backward compatibility. 
+package mqtt + +import ( + "errors" + "fmt" + "math" + "net" + "os" + "runtime" + "sort" + "strconv" + "sync/atomic" + "time" + + "github.com/mochi-mqtt/server/v2/hooks/storage" + "github.com/mochi-mqtt/server/v2/listeners" + "github.com/mochi-mqtt/server/v2/packets" + "github.com/mochi-mqtt/server/v2/system" + + "github.com/rs/zerolog" +) + +const ( + Version = "2.3.0" // the current server version. + defaultSysTopicInterval int64 = 1 // the interval between $SYS topic publishes +) + +var ( + // DefaultServerCapabilities defines the default features and capabilities provided by the server. + DefaultServerCapabilities = &Capabilities{ + MaximumSessionExpiryInterval: math.MaxUint32, // maximum number of seconds to keep disconnected sessions + MaximumMessageExpiryInterval: 60 * 60 * 24, // maximum message expiry if message expiry is 0 or over + ReceiveMaximum: 1024, // maximum number of concurrent qos messages per client + MaximumQos: 2, // maxmimum qos value available to clients + RetainAvailable: 1, // retain messages is available + MaximumPacketSize: 0, // no maximum packet size + TopicAliasMaximum: math.MaxUint16, // maximum topic alias value + WildcardSubAvailable: 1, // wildcard subscriptions are available + SubIDAvailable: 1, // subscription identifiers are available + SharedSubAvailable: 1, // shared subscriptions are available + MinimumProtocolVersion: 3, // minimum supported mqtt version (3.0.0) + MaximumClientWritesPending: 1024 * 8, // maximum number of pending message writes for a client + } + + ErrListenerIDExists = errors.New("listener id already exists") // a listener with the same id already exists. + ErrConnectionClosed = errors.New("connection not open") // connection is closed +) + +// Capabilities indicates the capabilities and features provided by the server. +type Capabilities struct { + MaximumMessageExpiryInterval int64 + MaximumClientWritesPending int32 + MaximumSessionExpiryInterval uint32 + MaximumPacketSize uint32 + maximumPacketID uint32 // unexported, used for testing only + ReceiveMaximum uint16 + TopicAliasMaximum uint16 + SharedSubAvailable byte + MinimumProtocolVersion byte + Compatibilities Compatibilities + MaximumQos byte + RetainAvailable byte + WildcardSubAvailable byte + SubIDAvailable byte +} + +// Compatibilities provides flags for using compatibility modes. +type Compatibilities struct { + ObscureNotAuthorized bool // return unspecified errors instead of not authorized + PassiveClientDisconnect bool // don't disconnect the client forcefully after sending disconnect packet (paho - spec violation) + AlwaysReturnResponseInfo bool // always return response info (useful for testing) + RestoreSysInfoOnRestart bool // restore system info from store as if server never stopped + NoInheritedPropertiesOnAck bool // don't allow inherited user properties on ack (paho - spec violation) +} + +// Options contains configurable options for the server. +type Options struct { + // Capabilities defines the server features and behaviour. If you only wish to modify + // several of these values, set them explicitly - e.g. + // server.Options.Capabilities.MaximumClientWritesPending = 16 * 1024 + Capabilities *Capabilities + + // ClientNetWriteBufferSize specifies the size of the client *bufio.Writer write buffer. + ClientNetWriteBufferSize int + + // ClientNetReadBufferSize specifies the size of the client *bufio.Reader read buffer. 
+ ClientNetReadBufferSize int + + // Logger specifies a custom configured implementation of zerolog to override + // the servers default logger configuration. If you wish to change the log level, + // of the default logger, you can do so by setting + // server := mqtt.New(nil) + // l := server.Log.Level(zerolog.DebugLevel) + // server.Log = &l + Logger *zerolog.Logger + + // SysTopicResendInterval specifies the interval between $SYS topic updates in seconds. + SysTopicResendInterval int64 +} + +// Server is an MQTT broker server. It should be created with server.New() +// in order to ensure all the internal fields are correctly populated. +type Server struct { + Options *Options // configurable server options + Listeners *listeners.Listeners // listeners are network interfaces which listen for new connections + Clients *Clients // clients known to the broker + Topics *TopicsIndex // an index of topic filter subscriptions and retained messages + Info *system.Info // values about the server commonly known as $SYS topics + loop *loop // loop contains tickers for the system event loop + done chan bool // indicate that the server is ending + Log *zerolog.Logger // minimal no-alloc logger + hooks *Hooks // hooks contains hooks for extra functionality such as auth and persistent storage. +} + +// loop contains interval tickers for the system events loop. +type loop struct { + sysTopics *time.Ticker // interval ticker for sending updating $SYS topics + clientExpiry *time.Ticker // interval ticker for cleaning expired clients + inflightExpiry *time.Ticker // interval ticker for cleaning up expired inflight messages + retainedExpiry *time.Ticker // interval ticker for cleaning retained messages + willDelaySend *time.Ticker // interval ticker for sending will messages with a delay + willDelayed *packets.Packets // activate LWT packets which will be sent after a delay +} + +// ops contains server values which can be propagated to other structs. +type ops struct { + options *Options // a pointer to the server options and capabilities, for referencing in clients + info *system.Info // pointers to server system info + hooks *Hooks // pointer to the server hooks + log *zerolog.Logger // a structured logger for the client +} + +// New returns a new instance of mochi mqtt broker. Optional parameters +// can be specified to override some default settings (see Options). +func New(opts *Options) *Server { + if opts == nil { + opts = new(Options) + } + + opts.ensureDefaults() + + s := &Server{ + done: make(chan bool), + Clients: NewClients(), + Topics: NewTopicsIndex(), + Listeners: listeners.New(), + loop: &loop{ + sysTopics: time.NewTicker(time.Second * time.Duration(opts.SysTopicResendInterval)), + clientExpiry: time.NewTicker(time.Second), + inflightExpiry: time.NewTicker(time.Second), + retainedExpiry: time.NewTicker(time.Second), + willDelaySend: time.NewTicker(time.Second), + willDelayed: packets.NewPackets(), + }, + Options: opts, + Info: &system.Info{ + Version: Version, + Started: time.Now().Unix(), + }, + Log: opts.Logger, + hooks: &Hooks{ + Log: opts.Logger, + }, + } + + return s +} + +// ensureDefaults ensures that the server starts with sane default values, if none are provided. 
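// Illustrative sketch (not part of the vendored file): constructing the broker
// with a few Options and Capabilities overridden, as the comments on Options
// describe. The specific values are arbitrary examples; anything left unset is
// filled in by ensureDefaults below.
//
//	package main
//
//	import mqtt "github.com/mochi-mqtt/server/v2"
//
//	func main() {
//		caps := *mqtt.DefaultServerCapabilities // copy the defaults, then tweak
//		caps.MaximumQos = 1
//		caps.MaximumClientWritesPending = 16 * 1024
//
//		server := mqtt.New(&mqtt.Options{
//			Capabilities:           &caps,
//			SysTopicResendInterval: 10, // publish $SYS values every 10 seconds
//		})
//		defer server.Close()
//	}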
+func (o *Options) ensureDefaults() { + if o.Capabilities == nil { + o.Capabilities = DefaultServerCapabilities + } + + o.Capabilities.maximumPacketID = math.MaxUint16 // spec maximum is 65535 + + if o.SysTopicResendInterval == 0 { + o.SysTopicResendInterval = defaultSysTopicInterval + } + + if o.ClientNetWriteBufferSize == 0 { + o.ClientNetWriteBufferSize = 1024 * 2 + } + + if o.ClientNetReadBufferSize == 0 { + o.ClientNetReadBufferSize = 1024 * 2 + } + + if o.Logger == nil { + log := zerolog.New(os.Stderr).With().Timestamp().Logger().Level(zerolog.InfoLevel).Output(zerolog.ConsoleWriter{Out: os.Stderr}) + o.Logger = &log + } +} + +// NewClient returns a new Client instance, populated with all the required values and +// references to be used with the server. If you are using this client to directly publish +// messages from the embedding application, set the inline flag to true to bypass ACL and +// topic validation checks. +func (s *Server) NewClient(c net.Conn, listener string, id string, inline bool) *Client { + cl := newClient(c, &ops{ // [MQTT-3.1.2-6] implicit + options: s.Options, + info: s.Info, + hooks: s.hooks, + log: s.Log, + }) + + cl.ID = id + cl.Net.Listener = listener + + if inline { // inline clients bypass acl and some validity checks. + cl.Net.Inline = true + // By default, we don't want to restrict developer publishes, + // but if you do, reset this after creating inline client. + cl.State.Inflight.ResetReceiveQuota(math.MaxInt32) + } else { + go cl.WriteLoop() // can only write to real clients + } + + return cl +} + +// AddHook attaches a new Hook to the server. Ideally, this should be called +// before the server is started with s.Serve(). +func (s *Server) AddHook(hook Hook, config any) error { + nl := s.Log.With().Str("hook", hook.ID()).Logger() + hook.SetOpts(&nl, &HookOptions{ + Capabilities: s.Options.Capabilities, + }) + + s.Log.Info().Str("hook", hook.ID()).Msg("added hook") + return s.hooks.Add(hook, config) +} + +// AddListener adds a new network listener to the server, for receiving incoming client connections. +func (s *Server) AddListener(l listeners.Listener) error { + if _, ok := s.Listeners.Get(l.ID()); ok { + return ErrListenerIDExists + } + + nl := s.Log.With().Str("listener", l.ID()).Logger() + err := l.Init(&nl) + if err != nil { + return err + } + + s.Listeners.Add(l) + + s.Log.Info().Str("id", l.ID()).Str("protocol", l.Protocol()).Str("address", l.Address()).Msg("attached listener") + return nil +} + +// Serve starts the event loops responsible for establishing client connections +// on all attached listeners, publishing the system topics, and starting all hooks. +func (s *Server) Serve() error { + s.Log.Info().Str("version", Version).Msg("mochi mqtt starting") + defer s.Log.Info().Msg("mochi mqtt server started") + + if s.hooks.Provides( + StoredClients, + StoredInflightMessages, + StoredRetainedMessages, + StoredSubscriptions, + StoredSysInfo, + ) { + err := s.readStore() + if err != nil { + return err + } + } + + go s.eventLoop() // spin up event loop for issuing $SYS values and closing server. + s.Listeners.ServeAll(s.EstablishConnection) // start listening on all listeners. + s.publishSysTopics() // begin publishing $SYS system values. + s.hooks.OnStarted() + + return nil +} + +// eventLoop loops forever, running various server housekeeping methods at different intervals. 
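// Illustrative sketch (not part of the vendored file): the minimal embedding
// flow using New, AddHook, AddListener and Serve defined above. auth.AllowHook
// comes from hooks/auth/allow_all.go in this patch; the TCP listener
// constructor is assumed to take an id, bind address and optional config in
// this vendored version.
//
//	package main
//
//	import (
//		"log"
//
//		mqtt "github.com/mochi-mqtt/server/v2"
//		"github.com/mochi-mqtt/server/v2/hooks/auth"
//		"github.com/mochi-mqtt/server/v2/listeners"
//	)
//
//	func main() {
//		server := mqtt.New(nil)
//		_ = server.AddHook(new(auth.AllowHook), nil) // permit all connections
//
//		if err := server.AddListener(listeners.NewTCP("t1", ":1883", nil)); err != nil {
//			log.Fatal(err)
//		}
//		if err := server.Serve(); err != nil {
//			log.Fatal(err)
//		}
//		select {} // Serve returns once listeners are running; block to keep serving
//	}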
+func (s *Server) eventLoop() { + s.Log.Debug().Msg("system event loop started") + defer s.Log.Debug().Msg("system event loop halted") + + for { + select { + case <-s.done: + s.loop.sysTopics.Stop() + return + case <-s.loop.sysTopics.C: + s.publishSysTopics() + case <-s.loop.clientExpiry.C: + s.clearExpiredClients(time.Now().Unix()) + case <-s.loop.retainedExpiry.C: + s.clearExpiredRetainedMessages(time.Now().Unix()) + case <-s.loop.willDelaySend.C: + s.sendDelayedLWT(time.Now().Unix()) + case <-s.loop.inflightExpiry.C: + s.clearExpiredInflights(time.Now().Unix()) + } + } +} + +// EstablishConnection establishes a new client when a listener accepts a new connection. +func (s *Server) EstablishConnection(listener string, c net.Conn) error { + cl := s.NewClient(c, listener, "", false) + return s.attachClient(cl, listener) +} + +// attachClient validates an incoming client connection and if viable, attaches the client +// to the server, performs session housekeeping, and reads incoming packets. +func (s *Server) attachClient(cl *Client, listener string) error { + defer cl.Stop(nil) + pk, err := s.readConnectionPacket(cl) + if err != nil { + return fmt.Errorf("read connection: %w", err) + } + + cl.ParseConnect(listener, pk) + code := s.validateConnect(cl, pk) // [MQTT-3.1.4-1] [MQTT-3.1.4-2] + if code != packets.CodeSuccess { + if err := s.SendConnack(cl, code, false, nil); err != nil { + return fmt.Errorf("invalid connection send ack: %w", err) + } + return code // [MQTT-3.2.2-7] [MQTT-3.1.4-6] + } + + err = s.hooks.OnConnect(cl, pk) + if err != nil { + return err + } + + cl.refreshDeadline(cl.State.Keepalive) + if !s.hooks.OnConnectAuthenticate(cl, pk) { // [MQTT-3.1.4-2] + err := s.SendConnack(cl, packets.ErrBadUsernameOrPassword, false, nil) + if err != nil { + return fmt.Errorf("invalid connection send ack: %w", err) + } + + return packets.ErrBadUsernameOrPassword + } + + atomic.AddInt64(&s.Info.ClientsConnected, 1) + defer atomic.AddInt64(&s.Info.ClientsConnected, -1) + + s.hooks.OnSessionEstablish(cl, pk) + + sessionPresent := s.inheritClientSession(pk, cl) + s.Clients.Add(cl) // [MQTT-4.1.0-1] + + err = s.SendConnack(cl, code, sessionPresent, nil) // [MQTT-3.1.4-5] [MQTT-3.2.0-1] [MQTT-3.2.0-2] &[MQTT-3.14.0-1] + if err != nil { + return fmt.Errorf("ack connection packet: %w", err) + } + + s.loop.willDelayed.Delete(cl.ID) // [MQTT-3.1.3-9] + + if sessionPresent { + err = cl.ResendInflightMessages(true) + if err != nil { + return fmt.Errorf("resend inflight: %w", err) + } + } + + s.hooks.OnSessionEstablished(cl, pk) + + err = cl.Read(s.receivePacket) + if err != nil { + s.sendLWT(cl) + cl.Stop(err) + } else { + cl.Properties.Will = Will{} // [MQTT-3.14.4-3] [MQTT-3.1.2-10] + } + + s.Log.Debug().Str("client", cl.ID).Err(err).Str("remote", cl.Net.Remote).Str("listener", listener).Msg("client disconnected") + expire := (cl.Properties.ProtocolVersion == 5 && cl.Properties.Props.SessionExpiryInterval == 0) || (cl.Properties.ProtocolVersion < 5 && cl.Properties.Clean) + s.hooks.OnDisconnect(cl, err, expire) + + if expire && atomic.LoadUint32(&cl.State.isTakenOver) == 0 { + cl.ClearInflights(math.MaxInt64, 0) + s.UnsubscribeClient(cl) + s.Clients.Delete(cl.ID) // [MQTT-4.1.0-2] ![MQTT-3.1.2-23] + } + + return err +} + +// readConnectionPacket reads the first incoming header for a connection, and if +// acceptable, returns the valid connection packet. 
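// Illustrative sketch (not part of the vendored file): EstablishConnection is
// the bridge between a listener and the broker; each accepted net.Conn is
// handed to it, and the call blocks until that client disconnects. A
// hand-rolled accept loop (roughly what the bundled TCP listener does) might
// look like this, where "custom" is an arbitrary listener id.
//
//	func serveRaw(server *mqtt.Server, addr string) error {
//		ln, err := net.Listen("tcp", addr)
//		if err != nil {
//			return err
//		}
//		for {
//			conn, err := ln.Accept()
//			if err != nil {
//				return err
//			}
//			go func(c net.Conn) {
//				// blocks for the lifetime of the client connection
//				_ = server.EstablishConnection("custom", c)
//			}(conn)
//		}
//	}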
+func (s *Server) readConnectionPacket(cl *Client) (pk packets.Packet, err error) { + fh := new(packets.FixedHeader) + err = cl.ReadFixedHeader(fh) + if err != nil { + return + } + + if fh.Type != packets.Connect { + return pk, packets.ErrProtocolViolationRequireFirstConnect // [MQTT-3.1.0-1] + } + + pk, err = cl.ReadPacket(fh) + if err != nil { + return + } + + return +} + +// receivePacket processes an incoming packet for a client, and issues a disconnect to the client +// if an error has occurred (if mqtt v5). +func (s *Server) receivePacket(cl *Client, pk packets.Packet) error { + err := s.processPacket(cl, pk) + if err != nil { + if code, ok := err.(packets.Code); ok && + cl.Properties.ProtocolVersion == 5 && + code.Code >= packets.ErrUnspecifiedError.Code { + s.DisconnectClient(cl, code) + } + + s.Log.Warn().Err(err).Str("client", cl.ID).Str("listener", cl.Net.Listener).Interface("pk", pk).Msg("error processing packet") + + return err + } + + return nil +} + +// validateConnect validates that a connect packet is compliant. +func (s *Server) validateConnect(cl *Client, pk packets.Packet) packets.Code { + code := pk.ConnectValidate() // [MQTT-3.1.4-1] [MQTT-3.1.4-2] + if code != packets.CodeSuccess { + return code + } + + if cl.Properties.ProtocolVersion < 5 && !pk.Connect.Clean && pk.Connect.ClientIdentifier == "" { + return packets.ErrUnspecifiedError + } + + if cl.Properties.ProtocolVersion < s.Options.Capabilities.MinimumProtocolVersion { + return packets.ErrUnsupportedProtocolVersion // [MQTT-3.1.2-2] + } else if cl.Properties.Will.Qos > s.Options.Capabilities.MaximumQos { + return packets.ErrQosNotSupported // [MQTT-3.2.2-12] + } else if cl.Properties.Will.Retain && s.Options.Capabilities.RetainAvailable == 0x00 { + return packets.ErrRetainNotSupported // [MQTT-3.2.2-13] + } + + return code +} + +// inheritClientSession inherits the state of an existing client sharing the same +// connection ID. If clean is true, the state of any previously existing client +// session is abandoned. +func (s *Server) inheritClientSession(pk packets.Packet, cl *Client) bool { + if existing, ok := s.Clients.Get(pk.Connect.ClientIdentifier); ok { + s.DisconnectClient(existing, packets.ErrSessionTakenOver) // [MQTT-3.1.4-3] + if pk.Connect.Clean || (existing.Properties.Clean && existing.Properties.ProtocolVersion < 5) { // [MQTT-3.1.2-4] [MQTT-3.1.4-4] + s.UnsubscribeClient(existing) + existing.ClearInflights(math.MaxInt64, 0) + atomic.StoreUint32(&existing.State.isTakenOver, 1) // only set isTakenOver after unsubscribe has occurred + return false // [MQTT-3.2.2-3] + } + + atomic.StoreUint32(&existing.State.isTakenOver, 1) + if existing.State.Inflight.Len() > 0 { + cl.State.Inflight = existing.State.Inflight.Clone() // [MQTT-3.1.2-5] + if cl.State.Inflight.maximumReceiveQuota == 0 && cl.ops.options.Capabilities.ReceiveMaximum != 0 { + cl.State.Inflight.ResetReceiveQuota(int32(cl.ops.options.Capabilities.ReceiveMaximum)) // server receive max per client + cl.State.Inflight.ResetSendQuota(int32(cl.Properties.Props.ReceiveMaximum)) // client receive max + } + } + + for _, sub := range existing.State.Subscriptions.GetAll() { + existed := !s.Topics.Subscribe(cl.ID, sub) // [MQTT-3.8.4-3] + if !existed { + atomic.AddInt64(&s.Info.Subscriptions, 1) + } + cl.State.Subscriptions.Add(sub.Filter, sub) + } + + // Clean the state of the existing client to prevent sequential take-overs + // from increasing memory usage by inflights + subs * client-id. 
+ s.UnsubscribeClient(existing) + existing.ClearInflights(math.MaxInt64, 0) + s.Log.Debug().Str("client", cl.ID). + Str("old_remote", existing.Net.Remote). + Str("new_remote", cl.Net.Remote). + Msg("session taken over") + + return true // [MQTT-3.2.2-3] + } + + if atomic.LoadInt64(&s.Info.ClientsConnected) > atomic.LoadInt64(&s.Info.ClientsMaximum) { + atomic.AddInt64(&s.Info.ClientsMaximum, 1) + } + + return false // [MQTT-3.2.2-2] +} + +// SendConnack returns a Connack packet to a client. +func (s *Server) SendConnack(cl *Client, reason packets.Code, present bool, properties *packets.Properties) error { + if properties == nil { + properties = &packets.Properties{ + ReceiveMaximum: s.Options.Capabilities.ReceiveMaximum, + } + } + + properties.ReceiveMaximum = s.Options.Capabilities.ReceiveMaximum // 3.2.2.3.3 Receive Maximum + if cl.State.ServerKeepalive { // You can set this dynamically using the OnConnect hook. + properties.ServerKeepAlive = cl.State.Keepalive // [MQTT-3.1.2-21] + properties.ServerKeepAliveFlag = true + } + + if reason.Code >= packets.ErrUnspecifiedError.Code { + if cl.Properties.ProtocolVersion < 5 { + if v3reason, ok := packets.V5CodesToV3[reason]; ok { // NB v3 3.2.2.3 Connack return codes + reason = v3reason + } + } + + properties.ReasonString = reason.Reason + ack := packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: packets.Connack, + }, + SessionPresent: false, // [MQTT-3.2.2-6] + ReasonCode: reason.Code, // [MQTT-3.2.2-8] + Properties: *properties, + } + return cl.WritePacket(ack) + } + + if s.Options.Capabilities.MaximumQos < 2 { + properties.MaximumQos = s.Options.Capabilities.MaximumQos // [MQTT-3.2.2-9] + properties.MaximumQosFlag = true + } + + if cl.Properties.Props.AssignedClientID != "" { + properties.AssignedClientID = cl.Properties.Props.AssignedClientID // [MQTT-3.1.3-7] [MQTT-3.2.2-16] + } + + if cl.Properties.Props.SessionExpiryInterval > s.Options.Capabilities.MaximumSessionExpiryInterval { + properties.SessionExpiryInterval = s.Options.Capabilities.MaximumSessionExpiryInterval + properties.SessionExpiryIntervalFlag = true + cl.Properties.Props.SessionExpiryInterval = properties.SessionExpiryInterval + cl.Properties.Props.SessionExpiryIntervalFlag = true + } + + ack := packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: packets.Connack, + }, + SessionPresent: present, + ReasonCode: reason.Code, // [MQTT-3.2.2-8] + Properties: *properties, + } + return cl.WritePacket(ack) +} + +// processPacket processes an inbound packet for a client. Since the method is +// typically called as a goroutine, errors are primarily for test checking purposes. 
+func (s *Server) processPacket(cl *Client, pk packets.Packet) error { + var err error + + switch pk.FixedHeader.Type { + case packets.Connect: + err = s.processConnect(cl, pk) + case packets.Disconnect: + err = s.processDisconnect(cl, pk) + case packets.Pingreq: + err = s.processPingreq(cl, pk) + case packets.Publish: + code := pk.PublishValidate(s.Options.Capabilities.TopicAliasMaximum) + if code != packets.CodeSuccess { + return code + } + err = s.processPublish(cl, pk) + case packets.Puback: + err = s.processPuback(cl, pk) + case packets.Pubrec: + err = s.processPubrec(cl, pk) + case packets.Pubrel: + err = s.processPubrel(cl, pk) + case packets.Pubcomp: + err = s.processPubcomp(cl, pk) + case packets.Subscribe: + code := pk.SubscribeValidate() + if code != packets.CodeSuccess { + return code + } + err = s.processSubscribe(cl, pk) + case packets.Unsubscribe: + code := pk.UnsubscribeValidate() + if code != packets.CodeSuccess { + return code + } + err = s.processUnsubscribe(cl, pk) + case packets.Auth: + code := pk.AuthValidate() + if code != packets.CodeSuccess { + return code + } + err = s.processAuth(cl, pk) + default: + return fmt.Errorf("no valid packet available; %v", pk.FixedHeader.Type) + } + + s.hooks.OnPacketProcessed(cl, pk, err) + if err != nil { + return err + } + + if cl.State.Inflight.Len() > 0 && atomic.LoadInt32(&cl.State.Inflight.sendQuota) > 0 { + next, ok := cl.State.Inflight.NextImmediate() + if ok { + _ = cl.WritePacket(next) + if ok := cl.State.Inflight.Delete(next.PacketID); ok { + atomic.AddInt64(&s.Info.Inflight, -1) + } + cl.State.Inflight.DecreaseSendQuota() + } + } + + return nil +} + +// processConnect processes a Connect packet. The packet cannot be used to establish +// a new connection on an existing connection. See EstablishConnection instead. +func (s *Server) processConnect(cl *Client, _ packets.Packet) error { + s.sendLWT(cl) + return packets.ErrProtocolViolationSecondConnect // [MQTT-3.1.0-2] +} + +// processPingreq processes a Pingreq packet. +func (s *Server) processPingreq(cl *Client, _ packets.Packet) error { + return cl.WritePacket(packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: packets.Pingresp, // [MQTT-3.12.4-1] + }, + }) +} + +// Publish publishes a publish packet into the broker as if it were sent from the speicfied client. +// This is a convenience function which wraps InjectPacket. As such, this method can publish packets +// to any topic (including $SYS) and bypass ACL checks. The qos byte is used for limiting the +// outbound qos (mqtt v5) rather than issuing to the broker (we assume qos 2 complete). +func (s *Server) Publish(topic string, payload []byte, retain bool, qos byte) error { + cl := s.NewClient(nil, "local", "inline", true) + return s.InjectPacket(cl, packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: packets.Publish, + Qos: qos, + Retain: retain, + }, + TopicName: topic, + Payload: payload, + PacketID: uint16(qos), // we never process the inbound qos, but we need a packet id for validity checks. + }) +} + +// InjectPacket injects a packet into the broker as if it were sent from the specified client. +// InlineClients using this method can publish packets to any topic (including $SYS) and bypass ACL checks. 
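// Illustrative sketch (not part of the vendored file): direct publishing from
// the embedding application via the Publish convenience wrapper above. It
// bypasses ACL checks and may target any topic, including $SYS; the final
// argument caps the outbound QoS. Topic and payload here are arbitrary.
//
//	if err := server.Publish("direct/notifications", []byte("service restarting"), false, 0); err != nil {
//		log.Println(err)
//	}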
+func (s *Server) InjectPacket(cl *Client, pk packets.Packet) error { + pk.ProtocolVersion = cl.Properties.ProtocolVersion + + err := s.processPacket(cl, pk) + if err != nil { + return err + } + + atomic.AddInt64(&cl.ops.info.PacketsReceived, 1) + if pk.FixedHeader.Type == packets.Publish { + atomic.AddInt64(&cl.ops.info.MessagesReceived, 1) + } + + return nil +} + +// processPublish processes a Publish packet. +func (s *Server) processPublish(cl *Client, pk packets.Packet) error { + if !cl.Net.Inline && !IsValidFilter(pk.TopicName, true) { + return nil + } + + if atomic.LoadInt32(&cl.State.Inflight.receiveQuota) == 0 { + return s.DisconnectClient(cl, packets.ErrReceiveMaximum) // ~[MQTT-3.3.4-7] ~[MQTT-3.3.4-8] + } + + if !cl.Net.Inline && !s.hooks.OnACLCheck(cl, pk.TopicName, true) { + return nil + } + + pk.Origin = cl.ID + pk.Created = time.Now().Unix() + + if !cl.Net.Inline { + if pki, ok := cl.State.Inflight.Get(pk.PacketID); ok { + if pki.FixedHeader.Type == packets.Pubrec { // [MQTT-4.3.3-10] + ack := s.buildAck(pk.PacketID, packets.Pubrec, 0, pk.Properties, packets.ErrPacketIdentifierInUse) + return cl.WritePacket(ack) + } + if ok := cl.State.Inflight.Delete(pk.PacketID); ok { // [MQTT-4.3.2-5] + atomic.AddInt64(&s.Info.Inflight, -1) + } + } + } + + if pk.Properties.TopicAliasFlag && pk.Properties.TopicAlias > 0 { // [MQTT-3.3.2-11] + pk.TopicName = cl.State.TopicAliases.Inbound.Set(pk.Properties.TopicAlias, pk.TopicName) + } + + if pk.FixedHeader.Qos > s.Options.Capabilities.MaximumQos { + pk.FixedHeader.Qos = s.Options.Capabilities.MaximumQos // [MQTT-3.2.2-9] Reduce Qos based on server max qos capability + } + + pkx, err := s.hooks.OnPublish(cl, pk) + if err == nil { + pk = pkx + } else if errors.Is(err, packets.ErrRejectPacket) { + return nil + } else if errors.Is(err, packets.CodeSuccessIgnore) { + pk.Ignore = true + } else if cl.Properties.ProtocolVersion == 5 && pk.FixedHeader.Qos > 0 && errors.As(err, new(packets.Code)) { + err = cl.WritePacket(s.buildAck(pk.PacketID, packets.Puback, 0, pk.Properties, err.(packets.Code))) + if err != nil { + return err + } + return nil + } + + if pk.FixedHeader.Retain { // [MQTT-3.3.1-5] ![MQTT-3.3.1-8] + s.retainMessage(cl, pk) + } + + if pk.FixedHeader.Qos == 0 { + s.publishToSubscribers(pk) + s.hooks.OnPublished(cl, pk) + return nil + } + + cl.State.Inflight.DecreaseReceiveQuota() + ack := s.buildAck(pk.PacketID, packets.Puback, 0, pk.Properties, packets.QosCodes[pk.FixedHeader.Qos]) // [MQTT-4.3.2-4] + if pk.FixedHeader.Qos == 2 { + ack = s.buildAck(pk.PacketID, packets.Pubrec, 0, pk.Properties, packets.CodeSuccess) // [MQTT-3.3.4-1] [MQTT-4.3.3-8] + } + + if ok := cl.State.Inflight.Set(ack); ok { + atomic.AddInt64(&s.Info.Inflight, 1) + s.hooks.OnQosPublish(cl, ack, ack.Created, 0) + } + + err = cl.WritePacket(ack) + if err != nil { + return err + } + + if pk.FixedHeader.Qos == 1 { + if ok := cl.State.Inflight.Delete(ack.PacketID); ok { + atomic.AddInt64(&s.Info.Inflight, -1) + } + cl.State.Inflight.IncreaseReceiveQuota() + s.hooks.OnQosComplete(cl, ack) + } + + s.publishToSubscribers(pk) + s.hooks.OnPublished(cl, pk) + + return nil +} + +// retainMessage adds a message to a topic, and if a persistent store is provided, +// adds the message to the store to be reloaded if necessary. 
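// Illustrative sketch (not part of the vendored file): InjectPacket gives
// finer control than Publish; build the packet yourself and submit it through
// an inline client (NewClient with inline=true bypasses ACL and some validity
// checks), mirroring what Publish does internally. Topic, payload and packet
// id are arbitrary examples.
//
//	cl := server.NewClient(nil, "local", "inline", true)
//	err := server.InjectPacket(cl, packets.Packet{
//		FixedHeader: packets.FixedHeader{
//			Type:   packets.Publish,
//			Qos:    1,
//			Retain: true,
//		},
//		TopicName: "devices/alpha/state",
//		Payload:   []byte(`{"online":true}`),
//		PacketID:  1, // any non-zero id satisfies validity checks for qos > 0
//	})
//	if err != nil {
//		log.Println(err)
//	}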
+func (s *Server) retainMessage(cl *Client, pk packets.Packet) { + if s.Options.Capabilities.RetainAvailable == 0 || pk.Ignore { + return + } + + out := pk.Copy(false) + r := s.Topics.RetainMessage(out) + s.hooks.OnRetainMessage(cl, pk, r) + atomic.StoreInt64(&s.Info.Retained, int64(s.Topics.Retained.Len())) +} + +// publishToSubscribers publishes a publish packet to all subscribers with matching topic filters. +func (s *Server) publishToSubscribers(pk packets.Packet) { + if pk.Ignore { + return + } + + if pk.Created == 0 { + pk.Created = time.Now().Unix() + } + + pk.Expiry = pk.Created + s.Options.Capabilities.MaximumMessageExpiryInterval + if pk.Properties.MessageExpiryInterval > 0 { + pk.Expiry = pk.Created + int64(pk.Properties.MessageExpiryInterval) + } + + subscribers := s.Topics.Subscribers(pk.TopicName) + if len(subscribers.Shared) > 0 { + subscribers = s.hooks.OnSelectSubscribers(subscribers, pk) + if len(subscribers.SharedSelected) == 0 { + subscribers.SelectShared() + } + subscribers.MergeSharedSelected() + } + + for id, subs := range subscribers.Subscriptions { + if cl, ok := s.Clients.Get(id); ok { + _, err := s.publishToClient(cl, subs, pk) + if err != nil { + s.Log.Debug().Err(err).Str("client", cl.ID).Interface("packet", pk).Msg("failed publishing packet") + } + } + } +} + +func (s *Server) publishToClient(cl *Client, sub packets.Subscription, pk packets.Packet) (packets.Packet, error) { + if sub.NoLocal && pk.Origin == cl.ID { + return pk, nil // [MQTT-3.8.3-3] + } + + out := pk.Copy(false) + if !sub.FwdRetainedFlag && ((cl.Properties.ProtocolVersion == 5 && !sub.RetainAsPublished) || cl.Properties.ProtocolVersion < 5) { // ![MQTT-3.3.1-13] [v3 MQTT-3.3.1-9] + out.FixedHeader.Retain = false // [MQTT-3.3.1-12] + } + + if len(sub.Identifiers) > 0 { // [MQTT-3.3.4-3] + out.Properties.SubscriptionIdentifier = []int{} + for _, id := range sub.Identifiers { + out.Properties.SubscriptionIdentifier = append(out.Properties.SubscriptionIdentifier, id) // [MQTT-3.3.4-4] ![MQTT-3.3.4-5] + } + sort.Ints(out.Properties.SubscriptionIdentifier) + } + + if out.FixedHeader.Qos > sub.Qos { + out.FixedHeader.Qos = sub.Qos + } + + if out.FixedHeader.Qos > s.Options.Capabilities.MaximumQos { + out.FixedHeader.Qos = s.Options.Capabilities.MaximumQos // [MQTT-3.2.2-9] + } + + if cl.Properties.Props.TopicAliasMaximum > 0 { + var aliasExists bool + out.Properties.TopicAlias, aliasExists = cl.State.TopicAliases.Outbound.Set(pk.TopicName) + if out.Properties.TopicAlias > 0 { + out.Properties.TopicAliasFlag = true + if aliasExists { + out.TopicName = "" + } + } + } + + if out.FixedHeader.Qos > 0 { + i, err := cl.NextPacketID() // [MQTT-4.3.2-1] [MQTT-4.3.3-1] + if err != nil { + s.hooks.OnPacketIDExhausted(cl, pk) + s.Log.Warn().Err(err).Str("client", cl.ID).Str("listener", cl.Net.Listener).Msg("packet ids exhausted") + return out, packets.ErrQuotaExceeded + } + + out.PacketID = uint16(i) // [MQTT-2.2.1-4] + sentQuota := atomic.LoadInt32(&cl.State.Inflight.sendQuota) + + if ok := cl.State.Inflight.Set(out); ok { // [MQTT-4.3.2-3] [MQTT-4.3.3-3] + atomic.AddInt64(&s.Info.Inflight, 1) + s.hooks.OnQosPublish(cl, out, out.Created, 0) + cl.State.Inflight.DecreaseSendQuota() + } + + if sentQuota == 0 && atomic.LoadInt32(&cl.State.Inflight.maximumSendQuota) > 0 { + out.Expiry = -1 + cl.State.Inflight.Set(out) + return out, nil + } + } + + if cl.Net.Conn == nil || cl.Closed() { + return out, packets.CodeDisconnect + } + + select { + case cl.State.outbound <- &out: + atomic.AddInt32(&cl.State.outboundQty, 1) + 
default: + atomic.AddInt64(&s.Info.MessagesDropped, 1) + cl.ops.hooks.OnPublishDropped(cl, pk) + cl.State.Inflight.Delete(out.PacketID) // packet was dropped due to irregular circumstances, so rollback inflight. + cl.State.Inflight.IncreaseSendQuota() + return out, packets.ErrPendingClientWritesExceeded + } + + return out, nil +} + +func (s *Server) publishRetainedToClient(cl *Client, sub packets.Subscription, existed bool) { + if IsSharedFilter(sub.Filter) { + return // 4.8.2 Non-normative - Shared Subscriptions - No Retained Messages are sent to the Session when it first subscribes. + } + + if sub.RetainHandling == 1 && existed || sub.RetainHandling == 2 { // [MQTT-3.3.1-10] [MQTT-3.3.1-11] + return + } + + sub.FwdRetainedFlag = true + for _, pkv := range s.Topics.Messages(sub.Filter) { // [MQTT-3.8.4-4] + _, err := s.publishToClient(cl, sub, pkv) + if err != nil { + s.Log.Debug().Err(err).Str("client", cl.ID).Str("listener", cl.Net.Listener).Interface("packet", pkv).Msg("failed to publish retained message") + continue + } + s.hooks.OnRetainPublished(cl, pkv) + } +} + +// buildAck builds a standardised ack message for Puback, Pubrec, Pubrel, Pubcomp packets. +func (s *Server) buildAck(packetID uint16, pkt, qos byte, properties packets.Properties, reason packets.Code) packets.Packet { + if s.Options.Capabilities.Compatibilities.NoInheritedPropertiesOnAck { + properties = packets.Properties{} + } + if reason.Code >= packets.ErrUnspecifiedError.Code { + properties.ReasonString = reason.Reason + } + + pk := packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: pkt, + Qos: qos, + }, + PacketID: packetID, // [MQTT-2.2.1-5] + ReasonCode: reason.Code, // [MQTT-3.4.2-1] + Properties: properties, + Created: time.Now().Unix(), + Expiry: time.Now().Unix() + s.Options.Capabilities.MaximumMessageExpiryInterval, + } + + return pk +} + +// processPuback processes a Puback packet, denoting completion of a QOS 1 packet sent from the server. +func (s *Server) processPuback(cl *Client, pk packets.Packet) error { + if _, ok := cl.State.Inflight.Get(pk.PacketID); !ok { + return nil // omit, but would be packets.ErrPacketIdentifierNotFound + } + + if ok := cl.State.Inflight.Delete(pk.PacketID); ok { // [MQTT-4.3.2-5] + cl.State.Inflight.IncreaseSendQuota() + atomic.AddInt64(&s.Info.Inflight, -1) + s.hooks.OnQosComplete(cl, pk) + } + + return nil +} + +// processPubrec processes a Pubrec packet, denoting receipt of a QOS 2 packet sent from the server. +func (s *Server) processPubrec(cl *Client, pk packets.Packet) error { + if _, ok := cl.State.Inflight.Get(pk.PacketID); !ok { // [MQTT-4.3.3-7] [MQTT-4.3.3-13] + return cl.WritePacket(s.buildAck(pk.PacketID, packets.Pubrel, 1, pk.Properties, packets.ErrPacketIdentifierNotFound)) + } + + if pk.ReasonCode >= packets.ErrUnspecifiedError.Code || !pk.ReasonCodeValid() { // [MQTT-4.3.3-4] + if ok := cl.State.Inflight.Delete(pk.PacketID); ok { + atomic.AddInt64(&s.Info.Inflight, -1) + } + cl.ops.hooks.OnQosDropped(cl, pk) + return nil // as per MQTT5 Section 4.13.2 paragraph 2 + } + + ack := s.buildAck(pk.PacketID, packets.Pubrel, 1, pk.Properties, packets.CodeSuccess) // [MQTT-4.3.3-4] ![MQTT-4.3.3-6] + cl.State.Inflight.DecreaseReceiveQuota() // -1 RECV QUOTA + cl.State.Inflight.Set(ack) // [MQTT-4.3.3-5] + return cl.WritePacket(ack) +} + +// processPubrel processes a Pubrel packet, denoting completion of a QOS 2 packet sent from the client. 
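// Illustrative sketch (not part of the vendored file): the outbound QoS chosen
// by publishToClient above is effectively the minimum of the publish QoS, the
// subscription's granted QoS, and the server's MaximumQos capability. As a
// standalone helper:
//
//	func effectiveQos(pubQos, subQos, serverMaxQos byte) byte {
//		qos := pubQos
//		if subQos < qos {
//			qos = subQos
//		}
//		if serverMaxQos < qos {
//			qos = serverMaxQos
//		}
//		return qos
//	}
//
//	// effectiveQos(2, 1, 2) == 1, effectiveQos(1, 2, 0) == 0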
+func (s *Server) processPubrel(cl *Client, pk packets.Packet) error { + if _, ok := cl.State.Inflight.Get(pk.PacketID); !ok { // [MQTT-4.3.3-7] [MQTT-4.3.3-13] + return cl.WritePacket(s.buildAck(pk.PacketID, packets.Pubcomp, 0, pk.Properties, packets.ErrPacketIdentifierNotFound)) + } + + if pk.ReasonCode >= packets.ErrUnspecifiedError.Code || !pk.ReasonCodeValid() { // [MQTT-4.3.3-9] + if ok := cl.State.Inflight.Delete(pk.PacketID); ok { + atomic.AddInt64(&s.Info.Inflight, -1) + } + cl.ops.hooks.OnQosDropped(cl, pk) + return nil + } + + ack := s.buildAck(pk.PacketID, packets.Pubcomp, 0, pk.Properties, packets.CodeSuccess) // [MQTT-4.3.3-11] + cl.State.Inflight.Set(ack) + + err := cl.WritePacket(ack) + if err != nil { + return err + } + + cl.State.Inflight.IncreaseReceiveQuota() // +1 RECV QUOTA + cl.State.Inflight.IncreaseSendQuota() // +1 SENT QUOTA + if ok := cl.State.Inflight.Delete(pk.PacketID); ok { // [MQTT-4.3.3-12] + atomic.AddInt64(&s.Info.Inflight, -1) + s.hooks.OnQosComplete(cl, pk) + } + + return nil +} + +// processPubcomp processes a Pubcomp packet, denoting completion of a QOS 2 packet sent from the server. +func (s *Server) processPubcomp(cl *Client, pk packets.Packet) error { + // regardless of whether the pubcomp is a success or failure, we end the qos flow, delete inflight, and restore the quotas. + cl.State.Inflight.IncreaseReceiveQuota() // +1 RECV QUOTA + cl.State.Inflight.IncreaseSendQuota() // +1 SENT QUOTA + if ok := cl.State.Inflight.Delete(pk.PacketID); ok { + atomic.AddInt64(&s.Info.Inflight, -1) + s.hooks.OnQosComplete(cl, pk) + } + + return nil +} + +// processSubscribe processes a Subscribe packet. +func (s *Server) processSubscribe(cl *Client, pk packets.Packet) error { + pk = s.hooks.OnSubscribe(cl, pk) + code := packets.CodeSuccess + if _, ok := cl.State.Inflight.Get(pk.PacketID); ok { + code = packets.ErrPacketIdentifierInUse + } + + filterExisted := make([]bool, len(pk.Filters)) + reasonCodes := make([]byte, len(pk.Filters)) + for i, sub := range pk.Filters { + if code != packets.CodeSuccess { + reasonCodes[i] = code.Code // NB 3.9.3 Non-normative 0x91 + continue + } else if !IsValidFilter(sub.Filter, false) { + reasonCodes[i] = packets.ErrTopicFilterInvalid.Code + } else if sub.NoLocal && IsSharedFilter(sub.Filter) { + reasonCodes[i] = packets.ErrProtocolViolationInvalidSharedNoLocal.Code // [MQTT-3.8.3-4] + } else if !s.hooks.OnACLCheck(cl, sub.Filter, false) { + reasonCodes[i] = packets.ErrNotAuthorized.Code + if s.Options.Capabilities.Compatibilities.ObscureNotAuthorized { + reasonCodes[i] = packets.ErrUnspecifiedError.Code + } + } else { + isNew := s.Topics.Subscribe(cl.ID, sub) // [MQTT-3.8.4-3] + if isNew { + atomic.AddInt64(&s.Info.Subscriptions, 1) + } + cl.State.Subscriptions.Add(sub.Filter, sub) // [MQTT-3.2.2-10] + + if sub.Qos > s.Options.Capabilities.MaximumQos { + sub.Qos = s.Options.Capabilities.MaximumQos // [MQTT-3.2.2-9] + } + + filterExisted[i] = !isNew + reasonCodes[i] = sub.Qos // [MQTT-3.9.3-1] [MQTT-3.8.4-7] + } + + if reasonCodes[i] > packets.CodeGrantedQos2.Code && cl.Properties.ProtocolVersion < 5 { // MQTT3 + reasonCodes[i] = packets.ErrUnspecifiedError.Code + } + } + + ack := packets.Packet{ // [MQTT-3.8.4-1] [MQTT-3.8.4-5] + FixedHeader: packets.FixedHeader{ + Type: packets.Suback, + }, + PacketID: pk.PacketID, // [MQTT-2.2.1-6] [MQTT-3.8.4-2] + ReasonCodes: reasonCodes, // [MQTT-3.8.4-6] + Properties: packets.Properties{ + User: pk.Properties.User, + }, + } + + if code.Code >= packets.ErrUnspecifiedError.Code { + 
ack.Properties.ReasonString = code.Reason + } + + s.hooks.OnSubscribed(cl, pk, reasonCodes) + err := cl.WritePacket(ack) + if err != nil { + return err + } + + for i, sub := range pk.Filters { // [MQTT-3.3.1-9] + if reasonCodes[i] >= packets.ErrUnspecifiedError.Code { + continue + } + + s.publishRetainedToClient(cl, sub, filterExisted[i]) + } + + return nil +} + +// processUnsubscribe processes an unsubscribe packet. +func (s *Server) processUnsubscribe(cl *Client, pk packets.Packet) error { + code := packets.CodeSuccess + if _, ok := cl.State.Inflight.Get(pk.PacketID); ok { + code = packets.ErrPacketIdentifierInUse + } + + pk = s.hooks.OnUnsubscribe(cl, pk) + reasonCodes := make([]byte, len(pk.Filters)) + for i, sub := range pk.Filters { // [MQTT-3.10.4-6] [MQTT-3.11.3-1] + if code != packets.CodeSuccess { + reasonCodes[i] = code.Code // NB 3.11.3 Non-normative 0x91 + continue + } + + if q := s.Topics.Unsubscribe(sub.Filter, cl.ID); q { + atomic.AddInt64(&s.Info.Subscriptions, -1) + reasonCodes[i] = packets.CodeSuccess.Code + } else { + reasonCodes[i] = packets.CodeNoSubscriptionExisted.Code + } + + cl.State.Subscriptions.Delete(sub.Filter) // [MQTT-3.10.4-2] [MQTT-3.10.4-2] ~[MQTT-3.10.4-3] + } + + ack := packets.Packet{ // [MQTT-3.10.4-4] + FixedHeader: packets.FixedHeader{ + Type: packets.Unsuback, + }, + PacketID: pk.PacketID, // [MQTT-2.2.1-6] [MQTT-3.10.4-5] + ReasonCodes: reasonCodes, // [MQTT-3.11.3-2] + Properties: packets.Properties{ + User: pk.Properties.User, + }, + } + + if code.Code >= packets.ErrUnspecifiedError.Code { + ack.Properties.ReasonString = code.Reason + } + + s.hooks.OnUnsubscribed(cl, pk) + return cl.WritePacket(ack) +} + +// UnsubscribeClient unsubscribes a client from all of their subscriptions. +func (s *Server) UnsubscribeClient(cl *Client) { + i := 0 + filterMap := cl.State.Subscriptions.GetAll() + filters := make([]packets.Subscription, len(filterMap)) + for k := range filterMap { + cl.State.Subscriptions.Delete(k) + } + + if atomic.LoadUint32(&cl.State.isTakenOver) == 1 { + return + } + + for k, v := range filterMap { + if s.Topics.Unsubscribe(k, cl.ID) { + atomic.AddInt64(&s.Info.Subscriptions, -1) + } + filters[i] = v + i++ + } + s.hooks.OnUnsubscribed(cl, packets.Packet{FixedHeader: packets.FixedHeader{Type: packets.Unsubscribe}, Filters: filters}) +} + +// processAuth processes an Auth packet. +func (s *Server) processAuth(cl *Client, pk packets.Packet) error { + _, err := s.hooks.OnAuthPacket(cl, pk) + if err != nil { + return err + } + + return nil +} + +// processDisconnect processes a Disconnect packet. +func (s *Server) processDisconnect(cl *Client, pk packets.Packet) error { + if pk.Properties.SessionExpiryIntervalFlag { + if pk.Properties.SessionExpiryInterval > 0 && cl.Properties.Props.SessionExpiryInterval == 0 { + return packets.ErrProtocolViolationZeroNonZeroExpiry + } + + cl.Properties.Props.SessionExpiryInterval = pk.Properties.SessionExpiryInterval + cl.Properties.Props.SessionExpiryIntervalFlag = true + } + + s.loop.willDelayed.Delete(cl.ID) // [MQTT-3.1.3-9] [MQTT-3.1.2-8] + cl.Stop(packets.CodeDisconnect) // [MQTT-3.14.4-2] + + return nil +} + +// DisconnectClient sends a Disconnect packet to a client and then closes the client connection. 
+func (s *Server) DisconnectClient(cl *Client, code packets.Code) error { + out := packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: packets.Disconnect, + }, + ReasonCode: code.Code, + Properties: packets.Properties{}, + } + + if code.Code >= packets.ErrUnspecifiedError.Code { + out.Properties.ReasonString = code.Reason // // [MQTT-3.14.2-1] + } + + // We already have a code we are using to disconnect the client, so we are not + // interested if the write packet fails due to a closed connection (as we are closing it). + err := cl.WritePacket(out) + if !s.Options.Capabilities.Compatibilities.PassiveClientDisconnect { + cl.Stop(code) + if code.Code >= packets.ErrUnspecifiedError.Code { + return code + } + } + + return err +} + +// publishSysTopics publishes the current values to the server $SYS topics. +// Due to the int to string conversions this method is not as cheap as +// some of the others so the publishing interval should be set appropriately. +func (s *Server) publishSysTopics() { + pk := packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: packets.Publish, + Retain: true, + }, + Created: time.Now().Unix(), + } + + var m runtime.MemStats + runtime.ReadMemStats(&m) + atomic.StoreInt64(&s.Info.MemoryAlloc, int64(m.HeapInuse)) + atomic.StoreInt64(&s.Info.Threads, int64(runtime.NumGoroutine())) + atomic.StoreInt64(&s.Info.Time, time.Now().Unix()) + atomic.StoreInt64(&s.Info.Uptime, time.Now().Unix()-atomic.LoadInt64(&s.Info.Started)) + atomic.StoreInt64(&s.Info.ClientsTotal, int64(s.Clients.Len())) + atomic.StoreInt64(&s.Info.ClientsDisconnected, atomic.LoadInt64(&s.Info.ClientsTotal)-atomic.LoadInt64(&s.Info.ClientsConnected)) + + topics := map[string]string{ + SysPrefix + "/broker/version": s.Info.Version, + SysPrefix + "/broker/time": AtomicItoa(&s.Info.Time), + SysPrefix + "/broker/uptime": AtomicItoa(&s.Info.Uptime), + SysPrefix + "/broker/started": AtomicItoa(&s.Info.Started), + SysPrefix + "/broker/load/bytes/received": AtomicItoa(&s.Info.BytesReceived), + SysPrefix + "/broker/load/bytes/sent": AtomicItoa(&s.Info.BytesSent), + SysPrefix + "/broker/clients/connected": AtomicItoa(&s.Info.ClientsConnected), + SysPrefix + "/broker/clients/disconnected": AtomicItoa(&s.Info.ClientsDisconnected), + SysPrefix + "/broker/clients/maximum": AtomicItoa(&s.Info.ClientsMaximum), + SysPrefix + "/broker/clients/total": AtomicItoa(&s.Info.ClientsTotal), + SysPrefix + "/broker/packets/received": AtomicItoa(&s.Info.PacketsReceived), + SysPrefix + "/broker/packets/sent": AtomicItoa(&s.Info.PacketsSent), + SysPrefix + "/broker/messages/received": AtomicItoa(&s.Info.MessagesReceived), + SysPrefix + "/broker/messages/sent": AtomicItoa(&s.Info.MessagesSent), + SysPrefix + "/broker/messages/dropped": AtomicItoa(&s.Info.MessagesDropped), + SysPrefix + "/broker/messages/inflight": AtomicItoa(&s.Info.Inflight), + SysPrefix + "/broker/retained": AtomicItoa(&s.Info.Retained), + SysPrefix + "/broker/subscriptions": AtomicItoa(&s.Info.Subscriptions), + SysPrefix + "/broker/system/memory": AtomicItoa(&s.Info.MemoryAlloc), + SysPrefix + "/broker/system/threads": AtomicItoa(&s.Info.Threads), + } + + for topic, payload := range topics { + pk.TopicName = topic + pk.Payload = []byte(payload) + s.Topics.RetainMessage(pk.Copy(false)) + s.publishToSubscribers(pk) + } + + s.hooks.OnSysInfoTick(s.Info) +} + +// Close attempts to gracefully shut down the server, all listeners, clients, and stores. 
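// Illustrative sketch (not part of the vendored file): forcibly disconnecting
// a connected client from the embedding application using Clients.Get and
// DisconnectClient above. The client id and reason code are arbitrary
// examples.
//
//	if cl, ok := server.Clients.Get("client-to-evict"); ok {
//		_ = server.DisconnectClient(cl, packets.ErrNotAuthorized)
//	}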
+func (s *Server) Close() error { + close(s.done) + s.Listeners.CloseAll(s.closeListenerClients) + s.hooks.OnStopped() + s.hooks.Stop() + + s.Log.Info().Msg("mochi mqtt server stopped") + return nil +} + +// closeListenerClients closes all clients on the specified listener. +func (s *Server) closeListenerClients(listener string) { + clients := s.Clients.GetByListener(listener) + for _, cl := range clients { + s.DisconnectClient(cl, packets.ErrServerShuttingDown) + } +} + +// sendLWT issues an LWT message to a topic when a client disconnects. +func (s *Server) sendLWT(cl *Client) { + if atomic.LoadUint32(&cl.Properties.Will.Flag) == 0 { + return + } + + modifiedLWT := s.hooks.OnWill(cl, cl.Properties.Will) + + pk := packets.Packet{ + FixedHeader: packets.FixedHeader{ + Type: packets.Publish, + Retain: modifiedLWT.Retain, // [MQTT-3.1.2-14] [MQTT-3.1.2-15] + Qos: modifiedLWT.Qos, + }, + TopicName: modifiedLWT.TopicName, + Payload: modifiedLWT.Payload, + Properties: packets.Properties{ + User: modifiedLWT.User, + }, + Origin: cl.ID, + Created: time.Now().Unix(), + } + + if cl.Properties.Will.WillDelayInterval > 0 { + pk.Connect.WillProperties.WillDelayInterval = cl.Properties.Will.WillDelayInterval + pk.Expiry = time.Now().Unix() + int64(pk.Connect.WillProperties.WillDelayInterval) + s.loop.willDelayed.Add(cl.ID, pk) + return + } + + if pk.FixedHeader.Retain { + s.retainMessage(cl, pk) + } + + s.publishToSubscribers(pk) // [MQTT-3.1.2-8] + atomic.StoreUint32(&cl.Properties.Will.Flag, 0) // [MQTT-3.1.2-10] + s.hooks.OnWillSent(cl, pk) +} + +// readStore reads in any data from the persistent datastore (if applicable). +func (s *Server) readStore() error { + if s.hooks.Provides(StoredClients) { + clients, err := s.hooks.StoredClients() + if err != nil { + return fmt.Errorf("failed to load clients; %w", err) + } + s.loadClients(clients) + s.Log.Debug(). + Int("len", len(clients)). + Msg("loaded clients from store") + } + + if s.hooks.Provides(StoredSubscriptions) { + subs, err := s.hooks.StoredSubscriptions() + if err != nil { + return fmt.Errorf("load subscriptions; %w", err) + } + s.loadSubscriptions(subs) + s.Log.Debug(). + Int("len", len(subs)). + Msg("loaded subscriptions from store") + } + + if s.hooks.Provides(StoredInflightMessages) { + inflight, err := s.hooks.StoredInflightMessages() + if err != nil { + return fmt.Errorf("load inflight; %w", err) + } + s.loadInflight(inflight) + s.Log.Debug(). + Int("len", len(inflight)). + Msg("loaded inflights from store") + } + + if s.hooks.Provides(StoredRetainedMessages) { + retained, err := s.hooks.StoredRetainedMessages() + if err != nil { + return fmt.Errorf("load retained; %w", err) + } + s.loadRetained(retained) + s.Log.Debug(). + Int("len", len(retained)). + Msg("loaded retained messages from store") + } + + if s.hooks.Provides(StoredSysInfo) { + sysInfo, err := s.hooks.StoredSysInfo() + if err != nil { + return fmt.Errorf("load server info; %w", err) + } + s.loadServerInfo(sysInfo.Info) + s.Log.Debug(). + Msg("loaded $SYS info from store") + } + + return nil +} + +// loadServerInfo restores server info from the datastore. 
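// Illustrative sketch (not part of the vendored file): a typical embedding
// pattern is to run the broker until an interrupt signal, then call Close to
// stop the listeners, disconnect remaining clients, and stop the hooks.
//
//	sigs := make(chan os.Signal, 1)
//	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
//
//	if err := server.Serve(); err != nil {
//		log.Fatal(err)
//	}
//	<-sigs
//	_ = server.Close()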
+func (s *Server) loadServerInfo(v system.Info) { + if s.Options.Capabilities.Compatibilities.RestoreSysInfoOnRestart { + atomic.StoreInt64(&s.Info.BytesReceived, v.BytesReceived) + atomic.StoreInt64(&s.Info.BytesSent, v.BytesSent) + atomic.StoreInt64(&s.Info.ClientsMaximum, v.ClientsMaximum) + atomic.StoreInt64(&s.Info.ClientsTotal, v.ClientsTotal) + atomic.StoreInt64(&s.Info.ClientsDisconnected, v.ClientsDisconnected) + atomic.StoreInt64(&s.Info.MessagesReceived, v.MessagesReceived) + atomic.StoreInt64(&s.Info.MessagesSent, v.MessagesSent) + atomic.StoreInt64(&s.Info.MessagesDropped, v.MessagesDropped) + atomic.StoreInt64(&s.Info.PacketsReceived, v.PacketsReceived) + atomic.StoreInt64(&s.Info.PacketsSent, v.PacketsSent) + atomic.StoreInt64(&s.Info.InflightDropped, v.InflightDropped) + } + atomic.StoreInt64(&s.Info.Retained, v.Retained) + atomic.StoreInt64(&s.Info.Inflight, v.Inflight) + atomic.StoreInt64(&s.Info.Subscriptions, v.Subscriptions) +} + +// loadSubscriptions restores subscriptions from the datastore. +func (s *Server) loadSubscriptions(v []storage.Subscription) { + for _, sub := range v { + sb := packets.Subscription{ + Filter: sub.Filter, + RetainHandling: sub.RetainHandling, + Qos: sub.Qos, + RetainAsPublished: sub.RetainAsPublished, + NoLocal: sub.NoLocal, + Identifier: sub.Identifier, + } + if s.Topics.Subscribe(sub.Client, sb) { + if cl, ok := s.Clients.Get(sub.Client); ok { + cl.State.Subscriptions.Add(sub.Filter, sb) + } + } + } +} + +// loadClients restores clients from the datastore. +func (s *Server) loadClients(v []storage.Client) { + for _, c := range v { + cl := s.NewClient(nil, c.Listener, c.ID, false) + cl.Properties.Username = c.Username + cl.Properties.Clean = c.Clean + cl.Properties.ProtocolVersion = c.ProtocolVersion + cl.Properties.Props = packets.Properties{ + SessionExpiryInterval: c.Properties.SessionExpiryInterval, + SessionExpiryIntervalFlag: c.Properties.SessionExpiryIntervalFlag, + AuthenticationMethod: c.Properties.AuthenticationMethod, + AuthenticationData: c.Properties.AuthenticationData, + RequestProblemInfoFlag: c.Properties.RequestProblemInfoFlag, + RequestProblemInfo: c.Properties.RequestProblemInfo, + RequestResponseInfo: c.Properties.RequestResponseInfo, + ReceiveMaximum: c.Properties.ReceiveMaximum, + TopicAliasMaximum: c.Properties.TopicAliasMaximum, + User: c.Properties.User, + MaximumPacketSize: c.Properties.MaximumPacketSize, + } + cl.Properties.Will = Will(c.Will) + s.Clients.Add(cl) + } +} + +// loadInflight restores inflight messages from the datastore. +func (s *Server) loadInflight(v []storage.Message) { + for _, msg := range v { + if client, ok := s.Clients.Get(msg.Origin); ok { + client.State.Inflight.Set(msg.ToPacket()) + } + } +} + +// loadRetained restores retained messages from the datastore. +func (s *Server) loadRetained(v []storage.Message) { + for _, msg := range v { + s.Topics.RetainMessage(msg.ToPacket()) + } +} + +// clearExpiredClients deletes all clients which have been disconnected for longer +// than their given expiry intervals. 
+func (s *Server) clearExpiredClients(dt int64) { + for id, client := range s.Clients.GetAll() { + disconnected := atomic.LoadInt64(&client.State.disconnected) + if disconnected == 0 { + continue + } + + expire := s.Options.Capabilities.MaximumSessionExpiryInterval + if client.Properties.ProtocolVersion == 5 && client.Properties.Props.SessionExpiryIntervalFlag { + expire = client.Properties.Props.SessionExpiryInterval + } + + if disconnected+int64(expire) < dt { + s.hooks.OnClientExpired(client) + s.Clients.Delete(id) // [MQTT-4.1.0-2] + } + } +} + +// clearExpiredRetainedMessage deletes retained messages from topics if they have expired. +func (s *Server) clearExpiredRetainedMessages(now int64) { + for filter, pk := range s.Topics.Retained.GetAll() { + if (pk.Expiry > 0 && pk.Expiry < now) || pk.Created+s.Options.Capabilities.MaximumMessageExpiryInterval < now { + s.Topics.Retained.Delete(filter) + s.hooks.OnRetainedExpired(filter) + } + } +} + +// clearExpiredInflights deletes any inflight messages which have expired. +func (s *Server) clearExpiredInflights(now int64) { + for _, client := range s.Clients.GetAll() { + if deleted := client.ClearInflights(now, s.Options.Capabilities.MaximumMessageExpiryInterval); len(deleted) > 0 { + for _, id := range deleted { + s.hooks.OnQosDropped(client, packets.Packet{PacketID: id}) + } + } + } +} + +// sendDelayedLWT sends any LWT messages which have reached their issue time. +func (s *Server) sendDelayedLWT(dt int64) { + for id, pk := range s.loop.willDelayed.GetAll() { + if dt > pk.Expiry { + s.publishToSubscribers(pk) // [MQTT-3.1.2-8] + if cl, ok := s.Clients.Get(id); ok { + if pk.FixedHeader.Retain { + s.retainMessage(cl, pk) + } + cl.Properties.Will = Will{} // [MQTT-3.1.2-10] + s.hooks.OnWillSent(cl, pk) + } + s.loop.willDelayed.Delete(id) + } + } +} + +// AtomicItoa converts an int64 point to a string. +func AtomicItoa(ptr *int64) string { + return strconv.FormatInt(atomic.LoadInt64(ptr), 10) +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/system/system.go b/vendor/github.com/mochi-mqtt/server/v2/system/system.go new file mode 100644 index 00000000..2ed47d0c --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/system/system.go @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2022 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package system + +import "sync/atomic" + +// Info contains atomic counters and values for various server statistics +// commonly found in $SYS topics (and others). 
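// Illustrative sketch (not part of the vendored file): the counters held on
// Info are updated atomically by the broker, so read them with atomic loads
// (or copy the whole struct with its Clone method) rather than directly;
// AtomicItoa above is the broker's own formatting helper.
//
//	connected := atomic.LoadInt64(&server.Info.ClientsConnected)
//	fmt.Printf("connected clients: %d, uptime: %s seconds\n",
//		connected, mqtt.AtomicItoa(&server.Info.Uptime))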
+// based on https://github.com/mqtt/mqtt.org/wiki/SYS-Topics +type Info struct { + Version string `json:"version"` // the current version of the server + Started int64 `json:"started"` // the time the server started in unix seconds + Time int64 `json:"time"` // current time on the server + Uptime int64 `json:"uptime"` // the number of seconds the server has been online + BytesReceived int64 `json:"bytes_received"` // total number of bytes received since the broker started + BytesSent int64 `json:"bytes_sent"` // total number of bytes sent since the broker started + ClientsConnected int64 `json:"clients_connected"` // number of currently connected clients + ClientsDisconnected int64 `json:"clients_disconnected"` // total number of persistent clients (with clean session disabled) that are registered at the broker but are currently disconnected + ClientsMaximum int64 `json:"clients_maximum"` // maximum number of active clients that have been connected + ClientsTotal int64 `json:"clients_total"` // total number of connected and disconnected clients with a persistent session currently connected and registered + MessagesReceived int64 `json:"messages_received"` // total number of publish messages received + MessagesSent int64 `json:"messages_sent"` // total number of publish messages sent + MessagesDropped int64 `json:"messages_dropped"` // total number of publish messages dropped to slow subscriber + Retained int64 `json:"retained"` // total number of retained messages active on the broker + Inflight int64 `json:"inflight"` // the number of messages currently in-flight + InflightDropped int64 `json:"inflight_dropped"` // the number of inflight messages which were dropped + Subscriptions int64 `json:"subscriptions"` // total number of subscriptions active on the broker + PacketsReceived int64 `json:"packets_received"` // the total number of publish messages received + PacketsSent int64 `json:"packets_sent"` // total number of messages of any type sent since the broker started + MemoryAlloc int64 `json:"memory_alloc"` // memory currently allocated + Threads int64 `json:"threads"` // number of active goroutines, named as threads for platform ambiguity +} + +// Clone makes a copy of Info using atomic operation +func (i *Info) Clone() *Info { + return &Info{ + Version: i.Version, + Started: atomic.LoadInt64(&i.Started), + Time: atomic.LoadInt64(&i.Time), + Uptime: atomic.LoadInt64(&i.Uptime), + BytesReceived: atomic.LoadInt64(&i.BytesReceived), + BytesSent: atomic.LoadInt64(&i.BytesSent), + ClientsConnected: atomic.LoadInt64(&i.ClientsConnected), + ClientsMaximum: atomic.LoadInt64(&i.ClientsMaximum), + ClientsTotal: atomic.LoadInt64(&i.ClientsTotal), + ClientsDisconnected: atomic.LoadInt64(&i.ClientsDisconnected), + MessagesReceived: atomic.LoadInt64(&i.MessagesReceived), + MessagesSent: atomic.LoadInt64(&i.MessagesSent), + MessagesDropped: atomic.LoadInt64(&i.MessagesDropped), + Retained: atomic.LoadInt64(&i.Retained), + Inflight: atomic.LoadInt64(&i.Inflight), + InflightDropped: atomic.LoadInt64(&i.InflightDropped), + Subscriptions: atomic.LoadInt64(&i.Subscriptions), + PacketsReceived: atomic.LoadInt64(&i.PacketsReceived), + PacketsSent: atomic.LoadInt64(&i.PacketsSent), + MemoryAlloc: atomic.LoadInt64(&i.MemoryAlloc), + Threads: atomic.LoadInt64(&i.Threads), + } +} diff --git a/vendor/github.com/mochi-mqtt/server/v2/topics.go b/vendor/github.com/mochi-mqtt/server/v2/topics.go new file mode 100644 index 00000000..be7c9a39 --- /dev/null +++ b/vendor/github.com/mochi-mqtt/server/v2/topics.go @@ 
-0,0 +1,707 @@ +// SPDX-License-Identifier: MIT +// SPDX-FileCopyrightText: 2023 mochi-mqtt, mochi-co +// SPDX-FileContributor: mochi-co + +package mqtt + +import ( + "strings" + "sync" + "sync/atomic" + + "github.com/mochi-mqtt/server/v2/packets" +) + +var ( + SharePrefix = "$SHARE" // the prefix indicating a share topic + SysPrefix = "$SYS" // the prefix indicating a system info topic +) + +// TopicAliases contains inbound and outbound topic alias registrations. +type TopicAliases struct { + Inbound *InboundTopicAliases + Outbound *OutboundTopicAliases +} + +// NewTopicAliases returns an instance of TopicAliases. +func NewTopicAliases(topicAliasMaximum uint16) TopicAliases { + return TopicAliases{ + Inbound: NewInboundTopicAliases(topicAliasMaximum), + Outbound: NewOutboundTopicAliases(topicAliasMaximum), + } +} + +// NewInboundTopicAliases returns a pointer to InboundTopicAliases. +func NewInboundTopicAliases(topicAliasMaximum uint16) *InboundTopicAliases { + return &InboundTopicAliases{ + maximum: topicAliasMaximum, + internal: map[uint16]string{}, + } +} + +// InboundTopicAliases contains a map of topic aliases received from the client. +type InboundTopicAliases struct { + internal map[uint16]string + sync.RWMutex + maximum uint16 +} + +// Set sets a new alias for a specific topic. +func (a *InboundTopicAliases) Set(id uint16, topic string) string { + a.Lock() + defer a.Unlock() + + if a.maximum == 0 { + return topic // ? + } + + if existing, ok := a.internal[id]; ok && topic == "" { + return existing + } + + a.internal[id] = topic + return topic +} + +// OutboundTopicAliases contains a map of topic aliases sent from the broker to the client. +type OutboundTopicAliases struct { + internal map[string]uint16 + sync.RWMutex + cursor uint32 + maximum uint16 +} + +// NewOutboundTopicAliases returns a pointer to OutboundTopicAliases. +func NewOutboundTopicAliases(topicAliasMaximum uint16) *OutboundTopicAliases { + return &OutboundTopicAliases{ + maximum: topicAliasMaximum, + internal: map[string]uint16{}, + } +} + +// Set sets a new topic alias for a topic and returns the alias value, and a boolean +// indicating if the alias already existed. +func (a *OutboundTopicAliases) Set(topic string) (uint16, bool) { + a.Lock() + defer a.Unlock() + + if a.maximum == 0 { + return 0, false + } + + if i, ok := a.internal[topic]; ok { + return i, true + } + + i := atomic.LoadUint32(&a.cursor) + if i+1 > uint32(a.maximum) { + // if i+1 > math.MaxUint16 { + return 0, false + } + + a.internal[topic] = uint16(i) + 1 + atomic.StoreUint32(&a.cursor, i+1) + return uint16(i) + 1, false +} + +// SharedSubscriptions contains a map of subscriptions to a shared filter, +// keyed on share group then client id. +type SharedSubscriptions struct { + internal map[string]map[string]packets.Subscription + sync.RWMutex +} + +// NewSharedSubscriptions returns a new instance of Subscriptions. +func NewSharedSubscriptions() *SharedSubscriptions { + return &SharedSubscriptions{ + internal: map[string]map[string]packets.Subscription{}, + } +} + +// Add creates a new shared subscription for a group and client id pair. +func (s *SharedSubscriptions) Add(group, id string, val packets.Subscription) { + s.Lock() + defer s.Unlock() + if _, ok := s.internal[group]; !ok { + s.internal[group] = map[string]packets.Subscription{} + } + s.internal[group][id] = val +} + +// Delete deletes a client id from a shared subscription group. 
+func (s *SharedSubscriptions) Delete(group, id string) { + s.Lock() + defer s.Unlock() + delete(s.internal[group], id) + if len(s.internal[group]) == 0 { + delete(s.internal, group) + } +} + +// Get returns the subscription properties for a client id in a share group, if one exists. +func (s *SharedSubscriptions) Get(group, id string) (val packets.Subscription, ok bool) { + s.RLock() + defer s.RUnlock() + if _, ok := s.internal[group]; !ok { + return val, ok + } + + val, ok = s.internal[group][id] + return val, ok +} + +// GroupLen returns the number of groups subscribed to the filter. +func (s *SharedSubscriptions) GroupLen() int { + s.RLock() + defer s.RUnlock() + val := len(s.internal) + return val +} + +// Len returns the total number of shared subscriptions to a filter across all groups. +func (s *SharedSubscriptions) Len() int { + s.RLock() + defer s.RUnlock() + n := 0 + for _, group := range s.internal { + n += len(group) + } + return n +} + +// GetAll returns all shared subscription groups and their subscriptions. +func (s *SharedSubscriptions) GetAll() map[string]map[string]packets.Subscription { + s.RLock() + defer s.RUnlock() + m := map[string]map[string]packets.Subscription{} + for group, subs := range s.internal { + if _, ok := m[group]; !ok { + m[group] = map[string]packets.Subscription{} + } + + for id, sub := range subs { + m[group][id] = sub + } + } + return m +} + +// Subscriptions is a map of subscriptions keyed on client. +type Subscriptions struct { + internal map[string]packets.Subscription + sync.RWMutex +} + +// NewSubscriptions returns a new instance of Subscriptions. +func NewSubscriptions() *Subscriptions { + return &Subscriptions{ + internal: map[string]packets.Subscription{}, + } +} + +// Add adds a new subscription for a client. ID can be a filter in the +// case this map is client state, or a client id if particle state. +func (s *Subscriptions) Add(id string, val packets.Subscription) { + s.Lock() + defer s.Unlock() + s.internal[id] = val +} + +// GetAll returns all subscriptions. +func (s *Subscriptions) GetAll() map[string]packets.Subscription { + s.RLock() + defer s.RUnlock() + m := map[string]packets.Subscription{} + for k, v := range s.internal { + m[k] = v + } + return m +} + +// Get returns a subscriptions for a specific client or filter id. +func (s *Subscriptions) Get(id string) (val packets.Subscription, ok bool) { + s.RLock() + defer s.RUnlock() + val, ok = s.internal[id] + return val, ok +} + +// Len returns the number of subscriptions. +func (s *Subscriptions) Len() int { + s.RLock() + defer s.RUnlock() + val := len(s.internal) + return val +} + +// Delete removes a subscription by client or filter id. +func (s *Subscriptions) Delete(id string) { + s.Lock() + defer s.Unlock() + delete(s.internal, id) +} + +// ClientSubscriptions is a map of aggregated subscriptions for a client. +type ClientSubscriptions map[string]packets.Subscription + +// Subscribers contains the shared and non-shared subscribers matching a topic. +type Subscribers struct { + Shared map[string]map[string]packets.Subscription + SharedSelected map[string]packets.Subscription + Subscriptions map[string]packets.Subscription +} + +// SelectShared returns one subscriber for each shared subscription group. 
+func (s *Subscribers) SelectShared() { + s.SharedSelected = map[string]packets.Subscription{} + for _, subs := range s.Shared { + for client, sub := range subs { + cls, ok := s.SharedSelected[client] + if !ok { + cls = sub + } + + s.SharedSelected[client] = cls.Merge(sub) + break + } + } +} + +// MergeSharedSelected merges the selected subscribers for a shared subscription group +// and the non-shared subscribers, to ensure that no subscriber gets multiple messages +// due to have both types of subscription matching the same filter. +func (s *Subscribers) MergeSharedSelected() { + for client, sub := range s.SharedSelected { + cls, ok := s.Subscriptions[client] + if !ok { + cls = sub + } + + s.Subscriptions[client] = cls.Merge(sub) + } +} + +// TopicsIndex is a prefix/trie tree containing topic subscribers and retained messages. +type TopicsIndex struct { + Retained *packets.Packets + root *particle // a leaf containing a message and more leaves. +} + +// NewTopicsIndex returns a pointer to a new instance of Index. +func NewTopicsIndex() *TopicsIndex { + return &TopicsIndex{ + Retained: packets.NewPackets(), + root: &particle{ + particles: newParticles(), + subscriptions: NewSubscriptions(), + }, + } +} + +// Subscribe adds a new subscription for a client to a topic filter, returning +// true if the subscription was new. +func (x *TopicsIndex) Subscribe(client string, subscription packets.Subscription) bool { + x.root.Lock() + defer x.root.Unlock() + + var existed bool + prefix, _ := isolateParticle(subscription.Filter, 0) + if strings.EqualFold(prefix, SharePrefix) { + group, _ := isolateParticle(subscription.Filter, 1) + n := x.set(subscription.Filter, 2) + _, existed = n.shared.Get(group, client) + n.shared.Add(group, client, subscription) + } else { + n := x.set(subscription.Filter, 0) + _, existed = n.subscriptions.Get(client) + n.subscriptions.Add(client, subscription) + } + + return !existed +} + +// Unsubscribe removes a subscription filter for a client, returning true if the +// subscription existed. +func (x *TopicsIndex) Unsubscribe(filter, client string) bool { + x.root.Lock() + defer x.root.Unlock() + + var d int + prefix, _ := isolateParticle(filter, 0) + shareSub := strings.EqualFold(prefix, SharePrefix) + if shareSub { + d = 2 + } + + particle := x.seek(filter, d) + if particle == nil { + return false + } + + if shareSub { + group, _ := isolateParticle(filter, 1) + particle.shared.Delete(group, client) + } else { + particle.subscriptions.Delete(client) + } + + x.trim(particle) + return true +} + +// RetainMessage saves a message payload to the end of a topic address. Returns +// 1 if a retained message was added, and -1 if the retained message was removed. +// 0 is returned if sequential empty payloads are received. +func (x *TopicsIndex) RetainMessage(pk packets.Packet) int64 { + x.root.Lock() + defer x.root.Unlock() + + n := x.set(pk.TopicName, 0) + n.Lock() + defer n.Unlock() + if len(pk.Payload) > 0 { + n.retainPath = pk.TopicName + x.Retained.Add(pk.TopicName, pk) + return 1 + } + + var out int64 + if pke, ok := x.Retained.Get(pk.TopicName); ok && len(pke.Payload) > 0 && pke.FixedHeader.Retain { + out = -1 // if a retained packet existed, return -1 + } + + n.retainPath = "" + x.Retained.Delete(pk.TopicName) // [MQTT-3.3.1-6] [MQTT-3.3.1-7] + x.trim(n) + + return out +} + +// set creates a topic address in the index and returns the final particle. 
+func (x *TopicsIndex) set(topic string, d int) *particle { + var key string + var hasNext = true + n := x.root + for hasNext { + key, hasNext = isolateParticle(topic, d) + d++ + + p := n.particles.get(key) + if p == nil { + p = newParticle(key, n) + n.particles.add(p) + } + n = p + } + + return n +} + +// seek finds the particle at a specific index in a topic filter. +func (x *TopicsIndex) seek(filter string, d int) *particle { + var key string + var hasNext = true + n := x.root + for hasNext { + key, hasNext = isolateParticle(filter, d) + n = n.particles.get(key) + d++ + if n == nil { + return nil + } + } + + return n +} + +// trim removes empty filter particles from the index. +func (x *TopicsIndex) trim(n *particle) { + for n.parent != nil && n.retainPath == "" && n.particles.len()+n.subscriptions.Len()+n.shared.Len() == 0 { + key := n.key + n = n.parent + n.particles.delete(key) + } +} + +// Messages returns a slice of any retained messages which match a filter. +func (x *TopicsIndex) Messages(filter string) []packets.Packet { + return x.scanMessages(filter, 0, nil, []packets.Packet{}) +} + +// scanMessages returns all retained messages on topics matching a given filter. +func (x *TopicsIndex) scanMessages(filter string, d int, n *particle, pks []packets.Packet) []packets.Packet { + if n == nil { + n = x.root + } + + if len(filter) == 0 || x.Retained.Len() == 0 { + return pks + } + + if !strings.ContainsRune(filter, '#') && !strings.ContainsRune(filter, '+') { + if pk, ok := x.Retained.Get(filter); ok { + pks = append(pks, pk) + } + return pks + } + + key, hasNext := isolateParticle(filter, d) + if key == "+" || key == "#" || d == -1 { + for _, adjacent := range n.particles.getAll() { + if d == 0 && adjacent.key == SysPrefix { + continue + } + + if !hasNext { + if adjacent.retainPath != "" { + if pk, ok := x.Retained.Get(adjacent.retainPath); ok { + pks = append(pks, pk) + } + } + } + + if hasNext || (d >= 0 && key == "#") { + pks = x.scanMessages(filter, d+1, adjacent, pks) + } + } + return pks + } + + if particle := n.particles.get(key); particle != nil { + if hasNext { + return x.scanMessages(filter, d+1, particle, pks) + } + + if pk, ok := x.Retained.Get(particle.retainPath); ok { + pks = append(pks, pk) + } + } + + return pks +} + +// Subscribers returns a map of clients who are subscribed to matching filters, +// their subscription ids and highest qos. +func (x *TopicsIndex) Subscribers(topic string) *Subscribers { + return x.scanSubscribers(topic, 0, nil, &Subscribers{ + Shared: map[string]map[string]packets.Subscription{}, + SharedSelected: map[string]packets.Subscription{}, + Subscriptions: map[string]packets.Subscription{}, + }) +} + +// scanSubscribers returns a list of client subscriptions matching an indexed topic address. 
+func (x *TopicsIndex) scanSubscribers(topic string, d int, n *particle, subs *Subscribers) *Subscribers { + if n == nil { + n = x.root + } + + if len(topic) == 0 { + return subs + } + + key, hasNext := isolateParticle(topic, d) + for _, partKey := range []string{key, "+"} { + if particle := n.particles.get(partKey); particle != nil { // [MQTT-3.3.2-3] + if hasNext { + x.scanSubscribers(topic, d+1, particle, subs) + } else { + x.gatherSubscriptions(topic, particle, subs) + x.gatherSharedSubscriptions(particle, subs) + + if wild := particle.particles.get("#"); wild != nil && partKey != "+" { + x.gatherSubscriptions(topic, wild, subs) // also match any subs where filter/# is filter as per 4.7.1.2 + x.gatherSharedSubscriptions(wild, subs) + } + } + } + } + + if particle := n.particles.get("#"); particle != nil { + x.gatherSubscriptions(topic, particle, subs) + x.gatherSharedSubscriptions(particle, subs) + } + + return subs +} + +// gatherSubscriptions collects any matching subscriptions, and gathers any identifiers or highest qos values. +func (x *TopicsIndex) gatherSubscriptions(topic string, particle *particle, subs *Subscribers) { + if subs.Subscriptions == nil { + subs.Subscriptions = map[string]packets.Subscription{} + } + + for client, sub := range particle.subscriptions.GetAll() { + if len(sub.Filter) > 0 && topic[0] == '$' && (sub.Filter[0] == '+' || sub.Filter[0] == '#') { // don't match $ topics with top level wildcards [MQTT-4.7.1-1] [MQTT-4.7.1-2] + continue + } + + cls, ok := subs.Subscriptions[client] + if !ok { + cls = sub + } + + subs.Subscriptions[client] = cls.Merge(sub) + } +} + +// gatherSharedSubscriptions gathers all shared subscriptions for a particle. +func (x *TopicsIndex) gatherSharedSubscriptions(particle *particle, subs *Subscribers) { + if subs.Shared == nil { + subs.Shared = map[string]map[string]packets.Subscription{} + } + + for _, shares := range particle.shared.GetAll() { + for client, sub := range shares { + if _, ok := subs.Shared[sub.Filter]; !ok { + subs.Shared[sub.Filter] = map[string]packets.Subscription{} + } + + subs.Shared[sub.Filter][client] = sub + } + } +} + +// isolateParticle extracts a particle between d / and d+1 / without allocations. +func isolateParticle(filter string, d int) (particle string, hasNext bool) { + var next, end int + for i := 0; end > -1 && i <= d; i++ { + end = strings.IndexRune(filter, '/') + + switch { + case d > -1 && i == d && end > -1: + hasNext = true + particle = filter[next:end] + case end > -1: + hasNext = false + filter = filter[end+1:] + default: + hasNext = false + particle = filter[next:] + } + } + + return +} + +// IsSharedFilter returns true if the filter uses the share prefix. +func IsSharedFilter(filter string) bool { + prefix, _ := isolateParticle(filter, 0) + return strings.EqualFold(prefix, SharePrefix) +} + +// IsValidFilter returns true if the filter is valid. +func IsValidFilter(filter string, forPublish bool) bool { + if !forPublish && len(filter) == 0 { // publishing can accept zero-length topic filter if topic alias exists, so we don't enforce for publihs. + return false // [MQTT-4.7.3-1] + } + + if forPublish { + if len(filter) >= len(SysPrefix) && strings.EqualFold(filter[0:len(SysPrefix)], SysPrefix) { + // 4.7.2 Non-normative - The Server SHOULD prevent Clients from using such Topic Names [$SYS] to exchange messages with other Clients. 
+ return false + } + + if strings.ContainsRune(filter, '+') || strings.ContainsRune(filter, '#') { + return false //[MQTT-3.3.2-2] + } + } + + wildhash := strings.IndexRune(filter, '#') + if wildhash >= 0 && wildhash != len(filter)-1 { // [MQTT-4.7.1-2] + return false + } + + prefix, hasNext := isolateParticle(filter, 0) + if !hasNext && strings.EqualFold(prefix, SharePrefix) { + return false // [MQTT-4.8.2-1] + } + + if hasNext && strings.EqualFold(prefix, SharePrefix) { + group, hasNext := isolateParticle(filter, 1) + if !hasNext { + return false // [MQTT-4.8.2-1] + } + + if strings.ContainsRune(group, '+') || strings.ContainsRune(group, '#') { + return false // [MQTT-4.8.2-2] + } + } + + return true +} + +// particle is a child node on the tree. +type particle struct { + key string // the key of the particle + parent *particle // a pointer to the parent of the particle + particles particles // a map of child particles + subscriptions *Subscriptions // a map of subscriptions made by clients to this ending address + shared *SharedSubscriptions // a map of shared subscriptions keyed on group name + retainPath string // path of a retained message + sync.Mutex // mutex for when making changes to the particle +} + +// newParticle returns a pointer to a new instance of particle. +func newParticle(key string, parent *particle) *particle { + return &particle{ + key: key, + parent: parent, + particles: newParticles(), + subscriptions: NewSubscriptions(), + shared: NewSharedSubscriptions(), + } +} + +// particles is a concurrency safe map of particles. +type particles struct { + internal map[string]*particle + sync.RWMutex +} + +// newParticles returns a map of particles. +func newParticles() particles { + return particles{ + internal: map[string]*particle{}, + } +} + +// add adds a new particle. +func (p *particles) add(val *particle) { + p.Lock() + p.internal[val.key] = val + p.Unlock() +} + +// getAll returns all particles. +func (p *particles) getAll() map[string]*particle { + p.RLock() + defer p.RUnlock() + m := map[string]*particle{} + for k, v := range p.internal { + m[k] = v + } + return m +} + +// get returns a particle by id (key). +func (p *particles) get(id string) *particle { + p.RLock() + defer p.RUnlock() + return p.internal[id] +} + +// len returns the number of particles. +func (p *particles) len() int { + p.RLock() + defer p.RUnlock() + val := len(p.internal) + return val +} + +// delete removes a particle. +func (p *particles) delete(id string) { + p.Lock() + defer p.Unlock() + delete(p.internal, id) +} diff --git a/vendor/github.com/rs/xid/.appveyor.yml b/vendor/github.com/rs/xid/.appveyor.yml new file mode 100644 index 00000000..c73bb33b --- /dev/null +++ b/vendor/github.com/rs/xid/.appveyor.yml @@ -0,0 +1,27 @@ +version: 1.0.0.{build} + +platform: x64 + +branches: + only: + - master + +clone_folder: c:\gopath\src\github.com\rs\xid + +environment: + GOPATH: c:\gopath + +install: + - echo %PATH% + - echo %GOPATH% + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version + - go env + - go get -t . 
+ +build_script: + - go build + +test_script: + - go test + diff --git a/vendor/github.com/rs/xid/.travis.yml b/vendor/github.com/rs/xid/.travis.yml new file mode 100644 index 00000000..b37da159 --- /dev/null +++ b/vendor/github.com/rs/xid/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: +- "1.9" +- "1.10" +- "master" +matrix: + allow_failures: + - go: "master" diff --git a/vendor/github.com/rs/xid/LICENSE b/vendor/github.com/rs/xid/LICENSE new file mode 100644 index 00000000..47c5e9d2 --- /dev/null +++ b/vendor/github.com/rs/xid/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2015 Olivier Poitrey + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is furnished +to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/rs/xid/README.md b/vendor/github.com/rs/xid/README.md new file mode 100644 index 00000000..5bf462e8 --- /dev/null +++ b/vendor/github.com/rs/xid/README.md @@ -0,0 +1,116 @@ +# Globally Unique ID Generator + +[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/xid) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/xid/master/LICENSE) [![Build Status](https://travis-ci.org/rs/xid.svg?branch=master)](https://travis-ci.org/rs/xid) [![Coverage](http://gocover.io/_badge/github.com/rs/xid)](http://gocover.io/github.com/rs/xid) + +Package xid is a globally unique id generator library, ready to safely be used directly in your server code. + +Xid uses the Mongo Object ID algorithm to generate globally unique ids with a different serialization (base64) to make it shorter when transported as a string: +https://docs.mongodb.org/manual/reference/object-id/ + +- 4-byte value representing the seconds since the Unix epoch, +- 3-byte machine identifier, +- 2-byte process id, and +- 3-byte counter, starting with a random value. + +The binary representation of the id is compatible with Mongo 12 bytes Object IDs. +The string representation is using base32 hex (w/o padding) for better space efficiency +when stored in that form (20 bytes). The hex variant of base32 is used to retain the +sortable property of the id. + +Xid doesn't use base64 because case sensitivity and the 2 non alphanum chars may be an +issue when transported as a string between various systems. Base36 wasn't retained either +because 1/ it's not standard 2/ the resulting size is not predictable (not bit aligned) +and 3/ it would not remain sortable. To validate a base32 `xid`, expect a 20 chars long, +all lowercase sequence of `a` to `v` letters and `0` to `9` numbers (`[0-9a-v]{20}`). 
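+
+For a quick end-to-end sketch of how those pieces fit together (the printed values are
+illustrative, not real output), an id can be generated, round-tripped through its string
+form with `FromString`, and its embedded fields read back with the accessor methods:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/rs/xid"
+)
+
+func main() {
+	id := xid.New()
+	s := id.String() // 20-char base32hex string, e.g. "9m4e2mr0ui3e8a215n4g"
+
+	// Parse the string form back into an ID and inspect the embedded fields.
+	parsed, err := xid.FromString(s)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(parsed.Time())    // 4-byte timestamp (1-second precision)
+	fmt.Println(parsed.Machine()) // 3-byte machine identifier
+	fmt.Println(parsed.Pid())     // 2-byte process id
+	fmt.Println(parsed.Counter()) // 3-byte counter
+}
+```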
+ +UUIDs are 16 bytes (128 bits) and 36 chars as string representation. Twitter Snowflake +ids are 8 bytes (64 bits) but require machine/data-center configuration and/or central +generator servers. xid stands in between with 12 bytes (96 bits) and a more compact +URL-safe string representation (20 chars). No configuration or central generator server +is required so it can be used directly in server's code. + +| Name | Binary Size | String Size | Features +|-------------|-------------|----------------|---------------- +| [UUID] | 16 bytes | 36 chars | configuration free, not sortable +| [shortuuid] | 16 bytes | 22 chars | configuration free, not sortable +| [Snowflake] | 8 bytes | up to 20 chars | needs machine/DC configuration, needs central server, sortable +| [MongoID] | 12 bytes | 24 chars | configuration free, sortable +| xid | 12 bytes | 20 chars | configuration free, sortable + +[UUID]: https://en.wikipedia.org/wiki/Universally_unique_identifier +[shortuuid]: https://github.com/stochastic-technologies/shortuuid +[Snowflake]: https://blog.twitter.com/2010/announcing-snowflake +[MongoID]: https://docs.mongodb.org/manual/reference/object-id/ + +Features: + +- Size: 12 bytes (96 bits), smaller than UUID, larger than snowflake +- Base32 hex encoded by default (20 chars when transported as printable string, still sortable) +- Non configured, you don't need set a unique machine and/or data center id +- K-ordered +- Embedded time with 1 second precision +- Unicity guaranteed for 16,777,216 (24 bits) unique ids per second and per host/process +- Lock-free (i.e.: unlike UUIDv1 and v2) + +Best used with [zerolog](https://github.com/rs/zerolog)'s +[RequestIDHandler](https://godoc.org/github.com/rs/zerolog/hlog#RequestIDHandler). + +Notes: + +- Xid is dependent on the system time, a monotonic counter and so is not cryptographically secure. If unpredictability of IDs is important, you should not use Xids. It is worth noting that most other UUID-like implementations are also not cryptographically secure. You should use libraries that rely on cryptographically secure sources (like /dev/urandom on unix, crypto/rand in golang), if you want a truly random ID generator. + +References: + +- http://www.slideshare.net/davegardnerisme/unique-id-generation-in-distributed-systems +- https://en.wikipedia.org/wiki/Universally_unique_identifier +- https://blog.twitter.com/2010/announcing-snowflake +- Python port by [Graham Abbott](https://github.com/graham): https://github.com/graham/python_xid +- Scala port by [Egor Kolotaev](https://github.com/kolotaev): https://github.com/kolotaev/ride +- Rust port by [Jérôme Renard](https://github.com/jeromer/): https://github.com/jeromer/libxid +- Ruby port by [Valar](https://github.com/valarpirai/): https://github.com/valarpirai/ruby_xid +- Java port by [0xShamil](https://github.com/0xShamil/): https://github.com/0xShamil/java-xid +- Dart port by [Peter Bwire](https://github.com/pitabwire): https://pub.dev/packages/xid + +## Install + + go get github.com/rs/xid + +## Usage + +```go +guid := xid.New() + +println(guid.String()) +// Output: 9m4e2mr0ui3e8a215n4g +``` + +Get `xid` embedded info: + +```go +guid.Machine() +guid.Pid() +guid.Time() +guid.Counter() +``` + +## Benchmark + +Benchmark against Go [Maxim Bublis](https://github.com/satori)'s [UUID](https://github.com/satori/go.uuid). 
+ +``` +BenchmarkXID 20000000 91.1 ns/op 32 B/op 1 allocs/op +BenchmarkXID-2 20000000 55.9 ns/op 32 B/op 1 allocs/op +BenchmarkXID-4 50000000 32.3 ns/op 32 B/op 1 allocs/op +BenchmarkUUIDv1 10000000 204 ns/op 48 B/op 1 allocs/op +BenchmarkUUIDv1-2 10000000 160 ns/op 48 B/op 1 allocs/op +BenchmarkUUIDv1-4 10000000 195 ns/op 48 B/op 1 allocs/op +BenchmarkUUIDv4 1000000 1503 ns/op 64 B/op 2 allocs/op +BenchmarkUUIDv4-2 1000000 1427 ns/op 64 B/op 2 allocs/op +BenchmarkUUIDv4-4 1000000 1452 ns/op 64 B/op 2 allocs/op +``` + +Note: UUIDv1 requires a global lock, hence the performance degradation as we add more CPUs. + +## Licenses + +All source code is licensed under the [MIT License](https://raw.github.com/rs/xid/master/LICENSE). diff --git a/vendor/github.com/rs/xid/error.go b/vendor/github.com/rs/xid/error.go new file mode 100644 index 00000000..ea253749 --- /dev/null +++ b/vendor/github.com/rs/xid/error.go @@ -0,0 +1,11 @@ +package xid + +const ( + // ErrInvalidID is returned when trying to unmarshal an invalid ID. + ErrInvalidID strErr = "xid: invalid ID" +) + +// strErr allows declaring errors as constants. +type strErr string + +func (err strErr) Error() string { return string(err) } diff --git a/vendor/github.com/rs/xid/hostid_darwin.go b/vendor/github.com/rs/xid/hostid_darwin.go new file mode 100644 index 00000000..08351ff7 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_darwin.go @@ -0,0 +1,9 @@ +// +build darwin + +package xid + +import "syscall" + +func readPlatformMachineID() (string, error) { + return syscall.Sysctl("kern.uuid") +} diff --git a/vendor/github.com/rs/xid/hostid_fallback.go b/vendor/github.com/rs/xid/hostid_fallback.go new file mode 100644 index 00000000..7fbd3c00 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_fallback.go @@ -0,0 +1,9 @@ +// +build !darwin,!linux,!freebsd,!windows + +package xid + +import "errors" + +func readPlatformMachineID() (string, error) { + return "", errors.New("not implemented") +} diff --git a/vendor/github.com/rs/xid/hostid_freebsd.go b/vendor/github.com/rs/xid/hostid_freebsd.go new file mode 100644 index 00000000..be25a039 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_freebsd.go @@ -0,0 +1,9 @@ +// +build freebsd + +package xid + +import "syscall" + +func readPlatformMachineID() (string, error) { + return syscall.Sysctl("kern.hostuuid") +} diff --git a/vendor/github.com/rs/xid/hostid_linux.go b/vendor/github.com/rs/xid/hostid_linux.go new file mode 100644 index 00000000..837b2043 --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_linux.go @@ -0,0 +1,13 @@ +// +build linux + +package xid + +import "io/ioutil" + +func readPlatformMachineID() (string, error) { + b, err := ioutil.ReadFile("/etc/machine-id") + if err != nil || len(b) == 0 { + b, err = ioutil.ReadFile("/sys/class/dmi/id/product_uuid") + } + return string(b), err +} diff --git a/vendor/github.com/rs/xid/hostid_windows.go b/vendor/github.com/rs/xid/hostid_windows.go new file mode 100644 index 00000000..ec2593ee --- /dev/null +++ b/vendor/github.com/rs/xid/hostid_windows.go @@ -0,0 +1,38 @@ +// +build windows + +package xid + +import ( + "fmt" + "syscall" + "unsafe" +) + +func readPlatformMachineID() (string, error) { + // source: https://github.com/shirou/gopsutil/blob/master/host/host_syscall.go + var h syscall.Handle + err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, syscall.StringToUTF16Ptr(`SOFTWARE\Microsoft\Cryptography`), 0, syscall.KEY_READ|syscall.KEY_WOW64_64KEY, &h) + if err != nil { + return "", err + } + defer syscall.RegCloseKey(h) + + const 
syscallRegBufLen = 74 // len(`{`) + len(`abcdefgh-1234-456789012-123345456671` * 2) + len(`}`) // 2 == bytes/UTF16 + const uuidLen = 36 + + var regBuf [syscallRegBufLen]uint16 + bufLen := uint32(syscallRegBufLen) + var valType uint32 + err = syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr(`MachineGuid`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen) + if err != nil { + return "", err + } + + hostID := syscall.UTF16ToString(regBuf[:]) + hostIDLen := len(hostID) + if hostIDLen != uuidLen { + return "", fmt.Errorf("HostID incorrect: %q\n", hostID) + } + + return hostID, nil +} diff --git a/vendor/github.com/rs/xid/id.go b/vendor/github.com/rs/xid/id.go new file mode 100644 index 00000000..1f536b41 --- /dev/null +++ b/vendor/github.com/rs/xid/id.go @@ -0,0 +1,392 @@ +// Package xid is a globally unique id generator suited for web scale +// +// Xid is using Mongo Object ID algorithm to generate globally unique ids: +// https://docs.mongodb.org/manual/reference/object-id/ +// +// - 4-byte value representing the seconds since the Unix epoch, +// - 3-byte machine identifier, +// - 2-byte process id, and +// - 3-byte counter, starting with a random value. +// +// The binary representation of the id is compatible with Mongo 12 bytes Object IDs. +// The string representation is using base32 hex (w/o padding) for better space efficiency +// when stored in that form (20 bytes). The hex variant of base32 is used to retain the +// sortable property of the id. +// +// Xid doesn't use base64 because case sensitivity and the 2 non alphanum chars may be an +// issue when transported as a string between various systems. Base36 wasn't retained either +// because 1/ it's not standard 2/ the resulting size is not predictable (not bit aligned) +// and 3/ it would not remain sortable. To validate a base32 `xid`, expect a 20 chars long, +// all lowercase sequence of `a` to `v` letters and `0` to `9` numbers (`[0-9a-v]{20}`). +// +// UUID is 16 bytes (128 bits), snowflake is 8 bytes (64 bits), xid stands in between +// with 12 bytes with a more compact string representation ready for the web and no +// required configuration or central generation server. +// +// Features: +// +// - Size: 12 bytes (96 bits), smaller than UUID, larger than snowflake +// - Base32 hex encoded by default (16 bytes storage when transported as printable string) +// - Non configured, you don't need set a unique machine and/or data center id +// - K-ordered +// - Embedded time with 1 second precision +// - Unicity guaranteed for 16,777,216 (24 bits) unique ids per second and per host/process +// +// Best used with xlog's RequestIDHandler (https://godoc.org/github.com/rs/xlog#RequestIDHandler). +// +// References: +// +// - http://www.slideshare.net/davegardnerisme/unique-id-generation-in-distributed-systems +// - https://en.wikipedia.org/wiki/Universally_unique_identifier +// - https://blog.twitter.com/2010/announcing-snowflake +package xid + +import ( + "bytes" + "crypto/md5" + "crypto/rand" + "database/sql/driver" + "encoding/binary" + "fmt" + "hash/crc32" + "io/ioutil" + "os" + "sort" + "sync/atomic" + "time" + "unsafe" +) + +// Code inspired from mgo/bson ObjectId + +// ID represents a unique request id +type ID [rawLen]byte + +const ( + encodedLen = 20 // string encoded len + rawLen = 12 // binary raw len + + // encoding stores a custom version of the base32 encoding with lower case + // letters. 
+ encoding = "0123456789abcdefghijklmnopqrstuv" +) + +var ( + // objectIDCounter is atomically incremented when generating a new ObjectId + // using NewObjectId() function. It's used as a counter part of an id. + // This id is initialized with a random value. + objectIDCounter = randInt() + + // machineId stores machine id generated once and used in subsequent calls + // to NewObjectId function. + machineID = readMachineID() + + // pid stores the current process id + pid = os.Getpid() + + nilID ID + + // dec is the decoding map for base32 encoding + dec [256]byte +) + +func init() { + for i := 0; i < len(dec); i++ { + dec[i] = 0xFF + } + for i := 0; i < len(encoding); i++ { + dec[encoding[i]] = byte(i) + } + + // If /proc/self/cpuset exists and is not /, we can assume that we are in a + // form of container and use the content of cpuset xor-ed with the PID in + // order get a reasonable machine global unique PID. + b, err := ioutil.ReadFile("/proc/self/cpuset") + if err == nil && len(b) > 1 { + pid ^= int(crc32.ChecksumIEEE(b)) + } +} + +// readMachineId generates machine id and puts it into the machineId global +// variable. If this function fails to get the hostname, it will cause +// a runtime error. +func readMachineID() []byte { + id := make([]byte, 3) + hid, err := readPlatformMachineID() + if err != nil || len(hid) == 0 { + hid, err = os.Hostname() + } + if err == nil && len(hid) != 0 { + hw := md5.New() + hw.Write([]byte(hid)) + copy(id, hw.Sum(nil)) + } else { + // Fallback to rand number if machine id can't be gathered + if _, randErr := rand.Reader.Read(id); randErr != nil { + panic(fmt.Errorf("xid: cannot get hostname nor generate a random number: %v; %v", err, randErr)) + } + } + return id +} + +// randInt generates a random uint32 +func randInt() uint32 { + b := make([]byte, 3) + if _, err := rand.Reader.Read(b); err != nil { + panic(fmt.Errorf("xid: cannot generate random number: %v;", err)) + } + return uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]) +} + +// New generates a globally unique ID +func New() ID { + return NewWithTime(time.Now()) +} + +// NewWithTime generates a globally unique ID with the passed in time +func NewWithTime(t time.Time) ID { + var id ID + // Timestamp, 4 bytes, big endian + binary.BigEndian.PutUint32(id[:], uint32(t.Unix())) + // Machine, first 3 bytes of md5(hostname) + id[4] = machineID[0] + id[5] = machineID[1] + id[6] = machineID[2] + // Pid, 2 bytes, specs don't specify endianness, but we use big endian. + id[7] = byte(pid >> 8) + id[8] = byte(pid) + // Increment, 3 bytes, big endian + i := atomic.AddUint32(&objectIDCounter, 1) + id[9] = byte(i >> 16) + id[10] = byte(i >> 8) + id[11] = byte(i) + return id +} + +// FromString reads an ID from its string representation +func FromString(id string) (ID, error) { + i := &ID{} + err := i.UnmarshalText([]byte(id)) + return *i, err +} + +// String returns a base32 hex lowercased with no padding representation of the id (char set is 0-9, a-v). +func (id ID) String() string { + text := make([]byte, encodedLen) + encode(text, id[:]) + return *(*string)(unsafe.Pointer(&text)) +} + +// Encode encodes the id using base32 encoding, writing 20 bytes to dst and return it. 
+func (id ID) Encode(dst []byte) []byte { + encode(dst, id[:]) + return dst +} + +// MarshalText implements encoding/text TextMarshaler interface +func (id ID) MarshalText() ([]byte, error) { + text := make([]byte, encodedLen) + encode(text, id[:]) + return text, nil +} + +// MarshalJSON implements encoding/json Marshaler interface +func (id ID) MarshalJSON() ([]byte, error) { + if id.IsNil() { + return []byte("null"), nil + } + text := make([]byte, encodedLen+2) + encode(text[1:encodedLen+1], id[:]) + text[0], text[encodedLen+1] = '"', '"' + return text, nil +} + +// encode by unrolling the stdlib base32 algorithm + removing all safe checks +func encode(dst, id []byte) { + _ = dst[19] + _ = id[11] + + dst[19] = encoding[(id[11]<<4)&0x1F] + dst[18] = encoding[(id[11]>>1)&0x1F] + dst[17] = encoding[(id[11]>>6)&0x1F|(id[10]<<2)&0x1F] + dst[16] = encoding[id[10]>>3] + dst[15] = encoding[id[9]&0x1F] + dst[14] = encoding[(id[9]>>5)|(id[8]<<3)&0x1F] + dst[13] = encoding[(id[8]>>2)&0x1F] + dst[12] = encoding[id[8]>>7|(id[7]<<1)&0x1F] + dst[11] = encoding[(id[7]>>4)&0x1F|(id[6]<<4)&0x1F] + dst[10] = encoding[(id[6]>>1)&0x1F] + dst[9] = encoding[(id[6]>>6)&0x1F|(id[5]<<2)&0x1F] + dst[8] = encoding[id[5]>>3] + dst[7] = encoding[id[4]&0x1F] + dst[6] = encoding[id[4]>>5|(id[3]<<3)&0x1F] + dst[5] = encoding[(id[3]>>2)&0x1F] + dst[4] = encoding[id[3]>>7|(id[2]<<1)&0x1F] + dst[3] = encoding[(id[2]>>4)&0x1F|(id[1]<<4)&0x1F] + dst[2] = encoding[(id[1]>>1)&0x1F] + dst[1] = encoding[(id[1]>>6)&0x1F|(id[0]<<2)&0x1F] + dst[0] = encoding[id[0]>>3] +} + +// UnmarshalText implements encoding/text TextUnmarshaler interface +func (id *ID) UnmarshalText(text []byte) error { + if len(text) != encodedLen { + return ErrInvalidID + } + for _, c := range text { + if dec[c] == 0xFF { + return ErrInvalidID + } + } + if !decode(id, text) { + return ErrInvalidID + } + return nil +} + +// UnmarshalJSON implements encoding/json Unmarshaler interface +func (id *ID) UnmarshalJSON(b []byte) error { + s := string(b) + if s == "null" { + *id = nilID + return nil + } + // Check the slice length to prevent panic on passing it to UnmarshalText() + if len(b) < 2 { + return ErrInvalidID + } + return id.UnmarshalText(b[1 : len(b)-1]) +} + +// decode by unrolling the stdlib base32 algorithm + customized safe check. +func decode(id *ID, src []byte) bool { + _ = src[19] + _ = id[11] + + id[11] = dec[src[17]]<<6 | dec[src[18]]<<1 | dec[src[19]]>>4 + id[10] = dec[src[16]]<<3 | dec[src[17]]>>2 + id[9] = dec[src[14]]<<5 | dec[src[15]] + id[8] = dec[src[12]]<<7 | dec[src[13]]<<2 | dec[src[14]]>>3 + id[7] = dec[src[11]]<<4 | dec[src[12]]>>1 + id[6] = dec[src[9]]<<6 | dec[src[10]]<<1 | dec[src[11]]>>4 + id[5] = dec[src[8]]<<3 | dec[src[9]]>>2 + id[4] = dec[src[6]]<<5 | dec[src[7]] + id[3] = dec[src[4]]<<7 | dec[src[5]]<<2 | dec[src[6]]>>3 + id[2] = dec[src[3]]<<4 | dec[src[4]]>>1 + id[1] = dec[src[1]]<<6 | dec[src[2]]<<1 | dec[src[3]]>>4 + id[0] = dec[src[0]]<<3 | dec[src[1]]>>2 + + // Validate that there are no discarer bits (padding) in src that would + // cause the string-encoded id not to equal src. + var check [4]byte + + check[3] = encoding[(id[11]<<4)&0x1F] + check[2] = encoding[(id[11]>>1)&0x1F] + check[1] = encoding[(id[11]>>6)&0x1F|(id[10]<<2)&0x1F] + check[0] = encoding[id[10]>>3] + return bytes.Equal([]byte(src[16:20]), check[:]) +} + +// Time returns the timestamp part of the id. +// It's a runtime error to call this method with an invalid id. 
+func (id ID) Time() time.Time { + // First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch. + secs := int64(binary.BigEndian.Uint32(id[0:4])) + return time.Unix(secs, 0) +} + +// Machine returns the 3-byte machine id part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ID) Machine() []byte { + return id[4:7] +} + +// Pid returns the process id part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ID) Pid() uint16 { + return binary.BigEndian.Uint16(id[7:9]) +} + +// Counter returns the incrementing value part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ID) Counter() int32 { + b := id[9:12] + // Counter is stored as big-endian 3-byte value + return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2])) +} + +// Value implements the driver.Valuer interface. +func (id ID) Value() (driver.Value, error) { + if id.IsNil() { + return nil, nil + } + b, err := id.MarshalText() + return string(b), err +} + +// Scan implements the sql.Scanner interface. +func (id *ID) Scan(value interface{}) (err error) { + switch val := value.(type) { + case string: + return id.UnmarshalText([]byte(val)) + case []byte: + return id.UnmarshalText(val) + case nil: + *id = nilID + return nil + default: + return fmt.Errorf("xid: scanning unsupported type: %T", value) + } +} + +// IsNil Returns true if this is a "nil" ID +func (id ID) IsNil() bool { + return id == nilID +} + +// NilID returns a zero value for `xid.ID`. +func NilID() ID { + return nilID +} + +// Bytes returns the byte array representation of `ID` +func (id ID) Bytes() []byte { + return id[:] +} + +// FromBytes convert the byte array representation of `ID` back to `ID` +func FromBytes(b []byte) (ID, error) { + var id ID + if len(b) != rawLen { + return id, ErrInvalidID + } + copy(id[:], b) + return id, nil +} + +// Compare returns an integer comparing two IDs. It behaves just like `bytes.Compare`. +// The result will be 0 if two IDs are identical, -1 if current id is less than the other one, +// and 1 if current id is greater than the other. +func (id ID) Compare(other ID) int { + return bytes.Compare(id[:], other[:]) +} + +type sorter []ID + +func (s sorter) Len() int { + return len(s) +} + +func (s sorter) Less(i, j int) bool { + return s[i].Compare(s[j]) < 0 +} + +func (s sorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Sort sorts an array of IDs inplace. +// It works by wrapping `[]ID` and use `sort.Sort`. 
+func Sort(ids []ID) { + sort.Sort(sorter(ids)) +} diff --git a/vendor/github.com/rs/zerolog/.gitignore b/vendor/github.com/rs/zerolog/.gitignore new file mode 100644 index 00000000..8ebe58b1 --- /dev/null +++ b/vendor/github.com/rs/zerolog/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +tmp + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/rs/zerolog/CNAME b/vendor/github.com/rs/zerolog/CNAME new file mode 100644 index 00000000..9ce57a6e --- /dev/null +++ b/vendor/github.com/rs/zerolog/CNAME @@ -0,0 +1 @@ +zerolog.io \ No newline at end of file diff --git a/vendor/github.com/rs/zerolog/LICENSE b/vendor/github.com/rs/zerolog/LICENSE new file mode 100644 index 00000000..677e07f7 --- /dev/null +++ b/vendor/github.com/rs/zerolog/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Olivier Poitrey + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/rs/zerolog/README.md b/vendor/github.com/rs/zerolog/README.md new file mode 100644 index 00000000..95666b32 --- /dev/null +++ b/vendor/github.com/rs/zerolog/README.md @@ -0,0 +1,716 @@ +# Zero Allocation JSON Logger + +[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/zerolog) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/zerolog/master/LICENSE) [![Build Status](https://travis-ci.org/rs/zerolog.svg?branch=master)](https://travis-ci.org/rs/zerolog) [![Coverage](http://gocover.io/_badge/github.com/rs/zerolog)](http://gocover.io/github.com/rs/zerolog) + +The zerolog package provides a fast and simple logger dedicated to JSON output. + +Zerolog's API is designed to provide both a great developer experience and stunning [performance](#benchmarks). Its unique chaining API allows zerolog to write JSON (or CBOR) log events by avoiding allocations and reflection. + +Uber's [zap](https://godoc.org/go.uber.org/zap) library pioneered this approach. Zerolog is taking this concept to the next level with a simpler to use API and even better performance. + +To keep the code base and the API simple, zerolog focuses on efficient structured logging only. Pretty logging on the console is made possible using the provided (but inefficient) [`zerolog.ConsoleWriter`](#pretty-logging). 
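+
+A minimal sketch of that chaining API (the field names here are illustrative; fuller
+examples follow below):
+
+```go
+package main
+
+import (
+	"os"
+
+	"github.com/rs/zerolog"
+)
+
+func main() {
+	logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
+
+	// Each field is added with a typed method on the chain; Msg terminates the event.
+	logger.Info().
+		Str("component", "readme").
+		Int("attempt", 1).
+		Msg("chained, typed fields")
+}
+```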
+ +![Pretty Logging Image](pretty.png) + +## Who uses zerolog + +Find out [who uses zerolog](https://github.com/rs/zerolog/wiki/Who-uses-zerolog) and add your company / project to the list. + +## Features + +* [Blazing fast](#benchmarks) +* [Low to zero allocation](#benchmarks) +* [Leveled logging](#leveled-logging) +* [Sampling](#log-sampling) +* [Hooks](#hooks) +* [Contextual fields](#contextual-logging) +* `context.Context` integration +* [Integration with `net/http`](#integration-with-nethttp) +* [JSON and CBOR encoding formats](#binary-encoding) +* [Pretty logging for development](#pretty-logging) +* [Error Logging (with optional Stacktrace)](#error-logging) + +## Installation + +```bash +go get -u github.com/rs/zerolog/log +``` + +## Getting Started + +### Simple Logging Example + +For simple logging, import the global logger package **github.com/rs/zerolog/log** + +```go +package main + +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + // UNIX Time is faster and smaller than most timestamps + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + log.Print("hello world") +} + +// Output: {"time":1516134303,"level":"debug","message":"hello world"} +``` +> Note: By default log writes to `os.Stderr` +> Note: The default log level for `log.Print` is *debug* + +### Contextual Logging + +**zerolog** allows data to be added to log messages in the form of key:value pairs. The data added to the message adds "context" about the log event that can be critical for debugging as well as myriad other purposes. An example of this is below: + +```go +package main + +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + log.Debug(). + Str("Scale", "833 cents"). + Float64("Interval", 833.09). + Msg("Fibonacci is everywhere") + + log.Debug(). + Str("Name", "Tom"). + Send() +} + +// Output: {"level":"debug","Scale":"833 cents","Interval":833.09,"time":1562212768,"message":"Fibonacci is everywhere"} +// Output: {"level":"debug","Name":"Tom","time":1562212768} +``` + +> You'll note in the above example that when adding contextual fields, the fields are strongly typed. You can find the full list of supported fields [here](#standard-types) + +### Leveled Logging + +#### Simple Leveled Logging Example + +```go +package main + +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + log.Info().Msg("hello world") +} + +// Output: {"time":1516134303,"level":"info","message":"hello world"} +``` + +> It is very important to note that when using the **zerolog** chaining API, as shown above (`log.Info().Msg("hello world"`), the chain must have either the `Msg` or `Msgf` method call. If you forget to add either of these, the log will not occur and there is no compile time error to alert you of this. + +**zerolog** allows for logging at the following levels (from highest to lowest): + +* panic (`zerolog.PanicLevel`, 5) +* fatal (`zerolog.FatalLevel`, 4) +* error (`zerolog.ErrorLevel`, 3) +* warn (`zerolog.WarnLevel`, 2) +* info (`zerolog.InfoLevel`, 1) +* debug (`zerolog.DebugLevel`, 0) +* trace (`zerolog.TraceLevel`, -1) + +You can set the Global logging level to any of these options using the `SetGlobalLevel` function in the zerolog package, passing in one of the given constants above, e.g. `zerolog.InfoLevel` would be the "info" level. 
Whichever level is chosen, all logs with a level greater than or equal to that level will be written. To turn off logging entirely, pass the `zerolog.Disabled` constant. + +#### Setting Global Log Level + +This example uses command-line flags to demonstrate various outputs depending on the chosen log level. + +```go +package main + +import ( + "flag" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + debug := flag.Bool("debug", false, "sets log level to debug") + + flag.Parse() + + // Default level for this example is info, unless debug flag is present + zerolog.SetGlobalLevel(zerolog.InfoLevel) + if *debug { + zerolog.SetGlobalLevel(zerolog.DebugLevel) + } + + log.Debug().Msg("This message appears only when log level set to Debug") + log.Info().Msg("This message appears when log level set to Debug or Info") + + if e := log.Debug(); e.Enabled() { + // Compute log output only if enabled. + value := "bar" + e.Str("foo", value).Msg("some debug message") + } +} +``` + +Info Output (no flag) + +```bash +$ ./logLevelExample +{"time":1516387492,"level":"info","message":"This message appears when log level set to Debug or Info"} +``` + +Debug Output (debug flag set) + +```bash +$ ./logLevelExample -debug +{"time":1516387573,"level":"debug","message":"This message appears only when log level set to Debug"} +{"time":1516387573,"level":"info","message":"This message appears when log level set to Debug or Info"} +{"time":1516387573,"level":"debug","foo":"bar","message":"some debug message"} +``` + +#### Logging without Level or Message + +You may choose to log without a specific level by using the `Log` method. You may also write without a message by setting an empty string in the `msg string` parameter of the `Msg` method. Both are demonstrated in the example below. + +```go +package main + +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + log.Log(). + Str("foo", "bar"). + Msg("") +} + +// Output: {"time":1494567715,"foo":"bar"} +``` + +### Error Logging + +You can log errors using the `Err` method + +```go +package main + +import ( + "errors" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + err := errors.New("seems we have an error here") + log.Error().Err(err).Msg("") +} + +// Output: {"level":"error","error":"seems we have an error here","time":1609085256} +``` + +> The default field name for errors is `error`, you can change this by setting `zerolog.ErrorFieldName` to meet your needs. + +#### Error Logging with Stacktrace + +Using `github.com/pkg/errors`, you can add a formatted stacktrace to your errors. 
+ +```go +package main + +import ( + "github.com/pkg/errors" + "github.com/rs/zerolog/pkgerrors" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack + + err := outer() + log.Error().Stack().Err(err).Msg("") +} + +func inner() error { + return errors.New("seems we have an error here") +} + +func middle() error { + err := inner() + if err != nil { + return err + } + return nil +} + +func outer() error { + err := middle() + if err != nil { + return err + } + return nil +} + +// Output: {"level":"error","stack":[{"func":"inner","line":"20","source":"errors.go"},{"func":"middle","line":"24","source":"errors.go"},{"func":"outer","line":"32","source":"errors.go"},{"func":"main","line":"15","source":"errors.go"},{"func":"main","line":"204","source":"proc.go"},{"func":"goexit","line":"1374","source":"asm_amd64.s"}],"error":"seems we have an error here","time":1609086683} +``` + +> zerolog.ErrorStackMarshaler must be set in order for the stack to output anything. + +#### Logging Fatal Messages + +```go +package main + +import ( + "errors" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + err := errors.New("A repo man spends his life getting into tense situations") + service := "myservice" + + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + log.Fatal(). + Err(err). + Str("service", service). + Msgf("Cannot start %s", service) +} + +// Output: {"time":1516133263,"level":"fatal","error":"A repo man spends his life getting into tense situations","service":"myservice","message":"Cannot start myservice"} +// exit status 1 +``` + +> NOTE: Using `Msgf` generates one allocation even when the logger is disabled. + + +### Create logger instance to manage different outputs + +```go +logger := zerolog.New(os.Stderr).With().Timestamp().Logger() + +logger.Info().Str("foo", "bar").Msg("hello world") + +// Output: {"level":"info","time":1494567715,"message":"hello world","foo":"bar"} +``` + +### Sub-loggers let you chain loggers with additional context + +```go +sublogger := log.With(). + Str("component", "foo"). + Logger() +sublogger.Info().Msg("hello world") + +// Output: {"level":"info","time":1494567715,"message":"hello world","component":"foo"} +``` + +### Pretty logging + +To log a human-friendly, colorized output, use `zerolog.ConsoleWriter`: + +```go +log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}) + +log.Info().Str("foo", "bar").Msg("Hello world") + +// Output: 3:04PM INF Hello World foo=bar +``` + +To customize the configuration and formatting: + +```go +output := zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: time.RFC3339} +output.FormatLevel = func(i interface{}) string { + return strings.ToUpper(fmt.Sprintf("| %-6s|", i)) +} +output.FormatMessage = func(i interface{}) string { + return fmt.Sprintf("***%s****", i) +} +output.FormatFieldName = func(i interface{}) string { + return fmt.Sprintf("%s:", i) +} +output.FormatFieldValue = func(i interface{}) string { + return strings.ToUpper(fmt.Sprintf("%s", i)) +} + +log := zerolog.New(output).With().Timestamp().Logger() + +log.Info().Str("foo", "bar").Msg("Hello World") + +// Output: 2006-01-02T15:04:05Z07:00 | INFO | ***Hello World**** foo:BAR +``` + +### Sub dictionary + +```go +log.Info(). + Str("foo", "bar"). + Dict("dict", zerolog.Dict(). + Str("bar", "baz"). 
+ Int("n", 1), + ).Msg("hello world") + +// Output: {"level":"info","time":1494567715,"foo":"bar","dict":{"bar":"baz","n":1},"message":"hello world"} +``` + +### Customize automatic field names + +```go +zerolog.TimestampFieldName = "t" +zerolog.LevelFieldName = "l" +zerolog.MessageFieldName = "m" + +log.Info().Msg("hello world") + +// Output: {"l":"info","t":1494567715,"m":"hello world"} +``` + +### Add contextual fields to the global logger + +```go +log.Logger = log.With().Str("foo", "bar").Logger() +``` + +### Add file and line number to log + +Equivalent of `Llongfile`: + +```go +log.Logger = log.With().Caller().Logger() +log.Info().Msg("hello world") + +// Output: {"level": "info", "message": "hello world", "caller": "/go/src/your_project/some_file:21"} +``` + +Equivalent of `Lshortfile`: + +```go +zerolog.CallerMarshalFunc = func(file string, line int) string { + short := file + for i := len(file) - 1; i > 0; i-- { + if file[i] == '/' { + short = file[i+1:] + break + } + } + file = short + return file + ":" + strconv.Itoa(line) +} +log.Logger = log.With().Caller().Logger() +log.Info().Msg("hello world") + +// Output: {"level": "info", "message": "hello world", "caller": "some_file:21"} +``` + +### Thread-safe, lock-free, non-blocking writer + +If your writer might be slow or not thread-safe and you need your log producers to never get slowed down by a slow writer, you can use a `diode.Writer` as follows: + +```go +wr := diode.NewWriter(os.Stdout, 1000, 10*time.Millisecond, func(missed int) { + fmt.Printf("Logger Dropped %d messages", missed) + }) +log := zerolog.New(wr) +log.Print("test") +``` + +You will need to install `code.cloudfoundry.org/go-diodes` to use this feature. + +### Log Sampling + +```go +sampled := log.Sample(&zerolog.BasicSampler{N: 10}) +sampled.Info().Msg("will be logged every 10 messages") + +// Output: {"time":1494567715,"level":"info","message":"will be logged every 10 messages"} +``` + +More advanced sampling: + +```go +// Will let 5 debug messages per period of 1 second. +// Over 5 debug message, 1 every 100 debug messages are logged. +// Other levels are not sampled. +sampled := log.Sample(zerolog.LevelSampler{ + DebugSampler: &zerolog.BurstSampler{ + Burst: 5, + Period: 1*time.Second, + NextSampler: &zerolog.BasicSampler{N: 100}, + }, +}) +sampled.Debug().Msg("hello world") + +// Output: {"time":1494567715,"level":"debug","message":"hello world"} +``` + +### Hooks + +```go +type SeverityHook struct{} + +func (h SeverityHook) Run(e *zerolog.Event, level zerolog.Level, msg string) { + if level != zerolog.NoLevel { + e.Str("severity", level.String()) + } +} + +hooked := log.Hook(SeverityHook{}) +hooked.Warn().Msg("") + +// Output: {"level":"warn","severity":"warn"} +``` + +### Pass a sub-logger by context + +```go +ctx := log.With().Str("component", "module").Logger().WithContext(ctx) + +log.Ctx(ctx).Info().Msg("hello world") + +// Output: {"component":"module","level":"info","message":"hello world"} +``` + +### Set as standard logger output + +```go +log := zerolog.New(os.Stdout).With(). + Str("foo", "bar"). + Logger() + +stdlog.SetFlags(0) +stdlog.SetOutput(log) + +stdlog.Print("hello world") + +// Output: {"foo":"bar","message":"hello world"} +``` + +### Integration with `net/http` + +The `github.com/rs/zerolog/hlog` package provides some helpers to integrate zerolog with `http.Handler`. + +In this example we use [alice](https://github.com/justinas/alice) to install logger for better readability. + +```go +log := zerolog.New(os.Stdout).With(). 
+ Timestamp(). + Str("role", "my-service"). + Str("host", host). + Logger() + +c := alice.New() + +// Install the logger handler with default output on the console +c = c.Append(hlog.NewHandler(log)) + +// Install some provided extra handler to set some request's context fields. +// Thanks to that handler, all our logs will come with some prepopulated fields. +c = c.Append(hlog.AccessHandler(func(r *http.Request, status, size int, duration time.Duration) { + hlog.FromRequest(r).Info(). + Str("method", r.Method). + Stringer("url", r.URL). + Int("status", status). + Int("size", size). + Dur("duration", duration). + Msg("") +})) +c = c.Append(hlog.RemoteAddrHandler("ip")) +c = c.Append(hlog.UserAgentHandler("user_agent")) +c = c.Append(hlog.RefererHandler("referer")) +c = c.Append(hlog.RequestIDHandler("req_id", "Request-Id")) + +// Here is your final handler +h := c.Then(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Get the logger from the request's context. You can safely assume it + // will be always there: if the handler is removed, hlog.FromRequest + // will return a no-op logger. + hlog.FromRequest(r).Info(). + Str("user", "current user"). + Str("status", "ok"). + Msg("Something happened") + + // Output: {"level":"info","time":"2001-02-03T04:05:06Z","role":"my-service","host":"local-hostname","req_id":"b4g0l5t6tfid6dtrapu0","user":"current user","status":"ok","message":"Something happened"} +})) +http.Handle("/", h) + +if err := http.ListenAndServe(":8080", nil); err != nil { + log.Fatal().Err(err).Msg("Startup failed") +} +``` + +## Multiple Log Output +`zerolog.MultiLevelWriter` may be used to send the log message to multiple outputs. +In this example, we send the log message to both `os.Stdout` and the in-built ConsoleWriter. +```go +func main() { + consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout} + + multi := zerolog.MultiLevelWriter(consoleWriter, os.Stdout) + + logger := zerolog.New(multi).With().Timestamp().Logger() + + logger.Info().Msg("Hello World!") +} + +// Output (Line 1: Console; Line 2: Stdout) +// 12:36PM INF Hello World! +// {"level":"info","time":"2019-11-07T12:36:38+03:00","message":"Hello World!"} +``` + +## Global Settings + +Some settings can be changed and will be applied to all loggers: + +* `log.Logger`: You can set this value to customize the global logger (the one used by package level methods). +* `zerolog.SetGlobalLevel`: Can raise the minimum level of all loggers. Call this with `zerolog.Disabled` to disable logging altogether (quiet mode). +* `zerolog.DisableSampling`: If argument is `true`, all sampled loggers will stop sampling and issue 100% of their log events. +* `zerolog.TimestampFieldName`: Can be set to customize `Timestamp` field name. +* `zerolog.LevelFieldName`: Can be set to customize level field name. +* `zerolog.MessageFieldName`: Can be set to customize message field name. +* `zerolog.ErrorFieldName`: Can be set to customize `Err` field name. +* `zerolog.TimeFieldFormat`: Can be set to customize `Time` field value formatting. If set with `zerolog.TimeFormatUnix`, `zerolog.TimeFormatUnixMs` or `zerolog.TimeFormatUnixMicro`, times are formated as UNIX timestamp. +* `zerolog.DurationFieldUnit`: Can be set to customize the unit for time.Duration type fields added by `Dur` (default: `time.Millisecond`). +* `zerolog.DurationFieldInteger`: If set to `true`, `Dur` fields are formatted as integers instead of floats (default: `false`). 
+* `zerolog.ErrorHandler`: Called whenever zerolog fails to write an event on its output. If not set, an error is printed on the stderr. This handler must be thread safe and non-blocking. + +## Field Types + +### Standard Types + +* `Str` +* `Bool` +* `Int`, `Int8`, `Int16`, `Int32`, `Int64` +* `Uint`, `Uint8`, `Uint16`, `Uint32`, `Uint64` +* `Float32`, `Float64` + +### Advanced Fields + +* `Err`: Takes an `error` and renders it as a string using the `zerolog.ErrorFieldName` field name. +* `Func`: Run a `func` only if the level is enabled. +* `Timestamp`: Inserts a timestamp field with `zerolog.TimestampFieldName` field name, formatted using `zerolog.TimeFieldFormat`. +* `Time`: Adds a field with time formatted with `zerolog.TimeFieldFormat`. +* `Dur`: Adds a field with `time.Duration`. +* `Dict`: Adds a sub-key/value as a field of the event. +* `RawJSON`: Adds a field with an already encoded JSON (`[]byte`) +* `Hex`: Adds a field with value formatted as a hexadecimal string (`[]byte`) +* `Interface`: Uses reflection to marshal the type. + +Most fields are also available in the slice format (`Strs` for `[]string`, `Errs` for `[]error` etc.) + +## Binary Encoding + +In addition to the default JSON encoding, `zerolog` can produce binary logs using [CBOR](https://cbor.io) encoding. The choice of encoding can be decided at compile time using the build tag `binary_log` as follows: + +```bash +go build -tags binary_log . +``` + +To Decode binary encoded log files you can use any CBOR decoder. One has been tested to work +with zerolog library is [CSD](https://github.com/toravir/csd/). + +## Related Projects + +* [grpc-zerolog](https://github.com/cheapRoc/grpc-zerolog): Implementation of `grpclog.LoggerV2` interface using `zerolog` +* [overlog](https://github.com/Trendyol/overlog): Implementation of `Mapped Diagnostic Context` interface using `zerolog` +* [zerologr](https://github.com/go-logr/zerologr): Implementation of `logr.LogSink` interface using `zerolog` + +## Benchmarks + +See [logbench](http://hackemist.com/logbench/) for more comprehensive and up-to-date benchmarks. + +All operations are allocation free (those numbers *include* JSON encoding): + +```text +BenchmarkLogEmpty-8 100000000 19.1 ns/op 0 B/op 0 allocs/op +BenchmarkDisabled-8 500000000 4.07 ns/op 0 B/op 0 allocs/op +BenchmarkInfo-8 30000000 42.5 ns/op 0 B/op 0 allocs/op +BenchmarkContextFields-8 30000000 44.9 ns/op 0 B/op 0 allocs/op +BenchmarkLogFields-8 10000000 184 ns/op 0 B/op 0 allocs/op +``` + +There are a few Go logging benchmarks and comparisons that include zerolog. 
+ +* [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) +* [uber-common/zap](https://github.com/uber-go/zap#performance) + +Using Uber's zap comparison benchmark: + +Log a message and 10 fields: + +| Library | Time | Bytes Allocated | Objects Allocated | +| :--- | :---: | :---: | :---: | +| zerolog | 767 ns/op | 552 B/op | 6 allocs/op | +| :zap: zap | 848 ns/op | 704 B/op | 2 allocs/op | +| :zap: zap (sugared) | 1363 ns/op | 1610 B/op | 20 allocs/op | +| go-kit | 3614 ns/op | 2895 B/op | 66 allocs/op | +| lion | 5392 ns/op | 5807 B/op | 63 allocs/op | +| logrus | 5661 ns/op | 6092 B/op | 78 allocs/op | +| apex/log | 15332 ns/op | 3832 B/op | 65 allocs/op | +| log15 | 20657 ns/op | 5632 B/op | 93 allocs/op | + +Log a message with a logger that already has 10 fields of context: + +| Library | Time | Bytes Allocated | Objects Allocated | +| :--- | :---: | :---: | :---: | +| zerolog | 52 ns/op | 0 B/op | 0 allocs/op | +| :zap: zap | 283 ns/op | 0 B/op | 0 allocs/op | +| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op | +| lion | 2702 ns/op | 4074 B/op | 38 allocs/op | +| go-kit | 3378 ns/op | 3046 B/op | 52 allocs/op | +| logrus | 4309 ns/op | 4564 B/op | 63 allocs/op | +| apex/log | 13456 ns/op | 2898 B/op | 51 allocs/op | +| log15 | 14179 ns/op | 2642 B/op | 44 allocs/op | + +Log a static string, without any context or `printf`-style templating: + +| Library | Time | Bytes Allocated | Objects Allocated | +| :--- | :---: | :---: | :---: | +| zerolog | 50 ns/op | 0 B/op | 0 allocs/op | +| :zap: zap | 236 ns/op | 0 B/op | 0 allocs/op | +| standard library | 453 ns/op | 80 B/op | 2 allocs/op | +| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op | +| go-kit | 508 ns/op | 656 B/op | 13 allocs/op | +| lion | 771 ns/op | 1224 B/op | 10 allocs/op | +| logrus | 1244 ns/op | 1505 B/op | 27 allocs/op | +| apex/log | 2751 ns/op | 584 B/op | 11 allocs/op | +| log15 | 5181 ns/op | 1592 B/op | 26 allocs/op | + +## Caveats + +Note that zerolog does no de-duplication of fields. Using the same key multiple times creates multiple keys in final JSON: + +```go +logger := zerolog.New(os.Stderr).With().Timestamp().Logger() +logger.Info(). + Timestamp(). + Msg("dup") +// Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"} +``` + +In this case, many consumers will take the last value, but this is not guaranteed; check yours if in doubt. diff --git a/vendor/github.com/rs/zerolog/_config.yml b/vendor/github.com/rs/zerolog/_config.yml new file mode 100644 index 00000000..a1e896d7 --- /dev/null +++ b/vendor/github.com/rs/zerolog/_config.yml @@ -0,0 +1 @@ +remote_theme: rs/gh-readme diff --git a/vendor/github.com/rs/zerolog/array.go b/vendor/github.com/rs/zerolog/array.go new file mode 100644 index 00000000..c75c0520 --- /dev/null +++ b/vendor/github.com/rs/zerolog/array.go @@ -0,0 +1,240 @@ +package zerolog + +import ( + "net" + "sync" + "time" +) + +var arrayPool = &sync.Pool{ + New: func() interface{} { + return &Array{ + buf: make([]byte, 0, 500), + } + }, +} + +// Array is used to prepopulate an array of items +// which can be re-used to add to log messages. +type Array struct { + buf []byte +} + +func putArray(a *Array) { + // Proper usage of a sync.Pool requires each entry to have approximately + // the same memory cost. To obtain this property when the stored type + // contains a variably-sized buffer, we add a hard limit on the maximum buffer + // to place back in the pool. 
+ // + // See https://golang.org/issue/23199 + const maxSize = 1 << 16 // 64KiB + if cap(a.buf) > maxSize { + return + } + arrayPool.Put(a) +} + +// Arr creates an array to be added to an Event or Context. +func Arr() *Array { + a := arrayPool.Get().(*Array) + a.buf = a.buf[:0] + return a +} + +// MarshalZerologArray method here is no-op - since data is +// already in the needed format. +func (*Array) MarshalZerologArray(*Array) { +} + +func (a *Array) write(dst []byte) []byte { + dst = enc.AppendArrayStart(dst) + if len(a.buf) > 0 { + dst = append(dst, a.buf...) + } + dst = enc.AppendArrayEnd(dst) + putArray(a) + return dst +} + +// Object marshals an object that implement the LogObjectMarshaler +// interface and append append it to the array. +func (a *Array) Object(obj LogObjectMarshaler) *Array { + e := Dict() + obj.MarshalZerologObject(e) + e.buf = enc.AppendEndMarker(e.buf) + a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...) + putEvent(e) + return a +} + +// Str append append the val as a string to the array. +func (a *Array) Str(val string) *Array { + a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), val) + return a +} + +// Bytes append append the val as a string to the array. +func (a *Array) Bytes(val []byte) *Array { + a.buf = enc.AppendBytes(enc.AppendArrayDelim(a.buf), val) + return a +} + +// Hex append append the val as a hex string to the array. +func (a *Array) Hex(val []byte) *Array { + a.buf = enc.AppendHex(enc.AppendArrayDelim(a.buf), val) + return a +} + +// RawJSON adds already encoded JSON to the array. +func (a *Array) RawJSON(val []byte) *Array { + a.buf = appendJSON(enc.AppendArrayDelim(a.buf), val) + return a +} + +// Err serializes and appends the err to the array. +func (a *Array) Err(err error) *Array { + switch m := ErrorMarshalFunc(err).(type) { + case LogObjectMarshaler: + e := newEvent(nil, 0) + e.buf = e.buf[:0] + e.appendObject(m) + a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...) + putEvent(e) + case error: + if m == nil || isNilValue(m) { + a.buf = enc.AppendNil(enc.AppendArrayDelim(a.buf)) + } else { + a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m.Error()) + } + case string: + a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m) + default: + a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), m) + } + + return a +} + +// Bool append append the val as a bool to the array. +func (a *Array) Bool(b bool) *Array { + a.buf = enc.AppendBool(enc.AppendArrayDelim(a.buf), b) + return a +} + +// Int append append i as a int to the array. +func (a *Array) Int(i int) *Array { + a.buf = enc.AppendInt(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Int8 append append i as a int8 to the array. +func (a *Array) Int8(i int8) *Array { + a.buf = enc.AppendInt8(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Int16 append append i as a int16 to the array. +func (a *Array) Int16(i int16) *Array { + a.buf = enc.AppendInt16(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Int32 append append i as a int32 to the array. +func (a *Array) Int32(i int32) *Array { + a.buf = enc.AppendInt32(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Int64 append append i as a int64 to the array. +func (a *Array) Int64(i int64) *Array { + a.buf = enc.AppendInt64(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Uint append append i as a uint to the array. +func (a *Array) Uint(i uint) *Array { + a.buf = enc.AppendUint(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Uint8 append append i as a uint8 to the array. 
+func (a *Array) Uint8(i uint8) *Array { + a.buf = enc.AppendUint8(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Uint16 append append i as a uint16 to the array. +func (a *Array) Uint16(i uint16) *Array { + a.buf = enc.AppendUint16(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Uint32 append append i as a uint32 to the array. +func (a *Array) Uint32(i uint32) *Array { + a.buf = enc.AppendUint32(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Uint64 append append i as a uint64 to the array. +func (a *Array) Uint64(i uint64) *Array { + a.buf = enc.AppendUint64(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Float32 append append f as a float32 to the array. +func (a *Array) Float32(f float32) *Array { + a.buf = enc.AppendFloat32(enc.AppendArrayDelim(a.buf), f) + return a +} + +// Float64 append append f as a float64 to the array. +func (a *Array) Float64(f float64) *Array { + a.buf = enc.AppendFloat64(enc.AppendArrayDelim(a.buf), f) + return a +} + +// Time append append t formatted as string using zerolog.TimeFieldFormat. +func (a *Array) Time(t time.Time) *Array { + a.buf = enc.AppendTime(enc.AppendArrayDelim(a.buf), t, TimeFieldFormat) + return a +} + +// Dur append append d to the array. +func (a *Array) Dur(d time.Duration) *Array { + a.buf = enc.AppendDuration(enc.AppendArrayDelim(a.buf), d, DurationFieldUnit, DurationFieldInteger) + return a +} + +// Interface append append i marshaled using reflection. +func (a *Array) Interface(i interface{}) *Array { + if obj, ok := i.(LogObjectMarshaler); ok { + return a.Object(obj) + } + a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), i) + return a +} + +// IPAddr adds IPv4 or IPv6 address to the array +func (a *Array) IPAddr(ip net.IP) *Array { + a.buf = enc.AppendIPAddr(enc.AppendArrayDelim(a.buf), ip) + return a +} + +// IPPrefix adds IPv4 or IPv6 Prefix (IP + mask) to the array +func (a *Array) IPPrefix(pfx net.IPNet) *Array { + a.buf = enc.AppendIPPrefix(enc.AppendArrayDelim(a.buf), pfx) + return a +} + +// MACAddr adds a MAC (Ethernet) address to the array +func (a *Array) MACAddr(ha net.HardwareAddr) *Array { + a.buf = enc.AppendMACAddr(enc.AppendArrayDelim(a.buf), ha) + return a +} + +// Dict adds the dict Event to the array +func (a *Array) Dict(dict *Event) *Array { + dict.buf = enc.AppendEndMarker(dict.buf) + a.buf = append(enc.AppendArrayDelim(a.buf), dict.buf...) + return a +} diff --git a/vendor/github.com/rs/zerolog/console.go b/vendor/github.com/rs/zerolog/console.go new file mode 100644 index 00000000..ac34b7eb --- /dev/null +++ b/vendor/github.com/rs/zerolog/console.go @@ -0,0 +1,446 @@ +package zerolog + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/mattn/go-colorable" +) + +const ( + colorBlack = iota + 30 + colorRed + colorGreen + colorYellow + colorBlue + colorMagenta + colorCyan + colorWhite + + colorBold = 1 + colorDarkGray = 90 +) + +var ( + consoleBufPool = sync.Pool{ + New: func() interface{} { + return bytes.NewBuffer(make([]byte, 0, 100)) + }, + } +) + +const ( + consoleDefaultTimeFormat = time.Kitchen +) + +// Formatter transforms the input into a formatted string. +type Formatter func(interface{}) string + +// ConsoleWriter parses the JSON input and writes it in an +// (optionally) colorized, human-friendly format to Out. +type ConsoleWriter struct { + // Out is the output destination. + Out io.Writer + + // NoColor disables the colorized output. 
+ NoColor bool + + // TimeFormat specifies the format for timestamp in output. + TimeFormat string + + // PartsOrder defines the order of parts in output. + PartsOrder []string + + // PartsExclude defines parts to not display in output. + PartsExclude []string + + // FieldsExclude defines contextual fields to not display in output. + FieldsExclude []string + + FormatTimestamp Formatter + FormatLevel Formatter + FormatCaller Formatter + FormatMessage Formatter + FormatFieldName Formatter + FormatFieldValue Formatter + FormatErrFieldName Formatter + FormatErrFieldValue Formatter + + FormatExtra func(map[string]interface{}, *bytes.Buffer) error +} + +// NewConsoleWriter creates and initializes a new ConsoleWriter. +func NewConsoleWriter(options ...func(w *ConsoleWriter)) ConsoleWriter { + w := ConsoleWriter{ + Out: os.Stdout, + TimeFormat: consoleDefaultTimeFormat, + PartsOrder: consoleDefaultPartsOrder(), + } + + for _, opt := range options { + opt(&w) + } + + // Fix color on Windows + if w.Out == os.Stdout || w.Out == os.Stderr { + w.Out = colorable.NewColorable(w.Out.(*os.File)) + } + + return w +} + +// Write transforms the JSON input with formatters and appends to w.Out. +func (w ConsoleWriter) Write(p []byte) (n int, err error) { + // Fix color on Windows + if w.Out == os.Stdout || w.Out == os.Stderr { + w.Out = colorable.NewColorable(w.Out.(*os.File)) + } + + if w.PartsOrder == nil { + w.PartsOrder = consoleDefaultPartsOrder() + } + + var buf = consoleBufPool.Get().(*bytes.Buffer) + defer func() { + buf.Reset() + consoleBufPool.Put(buf) + }() + + var evt map[string]interface{} + p = decodeIfBinaryToBytes(p) + d := json.NewDecoder(bytes.NewReader(p)) + d.UseNumber() + err = d.Decode(&evt) + if err != nil { + return n, fmt.Errorf("cannot decode event: %s", err) + } + + for _, p := range w.PartsOrder { + w.writePart(buf, evt, p) + } + + w.writeFields(evt, buf) + + if w.FormatExtra != nil { + err = w.FormatExtra(evt, buf) + if err != nil { + return n, err + } + } + + err = buf.WriteByte('\n') + if err != nil { + return n, err + } + + _, err = buf.WriteTo(w.Out) + return len(p), err +} + +// writeFields appends formatted key-value pairs to buf. +func (w ConsoleWriter) writeFields(evt map[string]interface{}, buf *bytes.Buffer) { + var fields = make([]string, 0, len(evt)) + for field := range evt { + var isExcluded bool + for _, excluded := range w.FieldsExclude { + if field == excluded { + isExcluded = true + break + } + } + if isExcluded { + continue + } + + switch field { + case LevelFieldName, TimestampFieldName, MessageFieldName, CallerFieldName: + continue + } + fields = append(fields, field) + } + sort.Strings(fields) + + // Write space only if something has already been written to the buffer, and if there are fields. + if buf.Len() > 0 && len(fields) > 0 { + buf.WriteByte(' ') + } + + // Move the "error" field to the front + ei := sort.Search(len(fields), func(i int) bool { return fields[i] >= ErrorFieldName }) + if ei < len(fields) && fields[ei] == ErrorFieldName { + fields[ei] = "" + fields = append([]string{ErrorFieldName}, fields...) 
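+		// The loop below drops the slot emptied above, so the error field
+		// appears only once, at the front.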
+ var xfields = make([]string, 0, len(fields)) + for _, field := range fields { + if field == "" { // Skip empty fields + continue + } + xfields = append(xfields, field) + } + fields = xfields + } + + for i, field := range fields { + var fn Formatter + var fv Formatter + + if field == ErrorFieldName { + if w.FormatErrFieldName == nil { + fn = consoleDefaultFormatErrFieldName(w.NoColor) + } else { + fn = w.FormatErrFieldName + } + + if w.FormatErrFieldValue == nil { + fv = consoleDefaultFormatErrFieldValue(w.NoColor) + } else { + fv = w.FormatErrFieldValue + } + } else { + if w.FormatFieldName == nil { + fn = consoleDefaultFormatFieldName(w.NoColor) + } else { + fn = w.FormatFieldName + } + + if w.FormatFieldValue == nil { + fv = consoleDefaultFormatFieldValue + } else { + fv = w.FormatFieldValue + } + } + + buf.WriteString(fn(field)) + + switch fValue := evt[field].(type) { + case string: + if needsQuote(fValue) { + buf.WriteString(fv(strconv.Quote(fValue))) + } else { + buf.WriteString(fv(fValue)) + } + case json.Number: + buf.WriteString(fv(fValue)) + default: + b, err := InterfaceMarshalFunc(fValue) + if err != nil { + fmt.Fprintf(buf, colorize("[error: %v]", colorRed, w.NoColor), err) + } else { + fmt.Fprint(buf, fv(b)) + } + } + + if i < len(fields)-1 { // Skip space for last field + buf.WriteByte(' ') + } + } +} + +// writePart appends a formatted part to buf. +func (w ConsoleWriter) writePart(buf *bytes.Buffer, evt map[string]interface{}, p string) { + var f Formatter + + if w.PartsExclude != nil && len(w.PartsExclude) > 0 { + for _, exclude := range w.PartsExclude { + if exclude == p { + return + } + } + } + + switch p { + case LevelFieldName: + if w.FormatLevel == nil { + f = consoleDefaultFormatLevel(w.NoColor) + } else { + f = w.FormatLevel + } + case TimestampFieldName: + if w.FormatTimestamp == nil { + f = consoleDefaultFormatTimestamp(w.TimeFormat, w.NoColor) + } else { + f = w.FormatTimestamp + } + case MessageFieldName: + if w.FormatMessage == nil { + f = consoleDefaultFormatMessage + } else { + f = w.FormatMessage + } + case CallerFieldName: + if w.FormatCaller == nil { + f = consoleDefaultFormatCaller(w.NoColor) + } else { + f = w.FormatCaller + } + default: + if w.FormatFieldValue == nil { + f = consoleDefaultFormatFieldValue + } else { + f = w.FormatFieldValue + } + } + + var s = f(evt[p]) + + if len(s) > 0 { + if buf.Len() > 0 { + buf.WriteByte(' ') // Write space only if not the first part + } + buf.WriteString(s) + } +} + +// needsQuote returns true when the string s should be quoted in output. +func needsQuote(s string) bool { + for i := range s { + if s[i] < 0x20 || s[i] > 0x7e || s[i] == ' ' || s[i] == '\\' || s[i] == '"' { + return true + } + } + return false +} + +// colorize returns the string s wrapped in ANSI code c, unless disabled is true. 
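+//
+// For example, colorize("ERR", colorRed, false) returns "\x1b[31mERR\x1b[0m".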
+func colorize(s interface{}, c int, disabled bool) string { + if disabled { + return fmt.Sprintf("%s", s) + } + return fmt.Sprintf("\x1b[%dm%v\x1b[0m", c, s) +} + +// ----- DEFAULT FORMATTERS --------------------------------------------------- + +func consoleDefaultPartsOrder() []string { + return []string{ + TimestampFieldName, + LevelFieldName, + CallerFieldName, + MessageFieldName, + } +} + +func consoleDefaultFormatTimestamp(timeFormat string, noColor bool) Formatter { + if timeFormat == "" { + timeFormat = consoleDefaultTimeFormat + } + return func(i interface{}) string { + t := "" + switch tt := i.(type) { + case string: + ts, err := time.Parse(TimeFieldFormat, tt) + if err != nil { + t = tt + } else { + t = ts.Local().Format(timeFormat) + } + case json.Number: + i, err := tt.Int64() + if err != nil { + t = tt.String() + } else { + var sec, nsec int64 = i, 0 + switch TimeFieldFormat { + case TimeFormatUnixMs: + nsec = int64(time.Duration(i) * time.Millisecond) + sec = 0 + case TimeFormatUnixMicro: + nsec = int64(time.Duration(i) * time.Microsecond) + sec = 0 + } + ts := time.Unix(sec, nsec) + t = ts.Format(timeFormat) + } + } + return colorize(t, colorDarkGray, noColor) + } +} + +func consoleDefaultFormatLevel(noColor bool) Formatter { + return func(i interface{}) string { + var l string + if ll, ok := i.(string); ok { + switch ll { + case LevelTraceValue: + l = colorize("TRC", colorMagenta, noColor) + case LevelDebugValue: + l = colorize("DBG", colorYellow, noColor) + case LevelInfoValue: + l = colorize("INF", colorGreen, noColor) + case LevelWarnValue: + l = colorize("WRN", colorRed, noColor) + case LevelErrorValue: + l = colorize(colorize("ERR", colorRed, noColor), colorBold, noColor) + case LevelFatalValue: + l = colorize(colorize("FTL", colorRed, noColor), colorBold, noColor) + case LevelPanicValue: + l = colorize(colorize("PNC", colorRed, noColor), colorBold, noColor) + default: + l = colorize("???", colorBold, noColor) + } + } else { + if i == nil { + l = colorize("???", colorBold, noColor) + } else { + l = strings.ToUpper(fmt.Sprintf("%s", i))[0:3] + } + } + return l + } +} + +func consoleDefaultFormatCaller(noColor bool) Formatter { + return func(i interface{}) string { + var c string + if cc, ok := i.(string); ok { + c = cc + } + if len(c) > 0 { + if cwd, err := os.Getwd(); err == nil { + if rel, err := filepath.Rel(cwd, c); err == nil { + c = rel + } + } + c = colorize(c, colorBold, noColor) + colorize(" >", colorCyan, noColor) + } + return c + } +} + +func consoleDefaultFormatMessage(i interface{}) string { + if i == nil { + return "" + } + return fmt.Sprintf("%s", i) +} + +func consoleDefaultFormatFieldName(noColor bool) Formatter { + return func(i interface{}) string { + return colorize(fmt.Sprintf("%s=", i), colorCyan, noColor) + } +} + +func consoleDefaultFormatFieldValue(i interface{}) string { + return fmt.Sprintf("%s", i) +} + +func consoleDefaultFormatErrFieldName(noColor bool) Formatter { + return func(i interface{}) string { + return colorize(fmt.Sprintf("%s=", i), colorCyan, noColor) + } +} + +func consoleDefaultFormatErrFieldValue(noColor bool) Formatter { + return func(i interface{}) string { + return colorize(fmt.Sprintf("%s", i), colorRed, noColor) + } +} diff --git a/vendor/github.com/rs/zerolog/context.go b/vendor/github.com/rs/zerolog/context.go new file mode 100644 index 00000000..f398e319 --- /dev/null +++ b/vendor/github.com/rs/zerolog/context.go @@ -0,0 +1,433 @@ +package zerolog + +import ( + "fmt" + "io/ioutil" + "math" + "net" + "time" +) + +// 
Context configures a new sub-logger with contextual fields. +type Context struct { + l Logger +} + +// Logger returns the logger with the context previously set. +func (c Context) Logger() Logger { + return c.l +} + +// Fields is a helper function to use a map or slice to set fields using type assertion. +// Only map[string]interface{} and []interface{} are accepted. []interface{} must +// alternate string keys and arbitrary values, and extraneous ones are ignored. +func (c Context) Fields(fields interface{}) Context { + c.l.context = appendFields(c.l.context, fields) + return c +} + +// Dict adds the field key with the dict to the logger context. +func (c Context) Dict(key string, dict *Event) Context { + dict.buf = enc.AppendEndMarker(dict.buf) + c.l.context = append(enc.AppendKey(c.l.context, key), dict.buf...) + putEvent(dict) + return c +} + +// Array adds the field key with an array to the event context. +// Use zerolog.Arr() to create the array or pass a type that +// implement the LogArrayMarshaler interface. +func (c Context) Array(key string, arr LogArrayMarshaler) Context { + c.l.context = enc.AppendKey(c.l.context, key) + if arr, ok := arr.(*Array); ok { + c.l.context = arr.write(c.l.context) + return c + } + var a *Array + if aa, ok := arr.(*Array); ok { + a = aa + } else { + a = Arr() + arr.MarshalZerologArray(a) + } + c.l.context = a.write(c.l.context) + return c +} + +// Object marshals an object that implement the LogObjectMarshaler interface. +func (c Context) Object(key string, obj LogObjectMarshaler) Context { + e := newEvent(levelWriterAdapter{ioutil.Discard}, 0) + e.Object(key, obj) + c.l.context = enc.AppendObjectData(c.l.context, e.buf) + putEvent(e) + return c +} + +// EmbedObject marshals and Embeds an object that implement the LogObjectMarshaler interface. +func (c Context) EmbedObject(obj LogObjectMarshaler) Context { + e := newEvent(levelWriterAdapter{ioutil.Discard}, 0) + e.EmbedObject(obj) + c.l.context = enc.AppendObjectData(c.l.context, e.buf) + putEvent(e) + return c +} + +// Str adds the field key with val as a string to the logger context. +func (c Context) Str(key, val string) Context { + c.l.context = enc.AppendString(enc.AppendKey(c.l.context, key), val) + return c +} + +// Strs adds the field key with val as a string to the logger context. +func (c Context) Strs(key string, vals []string) Context { + c.l.context = enc.AppendStrings(enc.AppendKey(c.l.context, key), vals) + return c +} + +// Stringer adds the field key with val.String() (or null if val is nil) to the logger context. +func (c Context) Stringer(key string, val fmt.Stringer) Context { + if val != nil { + c.l.context = enc.AppendString(enc.AppendKey(c.l.context, key), val.String()) + return c + } + + c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), nil) + return c +} + +// Bytes adds the field key with val as a []byte to the logger context. +func (c Context) Bytes(key string, val []byte) Context { + c.l.context = enc.AppendBytes(enc.AppendKey(c.l.context, key), val) + return c +} + +// Hex adds the field key with val as a hex string to the logger context. +func (c Context) Hex(key string, val []byte) Context { + c.l.context = enc.AppendHex(enc.AppendKey(c.l.context, key), val) + return c +} + +// RawJSON adds already encoded JSON to context. +// +// No sanity check is performed on b; it must not contain carriage returns and +// be valid JSON. 
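+//
+// For example (key and payload are illustrative):
+//
+//	logger := zerolog.New(os.Stdout).With().RawJSON("attrs", []byte(`{"a":1}`)).Logger()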
+func (c Context) RawJSON(key string, b []byte) Context { + c.l.context = appendJSON(enc.AppendKey(c.l.context, key), b) + return c +} + +// AnErr adds the field key with serialized err to the logger context. +func (c Context) AnErr(key string, err error) Context { + switch m := ErrorMarshalFunc(err).(type) { + case nil: + return c + case LogObjectMarshaler: + return c.Object(key, m) + case error: + if m == nil || isNilValue(m) { + return c + } else { + return c.Str(key, m.Error()) + } + case string: + return c.Str(key, m) + default: + return c.Interface(key, m) + } +} + +// Errs adds the field key with errs as an array of serialized errors to the +// logger context. +func (c Context) Errs(key string, errs []error) Context { + arr := Arr() + for _, err := range errs { + switch m := ErrorMarshalFunc(err).(type) { + case LogObjectMarshaler: + arr = arr.Object(m) + case error: + if m == nil || isNilValue(m) { + arr = arr.Interface(nil) + } else { + arr = arr.Str(m.Error()) + } + case string: + arr = arr.Str(m) + default: + arr = arr.Interface(m) + } + } + + return c.Array(key, arr) +} + +// Err adds the field "error" with serialized err to the logger context. +func (c Context) Err(err error) Context { + return c.AnErr(ErrorFieldName, err) +} + +// Bool adds the field key with val as a bool to the logger context. +func (c Context) Bool(key string, b bool) Context { + c.l.context = enc.AppendBool(enc.AppendKey(c.l.context, key), b) + return c +} + +// Bools adds the field key with val as a []bool to the logger context. +func (c Context) Bools(key string, b []bool) Context { + c.l.context = enc.AppendBools(enc.AppendKey(c.l.context, key), b) + return c +} + +// Int adds the field key with i as a int to the logger context. +func (c Context) Int(key string, i int) Context { + c.l.context = enc.AppendInt(enc.AppendKey(c.l.context, key), i) + return c +} + +// Ints adds the field key with i as a []int to the logger context. +func (c Context) Ints(key string, i []int) Context { + c.l.context = enc.AppendInts(enc.AppendKey(c.l.context, key), i) + return c +} + +// Int8 adds the field key with i as a int8 to the logger context. +func (c Context) Int8(key string, i int8) Context { + c.l.context = enc.AppendInt8(enc.AppendKey(c.l.context, key), i) + return c +} + +// Ints8 adds the field key with i as a []int8 to the logger context. +func (c Context) Ints8(key string, i []int8) Context { + c.l.context = enc.AppendInts8(enc.AppendKey(c.l.context, key), i) + return c +} + +// Int16 adds the field key with i as a int16 to the logger context. +func (c Context) Int16(key string, i int16) Context { + c.l.context = enc.AppendInt16(enc.AppendKey(c.l.context, key), i) + return c +} + +// Ints16 adds the field key with i as a []int16 to the logger context. +func (c Context) Ints16(key string, i []int16) Context { + c.l.context = enc.AppendInts16(enc.AppendKey(c.l.context, key), i) + return c +} + +// Int32 adds the field key with i as a int32 to the logger context. +func (c Context) Int32(key string, i int32) Context { + c.l.context = enc.AppendInt32(enc.AppendKey(c.l.context, key), i) + return c +} + +// Ints32 adds the field key with i as a []int32 to the logger context. +func (c Context) Ints32(key string, i []int32) Context { + c.l.context = enc.AppendInts32(enc.AppendKey(c.l.context, key), i) + return c +} + +// Int64 adds the field key with i as a int64 to the logger context. 
+func (c Context) Int64(key string, i int64) Context { + c.l.context = enc.AppendInt64(enc.AppendKey(c.l.context, key), i) + return c +} + +// Ints64 adds the field key with i as a []int64 to the logger context. +func (c Context) Ints64(key string, i []int64) Context { + c.l.context = enc.AppendInts64(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uint adds the field key with i as a uint to the logger context. +func (c Context) Uint(key string, i uint) Context { + c.l.context = enc.AppendUint(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uints adds the field key with i as a []uint to the logger context. +func (c Context) Uints(key string, i []uint) Context { + c.l.context = enc.AppendUints(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uint8 adds the field key with i as a uint8 to the logger context. +func (c Context) Uint8(key string, i uint8) Context { + c.l.context = enc.AppendUint8(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uints8 adds the field key with i as a []uint8 to the logger context. +func (c Context) Uints8(key string, i []uint8) Context { + c.l.context = enc.AppendUints8(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uint16 adds the field key with i as a uint16 to the logger context. +func (c Context) Uint16(key string, i uint16) Context { + c.l.context = enc.AppendUint16(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uints16 adds the field key with i as a []uint16 to the logger context. +func (c Context) Uints16(key string, i []uint16) Context { + c.l.context = enc.AppendUints16(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uint32 adds the field key with i as a uint32 to the logger context. +func (c Context) Uint32(key string, i uint32) Context { + c.l.context = enc.AppendUint32(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uints32 adds the field key with i as a []uint32 to the logger context. +func (c Context) Uints32(key string, i []uint32) Context { + c.l.context = enc.AppendUints32(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uint64 adds the field key with i as a uint64 to the logger context. +func (c Context) Uint64(key string, i uint64) Context { + c.l.context = enc.AppendUint64(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uints64 adds the field key with i as a []uint64 to the logger context. +func (c Context) Uints64(key string, i []uint64) Context { + c.l.context = enc.AppendUints64(enc.AppendKey(c.l.context, key), i) + return c +} + +// Float32 adds the field key with f as a float32 to the logger context. +func (c Context) Float32(key string, f float32) Context { + c.l.context = enc.AppendFloat32(enc.AppendKey(c.l.context, key), f) + return c +} + +// Floats32 adds the field key with f as a []float32 to the logger context. +func (c Context) Floats32(key string, f []float32) Context { + c.l.context = enc.AppendFloats32(enc.AppendKey(c.l.context, key), f) + return c +} + +// Float64 adds the field key with f as a float64 to the logger context. +func (c Context) Float64(key string, f float64) Context { + c.l.context = enc.AppendFloat64(enc.AppendKey(c.l.context, key), f) + return c +} + +// Floats64 adds the field key with f as a []float64 to the logger context. 
+func (c Context) Floats64(key string, f []float64) Context { + c.l.context = enc.AppendFloats64(enc.AppendKey(c.l.context, key), f) + return c +} + +type timestampHook struct{} + +func (ts timestampHook) Run(e *Event, level Level, msg string) { + e.Timestamp() +} + +var th = timestampHook{} + +// Timestamp adds the current local time as UNIX timestamp to the logger context with the "time" key. +// To customize the key name, change zerolog.TimestampFieldName. +// +// NOTE: It won't dedupe the "time" key if the *Context has one already. +func (c Context) Timestamp() Context { + c.l = c.l.Hook(th) + return c +} + +// Time adds the field key with t formated as string using zerolog.TimeFieldFormat. +func (c Context) Time(key string, t time.Time) Context { + c.l.context = enc.AppendTime(enc.AppendKey(c.l.context, key), t, TimeFieldFormat) + return c +} + +// Times adds the field key with t formated as string using zerolog.TimeFieldFormat. +func (c Context) Times(key string, t []time.Time) Context { + c.l.context = enc.AppendTimes(enc.AppendKey(c.l.context, key), t, TimeFieldFormat) + return c +} + +// Dur adds the fields key with d divided by unit and stored as a float. +func (c Context) Dur(key string, d time.Duration) Context { + c.l.context = enc.AppendDuration(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger) + return c +} + +// Durs adds the fields key with d divided by unit and stored as a float. +func (c Context) Durs(key string, d []time.Duration) Context { + c.l.context = enc.AppendDurations(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger) + return c +} + +// Interface adds the field key with obj marshaled using reflection. +func (c Context) Interface(key string, i interface{}) Context { + c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), i) + return c +} + +type callerHook struct { + callerSkipFrameCount int +} + +func newCallerHook(skipFrameCount int) callerHook { + return callerHook{callerSkipFrameCount: skipFrameCount} +} + +func (ch callerHook) Run(e *Event, level Level, msg string) { + switch ch.callerSkipFrameCount { + case useGlobalSkipFrameCount: + // Extra frames to skip (added by hook infra). + e.caller(CallerSkipFrameCount + contextCallerSkipFrameCount) + default: + // Extra frames to skip (added by hook infra). + e.caller(ch.callerSkipFrameCount + contextCallerSkipFrameCount) + } +} + +// useGlobalSkipFrameCount acts as a flag to informat callerHook.Run +// to use the global CallerSkipFrameCount. +const useGlobalSkipFrameCount = math.MinInt32 + +// ch is the default caller hook using the global CallerSkipFrameCount. +var ch = newCallerHook(useGlobalSkipFrameCount) + +// Caller adds the file:line of the caller with the zerolog.CallerFieldName key. +func (c Context) Caller() Context { + c.l = c.l.Hook(ch) + return c +} + +// CallerWithSkipFrameCount adds the file:line of the caller with the zerolog.CallerFieldName key. +// The specified skipFrameCount int will override the global CallerSkipFrameCount for this context's respective logger. +// If set to -1 the global CallerSkipFrameCount will be used. +func (c Context) CallerWithSkipFrameCount(skipFrameCount int) Context { + c.l = c.l.Hook(newCallerHook(skipFrameCount)) + return c +} + +// Stack enables stack trace printing for the error passed to Err(). 
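+//
+// ErrorStackMarshaler must be set for this method to do something.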
+func (c Context) Stack() Context { + c.l.stack = true + return c +} + +// IPAddr adds IPv4 or IPv6 Address to the context +func (c Context) IPAddr(key string, ip net.IP) Context { + c.l.context = enc.AppendIPAddr(enc.AppendKey(c.l.context, key), ip) + return c +} + +// IPPrefix adds IPv4 or IPv6 Prefix (address and mask) to the context +func (c Context) IPPrefix(key string, pfx net.IPNet) Context { + c.l.context = enc.AppendIPPrefix(enc.AppendKey(c.l.context, key), pfx) + return c +} + +// MACAddr adds MAC address to the context +func (c Context) MACAddr(key string, ha net.HardwareAddr) Context { + c.l.context = enc.AppendMACAddr(enc.AppendKey(c.l.context, key), ha) + return c +} diff --git a/vendor/github.com/rs/zerolog/ctx.go b/vendor/github.com/rs/zerolog/ctx.go new file mode 100644 index 00000000..44d3f4bc --- /dev/null +++ b/vendor/github.com/rs/zerolog/ctx.go @@ -0,0 +1,51 @@ +package zerolog + +import ( + "context" +) + +var disabledLogger *Logger + +func init() { + SetGlobalLevel(TraceLevel) + l := Nop() + disabledLogger = &l +} + +type ctxKey struct{} + +// WithContext returns a copy of ctx with l associated. If an instance of Logger +// is already in the context, the context is not updated. +// +// For instance, to add a field to an existing logger in the context, use this +// notation: +// +// ctx := r.Context() +// l := zerolog.Ctx(ctx) +// l.UpdateContext(func(c Context) Context { +// return c.Str("bar", "baz") +// }) +func (l Logger) WithContext(ctx context.Context) context.Context { + if lp, ok := ctx.Value(ctxKey{}).(*Logger); ok { + if lp == &l { + // Do not store same logger. + return ctx + } + } else if l.level == Disabled { + // Do not store disabled logger. + return ctx + } + return context.WithValue(ctx, ctxKey{}, &l) +} + +// Ctx returns the Logger associated with the ctx. If no logger +// is associated, DefaultContextLogger is returned, unless DefaultContextLogger +// is nil, in which case a disabled logger is returned. 
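+//
+// A typical pairing with WithContext (names and values are illustrative):
+//
+//	ctx := logger.WithContext(context.Background())
+//	zerolog.Ctx(ctx).Info().Str("component", "worker").Msg("started")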
+func Ctx(ctx context.Context) *Logger { + if l, ok := ctx.Value(ctxKey{}).(*Logger); ok { + return l + } else if l = DefaultContextLogger; l != nil { + return l + } + return disabledLogger +} diff --git a/vendor/github.com/rs/zerolog/encoder.go b/vendor/github.com/rs/zerolog/encoder.go new file mode 100644 index 00000000..09b24e80 --- /dev/null +++ b/vendor/github.com/rs/zerolog/encoder.go @@ -0,0 +1,56 @@ +package zerolog + +import ( + "net" + "time" +) + +type encoder interface { + AppendArrayDelim(dst []byte) []byte + AppendArrayEnd(dst []byte) []byte + AppendArrayStart(dst []byte) []byte + AppendBeginMarker(dst []byte) []byte + AppendBool(dst []byte, val bool) []byte + AppendBools(dst []byte, vals []bool) []byte + AppendBytes(dst, s []byte) []byte + AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte + AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte + AppendEndMarker(dst []byte) []byte + AppendFloat32(dst []byte, val float32) []byte + AppendFloat64(dst []byte, val float64) []byte + AppendFloats32(dst []byte, vals []float32) []byte + AppendFloats64(dst []byte, vals []float64) []byte + AppendHex(dst, s []byte) []byte + AppendIPAddr(dst []byte, ip net.IP) []byte + AppendIPPrefix(dst []byte, pfx net.IPNet) []byte + AppendInt(dst []byte, val int) []byte + AppendInt16(dst []byte, val int16) []byte + AppendInt32(dst []byte, val int32) []byte + AppendInt64(dst []byte, val int64) []byte + AppendInt8(dst []byte, val int8) []byte + AppendInterface(dst []byte, i interface{}) []byte + AppendInts(dst []byte, vals []int) []byte + AppendInts16(dst []byte, vals []int16) []byte + AppendInts32(dst []byte, vals []int32) []byte + AppendInts64(dst []byte, vals []int64) []byte + AppendInts8(dst []byte, vals []int8) []byte + AppendKey(dst []byte, key string) []byte + AppendLineBreak(dst []byte) []byte + AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte + AppendNil(dst []byte) []byte + AppendObjectData(dst []byte, o []byte) []byte + AppendString(dst []byte, s string) []byte + AppendStrings(dst []byte, vals []string) []byte + AppendTime(dst []byte, t time.Time, format string) []byte + AppendTimes(dst []byte, vals []time.Time, format string) []byte + AppendUint(dst []byte, val uint) []byte + AppendUint16(dst []byte, val uint16) []byte + AppendUint32(dst []byte, val uint32) []byte + AppendUint64(dst []byte, val uint64) []byte + AppendUint8(dst []byte, val uint8) []byte + AppendUints(dst []byte, vals []uint) []byte + AppendUints16(dst []byte, vals []uint16) []byte + AppendUints32(dst []byte, vals []uint32) []byte + AppendUints64(dst []byte, vals []uint64) []byte + AppendUints8(dst []byte, vals []uint8) []byte +} diff --git a/vendor/github.com/rs/zerolog/encoder_cbor.go b/vendor/github.com/rs/zerolog/encoder_cbor.go new file mode 100644 index 00000000..7b0dafef --- /dev/null +++ b/vendor/github.com/rs/zerolog/encoder_cbor.go @@ -0,0 +1,42 @@ +// +build binary_log + +package zerolog + +// This file contains bindings to do binary encoding. + +import ( + "github.com/rs/zerolog/internal/cbor" +) + +var ( + _ encoder = (*cbor.Encoder)(nil) + + enc = cbor.Encoder{} +) + +func init() { + // using closure to reflect the changes at runtime. + cbor.JSONMarshalFunc = func(v interface{}) ([]byte, error) { + return InterfaceMarshalFunc(v) + } +} + +func appendJSON(dst []byte, j []byte) []byte { + return cbor.AppendEmbeddedJSON(dst, j) +} + +// decodeIfBinaryToString - converts a binary formatted log msg to a +// JSON formatted String Log message. 
+func decodeIfBinaryToString(in []byte) string { + return cbor.DecodeIfBinaryToString(in) +} + +func decodeObjectToStr(in []byte) string { + return cbor.DecodeObjectToStr(in) +} + +// decodeIfBinaryToBytes - converts a binary formatted log msg to a +// JSON formatted Bytes Log message. +func decodeIfBinaryToBytes(in []byte) []byte { + return cbor.DecodeIfBinaryToBytes(in) +} diff --git a/vendor/github.com/rs/zerolog/encoder_json.go b/vendor/github.com/rs/zerolog/encoder_json.go new file mode 100644 index 00000000..0e0450e2 --- /dev/null +++ b/vendor/github.com/rs/zerolog/encoder_json.go @@ -0,0 +1,39 @@ +// +build !binary_log + +package zerolog + +// encoder_json.go file contains bindings to generate +// JSON encoded byte stream. + +import ( + "github.com/rs/zerolog/internal/json" +) + +var ( + _ encoder = (*json.Encoder)(nil) + + enc = json.Encoder{} +) + +func init() { + // using closure to reflect the changes at runtime. + json.JSONMarshalFunc = func(v interface{}) ([]byte, error) { + return InterfaceMarshalFunc(v) + } +} + +func appendJSON(dst []byte, j []byte) []byte { + return append(dst, j...) +} + +func decodeIfBinaryToString(in []byte) string { + return string(in) +} + +func decodeObjectToStr(in []byte) string { + return string(in) +} + +func decodeIfBinaryToBytes(in []byte) []byte { + return in +} diff --git a/vendor/github.com/rs/zerolog/event.go b/vendor/github.com/rs/zerolog/event.go new file mode 100644 index 00000000..0e2eaa68 --- /dev/null +++ b/vendor/github.com/rs/zerolog/event.go @@ -0,0 +1,780 @@ +package zerolog + +import ( + "fmt" + "net" + "os" + "runtime" + "sync" + "time" +) + +var eventPool = &sync.Pool{ + New: func() interface{} { + return &Event{ + buf: make([]byte, 0, 500), + } + }, +} + +// Event represents a log event. It is instanced by one of the level method of +// Logger and finalized by the Msg or Msgf method. +type Event struct { + buf []byte + w LevelWriter + level Level + done func(msg string) + stack bool // enable error stack trace + ch []Hook // hooks from context + skipFrame int // The number of additional frames to skip when printing the caller. +} + +func putEvent(e *Event) { + // Proper usage of a sync.Pool requires each entry to have approximately + // the same memory cost. To obtain this property when the stored type + // contains a variably-sized buffer, we add a hard limit on the maximum buffer + // to place back in the pool. + // + // See https://golang.org/issue/23199 + const maxSize = 1 << 16 // 64KiB + if cap(e.buf) > maxSize { + return + } + eventPool.Put(e) +} + +// LogObjectMarshaler provides a strongly-typed and encoding-agnostic interface +// to be implemented by types used with Event/Context's Object methods. +type LogObjectMarshaler interface { + MarshalZerologObject(e *Event) +} + +// LogArrayMarshaler provides a strongly-typed and encoding-agnostic interface +// to be implemented by types used with Event/Context's Array methods. 
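+//
+// A minimal illustrative implementation:
+//
+//	type IDs []int
+//
+//	func (ids IDs) MarshalZerologArray(a *zerolog.Array) {
+//		for _, id := range ids {
+//			a.Int(id)
+//		}
+//	}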
+type LogArrayMarshaler interface { + MarshalZerologArray(a *Array) +} + +func newEvent(w LevelWriter, level Level) *Event { + e := eventPool.Get().(*Event) + e.buf = e.buf[:0] + e.ch = nil + e.buf = enc.AppendBeginMarker(e.buf) + e.w = w + e.level = level + e.stack = false + e.skipFrame = 0 + return e +} + +func (e *Event) write() (err error) { + if e == nil { + return nil + } + if e.level != Disabled { + e.buf = enc.AppendEndMarker(e.buf) + e.buf = enc.AppendLineBreak(e.buf) + if e.w != nil { + _, err = e.w.WriteLevel(e.level, e.buf) + } + } + putEvent(e) + return +} + +// Enabled return false if the *Event is going to be filtered out by +// log level or sampling. +func (e *Event) Enabled() bool { + return e != nil && e.level != Disabled +} + +// Discard disables the event so Msg(f) won't print it. +func (e *Event) Discard() *Event { + if e == nil { + return e + } + e.level = Disabled + return nil +} + +// Msg sends the *Event with msg added as the message field if not empty. +// +// NOTICE: once this method is called, the *Event should be disposed. +// Calling Msg twice can have unexpected result. +func (e *Event) Msg(msg string) { + if e == nil { + return + } + e.msg(msg) +} + +// Send is equivalent to calling Msg(""). +// +// NOTICE: once this method is called, the *Event should be disposed. +func (e *Event) Send() { + if e == nil { + return + } + e.msg("") +} + +// Msgf sends the event with formatted msg added as the message field if not empty. +// +// NOTICE: once this method is called, the *Event should be disposed. +// Calling Msgf twice can have unexpected result. +func (e *Event) Msgf(format string, v ...interface{}) { + if e == nil { + return + } + e.msg(fmt.Sprintf(format, v...)) +} + +func (e *Event) MsgFunc(createMsg func() string) { + if e == nil { + return + } + e.msg(createMsg()) +} + +func (e *Event) msg(msg string) { + for _, hook := range e.ch { + hook.Run(e, e.level, msg) + } + if msg != "" { + e.buf = enc.AppendString(enc.AppendKey(e.buf, MessageFieldName), msg) + } + if e.done != nil { + defer e.done(msg) + } + if err := e.write(); err != nil { + if ErrorHandler != nil { + ErrorHandler(err) + } else { + fmt.Fprintf(os.Stderr, "zerolog: could not write event: %v\n", err) + } + } +} + +// Fields is a helper function to use a map or slice to set fields using type assertion. +// Only map[string]interface{} and []interface{} are accepted. []interface{} must +// alternate string keys and arbitrary values, and extraneous ones are ignored. +func (e *Event) Fields(fields interface{}) *Event { + if e == nil { + return e + } + e.buf = appendFields(e.buf, fields) + return e +} + +// Dict adds the field key with a dict to the event context. +// Use zerolog.Dict() to create the dictionary. +func (e *Event) Dict(key string, dict *Event) *Event { + if e == nil { + return e + } + dict.buf = enc.AppendEndMarker(dict.buf) + e.buf = append(enc.AppendKey(e.buf, key), dict.buf...) + putEvent(dict) + return e +} + +// Dict creates an Event to be used with the *Event.Dict method. +// Call usual field methods like Str, Int etc to add fields to this +// event and give it as argument the *Event.Dict method. +func Dict() *Event { + return newEvent(nil, 0) +} + +// Array adds the field key with an array to the event context. +// Use zerolog.Arr() to create the array or pass a type that +// implement the LogArrayMarshaler interface. 
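+//
+// For example (key and values are illustrative):
+//
+//	log.Info().Array("ids", zerolog.Arr().Int(1).Int(2)).Msg("batch written")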
+func (e *Event) Array(key string, arr LogArrayMarshaler) *Event { + if e == nil { + return e + } + e.buf = enc.AppendKey(e.buf, key) + var a *Array + if aa, ok := arr.(*Array); ok { + a = aa + } else { + a = Arr() + arr.MarshalZerologArray(a) + } + e.buf = a.write(e.buf) + return e +} + +func (e *Event) appendObject(obj LogObjectMarshaler) { + e.buf = enc.AppendBeginMarker(e.buf) + obj.MarshalZerologObject(e) + e.buf = enc.AppendEndMarker(e.buf) +} + +// Object marshals an object that implement the LogObjectMarshaler interface. +func (e *Event) Object(key string, obj LogObjectMarshaler) *Event { + if e == nil { + return e + } + e.buf = enc.AppendKey(e.buf, key) + if obj == nil { + e.buf = enc.AppendNil(e.buf) + + return e + } + + e.appendObject(obj) + return e +} + +// Func allows an anonymous func to run only if the event is enabled. +func (e *Event) Func(f func(e *Event)) *Event { + if e != nil && e.Enabled() { + f(e) + } + return e +} + +// EmbedObject marshals an object that implement the LogObjectMarshaler interface. +func (e *Event) EmbedObject(obj LogObjectMarshaler) *Event { + if e == nil { + return e + } + if obj == nil { + return e + } + obj.MarshalZerologObject(e) + return e +} + +// Str adds the field key with val as a string to the *Event context. +func (e *Event) Str(key, val string) *Event { + if e == nil { + return e + } + e.buf = enc.AppendString(enc.AppendKey(e.buf, key), val) + return e +} + +// Strs adds the field key with vals as a []string to the *Event context. +func (e *Event) Strs(key string, vals []string) *Event { + if e == nil { + return e + } + e.buf = enc.AppendStrings(enc.AppendKey(e.buf, key), vals) + return e +} + +// Stringer adds the field key with val.String() (or null if val is nil) +// to the *Event context. +func (e *Event) Stringer(key string, val fmt.Stringer) *Event { + if e == nil { + return e + } + e.buf = enc.AppendStringer(enc.AppendKey(e.buf, key), val) + return e +} + +// Stringers adds the field key with vals where each individual val +// is used as val.String() (or null if val is empty) to the *Event +// context. +func (e *Event) Stringers(key string, vals []fmt.Stringer) *Event { + if e == nil { + return e + } + e.buf = enc.AppendStringers(enc.AppendKey(e.buf, key), vals) + return e +} + +// Bytes adds the field key with val as a string to the *Event context. +// +// Runes outside of normal ASCII ranges will be hex-encoded in the resulting +// JSON. +func (e *Event) Bytes(key string, val []byte) *Event { + if e == nil { + return e + } + e.buf = enc.AppendBytes(enc.AppendKey(e.buf, key), val) + return e +} + +// Hex adds the field key with val as a hex string to the *Event context. +func (e *Event) Hex(key string, val []byte) *Event { + if e == nil { + return e + } + e.buf = enc.AppendHex(enc.AppendKey(e.buf, key), val) + return e +} + +// RawJSON adds already encoded JSON to the log line under key. +// +// No sanity check is performed on b; it must not contain carriage returns and +// be valid JSON. +func (e *Event) RawJSON(key string, b []byte) *Event { + if e == nil { + return e + } + e.buf = appendJSON(enc.AppendKey(e.buf, key), b) + return e +} + +// AnErr adds the field key with serialized err to the *Event context. +// If err is nil, no field is added. 
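+//
+// For example (key name is illustrative):
+//
+//	log.Warn().AnErr("cause", err).Msg("retrying")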
+func (e *Event) AnErr(key string, err error) *Event { + if e == nil { + return e + } + switch m := ErrorMarshalFunc(err).(type) { + case nil: + return e + case LogObjectMarshaler: + return e.Object(key, m) + case error: + if m == nil || isNilValue(m) { + return e + } else { + return e.Str(key, m.Error()) + } + case string: + return e.Str(key, m) + default: + return e.Interface(key, m) + } +} + +// Errs adds the field key with errs as an array of serialized errors to the +// *Event context. +func (e *Event) Errs(key string, errs []error) *Event { + if e == nil { + return e + } + arr := Arr() + for _, err := range errs { + switch m := ErrorMarshalFunc(err).(type) { + case LogObjectMarshaler: + arr = arr.Object(m) + case error: + arr = arr.Err(m) + case string: + arr = arr.Str(m) + default: + arr = arr.Interface(m) + } + } + + return e.Array(key, arr) +} + +// Err adds the field "error" with serialized err to the *Event context. +// If err is nil, no field is added. +// +// To customize the key name, change zerolog.ErrorFieldName. +// +// If Stack() has been called before and zerolog.ErrorStackMarshaler is defined, +// the err is passed to ErrorStackMarshaler and the result is appended to the +// zerolog.ErrorStackFieldName. +func (e *Event) Err(err error) *Event { + if e == nil { + return e + } + if e.stack && ErrorStackMarshaler != nil { + switch m := ErrorStackMarshaler(err).(type) { + case nil: + case LogObjectMarshaler: + e.Object(ErrorStackFieldName, m) + case error: + if m != nil && !isNilValue(m) { + e.Str(ErrorStackFieldName, m.Error()) + } + case string: + e.Str(ErrorStackFieldName, m) + default: + e.Interface(ErrorStackFieldName, m) + } + } + return e.AnErr(ErrorFieldName, err) +} + +// Stack enables stack trace printing for the error passed to Err(). +// +// ErrorStackMarshaler must be set for this method to do something. +func (e *Event) Stack() *Event { + if e != nil { + e.stack = true + } + return e +} + +// Bool adds the field key with val as a bool to the *Event context. +func (e *Event) Bool(key string, b bool) *Event { + if e == nil { + return e + } + e.buf = enc.AppendBool(enc.AppendKey(e.buf, key), b) + return e +} + +// Bools adds the field key with val as a []bool to the *Event context. +func (e *Event) Bools(key string, b []bool) *Event { + if e == nil { + return e + } + e.buf = enc.AppendBools(enc.AppendKey(e.buf, key), b) + return e +} + +// Int adds the field key with i as a int to the *Event context. +func (e *Event) Int(key string, i int) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInt(enc.AppendKey(e.buf, key), i) + return e +} + +// Ints adds the field key with i as a []int to the *Event context. +func (e *Event) Ints(key string, i []int) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInts(enc.AppendKey(e.buf, key), i) + return e +} + +// Int8 adds the field key with i as a int8 to the *Event context. +func (e *Event) Int8(key string, i int8) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInt8(enc.AppendKey(e.buf, key), i) + return e +} + +// Ints8 adds the field key with i as a []int8 to the *Event context. +func (e *Event) Ints8(key string, i []int8) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInts8(enc.AppendKey(e.buf, key), i) + return e +} + +// Int16 adds the field key with i as a int16 to the *Event context. 
+func (e *Event) Int16(key string, i int16) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInt16(enc.AppendKey(e.buf, key), i) + return e +} + +// Ints16 adds the field key with i as a []int16 to the *Event context. +func (e *Event) Ints16(key string, i []int16) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInts16(enc.AppendKey(e.buf, key), i) + return e +} + +// Int32 adds the field key with i as a int32 to the *Event context. +func (e *Event) Int32(key string, i int32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInt32(enc.AppendKey(e.buf, key), i) + return e +} + +// Ints32 adds the field key with i as a []int32 to the *Event context. +func (e *Event) Ints32(key string, i []int32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInts32(enc.AppendKey(e.buf, key), i) + return e +} + +// Int64 adds the field key with i as a int64 to the *Event context. +func (e *Event) Int64(key string, i int64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInt64(enc.AppendKey(e.buf, key), i) + return e +} + +// Ints64 adds the field key with i as a []int64 to the *Event context. +func (e *Event) Ints64(key string, i []int64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInts64(enc.AppendKey(e.buf, key), i) + return e +} + +// Uint adds the field key with i as a uint to the *Event context. +func (e *Event) Uint(key string, i uint) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUint(enc.AppendKey(e.buf, key), i) + return e +} + +// Uints adds the field key with i as a []int to the *Event context. +func (e *Event) Uints(key string, i []uint) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUints(enc.AppendKey(e.buf, key), i) + return e +} + +// Uint8 adds the field key with i as a uint8 to the *Event context. +func (e *Event) Uint8(key string, i uint8) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUint8(enc.AppendKey(e.buf, key), i) + return e +} + +// Uints8 adds the field key with i as a []int8 to the *Event context. +func (e *Event) Uints8(key string, i []uint8) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUints8(enc.AppendKey(e.buf, key), i) + return e +} + +// Uint16 adds the field key with i as a uint16 to the *Event context. +func (e *Event) Uint16(key string, i uint16) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUint16(enc.AppendKey(e.buf, key), i) + return e +} + +// Uints16 adds the field key with i as a []int16 to the *Event context. +func (e *Event) Uints16(key string, i []uint16) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUints16(enc.AppendKey(e.buf, key), i) + return e +} + +// Uint32 adds the field key with i as a uint32 to the *Event context. +func (e *Event) Uint32(key string, i uint32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUint32(enc.AppendKey(e.buf, key), i) + return e +} + +// Uints32 adds the field key with i as a []int32 to the *Event context. +func (e *Event) Uints32(key string, i []uint32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUints32(enc.AppendKey(e.buf, key), i) + return e +} + +// Uint64 adds the field key with i as a uint64 to the *Event context. +func (e *Event) Uint64(key string, i uint64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUint64(enc.AppendKey(e.buf, key), i) + return e +} + +// Uints64 adds the field key with i as a []int64 to the *Event context. 
+func (e *Event) Uints64(key string, i []uint64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUints64(enc.AppendKey(e.buf, key), i) + return e +} + +// Float32 adds the field key with f as a float32 to the *Event context. +func (e *Event) Float32(key string, f float32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendFloat32(enc.AppendKey(e.buf, key), f) + return e +} + +// Floats32 adds the field key with f as a []float32 to the *Event context. +func (e *Event) Floats32(key string, f []float32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendFloats32(enc.AppendKey(e.buf, key), f) + return e +} + +// Float64 adds the field key with f as a float64 to the *Event context. +func (e *Event) Float64(key string, f float64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendFloat64(enc.AppendKey(e.buf, key), f) + return e +} + +// Floats64 adds the field key with f as a []float64 to the *Event context. +func (e *Event) Floats64(key string, f []float64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendFloats64(enc.AppendKey(e.buf, key), f) + return e +} + +// Timestamp adds the current local time as UNIX timestamp to the *Event context with the "time" key. +// To customize the key name, change zerolog.TimestampFieldName. +// +// NOTE: It won't dedupe the "time" key if the *Event (or *Context) has one +// already. +func (e *Event) Timestamp() *Event { + if e == nil { + return e + } + e.buf = enc.AppendTime(enc.AppendKey(e.buf, TimestampFieldName), TimestampFunc(), TimeFieldFormat) + return e +} + +// Time adds the field key with t formatted as string using zerolog.TimeFieldFormat. +func (e *Event) Time(key string, t time.Time) *Event { + if e == nil { + return e + } + e.buf = enc.AppendTime(enc.AppendKey(e.buf, key), t, TimeFieldFormat) + return e +} + +// Times adds the field key with t formatted as string using zerolog.TimeFieldFormat. +func (e *Event) Times(key string, t []time.Time) *Event { + if e == nil { + return e + } + e.buf = enc.AppendTimes(enc.AppendKey(e.buf, key), t, TimeFieldFormat) + return e +} + +// Dur adds the field key with duration d stored as zerolog.DurationFieldUnit. +// If zerolog.DurationFieldInteger is true, durations are rendered as integer +// instead of float. +func (e *Event) Dur(key string, d time.Duration) *Event { + if e == nil { + return e + } + e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger) + return e +} + +// Durs adds the field key with duration d stored as zerolog.DurationFieldUnit. +// If zerolog.DurationFieldInteger is true, durations are rendered as integer +// instead of float. +func (e *Event) Durs(key string, d []time.Duration) *Event { + if e == nil { + return e + } + e.buf = enc.AppendDurations(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger) + return e +} + +// TimeDiff adds the field key with positive duration between time t and start. +// If time t is not greater than start, duration will be 0. +// Duration format follows the same principle as Dur(). +func (e *Event) TimeDiff(key string, t time.Time, start time.Time) *Event { + if e == nil { + return e + } + var d time.Duration + if t.After(start) { + d = t.Sub(start) + } + e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger) + return e +} + +// Interface adds the field key with i marshaled using reflection. 
+func (e *Event) Interface(key string, i interface{}) *Event { + if e == nil { + return e + } + if obj, ok := i.(LogObjectMarshaler); ok { + return e.Object(key, obj) + } + e.buf = enc.AppendInterface(enc.AppendKey(e.buf, key), i) + return e +} + +// CallerSkipFrame instructs any future Caller calls to skip the specified number of frames. +// This includes those added via hooks from the context. +func (e *Event) CallerSkipFrame(skip int) *Event { + if e == nil { + return e + } + e.skipFrame += skip + return e +} + +// Caller adds the file:line of the caller with the zerolog.CallerFieldName key. +// The argument skip is the number of stack frames to ascend +// Skip If not passed, use the global variable CallerSkipFrameCount +func (e *Event) Caller(skip ...int) *Event { + sk := CallerSkipFrameCount + if len(skip) > 0 { + sk = skip[0] + CallerSkipFrameCount + } + return e.caller(sk) +} + +func (e *Event) caller(skip int) *Event { + if e == nil { + return e + } + pc, file, line, ok := runtime.Caller(skip + e.skipFrame) + if !ok { + return e + } + e.buf = enc.AppendString(enc.AppendKey(e.buf, CallerFieldName), CallerMarshalFunc(pc, file, line)) + return e +} + +// IPAddr adds IPv4 or IPv6 Address to the event +func (e *Event) IPAddr(key string, ip net.IP) *Event { + if e == nil { + return e + } + e.buf = enc.AppendIPAddr(enc.AppendKey(e.buf, key), ip) + return e +} + +// IPPrefix adds IPv4 or IPv6 Prefix (address and mask) to the event +func (e *Event) IPPrefix(key string, pfx net.IPNet) *Event { + if e == nil { + return e + } + e.buf = enc.AppendIPPrefix(enc.AppendKey(e.buf, key), pfx) + return e +} + +// MACAddr adds MAC address to the event +func (e *Event) MACAddr(key string, ha net.HardwareAddr) *Event { + if e == nil { + return e + } + e.buf = enc.AppendMACAddr(enc.AppendKey(e.buf, key), ha) + return e +} diff --git a/vendor/github.com/rs/zerolog/fields.go b/vendor/github.com/rs/zerolog/fields.go new file mode 100644 index 00000000..c1eb5ce7 --- /dev/null +++ b/vendor/github.com/rs/zerolog/fields.go @@ -0,0 +1,277 @@ +package zerolog + +import ( + "encoding/json" + "net" + "sort" + "time" + "unsafe" +) + +func isNilValue(i interface{}) bool { + return (*[2]uintptr)(unsafe.Pointer(&i))[1] == 0 +} + +func appendFields(dst []byte, fields interface{}) []byte { + switch fields := fields.(type) { + case []interface{}: + if n := len(fields); n&0x1 == 1 { // odd number + fields = fields[:n-1] + } + dst = appendFieldList(dst, fields) + case map[string]interface{}: + keys := make([]string, 0, len(fields)) + for key := range fields { + keys = append(keys, key) + } + sort.Strings(keys) + kv := make([]interface{}, 2) + for _, key := range keys { + kv[0], kv[1] = key, fields[key] + dst = appendFieldList(dst, kv) + } + } + return dst +} + +func appendFieldList(dst []byte, kvList []interface{}) []byte { + for i, n := 0, len(kvList); i < n; i += 2 { + key, val := kvList[i], kvList[i+1] + if key, ok := key.(string); ok { + dst = enc.AppendKey(dst, key) + } else { + continue + } + if val, ok := val.(LogObjectMarshaler); ok { + e := newEvent(nil, 0) + e.buf = e.buf[:0] + e.appendObject(val) + dst = append(dst, e.buf...) + putEvent(e) + continue + } + switch val := val.(type) { + case string: + dst = enc.AppendString(dst, val) + case []byte: + dst = enc.AppendBytes(dst, val) + case error: + switch m := ErrorMarshalFunc(val).(type) { + case LogObjectMarshaler: + e := newEvent(nil, 0) + e.buf = e.buf[:0] + e.appendObject(m) + dst = append(dst, e.buf...) 
+ putEvent(e) + case error: + if m == nil || isNilValue(m) { + dst = enc.AppendNil(dst) + } else { + dst = enc.AppendString(dst, m.Error()) + } + case string: + dst = enc.AppendString(dst, m) + default: + dst = enc.AppendInterface(dst, m) + } + case []error: + dst = enc.AppendArrayStart(dst) + for i, err := range val { + switch m := ErrorMarshalFunc(err).(type) { + case LogObjectMarshaler: + e := newEvent(nil, 0) + e.buf = e.buf[:0] + e.appendObject(m) + dst = append(dst, e.buf...) + putEvent(e) + case error: + if m == nil || isNilValue(m) { + dst = enc.AppendNil(dst) + } else { + dst = enc.AppendString(dst, m.Error()) + } + case string: + dst = enc.AppendString(dst, m) + default: + dst = enc.AppendInterface(dst, m) + } + + if i < (len(val) - 1) { + enc.AppendArrayDelim(dst) + } + } + dst = enc.AppendArrayEnd(dst) + case bool: + dst = enc.AppendBool(dst, val) + case int: + dst = enc.AppendInt(dst, val) + case int8: + dst = enc.AppendInt8(dst, val) + case int16: + dst = enc.AppendInt16(dst, val) + case int32: + dst = enc.AppendInt32(dst, val) + case int64: + dst = enc.AppendInt64(dst, val) + case uint: + dst = enc.AppendUint(dst, val) + case uint8: + dst = enc.AppendUint8(dst, val) + case uint16: + dst = enc.AppendUint16(dst, val) + case uint32: + dst = enc.AppendUint32(dst, val) + case uint64: + dst = enc.AppendUint64(dst, val) + case float32: + dst = enc.AppendFloat32(dst, val) + case float64: + dst = enc.AppendFloat64(dst, val) + case time.Time: + dst = enc.AppendTime(dst, val, TimeFieldFormat) + case time.Duration: + dst = enc.AppendDuration(dst, val, DurationFieldUnit, DurationFieldInteger) + case *string: + if val != nil { + dst = enc.AppendString(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *bool: + if val != nil { + dst = enc.AppendBool(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int: + if val != nil { + dst = enc.AppendInt(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int8: + if val != nil { + dst = enc.AppendInt8(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int16: + if val != nil { + dst = enc.AppendInt16(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int32: + if val != nil { + dst = enc.AppendInt32(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int64: + if val != nil { + dst = enc.AppendInt64(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint: + if val != nil { + dst = enc.AppendUint(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint8: + if val != nil { + dst = enc.AppendUint8(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint16: + if val != nil { + dst = enc.AppendUint16(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint32: + if val != nil { + dst = enc.AppendUint32(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint64: + if val != nil { + dst = enc.AppendUint64(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *float32: + if val != nil { + dst = enc.AppendFloat32(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *float64: + if val != nil { + dst = enc.AppendFloat64(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *time.Time: + if val != nil { + dst = enc.AppendTime(dst, *val, TimeFieldFormat) + } else { + dst = enc.AppendNil(dst) + } + case *time.Duration: + if val != nil { + dst = enc.AppendDuration(dst, *val, DurationFieldUnit, DurationFieldInteger) + } else { + dst = enc.AppendNil(dst) + } + case []string: + dst = enc.AppendStrings(dst, val) + case []bool: + dst = 
enc.AppendBools(dst, val) + case []int: + dst = enc.AppendInts(dst, val) + case []int8: + dst = enc.AppendInts8(dst, val) + case []int16: + dst = enc.AppendInts16(dst, val) + case []int32: + dst = enc.AppendInts32(dst, val) + case []int64: + dst = enc.AppendInts64(dst, val) + case []uint: + dst = enc.AppendUints(dst, val) + // case []uint8: + // dst = enc.AppendUints8(dst, val) + case []uint16: + dst = enc.AppendUints16(dst, val) + case []uint32: + dst = enc.AppendUints32(dst, val) + case []uint64: + dst = enc.AppendUints64(dst, val) + case []float32: + dst = enc.AppendFloats32(dst, val) + case []float64: + dst = enc.AppendFloats64(dst, val) + case []time.Time: + dst = enc.AppendTimes(dst, val, TimeFieldFormat) + case []time.Duration: + dst = enc.AppendDurations(dst, val, DurationFieldUnit, DurationFieldInteger) + case nil: + dst = enc.AppendNil(dst) + case net.IP: + dst = enc.AppendIPAddr(dst, val) + case net.IPNet: + dst = enc.AppendIPPrefix(dst, val) + case net.HardwareAddr: + dst = enc.AppendMACAddr(dst, val) + case json.RawMessage: + dst = appendJSON(dst, val) + default: + dst = enc.AppendInterface(dst, val) + } + } + return dst +} diff --git a/vendor/github.com/rs/zerolog/globals.go b/vendor/github.com/rs/zerolog/globals.go new file mode 100644 index 00000000..e1067deb --- /dev/null +++ b/vendor/github.com/rs/zerolog/globals.go @@ -0,0 +1,142 @@ +package zerolog + +import ( + "encoding/json" + "strconv" + "sync/atomic" + "time" +) + +const ( + // TimeFormatUnix defines a time format that makes time fields to be + // serialized as Unix timestamp integers. + TimeFormatUnix = "" + + // TimeFormatUnixMs defines a time format that makes time fields to be + // serialized as Unix timestamp integers in milliseconds. + TimeFormatUnixMs = "UNIXMS" + + // TimeFormatUnixMicro defines a time format that makes time fields to be + // serialized as Unix timestamp integers in microseconds. + TimeFormatUnixMicro = "UNIXMICRO" + + // TimeFormatUnixNano defines a time format that makes time fields to be + // serialized as Unix timestamp integers in nanoseconds. + TimeFormatUnixNano = "UNIXNANO" +) + +var ( + // TimestampFieldName is the field name used for the timestamp field. + TimestampFieldName = "time" + + // LevelFieldName is the field name used for the level field. + LevelFieldName = "level" + + // LevelTraceValue is the value used for the trace level field. + LevelTraceValue = "trace" + // LevelDebugValue is the value used for the debug level field. + LevelDebugValue = "debug" + // LevelInfoValue is the value used for the info level field. + LevelInfoValue = "info" + // LevelWarnValue is the value used for the warn level field. + LevelWarnValue = "warn" + // LevelErrorValue is the value used for the error level field. + LevelErrorValue = "error" + // LevelFatalValue is the value used for the fatal level field. + LevelFatalValue = "fatal" + // LevelPanicValue is the value used for the panic level field. + LevelPanicValue = "panic" + + // LevelFieldMarshalFunc allows customization of global level field marshaling. + LevelFieldMarshalFunc = func(l Level) string { + return l.String() + } + + // MessageFieldName is the field name used for the message field. + MessageFieldName = "message" + + // ErrorFieldName is the field name used for error fields. + ErrorFieldName = "error" + + // CallerFieldName is the field name used for caller field. + CallerFieldName = "caller" + + // CallerSkipFrameCount is the number of stack frames to skip to find the caller. 
+ CallerSkipFrameCount = 2 + + // CallerMarshalFunc allows customization of global caller marshaling + CallerMarshalFunc = func(pc uintptr, file string, line int) string { + return file + ":" + strconv.Itoa(line) + } + + // ErrorStackFieldName is the field name used for error stacks. + ErrorStackFieldName = "stack" + + // ErrorStackMarshaler extract the stack from err if any. + ErrorStackMarshaler func(err error) interface{} + + // ErrorMarshalFunc allows customization of global error marshaling + ErrorMarshalFunc = func(err error) interface{} { + return err + } + + // InterfaceMarshalFunc allows customization of interface marshaling. + // Default: "encoding/json.Marshal" + InterfaceMarshalFunc = json.Marshal + + // TimeFieldFormat defines the time format of the Time field type. If set to + // TimeFormatUnix, TimeFormatUnixMs, TimeFormatUnixMicro or TimeFormatUnixNano, the time is formatted as a UNIX + // timestamp as integer. + TimeFieldFormat = time.RFC3339 + + // TimestampFunc defines the function called to generate a timestamp. + TimestampFunc = time.Now + + // DurationFieldUnit defines the unit for time.Duration type fields added + // using the Dur method. + DurationFieldUnit = time.Millisecond + + // DurationFieldInteger renders Dur fields as integer instead of float if + // set to true. + DurationFieldInteger = false + + // ErrorHandler is called whenever zerolog fails to write an event on its + // output. If not set, an error is printed on the stderr. This handler must + // be thread safe and non-blocking. + ErrorHandler func(err error) + + // DefaultContextLogger is returned from Ctx() if there is no logger associated + // with the context. + DefaultContextLogger *Logger +) + +var ( + gLevel = new(int32) + disableSampling = new(int32) +) + +// SetGlobalLevel sets the global override for log level. If this +// values is raised, all Loggers will use at least this value. +// +// To globally disable logs, set GlobalLevel to Disabled. +func SetGlobalLevel(l Level) { + atomic.StoreInt32(gLevel, int32(l)) +} + +// GlobalLevel returns the current global log level +func GlobalLevel() Level { + return Level(atomic.LoadInt32(gLevel)) +} + +// DisableSampling will disable sampling in all Loggers if true. +func DisableSampling(v bool) { + var i int32 + if v { + i = 1 + } + atomic.StoreInt32(disableSampling, i) +} + +func samplingDisabled() bool { + return atomic.LoadInt32(disableSampling) == 1 +} diff --git a/vendor/github.com/rs/zerolog/go112.go b/vendor/github.com/rs/zerolog/go112.go new file mode 100644 index 00000000..e7b5a1bd --- /dev/null +++ b/vendor/github.com/rs/zerolog/go112.go @@ -0,0 +1,7 @@ +// +build go1.12 + +package zerolog + +// Since go 1.12, some auto generated init functions are hidden from +// runtime.Caller. +const contextCallerSkipFrameCount = 2 diff --git a/vendor/github.com/rs/zerolog/hook.go b/vendor/github.com/rs/zerolog/hook.go new file mode 100644 index 00000000..ec6effc1 --- /dev/null +++ b/vendor/github.com/rs/zerolog/hook.go @@ -0,0 +1,64 @@ +package zerolog + +// Hook defines an interface to a log hook. +type Hook interface { + // Run runs the hook with the event. + Run(e *Event, level Level, message string) +} + +// HookFunc is an adaptor to allow the use of an ordinary function +// as a Hook. +type HookFunc func(e *Event, level Level, message string) + +// Run implements the Hook interface. +func (h HookFunc) Run(e *Event, level Level, message string) { + h(e, level, message) +} + +// LevelHook applies a different hook for each level. 
+type LevelHook struct { + NoLevelHook, TraceHook, DebugHook, InfoHook, WarnHook, ErrorHook, FatalHook, PanicHook Hook +} + +// Run implements the Hook interface. +func (h LevelHook) Run(e *Event, level Level, message string) { + switch level { + case TraceLevel: + if h.TraceHook != nil { + h.TraceHook.Run(e, level, message) + } + case DebugLevel: + if h.DebugHook != nil { + h.DebugHook.Run(e, level, message) + } + case InfoLevel: + if h.InfoHook != nil { + h.InfoHook.Run(e, level, message) + } + case WarnLevel: + if h.WarnHook != nil { + h.WarnHook.Run(e, level, message) + } + case ErrorLevel: + if h.ErrorHook != nil { + h.ErrorHook.Run(e, level, message) + } + case FatalLevel: + if h.FatalHook != nil { + h.FatalHook.Run(e, level, message) + } + case PanicLevel: + if h.PanicHook != nil { + h.PanicHook.Run(e, level, message) + } + case NoLevel: + if h.NoLevelHook != nil { + h.NoLevelHook.Run(e, level, message) + } + } +} + +// NewLevelHook returns a new LevelHook. +func NewLevelHook() LevelHook { + return LevelHook{} +} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/README.md b/vendor/github.com/rs/zerolog/internal/cbor/README.md new file mode 100644 index 00000000..92c2e8c7 --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/cbor/README.md @@ -0,0 +1,56 @@ +## Reference: + CBOR Encoding is described in [RFC7049](https://tools.ietf.org/html/rfc7049) + +## Comparison of JSON vs CBOR + +Two main areas of reduction are: + +1. CPU usage to write a log msg +2. Size (in bytes) of log messages. + + +CPU Usage savings are below: +``` +name JSON time/op CBOR time/op delta +Info-32 15.3ns ± 1% 11.7ns ± 3% -23.78% (p=0.000 n=9+10) +ContextFields-32 16.2ns ± 2% 12.3ns ± 3% -23.97% (p=0.000 n=9+9) +ContextAppend-32 6.70ns ± 0% 6.20ns ± 0% -7.44% (p=0.000 n=9+9) +LogFields-32 66.4ns ± 0% 24.6ns ± 2% -62.89% (p=0.000 n=10+9) +LogArrayObject-32 911ns ±11% 768ns ± 6% -15.64% (p=0.000 n=10+10) +LogFieldType/Floats-32 70.3ns ± 2% 29.5ns ± 1% -57.98% (p=0.000 n=10+10) +LogFieldType/Err-32 14.0ns ± 3% 12.1ns ± 8% -13.20% (p=0.000 n=8+10) +LogFieldType/Dur-32 17.2ns ± 2% 13.1ns ± 1% -24.27% (p=0.000 n=10+9) +LogFieldType/Object-32 54.3ns ±11% 52.3ns ± 7% ~ (p=0.239 n=10+10) +LogFieldType/Ints-32 20.3ns ± 2% 15.1ns ± 2% -25.50% (p=0.000 n=9+10) +LogFieldType/Interfaces-32 642ns ±11% 621ns ± 9% ~ (p=0.118 n=10+10) +LogFieldType/Interface(Objects)-32 635ns ±13% 632ns ± 9% ~ (p=0.592 n=10+10) +LogFieldType/Times-32 294ns ± 0% 27ns ± 1% -90.71% (p=0.000 n=10+9) +LogFieldType/Durs-32 121ns ± 0% 33ns ± 2% -72.44% (p=0.000 n=9+9) +LogFieldType/Interface(Object)-32 56.6ns ± 8% 52.3ns ± 8% -7.54% (p=0.007 n=10+10) +LogFieldType/Errs-32 17.8ns ± 3% 16.1ns ± 2% -9.71% (p=0.000 n=10+9) +LogFieldType/Time-32 40.5ns ± 1% 12.7ns ± 6% -68.66% (p=0.000 n=8+9) +LogFieldType/Bool-32 12.0ns ± 5% 10.2ns ± 2% -15.18% (p=0.000 n=10+8) +LogFieldType/Bools-32 17.2ns ± 2% 12.6ns ± 4% -26.63% (p=0.000 n=10+10) +LogFieldType/Int-32 12.3ns ± 2% 11.2ns ± 4% -9.27% (p=0.000 n=9+10) +LogFieldType/Float-32 16.7ns ± 1% 12.6ns ± 2% -24.42% (p=0.000 n=7+9) +LogFieldType/Str-32 12.7ns ± 7% 11.3ns ± 7% -10.88% (p=0.000 n=10+9) +LogFieldType/Strs-32 20.3ns ± 3% 18.2ns ± 3% -10.25% (p=0.000 n=9+10) +LogFieldType/Interface-32 183ns ±12% 175ns ± 9% ~ (p=0.078 n=10+10) +``` + +Log message size savings is greatly dependent on the number and type of fields in the log message. +Assuming this log message (with an Integer, timestamp and string, in addition to level). 
+ +`{"level":"error","Fault":41650,"time":"2018-04-01T15:18:19-07:00","message":"Some Message"}` + +Two measurements were done for the log file sizes - one without any compression, second +using [compress/zlib](https://golang.org/pkg/compress/zlib/). + +Results for 10,000 log messages: + +| Log Format | Plain File Size (in KB) | Compressed File Size (in KB) | +| :--- | :---: | :---: | +| JSON | 920 | 28 | +| CBOR | 550 | 28 | + +The example used to calculate the above data is available in [Examples](examples). diff --git a/vendor/github.com/rs/zerolog/internal/cbor/base.go b/vendor/github.com/rs/zerolog/internal/cbor/base.go new file mode 100644 index 00000000..51fe86c9 --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/cbor/base.go @@ -0,0 +1,19 @@ +package cbor + +// JSONMarshalFunc is used to marshal interface to JSON encoded byte slice. +// Making it package level instead of embedded in Encoder brings +// some extra efforts at importing, but avoids value copy when the functions +// of Encoder being invoked. +// DO REMEMBER to set this variable at importing, or +// you might get a nil pointer dereference panic at runtime. +var JSONMarshalFunc func(v interface{}) ([]byte, error) + +type Encoder struct{} + +// AppendKey adds a key (string) to the binary encoded log message +func (e Encoder) AppendKey(dst []byte, key string) []byte { + if len(dst) < 1 { + dst = e.AppendBeginMarker(dst) + } + return e.AppendString(dst, key) +} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/cbor.go b/vendor/github.com/rs/zerolog/internal/cbor/cbor.go new file mode 100644 index 00000000..bc54e37a --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/cbor/cbor.go @@ -0,0 +1,101 @@ +// Package cbor provides primitives for storing different data +// in the CBOR (binary) format. CBOR is defined in RFC7049. +package cbor + +import "time" + +const ( + majorOffset = 5 + additionalMax = 23 + + // Non Values. + additionalTypeBoolFalse byte = 20 + additionalTypeBoolTrue byte = 21 + additionalTypeNull byte = 22 + + // Integer (+ve and -ve) Sub-types. + additionalTypeIntUint8 byte = 24 + additionalTypeIntUint16 byte = 25 + additionalTypeIntUint32 byte = 26 + additionalTypeIntUint64 byte = 27 + + // Float Sub-types. + additionalTypeFloat16 byte = 25 + additionalTypeFloat32 byte = 26 + additionalTypeFloat64 byte = 27 + additionalTypeBreak byte = 31 + + // Tag Sub-types. + additionalTypeTimestamp byte = 01 + + // Extended Tags - from https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml + additionalTypeTagNetworkAddr uint16 = 260 + additionalTypeTagNetworkPrefix uint16 = 261 + additionalTypeEmbeddedJSON uint16 = 262 + additionalTypeTagHexString uint16 = 263 + + // Unspecified number of elements. 
+ additionalTypeInfiniteCount byte = 31 +) +const ( + majorTypeUnsignedInt byte = iota << majorOffset // Major type 0 + majorTypeNegativeInt // Major type 1 + majorTypeByteString // Major type 2 + majorTypeUtf8String // Major type 3 + majorTypeArray // Major type 4 + majorTypeMap // Major type 5 + majorTypeTags // Major type 6 + majorTypeSimpleAndFloat // Major type 7 +) + +const ( + maskOutAdditionalType byte = (7 << majorOffset) + maskOutMajorType byte = 31 +) + +const ( + float32Nan = "\xfa\x7f\xc0\x00\x00" + float32PosInfinity = "\xfa\x7f\x80\x00\x00" + float32NegInfinity = "\xfa\xff\x80\x00\x00" + float64Nan = "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00" + float64PosInfinity = "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00" + float64NegInfinity = "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00" +) + +// IntegerTimeFieldFormat indicates the format of timestamp decoded +// from an integer (time in seconds). +var IntegerTimeFieldFormat = time.RFC3339 + +// NanoTimeFieldFormat indicates the format of timestamp decoded +// from a float value (time in seconds and nanoseconds). +var NanoTimeFieldFormat = time.RFC3339Nano + +func appendCborTypePrefix(dst []byte, major byte, number uint64) []byte { + byteCount := 8 + var minor byte + switch { + case number < 256: + byteCount = 1 + minor = additionalTypeIntUint8 + + case number < 65536: + byteCount = 2 + minor = additionalTypeIntUint16 + + case number < 4294967296: + byteCount = 4 + minor = additionalTypeIntUint32 + + default: + byteCount = 8 + minor = additionalTypeIntUint64 + + } + + dst = append(dst, major|minor) + byteCount-- + for ; byteCount >= 0; byteCount-- { + dst = append(dst, byte(number>>(uint(byteCount)*8))) + } + return dst +} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go b/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go new file mode 100644 index 00000000..fc16f98c --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go @@ -0,0 +1,614 @@ +package cbor + +// This file contains code to decode a stream of CBOR Data into JSON. + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "net" + "runtime" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +var decodeTimeZone *time.Location + +const hexTable = "0123456789abcdef" + +const isFloat32 = 4 +const isFloat64 = 8 + +func readNBytes(src *bufio.Reader, n int) []byte { + ret := make([]byte, n) + for i := 0; i < n; i++ { + ch, e := src.ReadByte() + if e != nil { + panic(fmt.Errorf("Tried to Read %d Bytes.. But hit end of file", n)) + } + ret[i] = ch + } + return ret +} + +func readByte(src *bufio.Reader) byte { + b, e := src.ReadByte() + if e != nil { + panic(fmt.Errorf("Tried to Read 1 Byte.. 
But hit end of file")) + } + return b +} + +func decodeIntAdditionalType(src *bufio.Reader, minor byte) int64 { + val := int64(0) + if minor <= 23 { + val = int64(minor) + } else { + bytesToRead := 0 + switch minor { + case additionalTypeIntUint8: + bytesToRead = 1 + case additionalTypeIntUint16: + bytesToRead = 2 + case additionalTypeIntUint32: + bytesToRead = 4 + case additionalTypeIntUint64: + bytesToRead = 8 + default: + panic(fmt.Errorf("Invalid Additional Type: %d in decodeInteger (expected <28)", minor)) + } + pb := readNBytes(src, bytesToRead) + for i := 0; i < bytesToRead; i++ { + val = val * 256 + val += int64(pb[i]) + } + } + return val +} + +func decodeInteger(src *bufio.Reader) int64 { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeUnsignedInt && major != majorTypeNegativeInt { + panic(fmt.Errorf("Major type is: %d in decodeInteger!! (expected 0 or 1)", major)) + } + val := decodeIntAdditionalType(src, minor) + if major == 0 { + return val + } + return (-1 - val) +} + +func decodeFloat(src *bufio.Reader) (float64, int) { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeSimpleAndFloat { + panic(fmt.Errorf("Incorrect Major type is: %d in decodeFloat", major)) + } + + switch minor { + case additionalTypeFloat16: + panic(fmt.Errorf("float16 is not suppported in decodeFloat")) + + case additionalTypeFloat32: + pb := readNBytes(src, 4) + switch string(pb) { + case float32Nan: + return math.NaN(), isFloat32 + case float32PosInfinity: + return math.Inf(0), isFloat32 + case float32NegInfinity: + return math.Inf(-1), isFloat32 + } + n := uint32(0) + for i := 0; i < 4; i++ { + n = n * 256 + n += uint32(pb[i]) + } + val := math.Float32frombits(n) + return float64(val), isFloat32 + case additionalTypeFloat64: + pb := readNBytes(src, 8) + switch string(pb) { + case float64Nan: + return math.NaN(), isFloat64 + case float64PosInfinity: + return math.Inf(0), isFloat64 + case float64NegInfinity: + return math.Inf(-1), isFloat64 + } + n := uint64(0) + for i := 0; i < 8; i++ { + n = n * 256 + n += uint64(pb[i]) + } + val := math.Float64frombits(n) + return val, isFloat64 + } + panic(fmt.Errorf("Invalid Additional Type: %d in decodeFloat", minor)) +} + +func decodeStringComplex(dst []byte, s string, pos uint) []byte { + i := int(pos) + start := 0 + + for i < len(s) { + b := s[i] + if b >= utf8.RuneSelf { + r, size := utf8.DecodeRuneInString(s[i:]) + if r == utf8.RuneError && size == 1 { + // In case of error, first append previous simple characters to + // the byte slice if any and append a replacement character code + // in place of the invalid sequence. + if start < i { + dst = append(dst, s[start:i]...) + } + dst = append(dst, `\ufffd`...) + i += size + start = i + continue + } + i += size + continue + } + if b >= 0x20 && b <= 0x7e && b != '\\' && b != '"' { + i++ + continue + } + // We encountered a character that needs to be encoded. + // Let's append the previous simple characters to the byte slice + // and switch our operation to read and encode the remainder + // characters byte-by-byte. + if start < i { + dst = append(dst, s[start:i]...) 
+ } + switch b { + case '"', '\\': + dst = append(dst, '\\', b) + case '\b': + dst = append(dst, '\\', 'b') + case '\f': + dst = append(dst, '\\', 'f') + case '\n': + dst = append(dst, '\\', 'n') + case '\r': + dst = append(dst, '\\', 'r') + case '\t': + dst = append(dst, '\\', 't') + default: + dst = append(dst, '\\', 'u', '0', '0', hexTable[b>>4], hexTable[b&0xF]) + } + i++ + start = i + } + if start < len(s) { + dst = append(dst, s[start:]...) + } + return dst +} + +func decodeString(src *bufio.Reader, noQuotes bool) []byte { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeByteString { + panic(fmt.Errorf("Major type is: %d in decodeString", major)) + } + result := []byte{} + if !noQuotes { + result = append(result, '"') + } + length := decodeIntAdditionalType(src, minor) + len := int(length) + pbs := readNBytes(src, len) + result = append(result, pbs...) + if noQuotes { + return result + } + return append(result, '"') +} + +func decodeUTF8String(src *bufio.Reader) []byte { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeUtf8String { + panic(fmt.Errorf("Major type is: %d in decodeUTF8String", major)) + } + result := []byte{'"'} + length := decodeIntAdditionalType(src, minor) + len := int(length) + pbs := readNBytes(src, len) + + for i := 0; i < len; i++ { + // Check if the character needs encoding. Control characters, slashes, + // and the double quote need json encoding. Bytes above the ascii + // boundary needs utf8 encoding. + if pbs[i] < 0x20 || pbs[i] > 0x7e || pbs[i] == '\\' || pbs[i] == '"' { + // We encountered a character that needs to be encoded. Switch + // to complex version of the algorithm. + dst := []byte{'"'} + dst = decodeStringComplex(dst, string(pbs), uint(i)) + return append(dst, '"') + } + } + // The string has no need for encoding and therefore is directly + // appended to the byte slice. + result = append(result, pbs...) 
+ return append(result, '"') +} + +func array2Json(src *bufio.Reader, dst io.Writer) { + dst.Write([]byte{'['}) + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeArray { + panic(fmt.Errorf("Major type is: %d in array2Json", major)) + } + len := 0 + unSpecifiedCount := false + if minor == additionalTypeInfiniteCount { + unSpecifiedCount = true + } else { + length := decodeIntAdditionalType(src, minor) + len = int(length) + } + for i := 0; unSpecifiedCount || i < len; i++ { + if unSpecifiedCount { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak { + readByte(src) + break + } + } + cbor2JsonOneObject(src, dst) + if unSpecifiedCount { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak { + readByte(src) + break + } + dst.Write([]byte{','}) + } else if i+1 < len { + dst.Write([]byte{','}) + } + } + dst.Write([]byte{']'}) +} + +func map2Json(src *bufio.Reader, dst io.Writer) { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeMap { + panic(fmt.Errorf("Major type is: %d in map2Json", major)) + } + len := 0 + unSpecifiedCount := false + if minor == additionalTypeInfiniteCount { + unSpecifiedCount = true + } else { + length := decodeIntAdditionalType(src, minor) + len = int(length) + } + dst.Write([]byte{'{'}) + for i := 0; unSpecifiedCount || i < len; i++ { + if unSpecifiedCount { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak { + readByte(src) + break + } + } + cbor2JsonOneObject(src, dst) + if i%2 == 0 { + // Even position values are keys. + dst.Write([]byte{':'}) + } else { + if unSpecifiedCount { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + if pb[0] == majorTypeSimpleAndFloat|additionalTypeBreak { + readByte(src) + break + } + dst.Write([]byte{','}) + } else if i+1 < len { + dst.Write([]byte{','}) + } + } + } + dst.Write([]byte{'}'}) +} + +func decodeTagData(src *bufio.Reader) []byte { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeTags { + panic(fmt.Errorf("Major type is: %d in decodeTagData", major)) + } + switch minor { + case additionalTypeTimestamp: + return decodeTimeStamp(src) + + // Tag value is larger than 256 (so uint16). + case additionalTypeIntUint16: + val := decodeIntAdditionalType(src, minor) + + switch uint16(val) { + case additionalTypeEmbeddedJSON: + pb := readByte(src) + dataMajor := pb & maskOutAdditionalType + if dataMajor != majorTypeByteString { + panic(fmt.Errorf("Unsupported embedded Type: %d in decodeEmbeddedJSON", dataMajor)) + } + src.UnreadByte() + return decodeString(src, true) + + case additionalTypeTagNetworkAddr: + octets := decodeString(src, true) + ss := []byte{'"'} + switch len(octets) { + case 6: // MAC address. + ha := net.HardwareAddr(octets) + ss = append(append(ss, ha.String()...), '"') + case 4: // IPv4 address. + fallthrough + case 16: // IPv6 address. 
+ ip := net.IP(octets) + ss = append(append(ss, ip.String()...), '"') + default: + panic(fmt.Errorf("Unexpected Network Address length: %d (expected 4,6,16)", len(octets))) + } + return ss + + case additionalTypeTagNetworkPrefix: + pb := readByte(src) + if pb != majorTypeMap|0x1 { + panic(fmt.Errorf("IP Prefix is NOT of MAP of 1 elements as expected")) + } + octets := decodeString(src, true) + val := decodeInteger(src) + ip := net.IP(octets) + var mask net.IPMask + pfxLen := int(val) + if len(octets) == 4 { + mask = net.CIDRMask(pfxLen, 32) + } else { + mask = net.CIDRMask(pfxLen, 128) + } + ipPfx := net.IPNet{IP: ip, Mask: mask} + ss := []byte{'"'} + ss = append(append(ss, ipPfx.String()...), '"') + return ss + + case additionalTypeTagHexString: + octets := decodeString(src, true) + ss := []byte{'"'} + for _, v := range octets { + ss = append(ss, hexTable[v>>4], hexTable[v&0x0f]) + } + return append(ss, '"') + + default: + panic(fmt.Errorf("Unsupported Additional Tag Type: %d in decodeTagData", val)) + } + } + panic(fmt.Errorf("Unsupported Additional Type: %d in decodeTagData", minor)) +} + +func decodeTimeStamp(src *bufio.Reader) []byte { + pb := readByte(src) + src.UnreadByte() + tsMajor := pb & maskOutAdditionalType + if tsMajor == majorTypeUnsignedInt || tsMajor == majorTypeNegativeInt { + n := decodeInteger(src) + t := time.Unix(n, 0) + if decodeTimeZone != nil { + t = t.In(decodeTimeZone) + } else { + t = t.In(time.UTC) + } + tsb := []byte{} + tsb = append(tsb, '"') + tsb = t.AppendFormat(tsb, IntegerTimeFieldFormat) + tsb = append(tsb, '"') + return tsb + } else if tsMajor == majorTypeSimpleAndFloat { + n, _ := decodeFloat(src) + secs := int64(n) + n -= float64(secs) + n *= float64(1e9) + t := time.Unix(secs, int64(n)) + if decodeTimeZone != nil { + t = t.In(decodeTimeZone) + } else { + t = t.In(time.UTC) + } + tsb := []byte{} + tsb = append(tsb, '"') + tsb = t.AppendFormat(tsb, NanoTimeFieldFormat) + tsb = append(tsb, '"') + return tsb + } + panic(fmt.Errorf("TS format is neigther int nor float: %d", tsMajor)) +} + +func decodeSimpleFloat(src *bufio.Reader) []byte { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeSimpleAndFloat { + panic(fmt.Errorf("Major type is: %d in decodeSimpleFloat", major)) + } + switch minor { + case additionalTypeBoolTrue: + return []byte("true") + case additionalTypeBoolFalse: + return []byte("false") + case additionalTypeNull: + return []byte("null") + case additionalTypeFloat16: + fallthrough + case additionalTypeFloat32: + fallthrough + case additionalTypeFloat64: + src.UnreadByte() + v, bc := decodeFloat(src) + ba := []byte{} + switch { + case math.IsNaN(v): + return []byte("\"NaN\"") + case math.IsInf(v, 1): + return []byte("\"+Inf\"") + case math.IsInf(v, -1): + return []byte("\"-Inf\"") + } + if bc == isFloat32 { + ba = strconv.AppendFloat(ba, v, 'f', -1, 32) + } else if bc == isFloat64 { + ba = strconv.AppendFloat(ba, v, 'f', -1, 64) + } else { + panic(fmt.Errorf("Invalid Float precision from decodeFloat: %d", bc)) + } + return ba + default: + panic(fmt.Errorf("Invalid Additional Type: %d in decodeSimpleFloat", minor)) + } +} + +func cbor2JsonOneObject(src *bufio.Reader, dst io.Writer) { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + major := (pb[0] & maskOutAdditionalType) + + switch major { + case majorTypeUnsignedInt: + fallthrough + case majorTypeNegativeInt: + n := decodeInteger(src) + dst.Write([]byte(strconv.Itoa(int(n)))) + + case majorTypeByteString: + s := 
decodeString(src, false) + dst.Write(s) + + case majorTypeUtf8String: + s := decodeUTF8String(src) + dst.Write(s) + + case majorTypeArray: + array2Json(src, dst) + + case majorTypeMap: + map2Json(src, dst) + + case majorTypeTags: + s := decodeTagData(src) + dst.Write(s) + + case majorTypeSimpleAndFloat: + s := decodeSimpleFloat(src) + dst.Write(s) + } +} + +func moreBytesToRead(src *bufio.Reader) bool { + _, e := src.ReadByte() + if e == nil { + src.UnreadByte() + return true + } + return false +} + +// Cbor2JsonManyObjects decodes all the CBOR Objects read from src +// reader. It keeps on decoding until reader returns EOF (error when reading). +// Decoded string is written to the dst. At the end of every CBOR Object +// newline is written to the output stream. +// +// Returns error (if any) that was encountered during decode. +// The child functions will generate a panic when error is encountered and +// this function will recover non-runtime Errors and return the reason as error. +func Cbor2JsonManyObjects(src io.Reader, dst io.Writer) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + bufRdr := bufio.NewReader(src) + for moreBytesToRead(bufRdr) { + cbor2JsonOneObject(bufRdr, dst) + dst.Write([]byte("\n")) + } + return nil +} + +// Detect if the bytes to be printed is Binary or not. +func binaryFmt(p []byte) bool { + if len(p) > 0 && p[0] > 0x7F { + return true + } + return false +} + +func getReader(str string) *bufio.Reader { + return bufio.NewReader(strings.NewReader(str)) +} + +// DecodeIfBinaryToString converts a binary formatted log msg to a +// JSON formatted String Log message - suitable for printing to Console/Syslog. +func DecodeIfBinaryToString(in []byte) string { + if binaryFmt(in) { + var b bytes.Buffer + Cbor2JsonManyObjects(strings.NewReader(string(in)), &b) + return b.String() + } + return string(in) +} + +// DecodeObjectToStr checks if the input is a binary format, if so, +// it will decode a single Object and return the decoded string. +func DecodeObjectToStr(in []byte) string { + if binaryFmt(in) { + var b bytes.Buffer + cbor2JsonOneObject(getReader(string(in)), &b) + return b.String() + } + return string(in) +} + +// DecodeIfBinaryToBytes checks if the input is a binary format, if so, +// it will decode all Objects and return the decoded string as byte array. +func DecodeIfBinaryToBytes(in []byte) []byte { + if binaryFmt(in) { + var b bytes.Buffer + Cbor2JsonManyObjects(bytes.NewReader(in), &b) + return b.Bytes() + } + return in +} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/string.go b/vendor/github.com/rs/zerolog/internal/cbor/string.go new file mode 100644 index 00000000..a33890a5 --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/cbor/string.go @@ -0,0 +1,95 @@ +package cbor + +import "fmt" + +// AppendStrings encodes and adds an array of strings to the dst byte array. +func (e Encoder) AppendStrings(dst []byte, vals []string) []byte { + major := majorTypeArray + l := len(vals) + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendString(dst, v) + } + return dst +} + +// AppendString encodes and adds a string to the dst byte array. 
+func (Encoder) AppendString(dst []byte, s string) []byte { + major := majorTypeUtf8String + + l := len(s) + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, majorTypeUtf8String, uint64(l)) + } + return append(dst, s...) +} + +// AppendStringers encodes and adds an array of Stringer values +// to the dst byte array. +func (e Encoder) AppendStringers(dst []byte, vals []fmt.Stringer) []byte { + if len(vals) == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + dst = e.AppendArrayStart(dst) + dst = e.AppendStringer(dst, vals[0]) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = e.AppendStringer(dst, val) + } + } + return e.AppendArrayEnd(dst) +} + +// AppendStringer encodes and adds the Stringer value to the dst +// byte array. +func (e Encoder) AppendStringer(dst []byte, val fmt.Stringer) []byte { + if val == nil { + return e.AppendNil(dst) + } + return e.AppendString(dst, val.String()) +} + +// AppendBytes encodes and adds an array of bytes to the dst byte array. +func (Encoder) AppendBytes(dst, s []byte) []byte { + major := majorTypeByteString + + l := len(s) + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + return append(dst, s...) +} + +// AppendEmbeddedJSON adds a tag and embeds input JSON as such. +func AppendEmbeddedJSON(dst, s []byte) []byte { + major := majorTypeTags + minor := additionalTypeEmbeddedJSON + + // Append the TAG to indicate this is Embedded JSON. + dst = append(dst, major|additionalTypeIntUint16) + dst = append(dst, byte(minor>>8)) + dst = append(dst, byte(minor&0xff)) + + // Append the JSON Object as Byte String. + major = majorTypeByteString + + l := len(s) + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + return append(dst, s...) +} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/time.go b/vendor/github.com/rs/zerolog/internal/cbor/time.go new file mode 100644 index 00000000..d81fb125 --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/cbor/time.go @@ -0,0 +1,93 @@ +package cbor + +import ( + "time" +) + +func appendIntegerTimestamp(dst []byte, t time.Time) []byte { + major := majorTypeTags + minor := additionalTypeTimestamp + dst = append(dst, major|minor) + secs := t.Unix() + var val uint64 + if secs < 0 { + major = majorTypeNegativeInt + val = uint64(-secs - 1) + } else { + major = majorTypeUnsignedInt + val = uint64(secs) + } + dst = appendCborTypePrefix(dst, major, val) + return dst +} + +func (e Encoder) appendFloatTimestamp(dst []byte, t time.Time) []byte { + major := majorTypeTags + minor := additionalTypeTimestamp + dst = append(dst, major|minor) + secs := t.Unix() + nanos := t.Nanosecond() + var val float64 + val = float64(secs)*1.0 + float64(nanos)*1e-9 + return e.AppendFloat64(dst, val) +} + +// AppendTime encodes and adds a timestamp to the dst byte array. +func (e Encoder) AppendTime(dst []byte, t time.Time, unused string) []byte { + utc := t.UTC() + if utc.Nanosecond() == 0 { + return appendIntegerTimestamp(dst, utc) + } + return e.appendFloatTimestamp(dst, utc) +} + +// AppendTimes encodes and adds an array of timestamps to the dst byte array. 
+func (e Encoder) AppendTimes(dst []byte, vals []time.Time, unused string) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + + for _, t := range vals { + dst = e.AppendTime(dst, t, unused) + } + return dst +} + +// AppendDuration encodes and adds a duration to the dst byte array. +// useInt field indicates whether to store the duration as seconds (integer) or +// as seconds+nanoseconds (float). +func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte { + if useInt { + return e.AppendInt64(dst, int64(d/unit)) + } + return e.AppendFloat64(dst, float64(d)/float64(unit)) +} + +// AppendDurations encodes and adds an array of durations to the dst byte array. +// useInt field indicates whether to store the duration as seconds (integer) or +// as seconds+nanoseconds (float). +func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, d := range vals { + dst = e.AppendDuration(dst, d, unit, useInt) + } + return dst +} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/types.go b/vendor/github.com/rs/zerolog/internal/cbor/types.go new file mode 100644 index 00000000..49316aa5 --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/cbor/types.go @@ -0,0 +1,477 @@ +package cbor + +import ( + "fmt" + "math" + "net" +) + +// AppendNil inserts a 'Nil' object into the dst byte array. +func (Encoder) AppendNil(dst []byte) []byte { + return append(dst, majorTypeSimpleAndFloat|additionalTypeNull) +} + +// AppendBeginMarker inserts a map start into the dst byte array. +func (Encoder) AppendBeginMarker(dst []byte) []byte { + return append(dst, majorTypeMap|additionalTypeInfiniteCount) +} + +// AppendEndMarker inserts a map end into the dst byte array. +func (Encoder) AppendEndMarker(dst []byte) []byte { + return append(dst, majorTypeSimpleAndFloat|additionalTypeBreak) +} + +// AppendObjectData takes an object in form of a byte array and appends to dst. +func (Encoder) AppendObjectData(dst []byte, o []byte) []byte { + // BeginMarker is present in the dst, which + // should not be copied when appending to existing data. + return append(dst, o[1:]...) +} + +// AppendArrayStart adds markers to indicate the start of an array. +func (Encoder) AppendArrayStart(dst []byte) []byte { + return append(dst, majorTypeArray|additionalTypeInfiniteCount) +} + +// AppendArrayEnd adds markers to indicate the end of an array. +func (Encoder) AppendArrayEnd(dst []byte) []byte { + return append(dst, majorTypeSimpleAndFloat|additionalTypeBreak) +} + +// AppendArrayDelim adds markers to indicate end of a particular array element. +func (Encoder) AppendArrayDelim(dst []byte) []byte { + //No delimiters needed in cbor + return dst +} + +// AppendLineBreak is a noop that keep API compat with json encoder. +func (Encoder) AppendLineBreak(dst []byte) []byte { + // No line breaks needed in binary format. + return dst +} + +// AppendBool encodes and inserts a boolean value into the dst byte array. 
+func (Encoder) AppendBool(dst []byte, val bool) []byte { + b := additionalTypeBoolFalse + if val { + b = additionalTypeBoolTrue + } + return append(dst, majorTypeSimpleAndFloat|b) +} + +// AppendBools encodes and inserts an array of boolean values into the dst byte array. +func (e Encoder) AppendBools(dst []byte, vals []bool) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendBool(dst, v) + } + return dst +} + +// AppendInt encodes and inserts an integer value into the dst byte array. +func (Encoder) AppendInt(dst []byte, val int) []byte { + major := majorTypeUnsignedInt + contentVal := val + if val < 0 { + major = majorTypeNegativeInt + contentVal = -val - 1 + } + if contentVal <= additionalMax { + lb := byte(contentVal) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(contentVal)) + } + return dst +} + +// AppendInts encodes and inserts an array of integer values into the dst byte array. +func (e Encoder) AppendInts(dst []byte, vals []int) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt(dst, v) + } + return dst +} + +// AppendInt8 encodes and inserts an int8 value into the dst byte array. +func (e Encoder) AppendInt8(dst []byte, val int8) []byte { + return e.AppendInt(dst, int(val)) +} + +// AppendInts8 encodes and inserts an array of integer values into the dst byte array. +func (e Encoder) AppendInts8(dst []byte, vals []int8) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt(dst, int(v)) + } + return dst +} + +// AppendInt16 encodes and inserts a int16 value into the dst byte array. +func (e Encoder) AppendInt16(dst []byte, val int16) []byte { + return e.AppendInt(dst, int(val)) +} + +// AppendInts16 encodes and inserts an array of int16 values into the dst byte array. +func (e Encoder) AppendInts16(dst []byte, vals []int16) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt(dst, int(v)) + } + return dst +} + +// AppendInt32 encodes and inserts a int32 value into the dst byte array. +func (e Encoder) AppendInt32(dst []byte, val int32) []byte { + return e.AppendInt(dst, int(val)) +} + +// AppendInts32 encodes and inserts an array of int32 values into the dst byte array. 
+func (e Encoder) AppendInts32(dst []byte, vals []int32) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt(dst, int(v)) + } + return dst +} + +// AppendInt64 encodes and inserts a int64 value into the dst byte array. +func (Encoder) AppendInt64(dst []byte, val int64) []byte { + major := majorTypeUnsignedInt + contentVal := val + if val < 0 { + major = majorTypeNegativeInt + contentVal = -val - 1 + } + if contentVal <= additionalMax { + lb := byte(contentVal) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(contentVal)) + } + return dst +} + +// AppendInts64 encodes and inserts an array of int64 values into the dst byte array. +func (e Encoder) AppendInts64(dst []byte, vals []int64) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt64(dst, v) + } + return dst +} + +// AppendUint encodes and inserts an unsigned integer value into the dst byte array. +func (e Encoder) AppendUint(dst []byte, val uint) []byte { + return e.AppendInt64(dst, int64(val)) +} + +// AppendUints encodes and inserts an array of unsigned integer values into the dst byte array. +func (e Encoder) AppendUints(dst []byte, vals []uint) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint(dst, v) + } + return dst +} + +// AppendUint8 encodes and inserts a unsigned int8 value into the dst byte array. +func (e Encoder) AppendUint8(dst []byte, val uint8) []byte { + return e.AppendUint(dst, uint(val)) +} + +// AppendUints8 encodes and inserts an array of uint8 values into the dst byte array. +func (e Encoder) AppendUints8(dst []byte, vals []uint8) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint8(dst, v) + } + return dst +} + +// AppendUint16 encodes and inserts a uint16 value into the dst byte array. +func (e Encoder) AppendUint16(dst []byte, val uint16) []byte { + return e.AppendUint(dst, uint(val)) +} + +// AppendUints16 encodes and inserts an array of uint16 values into the dst byte array. +func (e Encoder) AppendUints16(dst []byte, vals []uint16) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint16(dst, v) + } + return dst +} + +// AppendUint32 encodes and inserts a uint32 value into the dst byte array. 
+func (e Encoder) AppendUint32(dst []byte, val uint32) []byte { + return e.AppendUint(dst, uint(val)) +} + +// AppendUints32 encodes and inserts an array of uint32 values into the dst byte array. +func (e Encoder) AppendUints32(dst []byte, vals []uint32) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint32(dst, v) + } + return dst +} + +// AppendUint64 encodes and inserts a uint64 value into the dst byte array. +func (Encoder) AppendUint64(dst []byte, val uint64) []byte { + major := majorTypeUnsignedInt + contentVal := val + if contentVal <= additionalMax { + lb := byte(contentVal) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, contentVal) + } + return dst +} + +// AppendUints64 encodes and inserts an array of uint64 values into the dst byte array. +func (e Encoder) AppendUints64(dst []byte, vals []uint64) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint64(dst, v) + } + return dst +} + +// AppendFloat32 encodes and inserts a single precision float value into the dst byte array. +func (Encoder) AppendFloat32(dst []byte, val float32) []byte { + switch { + case math.IsNaN(float64(val)): + return append(dst, "\xfa\x7f\xc0\x00\x00"...) + case math.IsInf(float64(val), 1): + return append(dst, "\xfa\x7f\x80\x00\x00"...) + case math.IsInf(float64(val), -1): + return append(dst, "\xfa\xff\x80\x00\x00"...) + } + major := majorTypeSimpleAndFloat + subType := additionalTypeFloat32 + n := math.Float32bits(val) + var buf [4]byte + for i := uint(0); i < 4; i++ { + buf[i] = byte(n >> ((3 - i) * 8)) + } + return append(append(dst, major|subType), buf[0], buf[1], buf[2], buf[3]) +} + +// AppendFloats32 encodes and inserts an array of single precision float value into the dst byte array. +func (e Encoder) AppendFloats32(dst []byte, vals []float32) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendFloat32(dst, v) + } + return dst +} + +// AppendFloat64 encodes and inserts a double precision float value into the dst byte array. +func (Encoder) AppendFloat64(dst []byte, val float64) []byte { + switch { + case math.IsNaN(val): + return append(dst, "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00"...) + case math.IsInf(val, 1): + return append(dst, "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00"...) + case math.IsInf(val, -1): + return append(dst, "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00"...) + } + major := majorTypeSimpleAndFloat + subType := additionalTypeFloat64 + n := math.Float64bits(val) + dst = append(dst, major|subType) + for i := uint(1); i <= 8; i++ { + b := byte(n >> ((8 - i) * 8)) + dst = append(dst, b) + } + return dst +} + +// AppendFloats64 encodes and inserts an array of double precision float values into the dst byte array. 
+func (e Encoder) AppendFloats64(dst []byte, vals []float64) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, major|lb) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendFloat64(dst, v) + } + return dst +} + +// AppendInterface takes an arbitrary object and converts it to JSON and embeds it dst. +func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte { + marshaled, err := JSONMarshalFunc(i) + if err != nil { + return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err)) + } + return AppendEmbeddedJSON(dst, marshaled) +} + +// AppendIPAddr encodes and inserts an IP Address (IPv4 or IPv6). +func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte { + dst = append(dst, majorTypeTags|additionalTypeIntUint16) + dst = append(dst, byte(additionalTypeTagNetworkAddr>>8)) + dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff)) + return e.AppendBytes(dst, ip) +} + +// AppendIPPrefix encodes and inserts an IP Address Prefix (Address + Mask Length). +func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte { + dst = append(dst, majorTypeTags|additionalTypeIntUint16) + dst = append(dst, byte(additionalTypeTagNetworkPrefix>>8)) + dst = append(dst, byte(additionalTypeTagNetworkPrefix&0xff)) + + // Prefix is a tuple (aka MAP of 1 pair of elements) - + // first element is prefix, second is mask length. + dst = append(dst, majorTypeMap|0x1) + dst = e.AppendBytes(dst, pfx.IP) + maskLen, _ := pfx.Mask.Size() + return e.AppendUint8(dst, uint8(maskLen)) +} + +// AppendMACAddr encodes and inserts a Hardware (MAC) address. +func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte { + dst = append(dst, majorTypeTags|additionalTypeIntUint16) + dst = append(dst, byte(additionalTypeTagNetworkAddr>>8)) + dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff)) + return e.AppendBytes(dst, ha) +} + +// AppendHex adds a TAG and inserts a hex bytes as a string. +func (e Encoder) AppendHex(dst []byte, val []byte) []byte { + dst = append(dst, majorTypeTags|additionalTypeIntUint16) + dst = append(dst, byte(additionalTypeTagHexString>>8)) + dst = append(dst, byte(additionalTypeTagHexString&0xff)) + return e.AppendBytes(dst, val) +} diff --git a/vendor/github.com/rs/zerolog/internal/json/base.go b/vendor/github.com/rs/zerolog/internal/json/base.go new file mode 100644 index 00000000..09ec59f4 --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/json/base.go @@ -0,0 +1,19 @@ +package json + +// JSONMarshalFunc is used to marshal interface to JSON encoded byte slice. +// Making it package level instead of embedded in Encoder brings +// some extra efforts at importing, but avoids value copy when the functions +// of Encoder being invoked. +// DO REMEMBER to set this variable at importing, or +// you might get a nil pointer dereference panic at runtime. +var JSONMarshalFunc func(v interface{}) ([]byte, error) + +type Encoder struct{} + +// AppendKey appends a new key to the output JSON. 
+func (e Encoder) AppendKey(dst []byte, key string) []byte { + if dst[len(dst)-1] != '{' { + dst = append(dst, ',') + } + return append(e.AppendString(dst, key), ':') +} diff --git a/vendor/github.com/rs/zerolog/internal/json/bytes.go b/vendor/github.com/rs/zerolog/internal/json/bytes.go new file mode 100644 index 00000000..de64120d --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/json/bytes.go @@ -0,0 +1,85 @@ +package json + +import "unicode/utf8" + +// AppendBytes is a mirror of appendString with []byte arg +func (Encoder) AppendBytes(dst, s []byte) []byte { + dst = append(dst, '"') + for i := 0; i < len(s); i++ { + if !noEscapeTable[s[i]] { + dst = appendBytesComplex(dst, s, i) + return append(dst, '"') + } + } + dst = append(dst, s...) + return append(dst, '"') +} + +// AppendHex encodes the input bytes to a hex string and appends +// the encoded string to the input byte slice. +// +// The operation loops though each byte and encodes it as hex using +// the hex lookup table. +func (Encoder) AppendHex(dst, s []byte) []byte { + dst = append(dst, '"') + for _, v := range s { + dst = append(dst, hex[v>>4], hex[v&0x0f]) + } + return append(dst, '"') +} + +// appendBytesComplex is a mirror of the appendStringComplex +// with []byte arg +func appendBytesComplex(dst, s []byte, i int) []byte { + start := 0 + for i < len(s) { + b := s[i] + if b >= utf8.RuneSelf { + r, size := utf8.DecodeRune(s[i:]) + if r == utf8.RuneError && size == 1 { + if start < i { + dst = append(dst, s[start:i]...) + } + dst = append(dst, `\ufffd`...) + i += size + start = i + continue + } + i += size + continue + } + if noEscapeTable[b] { + i++ + continue + } + // We encountered a character that needs to be encoded. + // Let's append the previous simple characters to the byte slice + // and switch our operation to read and encode the remainder + // characters byte-by-byte. + if start < i { + dst = append(dst, s[start:i]...) + } + switch b { + case '"', '\\': + dst = append(dst, '\\', b) + case '\b': + dst = append(dst, '\\', 'b') + case '\f': + dst = append(dst, '\\', 'f') + case '\n': + dst = append(dst, '\\', 'n') + case '\r': + dst = append(dst, '\\', 'r') + case '\t': + dst = append(dst, '\\', 't') + default: + dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF]) + } + i++ + start = i + } + if start < len(s) { + dst = append(dst, s[start:]...) + } + return dst +} diff --git a/vendor/github.com/rs/zerolog/internal/json/string.go b/vendor/github.com/rs/zerolog/internal/json/string.go new file mode 100644 index 00000000..fd7770f2 --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/json/string.go @@ -0,0 +1,149 @@ +package json + +import ( + "fmt" + "unicode/utf8" +) + +const hex = "0123456789abcdef" + +var noEscapeTable = [256]bool{} + +func init() { + for i := 0; i <= 0x7e; i++ { + noEscapeTable[i] = i >= 0x20 && i != '\\' && i != '"' + } +} + +// AppendStrings encodes the input strings to json and +// appends the encoded string list to the input byte slice. +func (e Encoder) AppendStrings(dst []byte, vals []string) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = e.AppendString(dst, vals[0]) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = e.AppendString(append(dst, ','), val) + } + } + dst = append(dst, ']') + return dst +} + +// AppendString encodes the input string to json and appends +// the encoded string to the input byte slice. 
+// +// The operation loops though each byte in the string looking +// for characters that need json or utf8 encoding. If the string +// does not need encoding, then the string is appended in its +// entirety to the byte slice. +// If we encounter a byte that does need encoding, switch up +// the operation and perform a byte-by-byte read-encode-append. +func (Encoder) AppendString(dst []byte, s string) []byte { + // Start with a double quote. + dst = append(dst, '"') + // Loop through each character in the string. + for i := 0; i < len(s); i++ { + // Check if the character needs encoding. Control characters, slashes, + // and the double quote need json encoding. Bytes above the ascii + // boundary needs utf8 encoding. + if !noEscapeTable[s[i]] { + // We encountered a character that needs to be encoded. Switch + // to complex version of the algorithm. + dst = appendStringComplex(dst, s, i) + return append(dst, '"') + } + } + // The string has no need for encoding and therefore is directly + // appended to the byte slice. + dst = append(dst, s...) + // End with a double quote + return append(dst, '"') +} + +// AppendStringers encodes the provided Stringer list to json and +// appends the encoded Stringer list to the input byte slice. +func (e Encoder) AppendStringers(dst []byte, vals []fmt.Stringer) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = e.AppendStringer(dst, vals[0]) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = e.AppendStringer(append(dst, ','), val) + } + } + return append(dst, ']') +} + +// AppendStringer encodes the input Stringer to json and appends the +// encoded Stringer value to the input byte slice. +func (e Encoder) AppendStringer(dst []byte, val fmt.Stringer) []byte { + if val == nil { + return e.AppendInterface(dst, nil) + } + return e.AppendString(dst, val.String()) +} + +//// appendStringComplex is used by appendString to take over an in +// progress JSON string encoding that encountered a character that needs +// to be encoded. +func appendStringComplex(dst []byte, s string, i int) []byte { + start := 0 + for i < len(s) { + b := s[i] + if b >= utf8.RuneSelf { + r, size := utf8.DecodeRuneInString(s[i:]) + if r == utf8.RuneError && size == 1 { + // In case of error, first append previous simple characters to + // the byte slice if any and append a replacement character code + // in place of the invalid sequence. + if start < i { + dst = append(dst, s[start:i]...) + } + dst = append(dst, `\ufffd`...) + i += size + start = i + continue + } + i += size + continue + } + if noEscapeTable[b] { + i++ + continue + } + // We encountered a character that needs to be encoded. + // Let's append the previous simple characters to the byte slice + // and switch our operation to read and encode the remainder + // characters byte-by-byte. + if start < i { + dst = append(dst, s[start:i]...) + } + switch b { + case '"', '\\': + dst = append(dst, '\\', b) + case '\b': + dst = append(dst, '\\', 'b') + case '\f': + dst = append(dst, '\\', 'f') + case '\n': + dst = append(dst, '\\', 'n') + case '\r': + dst = append(dst, '\\', 'r') + case '\t': + dst = append(dst, '\\', 't') + default: + dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF]) + } + i++ + start = i + } + if start < len(s) { + dst = append(dst, s[start:]...) 
+ } + return dst +} diff --git a/vendor/github.com/rs/zerolog/internal/json/time.go b/vendor/github.com/rs/zerolog/internal/json/time.go new file mode 100644 index 00000000..6a8dc912 --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/json/time.go @@ -0,0 +1,113 @@ +package json + +import ( + "strconv" + "time" +) + +const ( + // Import from zerolog/global.go + timeFormatUnix = "" + timeFormatUnixMs = "UNIXMS" + timeFormatUnixMicro = "UNIXMICRO" + timeFormatUnixNano = "UNIXNANO" +) + +// AppendTime formats the input time with the given format +// and appends the encoded string to the input byte slice. +func (e Encoder) AppendTime(dst []byte, t time.Time, format string) []byte { + switch format { + case timeFormatUnix: + return e.AppendInt64(dst, t.Unix()) + case timeFormatUnixMs: + return e.AppendInt64(dst, t.UnixNano()/1000000) + case timeFormatUnixMicro: + return e.AppendInt64(dst, t.UnixNano()/1000) + case timeFormatUnixNano: + return e.AppendInt64(dst, t.UnixNano()) + } + return append(t.AppendFormat(append(dst, '"'), format), '"') +} + +// AppendTimes converts the input times with the given format +// and appends the encoded string list to the input byte slice. +func (Encoder) AppendTimes(dst []byte, vals []time.Time, format string) []byte { + switch format { + case timeFormatUnix: + return appendUnixTimes(dst, vals) + case timeFormatUnixMs: + return appendUnixNanoTimes(dst, vals, 1000000) + case timeFormatUnixMicro: + return appendUnixNanoTimes(dst, vals, 1000) + case timeFormatUnixNano: + return appendUnixNanoTimes(dst, vals, 1) + } + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = append(vals[0].AppendFormat(append(dst, '"'), format), '"') + if len(vals) > 1 { + for _, t := range vals[1:] { + dst = append(t.AppendFormat(append(dst, ',', '"'), format), '"') + } + } + dst = append(dst, ']') + return dst +} + +func appendUnixTimes(dst []byte, vals []time.Time) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, vals[0].Unix(), 10) + if len(vals) > 1 { + for _, t := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), t.Unix(), 10) + } + } + dst = append(dst, ']') + return dst +} + +func appendUnixNanoTimes(dst []byte, vals []time.Time, div int64) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, vals[0].UnixNano()/div, 10) + if len(vals) > 1 { + for _, t := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), t.UnixNano()/div, 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendDuration formats the input duration with the given unit & format +// and appends the encoded string to the input byte slice. +func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte { + if useInt { + return strconv.AppendInt(dst, int64(d/unit), 10) + } + return e.AppendFloat64(dst, float64(d)/float64(unit)) +} + +// AppendDurations formats the input durations with the given unit & format +// and appends the encoded string list to the input byte slice. 
+func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = e.AppendDuration(dst, vals[0], unit, useInt) + if len(vals) > 1 { + for _, d := range vals[1:] { + dst = e.AppendDuration(append(dst, ','), d, unit, useInt) + } + } + dst = append(dst, ']') + return dst +} diff --git a/vendor/github.com/rs/zerolog/internal/json/types.go b/vendor/github.com/rs/zerolog/internal/json/types.go new file mode 100644 index 00000000..ad7f7a88 --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/json/types.go @@ -0,0 +1,405 @@ +package json + +import ( + "fmt" + "math" + "net" + "strconv" +) + +// AppendNil inserts a 'Nil' object into the dst byte array. +func (Encoder) AppendNil(dst []byte) []byte { + return append(dst, "null"...) +} + +// AppendBeginMarker inserts a map start into the dst byte array. +func (Encoder) AppendBeginMarker(dst []byte) []byte { + return append(dst, '{') +} + +// AppendEndMarker inserts a map end into the dst byte array. +func (Encoder) AppendEndMarker(dst []byte) []byte { + return append(dst, '}') +} + +// AppendLineBreak appends a line break. +func (Encoder) AppendLineBreak(dst []byte) []byte { + return append(dst, '\n') +} + +// AppendArrayStart adds markers to indicate the start of an array. +func (Encoder) AppendArrayStart(dst []byte) []byte { + return append(dst, '[') +} + +// AppendArrayEnd adds markers to indicate the end of an array. +func (Encoder) AppendArrayEnd(dst []byte) []byte { + return append(dst, ']') +} + +// AppendArrayDelim adds markers to indicate end of a particular array element. +func (Encoder) AppendArrayDelim(dst []byte) []byte { + if len(dst) > 0 { + return append(dst, ',') + } + return dst +} + +// AppendBool converts the input bool to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendBool(dst []byte, val bool) []byte { + return strconv.AppendBool(dst, val) +} + +// AppendBools encodes the input bools to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendBools(dst []byte, vals []bool) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendBool(dst, vals[0]) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendBool(append(dst, ','), val) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInt converts the input int to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt(dst []byte, val int) []byte { + return strconv.AppendInt(dst, int64(val), 10) +} + +// AppendInts encodes the input ints to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendInts(dst []byte, vals []int) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, int64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), int64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInt8 converts the input []int8 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt8(dst []byte, val int8) []byte { + return strconv.AppendInt(dst, int64(val), 10) +} + +// AppendInts8 encodes the input int8s to json and +// appends the encoded string list to the input byte slice. 
+func (Encoder) AppendInts8(dst []byte, vals []int8) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, int64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), int64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInt16 converts the input int16 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt16(dst []byte, val int16) []byte { + return strconv.AppendInt(dst, int64(val), 10) +} + +// AppendInts16 encodes the input int16s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendInts16(dst []byte, vals []int16) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, int64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), int64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInt32 converts the input int32 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt32(dst []byte, val int32) []byte { + return strconv.AppendInt(dst, int64(val), 10) +} + +// AppendInts32 encodes the input int32s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendInts32(dst []byte, vals []int32) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, int64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), int64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInt64 converts the input int64 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt64(dst []byte, val int64) []byte { + return strconv.AppendInt(dst, val, 10) +} + +// AppendInts64 encodes the input int64s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendInts64(dst []byte, vals []int64) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, vals[0], 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), val, 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendUint converts the input uint to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint(dst []byte, val uint) []byte { + return strconv.AppendUint(dst, uint64(val), 10) +} + +// AppendUints encodes the input uints to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendUints(dst []byte, vals []uint) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendUint(dst, uint64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendUint8 converts the input uint8 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint8(dst []byte, val uint8) []byte { + return strconv.AppendUint(dst, uint64(val), 10) +} + +// AppendUints8 encodes the input uint8s to json and +// appends the encoded string list to the input byte slice. 
+func (Encoder) AppendUints8(dst []byte, vals []uint8) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendUint(dst, uint64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendUint16 converts the input uint16 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint16(dst []byte, val uint16) []byte { + return strconv.AppendUint(dst, uint64(val), 10) +} + +// AppendUints16 encodes the input uint16s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendUints16(dst []byte, vals []uint16) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendUint(dst, uint64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendUint32 converts the input uint32 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint32(dst []byte, val uint32) []byte { + return strconv.AppendUint(dst, uint64(val), 10) +} + +// AppendUints32 encodes the input uint32s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendUints32(dst []byte, vals []uint32) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendUint(dst, uint64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendUint64 converts the input uint64 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint64(dst []byte, val uint64) []byte { + return strconv.AppendUint(dst, val, 10) +} + +// AppendUints64 encodes the input uint64s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendUints64(dst []byte, vals []uint64) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendUint(dst, vals[0], 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendUint(append(dst, ','), val, 10) + } + } + dst = append(dst, ']') + return dst +} + +func appendFloat(dst []byte, val float64, bitSize int) []byte { + // JSON does not permit NaN or Infinity. A typical JSON encoder would fail + // with an error, but a logging library wants the data to get through so we + // make a tradeoff and store those types as string. + switch { + case math.IsNaN(val): + return append(dst, `"NaN"`...) + case math.IsInf(val, 1): + return append(dst, `"+Inf"`...) + case math.IsInf(val, -1): + return append(dst, `"-Inf"`...) + } + return strconv.AppendFloat(dst, val, 'f', -1, bitSize) +} + +// AppendFloat32 converts the input float32 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendFloat32(dst []byte, val float32) []byte { + return appendFloat(dst, float64(val), 32) +} + +// AppendFloats32 encodes the input float32s to json and +// appends the encoded string list to the input byte slice. 
+func (Encoder) AppendFloats32(dst []byte, vals []float32) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = appendFloat(dst, float64(vals[0]), 32) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = appendFloat(append(dst, ','), float64(val), 32) + } + } + dst = append(dst, ']') + return dst +} + +// AppendFloat64 converts the input float64 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendFloat64(dst []byte, val float64) []byte { + return appendFloat(dst, val, 64) +} + +// AppendFloats64 encodes the input float64s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendFloats64(dst []byte, vals []float64) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = appendFloat(dst, vals[0], 64) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = appendFloat(append(dst, ','), val, 64) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInterface marshals the input interface to a string and +// appends the encoded string to the input byte slice. +func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte { + marshaled, err := JSONMarshalFunc(i) + if err != nil { + return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err)) + } + return append(dst, marshaled...) +} + +// AppendObjectData takes in an object that is already in a byte array +// and adds it to the dst. +func (Encoder) AppendObjectData(dst []byte, o []byte) []byte { + // Three conditions apply here: + // 1. new content starts with '{' - which should be dropped OR + // 2. new content starts with '{' - which should be replaced with ',' + // to separate with existing content OR + // 3. existing content has already other fields + if o[0] == '{' { + if len(dst) > 1 { + dst = append(dst, ',') + } + o = o[1:] + } else if len(dst) > 1 { + dst = append(dst, ',') + } + return append(dst, o...) +} + +// AppendIPAddr adds IPv4 or IPv6 address to dst. +func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte { + return e.AppendString(dst, ip.String()) +} + +// AppendIPPrefix adds IPv4 or IPv6 Prefix (address & mask) to dst. +func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte { + return e.AppendString(dst, pfx.String()) + +} + +// AppendMACAddr adds MAC address to dst. +func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte { + return e.AppendString(dst, ha.String()) +} diff --git a/vendor/github.com/rs/zerolog/log.go b/vendor/github.com/rs/zerolog/log.go new file mode 100644 index 00000000..efd2a330 --- /dev/null +++ b/vendor/github.com/rs/zerolog/log.go @@ -0,0 +1,476 @@ +// Package zerolog provides a lightweight logging library dedicated to JSON logging. +// +// A global Logger can be use for simple logging: +// +// import "github.com/rs/zerolog/log" +// +// log.Info().Msg("hello world") +// // Output: {"time":1494567715,"level":"info","message":"hello world"} +// +// NOTE: To import the global logger, import the "log" subpackage "github.com/rs/zerolog/log". +// +// Fields can be added to log messages: +// +// log.Info().Str("foo", "bar").Msg("hello world") +// // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"} +// +// Create logger instance to manage different outputs: +// +// logger := zerolog.New(os.Stderr).With().Timestamp().Logger() +// logger.Info(). +// Str("foo", "bar"). 
+// Msg("hello world") +// // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"} +// +// Sub-loggers let you chain loggers with additional context: +// +// sublogger := log.With().Str("component": "foo").Logger() +// sublogger.Info().Msg("hello world") +// // Output: {"time":1494567715,"level":"info","message":"hello world","component":"foo"} +// +// Level logging +// +// zerolog.SetGlobalLevel(zerolog.InfoLevel) +// +// log.Debug().Msg("filtered out message") +// log.Info().Msg("routed message") +// +// if e := log.Debug(); e.Enabled() { +// // Compute log output only if enabled. +// value := compute() +// e.Str("foo": value).Msg("some debug message") +// } +// // Output: {"level":"info","time":1494567715,"routed message"} +// +// Customize automatic field names: +// +// log.TimestampFieldName = "t" +// log.LevelFieldName = "p" +// log.MessageFieldName = "m" +// +// log.Info().Msg("hello world") +// // Output: {"t":1494567715,"p":"info","m":"hello world"} +// +// Log with no level and message: +// +// log.Log().Str("foo","bar").Msg("") +// // Output: {"time":1494567715,"foo":"bar"} +// +// Add contextual fields to global Logger: +// +// log.Logger = log.With().Str("foo", "bar").Logger() +// +// Sample logs: +// +// sampled := log.Sample(&zerolog.BasicSampler{N: 10}) +// sampled.Info().Msg("will be logged every 10 messages") +// +// Log with contextual hooks: +// +// // Create the hook: +// type SeverityHook struct{} +// +// func (h SeverityHook) Run(e *zerolog.Event, level zerolog.Level, msg string) { +// if level != zerolog.NoLevel { +// e.Str("severity", level.String()) +// } +// } +// +// // And use it: +// var h SeverityHook +// log := zerolog.New(os.Stdout).Hook(h) +// log.Warn().Msg("") +// // Output: {"level":"warn","severity":"warn"} +// +// +// Caveats +// +// There is no fields deduplication out-of-the-box. +// Using the same key multiple times creates new key in final JSON each time. +// +// logger := zerolog.New(os.Stderr).With().Timestamp().Logger() +// logger.Info(). +// Timestamp(). +// Msg("dup") +// // Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"} +// +// In this case, many consumers will take the last value, +// but this is not guaranteed; check yours if in doubt. +package zerolog + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strconv" +) + +// Level defines log levels. +type Level int8 + +const ( + // DebugLevel defines debug log level. + DebugLevel Level = iota + // InfoLevel defines info log level. + InfoLevel + // WarnLevel defines warn log level. + WarnLevel + // ErrorLevel defines error log level. + ErrorLevel + // FatalLevel defines fatal log level. + FatalLevel + // PanicLevel defines panic log level. + PanicLevel + // NoLevel defines an absent log level. + NoLevel + // Disabled disables the logger. + Disabled + + // TraceLevel defines trace log level. + TraceLevel Level = -1 + // Values less than TraceLevel are handled as numbers. +) + +func (l Level) String() string { + switch l { + case TraceLevel: + return LevelTraceValue + case DebugLevel: + return LevelDebugValue + case InfoLevel: + return LevelInfoValue + case WarnLevel: + return LevelWarnValue + case ErrorLevel: + return LevelErrorValue + case FatalLevel: + return LevelFatalValue + case PanicLevel: + return LevelPanicValue + case Disabled: + return "disabled" + case NoLevel: + return "" + } + return strconv.Itoa(int(l)) +} + +// ParseLevel converts a level string into a zerolog Level value. 
+// returns an error if the input string does not match known values. +func ParseLevel(levelStr string) (Level, error) { + switch levelStr { + case LevelFieldMarshalFunc(TraceLevel): + return TraceLevel, nil + case LevelFieldMarshalFunc(DebugLevel): + return DebugLevel, nil + case LevelFieldMarshalFunc(InfoLevel): + return InfoLevel, nil + case LevelFieldMarshalFunc(WarnLevel): + return WarnLevel, nil + case LevelFieldMarshalFunc(ErrorLevel): + return ErrorLevel, nil + case LevelFieldMarshalFunc(FatalLevel): + return FatalLevel, nil + case LevelFieldMarshalFunc(PanicLevel): + return PanicLevel, nil + case LevelFieldMarshalFunc(Disabled): + return Disabled, nil + case LevelFieldMarshalFunc(NoLevel): + return NoLevel, nil + } + i, err := strconv.Atoi(levelStr) + if err != nil { + return NoLevel, fmt.Errorf("Unknown Level String: '%s', defaulting to NoLevel", levelStr) + } + if i > 127 || i < -128 { + return NoLevel, fmt.Errorf("Out-Of-Bounds Level: '%d', defaulting to NoLevel", i) + } + return Level(i), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler to allow for easy reading from toml/yaml/json formats +func (l *Level) UnmarshalText(text []byte) error { + if l == nil { + return errors.New("can't unmarshal a nil *Level") + } + var err error + *l, err = ParseLevel(string(text)) + return err +} + +// MarshalText implements encoding.TextMarshaler to allow for easy writing into toml/yaml/json formats +func (l Level) MarshalText() ([]byte, error) { + return []byte(LevelFieldMarshalFunc(l)), nil +} + +// A Logger represents an active logging object that generates lines +// of JSON output to an io.Writer. Each logging operation makes a single +// call to the Writer's Write method. There is no guarantee on access +// serialization to the Writer. If your Writer is not thread safe, +// you may consider a sync wrapper. +type Logger struct { + w LevelWriter + level Level + sampler Sampler + context []byte + hooks []Hook + stack bool +} + +// New creates a root logger with given output writer. If the output writer implements +// the LevelWriter interface, the WriteLevel method will be called instead of the Write +// one. +// +// Each logging operation makes a single call to the Writer's Write method. There is no +// guarantee on access serialization to the Writer. If your Writer is not thread safe, +// you may consider using sync wrapper. +func New(w io.Writer) Logger { + if w == nil { + w = ioutil.Discard + } + lw, ok := w.(LevelWriter) + if !ok { + lw = levelWriterAdapter{w} + } + return Logger{w: lw, level: TraceLevel} +} + +// Nop returns a disabled logger for which all operation are no-op. +func Nop() Logger { + return New(nil).Level(Disabled) +} + +// Output duplicates the current logger and sets w as its output. +func (l Logger) Output(w io.Writer) Logger { + l2 := New(w) + l2.level = l.level + l2.sampler = l.sampler + l2.stack = l.stack + if len(l.hooks) > 0 { + l2.hooks = append(l2.hooks, l.hooks...) + } + if l.context != nil { + l2.context = make([]byte, len(l.context), cap(l.context)) + copy(l2.context, l.context) + } + return l2 +} + +// With creates a child logger with the field added to its context. +func (l Logger) With() Context { + context := l.context + l.context = make([]byte, 0, 500) + if context != nil { + l.context = append(l.context, context...) 
+ } else { + // This is needed for AppendKey to not check len of input + // thus making it inlinable + l.context = enc.AppendBeginMarker(l.context) + } + return Context{l} +} + +// UpdateContext updates the internal logger's context. +// +// Use this method with caution. If unsure, prefer the With method. +func (l *Logger) UpdateContext(update func(c Context) Context) { + if l == disabledLogger { + return + } + if cap(l.context) == 0 { + l.context = make([]byte, 0, 500) + } + if len(l.context) == 0 { + l.context = enc.AppendBeginMarker(l.context) + } + c := update(Context{*l}) + l.context = c.l.context +} + +// Level creates a child logger with the minimum accepted level set to level. +func (l Logger) Level(lvl Level) Logger { + l.level = lvl + return l +} + +// GetLevel returns the current Level of l. +func (l Logger) GetLevel() Level { + return l.level +} + +// Sample returns a logger with the s sampler. +func (l Logger) Sample(s Sampler) Logger { + l.sampler = s + return l +} + +// Hook returns a logger with the h Hook. +func (l Logger) Hook(h Hook) Logger { + l.hooks = append(l.hooks, h) + return l +} + +// Trace starts a new message with trace level. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Trace() *Event { + return l.newEvent(TraceLevel, nil) +} + +// Debug starts a new message with debug level. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Debug() *Event { + return l.newEvent(DebugLevel, nil) +} + +// Info starts a new message with info level. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Info() *Event { + return l.newEvent(InfoLevel, nil) +} + +// Warn starts a new message with warn level. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Warn() *Event { + return l.newEvent(WarnLevel, nil) +} + +// Error starts a new message with error level. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Error() *Event { + return l.newEvent(ErrorLevel, nil) +} + +// Err starts a new message with error level with err as a field if not nil or +// with info level if err is nil. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Err(err error) *Event { + if err != nil { + return l.Error().Err(err) + } + + return l.Info() +} + +// Fatal starts a new message with fatal level. The os.Exit(1) function +// is called by the Msg method, which terminates the program immediately. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Fatal() *Event { + return l.newEvent(FatalLevel, func(msg string) { os.Exit(1) }) +} + +// Panic starts a new message with panic level. The panic() function +// is called by the Msg method, which stops the ordinary flow of a goroutine. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Panic() *Event { + return l.newEvent(PanicLevel, func(msg string) { panic(msg) }) +} + +// WithLevel starts a new message with level. Unlike Fatal and Panic +// methods, WithLevel does not terminate the program or stop the ordinary +// flow of a goroutine when used with their respective levels. +// +// You must call Msg on the returned event in order to send the event. 
+func (l *Logger) WithLevel(level Level) *Event { + switch level { + case TraceLevel: + return l.Trace() + case DebugLevel: + return l.Debug() + case InfoLevel: + return l.Info() + case WarnLevel: + return l.Warn() + case ErrorLevel: + return l.Error() + case FatalLevel: + return l.newEvent(FatalLevel, nil) + case PanicLevel: + return l.newEvent(PanicLevel, nil) + case NoLevel: + return l.Log() + case Disabled: + return nil + default: + return l.newEvent(level, nil) + } +} + +// Log starts a new message with no level. Setting GlobalLevel to Disabled +// will still disable events produced by this method. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Log() *Event { + return l.newEvent(NoLevel, nil) +} + +// Print sends a log event using debug level and no extra field. +// Arguments are handled in the manner of fmt.Print. +func (l *Logger) Print(v ...interface{}) { + if e := l.Debug(); e.Enabled() { + e.CallerSkipFrame(1).Msg(fmt.Sprint(v...)) + } +} + +// Printf sends a log event using debug level and no extra field. +// Arguments are handled in the manner of fmt.Printf. +func (l *Logger) Printf(format string, v ...interface{}) { + if e := l.Debug(); e.Enabled() { + e.CallerSkipFrame(1).Msg(fmt.Sprintf(format, v...)) + } +} + +// Write implements the io.Writer interface. This is useful to set as a writer +// for the standard library log. +func (l Logger) Write(p []byte) (n int, err error) { + n = len(p) + if n > 0 && p[n-1] == '\n' { + // Trim CR added by stdlog. + p = p[0 : n-1] + } + l.Log().CallerSkipFrame(1).Msg(string(p)) + return +} + +func (l *Logger) newEvent(level Level, done func(string)) *Event { + enabled := l.should(level) + if !enabled { + if done != nil { + done("") + } + return nil + } + e := newEvent(l.w, level) + e.done = done + e.ch = l.hooks + if level != NoLevel && LevelFieldName != "" { + e.Str(LevelFieldName, LevelFieldMarshalFunc(level)) + } + if l.context != nil && len(l.context) > 1 { + e.buf = enc.AppendObjectData(e.buf, l.context) + } + if l.stack { + e.Stack() + } + return e +} + +// should returns true if the log event should be logged. 
+func (l *Logger) should(lvl Level) bool { + if lvl < l.level || lvl < GlobalLevel() { + return false + } + if l.sampler != nil && !samplingDisabled() { + return l.sampler.Sample(lvl) + } + return true +} diff --git a/vendor/github.com/rs/zerolog/not_go112.go b/vendor/github.com/rs/zerolog/not_go112.go new file mode 100644 index 00000000..4c43c9e7 --- /dev/null +++ b/vendor/github.com/rs/zerolog/not_go112.go @@ -0,0 +1,5 @@ +// +build !go1.12 + +package zerolog + +const contextCallerSkipFrameCount = 3 diff --git a/vendor/github.com/rs/zerolog/pretty.png b/vendor/github.com/rs/zerolog/pretty.png new file mode 100644 index 0000000000000000000000000000000000000000..242033686debf04c5b1756c166d05d09767594e3 GIT binary patch literal 84064 [84064 bytes of binary PNG image data omitted]
zACC#MPIj#dqy7i=qDp+Ja zS{!ec5bk4*pV&cR@Sxu;8G2y2&cX!kSP}U&Pbcc=XOKn~oCn!)`7^;T7u(e? zhZn(~&VSOg*7EBMu9t7F(^CV)J3Eu2Q-7>r0qW4ooz2VCfX>cNke=`zZHyVKbRa|; zV^OelI@eV(W((|%$Wi2UgHfh68O{tt>Ou@@I==|b6xkCfgwy^ks-MxEeC|_5=_t%c z{&=H+mV)7bV@aJ|34v5dE02ojC_}A^(PcSEM8{`k#G*d<9W;+ym}#~n^LP`Hs@;X$ zA-XVc=?i-dCAE&{6rqoAl=a0oi_LKJnMrQrONQ>6^?>=$11cX0`pdb0Um_R1k(8d! zXR+>X9AavEzc#vFBOM}0XxBjNQ|$1>k5IpIt~Fmmi2$jR)F#JA8>4dFLc_2Em_#$N zANRkz+QJ>nO#{Re+HZPW&AHEu-ICesn}L|=k~qec*Q4(CcAN>AaTyz<#gG|u6%-;8 zD2uG#1FDRMArr2D+bfVk9$Pfics8tAMgZ@!NdgtLehe%KL^c>w?m9*tgg(UG-^Q|Q z)Azo|-SOK1PoHZX&4z9!*4H>^5z^~trkqEe48TA z%I*2!w06=}SpNCm-pnvVsl$+*5!RArcPQk0MVUK{fgJBTlFWlyuhpq$wfbfQf%R{vwBX?j5H+Sm z>y-d?dlgDqnNa^E9i_7*p z76($6X)pwdRF2Tigb9_V*CF1T{1eid^cmb44xjgz4@q*{JlO_eQMV^bYYM24>*kV$ z-Re^Zi8#7^40A+ho?M`7y*-PEB!N9OjDVto$CH&4 zLC1#&FN!by@}{Fdf7^BY>!?g^HrrkLl7*lU{oSq>J19!)zj>drxQFc5)w}e9Z9)@% zZbR>gg23#uMg2G}-WZOV(AO}s!-|a;fL&TZom=^749d z($|{^%SWut+znm7Io2*$yJ~JjL!6ud?ID%&(qTx!=UkevXeMG@fmo1Gy^UFJ$1-Bxv?rW_(;}dYWr0yE> zE(2|%@zpq4=Ci5PJ9d}~>imnHRk*!aH?+GxVCpC2J?2Yo<+FteVn0B=XRu3jvsxIx zUsh;yL1w(J6Qm5y2~(?}x6Z}s(DCS;LehyZfa7B!Z>*`#F%&$Zn7(Ze+I5AgVQye= zRY=Zf)vwUXLUBDG_oyz7<{Qj$VeZjnH}ToNCMDZd9~$B$-tC%?&(fbMfYCNmbov~y zl~-HNGYEw5HghTRbr>cbCz3*p z$}}evl$QCsCMHDQy||}Pd%P`_upsacGJ!fL-`a01uY%_nVjD~`UMFi@U_4`5shW|N z%@x+T)K_Aw*e7-s6Xst#xp&=odCe!C*BH!lFy>p%oGPAW3PkMj1tc=v5Qx%NMYU2v zHpLR?p)O2;4Lt%PjpXusr22E?h!9O(V0|9GfKP(VitwccyGv}|5)f%1Xy4%=F&NA6>KIo+L@@0HQ3jT~Ndi*dKFLhp`Wg+=~!`HF-ZXFv^(eJ|z zuIkL>DjADzX6JO?p@M`hUcph=UTJEDog&ty@|ayGI_W?9G;CZEPpJdfFtt9QhIX{l zvATcMl$RaY2DC#3P~@4zl5pOrfRIRY{cNpQ^{vVZQ@*E4<(cPD|AaB)%qlmKW2qP; z%s2Ku-p)}aH&@2l^L<)v?H-*6R$282ROCsfc{x0EeExxwNeNG&8tC`Qa-h}QHpY$N z*gX_@^etn6q+gtU8o9Z^VP5h1+A4QFeJ!l@@v2duS2$xS4;_mX;l~uIDtPa{_T5)G zRNL2xw?8V3Cf!G1LvREdq7I6AotE%1rG-9!Cq_X-fGGAK;-q58ie%4e=woBQ8e#to zV7)14{}t=i#5SI7eryBxeVA#-`Q{N(Lb$2sLiKkIHUq7?M8)dCaRWn*l;lcRQ(Ie? 
z$1zl^f1?wi@iS3+tJZ?GWbc6!UG)3^E7B{%zc%V>V+d|o|52DtSPGuE@p3#O)yr_k zaC>krikXYRfoI%n`3|X{id$S_rYH2uPvtl1Qe-|@XC0x8dXA?|ie>Bc8C@?JUiPHD zgPox0u9w~h_oqUo4JVk9!&i5(>4lRT+wRweM4f~C&=fU^+JF_oFB(ksuJ@!mi5<`5 zgM9hT+M+&ss({WW@eE}dehoCuz2#j)dG|1Syd6E;l4oaZwakBFyNegk=EW)i*)6DE zD+5T|P1Vq}`Kn9HLgq1(4b(02FO{RoMSszXmEO*mqDi8doZ%HI;z_DBVXQctS+9nf z{4_~#w0%BzOh#(TD40JF$TC%UwA31YSI?>`vVIlJ!>s_@sL9b=bH7a>I9!%#f&*>{ z0K!X-5ii>qIKP0;XHUAn5jTO)%~Vx4nMZ{S-_?KK`5R2cL~3Ux5pVKY7%V$sqKcW( z%>B)nXgkHZ!p!-aA0gUw z)yPabCzp^(}2 zL*>O29C}!2%in5H?iB2BWkoyHd4drK+O6RAwCpJbGYbtuSl-35_l?uV)tGOO>fQ*T z7woI|;J%$8(r)(fyW$Yf>Gy>Bk#}yxNm~65e`;taW;QYc*}Sm0j`QIr#oN(U?Jo%t z;avtI#JdLqp1}bRsLMiAoup>bEjZp#w`UZgc8gQl+KJc8N8ckbjZBD zpQ1NwB2*qH1g1PR2^plWhfY5{)nG6x$_mEN2@0W_EQZv{s?P2SX;(zH;_l~Q!)G7J z;O7gn%R&u44;hzJCq}vthlTnfv@RHKqf575$J5weoxBz)=EYs*#a7Jx@CG~;op^0k zN;-!J=pB;6dW*A{m)AVsLALCxN_$J3DcQ4)bd;k0W*ax)Nqp8yT) zM0tAEq@nOT>Df;$z*KzAROlfZRJR~c7l$-c_a~~Th%Sn}LP7dK|1T%}T@KB=`jl<}|fX#Biv|h~&)+S4l~MWb{chPHkH>Lyh!Kz1^@oebRz0g0O5i(q;QK zdHqDXDA%QZRSkiC$MgOs_S{b@eSso@ntD?UbnQK0OQC}!fZ42$JESQyw40)rN*hu} z+mC%dnn4vO&X=E`Z$aRdbfDD}{uG#dEHJ7*0?rAFiGn|Da)U1u7Y(vmWhHP7v{m$vM=qJ1l&oN&^l7mWb(cc{GgCw$fdfaf{4^4~L;38H z(1nv->>|_8`QU;vy=UvkFa-S_WNf>`ThVgrjXnYP9g>F6l(;24I{0X(0=XY_xZrG?#eXJvEHzIF?iPP>czpUoSVZ-UyJ#w z`?uWIh+~K6=P(--&OvgUlCQ9#NNH!5;-DEo#@k8uhw4|tc3UCg$h9qV+&{dr;MJMT z%XGM@$t~GJ^~9ymu2$52EHE`J>|(2jH_PG{7Q)?p#O&0~u0nu4dEbRBBip|)ey zndrD|O;;>kfW-$~HT67e-nnK}T}j}YW)B@4}Q z1@WH1+;|u4SwocqYg?vh7Gep z=?&muc1~R1Kl@xB?KUcenJ}pq7gKdr$EM+!>kbJ6MU{U;({hGT8Yne~CxBA` zPaKjCwZnB95OHL*YcqKd`f_`^m%<{%&7063b$U`4IP-+22|vT=Ktg-A-n9otgT3@Z${l-(k?3 zwGZ2+yz$ID{PVCvtBH4hV;|_@f=nzaJKP!|?E_PZJ=2MFJ$d@Ab8Kd2=kqI0D`p-x zT>|o!3?L?Z*OZ$+_xRtIP^ziJr?R5__M`Fe(Q-v(oN;e_yYXNW1cC8`7cBo*@AkN;PKnY{_(pH0v~dn3=6PuKE8({ zJ~7$@cCN>7Jz10g`!~LTxYSh;!lW%&MfnLIfov9`$DacRWm@PX!uHmt3+v72eaVyl z*L(kum;Nt*sN-uwg|;1C+?bhFVZZJobBvkh4;H$(ZuR6itcVjO`O#v4jt>4cKYtcE znAo0Uct(H}$Xh(9?iB}^M%5hY)KR0~OPP2X*p>e32Z%{lrKM`8&?VnRiv52f)2olj@wt2%NS5@*onszNNGX$UFVDmi9LBsZipHipg zIGIbj+~-Srf-m8vJ2|zlXO=7r$O<-Yv_NVN973_vyH#i5HLi%RHHa7k#pcD@tqJeG zjA8X3sZ6ev?1<=h9`&H>>%FJ29~rLTJxNo{cLrP?4LmndcfZTU{^K5L;NA6~ur%v2 zlEa7{V73Do-gRt#@vGP@US`IfIC;aX$1de?QoL~FxSLXaA9wO8-QY)h@-2V@E9|?h<}r%1M)C}?nPKUIJFS9*fA@i>XkqGwte|c*>U&Aq5YdDn=QbwpW9$e zCd=zwb?fT6C8Qq>c+jm(K1&Z*G@Zy8w#S{OP6)l7%w{(AvncG5C;MYQ(ZhNz2fh10 za-#e})aRMMe#bX&cAzeY>e81dt>{V$&>Jnny5^nPRDr@K^*;xb)u6#ZQ{6dp`Rn(c&ige>1Hmt9XjVbbJijE;My_CJfD9w>148x-wB0Jl+*ksjevL=xXwi8`30+Y}^kU zKTOx1VH01kJO~7IZjX(SQia&sg%C>V)K+S1rga$pvNtWrDBs-Qd;vIq(j71R@CO*r z0OJ&8e3^U=q3 zkB{8^()8TzGqT)|w%f?QdOdS(x&!G)`1$iH@$q+lf-)?pg&&`4r0WT&)^2tgx@vSh zcCOoRK4uaR3;rx&p%`M6Z{Ahi`X+!oYJaa@!xeTj#+#IZ<*=uxWFw+YeE83G%6c>b z4ulZi2B}GTq5+wQxT}jxqEa|7Rh$FdG&>g0EH^6w^_C_G96Z#_ziq#AMh7snmqFS^ zQDsK*`LViapN-27C3vn?0?V{SlyhELs$&#xJq()(eUXMPko_5M7A*O&5Pd+RL-CIW=wa zf%;(ZW!Q$t`^lWD=TrwtnisQet9^e-=1C~D%A1$nXU>2)LbE%i%&>gflT%n2uz%)* zA70gDF7T-dtL4r|rxAHUF19W%iVF)*Pc3KyW|ZDt4Z2GqWhE#JE6FGIn@M^SBm0{T z+F(4IHx|dPHA41nJxiC3-fGM57r_`>4bt?r^T&w+%6H82Px1Z$Cr{ODdqeVc%7p$f zQ~>n-ABK>e+F#{0l^xM(04|uguy~7#PRpQ_i%0>$oXt2% zTx5eLZFpgV995NyIlM%XQl`4jrfX%a$o^d~T%N8tf9sK9{yrx8<*1_ErQ)=`}?}#9gPgit&KXdT&mO z-;EOKjc!MijUR?h1QLunKqdf!xVY;*KlRLL4_P<^%zT%9RSQ0YWMfH3`a!_?bd5dC z=U5WIX7l~8;&P_tjusn@)q))k9P|G@;r;b8o4PR+nVl&+KhUWa`?X*8>*f=ifO+P} zCb$tJ3aEIHn@r+X$TZ-0EMc+DJiiKXst?P1tkkT}1SfH7uI)4xRrjOzAnLhiRI&6*92V<)ezUs6+5^*6G0_Pf8mZdTKGrpfFH0Suy{7p^@#^ubO~-9d&Ull5>fyx`HR75>Y-sT03vC3e`_brqB!g zS;cL`l^p%V(<}~JcDk7R%}VtQg5e#SZrhn<`Zi)GbFkmynoT(Bm6Xr?I+@k25WaRi z!}q*x_lzFK6Q_s8XbC&lQ`5bTi^PcxVls>Qq2JCE>uLt77|&FRHnFjnd{I}Yy8Zgu 
zfS05TuA_mvgc8b@4~-1$eQQ}$hs-%)+do_38Y8TqHulI9iL*h^K>d525QpL_itq=9 zV?0f(Bp$0ajfUEI{6VahIvb?7>zJkN@)ua}X?ERU!z=|uQI=7iQiiai@VG+iKA@|} zy~tu1O4%(kQxH5^qa~*qoC%R;6)Uj4-XuUtoT8DmIl-#2OiT81+Jemi-jIZBD;}Ki z#QZNmLo{v1`FmH?@`wzUTYa*?4?`!s(hQf7sqX=H7!TYv(71SJ2L}u~0@gX%dpr?Qdl3Zsu(m zat_tMy};dPqz0_vECjFBg=xeOD}em28ONYpy-0X+X<}w(X2MrSBZD2Fo0h{Y#rC3s zRPCDVkZ*Zq92yR=DmDR=%`x?LNag$Zlpd_mQ42A`U+pmKE@@;(*HXMH^jZR zY4rQQA>1kgYknC*cpoA^r-*)qApc+>Ak?g|Swh`+V{L>GhEm4vvpgCZ<;pVYuHX!cv=%WyxW3 zbC#Ognj!5MIuj#=00O2to!!CcA4g6mTSg9YR}m!3lVUx$+**y7QR%eSXLo|SW15zK ziQc=`C;~p2YfJ&Tq8go=H`OVPs<3NSQQg+*!d(38dcq8B(8W&BJjPu0S*zpQ2|q**?cEG3g2ezXd6Q+IdwSBbv)nq6_7x*@ho6xmE^((MXu*Hq0jqOo8VXS0Me6 zLUa5>m0vJ9GyR>j%$5QLHZVIlMFg6CyDK+%;U+wXRRk{pTPtFPbOvh&mccNm)DyeH zX=`WR@-xunV3|FQAraRgv}=>mdXTvMCxS@}HP*4D&;+!dmBW=@PV$vb9h{@=c?Q4sLMZ(Kg@k>%6!6WX~vV(TySqmIfwYyfyQOpFNKZR z;E^&wV}*%x+RH~)L#vmI2ieR!l)zjYBX}gcOUPW)>_tfI00>@ic^mJG2k(MnRAfXR zR{+mOw&PI@ictavfn34YUe$h zrWjI(tJS|4)555%(mg_>=YL!dA_0Wt!{P+etvG%wRfkY~DC!5IiIOYzZ-x4%fEn6n z^DGnUB79fkG}_>P{eE_N-RvI+$(}PCXEJM~T-`M|Ko30bGNFR+)AjJ7Es9X#%*k~2 za7@Wc1oI`se8$ZlDjX?zhFtX0B{hpKtZB69FN4}z&{^=hA$?X;N6vc+EV}gdq0Fte zukw+WO~vd7D2N4?j^Ds>AJCIh!4Ae2{CGy2C5C)mMg{1GX$WtUvSlEcjr73UnzYrm zbZ~uvlPJ2tx^#KVwVwq2ap8JuM#J*bB+)>vcNk+C!@zH8MiKN@Aq~6)T!FLBNske= zuhT5RnGJ4>EHDBxI@;Q&BiGN)+AI3o(Ey_w{JlLCz@{d2&yzLL^g4s}UDjRx1-vfX zDWlb0vFXtjzGF-ggST)+-wQCTIZ~5S|Jp%u<3ByvC3;vOeH>j;N5^JZ8)7g?zcxBk ze)$24*6u^|nLHJsD(J=hFYZqg9@i$8eFw#LvmX^E$4V`WA*BhT(6Zfg4d_Nz1Amfx zMxfT26b+325cq|Lh5mba(ZIeXKuP?av+S~4^?K+jyttZT-L4wN+wGywC%aRcp#uxFUCIw;0UEtdlMuvh#~ z3=aU<6ydf{EHTYHMxyU6Sqeh?eG2La`yGf_DkQ?`;?Hwy>~V9Gm0k5*P9!ER!i25J ztiw&gs}k?kL|Qwl-pqr`BbyofN=F;WcFeC5NkVwNptC0~+j=tKg4utHLCWQJ6hp8X zbJ(Atv0 znbh{Ua)RDv?F1IDP5f-xrzGM5QnGnUZ2FtGwe`}#8o(71q&Z@vYztYyr0wO6T6Y2n$HGu? z08efq&cptI7noAP-};Lv0laV{er00$NfTYBP_q+!a;;^0ABLX61we@;YtgcnQ|4UU zHbPeR_V&}(uFtvlZ|GJGlY?vWol!s%(HrO?KO4=s6reO61NGy97V&k@te=juwr~WA z)K!A3Ux5NTa`ZZ7p)^VDEaBC;`m_qqxkkMS$sH)n!cmc9)&CO!QzSvY*)oqmXL>+Rppa&~#LGyOK3NN$hp0hw94>((^;|~UF1H3T zw^H_a`I~%Bpw#vsrkms%zRt%u2VV&j{gpPXBzo%BzoML1(U3QF7OY~!BR;(f*Pzfr zSv2g&hsEaH_??l-jpQU3-gz^>D^<}e#zF?!d-k@p zAxslx@xf8e+t_fJ_LFY~MGn5?53y6Gp&#`eewIeDN+Dw8SfL<4O8cP#5ri|Cs4nk- z24L$ghKLYpLc&x&#t$lpEqpc$a1=BHGKi}Q%+u5x;1GkG$e78W`n{yBSng##8OGS- zRMN#kUX-uyq6!gvBtqD;F}~cycSm}WssffiES?pmJ->u(8|;Axx|}!^tr-YbCCKI1+8EBLt-Z5fZCoEA_$v{o*z(a96!Qtrrv ze3Y#99N!=A@9($QF`fmyqADBLodT?7^gVe9MK#$QAv>)T-_T~FhZnSIo1aopzwD|8 zEFzG>%?+VVEm6YcfDF!V>q7}z>3`ukpZ*8OnVBFH^NTU2-&2_ZPmfrzQwrL7U>|PS zcEVOVvzHQ-Wuuz3k@~bS@(7R)>D`hQ7oB0Ffb`1EJ|>muo`rfZm|HRT&30H;FEj(r zh4uM|?V~h`K2L9GP3cX6Z6OmDoSC7J1|dWkrJH{#P6tqw*&e}6eh=?4>V@-qF+xb! 
zEG4H9jptO3*0rJGnU9{+=vES&TKq#L$tc)L!dD){DckhB z(*nc-R>@a+h%qo7@t;M`s@xY)=IVRFcT5HU+W6>U zcFd|90fh$rIZ{*{3uwCNba5(%Y)d_^@{0ckCK-qR1x!-H0JCx|Gw~(61mMhzI$p$R zxUS%ul2g#E+gtbJ@1;vs+d55qZR}cwd>ZKDfCv_F#2k0Q9p5woawu(&bH+D1_F0DJ)&`C?Qa^Fv(EAfrxy*UJbz{eJH6D zloh~2Fu;PkUjo5x-aHj*&_2UevhS<*jIHtg6?!wh+UaWtaRsg=6q zKd$Cjn70gWJtj@j%KA8&bAEu9f6B(m3Kfj1K?uKy%Ju$zq>~W`4V-#I03b3QyyIaU z{Wve*E9AeimG{sWb1e01yiQrP2S`N`SYKokwHQ%X4eGAMD@NMH%171^+OT^@@|%*4 zhEy9bL}(8)A!K5moK9~l_@up(!0yv8@0DyCUQ49cT3W3)oS=}2UN#uuDgF-wh*JOr z`gZR4Dve7WoH-M^bNJ$B+pTd{by6c!1c53?+ zAC9m$*ELU-GrEH|0LPXEw?}_}g#h4*5x;xl{L-wmlnVoW)bkb2If71_cb_Vj(y~Ha;ukxKK0+&4os!=E13- z!J|`WDd!7AgyE(P&I@UPVF5gzWASi;uHz5`=&FtjqFrevjO(I`PDI}fJug#e=dlbOeDustWhCs)H4sLGWtcY7S$nLW+J zyTxrc<^M6Y0GwWvrYpVVnE}|7*D`?Ky?^&AY4w?#`al&S|Fzh6V5e8zQzq`nBmz@S zR{?aO*d}nt9vV{6UG*}L zXb$k-WB{sGfbElBcI=@dZ{om)oFM~GDii6JsnAfpDewN65Dwj!A(vs;)JWbc*h1#m zJEX1Ox!y+%asj901pYIT-vf2Q|37cf@}$4so(KP5-ku`VYYtO{2UVDluAWsp?-i$) z53@J$fjii&S6j?wgL$elZE=u$0oCjW7MNiY{ycDhR@5F>zl0vyj;q)>Gdu<7{M*xvpW?( z0P1X+&+j4MUf13WL7}x!7<57r!PS&lubK6~n0xE6DA&bNTg4!yq@+>0RHREv8l)Sf zkq!aLQMx1~C8fJdkWf0LTO@|=jsXTZ&!B7Xwbowych0%K^T+r8;dP1Q0Q1hg@jUl) z-?$fMj5on}C8NFKDtRkyHlw++$#g}6@^_us9*W6&Lqn$4_-CC@@d`yVWMd^0YIl&T z**{nZebhh^;;0O-BtQr-<%fdTHl9N8=p^ZAwk`6q^7c>2qeyA8mM6mCr$OVPoZ^^o zH3MJ{^{~Rm%V8pxP_>7mEVaLlnuHp_sEKcWsNNVspZ_Ai!D+n~56xL3bqx_yncSI* z%DP>I9E+tM#g&6)h&j{R2hCT?@ILi+MNu%kSj8(OVG)7hu5tpR)UHV{fG@#}*XT%L zjrv_t(L*7sKxrAo7G9wBurXpEcFyP5bI$I5J}#7B#w6&dpv(_Zh!> z&nNw3&L1q?&$mR%tqg5fdKOo!m$3~5&Nh16P=jol&sChay8wWBrm!ktf%rwe!KceQ=?q zJR&z)Y{T|Z1`q-)71O^3yOxzUtj)=1g(`#=E(u;oW-Ity>)UH9p-=^!8SW4^0dbcEmM7`;gSyXpWY6TO^BxriDA*uTqo&iDS3?_m{8&OnWo6D; zUW~5u3L}OZ%X0QHQk!`2lG?+*2p&}bn(!J&h2$?D7Fy3UU-5GJ{jUYP(+xS=g1!%J zRIYt&XH>fT{eYXiav}d%1y+uXd%<*p3Q^Fi;&?-Fu{J;M(|G#Mz6$FTYZRw435EzI zd@+F)>%=cPWf{|-(m??%Y>i2&*)Fo@pVuxebKM=WI3~Uhf&f1Jcp0 zUOg6Y#@kNhIQqx~;4r%fQwm6a6i1?e)psMhwvdI+XUH?(Ou;CGb?K+o1HL50{@?vV>@Cd=4hwk;;Jt` zV!tqYZVzuu=MH77Cu;msY;7^C$y{>0z?|g)j38;P*KVuA*v=4Z@DEj&qsQ11aXw;+ z>vU>j(YytF+N%z+r^oj(gqmjf0p8f&zpD2O-`K&skZ7g*N3iYd9OeHB#l~qzTP-A? zm^n5uWt>Bhb|b(7&vjJtgVQqJz3xToCmLJHI-vB`w6^zmqRgws7q)O9{85`Fc5&;+ z{SW8dxy<+nUx0VCp+UEu^4ln}(<5)L4^Aco;CaftWUh0^HT(sKt0@(@KV0A3vL2L< zYA4Y1`crlz!d$`p^C5hfxWSdf&gU!QR#@iG{!l(vCXq$Xhl`c>*Qb$y0japS56WdU zsA{Q=A2py~Ei9Ro1D+(?P0#USiJ~&iQ^$K6UYK5uaAmlq1jPi;&yybhr+{=Zw;S7b z9fSIYTbE*@4Vclsnl}D)rSJqaGV0`3A%xl9-E6xM z5uR*v!(}D{DL!3uHh+W;9)z5H=t(0ffsdZfgK_onOU2(0r&KSyac!LP8qBbMkA|25 z_yg|#7OLO8GWt_Il~%oWjrS&KEphTU)}_i(VBnQK{oHx=EEE)14pJ(0YWfz7I57DH zwZ8V!|Jt?DtMKu&H#onsT??y}Yn9&Q zgSrh*p=$96LwA=VXW>oav(Fn?$Tq66xd$|0n%aD_(prqrH32;8ho)>~s-%)u!$Ga! zRE?EhDf)N2&f_R*!)hv5YU|D0-a^HN-l-(u*TND9Xb8vfY7Yk*t&^6UllF@{ssH!g z<3|Y#V^vUQODAde$(uSrO-kJ`1S&uUdP`$6PtB$6r{4Shc*m0rIpmBc9(wTVy#QmQ zx)$dPHQxSDb(&T2i&}w5gcuP&$;8fkTrY;$e_DSB1BD0BrJp2j`@O5Lc}z{SwxY$0 z-rlWwsBxv|T&ff=2Oe?P({`ef{(4%`&dvD{> zQ?R^`|HRtKY4q^mG$QN=k`ZHLo#*~?L3DR)jN#;;63Y5>R#*@J&7zqer(795M`@5+ zo_+5Fno;sC%mL}<<8oXxsA)rTX!=N#GTLVSUeihMd3%lSdqL12zyEyl(Qewe6?*79 ziG<^NH7aB;<-hqrZ;m3cZeW%B1J! zau6hek(q?4+uQ#i$uy!21evD1^mj6C*5HavE0Nv}LgIX^yOg!HUHDRPpp+O_yD*D9 z`Dcfd^jHRwIA~`j^&NgP!=o%yBO_{YH`*~{9@jVHa+;^MrU0%p0bR_Uk<8`KBg{I` zK74^DJsI6&5}Qj8aQQJWbj7I+9_uKL{q&IQ_3%{l?(tB9ahJ0Onfod^TI@lzd#3p} zysvZ0)R{eUm5Td`ggwfbr$!}?eA(AoU#{~KvBLYUEmhs!@%s)0L1|zev1Z}u6SL9L z?Ay$*1wjE!5_kS{jQ$ow@{~zA8o>q6>-)x}Vj{ucM9&x;SeWBVB}U!>|4{WLKwc(? 
z1P=)_hJNf0MK-0RU_-0*?=WOoXG{cy%?D~Q>^gw&C{bGlZ)a; z10NJ1OINT6Ovr9fh*^}*WY;^qIP!z9>k+(IDqA3{VFIUPJweB9rI={R%)vd)o>+d` z?NB*aiAPri+snA#dIZ8Y9iHloNEi5thyZG11c$y1|qM#v+{=- zl$sV!39R5fo_$_kl<@k5u-x2Y2tH|s8&hyxK98X>HC+ii~I`fa8`%M6F z&iynffV2VNY`D{YtG{8kf~7qLfhu9FhmJ2p6wghpllk*<3&92qqJqgaJSWx{%*YV^ zc`@$+r6#1uh+T-2jN4oSNj^WZxOC3R)KSv1_~~W|>dx_;=T++OCWuZ&$b|;QHfV#}vcD6r*NBB1$>R z`}zjcUtM^I8Z?cAx8gNPQ6c3DIjoe=o*Y}Bsx?7%^5^nVt;N{=Q*khKm-FV?U7b(! zr#*T(0fMRs{7rAqJcoG0wv^(=;v>Ysr{EgzjKlJxBHl$)|D-6w|M^5iFi1(!)6kDM zBh;7C=O;&&MtzXV&D{Zo0E^0u~Z%&p6+bpxbRm`FCQyN zqCOeym%4Qx(WED0qD$&fZQ~M&CpK`}#H1pw_K4Pv)21<_ArLhTd)gy{>?r_0Q{@SO)#woZ=6+wc;0dPO!uN} z8{C2u3+}Ny9dE`G`gzz+d!wRL?r^}tqOYF$VD$(DSXY0Y-V62z9d|)vRAA+BFHSbj z4ZJv9>ln7h8IJzkaU6(O;s$t={h5y|h>&Kz;^RhK|HQ|k{@?O(HF3ZBxX(wsD+NLw zQW<#s9FMquQE==67aP+(XnIv@x6@Thv)0G*os&btI$+o&w94jn)!iy~fX zjgJM-Xc*}j!aI!yVeH*5H)FwV^S=|x!y|iGk&MNoL?Ksn91#Xjrtn%Nv1f9ehDP=P zF(@pT7S@#7j^W=FniHXe4{$8kwmKyd+U;#Dg313@wfSw@-XR}Ks`K!aZ{?uAXS)yl zqg9fsRLEuY)YZC^K?%yg63TV+!haBRr)!mN-lNOoBdCun~|yP48INRaTI` z3#mQLn`8d@MfFx_G}*kGs!(oLO%Eb4{oGqkacgV>EVSeW3RDRk3%q;q;w04Uaf=APJQV$p>Qw+Nn#FV z%>s>$&3pzqC)^9mAC=wVL8^@P%okQ1B6wacX5XP$yV(JnDgBQ!R55bPnF9gpmE;++ zejg^_nON=`^0$eYwO-<5m$*Qp=sQ0`Zl=i$b#OkOKk#COq_BaQRjP(+% zUEa*W+VI_SX`ug>nf};DL`s7;{hf+l_ekL=4W1(J!PF{U=lEAgt&9=o@3)^#iy&l!9r8K(^TE;IBc@<%JCx=vI8G~a1err`xaPF(k z+1$q?FXG$y6jnALJ;NFxAntaioYJv~$EU%aLh3&>3-V16d+aHI zIawAVM+0r~U;({*kQIN@vLDf(KHN&(v#jQEiPaHlRBzA3gkT9H>0@( zv4Z%VZVg`Dvb@8;SitE#s=ER18d(`SCX)$icW$OUyyuLXUm;C=VW(2WY~1>!<@mkY z_4hE5bu?UlpT@)P7^oUJnaz@`RB0;Q18D;#v2X%4n8jy}1{|RvNRa|Yu0?T70lFYG zOjGWK%X(+e-OD!%Oz9%|j&FPZL$`_g;ZrcqqnTzgI?%t)zPg2tt1QpCu7wB|SE)LB zsHmu zrEX{6wGExQtOU2)m*4A6l`9E1s1hb%Yw4pIMBDrx9N=Wi!*dGzwjxcdd*5Jbb;VoZ zeqS=iW421)Hu6(_|Ia!^w#|dY%mo4t*!38V#{86U?-wT_=-=DPk}7wCDC!k+PhU98 z|4}Syc(f||hA|B#Q?N*qamu-5`3(e8oUWxt5QRQ0%FkKBZz?YEIA*-ZRmY)R&Xgkg zOLoZOx_TfkeS~6j=|sPxPHK2J9-a!jERP7SzkWcK?#!1!iCkXI*<`XD@l;MtYzd5^)bCXD@ia^H3g!fo{JoG9d2JLUp3nhR+uz(H4e5@^?pOnr|bGB8em;b(6fM9|N2<|6!S2 z+8l-sO>i!4y55!O5*Z=7yrs};-J4SjcYRPJuDha0a;2p7pu`4w=z{ATR-MTT%?X+v z>WxcM)zhWrqOzVTri?!{yS3})9b9L|2B_|=Ug(=jR;uRX`>86aNuBF@PKJhX%JsM{ z9c4w4hCU;%b`j)fqtL*<0o2E{6E;Vdn$%62!Vln3t+ujP>(vO!q~CmIvVHou7b@YM z6>0J~_8rtI?-VTgMV;t=aEyn~&^B?1e>N|9vGmR+RqWO665ZXye3zmdclX3ISllly z3C_Kbe5)TY<<6ZSD7@hVW5S=pNg%1a{+{EhXE0%99H4jd9h|hyLt8RTFYd70Joav~ z#$mr#HcWE-<`wkYZ--^P?~W>3>GwYcsO@Y4jy*6On0C}wRzO#+F&ATA$l<)taM|94 zkcYDGJK>M$C}{P*vBSe!$9re{`|UC(eu-nce|;kwS-AUf7tqL!%2$IyFbX)7(o&o7 zeo&o9b4-kHZsCMSa*OaECQOV9A^eND>(=><3Evm;in$}Sr(B}g9ypyzQ4{ZYG@LTj z%H0PIH7|En$I5~FKxb(2p0>;GF3w4yVi+V-FxEO>mmnuaBnu;rkyNxWtXf{KtTH^@ z_twb~`y;37z*o^W4KnyDFKci#2haDnFdVEKPiN0qr0oht(IBJlWk^NR#U;3BW`q3q zo_d@MKaB)6YCjW1je4%a$zWB1wDpVoTG^|5>-}tI6U$bu2i!6GfX%g=D?2F)w_IGs zu@rK>8WOTVlCA0L=p4 z?UArBnY#>n3P6O;z$d8L;nNrtQc0Gg$Ud>lhk0W{H%&}K)jPFZWT5JssMA}zT=;UB zW|1NAQL%=6xA3Lg#Q^No)MfATgha~ssOnEDjy8;S()?_Ka#8KXxABE5aeTza zpH!E0A7O`wN=~T;7$-k0k4{{Q^qej?`TBIXO0`145c_aUrjh=M#BDcuvi81rLrS9aEb~EXKcj;1Dr{4?TGRd6t zN5umbT+u-kE@shF(}KIW&|;9jhFCRM7ddLUYu%p5Rw*3$Qpcj^<-K0o`$e5< zJFbabT$v(*V(@|X!A<(dmtCrlMAP8ddj7Rw9iN*t#N!?{9HoCxcmWg1*dG92;jY9# zcEDoZoPs9nBRM%FGmt+dlE+rU3t4D z^bV0sZ<|tBod^kU#;|1i2o~KrU{k<3J&dhrVm12I6>!XZ3;x(L}6ZWn*S3(n90VsymbW-nmPf1P*oXv*ik}@NybM=L`IYE^(JdNOvzK; zEp|QL{b;A_M9KeHGk>4$O)p)MiWy)Z*i~YCh6@feYHDgU@<)ii=PdKOKyDF?Cihaq zvH3&UD&DPq=mCdMWUP59Sdzjt4g<9B&>be_=7JMj_P{8gwb5l;m9BAv+vV&Vp)6A3 zdafQpRTFs)x`{>x^(MosCvh&*qdOtkLRHOgDQZLmYFJ^FIK@rmY_T`W`ECrQZP_j0 z9}J0~JcJ`IxUP^w%0mvmKaj%awVK4Y>h$l)J{0Vfs~EX6s#se98AlZvl%3NHOwy@V zun&IuzSgcz6fIi(*3@QS 
zk6d#g($vM5NFweRO?VOeSDKKh1<-`}|Dp-s)fL6hAu()~SRam zL0%0{$16PGJ~NXeeS}XZBpPu2QA*k8QhO08yD#sl6RJkWEBu*78^DZ_d zaCi(;F-q7im`+Sg+{nW7O9Bqld`1>dup%ki)B)S*5a1kJZcbdypFAz6dl{e@Ff3k* ztdaN%5!hmn?9WZdgWdj_85_f!=1}RZKQV?&PyTR40>;qk9~i@+KQV^oDu*h_TQo+l zQsnuku0t0*yUH4Tc!q!&w5#vX*D15^q)nEH%`BuaL`xsA-cLh?fK$NT$qQ{(@c!lE z!9r*tk7P}$Jfe!hK^MSMyyzc6$Rl`;Xck6(hh63@N|}?w+9w?kZ}xkdlp9PW6v>MP ze!{O>^e0oLd2~;DvdX6d=T2k@)s}lW@F*t`3^I;oU>kD!^&H&pxPzGHqkuCP?a(4y zl=A_PDM;$_>C?d}CnwQVz{UFuo$sf81Uj&xBzKC3alP|Bri&2?(`6y$6lu|8c$Hxi z!z-A59WQQ;fh4$Bfv|S|CuE4gt;sz{66q2jLB94BHE_~POS}7t!N!W7~gW& z7+-gDM}H@4)^#rJd{%*E;@)X0zbQ6nfyV$1GmO-%k0_OVFEukA(W)w9qGJ-R^J#~G zg!iEFHY0TX0~!iHl2%L9z_#yuW#_j`c1lpYVB#cchNO33}7Wx9=6 zC2YFK4eui7H3^?FDi8rh5Z`J-@Q4K&zj;Jqhd+44beAh0Q9$BcdZT|eQ|);g<{(R}9jaLVgTT3Kq1LzNQpgkYnW-o>I;A8+LJH+gWLB#RX+~F|Qg2f6L9}k)a>5cRVSPFt++>pGEl` z4|m_Dc7pcBs&YTLF)AjYco>eJxw?nnJ@1_VOfDB2*cXXJD)=Kwvs*1Xh3%TiH6BM= zh((NAo2m-R3{AR7Q7MXD<6pP@adYhYQ)2bJ)6=NbJ-8Cp@apk-4HK!P*+6wXLkGNlYW4#)Nr3;C{=$_2=^$FL%JMcHy3?C|9vRR5y6JE1|rRowslK$Lp zbEvnCWSdXycH7sA5q~%+?|jiJe|!CH+?_dxe(h^*Qn$4S^cuvH zF%7A!cj)mbV}6J2SWU_VY8hg1_8BtxiS007-8j}4J=D^!HQdtJk$T|(Kj-xkd{~_= z%$`w%7<^_jX#w_x<$RYA&Kyx;7L6$%6ixS`xE)mQInDFe+w3({Z$v(M!mwkmCB6jg zV$^+lYW&P!?4tYLeOC&W>iYA@H`iIvo@)%G`5oV2(PVG zbFmJqJ`{3#q_6kVDyUU$YWk7>%~(`OpTzsmDSV^jpva@sskcRDhelH+Aos=?UN_A!NKz_1>C0I3>?B7d#T{0KEse@ulA?lf zwFM+8q^O)G;)&EgzjNhQh$)RZoQ%`TRFtlWYSa{khJCPkv{C6lD5O-ukJJ*XTkm!O z>WbSL9|Ax+opr&>n+cu@F0lzm2cO62NTmvj^JRH!?2}$87>D=gT)6L9@*emj`6Sq* z$h;IU7CMMag_AX$CXVe#ghrulHh>7)5w4cQ+xKB-YVpFzhSq2=quR4N^{5=2UHbkK+!lNbt zi_j8(j?J0e{#Ri<+3xXP`}T4Yh|jE5EoRa7$E=JjZ=ZYHEn!@}8OW`Q!u}QuPjHG1 zs#z~1GWz0d8Iq}vOZP1^Ejr-i>XTPW1fH{1&j)26J3OT9$igoRGSJe&K78e>kd(Ak z3|iI><;@d~jpeG)sZ{m-9=aO2m#7XvqG3pQ^o@J>EH^!G&pq!B9{rc1_jR)j$>r)z zGxTI%ThH;%;rsA0)aL z?w4n9>k~S4v(+hUBN$1p|HeqF*Zl7p$xP(uiHAWO(MBO`N2i_7K!%Bp1b+PD0h2Le z`TwP_AVaTLl%ph)RoPro&jwUhFEzsL!gDcwi}jV{TqjmEO#+Cb^DT zebR3Qq4s2~JArmp_{R&-?_@^IV5PDYcMq0%GxsSdE0&JTi(@x}H%1;k2`F$&Qac!Y z2qN8V6qK3KUqV`|Up^=Z-~8Qd7n{9xM+K&&rc4;fp3w?ih6Ed(x}^z3_p4;Z_xHBK z#&t{Gg3~(JnBjako#<{4ttu23i+mmi!EDoxg%`bGHtM4Pb;*e@WCO>bljZlG3TaV2V%T9xd8OoZz&@oM^{G(W??vy)NSQ1f3Nx} zwuD9a#j`)ZgKMw1yXIdtbWBdn>>){KYXZ<$Tges0Yr}i2YW+!6A9PDOepXNP`|jok z=FZ&ea`h0;A;23@!gq|JQdTE0uAg(IR_XkKw(Tz23wAE($F*~}b+2npJ(s(0;%(BW znmY0@794Uy@IKm2Se!ozi%lVkTP^1_-&Nht-!a9efos6=F4C$jt#(-8BkiRqkV7OU z#He0sUFw_}Au5y=2xWhmbdZk=n@kwW&NB--%ny(lu7R0pkNpkKpPe2p6@w&PhfODB zhOgN7-@v3yliDxtccu`Sj%4&?6WM`E*#W21wfa~~r(pf@-@$jdS4~jcZBg6YSnCie2J7mY zdabnzpekJFz8%5rSbMi`G3!r`Lwioc=CmKF#aP?i?hJ#%C=et)X|Uu@zP4CwhWFB` z!t~5cVSGPzNZUmX37W1*x@^tX;6o4h1v}i(U*@;;Pe^_JjZfR;>qGdF5j;9rhbr#O z0!GC~dE4XDL&lu%$aGI0aC?H+u>Iq7$jp>lHM?yg{gAL0F=i#W}i?Bl)|+kNjSN9Y3o__g^#X zZ(Btav)RelwpQ2I*SDs#^BIwK^U+_u-<*xu23kJSP$aiPaCp6#e=u)Pe9)rdN;;+E z=SsQm0#-Bh@kkDC)p;4z)QQq`PnBD^8EC8POm~g@B;NQ4+zNE}sGq$0AQ~=%m6CX+ z4lMhrqm|w?JgcO|bf*D)yLAVW@yQ0$$YW|%#pJXpFVA&E+5GVMn3UTrdRqoL%yfaM zC8C1ui#iKy1fCIU$F~EqxuYvglK^kQYWe&7!+}pU(x1^uRFG{z&rlitRVCSZQE~Cu zfYqQX@|oY@`4s>8)m9%WGIBDYznCs;%ObcL-nq9D=ZGU|3AO^CWp>k?y7w9X6j$G? 
z`+HoSaczu!rRNW6;c>1R5ud<>-3k-*8NF{;R}0u%PLb-5>+`RVU=Pho?q=Lo#Cs|M^j?CyN-4 zH(uq0XM(g2`2KIHfBX9=o)&;KA-fDn1=@19U`LL{hC*Q0D0b(obcaYxh5U<|MWp$sXoLd zt7VT|S^PBCvZA&97Zds#Udzc3y!oY@w?(@Cw)g)K>13M~Kt-14z1fs7V$4tv@~Izx zI2CGP`%Tk;fKZj{kdfKoa13kxmb|kyn4m#&(B9)W?i?=+&<)i3^6h26#rjLSJJ)7s zB>#?ZlCIzv4w#p#GjX_S>0Ix#+wG)R)OeL%HDXq#&V-*Zl@(<#cYVGrnuB+;?+KAs z<>8=?R-7yt#@@l__>viKWl!H(P=Yh_s{B6jC@U^^@csV#7p6~pDo2+KwYN6M4;w8o zE-g5FR@qN0BNi_2o1&9lqT^~Fe)Q5=WsGrv1lqN|j33I6N-0?2`8txy8&|^fER6lI zV^vU>XU1@=&Y__WvY%F;+dse3^`M#1n-c&$q=Y*ZS|^K#34*3B+q~pN>yZv?_q*y` zqoYe*KwnZ}_~%2dpJ>yGlK}|k^Sy891w@O|)uoqx-Dt>rAIdO)|V zrg`;r1*|ax!sco@SeCTQ)f!lC@uGtdfS!Cq0w{m+*L&XN(}Iy=gFNal1rJ**gd7VNG#WUp;tJ^*Gc&UE#QzWi@}g|klZHI#tc#| zwAHKoONHI;Zu28_w_8o*v%PAu$@53$EivufBUa#LKQf{83jR2z=ak|C*JdGxi5KCqCOfql$EB~EwuWw0d(7UDeOU^ z9c}dt)CRT$XqN>V^S>=M(8Cia2*fIza%QvDC4C)_PyhiBaEI{*Ql zG||DaqWhDER?7W~l6{0_LpEeyQ}}cK8>fUG7H!yi|Enm+dl<~OaX0h7*HpZ0qf)s8 zz?CU$aexoYUk)3-m9%J=mj{Dy4=x&VtsnR_a{@0iX&%z;LBzu>f=VS6y zXHmw+z-eBmx^LXq`6WG4H{z`G*hL2@*Tb}W71Htng~@+iQYK5y_qoAl~mr?-oPi?~e=s*ZuI;s8y;Lf(I-JkRf4 zXa+s~eB1efDcWU3#0Ph6Sb}xSagqY6&e=Ohg{^ie&sY5sYsnXHeFU;<(IEH^HhbAy zR!H>lRXKrA23A~4KS{@IF6OXzmm6Zh(VP5hDW7MJC@@}PU375Q4$r6GTjVq}D(KnR zFF!DgzoaO{ZJ{kndC&VZF@Da?K4o$ph`esBrvvrPT5Tw#Ge6b3q`B1#IjUJ(ki+Iq zUzHC^L3LnAw@Qq0ndiOi40s#5vji!c8gh-5Ot~vwKJr|8I@TEK<0P`y;tK@i8EMR; zrW%8}bdN^&^(s}HYwKYb`IZddmqVqIUOJO}8y5a=li$B2k9yvqzXS-m8rE;d^ML=) zj3>2EI6cMxhVhI|@pgq4-QUuw-V!Qahi-AqtkhnnQ`V^Eqb>~p30SbcxO1^AQ{ z9jj~Jc>$9~Bu+0*dn%}3EKgWGipD5^2Fm9_2j)9(bxdI2zDy0PN`h*ZPc<% zFrw~?T%h?xWP=dOq7 zWzne@>uGJ5+5|tF_+6)&E5GMIGn2D6 znu&e5L&i@u6m)NUQB3Sh0>>k0GiJkg@5l}7wFUjI`vb|Gnz=r`7x$tZ0)}k z-WAolMP!QVG)*7H%OGy;pRPoq=BlY3xfqxjCq1`$bpL%@)lyrNjuzI z{7FZHmd^56S`i4o-uuQBE_qzP8?))*6ljpoE*(aVjaem654$?G=eCLU!%qvb`vMrs5qfLeF+iQzxMUaTeFAEQ84EI2WL z3EOQM1{G`>Mg}4(C8TezsO182eigZy7DbB?c2_^q@i{u<4jvU*Jvw3gY5k5d<|t5kX2XHQAH+lHO1ny!-Boua zVa@GskSeAVhTqln+Ve2+tU}(;PO~`nR=FTq`?64k%4^z9j=@5m=(L-+O8Rj8eE@}o zQJr~sOqNeCtRFC!4 zPL9v*!@3<%^0H=mG!JT0!d^)?+y_^g)2dVba;!IpwX1@4y^+Nf(ly&S&StWnsB1{0 z5DlY-#a`w=^mS531_dU*nW)*pfw*eQ%!0^)uZRS^EFuBVbmuAoKU>R;sMjNln09w*1szBBjR?gD5&I0_}%3vuZzMbDf4wu7&Nf%+caaU zusr*u{!A>{({|dBt+gKovJl-zQWSvFb0b~UFcnqfp@~|_dmxF>D>yJM$QX~;8u+$Z z7{!h9u1ptuN;DBM>?6!Ajlu{lP`ZKpNU0r$Y7{-d4o?BEL4()_8oBAdlVOQP+%?OG z;J)#a#|h%~Rr!&Lq4SwxET+tWXzna4_3d+b>Y;BSS<}p^arX{m#VRB6N%{!5%Bto1 z$l$f1cX2l@sK_F<7Yy>{>aAZ^axO(Uh*NnLr-NCk>>bv*Vv(cU5;iYNmMM^4% zN9*285I)Gj;WTByQGWEuVc2zWm4vE@hJ$(&z9w)+}IzM9*T1QC`TtE;5 z2u}RS;*?95Rrm>7?Dp(>bD%%wbO{P0+&Mu!6x-Ps+(RhUE+p*wHzsk9j5+UZoJd1L zLwic~l6R+vOw+HnF!GRw*K;VtrpciKgqgb=6Id1k4^lqiVV8}nMMtaTuHYJZtJ24+ zRej{)SG*a{Bcb{wiQ!}_H7{3@jv)#|)?_b?^!~@Q-TzvqCwna9B3<6l3S5Zh7Aj+E zDk|*ehr^jwoJ)@(Q?&=T9NWyz@|5ZB1)&ytrYgFy8$x|3BviR8b<3)s(t*r_F@~Mi zkHfas09Z3vwoSJn(7`)RIbFH-LAs0JE?mbNGGlv0~%<>$b*d0dB0i{hp)*MqnFVl<$S8C z;$NK3$5#w`+;Iy%K0fBDb#xT!oRb}H5t(#wTEI07oGKV&=Ujh%d#=W>Lo8oU{OHb{ ztV@2W<9e$;x^{fM!qW8185gN5)W+qtUjzwnt|ZDoW3G!SI|PTpKLQx-KDSjfF&z4K zHqrHzV&#fUy%TnT>rhwSc0+}@Fvb>7GqhJ@rcCc@U;!#-lE+WK=v(>B#>B$@Z5cfu z!IKB}7?9tPr)CP;E06u2SeGilnklI##`4cpUu67Q=lDPtbWQqZhy`Xj5>f}l_2r9? 
zKl^89=(@`=2|*6wczb6VT=vKJJZ@1nvP(vvphbb+Dk^r=)3A5x=4}&*RFl1{9!g(Q z%gm{#?cLiCaPb$P#!J{eEQ~@HEF}uqgkDj!=2%x0?eARt-zeITrqBL~qKz2)gQCss z?Zsg~@klN-vtV}E4eeXKP!bO&$v7KNq8`yK)FjlD)uBIh1LR`IoB827Q)`>9*W5R( zelDFKe1Cg!mJ^NwfyIF>m%`g3>=D;8NOLcN8-g{QW1<98J*j3z3 zcoDK6?=<-0XA*tAs?XP;a=|#Vn1W3A_qD%Oi*SzOj*NpOWaQa(|KxPwXwzQxP=^e!`o+lD3;E!s{d-_K>^V)t_^VSj>4htMFEw*#`g0^O{>K47e?2il1rYEyt9pW9^ zZgfX#LO-3KDeE3{W=m%}6;VeA-;Fz1_HPvl--*~wS`g-Z1T0yMqTwX&NR=x(G0L0W zFJvh{mtlg4WgPa8GQD>;?7GP*@b}76@3xO_Hve5TMt?pPVhGmwJ4tlDn!;}Um zAB7l=b)#$}f-&Q9W76F%&9R{_EkW$H-t$J^c8A>6jwTZ%-_`%@D{|}}4pP?A)jd0< zP#H5burx9=GfEuT$wu-+Muv!fhjoR|hKw)KQkZ*aT;1iLyz$DfL6E1g| zcut+#aVgX3YhW@3-i{Dls7>Xxfkhrur;G+JK{Xc*jfvgQuJ;yE zO^+*=b1Qp|XRbzXWD0|u-e4I#n5HmK$_v_c_gZWZG!(g|vb8E9&ze(Jp3)@jk zmQ~(d8Bh64*j#eku{mmpeNY)L=CU3&beLg)=BK|OYk4uW6>ndgtY&3M|GIbid2_LA zGk-mF?Dn}x`f5_MSgxmR8@136i|`$BGV891tpFWDFmMtaWHT>)egyA5i3t(2M}dgb zanJG=Aq%MV=o|FTlGaK1Dj3bYIqzNGf6vd~$(6Wi54r5QknV+Ju3kQ52zab=Tf;~2 zqEhIqyKfK$;bOk%b387$Z%opshpf%+`PwFWoxq8eoGSS6n;Utd;p(E@L)E&CSI1tT zEyZ67Un|PUIS78O292kCS53-g-2=C6*iod+P)n`PoKz(3**-pa7i_9i{$g1F^@mXr zYpg=!x(dC4VIl3#kB0~Uf85O zj=_p;%+b&9gCEVNoZk6ZFG`*nw==f7G~76ft2M@x1ZDOP49Kg>D+re!l^CZiv)Y*& zm1GrPAe}7V6}|NVh0420tQnF;w>^o?L@}sBz&d+2Ar@VlXuTn?!}c%cUV4+NnK4&}VJ8Z`bPw=j_lvlU50x9g5Fc2zJZiZ+wCZ(O8XE zk{gQ}vU|Fi0@igZ7$)1-l9?%Sn5xQi>rBTy$=Y%5hJ36LZr|k`Oxx~KZ+EPC2@RXp zfIo}HM}dZY>Vi-7Ig^FiKYTR7$ zLGRYvvDbDO01;6X2sIy9TB~f4tB}i1X4MQ~knH@Dk1S-n`e-5e=>2Q;>ge{!VjBDA z$KLl=7Dcm@+`1K0r^y0IAUA#rlqZx2zTI)Yn9#ey8xY|UWza-E!&X}G5o?!Zq8on) z-qaORV6_I<4zl8oXP~m|8YqZRfPn+{qL9(*V0d4IP3MRHW^y*vMhZTqsAEf%8@|QtQ~iy~7!O`?+}hl$6DD);VFPAUig5Wo4J^a`58|1Ew!|DK;Ks2j zBhjAr{<_RD=j#B3u2TQm}o2ddF$an3iR_7Ry6Cxly8`uy`l19`CjT)8L1)pakZ- z?>KdD^i3d7rZVO9elXZV(=U9u;NWB5eZ5#7E1AB_jU%vKvF&d2y^DK?95+6@7Aa_< zkrU&4Pz+k6O1=6XZj{ubs<3PIx~9-H_Q>{qKzI#yxUZlyv^J;zNxqM%@oSv&pik}Q z*13gDJ(Drf8JhPM){{t^Qkcr`eSB7WQu5q59=gL7{X?ZL`7Su}H<4F0^wX)~qw`2h zzvtn??5K;gx>1UR<14tNIujKYh0z&EIb=B?Z^46teD;n(0hT~e4|Ps(=vu8!muxGM zh5XP_l8fY~L2bs_(n?b54L#2{_%N6s_}UZwaVTs1%$}j6kyqm`O(7xF2D)}bn1wH) z#;fQF2_-b_@#7J_>^zweem?rx9omu&)7XQwnIK$;yHvzeUsA=xZqu$rKdkiacl&JY zUC=a>6UWT)eOQ@N-vJU;g|wMU^cK!58IJCGdBs2EgW9U8L0qP5Ra#3WLt zzP*8UVe~Px<5ql6FiCW`V~SGwkiN!~*MM|{zxNSzJ!N@3d zY={fNfi$`>Un>~hM^kOCZ+;%L=fMZ0C+hYMzUa5X-k{3Z5r;tP#DevusqXI*d@h=n z%Sgpmdaob=-Ny8Xcwf_~h=_AAij&8&kN_)#1yZ)#b+`%r9FtvzEvHdV%pVIkkyC=j z>Wu?KQ;g5_^in3tZ)fK0`2R23-a0DEHtO595owVw0RfTj29<6>x<#UI~yF4w|g0dvin``r83zwPM%P&S|2W9gCr;>Lf( z&zjU<_v1G|ho1;R(%!(Q($N>0Q=8F zkhu}tsDieIW!RP@4vh&nqn)hD-6Z+b_OFO6HN&_C@>SuYwHUNIHx8oDA?4*R;_u;x z!k_c|t0>7C#+(ZikLW_gQ3b#I*`nOOJYE>VTYGnEX5(9cNA_nROC}%|8i#{?Bf>zC zXVYI&Jf6aIvH#E_PM|P7PG?HM5~wqp9x-xnDzW9*+Yw*wU0g}H8=ZIqOXf8~8lWha zEk^t35H$VQ)$y|M_LgEU;9#OYe+s?2rZ3o4g(k!Aras==I~w)Y>YDw3N5pt<5pOo8 z@vGA>5GI?dr}T?j?57d~;Tn_1K-P#vs>q>os@`zOz%hY+)P+s12&?B!;PB=trk*CB z%K@Ef#GQc$0*Jw^=zqcGwID;<2l`y!h!$3Cj-A2{S0=s669S z){DhoA&(Xm&fc_+a0s=5G#>VU9O@>bUykTGCQyd&I__+~}4-47@I4CQe4JpUWHq=)HDp@!nWF_qTP{hH<9zB_DAL#IdJ#Gov*KYf7) z;d$H5W#}ieJuH*|yiQ-cBI(`KX2qn0H#q zyQx|Xd?1wZ?TCPT7cJ#^f=$`XyQ7-l7*C=2jSD8spUXGnXx?e1Ig?;1+)cen88m^x z)Ha8}%ZIDMwe$s+eFk+CW=Zx9Wk%(+NXR~TUm&1=PV3Il$GC5s*nI4(l&jziG90EJ z1Q%dlwnvj|2EC+snxOp(7g-6xWaH_?FW3hQ(N}*lRCM5*-n{$S6U*}q;= z?P(92nJJ-xnoGiqyzRA!^{f>GDrMlxY<=;ff&E#fVrHtMWlyx98IY#I7GomzESp(k|UVWa`M@l@q1M z{U=J+++)CksRY$6p&&@aL`JLwB({e=!>tkSs58)IJzK?QS%<6v#WkO^}L567)+wSMeuCzCG$X;7bm5tI?ur&0Su}E)Q=mZ616@ji|-Wt@xy1s z$W^UUV@}2bLZcKuNbK z80-oTvtp9?AB#d6=7JVf%G$X0I`%E)C)60+91wmqh*5iP)7sZ4s_$9la6q9E%2M!l zJ4@mcJzU*KlZGY6du3X4I|oKfV!`5*`^>;HTig5e!qNQE@_dNZ@WyE~Yeh8oNb3e! 
zQ_m~E(R6x_?y4X@HQ)Wnb@~&rbzP;gx#uxowoaq@p17{^o68cqyQ;KhA^b4Q>}8#n zYqEzC>ov`fs8<%GL{1is%-b^1hh;E5gXz91;_rrSjg#u<%B?br(~el_xvE%{N7>rF zKL?H^Uy%>ywyXJ09vRj-eTB>E7e+`P_2K~;lRB6%qUndyJ0}fQ&xFtY6JWKp$V(|W zHPubvgJyaL$qS*Ca!}eEJzy1V9fonMjZl;y!MfN>g=vhxmy&spEMiB z&eQU8JU`Y0*M~FO@bWpQ)QAU!|@)EK#Dq90#qQ5LZ z-nTSnK*29N>XssRi~)hAxQP3Gp2)M*$6|!rW{YOj`Qq`jeL6aEtv@)02|`DPu-jGg z@;sdAdT+S4J#`u*T~Lo_{5Dlymf%(1$gI(yqZB`?G)iCS0gC={(->nMj`_{^Y+&-y zOXBL_(?-(Z2rMv_a_D?${X;(p4?Laz4M6B2Vb#MK63Y?swjM3Zuk(B=hIx43%VFM| z76C9-1b4_EwrO)T|S!Kuji-b0^mFvV7=W>xHo2S`Ngq8B6n4dCZM~QV4Lx%Az z2L#os^1=_R(qc>W7g(y*2>s1%Ti3%x5hr~PzETWsfT?7ry{5H6S3NtAe)(@IO}FW=xdy{rdXZ4fZRZcF0MQdCDQL)_s~2w$=R6UWb9ah8c@_1dhyspY#r*gpv+A7Oyl!nYAXl)ExYPRcJS?`vbW^JB$0Sq*uk)he=Ic_`R71dRKCQGwCZSj|%*=bs@tRfxY#IU7mNwcn9p$W!f?!Y8CbTx6Y*9 zb`dGR<;`vXgnU){i8-2SJRh`aDsEDa_D{>+S5ofrjpMo*)S^_>FG6 ztql>fv+aS-+;`Lc7&=}9{)B{>baD!yWlU|s_wbDs0K-x1t1G=$$+!CP+)Hs-9C=C+ z3X5RwY;$BuE{nek#p7+wdSKEWRB|S)+3NKkiKDGV7l)F09C8>{F8;td6vvV1;~Y{J z^D$`-%!v-RyTA}>nxAt&dx8bpw|>G0Oj~T^7U4rZtz!F+g-assmWqX2zbW?1w?AA;UY5g^~u zjnU1wH9dv8<$aT0InNQqCf(8hI%$G}JmOm8t95H}ZYBGbhwxHL7SM36zaB~)@RTpR zZqG{ZKSZDB%4~RlK=5agJ|`FWyT$FTf(7N4V*=cqoSf|JU3=2CN8N8NHd4;QTSy6A zh2j2ns1X!`tGH^3!3D#cDTE{>)Dfoxb7=}NNvpwZaXf40p^uUta|vw(I#bF?z?A7u zb8cKX$W<&KpJ|9<7*=vrj$+u#?y=k{EVW8UZKLqdONU&je{0uA+$@Z+?0LMegugZR zZOuQiC#m;$1gf|uETs#sr_5yO@5+bL8UJCrh_%%#v2^x(v8?_RQ|Si<_-Oa8o5`y7qN7UCZzFK{;4*xB10ZI%wl`nHN9U#wCIGN_Mavxcvd^DNKJ z*WF!B4on@cRW2Uk?B36i>LKNh7rRnZA>8k_F&HRaNfhh>$J4i&cL=7jDcDn2&u5q+ z`g8mh{GCmSzvJuzXTFP0ExrKTb^G}6on{h{pD(|FDA=`Sd~P^`r7CangQG=^yjTG* zhR8lZrF6Y73I|F;J$jJ_>gT}y08wFJ2srAVpzwpJz1elz?`E0z;9~!Eb#}Yj8j^|aj7+o7h_|6u>^-P?Cw zbQP{G2=Opd)52MIM8T%0ZM|Ik&Dpif%E@|xbH;VjIW^!@!@P)XNaxDia8#q`GghNZ z5IN_a3+2+DV!{>lveM66v~US|rNgWEI(8{!a;m0cxV*VY?q`Z;Y_1IqrN!-71C56{ z7lt+6{|<4infycgtJ{Oab}t*O*C7!nXb?&ZY-upOr)%L@VKSSZjkZ zp8X`9UIxsq8IGqZU{u9Q**No(HDa}NH7gXhJ%88U?mew%6Z8c!3O(HJaZy-^EY4(q zm)#S(GZN>%DkypF@TY62!Re&tq-u4GtJX6VbJ4KlK_3HxuMN<0Pmc?+@88i>2$B(> zQ2C{aEg`XsL80{bG`wUKpO_#U1V>5G=$6wyiE@<#jGrc#Hj3WLjpP_ih|`0E#l^*zmPJo@omu&}H*DkM4wF0b0CsIr!!PV5e8*mH zq&|%V1c8B;e$cGy##WV2$1yQ^AasLyc5&h8a=5rWmay)Z9zOjW>w-+gZlz;dS0JdG zo{Fki(4GZ`U=!ztVYXP-|OJn;=INohP(=DLO*D$ z1D5S4GP|mpbo}`5<2W1E*MXkj+*>f8|7g72j~oQnS>8KU9iXMk{86n?UX)N(v;4aM z`@W><+}zwk^Rx{kh>wTr(z2?|U-%OUw1g~6Q+2bXnh0)bcviJ~dXNru?yauCxp7j} zA5A_Mll5zP&H5{1sG4?Z?^1(#+IwBh8K}CnSa@ubpzMbly>$tosNByzLrg~i`FdHa z{OCdETb)K2vi6gBe78*iUVj%p^1rl;O&A<9?*)UGu^xQsyl<@cX8 zFh0N^3q1J=Zm(Q~6u*oXZ!cE3Pey$(K0vw!ctPL8AAi$qGJ5(Q`0YLH2fG;k;F>`j z9ZvEXeE$1fy+ow}Z)V^tpk4ToyGE>DFTTL|2!4&w*R(oGu?kb z&wsoIz9xG9fBZvVXn(x6k!@VCsS?mGaVWTyQ%PEPE?79IRyU#(Te-v?ci0V;2+6Cj zt+iU8um8Gh+ARK%BSkz9Tm{Ir&dpWD4`q0JQL_obYNtl5%(*Ax^UCyC`@tDU0VX*W z`PGi?9!CLHklX>$d#7T#5NSksK{ym@;?(tQDJxMu|azE4`YUuKnc(#zyF%M){CdzfH9IoI>?i z_f%D#aj>dMeF#Dx)<5+_XBI3Bwzgl0^PlZ^dIULFw^YlQWFx+<+Pj%;=y$3~AhBSM zWnO`+o}iWbYYchm-cEv-+qn?)iTYYJyMu!**L%Y-YM>eB`0(^+e<9}+&`eL&o&H2@ zFUAac$R*YoGJm>b)>e8Qt{^jsu^dg5aF{af^V0dJ{c}k)|l5jE7iIz zOW|R@U#uLD3YgF~^a<6zWsqF@BU&=-4u4-M%KprbrR2UGPq6Lo>S~H$*?%0sda5zH zx6@%q+O4|-$1(|B>RE&!5DRoH-OVkp7cqr|uG~mWWBS*qRZEw!XTBQj@#!56f5qw^ z?>odgPCsRl_)jaOtmdsWgv~-66vdwNwv^3d*7Ix@dQJ?#RRm5KLy=|-*LYUM=$vcKe{jv?D;xV_t6C>>9 zHJ@kf1+kl5gbCxg9=`HorD*>FtVTd^?JgUiQHH3c>7;M-^vYuuq}K6WG0|&?EiKVdh_6;0(>&;};ibTqBcJ~1)MS*^7fnqj}j`ke^voD7&Y z5kKmF<9Zko=L^xz0 zM}A(*q|W%;k@{`3iQZB9H6XP3s|`dmTD+n{w0ij%DbvEF393=)y0H{Ne&f>zJDX#n z_spPVC58~;RB6C(P^K;-wIUV6h3=3e1=p28!ndv|C&UR+AksxU6B5>G?tb^C<&dm- zVF?lvkfPp7$=$u4+flSVQ|P0b)aVj|AHTI={kOZ`jEL=J(?;>Ix^ zKqN(g6py4xW_Xt5?D=1OpMx>i82xa;$*gH(WjsIO&^jk+2A`mbTHm*-ry?FJZT+wq 
z+xPGkHa*Ul)W-gwT5jHbMyDV-4aPyLO^(=>HmhU0DXVwW&rd+k+ zy+&-8^Uzv_Y+OAmKT0syDw?=e)pfnPk9mu<+MVBfh~0r8W-#c1$*)H_k9}K#`>yz} z-{Vy}5@q~|{=;cZdo;+Q#C6oEp7r*d$GCc+!$%>>%))#(u^QOqHZu<}_3^38mjboo zK7UT`r4#zJ{4)nZ=|q}Jw_a=NFaBX!E7zAGKj*#WI%ML<`^ zKaq6;1tMc8Z@4?7DjS=2n^sd$k_Q@uj$c|*oVZ~tQ$z-C*)sq&JiK7CbYAAw1N{(R z8YK$aRrW}YPpXs|}Ql2hp)zNjRSAsN4->A}V5zqrEoO9FCpaW0^LzVWrHPVh_#_ zz9J%)AY2R+j`rrQC6sGBde(dc2NBQyl7k}V$j0gJRvV3HxAz3v04H!peHUz7+KD|I zkl*c4JT82i()d;WRa&Ml5dCPeAia#WdIKr|#K=G2jm~pwAK5I)bCY%-ho51og1gVq zATuWGKDzj=Am$&YvBhXayNd8#GAd&<_m$WVI@(oieDJVa%7o$Ugs8-M;I+A zrR-NcX zO(j!SZ7c2+Xmei0YfC17{o_jJKbLnO*IZ6-od@2wOb!qr14*ExYvt-pz#}`!C^T1u zv4(Ht7c=d~qk`eSOjn&;7|(D=&$(q&DvIP{nhlQUlZ=$uN9$Xjz|o6rBevRou~yOpR*hxQ?)U0f~`+^u|_iE3mT@8E95SM3=J6s2oI21Y_D zuLO*K7_3_}n`p00&&SK4hj8(leC)_z2D^Q1Kc~Gt&xa0mB+(8tUk!yA$2dA_)%uz4H3s!wG=@cJ5Qnv*L3tc+rL zu3HIqvs6w;E3Rf6`THH?)CSl69GER3E*+raX3EnXxaQ*xpG)S{vGhL&hlEt2hv!KX zykbA{F8=8g^6N8pB3Wv@IX|(3dTy+ZDc9E|%Pap$N$IC&n78lwgE`cWoI)Ck;}(8S z)Y@M)Wg$?TUF<^q$xqQ; z!jUKVG`L1z#~ZUiytY)jKfSAxAL{;Czy76Fk9d}Bx?9ipQ6+k&@{1ll(zdLU1LA1H zq_$Wk{X=7!f7h7`VU-(cdfO(YWZ#lidQ@<|%vL{qd85l$ zZAtRg7%KfZtFNYD zAS1-DRabT@P^78lHJcP%TiAu!&fms?uU^p7VVQSPC035$L%QB_`Le~#BXoxAS}jO( z(Z$E(?@7RgFa(=_PN>E%$nache99qBk24%oSP*h;1X>E>*enwyb*29F9y@DtrF#D{ zt|Ps_x=4n=__WJPcL~h={OJ7Lri=?wa0Z4Cdp81S!W7`QTidQ`6?Www5=jxW#A6P9 z#9MaVH#R0CX|Hj>&F2Yzwwc%C!76>^mkNT$WRV<;A#@S1ubT%xD8nfAWJT=h z^54l`IvZ1>_ggvGgIl*xNGO>?-r>Jd6VCa#=Xt5X|CFYQyr#k2b{hHjPV9be?6%@7 zat!UEtwz@ILX9@lZv%kL`v$;QfYVDOuV&H?ogq2xDfr&4&s|fUD2IR2g3_0Ai_+*&n}WG!eSCa)@|2BZMp5y(3r0a4{2jnoy5Ndwh|{#FpWlj%00aGU2V-XES?n6e|v>SO7xlzG$F^jnC@qQPvl+?^(y!G-Mp>GH(nk&0!d4){pUi7x@N5%-14Ib# zc2-ZFkPwUwNLw)H^zocZOs-GP6$+gg5F7!9Sc7&_f5dMLE&KI9=8Bl%#Ci)do!2!g zPeQ}xJLM-1pTZ1EM@Yp<5?H%if^hhH&ahH~OG3M7?FccPXgbcvK4>pb6&by{tmEUL zMkZdOU24>_PuZg3PB_P-iVa?iiHEV~CN zK%4>`0`A1JFfo*Y%PBRYFg{D+NaVzv)}%r+t4YFf(BR}`e1+bJUW4Z6ftT6Osl|J* z{6kk|Lyshh1o^_p&^;Hw?Vnb=Eis3H4@=1Jy(AC;K-E!ywSDXp;SRx6?KNFT_|F+U zl+I@;z}-Zf>DD#;$5A)tS8bKiz09d#rl5&r*23q+^>gY_tq*&rT8K-mwF^G3b$SY#t#ylQ&n56uNQ*kh%U0!Jq&DnlPB?pGvs z;ggy6_3%FWw`6?_e1x2lkdQ-8`B(%o)JO6!Sg8W9t$2r zbuIcmAboh0XGmUQfN}oFYH!=Th2dr&<1pVUWB77U3nmAZGG1G+DaX=tRq(L>KdIm| zmo4>_Ld9mh=QRsd%i^7vL4+*9tje*ewmn-FfT-76Bk8p@>8(M8#YYC8<+!=OVh+Fv zK&P~=G$YmKTnbq*!nV$P6}eHb*y00Z8Yx za)Ys|0lTX{eHvcYBEA|@%PU{~|)#++p!lDPJu}1-U)^su+6J5+1bjomCIGP;6 zTR$0t6y+~(amk{O27bKNAj2=s?Lt##xaM?~MgQCw4f;JTQ8mpGvv{-Iv@LGw$BY<_ zIe%(56aI5>PX|?Kd-@OvwkxH!%Dp}Rm&$!ay%v}$*>F$gikO^x%i8=GmCFE7xjX+s zFzrE!_aA| zif4-}0i`)=eI+xOr16-@zJxFIrbf#w9Q|Gxs4Em3IUBjTM4ILX<#tOTf=Hd9^-B=S6om$5wTw<~TGS z6h&D+%(Lw|YMn>HQ-m@koI8sV5Qn1*09?(P7;JX$J$ICD3C2ry*fXqTq# zFOjbjZF@E=MA0B2;}PN$)>az4QO6;|0>NJ2%)J2Ma+}+Qu(!Bmz>um?EZ#eh)kO%s`c74p{{K_U+TBi z9k?ri`B(IV#;QtoQQD=@{3e3&X>P9iLd|+IFuZbP%fnF0C|G2$8`)5xainb&e5Hi# zzCt)4**y>dBV9eZF8mnZl#D6bvSlY9@J8FMX~cpmhZK;A)$?_nt>@v#+u65Y z-^s3Os)$Y7yZPmW^{6uU$^qdP0BLbOen&8UU4CG>$iCl3kzvYgNh>BOc~SXPu=NK! 
zwPgywNP>ZOcz*En6njofqgTkbinngqnhmeqN^Id?j~A-gu;>;O-mT6URftB}`$v0G ze_|YmxGb*#+~_?-yFYciTp``S9>>Qa5sC|zX&|P=NRH4gFk?H=yh%0-f*Lq9F22C` z&Eo3I-1cT{%Lr8Ah)xHec{9ozQ}v7X8#EvK92Jash`sDaDMD-uss(kV#BlRYB{6m)es~x7~x9)Y{iA2h@cXFGp4~%cLF^ybo_x zbbZqDq#czd>5H&ySLy=U_HUsvo}NUX%lXYheFTi--Ls zV%sod|Eub{@pDofXdHVRMcPR5xV0_Yv+=R9F$2zIModAte2YqwzeDaRkVhD}iV>I@ zV1y>agQ4)TcI4Sk6$G-mClVxljpn_0njJ*4`x zPi>`CnaM9pOXNyf8VBUjE?;BNr-x;&gdLU>q#S)98m1k+hg@S%N!soqR}wFZs>f?r zrO#Dms14OUg_pR}iMdv^#XDi6>&EH2v+Z0|&eG*3Lxv9y+MJZKjMY#To44MWFbPCV z&MRwcCc~H*q-!{{rvv~sNX-or96TgzDR2+Tgtn+Z4kN9-zE?P{`V4QMA|PJx6ZyE= zU8ID9rg&;liz;R<^$MA&?fF?*nfnc$znrSXaLm>j!(P$A01h%9--e3VrZhN>NY%5- zHgeaS07xNNB@*j|;drC&HT5_i1%8#_Nbj^Dgo+)Muc3MMa+7*o#ZzyNZvC+p=yQVt zo%{$;2wpt?boBJ|+c$`M0973D``lBb*5nb5o%T4;w!}6_>vCaV|KYM?-y@Bz;+FIJ zHrc668Uv+fzY2cmSw?iL3(ePvA`491LsmVVS?aN6Uj~HVF3%eV@oL&AP7aAzjwVL8wWk7{`mC|L8rhnI0Q%? zqhLzKS*{K*~uT2BPwz1{-8ve%8UHTfxeTgOCl)+JyQRquyPk z;{5>cq=(P6{!7e4}>_CD#=u`CH`kOD!q{U^)W>is~iahqW2-yaGJg z&MEbB&FO~%Gk8#i#xRru$mc}Bc{B%vjy}L~SpcgrxFO^GdRM+)fIV=lbsHC{ve|1b zxms}10R6=^U3#%_m*0l#rLT};hI(ihXe~eA2I7TQ+JNT4dC0zN6Ml*}ybk1)xX9uoeQ*8W z4AaIO(Y2b2)`;q1CBY=#>nTmxJ{L{sP9tqpg+Bx>to2FavO99=W^G-5O>gypZoSjX zs=dbSHz3#e{lS4<@LZ$Rnx>{T9O})*Z&a}uD4c*J5gdJrLDZc<2$}oP++gx{yJ$`g z@_vx-=IP1b>a9O-7!Yj1?uEGURtdFMnp}Ykg@8yg&hDipQFuGRLhY9GOoGbF>IKtq zAWFw`AjKE9?kY+wDvuX0#zROm4wK*m^kzWOI6gUR9zDuwzr)6p7nHsap>d_ScBQ^L zeP9`G=GeNe7eqh4#Ycs`d(n}07NF3vjgF36W<_`LrT)eAkMZVmE+vv`qu$B5Og8P1)N*uZZT+BGWA6@E$ zWmeB=o9}GFfZyjyghQ9&h}#b8`Nzo~#N9A+Bn=n^vcGJVsBCE4qw(r2<2jU2I?CqR zB%ywpMQB&%bN6B8vF!Zx)6v+r*v{l5S-N-hm6xNvq=gPbP8e z<2{oIY?}Wti7|m(+VTILNd$}}ZGIM|ib4zTUmqD}?^EHtBG-9WX_h7zHn7_674W_h z0rl5y7Q$Wlq38EElU=p0&d!eeRyN$np`}ArBghY8q3NP|t+;#T+v>((^7+g``p2vbz>E5@3)p~VJh~9#Za7y^-^UR!mM8; zVHlShfE87Gi>`tFxG?n+An~nQ4&M+eV8;{yaeyQi6{7kv4*(I==tn%&dpTaxD(uw0 zNphFX%Ioz6ipNBt00xmR%n*Nh#lojBHrPyEp50ucH=YK~DdpQA=FjxLRQNk1hQ?LysGS>L zGSez#Zr8Zh0YTgK>7Gl?w6N7Mj>nk@9LOSp$k{oWz_0GC6W>DM1OBB8eWE7u&MdQP zGtWulDqpxHSxz&h9UfAqdEL9!y5;xst9g#Z>)w7*G8>`n`p_bqILwT$jJZR1O(Ryb znlc?~U&pwNHER3Xjhk~>k#gAIDqxRIruEJL;0ta4@A<-4W*bbN{a;^SUODOA4T5=2 z5C!7_`&|vGXJ_{mBD`9n+Z2`_b{%%~1xgBK4LyGGb?hd4P9|b-!iRudd0($bb&No? 
zIKumELVKZs%tbR<_JpV02%ZfD6jZw~TP}&%HEnFaV&G7g zNyXHE2V_{>yjT4pFd{-n?@n#jxTkl}pmEs%#>ez-RGaALd;y4!Y&*JlOwDPkjBczjjX1*El!75|D8FwDL=fg571-%bwcVn0G`r2Oq?e9G_nB% zw+C1sf)~hx?YwWJ`<_GO*Vno2F}ADc_L*CaUHf>dT;j@j*bq*C22cKj34EjeGy4<3 z1Y)0bK1+CC6;gC89~u@GmA;Wwa1|m_?iuVC#8<>mFy+aAV*(NW7s9I*o$W_R z@M2SMS^;^m0#Cf>jdh^mf^d~~zS zp)PH7-703A)Oyh+ocl*;+&eh(omz07-N)&r6o2a$kU!HZU#en%*IMmP%Aey{?@e}rp z+;X|g@9bh3tm)t+4lU+C@26(jRZt8S6ak2fJmtm5`8dt{t*wO zS4lbG)ea`B)J%+AutH6g|2lR2sms>!;BI;OrzJ}S;qlhg<=zDTfF!x<2V)hnD5A+M zhRZIu@UzXI07ALDX1Q+KJ!)xnLrB)t9r!R-9q!;uji3}f$6t@1tV(?uhh0$gz3O#n z=v2sSkDz0~FKOJcNn(I=8M*EZE=%vv`PC&d3pv5nP`$qb9u^%*(n|#Oroh%MI01mH zWPJO@X;q90#mA+G;{nA(TfgUOS^*ZW#Im4>S;QX7{y!1DmoE0i18Yivi=Ch8+(6|) zwOU`5N?bJ`Ppec3)0CNgt+BO~4p)wX?FXHY&MguJ<3OOnAdFv{Mn)%4QGw+iqLWvc zYA~Zw;Ha4lqS)HMZ`+FuvtU!wz1QKn^4j+dpS)LoAgNscOP*I|)mqS6RuPxSwl<}g z{`u+J6T&QH$%<8ni%Zkf)8h*?i*It_VPAzvh=J=$kaLfTp4^^t+&gBVDbO6XDbu%R zy({6WbKbjG_yXSQV9}B%zC4vAs-WFO7sE3XAQAz#uH?qj;VBTxJv zIzN99RrSBKc0ZS5`9}U8`j2GQ9y}0u@_>G-#M*f^`#7)9(;NIx2$F#qvpd0 z0Ed^WVGGvN6it&N?&C85-+$z}N9McN7Kz+=G@`Z-z63mYFm=x)-QzZYX~75B!WFy9 z_f4&P(5K}Az|8&UL-+pwj>z3V$-nJiA}?|X2&hGXu$qa9>rYp0wlKb!WbWLoUrzh@ zh>Av#Cb1o!$F1=xv=9>!8y#7gJ?|oZfM5y|)>#m=vY@cB{iQA;>VgX?EQ`^!ahcs$a2I!OyK&k;Td4)Ma_+{e8sbudL`ueYlPUY$DGQ6~HI7Jh z(m23isHyQck*&(AWpq0Kt~qrBe8)*5XU43Qo@s#dr0Aip{NGHJ@@Bu9R;(dyX-zz zoe?|axwFeXyz_8~y(>QyY2~{X!P)XSQ^8UZc~CA2P{Z|p{L@pa>V0CtD$jxxNvG}~&uyWSWN_Vc}HxY+)c zlhrbRBP%*$NmH#BxOxEt*gA6BZoM?VFTs(Np7s|-KvJvQp_l^vX8azfD+dT$li>5Q zwk7xtGoo`t?eKSnVRtxL-T3y*H)==a#UolcDthi$1fVLIi@R}Cqr=BVFP|LT?&Ez0 z+0UP2qQpZggjGMjE@T~*p4|sh+=rQ-6}ytd=BV}HyR1YOJ7&aMOwL`3eD;LDLjA4* zg3B(V+h-NH?cTBWG>w&ZEYmM5+IN5^?;L!m7QzviKA<~TIhYrT=`3<+l}B|lC4469 zDtWhbU~`zTu*lL_fO&zG%#6Iu=8CX;3-sskNQx~j;2zC&fp3Nou;?|pZd=>|xMq^i z)p3M7mA>phnN%@`FaS+n6o`bn93E%bEHyaYOy$Lg&fvCG^IA`%Q~cYf58ZnkSSX)e z(~TiYiaqER9Lg3|%eeL

8fq~0=I!v4AnE;d}cbadUlY^=u7 z2yT6}jOD5rRDIq88bDM$yF`mG{l+U@$dnz8edJQt{~F;<5C-6X4*}1<+)ZJq77WD} z@!$c=jI`JXmA@d})o5|DPwY2rJ-wXM-1&=vA6t(VDptOen?o^~9uQKD+5jBWZH zp}1)7ynVKOXuB&;O@~nTSj)UXuV!oYqU71fW z^;}EPtJUHnXah0YhG(bWDwSF(6teVCBcKY>!}HY&h^I(MJlPOpkf+Y8>8)+S+}XE}8f9u|=UhBUcN4AaYD}bWF?^QrDByPFtw4 zG>!v%F*l;W{}%R_xBH9LH4#_F%kIz#i2y7OACP~*$$4h~XE~x9*-0QMKEHKvV6*ZP zzd+T~bEhESF$)L-qnX^pb>o|Emzw=qR^ei#L33y=WvGW1SdU+-4MaHY$J+7+OhUg# zW13!C=sq!a(KmrCt~m#*25}rydmkp`TM1O+?R@}VP^mxZ(#eWDC?fN5kLn&lC!nyxP(Ynru{7`7FhKfe>hjcv~ zDu}JXLI)n-#q6D8Vcq=whU)A+dFRbQWb8wH#Hq;(E2Rf ztDFUPD+|czEEv|3&jLwdw((}HrCev0B|x4;!w?#CXKfS6z6oboi^Xu)$Zr`@DvXqtGYo2+D%KzDfpy*g zfeZ=#mxVSn?q^q~Y4p3MO~kl&LOZqSt*NkKzM*#c?v{z1*wu!8W5WfY_j`D_6IqAM z-e}eha|;QoSzOn?EUH<&oZGE5vhv%qHh4L4Ph+;~)qML=2sR`!R#LrB5VO%pEJrn8 zs|=NKGoN3gC_4f&43Dq=2ebG@^QmvExUoMb`)xY(gHCSUyEt!j!a1q@F`GI$KIq zqK9%m=-u4hK%TGlH`x~o!SOeo-8{4H!Cp#rVk;Gi1|9vD3jz^K#yli-UPd$l(wU?o znKqZL0P&rVa~oya=%~`vayRqVG{$>Z(g^F5#p2~{h9GQtp=W=Ci*!3l*+Ll7l@@j$ zp*P1JULix{(R^J5w!V>eKADaxAo$(oKP6*85qyY&I@Ld;recR>4Ayg7W|`mpNC;9s zNbv168mln@W^X6beex5KYk4Xj=BH4d(OTpy)kh9DU4&}AkuBjufCbo>@1sqx}1zQZGVVsJ^Va*+1*0 z0!W+}KpEzFj7$}>#IIrroBa22UYK!5IJfpy${>I6U4fA@7h2nYmjh9!=((RwSo$gY z>f;1*rDDMqbWiAd)9H5Dy4Y#d^`juuo*#whwFW+k9I>tmaUT3{=@CUGS~H!5*7C_~ zP%c5shTl82nBF_F7&PXko$B5w1U?0?>kdBr?lyP=j|J9Is zf{A1(?N#`rx{daRc#!}DVD3EU@bMSJe`~%U`JIvQ+nkIW= z0sq6I!2AjKMgF<8ORNCO!DaJA_JM~C1?P|+(y@Ep?K3=RsVt>l@&;40+VZ0GRPPKN@8R9@$m|JB-g05$cs>)r}d1!*cE z0wO}_AV^nw6Qo0E(u?#SskN5tH$UAv|;QU3AQ<$6g@Gs9qTc4esb-=ydGEw?0t9tjLSEsd4gT-P`G9B|AP}km%1eey#Fwe!89M6-RfAEFGj=(AhHOdy zno4v}x|{AJ(l(@KKHrePyH_kR_Snb-$Z1A?Qo|kYtpyhN;zkOiLy{zNoUq21g%d?; zCap(9lFCgDx~V!EXFTPI4{zIL=B)u&viI(<&WTsQd*vnBFxJIss)R>vU0n=a9*i;E zY$rq>uuprU?LKH8lgejk(0Eqa^u*bq0`tjo;!xwN{ZcJ&xE}2FEKDsH>I?lt^Awcx zG5Ckw_8AkH_kk0CCfD>a)rRAJj>CuhOz}PMJ)!q#R=fmg`&dw^V5vm17cwC&BbC?d z8x7exw}FGRmHAJ0`TE>xf5lzIFg@8;1>iyA!jI~$-7RM}4c2v}kL>ipiq*wcKIXW@ zNm^)XIP}leW7*WKUPWQn%4?i(H%*bibmt`l4 zI%=aNvwyT5z^T`eqT?nvAG4-Z7fu+hw#gpwybedD_fdPGx?sVQtt+Q7}2C5V^IH)`bB6$jG=Xjo6 zaEsmLfn4n@pm|mK(m<^lozlq6YEU78M~mp;REmfZ&QDUv(46*xgCI)mP^w}ru1C|74D~d3(Ru!i1BWfKzrtFY00yd?IjBm0m{`dVE5;N zAbT$Z%oCtbZ4%L^GmPZW)P9Rwet}4+cb8Z+a-})>AlbV#TthMrmX*O6WHwwJJ+X$W zZr59nssxv?r*b5yYLQ0f(z)FSOjQKcWv_pAQF*MZv|HwnJEWqpiY(R*n&DbQ`IB%fMD<;uWx zgqLxICy^-19Z7*pMnNpYB$H86^d@`@++!TVOTQndI*-01x+yK2iZyV0Dk0L!k{xKf zhRI))Sam;6vo{sYp~>1sw|w_kj+^18IldW_nD0yN% zj31*s(vSed>K8pRo|!K7WG}Y7*-NWt1c!g2(y!JwQgB9dP=G3TNr( zk}~%532A5P8pqgD#cAi8k|J z#-f|7+Zr=INr)wv?~^{wJDS#+^RSQE zODi0>`JMiHUe9&z65phnm*(0n7R{_t{+3_(7~5mqUe!ac3WBrOnBo?sbstsJ9=AjR z;aCKjpacg=7~@p2&gHfjPu4@$e~WYdYneH)EHx5_SK{2B;Z&K;Y8tJ%NWc}M1ABaY zngL1|DQW9kx)h%~$&qiu#T{Sdb$qaT!dzmhiO`37K7P)jjbFpF-TPO*Q)IBks{ z_wxio-i+ie2uqk`9ZHJUBw<(3&qCez$M;yE5UUUt@rpwqG^?y*XDi<$0wdM@NH)(7 zGFm6zFj*$%CPeA2AOsQuJ9a+cJo#T`iC-Hd;HL(!<^SLqjb2lp;{nmf#ZQr3nvZ*4tf))A)6IaTbPVG5CF09 zYRjI}?5i4zWmD%lZJe^x*QJXLAC$#x~36@&w1vJO? 
z8YsdsQI;hVBXWjA_m$(Smy@}re|V6{_7IwMxZ(*G^HsmWQw}agJaGPglCb*jn2$6y zii!=(HhZ8%7-%ySCeXq3+rj(uO%V}{SxHARW0%O>UXx8nVOC@Py;mQ}W;e@xa+?yv z%82?X-~Iu6Dk$jn&S_g6db-De?U`^}%iBlyG;2@2l)#(c@M}LGK?83x>kg3@`ZYV+ z3OXwhBkQETkOx5_KFC(-$tYc zIw&4jBN~=n;d%P%>!Q((Nqg}lnj3>5M;BIRGgbX`6fEcmt|d8&@d^Zqv$8hgk|b*Q zvU&i}m}9uya12uIvp=yLD%s+0VW6#}fK>aP5r@`0Busl?(`EQMUE#EcohY9roP#>;+~usuLpmz zel-DC>oa-?*ce~YW@^*)LC$Nd%<8_CA2lJi1SPUQT3E+PzhhAcc!rJ6B~z$KlCN2+ zj7ik2*xVQ;f%!XzZ#}CuqT-FkR>+oQ-3JtKHjECJwP_dDr1P<>8yf_BdwalM25f{o z-8T5XnSc){@Wp1|hnclchaKw;<4U?$z+;g<)4jw9cVxUt*WOx8UBroe@Q3Q4S0e8J zR!X-^s`t~JyzvbHQehV}Lh@gZz0^Dy?%+h7e~q>?Fzmr?t(Cm#0xxUNO-$Z3R=3iZ zOw|zpa%@+as-M%$qo^zf^kvZQjlg}P>o&?Rd%c8ga&HpJoH)GnW@Pa}B!-e)6{%dSZzF|eWkn=65}aa;#U=l|iw*eaGrfiv$YllI`&y%Cd zib4JJ&p5EL0=BmbD}M1CR&j*CsR(gJjM4rSCV70%+WX@WYoE%Ci0OlgUH3Qd0oi^* zs_7cS)u6aB-0*}m0suDEzs|~0uI7~2lr6T4vx;r&%E6e286q`m9-bqS`2PVp5^}sDtHz z0PhK%%+(}Wt}U*ylA`vs;5K+Hi%GK7Vj`oeyxi&f_K(JU2S65dnKp4*S`l0Ze;jFTyS6^NYl`U#?bFeEn~>sEqUIk2 z!rk5`PC+h5rS8fy^dQKniKK`E^9&dbC^ej(v0vTVT<-+t*&9dU>otkQs~7w{EuHl) z(VDLpuGOKUA#mcC4wWqRot?gSk=6M6u`M%BjlZ}(M{m6fB%Z1P5OjWZ~LQSRoM(&x)peQCi!PvBi_n%?!U{i~1vGZ@no z>=-r6=2bItJJJqfMm9m-P+wDHB_M znLv&sWu`oNYlSlKbu5DOp)VVYv;iG6Iqv>9fsn-|U0v+d0&9CdH$GO3fi6)OBmE-P zrtuLSIL6Ot78=66@|*?m6{s9D;(i9vV&A>xy&BKD?0ZVij-b>nl)E^E1E&q3vR)Fvl!;h=3TPu1)yP=z~=1^k6T^=G)B(LWUA zg^Mc+vQD4!+wBChh+@P8VPw49h$1v`w{SM0W_mfL>E#b6D`O}a%KBC>Xkdz}s-!;%f&DFx*I&V3A!OI|tw3A~+#5G31!kjWF=e)pZ!2&VhvuGhf0d zX0vlTJI+7R?-K;t^xosfPVsDRD|J-Jmdx*$ZXQ}i`}@B4Tq9*?P?7g&=7gHX>RiiG zm0#QYFC1rTy(t=XOn#&D_>?dIkah6Dt->607yC^AM3kyx56u=Ei(wmVM!qodlq)RMzWG!iP)v9v-8U)ve z{pyvNT%lB*2L59{0pM&>!k11jR@peBSEs;D)9&u3QE{9MuHLwR7|c2RD;cDFdc^5+ zSRIoG713-r9DnNAe;EM#*w>FCixn~z$~U9gv11Sqg~(7P7!F_eTbBatsH2UkX-X(l zCoxR&QFLizf6Ww)gO*a|=AiUydt(w{P_jb_S=+{KkmDRb7`$Hss)P8OQnrs+Li8SNFNNB?jcf zB(T|ahGM+4v55PjTEwb-d|45_d5a<@-4pTh`u(DoOo|PxK*M#2b(e4%PW19}OEqmc5n_-_yiS zl*>O@cs`c@@r-hBW|ItygDyFYyb;F9(7)47-{b!D+CqD2tU)Al{3fS|c;(zvTtMy- z)U1o4MwamiZ>D{R%)~-a4}($#i%I(!^CH#q@P9EPg=)y6AdPkjZM{+*gUC`N#5CUs zw_jPKtI@-DKbC_>5|w>DRV3V7BLHf8p;&vDctfLYh+W`cv9bcQa)K)jdm-rUE!c*R zkf`X*gDQM7ZYJ+A@L4^SJtW$j1N;T~PH$HAsmA;!_qWI09@R7Pu!7@WF*2wrD8tCK z(hevXvX}>I6ydm`?cYs-Ti19LKcWY~NXU;g0KBobGfW;8dHwn74=^Ci@E!`$i4EJ7 zndmY>WZYe$PqAqrKU=(}{V2_-JYM+Yqt~I?U&9Mr0^v;#-}1o7WaJHn@x0+8IDKZHh%K;e}8kcGxg zd<{(^PHg0dA@cZd3fFq~T-n$IL>aZ)Xiep42Az%UkN4VMoI(9gR>c=;n8w`%=5C++ zjrQoi&NKPu{PaOV{;QA{>6aiaXh zWa=I-+X%&m>178KU|gfm5#83+@nK2ViFxNCp5%rdj#XuG4`M4~vmmvNY|jOvUK@3r z_D)FUDPb3MlFo@h6$wN@x5FeAAH9!{2Q{I?4$T>tR-G-MkMOiNk39$^%K@Ld9;>%} zs&}#WByn*YwW^31de9(BEnh9Ms1jgTVVTVc9tB;xzkv>TnyHnR0e}|YZSy?Aa=eO- z>4p|j7dkIj^gRQu*LHS5;6={z?&^f(%_Q^_E`0(rj;f9u4sW+0$;g%$lOowKtL|xZ z>?PY9-XAmIFv8?h)p;>rw6^rIt-~E?nQNzgP0hrQ*4?!MjQdblfeq{l;gxH`93s|2 zS=p-ts)|lUD05}rC9&0#3%waGgJwV`QsFu-<}&kYUQg_XN9N&v9ga*xBE zWWUqI_XYgJl_%nk_k}9F{1aX4ut>N4oNWLthVlvtzat%rcv*vZ%QwVYvPdW{C861w zop$q#;NfqQ`*R+-vE2GQX+I+uYv^j+^9v&-Bdin;&O$ft;np8#l#nu`h|^4-E>{^A z@uk5{qSb(^G${#%_Tfk07HXqI1(s``ZM5eCq*iFgSs_hBX0zMQ ze~1E$Pe5QAhTb)feneFx-mqMO_f<~PPd}C!gjrX7;Pm=FB}z3Vd*n~1p{`g zOR0D}YUfQ24X~{_#jjF6>^4jF^ml$kl)G@nJ9V?)8XX6$(hxn+*_9#0)C4euZ1Dny zkYQqgeA~p1{`pat2d50JM_ll8O)tbF6z!4`N;UsrsJEK4*d5sGQcW_9Rkx{U2E^IP zi!LrVPurdasrfk-@U0TaA_0Uv9*sX7B#+S>P6cL&r-rtSUKyZp1&GpYnkx~~*u+xF z1JTX6YUeQc6l{J^wWz+M<_W*$;y_c=$E)`9NFO2>tz3{QZ;iLgp-h=d4!B4U%%)S5 zlK`;B=2Lzbj_7JY6RVzMGsBU6(m@!wbV!Q6aCb&qOXt*F9wT@f*VE)8lXk0;Z_Hnk zkS+mhdqoVe7=JN+g^TnrM)$1Z<}2l%+OUecr%6W^jWb~6vW?%aGXuB>W7?~nJi19p zra=|FkpmRZeP71Hn)UQms`>2{zjnwz#rOA@DXF(Y@WeJfY|&~ealo$F>on1D7hLoN 
literal 0
HcmV?d00001

diff --git a/vendor/github.com/rs/zerolog/sampler.go b/vendor/github.com/rs/zerolog/sampler.go
new file mode 100644
index 00000000..1be98c4f
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/sampler.go
@@ -0,0 +1,134 @@
+package zerolog
+
+import (
+	"math/rand"
+	"sync/atomic"
+	"time"
+)
+
+var (
+	// Often samples log every ~ 10 events.
+	Often = RandomSampler(10)
+	// Sometimes samples log every ~ 100 events.
+	Sometimes = RandomSampler(100)
+	// Rarely samples log every ~ 1000 events.
+	Rarely = RandomSampler(1000)
+)
+
+// Sampler defines an interface to a log sampler.
+type Sampler interface {
+	// Sample returns true if the event should be part of the sample, false if
+	// the event should be dropped.
+	Sample(lvl Level) bool
+}
+
+// RandomSampler use a PRNG to randomly sample an event out of N events,
+// regardless of their level.
+type RandomSampler uint32
+
+// Sample implements the Sampler interface.
+func (s RandomSampler) Sample(lvl Level) bool {
+	if s <= 0 {
+		return false
+	}
+	if rand.Intn(int(s)) != 0 {
+		return false
+	}
+	return true
+}
+
+// BasicSampler is a sampler that will send every Nth events, regardless of
+// their level.
+type BasicSampler struct {
+	N       uint32
+	counter uint32
+}
+
+// Sample implements the Sampler interface.
+func (s *BasicSampler) Sample(lvl Level) bool {
+	n := s.N
+	if n == 1 {
+		return true
+	}
+	c := atomic.AddUint32(&s.counter, 1)
+	return c%n == 1
+}
+
+// BurstSampler lets Burst events pass per Period then pass the decision to
+// NextSampler. If Sampler is not set, all subsequent events are rejected.
+type BurstSampler struct {
+	// Burst is the maximum number of event per period allowed before calling
+	// NextSampler.
+	Burst uint32
+	// Period defines the burst period. If 0, NextSampler is always called.
+	Period time.Duration
+	// NextSampler is the sampler used after the burst is reached. If nil,
+	// events are always rejected after the burst.
+	NextSampler Sampler
+
+	counter uint32
+	resetAt int64
+}
+
+// Sample implements the Sampler interface.
+func (s *BurstSampler) Sample(lvl Level) bool {
+	if s.Burst > 0 && s.Period > 0 {
+		if s.inc() <= s.Burst {
+			return true
+		}
+	}
+	if s.NextSampler == nil {
+		return false
+	}
+	return s.NextSampler.Sample(lvl)
+}
+
+func (s *BurstSampler) inc() uint32 {
+	now := time.Now().UnixNano()
+	resetAt := atomic.LoadInt64(&s.resetAt)
+	var c uint32
+	if now > resetAt {
+		c = 1
+		atomic.StoreUint32(&s.counter, c)
+		newResetAt := now + s.Period.Nanoseconds()
+		reset := atomic.CompareAndSwapInt64(&s.resetAt, resetAt, newResetAt)
+		if !reset {
+			// Lost the race with another goroutine trying to reset.
+			c = atomic.AddUint32(&s.counter, 1)
+		}
+	} else {
+		c = atomic.AddUint32(&s.counter, 1)
+	}
+	return c
+}
+
+// LevelSampler applies a different sampler for each level.
+type LevelSampler struct {
+	TraceSampler, DebugSampler, InfoSampler, WarnSampler, ErrorSampler Sampler
+}
+
+func (s LevelSampler) Sample(lvl Level) bool {
+	switch lvl {
+	case TraceLevel:
+		if s.TraceSampler != nil {
+			return s.TraceSampler.Sample(lvl)
+		}
+	case DebugLevel:
+		if s.DebugSampler != nil {
+			return s.DebugSampler.Sample(lvl)
+		}
+	case InfoLevel:
+		if s.InfoSampler != nil {
+			return s.InfoSampler.Sample(lvl)
+		}
+	case WarnLevel:
+		if s.WarnSampler != nil {
+			return s.WarnSampler.Sample(lvl)
+		}
+	case ErrorLevel:
+		if s.ErrorSampler != nil {
+			return s.ErrorSampler.Sample(lvl)
+		}
+	}
+	return true
+}
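As a usage sketch (not part of the vendored file), the samplers above are attached to a zerolog logger through `Logger.Sample`, which lives elsewhere in the same vendored package; the rate thresholds below are arbitrary:

```go
package main

import (
	"os"
	"time"

	"github.com/rs/zerolog"
)

func main() {
	// Let the first 5 events per second through, then keep only ~1 in 100.
	sampler := &zerolog.BurstSampler{
		Burst:       5,
		Period:      time.Second,
		NextSampler: &zerolog.BasicSampler{N: 100},
	}

	// Sample returns a child logger whose events are filtered by the sampler.
	logger := zerolog.New(os.Stdout).Sample(sampler)

	for i := 0; i < 1000; i++ {
		logger.Info().Int("i", i).Msg("noisy event")
	}
}
```

As the Sample method above shows, BurstSampler hands the decision to NextSampler once the burst budget for the current period is spent, and drops everything if NextSampler is nil.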
diff --git a/vendor/github.com/rs/zerolog/syslog.go b/vendor/github.com/rs/zerolog/syslog.go
new file mode 100644
index 00000000..c4082830
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/syslog.go
@@ -0,0 +1,80 @@
+// +build !windows
+// +build !binary_log
+
+package zerolog
+
+import (
+	"io"
+)
+
+// See http://cee.mitre.org/language/1.0-beta1/clt.html#syslog
+// or https://www.rsyslog.com/json-elasticsearch/
+const ceePrefix = "@cee:"
+
+// SyslogWriter is an interface matching a syslog.Writer struct.
+type SyslogWriter interface {
+	io.Writer
+	Debug(m string) error
+	Info(m string) error
+	Warning(m string) error
+	Err(m string) error
+	Emerg(m string) error
+	Crit(m string) error
+}
+
+type syslogWriter struct {
+	w      SyslogWriter
+	prefix string
+}
+
+// SyslogLevelWriter wraps a SyslogWriter and call the right syslog level
+// method matching the zerolog level.
+func SyslogLevelWriter(w SyslogWriter) LevelWriter {
+	return syslogWriter{w, ""}
+}
+
+// SyslogCEEWriter wraps a SyslogWriter with a SyslogLevelWriter that adds a
+// MITRE CEE prefix for JSON syslog entries, compatible with rsyslog
+// and syslog-ng JSON logging support.
+// See https://www.rsyslog.com/json-elasticsearch/
+func SyslogCEEWriter(w SyslogWriter) LevelWriter {
+	return syslogWriter{w, ceePrefix}
+}
+
+func (sw syslogWriter) Write(p []byte) (n int, err error) {
+	var pn int
+	if sw.prefix != "" {
+		pn, err = sw.w.Write([]byte(sw.prefix))
+		if err != nil {
+			return pn, err
+		}
+	}
+	n, err = sw.w.Write(p)
+	return pn + n, err
+}
+
+// WriteLevel implements LevelWriter interface.
+func (sw syslogWriter) WriteLevel(level Level, p []byte) (n int, err error) {
+	switch level {
+	case TraceLevel:
+	case DebugLevel:
+		err = sw.w.Debug(sw.prefix + string(p))
+	case InfoLevel:
+		err = sw.w.Info(sw.prefix + string(p))
+	case WarnLevel:
+		err = sw.w.Warning(sw.prefix + string(p))
+	case ErrorLevel:
+		err = sw.w.Err(sw.prefix + string(p))
+	case FatalLevel:
+		err = sw.w.Emerg(sw.prefix + string(p))
+	case PanicLevel:
+		err = sw.w.Crit(sw.prefix + string(p))
+	case NoLevel:
+		err = sw.w.Info(sw.prefix + string(p))
+	default:
+		panic("invalid level")
+	}
+	// Any CEE prefix is not part of the message, so we don't include its length
+	n = len(p)
+	return
+}
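A usage sketch for the wrappers above: the standard library's `*syslog.Writer` already provides the Debug/Info/Warning/Err/Emerg/Crit methods required by the SyslogWriter interface, so it can be handed directly to SyslogLevelWriter or SyslogCEEWriter. The facility and tag below are placeholders:

```go
//go:build !windows

package main

import (
	"log/syslog"

	"github.com/rs/zerolog"
)

func main() {
	// Placeholder facility and tag; adjust for the actual daemon.
	sysl, err := syslog.New(syslog.LOG_INFO|syslog.LOG_DAEMON, "example-agent")
	if err != nil {
		panic(err)
	}

	// SyslogCEEWriter prepends "@cee:" so rsyslog/syslog-ng can parse the JSON payload;
	// zerolog detects the LevelWriter and routes each event to the matching syslog level.
	logger := zerolog.New(zerolog.SyslogCEEWriter(sysl))

	logger.Warn().Str("component", "broker").Msg("connection lost")
}
```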
diff --git a/vendor/github.com/rs/zerolog/writer.go b/vendor/github.com/rs/zerolog/writer.go
new file mode 100644
index 00000000..26f5e632
--- /dev/null
+++ b/vendor/github.com/rs/zerolog/writer.go
@@ -0,0 +1,154 @@
+package zerolog
+
+import (
+	"bytes"
+	"io"
+	"path"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+// LevelWriter defines as interface a writer may implement in order
+// to receive level information with payload.
+type LevelWriter interface {
+	io.Writer
+	WriteLevel(level Level, p []byte) (n int, err error)
+}
+
+type levelWriterAdapter struct {
+	io.Writer
+}
+
+func (lw levelWriterAdapter) WriteLevel(l Level, p []byte) (n int, err error) {
+	return lw.Write(p)
+}
+
+type syncWriter struct {
+	mu sync.Mutex
+	lw LevelWriter
+}
+
+// SyncWriter wraps w so that each call to Write is synchronized with a mutex.
+// This syncer can be used to wrap the call to writer's Write method if it is
+// not thread safe. Note that you do not need this wrapper for os.File Write
+// operations on POSIX and Windows systems as they are already thread-safe.
+func SyncWriter(w io.Writer) io.Writer {
+	if lw, ok := w.(LevelWriter); ok {
+		return &syncWriter{lw: lw}
+	}
+	return &syncWriter{lw: levelWriterAdapter{w}}
+}
+
+// Write implements the io.Writer interface.
+func (s *syncWriter) Write(p []byte) (n int, err error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.lw.Write(p)
+}
+
+// WriteLevel implements the LevelWriter interface.
+func (s *syncWriter) WriteLevel(l Level, p []byte) (n int, err error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.lw.WriteLevel(l, p)
+}
+
+type multiLevelWriter struct {
+	writers []LevelWriter
+}
+
+func (t multiLevelWriter) Write(p []byte) (n int, err error) {
+	for _, w := range t.writers {
+		if _n, _err := w.Write(p); err == nil {
+			n = _n
+			if _err != nil {
+				err = _err
+			} else if _n != len(p) {
+				err = io.ErrShortWrite
+			}
+		}
+	}
+	return n, err
+}
+
+func (t multiLevelWriter) WriteLevel(l Level, p []byte) (n int, err error) {
+	for _, w := range t.writers {
+		if _n, _err := w.WriteLevel(l, p); err == nil {
+			n = _n
+			if _err != nil {
+				err = _err
+			} else if _n != len(p) {
+				err = io.ErrShortWrite
+			}
+		}
+	}
+	return n, err
+}
+
+// MultiLevelWriter creates a writer that duplicates its writes to all the
+// provided writers, similar to the Unix tee(1) command. If some writers
+// implement LevelWriter, their WriteLevel method will be used instead of Write.
+func MultiLevelWriter(writers ...io.Writer) LevelWriter {
+	lwriters := make([]LevelWriter, 0, len(writers))
+	for _, w := range writers {
+		if lw, ok := w.(LevelWriter); ok {
+			lwriters = append(lwriters, lw)
+		} else {
+			lwriters = append(lwriters, levelWriterAdapter{w})
+		}
+	}
+	return multiLevelWriter{lwriters}
+}
+
+// TestingLog is the logging interface of testing.TB.
+type TestingLog interface {
+	Log(args ...interface{})
+	Logf(format string, args ...interface{})
+	Helper()
+}
+
+// TestWriter is a writer that writes to testing.TB.
+type TestWriter struct {
+	T TestingLog
+
+	// Frame skips caller frames to capture the original file and line numbers.
+	Frame int
+}
+
+// NewTestWriter creates a writer that logs to the testing.TB.
+func NewTestWriter(t TestingLog) TestWriter {
+	return TestWriter{T: t}
+}
+
+// Write to testing.TB.
+func (t TestWriter) Write(p []byte) (n int, err error) {
+	t.T.Helper()
+
+	n = len(p)
+
+	// Strip trailing newline because t.Log always adds one.
+	p = bytes.TrimRight(p, "\n")
+
+	// Try to correct the log file and line number to the caller.
+	if t.Frame > 0 {
+		_, origFile, origLine, _ := runtime.Caller(1)
+		_, frameFile, frameLine, ok := runtime.Caller(1 + t.Frame)
+		if ok {
+			erase := strings.Repeat("\b", len(path.Base(origFile))+len(strconv.Itoa(origLine))+3)
+			t.T.Logf("%s%s:%d: %s", erase, path.Base(frameFile), frameLine, p)
+			return n, err
+		}
	}
+	t.T.Log(string(p))
+
+	return n, err
+}
+
+// ConsoleTestWriter creates an option that correctly sets the file frame depth for testing.TB log.
+func ConsoleTestWriter(t TestingLog) func(w *ConsoleWriter) {
+	return func(w *ConsoleWriter) {
+		w.Out = TestWriter{T: t, Frame: 6}
+	}
+}
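A usage sketch for the writers above (the file path and field names are illustrative): MultiLevelWriter tees each event to every writer, wrapping plain io.Writers in levelWriterAdapter, while writers such as ConsoleWriter, defined elsewhere in this vendored package, receive the pretty stream:

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	f, err := os.Create("events.log") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// The console gets a human-readable stream, the file gets the raw JSON
	// lines; every event is duplicated to both writers.
	multi := zerolog.MultiLevelWriter(
		zerolog.ConsoleWriter{Out: os.Stderr},
		f,
	)

	logger := zerolog.New(multi).With().Timestamp().Logger()
	logger.Info().Str("source", "example").Msg("resource status updated")
}
```

In tests, NewTestWriter(t) (or the ConsoleTestWriter option) can stand in for the file so output flows through testing.TB with corrected file:line information; SyncWriter is only needed around writers whose Write is not already safe for concurrent use.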
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 0ce0cc1b..e4aa5ad8 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -114,6 +114,9 @@ github.com/google/gofuzz
 # github.com/google/uuid v1.2.0
 ## explicit
 github.com/google/uuid
+# github.com/gorilla/websocket v1.5.0
+## explicit; go 1.12
+github.com/gorilla/websocket
 # github.com/imdario/mergo v0.3.12
 ## explicit; go 1.13
 github.com/imdario/mergo
@@ -128,9 +131,23 @@ github.com/json-iterator/go
 github.com/mailru/easyjson/buffer
 github.com/mailru/easyjson/jlexer
 github.com/mailru/easyjson/jwriter
+# github.com/mattn/go-colorable v0.1.12
+## explicit; go 1.13
+github.com/mattn/go-colorable
+# github.com/mattn/go-isatty v0.0.14
+## explicit; go 1.12
+github.com/mattn/go-isatty
 # github.com/matttproud/golang_protobuf_extensions v1.0.2
 ## explicit; go 1.9
 github.com/matttproud/golang_protobuf_extensions/pbutil
+# github.com/mochi-mqtt/server/v2 v2.3.0
+## explicit; go 1.19
+github.com/mochi-mqtt/server/v2
+github.com/mochi-mqtt/server/v2/hooks/auth
+github.com/mochi-mqtt/server/v2/hooks/storage
+github.com/mochi-mqtt/server/v2/listeners
+github.com/mochi-mqtt/server/v2/packets
+github.com/mochi-mqtt/server/v2/system
 # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
 ## explicit
 github.com/modern-go/concurrent
@@ -216,6 +233,14 @@ github.com/prometheus/common/model
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
+# github.com/rs/xid v1.4.0
+## explicit; go 1.12
+github.com/rs/xid
+# github.com/rs/zerolog v1.28.0
+## explicit; go 1.15
+github.com/rs/zerolog
+github.com/rs/zerolog/internal/cbor
+github.com/rs/zerolog/internal/json
 # github.com/spf13/pflag v1.0.5
 ## explicit; go 1.12
 github.com/spf13/pflag