diff --git a/Gopkg.lock b/Gopkg.lock
index 71e9e342cb..0b8c713152 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -24,6 +24,25 @@
revision = "2b7f4fc93daf5ec3048fa4fc1c15573466711a17"
version = "v0.8.0"
+[[projects]]
+ digest = "1:dce7126264112cb2357a02f845cbb4cb4428dbe499ff290bf391f826175fe983"
+ name = "fortio.org/fortio"
+ packages = [
+ ".",
+ "bincommon",
+ "fgrpc",
+ "fhttp",
+ "fnet",
+ "log",
+ "periodic",
+ "stats",
+ "ui",
+ "version",
+ ]
+ pruneopts = ""
+ revision = "fd8f4a7177e9ea509f27105ae4e55e6c68ece6f7"
+ version = "v1.3.1"
+
[[projects]]
digest = "1:4d581a0828b5c0f9e252edb5cd85f2c5c20809783dddedae5be94cc8ee26639b"
name = "github.com/aws/aws-sdk-go"
@@ -681,6 +700,8 @@
"encoding",
"encoding/proto",
"grpclog",
+ "health",
+ "health/grpc_health_v1",
"internal",
"internal/backoff",
"internal/channelz",
@@ -691,6 +712,8 @@
"metadata",
"naming",
"peer",
+ "reflection",
+ "reflection/grpc_reflection_v1alpha",
"resolver",
"resolver/dns",
"resolver/passthrough",
@@ -1007,6 +1030,7 @@
analyzer-version = 1
input-imports = [
"contrib.go.opencensus.io/exporter/stackdriver",
+ "fortio.org/fortio",
"github.com/evanphx/json-patch",
"github.com/fsnotify/fsnotify",
"github.com/golang/protobuf/proto",
@@ -1034,10 +1058,12 @@
"google.golang.org/grpc/grpclog",
"google.golang.org/grpc/metadata",
"google.golang.org/grpc/status",
+ "gopkg.in/yaml.v2",
"k8s.io/api/admission/v1beta1",
"k8s.io/api/admissionregistration/v1beta1",
"k8s.io/api/apps/v1",
"k8s.io/api/core/v1",
+ "k8s.io/api/extensions/v1beta1",
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1",
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset",
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake",
@@ -1075,6 +1101,7 @@
"k8s.io/client-go/testing",
"k8s.io/client-go/tools/cache",
"k8s.io/client-go/tools/clientcmd",
+ "k8s.io/client-go/tools/metrics",
"k8s.io/client-go/tools/record",
"k8s.io/client-go/util/flowcontrol",
"k8s.io/client-go/util/workqueue",
diff --git a/Gopkg.toml b/Gopkg.toml
index a38b4df44b..f62ad7259c 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -99,3 +99,7 @@
[[constraint]]
name = "contrib.go.opencensus.io/exporter/stackdriver"
version = "v0.8.0"
+
+[[constraint]]
+ name = "fortio.org/fortio"
+ version = "1.3.1"
diff --git a/build/Makefile b/build/Makefile
index b56958e65c..0fcc5e9c84 100644
--- a/build/Makefile
+++ b/build/Makefile
@@ -138,6 +138,8 @@ endif
GO_TEST=$(DOCKER_RUN) go test $(RACE_DETECTOR_ARGS)
GO_E2E_TEST_ARGS=--kubeconfig /root/.kube/$(kubeconfig_file)
+PERF_OUTPUT_DIR=$(mount_path)/build/.perf
+
go_build_base_path=$(mount_path)
ifdef LOCAL_GO
@@ -148,6 +150,7 @@ ifdef LOCAL_GO
GO_BUILD_DARWIN_AMD64=GOOS=darwin GOARCH=amd64 go build
GO_TEST=go test -v $(RACE_DETECTOR_ARGS)
GO_E2E_TEST_ARGS=
+ PERF_OUTPUT_DIR=$(build_path)/.perf
go_build_base_path=$(agones_path)
endif
@@ -229,7 +232,8 @@ stress-test-e2e: $(ensure-build-image)
-run '.*StressTest.*' \
--gameserver-image=$(GS_TEST_IMAGE) \
--pullsecret=$(IMAGE_PULL_SECRET) \
- --stress $(STRESS_TEST_LEVEL)
+ --stress $(STRESS_TEST_LEVEL) \
+ --perf-output $(PERF_OUTPUT_DIR)
# Run test on install yaml - make sure there is no change
# mostly this is for CI
diff --git a/test/e2e/fleet_test.go b/test/e2e/fleet_test.go
index f7445f5b14..f4932a2478 100644
--- a/test/e2e/fleet_test.go
+++ b/test/e2e/fleet_test.go
@@ -670,8 +670,11 @@ func TestScaleUpAndDownInParallelStressTest(t *testing.T) {
var fleets []*v1alpha1.Fleet
- var scaleUpResults e2e.PerfResults
- var scaleDownResults e2e.PerfResults
+ scaleUpStats := framework.NewStatsCollector(fmt.Sprintf("fleet_%v_scale_up", fleetSize))
+ scaleDownStats := framework.NewStatsCollector(fmt.Sprintf("fleet_%v_scale_down", fleetSize))
+
+ defer scaleUpStats.Report()
+ defer scaleDownStats.Report()
for fleetNumber := 0; fleetNumber < fleetCount; fleetNumber++ {
flt := defaultFleet()
@@ -713,22 +716,19 @@ func TestScaleUpAndDownInParallelStressTest(t *testing.T) {
}()
if fleetNumber%2 == 0 {
- scaleDownResults.AddSample(scaleAndWait(t, flt, 0))
+ scaleDownStats.ReportDuration(scaleAndWait(t, flt, 0), nil)
}
for i := 0; i < repeatCount; i++ {
if time.Now().After(deadline) {
break
}
- scaleUpResults.AddSample(scaleAndWait(t, flt, fleetSize))
- scaleDownResults.AddSample(scaleAndWait(t, flt, 0))
+ scaleUpStats.ReportDuration(scaleAndWait(t, flt, fleetSize), nil)
+ scaleDownStats.ReportDuration(scaleAndWait(t, flt, 0), nil)
}
}(fleetNumber, flt)
}
wg.Wait()
-
- scaleUpResults.Report(fmt.Sprintf("scale up 0 to %v with %v fleets", fleetSize, fleetCount))
- scaleDownResults.Report(fmt.Sprintf("scale down %v to 0 with %v fleets", fleetSize, fleetCount))
}
func scaleAndWait(t *testing.T, flt *v1alpha1.Fleet, fleetSize int32) time.Duration {
diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go
index 2719a1d8c9..dd7be0013f 100644
--- a/test/e2e/framework/framework.go
+++ b/test/e2e/framework/framework.go
@@ -49,10 +49,11 @@ type Framework struct {
GameServerImage string
PullSecret string
StressTestLevel int
+ PerfOutputDir string
}
// New setups a testing framework using a kubeconfig path and the game server image to use for testing.
-func New(kubeconfig, gsimage string, pullSecret string, stressTestLevel int) (*Framework, error) {
+func New(kubeconfig string) (*Framework, error) {
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
return nil, errors.Wrap(err, "build config from flags failed")
@@ -69,11 +70,8 @@ func New(kubeconfig, gsimage string, pullSecret string, stressTestLevel int) (*F
}
return &Framework{
- KubeClient: kubeClient,
- AgonesClient: agonesClient,
- GameServerImage: gsimage,
- PullSecret: pullSecret,
- StressTestLevel: stressTestLevel,
+ KubeClient: kubeClient,
+ AgonesClient: agonesClient,
}, nil
}
@@ -201,6 +199,15 @@ func (f *Framework) WaitForFleetGameServersCondition(flt *v1alpha1.Fleet, cond f
})
}
+// NewStatsCollector returns new instance of statistics collector,
+// which can be used to emit performance statistics for load tests and stress tests.
+func (f *Framework) NewStatsCollector(name string) *StatsCollector {
+ if f.StressTestLevel > 0 {
+ name = fmt.Sprintf("stress_%v_%v", f.StressTestLevel, name)
+ }
+ return &StatsCollector{name: name, outputDir: f.PerfOutputDir}
+}
+
// CleanUp Delete all Agones resources in a given namespace.
func (f *Framework) CleanUp(ns string) error {
logrus.Info("Cleaning up now.")
diff --git a/test/e2e/framework/perf.go b/test/e2e/framework/perf.go
index 0786a741ef..862b67d55c 100644
--- a/test/e2e/framework/perf.go
+++ b/test/e2e/framework/perf.go
@@ -1,25 +1,35 @@
package framework
import (
- "sort"
+ "encoding/json"
+ "net/http"
+ "os"
+ "path/filepath"
"sync"
"time"
+ "fortio.org/fortio/fhttp"
+ "fortio.org/fortio/stats"
"github.com/sirupsen/logrus"
+
+ k8serrors "k8s.io/apimachinery/pkg/api/errors"
)
-// PerfResults aggregates performance test results.
-// The AddSample() method is safe for concurrent use by multiple goroutines.
-type PerfResults struct {
- mu sync.Mutex
- samples []time.Duration
+// StatsCollector collects latency and throughput counters.
+// The ReportDuration() method is safe for concurrent use by multiple goroutines.
+type StatsCollector struct {
+ name string
+ outputDir string
+ mu sync.Mutex
+ samples []time.Duration
+ statusCounts map[int]int64
firstSampleTime time.Time
lastSampleTime time.Time
}
-// AddSample adds a single time measurement.
-func (p *PerfResults) AddSample(d time.Duration) {
+// ReportDuration adds a single time measurement.
+func (p *StatsCollector) ReportDuration(d time.Duration, err error) {
p.mu.Lock()
defer p.mu.Unlock()
@@ -29,37 +39,77 @@ func (p *PerfResults) AddSample(d time.Duration) {
}
p.lastSampleTime = n
p.samples = append(p.samples, d)
+ if p.statusCounts == nil {
+ p.statusCounts = map[int]int64{}
+ }
+ p.statusCounts[errToHTTPStatusCode(err)]++
+}
+
+func errToHTTPStatusCode(err error) int {
+ // crude translation from 'err' to HTTP status code.
+ switch {
+ case err == nil:
+ return http.StatusOK
+ case k8serrors.IsNotFound(err):
+ return http.StatusNotFound
+ case k8serrors.IsConflict(err):
+ return http.StatusConflict
+ case k8serrors.IsUnauthorized(err):
+ return http.StatusUnauthorized
+ case k8serrors.IsServiceUnavailable(err):
+ return http.StatusServiceUnavailable
+ default:
+ return http.StatusInternalServerError
+ }
}
// Report outputs performance report to log.
-func (p *PerfResults) Report(name string) {
+func (p *StatsCollector) Report() {
if len(p.samples) == 0 {
return
}
- sort.Slice(p.samples, func(i, j int) bool {
- return p.samples[i] < p.samples[j]
- })
-
- var sum time.Duration
+ h := stats.NewHistogram(0, 1)
for _, s := range p.samples {
- sum += s
+ h.Record(s.Seconds())
}
- avg := time.Duration(int64(sum) / int64(len(p.samples)))
+ var rr fhttp.HTTPRunnerResults
+ rr.RunType = "HTTP"
+ rr.Labels = "Agones " + p.name
+ rr.StartTime = time.Now()
+ rr.ActualDuration = p.lastSampleTime.Sub(p.firstSampleTime)
+ rr.DurationHistogram = h.Export()
+ rr.DurationHistogram.CalcPercentiles([]float64{50, 90, 95, 99, 99.9})
+	rr.RetCodes = p.statusCounts
+ rr.ActualQPS = float64(len(p.samples)) / rr.ActualDuration.Seconds()
+
logrus.
- WithField("avg", avg).
- WithField("count", len(p.samples)).
- WithField("min", p.samples[0].Seconds()).
- WithField("max", p.samples[len(p.samples)-1].Seconds()).
- WithField("p50", p.samples[len(p.samples)*500/1001].Seconds()).
- WithField("p90", p.samples[len(p.samples)*900/1001].Seconds()).
- WithField("p95", p.samples[len(p.samples)*950/1001].Seconds()).
- WithField("p99", p.samples[len(p.samples)*990/1001].Seconds()).
- WithField("p999", p.samples[len(p.samples)*999/1001].Seconds()).
+ WithField("avg", rr.DurationHistogram.Avg).
+ WithField("count", rr.DurationHistogram.Count).
+ WithField("min", rr.DurationHistogram.Min).
+ WithField("max", rr.DurationHistogram.Max).
+ WithField("p50", rr.DurationHistogram.CalcPercentile(50)).
+ WithField("p90", rr.DurationHistogram.CalcPercentile(90)).
+ WithField("p95", rr.DurationHistogram.CalcPercentile(95)).
+ WithField("p99", rr.DurationHistogram.CalcPercentile(99)).
+ WithField("p999", rr.DurationHistogram.CalcPercentile(99.9)).
WithField("duration", p.lastSampleTime.Sub(p.firstSampleTime).Seconds()).
- Info(name)
+ Info(p.name)
+
+ if p.outputDir != "" {
+ os.MkdirAll(p.outputDir, 0755) //nolint:errcheck
- // TODO - use something like Fortio ("fortio.org/fortio/stats") to
- // generate histogram for long-term storage and analysis.
+ fname := filepath.Join(p.outputDir, p.name+"_"+rr.StartTime.UTC().Format("2006-01-02_1504")+".json")
+ f, err := os.Create(fname)
+ if err != nil {
+ logrus.WithError(err).Error("unable to create performance log")
+ return
+ }
+ defer f.Close() //nolint:errcheck
+
+ e := json.NewEncoder(f)
+ e.SetIndent("", " ")
+ e.Encode(rr) //nolint:errcheck
+ }
}
diff --git a/test/e2e/main_test.go b/test/e2e/main_test.go
index 9be424d0f3..8eae1d1de9 100644
--- a/test/e2e/main_test.go
+++ b/test/e2e/main_test.go
@@ -39,6 +39,7 @@ func TestMain(m *testing.M) {
pullSecret := flag.String("pullsecret", "",
"optional secret to be used for pulling the gameserver and/or Agones SDK sidecar images")
stressTestLevel := flag.Int("stress", 0, "enable stress test at given level 0-100")
+	perfOutputDir := flag.String("perf-output", "", "write performance statistics to the specified directory")
flag.Parse()
@@ -53,11 +54,16 @@ func TestMain(m *testing.M) {
exitCode int
)
- if framework, err = e2eframework.New(*kubeconfig, *gsimage, *pullSecret, *stressTestLevel); err != nil {
+ if framework, err = e2eframework.New(*kubeconfig); err != nil {
log.Printf("failed to setup framework: %v\n", err)
os.Exit(1)
}
+ framework.GameServerImage = *gsimage
+ framework.PullSecret = *pullSecret
+ framework.StressTestLevel = *stressTestLevel
+ framework.PerfOutputDir = *perfOutputDir
+
// run cleanup before tests, to ensure no resources from previous runs exist.
err = framework.CleanUp(defaultNs)
if err != nil {
diff --git a/vendor/fortio.org/fortio/.gitignore b/vendor/fortio.org/fortio/.gitignore
new file mode 100644
index 0000000000..2ed10b0880
--- /dev/null
+++ b/vendor/fortio.org/fortio/.gitignore
@@ -0,0 +1,15 @@
+coverage.txt
+*.bak
+*.json
+test.profile.*
+# binary, in case of go build .
+fortio
+.DS_Store
+# Avoid accidental changes to vendor or Gopkg.*:
+vendor
+Gopkg.*
+.idea
+# certs for testing
+cert-tmp/
+# brew binary bottles create a .brew_home/ dir in source tree
+.brew_home/
diff --git a/vendor/fortio.org/fortio/CONTRIBUTING.md b/vendor/fortio.org/fortio/CONTRIBUTING.md
new file mode 100644
index 0000000000..3a20e4197b
--- /dev/null
+++ b/vendor/fortio.org/fortio/CONTRIBUTING.md
@@ -0,0 +1,8 @@
+# Contributing to Fortio
+
+Contributions whether through issues, documentation, bug fixes, or new features
+are most welcome !
+
+Please also see [Contributing to Istio](https://github.com/istio/community/blob/master/CONTRIBUTING.md#contributing-to-istio)
+and [Getting started contributing to Fortio](https://github.com/fortio/fortio/wiki/FAQ#how-do-i-get-started-contributing-to-fortio) in the FAQ.
+
diff --git a/vendor/fortio.org/fortio/Dockerfile b/vendor/fortio.org/fortio/Dockerfile
new file mode 100644
index 0000000000..4078ef63a9
--- /dev/null
+++ b/vendor/fortio.org/fortio/Dockerfile
@@ -0,0 +1,33 @@
+# Build the binaries in larger image
+FROM docker.io/fortio/fortio.build:v12 as build
+WORKDIR /go/src/fortio.org
+COPY . fortio
+# Submodule handling
+RUN make -C fortio submodule
+# We moved a lot of the logic into the Makefile so it can be reused in brew
+# but that also couples the 2, this expects to find binaries in the right place etc
+RUN make -C fortio official-build-version BUILD_DIR=/build OFFICIAL_BIN=../fortio_go_latest.bin
+# Check we still build with go 1.8 (and macos does not break)
+RUN make -C fortio official-build BUILD_DIR=/build OFFICIAL_BIN=../fortio_go1.8.mac GOOS=darwin GO_BIN=/usr/local/go/bin/go
+# Optionally (comment out) Build with 1.8 for perf comparison
+# RUN make -C fortio official-build-version BUILD_DIR= OFFICIAL_BIN=../fortio_go1.8.bin GO_BIN=/usr/local/go/bin/go
+# Just check it stays compiling on Windows (would need to set the rsrcDir too)
+RUN make -C fortio official-build BUILD_DIR=/build OFFICIAL_BIN=../fortio.exe GOOS=windows
+# Minimal image with just the binary and certs
+FROM scratch as release
+# NOTE: the list of files here, if updated, must be changed in release/Dockerfile.in too
+COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
+# TODO: get rid of *.bak, *~ and other spurious non source files
+COPY --from=build /go/src/fortio.org/fortio/ui/static /usr/share/fortio/static
+COPY --from=build /go/src/fortio.org/fortio/ui/templates /usr/share/fortio/templates
+#COPY --from=build /go/src/fortio.org/fortio_go_latest.bin /usr/bin/fortio_go_latest
+#COPY --from=build /go/src/fortio.org/fortio_go1.8.bin /usr/bin/fortio_go1.8
+COPY --from=build /go/src/fortio.org/fortio_go_latest.bin /usr/bin/fortio
+EXPOSE 8079
+EXPOSE 8080
+EXPOSE 8081
+VOLUME /var/lib/fortio
+WORKDIR /var/lib/fortio
+ENTRYPOINT ["/usr/bin/fortio"]
+# start the server mode (grpc ping on 8079, http echo and UI on 8080, redirector on 8081) by default
+CMD ["server"]
diff --git a/vendor/fortio.org/fortio/Dockerfile.build b/vendor/fortio.org/fortio/Dockerfile.build
new file mode 100644
index 0000000000..cac67111f0
--- /dev/null
+++ b/vendor/fortio.org/fortio/Dockerfile.build
@@ -0,0 +1,36 @@
+# Dependencies and linters for build:
+FROM ubuntu:xenial
+# Need gcc for -race test (and some linters though those work with CGO_ENABLED=0)
+RUN apt-get -y update && \
+ apt-get --no-install-recommends -y upgrade && \
+ apt-get --no-install-recommends -y install ca-certificates curl make git gcc \
+ libc6-dev apt-transport-https ssh ruby-dev build-essential rpm
+# Install FPM
+RUN gem install --no-ri --no-rdoc fpm
+# Install both go1.11 [go1.10.3] and go1.8.7 so we don't become incompatible with 1.8
+RUN curl -f https://storage.googleapis.com/golang/go1.8.7.linux-amd64.tar.gz | tar xfz - -C /usr/local
+# Newer go have no problem with being installed in random directories without requiring GOROOT so we
+# leave the old go in /usr/local and put the new ones, despite being preferred, in a different root:
+# No report of issues with 1.11 so far so skipping 1.10
+#RUN mkdir /go1.10
+#RUN curl -f https://dl.google.com/go/go1.10.4.linux-amd64.tar.gz | tar xfz - -C /go1.10
+RUN mkdir /go1.11
+RUN curl -f https://dl.google.com/go/go1.11.linux-amd64.tar.gz | tar xfz - -C /go1.11
+ENV GOPATH /go
+RUN mkdir -p $GOPATH/bin
+# We do pick the latest go first in the path
+ENV PATH /go1.11/go/bin:/usr/local/go/bin:$PATH:$GOPATH/bin
+RUN go version # check it's indeed the version we expect
+# This is now handled through dep and vendor submodule
+# RUN go get -u google.golang.org/grpc
+# Install dep
+RUN curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
+# Install meta linters
+RUN go get -u github.com/alecthomas/gometalinter
+RUN gometalinter -i -u
+WORKDIR /go/src/fortio.org
+# Docker:
+RUN curl -fsSL "https://download.docker.com/linux/ubuntu/gpg" | apt-key add
+RUN echo "deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable" > /etc/apt/sources.list.d/docker.list
+RUN apt-get -y update
+RUN apt-get install --no-install-recommends -y docker-ce
diff --git a/vendor/fortio.org/fortio/Dockerfile.echosrv b/vendor/fortio.org/fortio/Dockerfile.echosrv
new file mode 100644
index 0000000000..16c8b49bb7
--- /dev/null
+++ b/vendor/fortio.org/fortio/Dockerfile.echosrv
@@ -0,0 +1,10 @@
+# Build the binaries in larger image
+FROM docker.io/fortio/fortio.build:v12 as build
+WORKDIR /go/src/fortio.org
+COPY . fortio
+RUN make -C fortio official-build-version BUILD_DIR=/build OFFICIAL_TARGET=fortio.org/fortio/echosrv OFFICIAL_BIN=../echosrv.bin
+# Minimal image with just the binary
+FROM scratch
+COPY --from=build /go/src/fortio.org/echosrv.bin /usr/bin/echosrv
+EXPOSE 8080
+ENTRYPOINT ["/usr/bin/echosrv"]
diff --git a/vendor/fortio.org/fortio/Dockerfile.fcurl b/vendor/fortio.org/fortio/Dockerfile.fcurl
new file mode 100644
index 0000000000..ff9a5f015e
--- /dev/null
+++ b/vendor/fortio.org/fortio/Dockerfile.fcurl
@@ -0,0 +1,11 @@
+# Build the binaries in larger image
+FROM docker.io/fortio/fortio.build:v12 as build
+WORKDIR /go/src/fortio.org
+COPY . fortio
+# fcurl should not need vendor/no dependencies
+RUN make -C fortio official-build-version BUILD_DIR=/build OFFICIAL_TARGET=fortio.org/fortio/fcurl OFFICIAL_BIN=../fcurl.bin
+# Minimal image with just the binary and certs
+FROM scratch
+COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
+COPY --from=build /go/src/fortio.org/fcurl.bin /usr/bin/fcurl
+ENTRYPOINT ["/usr/bin/fcurl"]
diff --git a/vendor/fortio.org/fortio/Dockerfile.test b/vendor/fortio.org/fortio/Dockerfile.test
new file mode 100644
index 0000000000..bfe3195265
--- /dev/null
+++ b/vendor/fortio.org/fortio/Dockerfile.test
@@ -0,0 +1,5 @@
+# Getting the source tree ready for running tests (sut)
+FROM docker.io/fortio/fortio.build:v12
+WORKDIR /go/src/fortio.org
+COPY . fortio
+RUN make -C fortio dependencies
diff --git a/vendor/fortio.org/fortio/LICENSE b/vendor/fortio.org/fortio/LICENSE
new file mode 100644
index 0000000000..2c45691e88
--- /dev/null
+++ b/vendor/fortio.org/fortio/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016 Istio Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/fortio.org/fortio/Makefile b/vendor/fortio.org/fortio/Makefile
new file mode 100755
index 0000000000..a09cbe12bd
--- /dev/null
+++ b/vendor/fortio.org/fortio/Makefile
@@ -0,0 +1,288 @@
+# Makefile to build fortio's docker images as well as short cut
+# for local test/install
+#
+# See also release/README.md
+#
+
+IMAGES=echosrv fcurl # plus the combo image / Dockerfile without ext.
+
+DOCKER_PREFIX := docker.io/fortio/fortio
+BUILD_IMAGE_TAG := v12
+BUILD_IMAGE := $(DOCKER_PREFIX).build:$(BUILD_IMAGE_TAG)
+
+TAG:=$(USER)$(shell date +%y%m%d_%H%M%S)
+
+DOCKER_TAG = $(DOCKER_PREFIX)$(IMAGE):$(TAG)
+
+CERT_TEMP_DIR := ./cert-tmp/
+
+# go test ./... and others run in vendor/ and cause problems (!)
+# so to avoid `can't load package: package fortio.org/fortio/...: no Go files in ...`
+# note that only go1.8 needs the grep -v vendor but we are compatible with 1.8
+# ps: can't use go list (and get packages as canonical fortio.org/fortio/x)
+# as somehow that makes gometaliner silently not find/report errors...
+PACKAGES ?= $(shell find . -type d -print | egrep -v "/(\.|vendor|tmp|static|templates|release|docs|json|cert-tmp|debian)")
+# Marker for whether vendor submodule is here or not already
+GRPC_DIR:=./vendor/google.golang.org/grpc
+
+# Local targets:
+go-install: submodule
+ go install $(PACKAGES)
+
+# Run/test dependencies
+dependencies: submodule certs
+
+# Only generate certs if needed
+certs: $(CERT_TEMP_DIR)/server.cert
+
+# Generate certs for unit and release tests.
+$(CERT_TEMP_DIR)/server.cert: cert-gen
+ ./cert-gen
+
+# Remove certificates
+certs-clean:
+ rm -rf $(CERT_TEMP_DIR)
+
+TEST_TIMEOUT:=90s
+
+# Local test
+test: dependencies
+ go test -timeout $(TEST_TIMEOUT) -race $(PACKAGES)
+
+# To debug strange linter errors, uncomment
+# DEBUG_LINTERS="--debug"
+
+local-lint: dependencies vendor.check
+ gometalinter $(DEBUG_LINTERS) \
+ --deadline=180s --enable-all --aggregate --exclude=.pb.go \
+ --disable=gocyclo --disable=gas --disable=gosec \
+ --disable=gochecknoglobals --disable=gochecknoinits \
+ --line-length=132 $(LINT_PACKAGES)
+
+# Lint everything by default but ok to "make lint LINT_PACKAGES=./fhttp"
+LINT_PACKAGES:=$(PACKAGES)
+# TODO: do something about cyclomatic complexity; maybe reenable gas and gosec
+# Note CGO_ENABLED=0 is needed to avoid errors as gcc isn't part of the
+# build image
+lint: dependencies
+ docker run -v $(CURDIR):/go/src/fortio.org/fortio $(BUILD_IMAGE) bash -c \
+ "cd /go/src/fortio.org/fortio && time go install $(LINT_PACKAGES) \
+ && time make local-lint LINT_PACKAGES=\"$(LINT_PACKAGES)\""
+
+# This really also tests the release process and build on windows,mac,linux
+# and the docker images, not just "web" (ui) stuff that it also exercises.
+release-test:
+ ./Webtest.sh
+
+# old name for release-test
+webtest: release-test
+
+coverage: dependencies
+ ./.circleci/coverage.sh
+ curl -s https://codecov.io/bash | bash
+
+# Submodule handling when not already there
+submodule: $(GRPC_DIR)
+
+$(GRPC_DIR):
+ $(MAKE) submodule-sync
+
+# If you want to force update/sync, invoke 'make submodule-sync' directly
+submodule-sync:
+ git submodule sync
+ git submodule update --init
+
+# Short cut for pulling/updating to latest of the current branch
+pull:
+ git pull
+ $(MAKE) submodule-sync
+
+# https://github.com/istio/vendor-istio#how-do-i-add--change-a-dependency
+# PS: for fortio no dependencies should be added, only grpc updated.
+depend.status:
+ @echo "No error means your Gopkg.* are in sync and ok with vendor/"
+ dep status
+ cp Gopkg.* vendor/
+
+depend.update.full: depend.cleanlock depend.update
+
+depend.cleanlock:
+ -rm Gopkg.lock
+
+depend.update:
+ @echo "Running dep ensure with DEPARGS=$(DEPARGS)"
+ time dep ensure $(DEPARGS)
+ cp Gopkg.* vendor/
+ @echo "now check the diff in vendor/ and make a PR"
+
+vendor.check:
+ @echo "Checking that Gopkg.* are in sync with vendor/ submodule:"
+ @echo "if this fails, 'make pull' and/or seek on-call help"
+ diff Gopkg.toml vendor/
+ diff Gopkg.lock vendor/
+
+.PHONY: depend.status depend.cleanlock depend.update depend.update.full vendor.check
+
+
+# Docker: Pushes the combo image and the smaller image(s)
+all: test go-install lint docker-version docker-push-internal
+ @for img in $(IMAGES); do \
+ $(MAKE) docker-push-internal IMAGE=.$$img TAG=$(TAG); \
+ done
+
+# When changing the build image, this Makefile should be edited first
+# (bump BUILD_IMAGE_TAG), also change this list if the image is used in
+# more places.
+FILES_WITH_IMAGE:= .circleci/config.yml Dockerfile Dockerfile.echosrv \
+ Dockerfile.test Dockerfile.fcurl release/Dockerfile.in Webtest.sh
+# then run make update-build-image and check the diff, etc... see release/README.md
+update-build-image:
+ $(MAKE) docker-push-internal IMAGE=.build TAG=$(BUILD_IMAGE_TAG)
+
+update-build-image-tag:
+ sed -i .bak -e 's!$(DOCKER_PREFIX).build:v..!$(BUILD_IMAGE)!g' $(FILES_WITH_IMAGE)
+
+docker-version:
+ @echo "### Docker is `which docker`"
+ @docker version
+
+docker-internal: dependencies
+ @echo "### Now building $(DOCKER_TAG)"
+ docker build -f Dockerfile$(IMAGE) -t $(DOCKER_TAG) .
+
+docker-push-internal: docker-internal
+ @echo "### Now pushing $(DOCKER_TAG)"
+ docker push $(DOCKER_TAG)
+
+release: dist
+ release/release.sh
+
+.PHONY: all docker-internal docker-push-internal docker-version test dependencies
+
+.PHONY: go-install lint install-linters coverage webtest release-test update-build-image
+
+.PHONY: local-lint update-build-image-tag release submodule submodule-sync pull certs certs-clean
+
+# Targets used for official builds (initially from Dockerfile)
+BUILD_DIR := /tmp/fortio_build
+LIB_DIR := /usr/share/fortio
+DATA_DIR := .
+OFFICIAL_BIN := ../fortio.bin
+GOOS :=
+GO_BIN := go
+GIT_STATUS ?= $(strip $(shell git status --porcelain | wc -l))
+GIT_TAG ?= $(shell git describe --tags --match 'v*')
+GIT_SHA ?= $(shell git rev-parse HEAD)
+# Main/default binary to build: (can be changed to build fcurl or echosrv instead)
+OFFICIAL_TARGET := fortio.org/fortio
+
+# Putting spaces in linker replaced variables is hard but does work.
+# This sets up the static directory outside of the go source tree and
+# the default data directory to a /var/lib/... volume
+# + rest of build time/git/version magic.
+
+$(BUILD_DIR)/build-info.txt:
+ -mkdir -p $(BUILD_DIR)
+ echo "$(shell date +'%Y-%m-%d %H:%M') $(GIT_SHA)" > $@
+
+$(BUILD_DIR)/link-flags.txt: $(BUILD_DIR)/build-info.txt
+ echo "-s -X fortio.org/fortio/ui.resourcesDir=$(LIB_DIR) -X main.defaultDataDir=$(DATA_DIR) \
+ -X \"fortio.org/fortio/version.buildInfo=$(shell cat $<)\" \
+ -X fortio.org/fortio/version.tag=$(GIT_TAG) \
+ -X fortio.org/fortio/version.gitstatus=$(GIT_STATUS)" | tee $@
+
+.PHONY: official-build official-build-version official-build-clean
+
+official-build: $(BUILD_DIR)/link-flags.txt
+ $(GO_BIN) version
+ CGO_ENABLED=0 GOOS=$(GOOS) $(GO_BIN) build -a -ldflags '$(shell cat $(BUILD_DIR)/link-flags.txt)' -o $(OFFICIAL_BIN) $(OFFICIAL_TARGET)
+
+official-build-version: official-build
+ $(OFFICIAL_BIN) version
+
+official-build-clean:
+ -$(RM) $(BUILD_DIR)/build-info.txt $(BUILD_DIR)/link-flags.txt $(OFFICIAL_BIN) release/Makefile
+
+# Create a complete source tree (including submodule) with naming matching debian package conventions
+TAR ?= gtar # on macos need gtar to get --owner
+DIST_VERSION ?= $(shell echo $(GIT_TAG) | sed -e "s/^v//")
+DIST_PATH:=release/fortio_$(DIST_VERSION).orig.tar
+
+.PHONY: dist dist-sign distclean
+
+release/Makefile: release/Makefile.dist
+ echo "GIT_TAG := $(GIT_TAG)" > $@
+ echo "GIT_STATUS := $(GIT_STATUS)" >> $@
+ echo "GIT_SHA := $(GIT_SHA)" >> $@
+ cat $< >> $@
+
+dist: submodule release/Makefile
+ # put the source files where they can be used as gopath by go,
+ # except leave the debian dir where it needs to be (below the version dir)
+ git ls-files --recurse-submodules \
+ | awk '{printf("src/fortio.org/fortio/%s\n", $$0)}' \
+ | (cd ../../.. ; $(TAR) \
+ --xform="s|^src|fortio-$(DIST_VERSION)/src|;s|^.*debian/|fortio-$(DIST_VERSION)/debian/|" \
+ --owner=0 --group=0 -c -f - -T -) > $(DIST_PATH)
+ # move the release/Makefile at the top (after the version dir)
+ $(TAR) --xform="s|^release/|fortio-$(DIST_VERSION)/|" \
+ --owner=0 --group=0 -r -f $(DIST_PATH) release/Makefile
+ gzip -f $(DIST_PATH)
+ @echo "Created $(CURDIR)/$(DIST_PATH).gz"
+
+dist-sign:
+ gpg --armor --detach-sign $(DIST_PATH)
+
+distclean: official-build-clean
+ -rm -f *.profile.* */*.profile.*
+ -rm -rf $(CERT_TEMP_DIR)
+
+# Install target more compatible with standard gnu/debian practices. Uses DESTDIR as staging prefix
+
+install: official-install
+
+.PHONY: install official-install
+
+BIN_INSTALL_DIR = $(DESTDIR)/usr/bin
+LIB_INSTALL_DIR = $(DESTDIR)$(LIB_DIR)
+MAN_INSTALL_DIR = $(DESTDIR)/usr/share/man/man1
+#DATA_INSTALL_DIR = $(DESTDIR)$(DATA_DIR)
+BIN_INSTALL_EXEC = fortio
+
+official-install: official-build-clean official-build-version
+ -mkdir -p $(BIN_INSTALL_DIR) $(LIB_INSTALL_DIR) $(MAN_INSTALL_DIR) # $(DATA_INSTALL_DIR)
+ # -chmod 1777 $(DATA_INSTALL_DIR)
+ cp $(OFFICIAL_BIN) $(BIN_INSTALL_DIR)/$(BIN_INSTALL_EXEC)
+ cp -r ui/templates ui/static $(LIB_INSTALL_DIR)
+ cp docs/fortio.1 $(MAN_INSTALL_DIR)
+
+# Test distribution (only used by maintainer)
+
+.PHONY: debian-dist-common debian-dist-test debian-dist debian-sbuild
+
+# warning, will be cleaned
+TMP_DIST_DIR:=~/tmp/fortio-dist
+
+# debian getting version from debian/changelog while we get it from git tags
+# doesn't help making this simple: (TODO: unify or autoupdate the 3 versions)
+
+debian-dist-common:
+ $(MAKE) dist TAR=tar
+ -mkdir -p $(TMP_DIST_DIR)
+ rm -rf $(TMP_DIST_DIR)/fortio*
+ cp $(CURDIR)/$(DIST_PATH).gz $(TMP_DIST_DIR)
+ cd $(TMP_DIST_DIR); tar xfz *.tar.gz
+ -cd $(TMP_DIST_DIR);\
+ ln -s *.tar.gz fortio_`cd fortio-$(DIST_VERSION); dpkg-parsechangelog -S Version | sed -e "s/-.*//"`.orig.tar.gz
+
+debian-dist-test: debian-dist-common
+ cd $(TMP_DIST_DIR)/fortio-$(DIST_VERSION); FORTIO_SKIP_TESTS=Y dpkg-buildpackage -us -uc
+ cd $(TMP_DIST_DIR)/fortio-$(DIST_VERSION); lintian
+
+debian-dist: distclean debian-dist-common
+ cd $(TMP_DIST_DIR)/fortio-$(DIST_VERSION); FORTIO_SKIP_TESTS=N dpkg-buildpackage -ap
+ cd $(TMP_DIST_DIR)/fortio-$(DIST_VERSION); lintian
+
+# assumes you ran one of the previous 2 target first
+debian-sbuild:
+ cd $(TMP_DIST_DIR)/fortio-$(DIST_VERSION); sbuild
diff --git a/vendor/fortio.org/fortio/README.md b/vendor/fortio.org/fortio/README.md
new file mode 100644
index 0000000000..040401bb32
--- /dev/null
+++ b/vendor/fortio.org/fortio/README.md
@@ -0,0 +1,714 @@
+# Fortio
+
+[![Awesome Go](https://fortio.org/mentioned-badge.svg)](https://github.com/avelino/awesome-go#networking)
+[![Go Report Card](https://goreportcard.com/badge/fortio.org/fortio)](https://goreportcard.com/report/fortio.org/fortio)
+[![GoDoc](https://godoc.org/fortio.org/fortio?status.svg)](https://godoc.org/fortio.org/fortio)
+[![codecov](https://codecov.io/gh/fortio/fortio/branch/master/graph/badge.svg)](https://codecov.io/gh/fortio/fortio)
+[![CircleCI](https://circleci.com/gh/fortio/fortio.svg?style=shield)](https://circleci.com/gh/fortio/fortio)
+
+
+Fortio (Φορτίο) started as, and is, [Istio](https://istio.io/)'s load testing tool and now graduated to be its own project.
+
+Fortio runs at a specified query per second (qps) and records a histogram of execution time
+and calculates percentiles (e.g. p99 ie the response time such as 99% of the requests take less than that number (in seconds, SI unit)).
+It can run for a set duration, for a fixed number of calls, or until interrupted (at a constant target QPS, or max speed/load per connection/thread).
+
+The name fortio comes from greek [φορτίο](https://fortio.org/fortio.mp3) which means load/burden.
+
+Fortio is a fast, small (3Mb docker image, minimal dependencies), reusable, embeddable go library as well as a command line tool and server process,
+the server includes a simple web UI and graphical representation of the results (both a single latency graph and a multiple results comparative min, max, avg, qps and percentiles graphs).
+
+Fortio also includes a set of server side features (similar to httpbin) to help debugging and testing: request echo back including headers, adding latency or error codes with a probability distribution, tcp proxying, GRPC echo/health in addition to http, etc...
+
+Fortio is quite mature and very stable with no known major bugs (lots of possible improvements if you want to contribute though!),
+and when bugs are found they are fixed quickly, so after 1 year of development and 42 incremental releases, we reached 1.0 in June 2018.
+
+## Installation
+
+1. [Install go](https://golang.org/doc/install) (golang 1.8 or later)
+2. `go get fortio.org/fortio`
+3. you can now run `fortio` (from your gopath bin/ directory)
+
+Or use docker, for instance:
+
+```shell
+docker run -p 8080:8080 -p 8079:8079 fortio/fortio server & # For the server
+docker run fortio/fortio load http://www.google.com/ # For a test run
+```
+
+Or download one the binary distributions, from the [releases](https://github.com/fortio/fortio/releases) assets page or for instance:
+
+```shell
+curl -L https://github.com/fortio/fortio/releases/download/v1.3.1/fortio-linux_x64-1.3.1.tgz \
+ | sudo tar -C / -xvzpf -
+# or the debian package
+wget https://github.com/fortio/fortio/releases/download/v1.3.1/fortio_1.3.1-1_amd64.deb
+dpkg -i fortio_1.3.1-1_amd64.deb
+# or the rpm
+rpm -i https://github.com/fortio/fortio/releases/download/v1.3.1/fortio-1.3.1-1.x86_64.rpm
+```
+
+On a MacOS you can also install Fortio using [Homebrew](https://brew.sh/):
+
+```shell
+brew install fortio
+```
+
+Once `fortio server` is running, you can visit its web UI at [http://localhost:8080/fortio/](http://localhost:8080/fortio/)
+
+You can get a preview of the reporting/graphing UI at [https://fortio.istio.io/](https://fortio.istio.io/)
+and on [istio.io/docs/performance-and-scalability/synthetic-benchmarks/](https://istio.io/docs/performance-and-scalability/synthetic-benchmarks/)
+
+## Command line arguments
+
+Fortio can be an http or grpc load generator, gathering statistics using the `load` subcommand,
+or start simple http and grpc ping servers, as well as a basic web UI, result graphing and https redirector,
+with the `server` command or issue grpc ping messages using the `grpcping` command.
+It can also fetch a single URL's for debugging when using the `curl` command (or the `-curl` flag to the load command).
+You can run just the redirector with `redirect`.
+If you saved JSON results (using the web UI or directly from the command line), you can browse and graph those results using the `report` command.
+The `version` command will print version and build information, `fortio version -s` just the version.
+Lastly, you can learn which flags are available using `help` command.
+
+Most important flags for http load generation:
+
+| Flag | Description, example |
+| -------------|----------------------|
+| `-qps rate` | Queries Per Seconds or 0 for no wait/max qps |
+| `-c connections` | Number of parallel simultaneous connections (and matching go routine) |
+| `-t duration` | How long to run the test (for instance `-t 30min` for 30 minutes) or 0 to run until ^C, example (default 5s) |
+| `-n numcalls` | Run for exactly this number of calls instead of duration. Default (0) is to use duration (-t). |
+| `-r resolution` | Resolution of the histogram lowest buckets in seconds (default 0.001 i.e 1ms), use 1/10th of your expected typical latency |
+| `-H "header: value"` | Can be specified multiple times to add headers (including Host:) |
+| `-a` | Automatically save JSON result with filename based on labels and timestamp |
+| `-json filename` | Filename or `-` for stdout to output json result (relative to `-data-dir` by default, should end with .json if you want `fortio report` to show them; using `-a` is typically a better option)|
+| `-labels "l1 l2 ..."` | Additional config data/labels to add to the resulting JSON, defaults to target URL and hostname|
+
+You can switch from http GET queries to POST by setting `-content-type` or passing one of the `-payload-*` option.
+
+Full list of command line flags (`fortio help`):
+
+
+
+Φορτίο 1.3.1 usage:
+ fortio command [flags] target
+where command is one of: load (load testing), server (starts grpc ping and
+http echo/ui/redirect/proxy servers), grpcping (grpc client), report (report
+only UI server), redirect (redirect only server), or curl (single URL debug).
+where target is a url (http load tests) or host:port (grpc health test).
+flags are:
+ -H header
+ Additional header(s)
+ -L Follow redirects (implies -std-client) - do not use for load test
+ -P value
+ Proxies to run, e.g -P "localport1 dest_host1:dest_port1" -P "[::1]:0
+www.google.com:443" ...
+ -a Automatically save JSON result with filename based on labels & timestamp
+ -abort-on int
+ Http code that if encountered aborts the run. e.g. 503 or -1 for socket
+errors.
+ -allow-initial-errors
+ Allow and don't abort on initial warmup errors
+ -base-url string
+ base URL used as prefix for data/index.tsv generation. (when empty, the
+url from the first request is used)
+ -c int
+ Number of connections/goroutine/threads (default 4)
+ -cacert Path
+ Path to a custom CA certificate file to be used for the GRPC client
+TLS, if empty, use https:// prefix for standard internet CAs TLS
+ -cert Path
+ Path to the certificate file to be used for GRPC server TLS
+ -compression
+ Enable http compression
+ -content-type string
+ Sets http content type. Setting this value switches the request method
+from GET to POST.
+ -curl
+ Just fetch the content once
+ -data-dir Directory
+ Directory where JSON results are stored/read (default ".")
+ -echo-debug-path string
+ http echo server URI for debug, empty turns off that part (more secure)
+(default "/debug")
+ -gomaxprocs int
+ Setting for runtime.GOMAXPROCS, <1 doesn't change the default
+ -grpc
+ Use GRPC (health check by default, add -ping for ping) for load testing
+ -grpc-max-streams uint
+ MaxConcurrentStreams for the grpc server. Default (0) is to leave the
+option unset.
+ -grpc-ping-delay duration
+ grpc ping delay in response
+ -grpc-port string
+ grpc server port. Can be in the form of host:port, ip:port or port or
+/unix/domain/path or "disabled" to not start the grpc server. (default "8079")
+ -halfclose
+ When not keepalive, whether to half close the connection (only for fast
+http)
+ -health
+ grpc ping client mode: use health instead of ping
+ -healthservice string
+ which service string to pass to health check
+ -http-port string
+ http echo server port. Can be in the form of host:port, ip:port, port
+or /unix/domain/path. (default "8080")
+ -http1.0
+ Use http1.0 (instead of http 1.1)
+ -httpbufferkb kbytes
+ Size of the buffer (max data size) for the optimized http client in
+kbytes (default 128)
+ -httpccch
+ Check for Connection: Close Header
+ -https-insecure
+ Long form of the -k flag
+ -json path
+ Json output to provided file path or '-' for stdout (empty = no json
+output, unless -a is used)
+ -k Do not verify certs in https connections
+ -keepalive
+ Keep connection alive (only for fast http 1.1) (default true)
+ -key Path
+ Path to the key file used for GRPC server TLS
+ -labels string
+ Additional config data/labels to add to the resulting JSON, defaults to
+target URL and hostname
+ -logcaller
+ Logs filename and line number of callers to log (default true)
+ -loglevel value
+ loglevel, one of [Debug Verbose Info Warning Error Critical Fatal]
+(default Info)
+ -logprefix string
+ Prefix to log lines before logged messages (default "> ")
+ -maxpayloadsizekb int
+ MaxPayloadSize is the maximum size of payload to be generated by the
+EchoHandler size= argument. In Kbytes. (default 256)
+ -n int
+ Run for exactly this number of calls instead of duration. Default (0)
+is to use duration (-t). Default is 1 when used as grpc ping count.
+ -p string
+ List of pXX to calculate (default "50,75,90,99,99.9")
+ -payload string
+ Payload string to send along
+ -payload-file path
+        File path to be used as payload (POST for http), replaces -payload when
+set.
+ -payload-size int
+ Additional random payload size, replaces -payload when set > 0, must be
+smaller than -maxpayloadsizekb. Setting this switches http to POST.
+ -ping
+ grpc load test: use ping instead of health
+ -profile file
+ write .cpu and .mem profiles to file
+ -qps float
+ Queries Per Seconds or 0 for no wait/max qps (default 8)
+ -quiet
+ Quiet mode: sets the loglevel to Error and reduces the output.
+ -r float
+ Resolution of the histogram lowest buckets in seconds (default 0.001)
+ -redirect-port string
+ Redirect all incoming traffic to https URL (need ingress to work
+properly). Can be in the form of host:port, ip:port, port or "disabled" to
+disable the feature. (default "8081")
+ -s int
+ Number of streams per grpc connection (default 1)
+ -static-dir path
+ Absolute path to the dir containing the static files dir
+ -stdclient
+ Use the slower net/http standard client (works for TLS)
+ -sync string
+ index.tsv or s3/gcs bucket xml URL to fetch at startup for server modes.
+ -sync-interval duration
+ Refresh the url every given interval (default, no refresh)
+ -t duration
+ How long to run the test or 0 to run until ^C (default 5s)
+ -timeout duration
+ Connection and read timeout value (for http) (default 15s)
+ -ui-path string
+ http server URI for UI, empty turns off that part (more secure)
+(default "/fortio/")
+ -unix-socket path
+ Unix domain socket path to use for physical connection
+ -user user:password
+ User credentials for basic authentication (for http). Input data format
+should be user:password
+
+
+
+See also the FAQ entry about [fortio flags for best results](https://github.com/fortio/fortio/wiki/FAQ#i-want-to-get-the-best-results-what-flags-should-i-pass)
+
+## Example use and output
+
+### Start the internal servers
+
+```Shell
+$ fortio server &
+Fortio 1.3.1 grpc 'ping' server listening on [::]:8079
+Fortio 1.3.1 https redirector server listening on [::]:8081
+Fortio 1.3.1 echo server listening on [::]:8080
+UI started - visit:
+http://localhost:8080/fortio/
+(or any host/ip reachable on this server)
+14:57:12 I fortio_main.go:217> All fortio 1.3.1 release go1.10.3 servers started!
+```
+
+### Change the port / binding address
+
+By default, Fortio's web/echo servers listen on port 8080 on all interfaces.
+Use the `-http-port` flag to change this behavior:
+
+```Shell
+$ fortio server -http-port 10.10.10.10:8088
+UI starting - visit:
+http://10.10.10.10:8088/fortio/
+Https redirector running on :8081
+Fortio 1.3.1 grpc ping server listening on port :8079
+Fortio 1.3.1 echo server listening on port 10.10.10.10:8088
+```
+
+### Unix domain sockets
+
+You can use unix domain socket for any server/client:
+
+```Shell
+$ fortio server --http-port /tmp/fortio-uds-http &
+Fortio 1.3.1 grpc 'ping' server listening on [::]:8079
+Fortio 1.3.1 https redirector server listening on [::]:8081
+Fortio 1.3.1 echo server listening on /tmp/fortio-uds-http
+UI started - visit:
+fortio curl -unix-socket=/tmp/fortio-uds-http http://localhost/fortio/
+14:58:45 I fortio_main.go:217> All fortio 1.3.1 unknown go1.10.3 servers started!
+$ fortio curl -unix-socket=/tmp/fortio-uds-http http://foo.bar/debug
+15:00:48 I http_client.go:428> Using unix domain socket /tmp/fortio-uds-http instead of foo.bar http
+HTTP/1.1 200 OK
+Content-Type: text/plain; charset=UTF-8
+Date: Wed, 08 Aug 2018 22:00:48 GMT
+Content-Length: 231
+
+Φορτίο version 1.3.1 unknown go1.10.3 echo debug server up for 2m3.4s on ldemailly-macbookpro - request from
+
+GET /debug HTTP/1.1
+
+headers:
+
+Host: foo.bar
+User-Agent: fortio.org/fortio-1.3.1
+
+body:
+```
+
+### GRPC
+
+#### Simple grpc ping
+
+```Shell
+$ fortio grpcping localhost
+02:29:27 I pingsrv.go:116> Ping RTT 305334 (avg of 342970, 293515, 279517 ns) clock skew -2137
+Clock skew histogram usec : count 1 avg -2.137 +/- 0 min -2.137 max -2.137 sum -2.137
+# range, mid point, percentile, count
+>= -4 < -2 , -3 , 100.00, 1
+# target 50% -2.137
+RTT histogram usec : count 3 avg 305.334 +/- 27.22 min 279.517 max 342.97 sum 916.002
+# range, mid point, percentile, count
+>= 250 < 300 , 275 , 66.67, 2
+>= 300 < 350 , 325 , 100.00, 1
+# target 50% 294.879
+```
+
+#### Change the target port for grpc
+
+The value of `-grpc-port` (default 8079) is used when specifying a hostname or an IP address in `grpcping`. Add `:port` to the `grpcping` destination to
+change this behavior:
+
+```Shell
+$ fortio grpcping 10.10.10.100:8078 # Connects to gRPC server 10.10.10.100 listening on port 8078
+02:29:27 I pingsrv.go:116> Ping RTT 305334 (avg of 342970, 293515, 279517 ns) clock skew -2137
+Clock skew histogram usec : count 1 avg -2.137 +/- 0 min -2.137 max -2.137 sum -2.137
+# range, mid point, percentile, count
+>= -4 < -2 , -3 , 100.00, 1
+# target 50% -2.137
+RTT histogram usec : count 3 avg 305.334 +/- 27.22 min 279.517 max 342.97 sum 916.002
+# range, mid point, percentile, count
+>= 250 < 300 , 275 , 66.67, 2
+>= 300 < 350 , 325 , 100.00, 1
+# target 50% 294.879
+```
+
+#### `grpcping` using TLS
+
+* First, start Fortio server with the `-cert` and `-key` flags:
+
+`/path/to/fortio/server.crt` and `/path/to/fortio/server.key` are paths to the TLS certificate and key that
+you must provide.
+
+```Shell
+$ fortio server -cert /path/to/fortio/server.crt -key /path/to/fortio/server.key
+UI starting - visit:
+http://localhost:8080/fortio/
+Https redirector running on :8081
+Fortio 1.3.1 grpc ping server listening on port :8079
+Fortio 1.3.1 echo server listening on port localhost:8080
+Using server certificate /path/to/fortio/server.crt to construct TLS credentials
+Using server key /path/to/fortio/server.key to construct TLS credentials
+```
+
+* Next, use `grpcping` with the `-cacert` flag:
+
+`/path/to/fortio/ca.crt` is the path to the CA certificate
+that issued the server certificate for `localhost`. In our example, the server certificate is
+`/path/to/fortio/server.crt`:
+
+```Shell
+$ fortio grpcping -cacert /path/to/fortio/ca.crt localhost
+Using server certificate /path/to/fortio/ca.crt to construct TLS credentials
+16:00:10 I pingsrv.go:129> Ping RTT 501452 (avg of 595441, 537088, 371828 ns) clock skew 31094
+Clock skew histogram usec : count 1 avg 31.094 +/- 0 min 31.094 max 31.094 sum 31.094
+# range, mid point, percentile, count
+>= 31.094 <= 31.094 , 31.094 , 100.00, 1
+# target 50% 31.094
+RTT histogram usec : count 3 avg 501.45233 +/- 94.7 min 371.828 max 595.441 sum 1504.357
+# range, mid point, percentile, count
+>= 371.828 <= 400 , 385.914 , 33.33, 1
+> 500 <= 595.441 , 547.721 , 100.00, 2
+# target 50% 523.86
+```
+
+#### GRPC to standard https service
+
+`grpcping` can connect to a non-Fortio TLS server by prefacing the destination with `https://`:
+
+```Shell
+$ fortio grpcping https://fortio.istio.io
+11:07:55 I grpcrunner.go:275> stripping https scheme. grpc destination: fortio.istio.io. grpc port: 443
+Clock skew histogram usec : count 1 avg 12329.795 +/- 0 min 12329.795 max 12329.795 sum 12329.795
+# range, mid point, percentile, count
+>= 12329.8 <= 12329.8 , 12329.8 , 100.00, 1
+# target 50% 12329.8
+```
+
+### Simple load test
+
+Load (low default qps/threading) test:
+
+```Shell
+$ fortio load http://www.google.com
+Fortio 1.3.1 running at 8 queries per second, 8->8 procs, for 5s: http://www.google.com
+19:10:33 I httprunner.go:84> Starting http test for http://www.google.com with 4 threads at 8.0 qps
+Starting at 8 qps with 4 thread(s) [gomax 8] for 5s : 10 calls each (total 40)
+19:10:39 I periodic.go:314> T002 ended after 5.056753279s : 10 calls. qps=1.9775534712220633
+19:10:39 I periodic.go:314> T001 ended after 5.058085991s : 10 calls. qps=1.9770324224999916
+19:10:39 I periodic.go:314> T000 ended after 5.058796046s : 10 calls. qps=1.9767549252963101
+19:10:39 I periodic.go:314> T003 ended after 5.059557593s : 10 calls. qps=1.9764573910247019
+Ended after 5.059691387s : 40 calls. qps=7.9056
+Sleep times : count 36 avg 0.49175757 +/- 0.007217 min 0.463508712 max 0.502087879 sum 17.7032725
+Aggregated Function Time : count 40 avg 0.060587641 +/- 0.006564 min 0.052549016 max 0.089893269 sum 2.42350566
+# range, mid point, percentile, count
+>= 0.052549 < 0.06 , 0.0562745 , 47.50, 19
+>= 0.06 < 0.07 , 0.065 , 92.50, 18
+>= 0.07 < 0.08 , 0.075 , 97.50, 2
+>= 0.08 <= 0.0898933 , 0.0849466 , 100.00, 1
+# target 50% 0.0605556
+# target 75% 0.0661111
+# target 99% 0.085936
+# target 99.9% 0.0894975
+Code 200 : 40
+Response Header Sizes : count 40 avg 690.475 +/- 15.77 min 592 max 693 sum 27619
+Response Body/Total Sizes : count 40 avg 12565.2 +/- 301.9 min 12319 max 13665 sum 502608
+All done 40 calls (plus 4 warmup) 60.588 ms avg, 7.9 qps
+```
+
+### GRPC load test
+
+Uses `-s` to use multiple (h2/grpc) streams per connection (`-c`), request to hit the fortio ping grpc endpoint with a delay in replies of 0.25s and an extra payload for 10 bytes and auto save the json result:
+
+```bash
+$ fortio load -a -grpc -ping -grpc-ping-delay 0.25s -payload "01234567890" -c 2 -s 4 https://fortio-stage.istio.io
+Fortio 1.3.1 running at 8 queries per second, 8->8 procs, for 5s: https://fortio-stage.istio.io
+16:32:56 I grpcrunner.go:139> Starting GRPC Ping Delay=250ms PayloadLength=11 test for https://fortio-stage.istio.io with 4*2 threads at 8.0 qps
+16:32:56 I grpcrunner.go:261> stripping https scheme. grpc destination: fortio-stage.istio.io. grpc port: 443
+16:32:57 I grpcrunner.go:261> stripping https scheme. grpc destination: fortio-stage.istio.io. grpc port: 443
+Starting at 8 qps with 8 thread(s) [gomax 8] for 5s : 5 calls each (total 40)
+16:33:04 I periodic.go:533> T005 ended after 5.283227589s : 5 calls. qps=0.9463911814835126
+[...]
+Ended after 5.28514474s : 40 calls. qps=7.5684
+Sleep times : count 32 avg 0.97034752 +/- 0.002338 min 0.967323561 max 0.974838789 sum 31.0511206
+Aggregated Function Time : count 40 avg 0.27731944 +/- 0.001606 min 0.2741372 max 0.280604967 sum 11.0927778
+# range, mid point, percentile, count
+>= 0.274137 <= 0.280605 , 0.277371 , 100.00, 40
+# target 50% 0.277288
+# target 75% 0.278947
+# target 90% 0.279942
+# target 99% 0.280539
+# target 99.9% 0.280598
+Ping SERVING : 40
+All done 40 calls (plus 2 warmup) 277.319 ms avg, 7.6 qps
+Successfully wrote 1210 bytes of Json data to 2018-04-03-163258_fortio_stage_istio_io_ldemailly_macbookpro.json
+```
+
+And the JSON saved is
+
+
+{
+ "RunType": "GRPC Ping Delay=250ms PayloadLength=11",
+ "Labels": "fortio-stage.istio.io , ldemailly-macbookpro",
+ "StartTime": "2018-04-03T16:32:58.895472681-07:00",
+ "RequestedQPS": "8",
+ "RequestedDuration": "5s",
+ "ActualQPS": 7.568383075162479,
+ "ActualDuration": 5285144740,
+ "NumThreads": 8,
+ "Version": "0.9.0",
+ "DurationHistogram": {
+ "Count": 40,
+ "Min": 0.2741372,
+ "Max": 0.280604967,
+ "Sum": 11.092777797,
+ "Avg": 0.277319444925,
+ "StdDev": 0.0016060870789948905,
+ "Data": [
+ {
+ "Start": 0.2741372,
+ "End": 0.280604967,
+ "Percent": 100,
+ "Count": 40
+ }
+ ],
+ "Percentiles": [
+ {
+ "Percentile": 50,
+ "Value": 0.2772881634102564
+ },
+ {
+ "Percentile": 75,
+ "Value": 0.27894656520512817
+ },
+ {
+ "Percentile": 90,
+ "Value": 0.2799416062820513
+ },
+ {
+ "Percentile": 99,
+ "Value": 0.28053863092820513
+ },
+ {
+ "Percentile": 99.9,
+ "Value": 0.2805983333928205
+ }
+ ]
+ },
+ "Exactly": 0,
+ "RetCodes": {
+ "1": 40
+ },
+ "Destination": "https://fortio-stage.istio.io",
+ "Streams": 4,
+ "Ping": true
+}
+
+
+* Load test using gRPC and TLS security. First, start Fortio server with the `-cert` and `-key` flags:
+
+```Shell
+fortio server -cert /etc/ssl/certs/server.crt -key /etc/ssl/certs/server.key
+```
+
+Next, run the `load` command with the `-cacert` flag:
+
+```Shell
+fortio load -cacert /etc/ssl/certs/ca.crt -grpc localhost:8079
+```
+
+### Curl like (single request) mode
+
+```Shell
+$ fortio load -curl -H Foo:Bar http://localhost:8080/debug
+14:26:26 I http.go:133> Setting regular extra header Foo: Bar
+HTTP/1.1 200 OK
+Content-Type: text/plain; charset=UTF-8
+Date: Mon, 08 Jan 2018 22:26:26 GMT
+Content-Length: 230
+
+Φορτίο version 1.3.1 echo debug server up for 39s on ldemailly-macbookpro - request from [::1]:65055
+
+GET /debug HTTP/1.1
+
+headers:
+
+Host: localhost:8080
+User-Agent: fortio.org/fortio-1.3.1
+Foo: Bar
+
+body:
+
+```
+
+### Report only UI
+
+If you have json files saved from running the full UI, you can serve just the reports:
+
+```Shell
+$ fortio report
+Browse only UI starting - visit:
+http://localhost:8080/
+Https redirector running on :8081
+```
+
+## Server URLs and features
+
+Fortio `server` has the following feature for the http listening on 8080 (all paths and ports are configurable through flags above):
+
+* A simple echo server which will echo back posted data (for any path not mentioned below).
+
+ For instance `curl -d abcdef http://localhost:8080/` returns `abcdef` back. It supports the following optional query argument parameters:
+
+| Parameter | Usage, example |
+|-----------|----------------|
+| delay | duration to delay the response by. Can be a single value or a comma separated list of probabilities, e.g `delay=150us:10,2ms:5,0.5s:1` for 10% of chance of a 150 us delay, 5% of a 2ms delay and 1% of a 1/2 second delay |
+| status | http status to return instead of 200. Can be a single value or a comma separated list of probabilities, e.g `status=404:10,503:5,429:1` for 10% of chance of a 404 status, 5% of a 503 status and 1% of a 429 status |
+| size | size of the payload to reply instead of echoing input. Also works as probabilities list. `size=1024:10,512:5` 10% of response will be 1k and 5% will be 512 bytes payload and the rest defaults to echoing back. |
+| close | close the socket after answering e.g `close=true` |
+| header | header(s) to add to the reply e.g. `&header=Foo:Bar&header=X:Y` |
+
+* `/debug` will echo back the request in plain text for human debugging.
+
+* `/fortio/` A UI to
+ * Run/Trigger tests and graph the results.
+ * A UI to browse saved results and single graph or multi graph them (comparative graph of min,avg, median, p75, p99, p99.9 and max).
+ * Proxy/fetch other URLs
+  * `/fortio/data/index.tsv` a tab-separated value file conforming to Google cloud storage [URL list data transfer format](https://cloud.google.com/storage/transfer/create-url-list) so you can export/backup local results to the cloud.
+ * Download/sync peer to peer JSON results files from other Fortio servers (using their `index.tsv` URLs)
+ * Download/sync from an Amazon S3 or Google Cloud compatible bucket listings [XML URLs](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html)
+
+The `report` mode is a readonly subset of the above directly on `/`.
+
+There is also the GRPC health and ping servers, as well as the http->https redirector.
+
+## Implementation details
+
+Fortio is written in the [Go](https://golang.org) language and includes a scalable semi log histogram in [stats.go](stats/stats.go) and a periodic runner engine in [periodic.go](periodic/periodic.go) with specializations for [http](http/httprunner.go) and [grpc](fortiogrpc/grpcrunner.go).
+The [http/](http/) package includes a very high performance specialized http 1.1 client.
+You may find fortio's [logger](log/logger.go) useful as well.
+
+You can run the histogram code standalone as a command line in [histogram/](histogram/), a basic echo http server in [echosrv/](echosrv/), or both the http echo and GRPC ping server through `fortio server`, the fortio command line interface lives in this top level directory [fortio_main.go](fortio_main.go)
+
+There is also [fcurl/](fcurl/) which is the `fortio curl` part of the code (if you need a light http client without grpc or server side).
+A matching tiny (2Mb compressed) docker image is [fortio/fortio.fcurl](https://hub.docker.com/r/fortio/fortio.fcurl/tags/)
+
+## More examples
+
+You can get the data on the console, for instance, with 5k qps: (includes envoy and mixer in the calls)
+
+$ time fortio load -qps 5000 -t 60s -c 8 -r 0.0001 -H "Host: perf-cluster" http://benchmark-2:9090/echo
+2017/07/09 02:31:05 Will be setting special Host header to perf-cluster
+Fortio running at 5000 queries per second for 1m0s: http://benchmark-2:9090/echo
+Starting at 5000 qps with 8 thread(s) [gomax 4] for 1m0s : 37500 calls each (total 300000)
+2017/07/09 02:32:05 T004 ended after 1m0.000907812s : 37500 calls. qps=624.9905437680746
+2017/07/09 02:32:05 T000 ended after 1m0.000922222s : 37500 calls. qps=624.9903936684861
+2017/07/09 02:32:05 T005 ended after 1m0.00094454s : 37500 calls. qps=624.9901611965524
+2017/07/09 02:32:05 T006 ended after 1m0.000944816s : 37500 calls. qps=624.9901583216429
+2017/07/09 02:32:05 T001 ended after 1m0.00102094s : 37500 calls. qps=624.9893653892883
+2017/07/09 02:32:05 T007 ended after 1m0.001096292s : 37500 calls. qps=624.9885805003184
+2017/07/09 02:32:05 T003 ended after 1m0.001045342s : 37500 calls. qps=624.9891112105419
+2017/07/09 02:32:05 T002 ended after 1m0.001044416s : 37500 calls. qps=624.9891208560392
+Ended after 1m0.00112695s : 300000 calls. qps=4999.9
+Aggregated Sleep Time : count 299992 avg 8.8889218e-05 +/- 0.002326 min -0.03490402 max 0.001006041 sum 26.6660543
+# range, mid point, percentile, count
+< 0 , 0 , 8.58, 25726
+>= 0 < 0.001 , 0.0005 , 100.00, 274265
+>= 0.001 < 0.002 , 0.0015 , 100.00, 1
+# target 50% 0.000453102
+WARNING 8.58% of sleep were falling behind
+Aggregated Function Time : count 300000 avg 0.00094608764 +/- 0.0007901 min 0.000510522 max 0.029267604 sum 283.826292
+# range, mid point, percentile, count
+>= 0.0005 < 0.0006 , 0.00055 , 0.15, 456
+>= 0.0006 < 0.0007 , 0.00065 , 3.25, 9295
+>= 0.0007 < 0.0008 , 0.00075 , 24.23, 62926
+>= 0.0008 < 0.0009 , 0.00085 , 62.73, 115519
+>= 0.0009 < 0.001 , 0.00095 , 85.68, 68854
+>= 0.001 < 0.0011 , 0.00105 , 93.11, 22293
+>= 0.0011 < 0.0012 , 0.00115 , 95.38, 6792
+>= 0.0012 < 0.0014 , 0.0013 , 97.18, 5404
+>= 0.0014 < 0.0016 , 0.0015 , 97.94, 2275
+>= 0.0016 < 0.0018 , 0.0017 , 98.34, 1198
+>= 0.0018 < 0.002 , 0.0019 , 98.60, 775
+>= 0.002 < 0.0025 , 0.00225 , 98.98, 1161
+>= 0.0025 < 0.003 , 0.00275 , 99.21, 671
+>= 0.003 < 0.0035 , 0.00325 , 99.36, 449
+>= 0.0035 < 0.004 , 0.00375 , 99.47, 351
+>= 0.004 < 0.0045 , 0.00425 , 99.57, 290
+>= 0.0045 < 0.005 , 0.00475 , 99.66, 280
+>= 0.005 < 0.006 , 0.0055 , 99.79, 380
+>= 0.006 < 0.007 , 0.0065 , 99.82, 92
+>= 0.007 < 0.008 , 0.0075 , 99.83, 15
+>= 0.008 < 0.009 , 0.0085 , 99.83, 5
+>= 0.009 < 0.01 , 0.0095 , 99.83, 1
+>= 0.01 < 0.012 , 0.011 , 99.83, 8
+>= 0.012 < 0.014 , 0.013 , 99.84, 35
+>= 0.014 < 0.016 , 0.015 , 99.92, 231
+>= 0.016 < 0.018 , 0.017 , 99.94, 65
+>= 0.018 < 0.02 , 0.019 , 99.95, 26
+>= 0.02 < 0.025 , 0.0225 , 100.00, 139
+>= 0.025 < 0.03 , 0.0275 , 100.00, 14
+# target 50% 0.000866935
+# target 75% 0.000953452
+# target 99% 0.00253875
+# target 99.9% 0.0155152
+Code 200 : 300000
+Response Body Sizes : count 300000 avg 0 +/- 0 min 0 max 0 sum 0
+
+
+Or you can get the data in [JSON format](https://github.com/fortio/fortio/wiki/Sample-JSON-output) (using `-json result.json`)
+
+### Web/Graphical UI
+
+Or graphically (through the [http://localhost:8080/fortio/](http://localhost:8080/fortio/) web UI):
+
+Simple form/UI:
+
+Sample requests with responses delayed by 250us and 0.5% of 503 and 1.5% of 429 simulated http errors.
+
+![Web UI form screenshot](https://user-images.githubusercontent.com/3664595/41430618-53d911d4-6fc5-11e8-8e35-d4f5fea4426a.png)
+
+Run result:
+
+![Graphical result](https://user-images.githubusercontent.com/3664595/41430735-bb95eb3a-6fc5-11e8-8174-be4a6251058f.png)
+
+```Shell
+Code 200 : 2929 (97.6 %)
+Code 429 : 56 (1.9 %)
+Code 503 : 15 (0.5 %)
+```
+
+There are newer/live examples on [istio.io/docs/concepts/performance-and-scalability/#synthetic-end-to-end-benchmarks](https://istio.io/docs/concepts/performance-and-scalability/#synthetic-end-to-end-benchmarks)
+
+## Contributing
+
+Contributions whether through issues, documentation, bug fixes, or new features
+are most welcome !
+
+Please also see [Contributing to Istio](https://github.com/istio/community/blob/master/CONTRIBUTING.md#contributing-to-istio)
+and [Getting started contributing to Fortio](https://github.com/fortio/fortio/wiki/FAQ#how-do-i-get-started-contributing-to-fortio) in the FAQ.
+
+If you are not using the binary releases, please do `make pull` to pull/update to the latest of the current branch.
+
+And make sure to go format and run those commands successfully before sending your PRs:
+
+```Shell
+make test
+make lint
+make release-test
+```
+
+When modifying Javascript, check with [standard](https://github.com/standard/standard):
+
+```Shell
+standard --fix ui/static/js/fortio_chart.js
+```
+
+## See also
+
+Our wiki and the [Fortio FAQ](https://github.com/fortio/fortio/wiki/FAQ) (including for instance differences between `fortio` and `wrk` or `httpbin`)
+
+## Disclaimer
+
+This is not an officially supported Google product.
diff --git a/vendor/fortio.org/fortio/Webtest.sh b/vendor/fortio.org/fortio/Webtest.sh
new file mode 100755
index 0000000000..3ab5823c0e
--- /dev/null
+++ b/vendor/fortio.org/fortio/Webtest.sh
@@ -0,0 +1,119 @@
+#! /bin/bash
+# Copyright 2017 Istio Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+set -x
+# Check we can build the image
+make docker-internal TAG=webtest || exit 1
+FORTIO_UI_PREFIX=/newprefix/ # test the non default prefix (not /fortio/)
+FILE_LIMIT=20 # must be low to detect leaks
+LOGLEVEL=info # change to debug to debug
+MAXPAYLOAD=8 # Max Payload size for echo?size= in kb
+CERT=/etc/ssl/certs/ca-certificates.crt
+TEST_CERT_VOL=/etc/ssl/certs/fortio
+DOCKERNAME=fortio_server
+DOCKERSECNAME=fortio_secure_server
+DOCKERSECVOLNAME=fortio_certs
+FORTIO_BIN_PATH=fortio # /usr/bin/fortio is the full path but isn't needed
+DOCKERID=$(docker run -d --ulimit nofile=$FILE_LIMIT --name $DOCKERNAME fortio/fortio:webtest server -ui-path $FORTIO_UI_PREFIX -loglevel $LOGLEVEL -maxpayloadsizekb $MAXPAYLOAD)
+function cleanup {
+ set +e # errors are ok during cleanup
+ docker stop $DOCKERID
+ docker rm -f $DOCKERNAME
+ docker stop $DOCKERSECID # may not be set yet, it's ok
+ docker rm -f $DOCKERSECNAME
+ docker rm -f $DOCKERSECVOLNAME
+}
+trap cleanup EXIT
+set -e
+set -o pipefail
+docker ps
+BASE_URL="http://localhost:8080"
+BASE_FORTIO="$BASE_URL$FORTIO_UI_PREFIX"
+CURL="docker exec $DOCKERNAME $FORTIO_BIN_PATH curl -loglevel $LOGLEVEL"
+# Check https works (certs are in the image) - also tests autoswitch to std client for https
+$CURL https://istio.io/robots.txt
+# Check that browse doesn't 404s
+$CURL ${BASE_FORTIO}browse
+# Check we can connect, and run a http QPS test against ourselves through fetch
+$CURL "${BASE_FORTIO}fetch/localhost:8080$FORTIO_UI_PREFIX?url=http://localhost:8080/debug&load=Start&qps=-1&json=on" | grep ActualQPS
+# Check we can do it twice despite ulimit - check we get all 200s (exactly 80 of them (default is 8 connections->16 fds + a few))
+$CURL "${BASE_FORTIO}fetch/localhost:8080$FORTIO_UI_PREFIX?url=http://localhost:8080/debug&load=Start&n=80&qps=-1&json=on" | grep '"200": 80'
+# Check we can connect, and run a grpc QPS test against ourselves through fetch
+$CURL "${BASE_FORTIO}fetch/localhost:8080$FORTIO_UI_PREFIX?url=localhost:8079&load=Start&qps=-1&json=on&n=100&runner=grpc" | grep '"SERVING": 100'
+# Check we get the logo (need to remove the CR from raw headers)
+VERSION=$(docker exec $DOCKERNAME $FORTIO_BIN_PATH version -s)
+LOGO_TYPE=$($CURL "${BASE_FORTIO}${VERSION}/static/img/logo.svg" | grep -i Content-Type: | tr -d '\r'| awk '{print $2}')
+if [ "$LOGO_TYPE" != "image/svg+xml" ]; then
+ echo "Unexpected content type for the logo: $LOGO_TYPE"
+ exit 1
+fi
+# Check we can get the JS file through the proxy and it's > 50k
+SIZE=$($CURL "${BASE_FORTIO}fetch/localhost:8080${FORTIO_UI_PREFIX}${VERSION}/static/js/Chart.min.js" |wc -c)
+if [ "$SIZE" -lt 50000 ]; then
+ echo "Too small fetch for js: $SIZE"
+ exit 1
+fi
+# Check if max payload set to value passed in cmd line parameter -maxpayloadsizekb
+SIZE=$($CURL "${BASE_URL}/echo?size=1048576" |wc -c)
+# Payload is 8192 but between content chunking and headers fast client can return up to 8300 or so
+if [ "$SIZE" -lt 8191 ] || [ "$SIZE" -gt 8400 ]; then
+ echo "-maxpayloadsizekb not working as expected"
+ exit 1
+fi
+
+# Check the main page
+$CURL $BASE_FORTIO
+# Do a small http load using std client
+docker exec $DOCKERNAME $FORTIO_BIN_PATH load -stdclient -qps 1 -t 2s -c 1 https://www.google.com/
+# and with normal and with custom headers
+docker exec $DOCKERNAME $FORTIO_BIN_PATH load -H Foo:Bar -H Blah:Blah -qps 1 -t 2s -c 2 http://www.google.com/
+# Do a grpcping
+docker exec $DOCKERNAME $FORTIO_BIN_PATH grpcping localhost
+# Do a grpcping to a scheme-prefixed destination. Fortio should append port number
+docker exec $DOCKERNAME $FORTIO_BIN_PATH grpcping https://fortio.istio.io
+docker exec $DOCKERNAME $FORTIO_BIN_PATH grpcping http://fortio.istio.io
+# Do a grpcping with -cert flag. Fortio should use valid cert.
+docker exec $DOCKERNAME $FORTIO_BIN_PATH grpcping -cacert $CERT fortio.istio.io:443
+docker exec $DOCKERNAME $FORTIO_BIN_PATH grpcping -cacert $CERT https://fortio.istio.io
+# Do a local grpcping. Fortio should append default grpc port number to destination
+docker exec $DOCKERNAME $FORTIO_BIN_PATH grpcping localhost
+# pprof should be there, no 404/error
+PPROF_URL="$BASE_URL/debug/pprof/heap?debug=1"
+$CURL $PPROF_URL | grep -i TotalAlloc # should find this in memory profile
+# creating dummy container to hold a volume for test certs due to remote docker bind mount limitation.
+DOCKERVOLID=$(docker create -v $TEST_CERT_VOL --name $DOCKERSECVOLNAME docker.io/fortio/fortio.build:v12 /bin/true)
+# copying cert files into the certs volume of the dummy container
+for f in ca.crt server.crt server.key; do docker cp $PWD/cert-tmp/$f $DOCKERSECVOLNAME:$TEST_CERT_VOL/$f; done
+# start server in secure grpc mode. uses non-default ports to avoid conflicts with fortio_server container.
+# mounts certs volume from dummy container.
+DOCKERSECID=$(docker run -d --ulimit nofile=$FILE_LIMIT --name $DOCKERSECNAME --volumes-from $DOCKERSECVOLNAME fortio/fortio:webtest server -cacert $TEST_CERT_VOL/ca.crt -cert $TEST_CERT_VOL/server.crt -key $TEST_CERT_VOL/server.key -grpc-port 8097 -http-port 8098 -redirect-port 8090 -loglevel $LOGLEVEL)
+# run secure grpcping and load tests
+docker exec $DOCKERSECNAME $FORTIO_BIN_PATH grpcping -cacert $TEST_CERT_VOL/ca.crt localhost:8097
+docker exec $DOCKERSECNAME $FORTIO_BIN_PATH load -grpc -cacert $TEST_CERT_VOL/ca.crt localhost:8097
+# switch to report mode
+docker stop $DOCKERID
+docker rm $DOCKERNAME
+DOCKERNAME=fortio_report
+DOCKERID=$(docker run -d --ulimit nofile=$FILE_LIMIT --name $DOCKERNAME fortio/fortio:webtest report -loglevel $LOGLEVEL)
+docker ps
+CURL="docker exec $DOCKERNAME $FORTIO_BIN_PATH curl -loglevel $LOGLEVEL"
+if $CURL $PPROF_URL ; then
+ echo "pprof should 404 on report mode!"
+ exit 1
+else
+ echo "expected pprof failure to access in report mode - good !"
+fi
+# base url should serve report only UI in report mode
+$CURL $BASE_URL | grep "report only limited UI"
+# cleanup() will clean everything left even on success
diff --git a/vendor/fortio.org/fortio/bincommon/commonflags.go b/vendor/fortio.org/fortio/bincommon/commonflags.go
new file mode 100644
index 0000000000..565581e8db
--- /dev/null
+++ b/vendor/fortio.org/fortio/bincommon/commonflags.go
@@ -0,0 +1,156 @@
+// Copyright 2018 Istio Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package bincommon is the common code and flag handling between the fortio
+// (fortio_main.go) and fcurl (fcurl.go) executables.
+package bincommon
+
+// Do not add any external dependencies we want to keep fortio minimal.
+
+import (
+ "flag"
+ "fmt"
+ "net/http"
+ "os"
+ "strings"
+
+ "io"
+
+ "fortio.org/fortio/fhttp"
+ "fortio.org/fortio/fnet"
+ "fortio.org/fortio/log"
+ "fortio.org/fortio/version"
+)
+
+// -- Support for multiple instances of -H flag on cmd line:
+type headersFlagList struct {
+}
+
+func (f *headersFlagList) String() string {
+ return ""
+}
+func (f *headersFlagList) Set(value string) error {
+ return httpOpts.AddAndValidateExtraHeader(value)
+}
+
+// -- end of functions for -H support
+
+// FlagsUsage prints end of the usage() (flags part + error message).
+func FlagsUsage(w io.Writer, msgs ...interface{}) {
+ // nolint: gas
+ _, _ = fmt.Fprintf(w, "flags are:\n")
+ flag.CommandLine.SetOutput(w)
+ flag.PrintDefaults()
+ if len(msgs) > 0 {
+ _, _ = fmt.Fprintln(w, msgs...)
+ }
+}
+
+var (
+ compressionFlag = flag.Bool("compression", false, "Enable http compression")
+ keepAliveFlag = flag.Bool("keepalive", true, "Keep connection alive (only for fast http 1.1)")
+ halfCloseFlag = flag.Bool("halfclose", false,
+ "When not keepalive, whether to half close the connection (only for fast http)")
+ httpReqTimeoutFlag = flag.Duration("timeout", fhttp.HTTPReqTimeOutDefaultValue, "Connection and read timeout value (for http)")
+ stdClientFlag = flag.Bool("stdclient", false, "Use the slower net/http standard client (works for TLS)")
+ http10Flag = flag.Bool("http1.0", false, "Use http1.0 (instead of http 1.1)")
+ httpsInsecureFlag = flag.Bool("k", false, "Do not verify certs in https connections")
+ headersFlags headersFlagList
+ httpOpts fhttp.HTTPOptions
+ followRedirectsFlag = flag.Bool("L", false, "Follow redirects (implies -std-client) - do not use for load test")
+ userCredentialsFlag = flag.String("user", "", "User credentials for basic authentication (for http). Input data format"+
+ " should be `user:password`")
+ // QuietFlag is the value of -quiet.
+ QuietFlag = flag.Bool("quiet", false, "Quiet mode: sets the loglevel to Error and reduces the output.")
+
+ contentTypeFlag = flag.String("content-type", "",
+ "Sets http content type. Setting this value switches the request method from GET to POST.")
+ // PayloadSizeFlag is the value of -payload-size
+ PayloadSizeFlag = flag.Int("payload-size", 0, "Additional random payload size, replaces -payload when set > 0,"+
+ " must be smaller than -maxpayloadsizekb. Setting this switches http to POST.")
+ // PayloadFlag is the value of -payload
+ PayloadFlag = flag.String("payload", "", "Payload string to send along")
+	// PayloadFileFlag is the value of -payload-file
+ PayloadFileFlag = flag.String("payload-file", "", "File `path` to be use as payload (POST for http), replaces -payload when set.")
+
+ // UnixDomainSocket to use instead of regular host:port
+ unixDomainSocketFlag = flag.String("unix-socket", "", "Unix domain socket `path` to use for physical connection")
+)
+
+// SharedMain is the common part of main from fortio_main and fcurl.
+func SharedMain(usage func(io.Writer, ...interface{})) {
+ flag.Var(&headersFlags, "H", "Additional `header`(s)")
+ flag.IntVar(&fhttp.BufferSizeKb, "httpbufferkb", fhttp.BufferSizeKb,
+ "Size of the buffer (max data size) for the optimized http client in `kbytes`")
+ flag.BoolVar(&fhttp.CheckConnectionClosedHeader, "httpccch", fhttp.CheckConnectionClosedHeader,
+ "Check for Connection: Close Header")
+ // Special case so `fcurl -version` and `--version` and `version` and ... work
+ if len(os.Args) < 2 {
+ return
+ }
+ if strings.Contains(os.Args[1], "version") {
+ if len(os.Args) >= 3 && strings.Contains(os.Args[2], "s") {
+ // so `fortio version -s` is the short version; everything else is long/full
+ fmt.Println(version.Short())
+ } else {
+ fmt.Println(version.Long())
+ }
+ os.Exit(0)
+ }
+ if strings.Contains(os.Args[1], "help") {
+ usage(os.Stdout)
+ os.Exit(0)
+ }
+}
+
+// FetchURL is fetching url content and exiting with 1 upon error.
+// common part between fortio_main and fcurl.
+func FetchURL(o *fhttp.HTTPOptions) {
+ // keepAlive could be just false when making 1 fetch but it helps debugging
+ // the http client when making a single request if using the flags
+ client := fhttp.NewClient(o)
+ if client == nil {
+ return // error logged already
+ }
+ code, data, header := client.Fetch()
+ log.LogVf("Fetch result code %d, data len %d, headerlen %d", code, len(data), header)
+ os.Stdout.Write(data) //nolint: errcheck
+ if code != http.StatusOK {
+ log.Errf("Error status %d : %s", code, fhttp.DebugSummary(data, 512))
+ os.Exit(1)
+ }
+}
+
+// SharedHTTPOptions is the flag->httpoptions transfer code shared between
+// fortio_main and fcurl.
+func SharedHTTPOptions() *fhttp.HTTPOptions {
+ url := strings.TrimLeft(flag.Arg(0), " \t\r\n")
+ httpOpts.URL = url
+ httpOpts.HTTP10 = *http10Flag
+ httpOpts.DisableFastClient = *stdClientFlag
+ httpOpts.DisableKeepAlive = !*keepAliveFlag
+ httpOpts.AllowHalfClose = *halfCloseFlag
+ httpOpts.Compression = *compressionFlag
+ httpOpts.HTTPReqTimeOut = *httpReqTimeoutFlag
+ httpOpts.Insecure = *httpsInsecureFlag
+ httpOpts.UserCredentials = *userCredentialsFlag
+ httpOpts.ContentType = *contentTypeFlag
+ httpOpts.Payload = fnet.GeneratePayload(*PayloadFileFlag, *PayloadSizeFlag, *PayloadFlag)
+ httpOpts.UnixDomainSocket = *unixDomainSocketFlag
+ if *followRedirectsFlag {
+ httpOpts.FollowRedirects = true
+ httpOpts.DisableFastClient = true
+ }
+ return &httpOpts
+}
diff --git a/vendor/fortio.org/fortio/cert-gen b/vendor/fortio.org/fortio/cert-gen
new file mode 100755
index 0000000000..f1be776637
--- /dev/null
+++ b/vendor/fortio.org/fortio/cert-gen
@@ -0,0 +1,114 @@
+#!/usr/bin/env bash
+# note: Script uses -batch and -subj, instead of interactive prompts.
+# default environment variable values
+CERT_TEMP_DIR=./cert-tmp
+CA_CERT=$CERT_TEMP_DIR/ca.crt
+SVR_CERT=$CERT_TEMP_DIR/server.crt
+SVR_KEY=$CERT_TEMP_DIR/server.key
+SAN=DNS.1:localhost,IP.1:127.0.0.1
+
+set -e
+
+# Skip cert creation if the certs already exist
+if [ -d $CERT_TEMP_DIR ]
+then
+ echo "Certificate directory $CERT_TEMP_DIR exists. Skipping certificate creation."
+ exit
+fi
+
+echo "Creating test CA cert and server cert/key..."
+
+# create cert directory and files
+mkdir -p $CERT_TEMP_DIR
+touch index.txt
+touch index.txt.attr
+echo 1000 > serial
+cat <<EOF >$CERT_TEMP_DIR/openssl.conf
+[ ca ]
+default_ca = CA_default
+
+[ CA_default ]
+dir = .
+certs = $CERT_TEMP_DIR
+crl_dir = $CERT_TEMP_DIR
+new_certs_dir = $CERT_TEMP_DIR
+database = ./index.txt
+serial = ./serial
+crlnumber = ./crlnumber
+crl = ./crl/intermediate-ca.crl
+crl_extensions = crl_ext
+default_crl_days = 30
+default_md = sha256
+
+name_opt = ca_default
+cert_opt = ca_default
+default_days = 375
+preserve = no
+policy = policy_loose
+
+[ policy_loose ]
+countryName = optional
+stateOrProvinceName = optional
+localityName = optional
+organizationName = optional
+organizationalUnitName = optional
+commonName = supplied
+emailAddress = optional
+
+[ req ]
+default_bits = 4096
+distinguished_name = req_distinguished_name
+string_mask = utf8only
+default_md = sha256
+
+[ req_distinguished_name ]
+countryName = Country Name (2 letter code)
+stateOrProvinceName = State or Province Name
+localityName = Locality Name
+0.organizationName = Organization Name
+organizationalUnitName = Organizational Unit Name
+commonName = Common Name
+
+[ v3_ca ]
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid:always,issuer
+basicConstraints = critical, CA:true, pathlen:0
+keyUsage = critical, digitalSignature, cRLSign, keyCertSign
+
+[ usr_cert ]
+basicConstraints = CA:FALSE
+nsCertType = client
+nsComment = "OpenSSL Generated Client Certificate"
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid,issuer
+keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment
+extendedKeyUsage = clientAuth
+
+[ server_cert ]
+basicConstraints = CA:FALSE
+nsCertType = server
+nsComment = "OpenSSL Generated Server Certificate"
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid,issuer:always
+keyUsage = critical, digitalSignature, keyEncipherment
+extendedKeyUsage = serverAuth
+subjectAltName = DNS.1:localhost,IP.1:127.0.0.1
+EOF
+
+# CA private key (unencrypted)
+openssl genrsa -out $CERT_TEMP_DIR/ca.key 4096
+# Certificate Authority (self-signed certificate)
+openssl req -config $CERT_TEMP_DIR/openssl.conf -new -x509 -days 3650 -sha256 -key $CERT_TEMP_DIR/ca.key -extensions v3_ca -out $CERT_TEMP_DIR/ca.crt -subj "/CN=fake-ca"
+
+# Server private key (unencrypted)
+openssl genrsa -out $CERT_TEMP_DIR/server.key 2048
+# Server certificate signing request (CSR)
+openssl req -config $CERT_TEMP_DIR/openssl.conf -new -sha256 -key $CERT_TEMP_DIR/server.key -out $CERT_TEMP_DIR/server.csr -subj "/CN=fake-server"
+# Certificate Authority signs CSR to grant a certificate
+openssl ca -batch -config $CERT_TEMP_DIR/openssl.conf -extensions server_cert -days 365 -notext -md sha256 -in $CERT_TEMP_DIR/server.csr -out $CERT_TEMP_DIR/server.crt -cert $CERT_TEMP_DIR/ca.crt -keyfile $CERT_TEMP_DIR/ca.key
+
+# Remove unneeded files
+rm -f index.* serial* $CERT_TEMP_DIR/ca.key $CERT_TEMP_DIR/*.csr $CERT_TEMP_DIR/*.pem $CERT_TEMP_DIR/openssl.conf
+
+echo "*******************************************************************"
+echo "WARNING: Generated credentials are self-signed and should be used for testing purposes only."
diff --git a/vendor/fortio.org/fortio/codecov.yml b/vendor/fortio.org/fortio/codecov.yml
new file mode 100644
index 0000000000..41e6de0436
--- /dev/null
+++ b/vendor/fortio.org/fortio/codecov.yml
@@ -0,0 +1,6 @@
+coverage:
+ range: 60..99
+ precision: 1 # how many decimal places to display in the UI: 0 <= value <= 4
+ round: nearest # how coverage is rounded: down/up/nearest
+ ignore: # files and folders that will be removed during processing
+ - "**.pb.go"
diff --git a/vendor/fortio.org/fortio/debian/changelog b/vendor/fortio.org/fortio/debian/changelog
new file mode 100644
index 0000000000..561998689b
--- /dev/null
+++ b/vendor/fortio.org/fortio/debian/changelog
@@ -0,0 +1,20 @@
+fortio (1.3.1-1) unstable; urgency=medium
+
+ * Bump for 1.3.1
+
+ -- Laurent Demailly Sat, 02 Feb 2019 11:38:26 -0800
+
+fortio (1.3.0-2) unstable; urgency=medium
+
+ * Skip tests by default (because build machines don't have internet access)
+ * Added override for Chart.min.js so to not having to further completely
+ revamp the build system for that one file (which does have a header
+ indicating source and (MIT) license)
+
+ -- Laurent Demailly Fri, 14 Sep 2018 18:06:21 -0700
+
+fortio (1.3.0-1) unstable; urgency=medium
+
+ * Initial debian packaged release (Closes: #908172)
+
+ -- Laurent Demailly Thu, 06 Sep 2018 16:21:28 -0700
diff --git a/vendor/fortio.org/fortio/debian/compat b/vendor/fortio.org/fortio/debian/compat
new file mode 100644
index 0000000000..f599e28b8a
--- /dev/null
+++ b/vendor/fortio.org/fortio/debian/compat
@@ -0,0 +1 @@
+10
diff --git a/vendor/fortio.org/fortio/debian/control b/vendor/fortio.org/fortio/debian/control
new file mode 100644
index 0000000000..e1f39e8db5
--- /dev/null
+++ b/vendor/fortio.org/fortio/debian/control
@@ -0,0 +1,18 @@
+Source: fortio
+Section: web
+Priority: optional
+Maintainer: Laurent Demailly
+Build-Depends: debhelper (>= 10),
+ golang-go (>= 1.8)
+Standards-Version: 4.1.2
+Homepage: https://github.com/fortio/fortio
+Vcs-Git: https://github.com/fortio/fortio.git
+Vcs-Browser: https://github.com/fortio/fortio/tree/master/
+
+Package: fortio
+Architecture: any
+Depends: ca-certificates
+Description: load testing library, command line tool, echo server, web UI
+ Fortio is a microservices (http, grpc) load testing library, command line tool,
+ advanced echo server, and web UI in go (golang). Fortio allows one to specify a
+ set query-per-second load and record latency histograms and other useful stats.
diff --git a/vendor/fortio.org/fortio/debian/copyright b/vendor/fortio.org/fortio/debian/copyright
new file mode 100644
index 0000000000..ee65daad6f
--- /dev/null
+++ b/vendor/fortio.org/fortio/debian/copyright
@@ -0,0 +1,22 @@
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: fortio
+Source: https://github.com/fortio/fortio/
+
+Files: *
+Copyright: 2017 Istio Authors.
+License: Apache-2.0
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+ .
+ http://www.apache.org/licenses/LICENSE-2.0
+ .
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ .
+ On Debian systems, the full text of the Apache License, Version 2.0
+ can be found in the file
+ `/usr/share/common-licenses/Apache-2.0'.
diff --git a/vendor/fortio.org/fortio/debian/rules b/vendor/fortio.org/fortio/debian/rules
new file mode 100755
index 0000000000..3207ee0eaf
--- /dev/null
+++ b/vendor/fortio.org/fortio/debian/rules
@@ -0,0 +1,15 @@
+#!/usr/bin/make -f
+
+#export DH_VERBOSE=1
+#export DEB_BUILD_OPTIONS=--no-parallel
+
+# debian build farms don't have internet access so some of our grpc tests against
+# fortio.istio.io can't work, so disabling tests by default
+
+ifndef FORTIO_SKIP_TESTS
+export FORTIO_SKIP_TESTS=Y
+endif
+
+%:
+ dh $@
+
diff --git a/vendor/fortio.org/fortio/debian/source/format b/vendor/fortio.org/fortio/debian/source/format
new file mode 100644
index 0000000000..163aaf8d82
--- /dev/null
+++ b/vendor/fortio.org/fortio/debian/source/format
@@ -0,0 +1 @@
+3.0 (quilt)
diff --git a/vendor/fortio.org/fortio/debian/source/lintian-overrides b/vendor/fortio.org/fortio/debian/source/lintian-overrides
new file mode 100644
index 0000000000..19670f043f
--- /dev/null
+++ b/vendor/fortio.org/fortio/debian/source/lintian-overrides
@@ -0,0 +1,2 @@
+# Chart.min.js is a minified, pruned version as explained in the header of the said file
+fortio source: source-is-missing src/fortio.org/fortio/ui/static/js/Chart.min.js
diff --git a/vendor/fortio.org/fortio/docker-compose.test.yml b/vendor/fortio.org/fortio/docker-compose.test.yml
new file mode 100644
index 0000000000..57138ed372
--- /dev/null
+++ b/vendor/fortio.org/fortio/docker-compose.test.yml
@@ -0,0 +1,4 @@
+sut:
+ build: .
+ dockerfile: Dockerfile.test
+ command: go test -race -v -timeout 60s fortio.org/fortio/...
diff --git a/vendor/fortio.org/fortio/docs/fortio-logo-color.png b/vendor/fortio.org/fortio/docs/fortio-logo-color.png
new file mode 100644
index 0000000000..db3a869c42
Binary files /dev/null and b/vendor/fortio.org/fortio/docs/fortio-logo-color.png differ
diff --git a/vendor/fortio.org/fortio/docs/fortio-logo.gvdesign b/vendor/fortio.org/fortio/docs/fortio-logo.gvdesign
new file mode 100644
index 0000000000..0829bf78df
Binary files /dev/null and b/vendor/fortio.org/fortio/docs/fortio-logo.gvdesign differ
diff --git a/vendor/fortio.org/fortio/docs/fortio-logo.svg b/vendor/fortio.org/fortio/docs/fortio-logo.svg
new file mode 100644
index 0000000000..c8dc1723d3
--- /dev/null
+++ b/vendor/fortio.org/fortio/docs/fortio-logo.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/vendor/fortio.org/fortio/docs/fortio.1 b/vendor/fortio.org/fortio/docs/fortio.1
new file mode 100755
index 0000000000..471fbd4502
--- /dev/null
+++ b/vendor/fortio.org/fortio/docs/fortio.1
@@ -0,0 +1,15 @@
+.TH "FORTIO" 1 "2018"
+.SH NAME
+fortio \- http and gRPC load testing client, advanced echo server and web
+graphing UI
+.SH SYNOPSIS
+\fBfortio\fP \fIcommand\fP [flags] \fItarget\fP
+.SH DESCRIPTION
+\fIcommand\fP can be one of \fBload\fP (load testing), \fBserver\fP (starts
+grpc ping and http echo/ui/redirect/proxy servers), \fBgrpcping\fP (grpc
+client), \fBreport\fP (report only UI server), \fBredirect\fP (redirect
+only server), or \fBcurl\fP (single URL debug).
+Where \fItarget\fP is a url (http load tests) or host:port (grpc health test).
+.SH "SEE ALSO"
+\fBfortio help\fP for all the flags and https://fortio.org/ for more
+documentation.
diff --git a/vendor/fortio.org/fortio/echosrv/echo.go b/vendor/fortio.org/fortio/echosrv/echo.go
new file mode 100644
index 0000000000..065d0283d8
--- /dev/null
+++ b/vendor/fortio.org/fortio/echosrv/echo.go
@@ -0,0 +1,47 @@
+// Copyright 2017 Istio Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Initially adapted from istio/proxy/test/backend/echo with error handling and
+// concurrency fixes and making it as low overhead as possible
+// (no std output by default)
+
+package main
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "strings"
+
+ "fortio.org/fortio/fhttp"
+ "fortio.org/fortio/version"
+)
+
+var (
+ port = flag.String("port", "8080", "default http port, either port or address:port can be specified")
+ debugPath = flag.String("debug-path", "/debug", "path for debug url, set to empty for no debug")
+)
+
+func main() {
+ flag.Parse()
+ if len(os.Args) >= 2 && strings.Contains(os.Args[1], "version") {
+ fmt.Println(version.Long())
+ os.Exit(0)
+ }
+ if _, addr := fhttp.Serve(*port, *debugPath); addr == nil {
+ os.Exit(1) // error already logged
+ }
+ select {}
+}
diff --git a/vendor/fortio.org/fortio/fcurl/fcurl.go b/vendor/fortio.org/fortio/fcurl/fcurl.go
new file mode 100644
index 0000000000..477bb13382
--- /dev/null
+++ b/vendor/fortio.org/fortio/fcurl/fcurl.go
@@ -0,0 +1,51 @@
+// Copyright 2018 Istio Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+// Do not add any external dependencies we want to keep fortio minimal.
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "os"
+
+ "fortio.org/fortio/bincommon"
+ "fortio.org/fortio/log"
+ "fortio.org/fortio/version"
+)
+
+// Prints usage
+func usage(w io.Writer, msgs ...interface{}) {
+ // nolint: gas
+ _, _ = fmt.Fprintf(w, "Φορτίο fortio-curl %s usage:\n\t%s [flags] url\n",
+ version.Short(),
+ os.Args[0])
+ bincommon.FlagsUsage(w, msgs...)
+}
+
+func main() {
+ bincommon.SharedMain(usage)
+ if len(os.Args) < 2 {
+ usage(os.Stderr, "Error: need a url as parameter")
+ os.Exit(1)
+ }
+ flag.Parse()
+ if *bincommon.QuietFlag {
+ log.SetLogLevelQuiet(log.Error)
+ }
+ o := bincommon.SharedHTTPOptions()
+ bincommon.FetchURL(o)
+}
diff --git a/vendor/fortio.org/fortio/fgrpc/grpcrunner.go b/vendor/fortio.org/fortio/fgrpc/grpcrunner.go
new file mode 100644
index 0000000000..45aa30392f
--- /dev/null
+++ b/vendor/fortio.org/fortio/fgrpc/grpcrunner.go
@@ -0,0 +1,297 @@
+// Copyright 2017 Istio Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fgrpc // import "fortio.org/fortio/fgrpc"
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/health/grpc_health_v1"
+
+ "strings"
+
+ "fortio.org/fortio/fnet"
+ "fortio.org/fortio/log"
+ "fortio.org/fortio/periodic"
+)
+
+// Dial dials grpc using insecure or tls transport security when serverAddr
+// has prefixHTTPS or cert is provided. If override is set to a non empty string,
+// it will override the virtual host name of authority in requests.
+func Dial(o *GRPCRunnerOptions) (conn *grpc.ClientConn, err error) {
+ var opts []grpc.DialOption
+ switch {
+ case o.CACert != "":
+ var creds credentials.TransportCredentials
+ creds, err = credentials.NewClientTLSFromFile(o.CACert, o.CertOverride)
+ if err != nil {
+ log.Errf("Invalid TLS credentials: %v\n", err)
+ return nil, err
+ }
+ log.Infof("Using CA certificate %v to construct TLS credentials", o.CACert)
+ opts = append(opts, grpc.WithTransportCredentials(creds))
+ case strings.HasPrefix(o.Destination, fnet.PrefixHTTPS):
+ creds := credentials.NewTLS(nil)
+ opts = append(opts, grpc.WithTransportCredentials(creds))
+ default:
+ opts = append(opts, grpc.WithInsecure())
+ }
+ serverAddr := grpcDestination(o.Destination)
+ if o.UnixDomainSocket != "" {
+ log.Warnf("Using domain socket %v instead of %v for grpc connection", o.UnixDomainSocket, serverAddr)
+ opts = append(opts, grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout(fnet.UnixDomainSocket, o.UnixDomainSocket, timeout)
+ }))
+ }
+ conn, err = grpc.Dial(serverAddr, opts...)
+ if err != nil {
+ log.Errf("failed to connect to %s with certificate %s and override %s: %v", serverAddr, o.CACert, o.CertOverride, err)
+ }
+ return conn, err
+}
+
+// TODO: refactor common parts between http and grpc runners
+
+// GRPCRunnerResults is the aggregated result of an GRPCRunner.
+// Also is the internal type used per thread/goroutine.
+type GRPCRunnerResults struct {
+ periodic.RunnerResults
+ clientH grpc_health_v1.HealthClient
+ reqH grpc_health_v1.HealthCheckRequest
+ clientP PingServerClient
+ reqP PingMessage
+ RetCodes HealthResultMap
+ Destination string
+ Streams int
+ Ping bool
+}
+
+// Run exercises GRPC health check or ping at the target QPS.
+// To be set as the Function in RunnerOptions.
+func (grpcstate *GRPCRunnerResults) Run(t int) {
+ log.Debugf("Calling in %d", t)
+ var err error
+ var res interface{}
+ status := grpc_health_v1.HealthCheckResponse_SERVING
+ if grpcstate.Ping {
+ res, err = grpcstate.clientP.Ping(context.Background(), &grpcstate.reqP)
+ } else {
+ var r *grpc_health_v1.HealthCheckResponse
+ r, err = grpcstate.clientH.Check(context.Background(), &grpcstate.reqH)
+ if r != nil {
+ status = r.Status
+ res = r
+ }
+ }
+ log.Debugf("For %d (ping=%v) got %v %v", t, grpcstate.Ping, err, res)
+ if err != nil {
+ log.Warnf("Error making grpc call: %v", err)
+ grpcstate.RetCodes[Error]++
+ } else {
+ grpcstate.RetCodes[status.String()]++
+ }
+}
+
+// GRPCRunnerOptions includes the base RunnerOptions plus http specific
+// options.
+type GRPCRunnerOptions struct {
+ periodic.RunnerOptions
+ Destination string
+ Service string // Service to be checked when using grpc health check
+ Profiler string // file to save profiles to. defaults to no profiling
+ Payload string // Payload to be sent for grpc ping service
+ Streams int // number of streams. total go routines and data streams will be streams*numthreads.
+ Delay time.Duration // Delay to be sent when using grpc ping service
+ CACert string // Path to CA certificate for grpc TLS
+ CertOverride string // Override the cert virtual host of authority for testing
+ AllowInitialErrors bool // whether initial errors don't cause an abort
+ UsePing bool // use our own Ping proto for grpc load instead of standard health check one.
+ UnixDomainSocket string // unix domain socket path to use for physical connection instead of Destination
+}
+
+// RunGRPCTest runs an http test and returns the aggregated stats.
+func RunGRPCTest(o *GRPCRunnerOptions) (*GRPCRunnerResults, error) {
+ if o.Streams < 1 {
+ o.Streams = 1
+ }
+ if o.NumThreads < 1 {
+ // sort of todo, this redoing some of periodic normalize (but we can't use normalize which does too much)
+ o.NumThreads = periodic.DefaultRunnerOptions.NumThreads
+ }
+ if o.UsePing {
+ o.RunType = "GRPC Ping"
+ if o.Delay > 0 {
+ o.RunType += fmt.Sprintf(" Delay=%v", o.Delay)
+ }
+ } else {
+ o.RunType = "GRPC Health"
+ }
+ pll := len(o.Payload)
+ if pll > 0 {
+ o.RunType += fmt.Sprintf(" PayloadLength=%d", pll)
+ }
+ log.Infof("Starting %s test for %s with %d*%d threads at %.1f qps", o.RunType, o.Destination, o.Streams, o.NumThreads, o.QPS)
+ o.NumThreads *= o.Streams
+ r := periodic.NewPeriodicRunner(&o.RunnerOptions)
+ defer r.Options().Abort()
+ numThreads := r.Options().NumThreads // may change
+ total := GRPCRunnerResults{
+ RetCodes: make(HealthResultMap),
+ Destination: o.Destination,
+ Streams: o.Streams,
+ Ping: o.UsePing,
+ }
+ grpcstate := make([]GRPCRunnerResults, numThreads)
+ out := r.Options().Out // Important as the default value is set from nil to stdout inside NewPeriodicRunner
+ var conn *grpc.ClientConn
+ var err error
+ ts := time.Now().UnixNano()
+ for i := 0; i < numThreads; i++ {
+ r.Options().Runners[i] = &grpcstate[i]
+ if (i % o.Streams) == 0 {
+ conn, err = Dial(o)
+ if err != nil {
+ log.Errf("Error in grpc dial for %s %v", o.Destination, err)
+ return nil, err
+ }
+ } else {
+ log.Debugf("Reusing previous client connection for %d", i)
+ }
+ grpcstate[i].Ping = o.UsePing
+ var err error
+ if o.UsePing {
+ grpcstate[i].clientP = NewPingServerClient(conn)
+ if grpcstate[i].clientP == nil {
+ return nil, fmt.Errorf("unable to create ping client %d for %s", i, o.Destination)
+ }
+ grpcstate[i].reqP = PingMessage{Payload: o.Payload, DelayNanos: o.Delay.Nanoseconds(), Seq: int64(i), Ts: ts}
+ if o.Exactly <= 0 {
+ _, err = grpcstate[i].clientP.Ping(context.Background(), &grpcstate[i].reqP)
+ }
+ } else {
+ grpcstate[i].clientH = grpc_health_v1.NewHealthClient(conn)
+ if grpcstate[i].clientH == nil {
+ return nil, fmt.Errorf("unable to create health client %d for %s", i, o.Destination)
+ }
+ grpcstate[i].reqH = grpc_health_v1.HealthCheckRequest{Service: o.Service}
+ if o.Exactly <= 0 {
+ _, err = grpcstate[i].clientH.Check(context.Background(), &grpcstate[i].reqH)
+ }
+ }
+ if !o.AllowInitialErrors && err != nil {
+ log.Errf("Error in first grpc call (ping = %v) for %s: %v", o.UsePing, o.Destination, err)
+ return nil, err
+ }
+ // Setup the stats for each 'thread'
+ grpcstate[i].RetCodes = make(HealthResultMap)
+ }
+
+ if o.Profiler != "" {
+ fc, err := os.Create(o.Profiler + ".cpu")
+ if err != nil {
+ log.Critf("Unable to create .cpu profile: %v", err)
+ return nil, err
+ }
+ pprof.StartCPUProfile(fc) //nolint: gas,errcheck
+ }
+ total.RunnerResults = r.Run()
+ if o.Profiler != "" {
+ pprof.StopCPUProfile()
+ fm, err := os.Create(o.Profiler + ".mem")
+ if err != nil {
+ log.Critf("Unable to create .mem profile: %v", err)
+ return nil, err
+ }
+ runtime.GC() // get up-to-date statistics
+ pprof.WriteHeapProfile(fm) // nolint:gas,errcheck
+ fm.Close() // nolint:gas,errcheck
+ fmt.Printf("Wrote profile data to %s.{cpu|mem}\n", o.Profiler)
+ }
+ // Numthreads may have reduced
+ numThreads = r.Options().NumThreads
+ keys := []string{}
+ for i := 0; i < numThreads; i++ {
+ // Q: is there some copying each time stats[i] is used?
+ for k := range grpcstate[i].RetCodes {
+ if _, exists := total.RetCodes[k]; !exists {
+ keys = append(keys, k)
+ }
+ total.RetCodes[k] += grpcstate[i].RetCodes[k]
+ }
+ // TODO: if grpc client needs 'cleanup'/Close like http one, do it on original NumThreads
+ }
+ // Cleanup state:
+ r.Options().ReleaseRunners()
+ which := "Health"
+ if o.UsePing {
+ which = "Ping"
+ }
+ for _, k := range keys {
+ _, _ = fmt.Fprintf(out, "%s %s : %d\n", which, k, total.RetCodes[k])
+ }
+ return &total, nil
+}
+
+// grpcDestination parses dest and returns dest:port based on dest being
+// a hostname, IP address, hostname:port, or ip:port. The original dest is
+// returned if dest is an invalid hostname or invalid IP address. An http/https
+// prefix is removed from dest if one exists and the port number is set to
+// StandardHTTPPort for http, StandardHTTPSPort for https, or DefaultGRPCPort
+// if http, https, or :port is not specified in dest.
+// TODO: change/fix this (NormalizePort and more)
+func grpcDestination(dest string) (parsedDest string) {
+ var port string
+ // strip any unintentional http/https scheme prefixes from dest
+ // and set the port number.
+ switch {
+ case strings.HasPrefix(dest, fnet.PrefixHTTP):
+ parsedDest = strings.TrimSuffix(strings.Replace(dest, fnet.PrefixHTTP, "", 1), "/")
+ port = fnet.StandardHTTPPort
+ log.Infof("stripping http scheme. grpc destination: %v: grpc port: %s",
+ parsedDest, port)
+ case strings.HasPrefix(dest, fnet.PrefixHTTPS):
+ parsedDest = strings.TrimSuffix(strings.Replace(dest, fnet.PrefixHTTPS, "", 1), "/")
+ port = fnet.StandardHTTPSPort
+ log.Infof("stripping https scheme. grpc destination: %v. grpc port: %s",
+ parsedDest, port)
+ default:
+ parsedDest = dest
+ port = fnet.DefaultGRPCPort
+ }
+ if _, _, err := net.SplitHostPort(parsedDest); err == nil {
+ return parsedDest
+ }
+ if ip := net.ParseIP(parsedDest); ip != nil {
+ switch {
+ case ip.To4() != nil:
+ parsedDest = ip.String() + fnet.NormalizePort(port)
+ return parsedDest
+ case ip.To16() != nil:
+ parsedDest = "[" + ip.String() + "]" + fnet.NormalizePort(port)
+ return parsedDest
+ }
+ }
+ // parsedDest is in the form of a domain name,
+ // append ":port" and return.
+ parsedDest += fnet.NormalizePort(port)
+ return parsedDest
+}
diff --git a/vendor/fortio.org/fortio/fgrpc/grpcrunner_test.go b/vendor/fortio.org/fortio/fgrpc/grpcrunner_test.go
new file mode 100644
index 0000000000..25f97b3f8d
--- /dev/null
+++ b/vendor/fortio.org/fortio/fgrpc/grpcrunner_test.go
@@ -0,0 +1,397 @@
+// Copyright 2017 Istio Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package fgrpc
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "fortio.org/fortio/fnet"
+ "fortio.org/fortio/log"
+ "fortio.org/fortio/periodic"
+
+ "google.golang.org/grpc/health/grpc_health_v1"
+)
+
+var (
+ // Generated from "make cert"
+ caCrt = "../cert-tmp/ca.crt"
+ svrCrt = "../cert-tmp/server.crt"
+ svrKey = "../cert-tmp/server.key"
+ // used for failure test cases
+ failCrt = "../missing/cert.crt"
+ failKey = "../missing/cert.key"
+)
+
+func TestGRPCRunner(t *testing.T) {
+ log.SetLogLevel(log.Info)
+ iPort := PingServerTCP("0", "", "", "bar", 0)
+ iDest := fmt.Sprintf("localhost:%d", iPort)
+ sPort := PingServerTCP("0", svrCrt, svrKey, "bar", 0)
+ sDest := fmt.Sprintf("localhost:%d", sPort)
+ uds := fnet.GetUniqueUnixDomainPath("fortio-grpc-test")
+ uPath := PingServer(uds, "", "", "", 10)
+ uDest := "foo.bar:125"
+
+ ro := periodic.RunnerOptions{
+ QPS: 10, // some internet outcalls, not too fast
+ Resolution: 0.00001,
+ }
+
+ tests := []struct {
+ name string
+ runnerOpts GRPCRunnerOptions
+ expect bool
+ }{
+ {
+ name: "valid insecure runner with payload",
+ runnerOpts: GRPCRunnerOptions{
+ Destination: iDest,
+ Payload: "test",
+ },
+ expect: true,
+ },
+ {
+ name: "valid secure runner",
+ runnerOpts: GRPCRunnerOptions{
+ Destination: sDest,
+ CACert: caCrt,
+ },
+ expect: true,
+ },
+ {
+ name: "valid unix domain socket runner",
+ runnerOpts: GRPCRunnerOptions{
+ Destination: uDest,
+ UnixDomainSocket: uPath.String(),
+ },
+ expect: true,
+ },
+ {
+ name: "invalid insecure runner to secure server",
+ runnerOpts: GRPCRunnerOptions{
+ Destination: sDest,
+ },
+ expect: false,
+ },
+ {
+ name: "valid secure runner using nil credentials to Internet https server",
+ runnerOpts: GRPCRunnerOptions{
+ Destination: "https://fortio.istio.io:443",
+ },
+ expect: true,
+ },
+ {
+ name: "valid secure runner using nil credentials to Internet https server, default https port, trailing slash",
+ runnerOpts: GRPCRunnerOptions{
+ Destination: "https://fortio.istio.io/",
+ },
+ expect: true,
+ },
+ {
+ name: "invalid secure runner to insecure server",
+ runnerOpts: GRPCRunnerOptions{
+ Destination: "fortio.istio.io:443",
+ },
+ expect: false,
+ },
+ {
+ name: "invalid secure runner using test cert to https prefix Internet server",
+ runnerOpts: GRPCRunnerOptions{
+ Destination: "https://fortio.istio.io:443",
+ CACert: caCrt,
+ },
+ expect: false,
+ },
+ {
+ name: "invalid secure runner using test cert to no prefix Internet server",
+ runnerOpts: GRPCRunnerOptions{
+ Destination: "fortio.istio.io:443",
+ },
+ expect: false,
+ },
+ {
+ name: "invalid name in secure runner cert",
+ runnerOpts: GRPCRunnerOptions{
+ Destination: sDest,
+ CACert: caCrt,
+ CertOverride: "invalidName",
+ },
+ expect: false,
+ },
+ {
+ name: "invalid cert for secure runner",
+ runnerOpts: GRPCRunnerOptions{
+ Destination: sDest,
+ CACert: "../missing/cert.crt",
+ },
+ expect: false,
+ },
+ }
+ for _, test := range tests {
+ test.runnerOpts.Profiler = "test.profile"
+ test.runnerOpts.RunnerOptions = ro
+ res, err := RunGRPCTest(&test.runnerOpts)
+ switch {
+ case err != nil && test.expect:
+ t.Errorf("Test case: %s failed due to unexpected error: %v", test.name, err)
+ return
+ case err == nil && !test.expect:
+ t.Errorf("Test case: %s failed due to unexpected response: %v", test.name, res)
+ return
+ case err == nil && test.expect:
+ totalReq := res.DurationHistogram.Count
+ ok := res.RetCodes[grpc_health_v1.HealthCheckResponse_SERVING.String()]
+ if totalReq != ok {
+ t.Errorf("Test case: %s failed. Mismatch between requests %d and ok %v",
+ test.name, totalReq, res.RetCodes)
+ }
+ }
+ }
+}
+
+func TestGRPCRunnerMaxStreams(t *testing.T) {
+ log.SetLogLevel(log.Info)
+ port := PingServerTCP("0", "", "", "maxstream", 10)
+ destination := fmt.Sprintf("localhost:%d", port)
+
+ opts := GRPCRunnerOptions{
+ RunnerOptions: periodic.RunnerOptions{
+ QPS: 100,
+ NumThreads: 1,
+ },
+ Destination: destination,
+ Streams: 10, // will be batches of 10 max
+ UsePing: true,
+ Delay: 20 * time.Millisecond,
+ }
+ o1 := opts
+ res, err := RunGRPCTest(&o1)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ totalReq := res.DurationHistogram.Count
+ avg10 := res.DurationHistogram.Avg
+ ok := res.RetCodes[grpc_health_v1.HealthCheckResponse_SERVING.String()]
+ if totalReq != ok {
+ t.Errorf("Mismatch1 between requests %d and ok %v", totalReq, res.RetCodes)
+ }
+ if avg10 < opts.Delay.Seconds() || avg10 > 3*opts.Delay.Seconds() {
+ t.Errorf("Ping delay not working, got %v for %v", avg10, opts.Delay)
+ }
+ o2 := opts
+ o2.Streams = 20
+ res, err = RunGRPCTest(&o2)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ totalReq = res.DurationHistogram.Count
+ avg20 := res.DurationHistogram.Avg
+ ok = res.RetCodes[grpc_health_v1.HealthCheckResponse_SERVING.String()]
+ if totalReq != ok {
+ t.Errorf("Mismatch2 between requests %d and ok %v", totalReq, res.RetCodes)
+ }
+ // Half of the calls should take 2x (delayed behind maxstreams)
+ if avg20 < 1.5*opts.Delay.Seconds() {
+ t.Errorf("Expecting much slower average with 20/10 %v %v", avg20, avg10)
+ }
+}
+
+func TestGRPCRunnerWithError(t *testing.T) {
+ log.SetLogLevel(log.Info)
+ iPort := PingServerTCP("0", "", "", "bar", 0)
+ iDest := fmt.Sprintf("localhost:%d", iPort)
+ sPort := PingServerTCP("0", svrCrt, svrKey, "bar", 0)
+ sDest := fmt.Sprintf("localhost:%d", sPort)
+
+ ro := periodic.RunnerOptions{
+ QPS: 10,
+ Duration: 1 * time.Second,
+ }
+
+ tests := []struct {
+ name string
+ runnerOpts GRPCRunnerOptions
+ }{
+ {
+ name: "insecure runner",
+ runnerOpts: GRPCRunnerOptions{
+ Destination: iDest,
+ },
+ },
+ {
+ name: "secure runner",
+ runnerOpts: GRPCRunnerOptions{
+ Destination: sDest,
+ CACert: caCrt,
+ },
+ },
+ {
+ name: "invalid insecure runner to secure server",
+ runnerOpts: GRPCRunnerOptions{
+ Destination: sDest,
+ },
+ },
+ {
+ name: "invalid secure runner to insecure server",
+ runnerOpts: GRPCRunnerOptions{
+ Destination: iDest,
+ CACert: caCrt,
+ },
+ },
+ {
+ name: "invalid name in runner cert",
+ runnerOpts: GRPCRunnerOptions{
+ Destination: sDest,
+ CACert: caCrt,
+ CertOverride: "invalidName",
+ },
+ },
+ {
+ name: "valid runner using nil credentials to Internet https server",
+ runnerOpts: GRPCRunnerOptions{
+ Destination: "https://fortio.istio.io/",
+ },
+ },
+ {
+ name: "invalid runner using test cert to https prefix Internet server",
+ runnerOpts: GRPCRunnerOptions{
+ Destination: "https://fortio.istio.io/",
+ CACert: caCrt,
+ },
+ },
+ {
+ name: "invalid runner using test cert to no prefix Internet server",
+ runnerOpts: GRPCRunnerOptions{
+ Destination: "fortio.istio.io:443",
+ CACert: caCrt,
+ },
+ },
+ }
+ for _, test := range tests {
+ test.runnerOpts.Service = "svc2"
+ test.runnerOpts.RunnerOptions = ro
+ _, err := RunGRPCTest(&test.runnerOpts)
+ if err == nil {
+ t.Error("Was expecting initial error when connecting to secure without AllowInitialErrors")
+ }
+ test.runnerOpts.AllowInitialErrors = true
+ res, err := RunGRPCTest(&test.runnerOpts)
+ if err != nil {
+ t.Errorf("Test case: %s failed due to unexpected error: %v", test.name, err)
+ return
+ }
+ totalReq := res.DurationHistogram.Count
+ numErrors := res.RetCodes[Error]
+ if totalReq != numErrors {
+ t.Errorf("Test case: %s failed. Mismatch between requests %d and errors %v",
+ test.name, totalReq, res.RetCodes)
+ }
+ }
+}
+
+func TestGRPCDestination(t *testing.T) {
+ tests := []struct {
+ name string
+ dest string
+ output string
+ }{
+ {
+ "valid hostname",
+ "localhost",
+ "localhost:8079",
+ },
+ {
+ "hostname and port",
+ "localhost:1234",
+ "localhost:1234",
+ },
+ {
+ "hostname with http prefix",
+ "http://localhost",
+ "localhost:80",
+ },
+ {
+ "Hostname with http prefix and trailing /",
+ "http://localhost/",
+ "localhost:80",
+ },
+ {
+ "Hostname with https prefix",
+ "https://localhost",
+ "localhost:443",
+ },
+ {
+ "Hostname with https prefix and trailing /",
+ "https://localhost/",
+ "localhost:443",
+ },
+ {
+ "IPv4 address",
+ "1.2.3.4",
+ "1.2.3.4:8079",
+ },
+ {
+ "IPv4 address and port",
+ "1.2.3.4:5678",
+ "1.2.3.4:5678",
+ },
+ {
+ "IPv4 address with http prefix and trailing /",
+ "http://1.2.3.4/",
+ "1.2.3.4:80",
+ },
+ {
+ "IPv6 address",
+ "2001:dba::1",
+ "[2001:dba::1]:8079",
+ },
+ {
+ "IPv6 address and port",
+ "[2001:dba::1]:1234",
+ "[2001:dba::1]:1234",
+ },
+ {
+ "IPv6 address with http prefix",
+ "http://2001:dba::1",
+ "[2001:dba::1]:80",
+ },
+ {
+ "IPv6 address with https prefix",
+ "https://2001:dba::1",
+ "[2001:dba::1]:443",
+ },
+ {
+ "IPv6 address with https prefix and trailing /",
+ "https://2001:dba::1/",
+ "[2001:dba::1]:443",
+ },
+ }
+
+ for _, tc := range tests {
+ dest := grpcDestination(tc.dest)
+ if dest != tc.output {
+ t.Errorf("Test case: %s failed to set gRPC destination\n\texpected: %s\n\t actual: %s",
+ tc.name,
+ tc.output,
+ dest,
+ )
+ }
+ }
+}
diff --git a/vendor/fortio.org/fortio/fgrpc/ping.pb.go b/vendor/fortio.org/fortio/fgrpc/ping.pb.go
new file mode 100644
index 0000000000..856325889a
--- /dev/null
+++ b/vendor/fortio.org/fortio/fgrpc/ping.pb.go
@@ -0,0 +1,166 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: ping.proto
+
+/*
+Package fgrpc is a generated protocol buffer package.
+
+It is generated from these files:
+ ping.proto
+
+It has these top-level messages:
+ PingMessage
+*/
+package fgrpc
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import (
+ context "golang.org/x/net/context"
+ grpc "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type PingMessage struct {
+ Seq int64 `protobuf:"varint,1,opt,name=seq" json:"seq,omitempty"`
+ Ts int64 `protobuf:"varint,2,opt,name=ts" json:"ts,omitempty"`
+ Payload string `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"`
+ DelayNanos int64 `protobuf:"varint,4,opt,name=delayNanos" json:"delayNanos,omitempty"`
+}
+
+func (m *PingMessage) Reset() { *m = PingMessage{} }
+func (m *PingMessage) String() string { return proto.CompactTextString(m) }
+func (*PingMessage) ProtoMessage() {}
+func (*PingMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *PingMessage) GetSeq() int64 {
+ if m != nil {
+ return m.Seq
+ }
+ return 0
+}
+
+func (m *PingMessage) GetTs() int64 {
+ if m != nil {
+ return m.Ts
+ }
+ return 0
+}
+
+func (m *PingMessage) GetPayload() string {
+ if m != nil {
+ return m.Payload
+ }
+ return ""
+}
+
+func (m *PingMessage) GetDelayNanos() int64 {
+ if m != nil {
+ return m.DelayNanos
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterType((*PingMessage)(nil), "fgrpc.PingMessage")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for PingServer service
+
+type PingServerClient interface {
+ Ping(ctx context.Context, in *PingMessage, opts ...grpc.CallOption) (*PingMessage, error)
+}
+
+type pingServerClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewPingServerClient(cc *grpc.ClientConn) PingServerClient {
+ return &pingServerClient{cc}
+}
+
+func (c *pingServerClient) Ping(ctx context.Context, in *PingMessage, opts ...grpc.CallOption) (*PingMessage, error) {
+ out := new(PingMessage)
+ err := grpc.Invoke(ctx, "/fgrpc.PingServer/Ping", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Server API for PingServer service
+
+type PingServerServer interface {
+ Ping(context.Context, *PingMessage) (*PingMessage, error)
+}
+
+func RegisterPingServerServer(s *grpc.Server, srv PingServerServer) {
+ s.RegisterService(&_PingServer_serviceDesc, srv)
+}
+
+func _PingServer_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(PingMessage)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(PingServerServer).Ping(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/fgrpc.PingServer/Ping",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(PingServerServer).Ping(ctx, req.(*PingMessage))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _PingServer_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "fgrpc.PingServer",
+ HandlerType: (*PingServerServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Ping",
+ Handler: _PingServer_Ping_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "ping.proto",
+}
+
+func init() { proto.RegisterFile("ping.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 163 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xc8, 0xcc, 0x4b,
+ 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4d, 0x4b, 0x2f, 0x2a, 0x48, 0x56, 0xca, 0xe4,
+ 0xe2, 0x0e, 0xc8, 0xcc, 0x4b, 0xf7, 0x4d, 0x2d, 0x2e, 0x4e, 0x4c, 0x4f, 0x15, 0x12, 0xe0, 0x62,
+ 0x2e, 0x4e, 0x2d, 0x94, 0x60, 0x54, 0x60, 0xd4, 0x60, 0x0e, 0x02, 0x31, 0x85, 0xf8, 0xb8, 0x98,
+ 0x4a, 0x8a, 0x25, 0x98, 0xc0, 0x02, 0x4c, 0x25, 0xc5, 0x42, 0x12, 0x5c, 0xec, 0x05, 0x89, 0x95,
+ 0x39, 0xf9, 0x89, 0x29, 0x12, 0xcc, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x30, 0xae, 0x90, 0x1c, 0x17,
+ 0x57, 0x4a, 0x6a, 0x4e, 0x62, 0xa5, 0x5f, 0x62, 0x5e, 0x7e, 0xb1, 0x04, 0x0b, 0x58, 0x07, 0x92,
+ 0x88, 0x91, 0x1d, 0x17, 0x17, 0xc8, 0xaa, 0xe0, 0xd4, 0xa2, 0xb2, 0xd4, 0x22, 0x21, 0x03, 0x2e,
+ 0x16, 0x10, 0x4f, 0x48, 0x48, 0x0f, 0xec, 0x10, 0x3d, 0x24, 0x57, 0x48, 0x61, 0x11, 0x53, 0x62,
+ 0x48, 0x62, 0x03, 0x3b, 0xdc, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x54, 0x22, 0xa0, 0xfe, 0xc6,
+ 0x00, 0x00, 0x00,
+}
diff --git a/vendor/fortio.org/fortio/fgrpc/ping.proto b/vendor/fortio.org/fortio/fgrpc/ping.proto
new file mode 100644
index 0000000000..835ecf2f5f
--- /dev/null
+++ b/vendor/fortio.org/fortio/fgrpc/ping.proto
@@ -0,0 +1,15 @@
+// regenerate the .pb.go file after any change using
+// protoc ping.proto --go_out=plugins=grpc:.
+syntax = "proto3";
+package fgrpc;
+
+message PingMessage {
+ int64 seq = 1; // sequence number
+ int64 ts = 2; // src send ts / dest receive ts
+ string payload = 3; // extra packet data
+ int64 delayNanos = 4; // delay the response by x nanoseconds
+}
+
+service PingServer {
+ rpc Ping (PingMessage) returns (PingMessage) {}
+}
diff --git a/vendor/fortio.org/fortio/fgrpc/pingsrv.go b/vendor/fortio.org/fortio/fgrpc/pingsrv.go
new file mode 100644
index 0000000000..8de8c81f76
--- /dev/null
+++ b/vendor/fortio.org/fortio/fgrpc/pingsrv.go
@@ -0,0 +1,195 @@
+// Copyright 2017 Istio Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package fgrpc
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "time"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/health"
+ "google.golang.org/grpc/health/grpc_health_v1"
+ "google.golang.org/grpc/reflection"
+
+ "fortio.org/fortio/fnet"
+ "fortio.org/fortio/log"
+ "fortio.org/fortio/stats"
+)
+
+const (
+ // DefaultHealthServiceName is the default health service name used by fortio.
+ DefaultHealthServiceName = "ping"
+	// Error indicates that something went wrong with the grpc health check.
+ Error = "ERROR"
+)
+
+type pingSrv struct {
+}
+
+func (s *pingSrv) Ping(c context.Context, in *PingMessage) (*PingMessage, error) {
+ log.LogVf("Ping called %+v (ctx %+v)", *in, c)
+ out := *in // copy the input including the payload etc
+ out.Ts = time.Now().UnixNano()
+ if in.DelayNanos > 0 {
+ s := time.Duration(in.DelayNanos)
+ log.LogVf("GRPC ping: sleeping for %v", s)
+ time.Sleep(s)
+ }
+ return &out, nil
+}
+
+// PingServer starts a grpc ping (and health) echo server.
+// returns the port being bound (useful when passing "0" as the port to
+// get a dynamic server). Pass the healthServiceName to use for the
+// grpc service name health check (or pass DefaultHealthServiceName)
+// to be marked as SERVING. Pass maxConcurrentStreams > 0 to set that option.
+func PingServer(port, cert, key, healthServiceName string, maxConcurrentStreams uint32) net.Addr {
+ socket, addr := fnet.Listen("grpc '"+healthServiceName+"'", port)
+ if addr == nil {
+ return nil
+ }
+ var grpcOptions []grpc.ServerOption
+ if maxConcurrentStreams > 0 {
+ log.Infof("Setting grpc.MaxConcurrentStreams server to %d", maxConcurrentStreams)
+ grpcOptions = append(grpcOptions, grpc.MaxConcurrentStreams(maxConcurrentStreams))
+ }
+ if cert != "" && key != "" {
+ creds, err := credentials.NewServerTLSFromFile(cert, key)
+ if err != nil {
+ log.Fatalf("Invalid TLS credentials: %v\n", err)
+ }
+ log.Infof("Using server certificate %v to construct TLS credentials", cert)
+ log.Infof("Using server key %v to construct TLS credentials", key)
+ grpcOptions = append(grpcOptions, grpc.Creds(creds))
+ }
+ grpcServer := grpc.NewServer(grpcOptions...)
+ reflection.Register(grpcServer)
+ healthServer := health.NewServer()
+ healthServer.SetServingStatus(healthServiceName, grpc_health_v1.HealthCheckResponse_SERVING)
+ grpc_health_v1.RegisterHealthServer(grpcServer, healthServer)
+ RegisterPingServerServer(grpcServer, &pingSrv{})
+ go func() {
+ if err := grpcServer.Serve(socket); err != nil {
+ log.Fatalf("failed to start grpc server: %v", err)
+ }
+ }()
+ return addr
+}
+
+// PingServerTCP is PingServer() assuming tcp instead of possible unix domain socket port, returns
+// the numeric port.
+func PingServerTCP(port, cert, key, healthServiceName string, maxConcurrentStreams uint32) int {
+ addr := PingServer(port, cert, key, healthServiceName, maxConcurrentStreams)
+ if addr == nil {
+ return -1
+ }
+ return addr.(*net.TCPAddr).Port
+}
+
+// PingClientCall calls the ping service (presumably running as PingServer on
+// the destination). returns the average round trip in seconds.
+func PingClientCall(serverAddr, cacert string, n int, payload string, delay time.Duration) (float64, error) {
+ o := GRPCRunnerOptions{Destination: serverAddr, CACert: cacert}
+ conn, err := Dial(&o) // somehow this never seem to error out, error comes later
+ if err != nil {
+ return -1, err // error already logged
+ }
+ msg := &PingMessage{Payload: payload, DelayNanos: delay.Nanoseconds()}
+ cli := NewPingServerClient(conn)
+ // Warm up:
+ _, err = cli.Ping(context.Background(), msg)
+ if err != nil {
+ log.Errf("grpc error from Ping0 %v", err)
+ return -1, err
+ }
+ skewHistogram := stats.NewHistogram(-10, 2)
+ rttHistogram := stats.NewHistogram(0, 10)
+ for i := 1; i <= n; i++ {
+ msg.Seq = int64(i)
+ t1a := time.Now().UnixNano()
+ msg.Ts = t1a
+ res1, err := cli.Ping(context.Background(), msg)
+ t2a := time.Now().UnixNano()
+ if err != nil {
+ log.Errf("grpc error from Ping1 iter %d: %v", i, err)
+ return -1, err
+ }
+ t1b := res1.Ts
+ res2, err := cli.Ping(context.Background(), msg)
+ t3a := time.Now().UnixNano()
+ t2b := res2.Ts
+ if err != nil {
+ log.Errf("grpc error from Ping2 iter %d: %v", i, err)
+ return -1, err
+ }
+ rt1 := t2a - t1a
+ rttHistogram.Record(float64(rt1) / 1000.)
+ rt2 := t3a - t2a
+ rttHistogram.Record(float64(rt2) / 1000.)
+ rtR := t2b - t1b
+ rttHistogram.Record(float64(rtR) / 1000.)
+ midR := t1b + (rtR / 2)
+ avgRtt := (rt1 + rt2 + rtR) / 3
+ x := (midR - t2a)
+ log.Infof("Ping RTT %d (avg of %d, %d, %d ns) clock skew %d",
+ avgRtt, rt1, rtR, rt2, x)
+ skewHistogram.Record(float64(x) / 1000.)
+ msg = res2
+ }
+ skewHistogram.Print(os.Stdout, "Clock skew histogram usec", []float64{50})
+ rttHistogram.Print(os.Stdout, "RTT histogram usec", []float64{50})
+ return rttHistogram.Avg() / 1e6, nil
+}
+
+// HealthResultMap short cut for the map of results to count.
+type HealthResultMap map[string]int64
+
+// GrpcHealthCheck makes a grpc client call to the standard grpc health check
+// service.
+func GrpcHealthCheck(serverAddr, cacert string, svcname string, n int) (*HealthResultMap, error) {
+ log.Debugf("GrpcHealthCheck for %s svc '%s', %d iterations", serverAddr, svcname, n)
+ o := GRPCRunnerOptions{Destination: serverAddr, CACert: cacert}
+ conn, err := Dial(&o)
+ if err != nil {
+ return nil, err
+ }
+ msg := &grpc_health_v1.HealthCheckRequest{Service: svcname}
+ cli := grpc_health_v1.NewHealthClient(conn)
+ rttHistogram := stats.NewHistogram(0, 10)
+ statuses := make(HealthResultMap)
+
+ for i := 1; i <= n; i++ {
+ start := time.Now()
+ res, err := cli.Check(context.Background(), msg)
+ dur := time.Since(start)
+ log.LogVf("Reply from health check %d: %+v", i, res)
+ if err != nil {
+ log.Errf("grpc error from Check %v", err)
+ return nil, err
+ }
+ statuses[res.Status.String()]++
+ rttHistogram.Record(dur.Seconds() * 1000000.)
+ }
+ rttHistogram.Print(os.Stdout, "RTT histogram usec", []float64{50})
+ for k, v := range statuses {
+ fmt.Printf("Health %s : %d\n", k, v)
+ }
+ return &statuses, nil
+}
diff --git a/vendor/fortio.org/fortio/fgrpc/pingsrv_test.go b/vendor/fortio.org/fortio/fgrpc/pingsrv_test.go
new file mode 100644
index 0000000000..851a302c60
--- /dev/null
+++ b/vendor/fortio.org/fortio/fgrpc/pingsrv_test.go
@@ -0,0 +1,93 @@
+// Copyright 2017 Istio Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package fgrpc
+
+import (
+ "fmt"
+ "strconv"
+ "testing"
+ "time"
+
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/health/grpc_health_v1"
+
+ "fortio.org/fortio/fnet"
+ "fortio.org/fortio/log"
+)
+
+func init() {
+ log.SetLogLevel(log.Debug)
+}
+
+func TestPingServer(t *testing.T) {
+ iPort := PingServerTCP("0", "", "", "foo", 0)
+ iAddr := fmt.Sprintf("localhost:%d", iPort)
+ t.Logf("insecure grpc ping server running, will connect to %s", iAddr)
+ sPort := PingServerTCP("0", svrCrt, svrKey, "foo", 0)
+ sAddr := fmt.Sprintf("localhost:%d", sPort)
+ t.Logf("secure grpc ping server running, will connect to %s", sAddr)
+ delay := 100 * time.Millisecond
+ latency, err := PingClientCall(iAddr, "", 7, "test payload", delay)
+ if err != nil || latency < delay.Seconds() || latency > 10.*delay.Seconds() {
+ t.Errorf("Unexpected result %f, %v with ping calls and delay of %v", latency, err, delay)
+ }
+ if latency, err := PingClientCall(fnet.PrefixHTTPS+"fortio.istio.io:443", "", 7,
+ "test payload", 0); err != nil || latency <= 0 {
+ t.Errorf("Unexpected result %f, %v with ping calls", latency, err)
+ }
+ if latency, err := PingClientCall(sAddr, caCrt, 7, "test payload", 0); err != nil || latency <= 0 {
+ t.Errorf("Unexpected result %f, %v with ping calls", latency, err)
+ }
+ if latency, err := PingClientCall(iAddr, caCrt, 1, "", 0); err == nil {
+ t.Errorf("Should have had an error instead of result %f for secure ping to insecure port", latency)
+ }
+ if latency, err := PingClientCall(sAddr, "", 1, "", 0); err == nil {
+ t.Errorf("Should have had an error instead of result %f for insecure ping to secure port", latency)
+ }
+ if creds, err := credentials.NewServerTLSFromFile(failCrt, failKey); err == nil {
+ t.Errorf("Should have had an error instead of result %f for ping server", creds)
+ }
+ serving := grpc_health_v1.HealthCheckResponse_SERVING.String()
+ if r, err := GrpcHealthCheck(iAddr, "", "", 1); err != nil || (*r)[serving] != 1 {
+ t.Errorf("Unexpected result %+v, %v with empty service health check", r, err)
+ }
+ if r, err := GrpcHealthCheck(sAddr, caCrt, "", 1); err != nil || (*r)[serving] != 1 {
+ t.Errorf("Unexpected result %+v, %v with empty service health check", r, err)
+ }
+ if r, err := GrpcHealthCheck(fnet.PrefixHTTPS+"fortio.istio.io:443", "", "", 1); err != nil || (*r)[serving] != 1 {
+ t.Errorf("Unexpected result %+v, %v with empty service health check", r, err)
+ }
+ if r, err := GrpcHealthCheck(iAddr, "", "foo", 3); err != nil || (*r)[serving] != 3 {
+ t.Errorf("Unexpected result %+v, %v with health check for same service as started (foo)", r, err)
+ }
+ if r, err := GrpcHealthCheck(sAddr, caCrt, "foo", 3); err != nil || (*r)[serving] != 3 {
+ t.Errorf("Unexpected result %+v, %v with health check for same service as started (foo)", r, err)
+ }
+ if r, err := GrpcHealthCheck(iAddr, "", "willfail", 1); err == nil || r != nil {
+ t.Errorf("Was expecting error when using unknown service, didn't get one, got %+v", r)
+ }
+ if r, err := GrpcHealthCheck(sAddr, caCrt, "willfail", 1); err == nil || r != nil {
+ t.Errorf("Was expecting error when using unknown service, didn't get one, got %+v", r)
+ }
+ if r, err := GrpcHealthCheck(sAddr, failCrt, "willfail", 1); err == nil {
+ t.Errorf("Was expecting dial error when using invalid certificate, didn't get one, got %+v", r)
+ }
+ // 2nd server on same port should fail to bind:
+ newPort := PingServerTCP(strconv.Itoa(iPort), "", "", "will fail", 0)
+ if newPort != -1 {
+ t.Errorf("Didn't expect 2nd server on same port to succeed: %d %d", newPort, iPort)
+ }
+}
diff --git a/vendor/fortio.org/fortio/fhttp/http_client.go b/vendor/fortio.org/fortio/fhttp/http_client.go
new file mode 100644
index 0000000000..621f1e11cf
--- /dev/null
+++ b/vendor/fortio.org/fortio/fhttp/http_client.go
@@ -0,0 +1,815 @@
+// Copyright 2017 Istio Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fhttp // import "fortio.org/fortio/fhttp"
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "fortio.org/fortio/fnet"
+ "fortio.org/fortio/log"
+ "fortio.org/fortio/version"
+)
+
+// Fetcher is the Url content fetcher that the different client implements.
+type Fetcher interface {
+ // Fetch returns http code, data, offset of body (for client which returns
+ // headers)
+ Fetch() (int, []byte, int)
+ // Close() cleans up connections and state - must be paired with NewClient calls.
+ // returns how many sockets have been used (Fastclient only)
+ Close() int
+}
+
+var (
+	// BufferSizeKb is the size of the buffer (max data) for the optimized client, in kilobytes; defaults to 128k.
+ BufferSizeKb = 128
+ // CheckConnectionClosedHeader indicates whether to check for server side connection closed headers.
+ CheckConnectionClosedHeader = false
+ // 'constants', case doesn't matter for those 3
+ contentLengthHeader = []byte("\r\ncontent-length:")
+ connectionCloseHeader = []byte("\r\nconnection: close")
+ chunkedHeader = []byte("\r\nTransfer-Encoding: chunked")
+)
+
+// NewHTTPOptions creates and initialize a HTTPOptions object.
+// It replaces plain % to %25 in the url. If you already have properly
+// escaped URLs use o.URL = to set it.
+func NewHTTPOptions(url string) *HTTPOptions {
+ h := HTTPOptions{}
+ return h.Init(url)
+}
+
+// Init initializes the headers in an HTTPOptions (User-Agent).
+func (h *HTTPOptions) Init(url string) *HTTPOptions {
+ if h.initDone {
+ return h
+ }
+ h.initDone = true
+ h.URL = url
+ h.NumConnections = 1
+ if h.HTTPReqTimeOut == 0 {
+ log.Debugf("Request timeout not set, using default %v", HTTPReqTimeOutDefaultValue)
+ h.HTTPReqTimeOut = HTTPReqTimeOutDefaultValue
+ }
+ if h.HTTPReqTimeOut < 0 {
+ log.Warnf("Invalid timeout %v, setting to %v", h.HTTPReqTimeOut, HTTPReqTimeOutDefaultValue)
+ h.HTTPReqTimeOut = HTTPReqTimeOutDefaultValue
+ }
+ h.URLSchemeCheck()
+ return h
+}
+
+const (
+ contentType = "Content-Type"
+ contentLength = "Content-Length"
+)
+
+// GenerateHeaders completes the header generation, including Content-Type/Length
+// and user credential coming from the http options in addition to extra headers
+// coming from flags and AddAndValidateExtraHeader().
+// Warning this gets called more than once, do not generate duplicate headers.
+func (h *HTTPOptions) GenerateHeaders() http.Header {
+ if h.extraHeaders == nil { // not already initialized from flags.
+ h.InitHeaders()
+ }
+ allHeaders := h.extraHeaders
+ payloadLen := len(h.Payload)
+ // If content-type isn't already specified and we have a payload, let's use the
+ // standard for binary content:
+ if payloadLen > 0 && len(h.ContentType) == 0 && len(allHeaders.Get(contentType)) == 0 {
+ h.ContentType = "application/octet-stream"
+ }
+ if len(h.ContentType) > 0 {
+ allHeaders.Set(contentType, h.ContentType)
+ }
+ // Add content-length unless already set in custom headers (or we're not doing a POST)
+ if (payloadLen > 0 || len(h.ContentType) > 0) && len(allHeaders.Get(contentLength)) == 0 {
+ allHeaders.Set(contentLength, strconv.Itoa(payloadLen))
+ }
+ err := h.ValidateAndAddBasicAuthentication(allHeaders)
+ if err != nil {
+ log.Errf("User credential is not valid: %v", err)
+ }
+ return allHeaders
+}
+
+// URLSchemeCheck makes sure the client will work with the scheme requested.
+// it also adds missing http:// to emulate curl's behavior.
+func (h *HTTPOptions) URLSchemeCheck() {
+ log.LogVf("URLSchemeCheck %+v", h)
+ if len(h.URL) == 0 {
+ log.Errf("Unexpected init with empty url")
+ return
+ }
+ hs := "https://" // longer of the 2 prefixes
+ lcURL := h.URL
+ if len(lcURL) > len(hs) {
+ lcURL = strings.ToLower(h.URL[:len(hs)]) // no need to tolower more than we check
+ }
+ if strings.HasPrefix(lcURL, hs) {
+ h.https = true
+ if !h.DisableFastClient {
+ log.Warnf("https requested, switching to standard go client")
+ h.DisableFastClient = true
+ }
+ return // url is good
+ }
+ if !strings.HasPrefix(lcURL, "http://") {
+ log.Warnf("Assuming http:// on missing scheme for '%s'", h.URL)
+ h.URL = "http://" + h.URL
+ }
+}
+
+var userAgent = "fortio.org/fortio-" + version.Short()
+
+const (
+ retcodeOffset = len("HTTP/1.X ")
+ // HTTPReqTimeOutDefaultValue is the default timeout value. 15s.
+ HTTPReqTimeOutDefaultValue = 15 * time.Second
+)
+
+// HTTPOptions holds the common options of both http clients and the headers.
+type HTTPOptions struct {
+ URL string
+ NumConnections int // num connections (for std client)
+ Compression bool // defaults to no compression, only used by std client
+ DisableFastClient bool // defaults to fast client
+ HTTP10 bool // defaults to http1.1
+ DisableKeepAlive bool // so default is keep alive
+ AllowHalfClose bool // if not keepalive, whether to half close after request
+ Insecure bool // do not verify certs for https
+ FollowRedirects bool // For the Std Client only: follow redirects.
+ initDone bool
+ https bool // whether URLSchemeCheck determined this was an https:// call or not
+ // ExtraHeaders to be added to each request (UserAgent and headers set through AddAndValidateExtraHeader()).
+ extraHeaders http.Header
+ // Host is treated specially, remember that virtual header separately.
+ hostOverride string
+ HTTPReqTimeOut time.Duration // timeout value for http request
+
+ UserCredentials string // user credentials for authorization
+ ContentType string // indicates request body type, implies POST instead of GET
+ Payload []byte // body for http request, implies POST if not empty.
+
+ UnixDomainSocket string // Path of unix domain socket to use instead of host:port from URL
+}
+
+// ResetHeaders resets all the headers, including the User-Agent: one (and the Host: logical special header).
+// This is used from the UI as the user agent is settable from the form UI.
+func (h *HTTPOptions) ResetHeaders() {
+ h.extraHeaders = make(http.Header)
+ h.hostOverride = ""
+}
+
+// InitHeaders initialize and/or resets the default headers (ie just User-Agent).
+func (h *HTTPOptions) InitHeaders() {
+ h.ResetHeaders()
+ h.extraHeaders.Add("User-Agent", userAgent)
+ // No other headers should be added here based on options content as this is called only once
+ // before command line option -H are parsed/set.
+}
+
+// PayloadString returns the payload as a string. If the payload is nil it returns the empty string.
+// This is only needed due to grpc ping proto. It takes string instead of byte array.
+func (h *HTTPOptions) PayloadString() string {
+ if len(h.Payload) == 0 {
+ return ""
+ }
+ return string(h.Payload)
+}
+
+// ValidateAndAddBasicAuthentication validates user credentials and adds basic authentication to http header,
+// if user credentials are valid.
+func (h *HTTPOptions) ValidateAndAddBasicAuthentication(headers http.Header) error {
+ if len(h.UserCredentials) <= 0 {
+ return nil // user credential is not entered
+ }
+ s := strings.SplitN(h.UserCredentials, ":", 2)
+ if len(s) != 2 {
+ return fmt.Errorf("invalid user credentials \"%s\", expecting \"user:password\"", h.UserCredentials)
+ }
+ headers.Set("Authorization", generateBase64UserCredentials(h.UserCredentials))
+ return nil
+}
+
+// AllHeaders returns the current set of headers including virtual/special Host header.
+func (h *HTTPOptions) AllHeaders() http.Header {
+ headers := h.GenerateHeaders()
+ if h.hostOverride != "" {
+ headers.Add("Host", h.hostOverride)
+ }
+ return headers
+}
+
+// Method returns the method of the http req.
+func (h *HTTPOptions) Method() string {
+ if len(h.Payload) > 0 || h.ContentType != "" {
+ return fnet.POST
+ }
+ return fnet.GET
+}
+
+// AddAndValidateExtraHeader collects extra headers (see commonflags.go for example).
+func (h *HTTPOptions) AddAndValidateExtraHeader(hdr string) error {
+ // This function can be called from the flag settings, before we have a URL
+ // so we can't just call h.Init(h.URL)
+ if h.extraHeaders == nil {
+ h.InitHeaders()
+ }
+ s := strings.SplitN(hdr, ":", 2)
+ if len(s) != 2 {
+ return fmt.Errorf("invalid extra header '%s', expecting Key: Value", hdr)
+ }
+ key := strings.TrimSpace(s[0])
+ value := strings.TrimSpace(s[1])
+ if strings.EqualFold(key, "host") {
+ log.LogVf("Will be setting special Host header to %s", value)
+ h.hostOverride = value
+ } else {
+ log.LogVf("Setting regular extra header %s: %s", key, value)
+ h.extraHeaders.Add(key, value)
+ log.Debugf("headers now %+v", h.extraHeaders)
+ }
+ return nil
+}
+
+// newHttpRequest makes a new http GET request for url with User-Agent.
+func newHTTPRequest(o *HTTPOptions) *http.Request {
+ method := o.Method()
+ var body io.Reader
+ if method == fnet.POST {
+ body = bytes.NewReader(o.Payload)
+ }
+ req, err := http.NewRequest(method, o.URL, body)
+ if err != nil {
+ log.Errf("Unable to make %s request for %s : %v", method, o.URL, err)
+ return nil
+ }
+ req.Header = o.GenerateHeaders()
+ if o.hostOverride != "" {
+ req.Host = o.hostOverride
+ }
+ if !log.LogDebug() {
+ return req
+ }
+ bytes, err := httputil.DumpRequestOut(req, false)
+ if err != nil {
+ log.Errf("Unable to dump request: %v", err)
+ } else {
+ log.Debugf("For URL %s, sending:\n%s", o.URL, bytes)
+ }
+ return req
+}
+
+// Client object for making repeated requests of the same URL using the same
+// http client (net/http)
+type Client struct {
+ url string
+ req *http.Request
+ client *http.Client
+ transport *http.Transport
+}
+
+// Close cleans up any resources used by NewStdClient
+func (c *Client) Close() int {
+ log.Debugf("Close() on %+v", c)
+ if c.req != nil {
+ if c.req.Body != nil {
+ if err := c.req.Body.Close(); err != nil {
+ log.Warnf("Error closing std client body: %v", err)
+ }
+ }
+ c.req = nil
+ }
+ if c.transport != nil {
+ c.transport.CloseIdleConnections()
+ }
+ return 0 // TODO: find a way to track std client socket usage.
+}
+
+// ChangeURL only for standard client, allows fetching a different URL
+func (c *Client) ChangeURL(urlStr string) (err error) {
+ c.url = urlStr
+ c.req.URL, err = url.Parse(urlStr)
+ return err
+}
+
+// Fetch fetches the byte and code for pre created client
+func (c *Client) Fetch() (int, []byte, int) {
+ // req can't be null (client itself would be null in that case)
+ resp, err := c.client.Do(c.req)
+ if err != nil {
+ log.Errf("Unable to send %s request for %s : %v", c.req.Method, c.url, err)
+ return http.StatusBadRequest, []byte(err.Error()), 0
+ }
+ var data []byte
+ if log.LogDebug() {
+ if data, err = httputil.DumpResponse(resp, false); err != nil {
+ log.Errf("Unable to dump response %v", err)
+ } else {
+ log.Debugf("For URL %s, received:\n%s", c.url, data)
+ }
+ }
+ data, err = ioutil.ReadAll(resp.Body)
+ resp.Body.Close() //nolint(errcheck)
+ if err != nil {
+ log.Errf("Unable to read response for %s : %v", c.url, err)
+ code := resp.StatusCode
+ if code == http.StatusOK {
+ code = http.StatusNoContent
+ log.Warnf("Ok code despite read error, switching code to %d", code)
+ }
+ return code, data, 0
+ }
+ code := resp.StatusCode
+ log.Debugf("Got %d : %s for %s %s - response is %d bytes", code, resp.Status, c.req.Method, c.url, len(data))
+ return code, data, 0
+}
+
+// NewClient creates either a standard or fast client (depending on
+// the DisableFastClient flag)
+func NewClient(o *HTTPOptions) Fetcher {
+ o.Init(o.URL) // For completely new options
+ // For changes to options after init
+ o.URLSchemeCheck()
+ if o.DisableFastClient {
+ return NewStdClient(o)
+ }
+ return NewFastClient(o)
+}
+
+// NewStdClient creates a client object that wraps the net/http standard client.
+func NewStdClient(o *HTTPOptions) *Client {
+ o.Init(o.URL) // also normalizes NumConnections etc to be valid.
+ req := newHTTPRequest(o)
+ if req == nil {
+ return nil
+ }
+ tr := http.Transport{
+ MaxIdleConns: o.NumConnections,
+ MaxIdleConnsPerHost: o.NumConnections,
+ DisableCompression: !o.Compression,
+ DisableKeepAlives: o.DisableKeepAlive,
+ Proxy: http.ProxyFromEnvironment,
+ Dial: (&net.Dialer{
+ Timeout: o.HTTPReqTimeOut,
+ }).Dial,
+ TLSHandshakeTimeout: o.HTTPReqTimeOut,
+ }
+ if o.Insecure && o.https {
+ log.LogVf("using insecure https")
+ tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} // nolint: gas
+ }
+ client := Client{
+ url: o.URL,
+ req: req,
+ client: &http.Client{
+ Timeout: o.HTTPReqTimeOut,
+ Transport: &tr,
+ },
+ transport: &tr,
+ }
+ if !o.FollowRedirects {
+ // Lets us see the raw response instead of auto following redirects.
+ client.client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ }
+ }
+ return &client
+}
+
+// FetchURL fetches the data at the given url using the standard client and default options.
+// Returns the http status code (http.StatusOK == 200 for success) and the data.
+// To be used only for single fetches or when performance doesn't matter as the client is closed at the end.
+func FetchURL(url string) (int, []byte) {
+ o := NewHTTPOptions(url)
+ // Maximize chances of getting the data back, vs the raw payload like the fast client
+ o.DisableFastClient = true
+ o.FollowRedirects = true
+ return Fetch(o)
+}
+
+// Fetch creates a client an performs a fetch according to the http options passed in.
+// To be used only for single fetches or when performance doesn't matter as the client is closed at the end.
+func Fetch(httpOptions *HTTPOptions) (int, []byte) {
+ cli := NewClient(httpOptions)
+ code, data, _ := cli.Fetch()
+ cli.Close()
+ return code, data
+}
+
+// FastClient is a fast, lockfree single purpose http 1.0/1.1 client.
+type FastClient struct {
+ buffer []byte
+ req []byte
+ dest net.Addr
+ socket net.Conn
+ socketCount int
+ size int
+ code int
+ errorCount int
+ headerLen int
+ url string
+ host string
+ hostname string
+ port string
+ http10 bool // http 1.0, simplest: no Host, forced no keepAlive, no parsing
+ keepAlive bool
+ parseHeaders bool // don't bother in http/1.0
+ halfClose bool // allow/do half close when keepAlive is false
+ reqTimeout time.Duration
+}
+
+// Close cleans up any resources used by FastClient
+func (c *FastClient) Close() int {
+ log.Debugf("Closing %p %s socket count %d", c, c.url, c.socketCount)
+ if c.socket != nil {
+ if err := c.socket.Close(); err != nil {
+ log.Warnf("Error closing fast client's socket: %v", err)
+ }
+ c.socket = nil
+ }
+ return c.socketCount
+}
+
+// NewFastClient makes a basic, efficient http 1.0/1.1 client.
+// This function itself doesn't need to be super efficient as it is created at
+// the beginning and then reused many times.
+func NewFastClient(o *HTTPOptions) Fetcher {
+ method := o.Method()
+ payloadLen := len(o.Payload)
+ o.Init(o.URL)
+ proto := "1.1"
+ if o.HTTP10 {
+ proto = "1.0"
+ }
+ // Parse the url, extract components.
+ url, err := url.Parse(o.URL)
+ if err != nil {
+ log.Errf("Bad url '%s' : %v", o.URL, err)
+ return nil
+ }
+ if url.Scheme != "http" {
+ log.Errf("Only http is supported with the optimized client, use -stdclient for url %s", o.URL)
+ return nil
+ }
+ // note: Host includes the port
+ bc := FastClient{url: o.URL, host: url.Host, hostname: url.Hostname(), port: url.Port(),
+ http10: o.HTTP10, halfClose: o.AllowHalfClose}
+ bc.buffer = make([]byte, BufferSizeKb*1024)
+ if bc.port == "" {
+ bc.port = url.Scheme // ie http which turns into 80 later
+ log.LogVf("No port specified, using %s", bc.port)
+ }
+ var addr net.Addr
+ if o.UnixDomainSocket != "" {
+ log.Infof("Using unix domain socket %v instead of %v %v", o.UnixDomainSocket, bc.hostname, bc.port)
+ uds := &net.UnixAddr{Name: o.UnixDomainSocket, Net: fnet.UnixDomainSocket}
+ addr = uds
+ } else {
+ addr = fnet.Resolve(bc.hostname, bc.port)
+ }
+ if addr == nil {
+ // Error already logged
+ return nil
+ }
+ bc.dest = addr
+ // Create the bytes for the request:
+ host := bc.host
+ if o.hostOverride != "" {
+ host = o.hostOverride
+ }
+ var buf bytes.Buffer
+ buf.WriteString(method + " " + url.RequestURI() + " HTTP/" + proto + "\r\n")
+ if !bc.http10 {
+ buf.WriteString("Host: " + host + "\r\n")
+ bc.parseHeaders = true
+ if !o.DisableKeepAlive {
+ bc.keepAlive = true
+ } else {
+ buf.WriteString("Connection: close\r\n")
+ }
+ }
+ bc.reqTimeout = o.HTTPReqTimeOut
+ w := bufio.NewWriter(&buf)
+ // This writes multiple valued headers properly (unlike calling Get() to do it ourselves)
+ o.GenerateHeaders().Write(w) // nolint: errcheck,gas
+ w.Flush() // nolint: errcheck,gas
+ buf.WriteString("\r\n")
+	// Add the payload to the http body.
+ if payloadLen > 0 {
+ buf.Write(o.Payload)
+ }
+ bc.req = buf.Bytes()
+ log.Debugf("Created client:\n%+v\n%s", bc.dest, bc.req)
+ return &bc
+}
+
+// return the result from the state.
+func (c *FastClient) returnRes() (int, []byte, int) {
+ return c.code, c.buffer[:c.size], c.headerLen
+}
+
+// connect to destination.
+func (c *FastClient) connect() net.Conn {
+ c.socketCount++
+ socket, err := net.Dial(c.dest.Network(), c.dest.String())
+ if err != nil {
+ log.Errf("Unable to connect to %v : %v", c.dest, err)
+ return nil
+ }
+ tcpSock, ok := socket.(*net.TCPConn)
+ if !ok {
+ log.LogVf("Not setting socket options on non tcp socket %v", socket.RemoteAddr())
+ return socket
+ }
+ // For now those errors are not critical/breaking
+ if err = tcpSock.SetNoDelay(true); err != nil {
+ log.Warnf("Unable to connect to set tcp no delay %v %v : %v", socket, c.dest, err)
+ }
+ if err = tcpSock.SetWriteBuffer(len(c.req)); err != nil {
+ log.Warnf("Unable to connect to set write buffer %d %v %v : %v", len(c.req), socket, c.dest, err)
+ }
+ if err = tcpSock.SetReadBuffer(len(c.buffer)); err != nil {
+ log.Warnf("Unable to connect to read buffer %d %v %v : %v", len(c.buffer), socket, c.dest, err)
+ }
+ return socket
+}
+
+// Extra error codes outside of the HTTP Status code ranges. ie negative.
+const (
+ // SocketError is return when a transport error occurred: unexpected EOF, connection error, etc...
+ SocketError = -1
+ // RetryOnce is used internally as an error code to allow 1 retry for bad socket reuse.
+ RetryOnce = -2
+)
+
+// Fetch fetches the url content. Returns http code, data, offset of body.
+func (c *FastClient) Fetch() (int, []byte, int) {
+ c.code = SocketError
+ c.size = 0
+ c.headerLen = 0
+ // Connect or reuse existing socket:
+ conn := c.socket
+ reuse := (conn != nil)
+ if !reuse {
+ conn = c.connect()
+ if conn == nil {
+ return c.returnRes()
+ }
+ } else {
+ log.Debugf("Reusing socket %v", conn)
+ }
+ c.socket = nil // because of error returns and single retry
+ conErr := conn.SetReadDeadline(time.Now().Add(c.reqTimeout))
+ // Send the request:
+ n, err := conn.Write(c.req)
+ if err != nil || conErr != nil {
+ if reuse {
+ // it's ok for the (idle) socket to die once, auto reconnect:
+ log.Infof("Closing dead socket %v (%v)", conn, err)
+ conn.Close() // nolint: errcheck,gas
+ c.errorCount++
+ return c.Fetch() // recurse once
+ }
+ log.Errf("Unable to write to %v %v : %v", conn, c.dest, err)
+ return c.returnRes()
+ }
+ if n != len(c.req) {
+ log.Errf("Short write to %v %v : %d instead of %d", conn, c.dest, n, len(c.req))
+ return c.returnRes()
+ }
+ if !c.keepAlive && c.halfClose {
+ tcpConn, ok := conn.(*net.TCPConn)
+ if ok {
+ if err = tcpConn.CloseWrite(); err != nil {
+ log.Errf("Unable to close write to %v %v : %v", conn, c.dest, err)
+ return c.returnRes()
+ } // else:
+ log.Debugf("Half closed ok after sending request %v %v", conn, c.dest)
+ } else {
+ log.Warnf("Unable to close write non tcp connection %v", conn)
+ }
+ }
+ // Read the response:
+ c.readResponse(conn, reuse)
+ if c.code == RetryOnce {
+ // Special "eof on reused socket" code
+ return c.Fetch() // recurse once
+ }
+ // Return the result:
+ return c.returnRes()
+}
+
+// Response reading:
+// TODO: refactor - unwieldy/ugly atm
+func (c *FastClient) readResponse(conn net.Conn, reusedSocket bool) {
+ max := len(c.buffer)
+ parsedHeaders := false
+ // TODO: safer to start with -1 / SocketError and fix ok for http 1.0
+ c.code = http.StatusOK // In http 1.0 mode we don't bother parsing anything
+ endofHeadersStart := retcodeOffset + 3
+ keepAlive := c.keepAlive
+ chunkedMode := false
+ checkConnectionClosedHeader := CheckConnectionClosedHeader
+ skipRead := false
+ for {
+ // Ugly way to cover the case where we get more than 1 chunk at the end
+ // TODO: need automated tests
+ if !skipRead {
+ n, err := conn.Read(c.buffer[c.size:])
+ if err != nil {
+ if reusedSocket && c.size == 0 {
+ // Ok for reused socket to be dead once (close by server)
+ log.Infof("Closing dead socket %v (err %v at first read)", conn, err)
+ c.errorCount++
+ err = conn.Close() // close the previous one
+ if err != nil {
+ log.Warnf("Error closing dead socket %v: %v", conn, err)
+ }
+ c.code = RetryOnce // special "retry once" code
+ return
+ }
+ if err == io.EOF && c.size != 0 {
+ // handled below as possibly normal end of stream after we read something
+ break
+ }
+ log.Errf("Read error %v %v %d : %v", conn, c.dest, c.size, err)
+ c.code = SocketError
+ break
+ }
+ c.size += n
+ if log.LogDebug() {
+ log.Debugf("Read ok %d total %d so far (-%d headers = %d data) %s",
+ n, c.size, c.headerLen, c.size-c.headerLen, DebugSummary(c.buffer[c.size-n:c.size], 256))
+ }
+ }
+ skipRead = false
+ // Have not yet parsed the headers, need to parse the headers, and have enough data to
+ // at least parse the http retcode:
+ if !parsedHeaders && c.parseHeaders && c.size >= retcodeOffset+3 {
+ // even if the bytes are garbage we'll get a non 200 code (bytes are unsigned)
+ c.code = ParseDecimal(c.buffer[retcodeOffset : retcodeOffset+3]) //TODO do that only once...
+ // TODO handle 100 Continue
+ if c.code != http.StatusOK {
+ log.Warnf("Parsed non ok code %d (%v)", c.code, string(c.buffer[:retcodeOffset+3]))
+ break
+ }
+ if log.LogDebug() {
+ log.Debugf("Code %d, looking for end of headers at %d / %d, last CRLF %d",
+ c.code, endofHeadersStart, c.size, c.headerLen)
+ }
+ // TODO: keep track of list of newlines to efficiently search headers only there
+ idx := endofHeadersStart
+ for idx < c.size-1 {
+ if c.buffer[idx] == '\r' && c.buffer[idx+1] == '\n' {
+ if c.headerLen == idx-2 { // found end of headers
+ parsedHeaders = true
+ break
+ }
+ c.headerLen = idx
+ idx++
+ }
+ idx++
+ }
+ endofHeadersStart = c.size // start there next read
+ if parsedHeaders {
+ // We have headers !
+ c.headerLen += 4 // we use this and not endofHeadersStart so http/1.0 does return 0 and not the optimization for search start
+ if log.LogDebug() {
+ log.Debugf("headers are %d: %s", c.headerLen, c.buffer[:idx])
+ }
+ // Find the content length or chunked mode
+ if keepAlive {
+ var contentLength int
+ found, offset := FoldFind(c.buffer[:c.headerLen], contentLengthHeader)
+ if found {
+ // Content-Length mode:
+ contentLength = ParseDecimal(c.buffer[offset+len(contentLengthHeader) : c.headerLen])
+ if contentLength < 0 {
+ log.Warnf("Warning: content-length unparsable %s", string(c.buffer[offset+2:offset+len(contentLengthHeader)+4]))
+ keepAlive = false
+ break
+ }
+ max = c.headerLen + contentLength
+ if log.LogDebug() { // somehow without the if we spend 400ms/10s in LogV (!)
+ log.Debugf("found content length %d", contentLength)
+ }
+ } else {
+ // Chunked mode (or err/missing):
+ if found, _ := FoldFind(c.buffer[:c.headerLen], chunkedHeader); found {
+ chunkedMode = true
+ var dataStart int
+ dataStart, contentLength = ParseChunkSize(c.buffer[c.headerLen:c.size])
+ if contentLength == -1 {
+ // chunk length not available yet
+ log.LogVf("chunk mode but no first chunk length yet, reading more")
+ max = c.headerLen
+ continue
+ }
+ max = c.headerLen + dataStart + contentLength + 2 // extra CR LF
+ log.Debugf("chunk-length is %d (%s) setting max to %d",
+ contentLength, c.buffer[c.headerLen:c.headerLen+dataStart-2],
+ max)
+ } else {
+ if log.LogVerbose() {
+ log.LogVf("Warning: content-length missing in %s", string(c.buffer[:c.headerLen]))
+ } else {
+ log.Warnf("Warning: content-length missing (%d bytes headers)", c.headerLen)
+ }
+ keepAlive = false // can't keep keepAlive
+ break
+ }
+ } // end of content-length section
+ if max > len(c.buffer) {
+ log.Warnf("Buffer is too small for headers %d + data %d - change -httpbufferkb flag to at least %d",
+ c.headerLen, contentLength, (c.headerLen+contentLength)/1024+1)
+ // TODO: just consume the extra instead
+ max = len(c.buffer)
+ }
+ if checkConnectionClosedHeader {
+ if found, _ := FoldFind(c.buffer[:c.headerLen], connectionCloseHeader); found {
+ log.Infof("Server wants to close connection, no keep-alive!")
+ keepAlive = false
+ max = len(c.buffer) // reset to read as much as available
+ }
+ }
+ }
+ }
+ } // end of big if parse header
+ if c.size >= max {
+ if !keepAlive {
+ log.Errf("More data is available but stopping after %d, increase -httpbufferkb", max)
+ }
+ if !parsedHeaders && c.parseHeaders {
+ log.Errf("Buffer too small (%d) to even finish reading headers, increase -httpbufferkb to get all the data", max)
+ keepAlive = false
+ }
+ if chunkedMode {
+ // Next chunk:
+ dataStart, nextChunkLen := ParseChunkSize(c.buffer[max:c.size])
+ if nextChunkLen == -1 {
+ if c.size == max {
+ log.Debugf("Couldn't find next chunk size, reading more %d %d", max, c.size)
+ } else {
+ log.Infof("Partial chunk size (%s), reading more %d %d", DebugSummary(c.buffer[max:c.size], 20), max, c.size)
+ }
+ continue
+ } else if nextChunkLen == 0 {
+ log.Debugf("Found last chunk %d %d", max+dataStart, c.size)
+ if c.size != max+dataStart+2 || string(c.buffer[c.size-2:c.size]) != "\r\n" {
+ log.Errf("Unexpected mismatch at the end sz=%d expected %d; end of buffer %q", c.size, max+dataStart+2, c.buffer[max:c.size])
+ }
+ } else {
+ max += dataStart + nextChunkLen + 2 // extra CR LF
+ log.Debugf("One more chunk %d -> new max %d", nextChunkLen, max)
+ if max > len(c.buffer) {
+ log.Errf("Buffer too small for %d data", max)
+ } else {
+ if max <= c.size {
+ log.Debugf("Enough data to reach next chunk, skipping a read")
+ skipRead = true
+ }
+ continue
+ }
+ }
+ }
+ break // we're done!
+ }
+ } // end of big for loop
+ // Figure out whether to keep or close the socket:
+ if keepAlive && c.code == http.StatusOK {
+ c.socket = conn // keep the open socket
+ } else {
+ if err := conn.Close(); err != nil {
+ log.Errf("Close error %v %v %d : %v", conn, c.dest, c.size, err)
+ } else {
+ log.Debugf("Closed ok %v from %v after reading %d bytes", conn, c.dest, c.size)
+ }
+ // we cleared c.socket in caller already
+ }
+}
diff --git a/vendor/fortio.org/fortio/fhttp/http_loglevel_test.go b/vendor/fortio.org/fortio/fhttp/http_loglevel_test.go
new file mode 100644
index 0000000000..51d73f8f00
--- /dev/null
+++ b/vendor/fortio.org/fortio/fhttp/http_loglevel_test.go
@@ -0,0 +1,33 @@
+// Copyright 2017 Istio Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !race
+
+package fhttp
+
+import (
+ "testing"
+
+ "fortio.org/fortio/log"
+)
+
+// Rerun some test with various log level for coverage of the debug statements
+// TODO: golden copy type check of output ?
+
+// TestDebugMode re-runs selected tests at Debug log level to cover the
+// log.LogDebug()-guarded code paths (excluded from -race builds, see tag).
+func TestDebugMode(t *testing.T) {
+ log.SetLogLevel(log.Debug)
+ TestHTTPRunner(t)
+ TestNoFirstChunkSizeInitially(t)
+ TestFetchAndOnBehalfOf(t)
+}
diff --git a/vendor/fortio.org/fortio/fhttp/http_server.go b/vendor/fortio.org/fortio/fhttp/http_server.go
new file mode 100644
index 0000000000..a6caa66f5f
--- /dev/null
+++ b/vendor/fortio.org/fortio/fhttp/http_server.go
@@ -0,0 +1,411 @@
+// Copyright 2017 Istio Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fhttp // import "fortio.org/fortio/fhttp"
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "sync/atomic"
+ "time"
+ // get /debug/pprof endpoints on a mux through SetupPPROF
+ "net/http/pprof"
+
+ "fortio.org/fortio/fnet"
+ "fortio.org/fortio/log"
+ "fortio.org/fortio/version"
+)
+
+// -- Echo Server --
+
+var (
+ // Start time of the server (used in debug handler for uptime).
+ startTime time.Time
+ // EchoRequests is the number of requests received. Only updated in Debug mode.
+ EchoRequests int64
+)
+
+// EchoHandler is an http server handler echoing back the input.
+// Form/query parameters alter the reply: delay (sleep before responding),
+// status (response code, possibly weighted), close (adds Connection: close),
+// header (extra response headers, repeatable), size (generated payload of
+// that many bytes instead of echoing the request body).
+func EchoHandler(w http.ResponseWriter, r *http.Request) {
+ if log.LogVerbose() {
+ LogRequest(r, "Echo") // will also print headers
+ }
+ data, err := ioutil.ReadAll(r.Body) // must be done before calling FormValue
+ if err != nil {
+ log.Errf("Error reading %v", err)
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ log.Debugf("Read %d", len(data))
+ dur := generateDelay(r.FormValue("delay"))
+ if dur > 0 {
+ log.LogVf("Sleeping for %v", dur)
+ time.Sleep(dur)
+ }
+ statusStr := r.FormValue("status")
+ var status int
+ if statusStr != "" {
+ status = generateStatus(statusStr)
+ } else {
+ status = http.StatusOK
+ }
+ if log.LogDebug() {
+ // TODO: this easily lead to contention - use 'thread local'
+ rqNum := atomic.AddInt64(&EchoRequests, 1)
+ log.Debugf("Request # %v", rqNum)
+ }
+ if r.FormValue("close") != "" {
+ log.Debugf("Adding Connection:close / will close socket")
+ w.Header().Set("Connection", "close")
+ }
+ // process header(s) args, must be before size to compose properly
+ for _, hdr := range r.Form["header"] {
+ log.LogVf("Adding requested header %s", hdr)
+ if len(hdr) == 0 {
+ continue
+ }
+ s := strings.SplitN(hdr, ":", 2)
+ if len(s) != 2 {
+ log.Errf("invalid extra header '%s', expecting Key: Value", hdr)
+ continue
+ }
+ w.Header().Add(s[0], s[1])
+ }
+ size := generateSize(r.FormValue("size"))
+ if size >= 0 {
+ log.LogVf("Writing %d size with %d status", size, status)
+ writePayload(w, status, size)
+ return
+ }
+ // echo back the Content-Type and Content-Length in the response
+ for _, k := range []string{"Content-Type", "Content-Length"} {
+ if v := r.Header.Get(k); v != "" {
+ w.Header().Set(k, v)
+ }
+ }
+ w.WriteHeader(status)
+ if _, err = w.Write(data); err != nil {
+ log.Errf("Error writing response %v to %v", err, r.RemoteAddr)
+ }
+}
+
+// writePayload writes size bytes of the shared fnet.Payload with the given
+// status code, setting an explicit Content-Length and octet-stream type.
+func writePayload(w http.ResponseWriter, status int, size int) {
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Length", strconv.Itoa(size))
+ w.WriteHeader(status)
+ n, err := w.Write(fnet.Payload[:size])
+ if err != nil || n != size {
+ log.Errf("Error writing payload of size %d: %d %v", size, n, err)
+ }
+}
+
+// closingServer accepts connections on listener and closes them immediately,
+// simulating a secure port rejecting plain connections. It loops until an
+// accept or close error occurs and returns that (non-nil) error.
+func closingServer(listener net.Listener) error {
+ var err error
+ for {
+ var c net.Conn
+ c, err = listener.Accept()
+ if err != nil {
+ log.Errf("Accept error in dummy server %v", err)
+ break
+ }
+ log.LogVf("Got connection from %v, closing", c.RemoteAddr())
+ err = c.Close()
+ if err != nil {
+ log.Errf("Close error in dummy server %v", err)
+ break
+ }
+ }
+ return err
+}
+
+// HTTPServer creates an http server named name on address/port port.
+// Port can include binding address and/or be port 0.
+// Returns (nil, nil) if listening fails (already logged by fnet.Listen);
+// serving happens in a background goroutine that log.Fatalf's on error.
+func HTTPServer(name string, port string) (*http.ServeMux, net.Addr) {
+ m := http.NewServeMux()
+ s := &http.Server{
+ Handler: m,
+ }
+ listener, addr := fnet.Listen(name, port)
+ if listener == nil {
+ return nil, nil // error already logged
+ }
+ go func() {
+ err := s.Serve(listener)
+ if err != nil {
+ log.Fatalf("Unable to serve %s on %s: %v", name, addr.String(), err)
+ }
+ }()
+ return m, addr
+}
+
+// DynamicHTTPServer listens on an available port, sets up an http or a closing
+// server simulating an https server (when closing is true) server on it and
+// returns the listening port and mux to which one can attach handlers to.
+// Note: in a future version of istio, the closing will be actually be secure
+// on/off and create an https server instead of a closing server.
+// As this is a dynamic tcp socket server, the address is TCP.
+// NOTE: when closing is true the returned mux is nil (no handlers can be
+// attached to a connection-closing server).
+func DynamicHTTPServer(closing bool) (*http.ServeMux, *net.TCPAddr) {
+ if !closing {
+ mux, addr := HTTPServer("dynamic", "0")
+ return mux, addr.(*net.TCPAddr)
+ }
+ // Note: we actually use the fact it's not supported as an error server for tests - need to change that
+ log.Errf("Secure setup not yet supported. Will just close incoming connections for now")
+ listener, addr := fnet.Listen("closing server", "0")
+ //err = http.ServeTLS(listener, nil, "", "") // go 1.9
+ go func() {
+ err := closingServer(listener)
+ if err != nil {
+ log.Fatalf("Unable to serve closing server on %s: %v", addr.String(), err)
+ }
+ }()
+ return nil, addr.(*net.TCPAddr)
+}
+
+/*
+// DebugHandlerTemplate returns debug/useful info on the http request.
+// slower heavier but nicer source code version of DebugHandler
+func DebugHandlerTemplate(w http.ResponseWriter, r *http.Request) {
+ log.LogVf("%v %v %v %v", r.Method, r.URL, r.Proto, r.RemoteAddr)
+ hostname, _ := os.Hostname()
+ data, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ log.Errf("Error reading %v", err)
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ // Note: this looks nicer but is about 2x slower / less qps / more cpu and 25% bigger executable than doing the writes oneself:
+ const templ = `Φορτίο version {{.Version}} echo debug server on {{.Hostname}} - request from {{.R.RemoteAddr}}
+
+{{.R.Method}} {{.R.URL}} {{.R.Proto}}
+
+headers:
+
+{{ range $name, $vals := .R.Header }}{{range $val := $vals}}{{$name}}: {{ $val }}
+{{end}}{{end}}
+body:
+
+{{.Body}}
+{{if .DumpEnv}}
+environment:
+{{ range $idx, $e := .Env }}
+{{$e}}{{end}}
+{{end}}`
+ t := template.Must(template.New("debugOutput").Parse(templ))
+ err = t.Execute(w, &struct {
+ R *http.Request
+ Hostname string
+ Version string
+ Body string
+ DumpEnv bool
+ Env []string
+ }{r, hostname, Version, DebugSummary(data, 512), r.FormValue("env") == "dump", os.Environ()})
+ if err != nil {
+ Critf("Template execution failed: %v", err)
+ }
+ w.Header().Set("Content-Type", "text/plain; charset=UTF-8")
+}
+*/
+
+// DebugHandler returns debug/useful info to http client.
+// Hand-written buffer version (faster than the template variant above):
+// emits version, uptime, hostname, peer address, request line, sorted
+// headers, a summarized body, and optionally the environment (?env=dump).
+func DebugHandler(w http.ResponseWriter, r *http.Request) {
+ if log.LogVerbose() {
+ LogRequest(r, "Debug")
+ }
+ var buf bytes.Buffer
+ buf.WriteString("Φορτίο version ")
+ buf.WriteString(version.Long())
+ buf.WriteString(" echo debug server up for ")
+ buf.WriteString(fmt.Sprint(RoundDuration(time.Since(startTime))))
+ buf.WriteString(" on ")
+ hostname, _ := os.Hostname() // nolint: gas
+ buf.WriteString(hostname)
+ buf.WriteString(" - request from ")
+ buf.WriteString(r.RemoteAddr)
+ buf.WriteString("\n\n")
+ buf.WriteString(r.Method)
+ buf.WriteByte(' ')
+ buf.WriteString(r.URL.String())
+ buf.WriteByte(' ')
+ buf.WriteString(r.Proto)
+ buf.WriteString("\n\nheaders:\n\n")
+ // Host is removed from headers map and put here (!)
+ buf.WriteString("Host: ")
+ buf.WriteString(r.Host)
+
+ // Sort header names for deterministic output:
+ var keys []string
+ for k := range r.Header {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, name := range keys {
+ buf.WriteByte('\n')
+ buf.WriteString(name)
+ buf.WriteString(": ")
+ first := true
+ headers := r.Header[name]
+ for _, h := range headers {
+ if !first {
+ buf.WriteByte(',')
+ }
+ buf.WriteString(h)
+ first = false
+ }
+ }
+ data, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ log.Errf("Error reading %v", err)
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ buf.WriteString("\n\nbody:\n\n")
+ buf.WriteString(DebugSummary(data, 512))
+ buf.WriteByte('\n')
+ if r.FormValue("env") == "dump" {
+ buf.WriteString("\nenvironment:\n\n")
+ for _, v := range os.Environ() {
+ buf.WriteString(v)
+ buf.WriteByte('\n')
+ }
+ }
+ w.Header().Set("Content-Type", "text/plain; charset=UTF-8")
+ if _, err = w.Write(buf.Bytes()); err != nil {
+ log.Errf("Error writing response %v to %v", err, r.RemoteAddr)
+ }
+}
+
+// CacheOn sets the header for indefinite caching.
+func CacheOn(w http.ResponseWriter) {
+ w.Header().Set("Cache-Control", "max-age=365000000, immutable")
+}
+
+// Serve starts a debug / echo http server on the given port.
+// Returns the mux and addr where the listening socket is bound.
+// The .Port can be retrieved from it when requesting the 0 port as
+// input for dynamic http server.
+// Also records startTime (used by DebugHandler's uptime display).
+func Serve(port, debugPath string) (*http.ServeMux, net.Addr) {
+ startTime = time.Now()
+ mux, addr := HTTPServer("echo", port)
+ if addr == nil {
+ return nil, nil // error already logged
+ }
+ if debugPath != "" {
+ mux.HandleFunc(debugPath, DebugHandler)
+ }
+ mux.HandleFunc("/", EchoHandler)
+ return mux, addr
+}
+
+// ServeTCP is Serve() but restricted to TCP (return address is assumed
+// to be TCP - will panic for unix domain)
+func ServeTCP(port, debugPath string) (*http.ServeMux, *net.TCPAddr) {
+ mux, addr := Serve(port, debugPath)
+ if addr == nil {
+ return nil, nil // error already logged
+ }
+ return mux, addr.(*net.TCPAddr)
+}
+
+// -- formerly in ui handler
+
+// SetupPPROF add pprof to the mux (mirror the init() of http pprof).
+// Each endpoint is wrapped in LogAndCall so requests are logged.
+func SetupPPROF(mux *http.ServeMux) {
+ mux.HandleFunc("/debug/pprof/", LogAndCall("pprof:index", pprof.Index))
+ mux.HandleFunc("/debug/pprof/cmdline", LogAndCall("pprof:cmdline", pprof.Cmdline))
+ mux.HandleFunc("/debug/pprof/profile", LogAndCall("pprof:profile", pprof.Profile))
+ mux.HandleFunc("/debug/pprof/symbol", LogAndCall("pprof:symbol", pprof.Symbol))
+ mux.HandleFunc("/debug/pprof/trace", LogAndCall("pprof:trace", pprof.Trace))
+}
+
+// -- Fetcher (simple http proxy) --
+
+// FetcherHandler is the handler for the fetcher/proxy.
+// It hijacks the client connection, fetches the (prefix-stripped) target URL
+// with a fast client (5 minute timeout, forwarding headers via OnBehalfOf)
+// and writes the raw fetched bytes back on the hijacked connection.
+func FetcherHandler(w http.ResponseWriter, r *http.Request) {
+ LogRequest(r, "Fetch (prefix stripped)")
+ hj, ok := w.(http.Hijacker)
+ if !ok {
+ log.Critf("hijacking not supported")
+ return
+ }
+ conn, _, err := hj.Hijack()
+ if err != nil {
+ log.Errf("hijacking error %v", err)
+ return
+ }
+ // Don't forget to close the connection:
+ defer conn.Close() // nolint: errcheck
+ // Stripped prefix gets replaced by ./ - sometimes...
+ url := strings.TrimPrefix(r.URL.String(), "./")
+ opts := NewHTTPOptions("http://" + url)
+ opts.HTTPReqTimeOut = 5 * time.Minute
+ OnBehalfOf(opts, r)
+ client := NewClient(opts)
+ if client == nil {
+ return // error logged already
+ }
+ _, data, _ := client.Fetch()
+ _, err = conn.Write(data)
+ if err != nil {
+ log.Errf("Error writing fetched data to %v: %v", r.RemoteAddr, err)
+ }
+ client.Close()
+}
+
+// -- Redirection to https feature --
+
+// RedirectToHTTPSHandler handler sends a redirect to same URL with https.
+// Uses 303 See Other so the client re-requests with GET.
+func RedirectToHTTPSHandler(w http.ResponseWriter, r *http.Request) {
+ dest := "https://" + r.Host + r.URL.String()
+ LogRequest(r, "Redirecting to "+dest)
+ http.Redirect(w, r, dest, http.StatusSeeOther)
+}
+
+// RedirectToHTTPS Sets up a redirector to https on the given port.
+// (Do not create a loop, make sure this is addressed from an ingress)
+// Returns nil if the server could not be started (already logged).
+func RedirectToHTTPS(port string) net.Addr {
+ m, a := HTTPServer("https redirector", port)
+ if m == nil {
+ return nil // error already logged
+ }
+ m.HandleFunc("/", RedirectToHTTPSHandler)
+ return a
+}
+
+// LogRequest logs the incoming request, including headers when loglevel is verbose
+func LogRequest(r *http.Request, msg string) {
+ log.Infof("%s: %v %v %v %v (%s)", msg, r.Method, r.URL, r.Proto, r.RemoteAddr,
+ r.Header.Get("X-Forwarded-Proto"))
+ if log.LogVerbose() {
+ for name, headers := range r.Header {
+ for _, h := range headers {
+ log.LogVf("Header %v: %v\n", name, h)
+ }
+ }
+ }
+}
+
+// LogAndCall wraps an HTTP handler to log the request first.
+func LogAndCall(msg string, hf http.HandlerFunc) http.HandlerFunc {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ LogRequest(r, msg)
+ hf(w, r)
+ })
+}
diff --git a/vendor/fortio.org/fortio/fhttp/http_test.go b/vendor/fortio.org/fortio/fhttp/http_test.go
new file mode 100644
index 0000000000..d6e2853168
--- /dev/null
+++ b/vendor/fortio.org/fortio/fhttp/http_test.go
@@ -0,0 +1,1184 @@
+// Copyright 2017 Istio Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fhttp
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "fortio.org/fortio/fnet"
+ "fortio.org/fortio/log"
+)
+
+// init raises the log level to Debug so debug-only code paths get coverage.
+func init() {
+ log.SetLogLevel(log.Debug)
+}
+
+// TestGetHeaders exercises AddAndValidateExtraHeader, the Host special-case,
+// copy semantics of HTTPOptions and ResetHeaders.
+func TestGetHeaders(t *testing.T) {
+ o := &HTTPOptions{}
+ o.AddAndValidateExtraHeader("FOo:baR")
+ oo := *o // check that copying works
+ h := oo.AllHeaders()
+ if len(h) != 2 { // 1 above + user-agent
+ t.Errorf("Header count mismatch, got %d instead of 3", len(h))
+ }
+ if h.Get("Foo") != "baR" {
+ t.Errorf("Foo header mismatch, got '%v'", h.Get("Foo"))
+ }
+ if h.Get("Host") != "" {
+ t.Errorf("Host header should be nil initially, got '%v'", h.Get("Host"))
+ }
+ o.AddAndValidateExtraHeader("hoSt: aBc:123")
+ h = o.AllHeaders()
+ if h.Get("Host") != "aBc:123" {
+ t.Errorf("Host header mismatch, got '%v'", h.Get("Host"))
+ }
+ if len(h) != 3 { // 2 above + user-agent
+ t.Errorf("Header count mismatch, got %d instead of 3", len(h))
+ }
+ err := o.AddAndValidateExtraHeader("foo") // missing : value
+ if err == nil {
+ t.Errorf("Expected error for header without value, did not get one")
+ }
+ o.ResetHeaders()
+ h = o.AllHeaders()
+ if h.Get("Host") != "" {
+ t.Errorf("After reset Host header should be nil, got '%v'", h.Get("Host"))
+ }
+ if len(h) != 0 {
+ t.Errorf("Header count mismatch after reset, got %d instead of 1", len(h))
+ }
+}
+
+// TestNewHTTPRequest checks that newHTTPRequest returns nil for invalid URLs.
+func TestNewHTTPRequest(t *testing.T) {
+ var tests = []struct {
+ url string // input
+ ok bool // ok/error
+ }{
+ {"http://www.google.com/", true},
+ {"ht tp://www.google.com/", false},
+ }
+ for _, tst := range tests {
+ o := NewHTTPOptions(tst.url)
+ o.AddAndValidateExtraHeader("Host: www.google.com")
+ r := newHTTPRequest(o)
+ if tst.ok != (r != nil) {
+ t.Errorf("Got %v, expecting ok %v for url '%s'", r, tst.ok, tst.url)
+ }
+ }
+}
+
+// TestMultiInitAndEscape checks that already-escaped URLs are not re-escaped
+// and that re-running Init preserves previously added headers.
+func TestMultiInitAndEscape(t *testing.T) {
+ // 2 escaped already
+ o := NewHTTPOptions("localhost%3A8080/?delay=10ms:10,0.5s:15%25,0.25s:5")
+ // shouldn't perform any escapes
+ expected := "http://localhost%3A8080/?delay=10ms:10,0.5s:15%25,0.25s:5"
+ if o.URL != expected {
+ t.Errorf("Got initially '%s', expected '%s'", o.URL, expected)
+ }
+ o.AddAndValidateExtraHeader("FoO: BaR")
+ // re init should not erase headers
+ o.Init(o.URL)
+ if o.AllHeaders().Get("Foo") != "BaR" {
+ t.Errorf("Lost header after Init %+v", o.AllHeaders())
+ }
+}
+
+// TestSchemeCheck verifies http:// prefixing of schemeless URLs and that only
+// https URLs force the standard (non-fast) client.
+func TestSchemeCheck(t *testing.T) {
+ var tests = []struct {
+ input string
+ output string
+ stdcli bool
+ }{
+ {"https://www.google.com/", "https://www.google.com/", true},
+ {"www.google.com", "http://www.google.com", false},
+ {"hTTps://foo.bar:123/ab/cd", "hTTps://foo.bar:123/ab/cd", true}, // not double http:
+ {"HTTP://foo.bar:124/ab/cd", "HTTP://foo.bar:124/ab/cd", false}, // not double http:
+ {"", "", false}, // and error in the logs
+ {"x", "http://x", false}, //should not crash because url is shorter than prefix
+ {"http:/", "http://http:/", false}, //boundary
+ {"http://", "http://", false}, //boundary
+ {"https://", "https://", true}, //boundary
+ {"https:/", "http://https:/", false}, //boundary
+ }
+ for _, tst := range tests {
+ o := NewHTTPOptions(tst.input)
+ if o.URL != tst.output {
+ t.Errorf("Got %v, expecting %v for url '%s'", o.URL, tst.output, tst.input)
+ }
+ if o.DisableFastClient != tst.stdcli {
+ t.Errorf("Got %v, expecting %v for stdclient for url '%s'", o.DisableFastClient, tst.stdcli, tst.input)
+ }
+ }
+}
+
+// TestFoldFind1 is a table-driven test of case-folding substring search,
+// checking both the found flag and the match offset.
+func TestFoldFind1(t *testing.T) {
+ var tests = []struct {
+ haystack string // input
+ needle string // input
+ found bool // expected result
+ offset int // where
+ }{
+ {"", "", true, 0},
+ {"", "A", false, -1},
+ {"abc", "", true, 0},
+ {"abc", "ABCD", false, -1},
+ {"abc", "ABC", true, 0},
+ {"aBcd", "ABC", true, 0},
+ {"xaBc", "ABC", true, 1},
+ {"XYZaBcUVW", "Abc", true, 3},
+ {"xaBcd", "ABC", true, 1},
+ {"Xa", "A", true, 1},
+ {"axabaBcd", "ABC", true, 4},
+ {"axabxaBcd", "ABC", true, 5},
+ {"axabxaBd", "ABC", false, -1},
+ {"AAAAB", "AAAB", true, 1},
+ {"xAAAxAAA", "AAAB", false, -1},
+ {"xxxxAc", "AB", false, -1},
+ {"X-: X", "-: ", true, 1},
+ {"\nX", "*X", false, -1}, // \n shouldn't fold into *
+ {"*X", "\nX", false, -1}, // \n shouldn't fold into *
+ {"\rX", "-X", false, -1}, // \r shouldn't fold into -
+ {"-X", "\rX", false, -1}, // \r shouldn't fold into -
+ {"foo\r\nContent-Length: 34\r\n", "CONTENT-LENGTH:", true, 5},
+ }
+ for _, tst := range tests {
+ f, o := FoldFind([]byte(tst.haystack), []byte(tst.needle))
+ if tst.found != f {
+ t.Errorf("Got %v, expecting found %v for FoldFind('%s', '%s')", f, tst.found, tst.haystack, tst.needle)
+ }
+ if tst.offset != o {
+ t.Errorf("Offset %d, expecting %d for FoldFind('%s', '%s')", o, tst.offset, tst.haystack, tst.needle)
+ }
+ }
+}
+
+// TestFoldFind2 exhaustively compares FoldFind against strings.EqualFold for
+// every single-byte pair, modulo documented fast-path exceptions.
+func TestFoldFind2(t *testing.T) {
+ var haystack [1]byte
+ var needle [1]byte
+ // we don't mind for these to map to each other in exchange for 30% perf gain
+ okExceptions := "@[\\]^_`{|}~"
+ for i := 0; i < 127; i++ { // skipping 127 too, matches _
+ haystack[0] = byte(i)
+ for j := 0; j < 128; j++ {
+ needle[0] = byte(j)
+ sh := string(haystack[:])
+ sn := string(needle[:])
+ f, o := FoldFind(haystack[:], needle[:])
+ shouldFind := strings.EqualFold(sh, sn)
+ if i == j || shouldFind {
+ if !f || o != 0 {
+ t.Errorf("Not found when should: %d 0x%x '%s' matching %d 0x%x '%s'",
+ i, i, sh, j, j, sn)
+ }
+ continue
+ }
+ if f || o != -1 {
+ if strings.Contains(okExceptions, sh) {
+ continue
+ }
+ t.Errorf("Found when shouldn't: %d 0x%x '%s' matching %d 0x%x '%s'",
+ i, i, sh, j, j, sn)
+ }
+ }
+ }
+}
+
+// utf8Str mixes two multi-byte runes with 26 ASCII letters; used below to
+// exercise ASCIIToUpper's handling (mangling) of non-ASCII input.
+var utf8Str = "世界aBcdefGHiJklmnopqrstuvwxyZ"
+
+// TestASCIIToUpper checks ASCII upcasing and the byte-length behavior on
+// multi-byte utf8 input (which is intentionally mangled).
+func TestASCIIToUpper(t *testing.T) {
+ log.SetLogLevel(log.Debug)
+ var tests = []struct {
+ input string // input
+ expected string // output
+ }{
+ {"", ""},
+ {"A", "A"},
+ {"aBC", "ABC"},
+ {"AbC", "ABC"},
+ {utf8Str, "\026LABCDEFGHIJKLMNOPQRSTUVWXYZ" /* got mangled but only first 2 */},
+ }
+ for _, tst := range tests {
+ actual := ASCIIToUpper(tst.input)
+ if tst.expected != string(actual) {
+ t.Errorf("Got '%+v', expecting '%+v' for ASCIIFold('%s')", actual, tst.expected, tst.input)
+ }
+ }
+ utf8bytes := []byte(utf8Str)
+ if len(utf8bytes) != 26+6 {
+ t.Errorf("Got %d utf8 bytes, expecting 6+26 for '%s'", len(utf8bytes), utf8Str)
+ }
+ folded := ASCIIToUpper(utf8Str)
+ if len(folded) != 26+2 {
+ t.Errorf("Got %d folded bytes, expecting 2+26 for '%s'", len(folded), utf8Str)
+ }
+}
+
+// TestParseDecimal checks positive-integer parsing: leading spaces skipped,
+// stops at the first non-digit, -1 for empty/negative input.
+func TestParseDecimal(t *testing.T) {
+ var tests = []struct {
+ input string // input
+ expected int // output
+ }{
+ {"", -1},
+ {"3", 3},
+ {" 456cxzc", 456},
+ {"-45", -1}, // - is not expected, positive numbers only
+ {"3.2", 3}, // stops at first non digit
+ {" 1 2", 1},
+ {"0", 0},
+ }
+ for _, tst := range tests {
+ actual := ParseDecimal([]byte(tst.input))
+ if tst.expected != actual {
+ t.Errorf("Got %d, expecting %d for ParseDecimal('%s')", actual, tst.expected, tst.input)
+ }
+ }
+}
+
+// TestParseChunkSize checks hex chunk-size parsing: value -1 until a full
+// CRLF-terminated size line is present, offset pointing past that line.
+func TestParseChunkSize(t *testing.T) {
+ var tests = []struct {
+ input string // input
+ expOffset int // expected offset
+ expValue int // expected value
+ }{
+ // Errors :
+ {"", 0, -1},
+ {"0", 1, -1},
+ {"0\r", 2, -1},
+ {"0\n", 2, -1},
+ {"g\r\n", 0, -1},
+ {"0\r0\n", 4, -1},
+ // Ok: (size of input is the expected offset)
+ {"0\r\n", 3, 0},
+ {"0x\r\n", 4, 0},
+ {"f\r\n", 3, 15},
+ {"10\r\n", 4, 16},
+ {"fF\r\n", 4, 255},
+ {"abcdef\r\n", 8, 0xabcdef},
+ {"100; foo bar\r\nanother line\r\n", 14 /* and not the whole thing */, 256},
+ }
+ for _, tst := range tests {
+ actOffset, actVal := ParseChunkSize([]byte(tst.input))
+ if tst.expValue != actVal {
+ t.Errorf("Got %d, expecting %d for value of ParseChunkSize('%+s')", actVal, tst.expValue, tst.input)
+ }
+ if tst.expOffset != actOffset {
+ t.Errorf("Got %d, expecting %d for offset of ParseChunkSize('%+s')", actOffset, tst.expOffset, tst.input)
+ }
+ }
+}
+
+// TestDebugSummary checks truncation ("len: head...tail") and escaping of
+// non-printable bytes at max length 8.
+func TestDebugSummary(t *testing.T) {
+ var tests = []struct {
+ input string
+ expected string
+ }{
+ {"12345678", "12345678"},
+ {"123456789", "123456789"},
+ {"1234567890", "1234567890"},
+ {"12345678901", "12345678901"},
+ {"123456789012", "12: 1234...9012"},
+ {"1234567890123", "13: 1234...0123"},
+ {"12345678901234", "14: 1234...1234"},
+ {"A\r\000\001\x80\nB", `A\r\x00\x01\x80\nB`}, // escaping
+ {"A\r\000Xyyyyyyyyy\001\x80\nB", `17: A\r\x00X...\x01\x80\nB`}, // escaping
+ }
+ for _, tst := range tests {
+ if actual := DebugSummary([]byte(tst.input), 8); actual != tst.expected {
+ t.Errorf("Got '%s', expected '%s' for DebugSummary(%q)", actual, tst.expected, tst.input)
+ }
+ }
+}
+
+// TestParseStatus checks weighted status strings ("code:percent,...");
+// malformed input or weights summing past 100 yield 400.
+func TestParseStatus(t *testing.T) {
+ var tests = []struct {
+ input string
+ expected int
+ }{
+ // Error cases
+ {"x", 400},
+ {"1::", 400},
+ {"x:10", 400},
+ {"555:-1", 400},
+ {"555:101", 400},
+ {"551:45,551:56", 400},
+ // Good cases
+ {"555", 555},
+ {"555:100", 555},
+ {"555:100%", 555},
+ {"555:0", 200},
+ {"555:0%", 200},
+ {"551:45,551:55", 551},
+ {"551:45%,551:55%", 551},
+ }
+ for _, tst := range tests {
+ if actual := generateStatus(tst.input); actual != tst.expected {
+ t.Errorf("Got %d, expected %d for generateStatus(%q)", actual, tst.expected, tst.input)
+ }
+ }
+}
+
+// TestParseDelay checks weighted delay strings ("dur:percent,..."); -1 for
+// malformed input, capped at MaxDelay.
+func TestParseDelay(t *testing.T) {
+ var tests = []struct {
+ input string
+ expected time.Duration
+ }{
+ // Error cases
+ {"", -1},
+ {"x", -1},
+ {"1::", -1},
+ {"x:10", -1},
+ {"10ms:-1", -1},
+ {"20ms:101", -1},
+ {"20ms:101%", -1},
+ {"10ms:45,100ms:56", -1},
+ // Max delay case:
+ {"10s:45,10s:55", MaxDelay},
+ // Good cases
+ {"100ms", 100 * time.Millisecond},
+ {"100ms:100", 100 * time.Millisecond},
+ {"100ms:100%", 100 * time.Millisecond},
+ {"100ms:0", 0},
+ {"100ms:0%", 0},
+ {"10ms:45,10ms:55", 10 * time.Millisecond},
+ {"10ms:45%,10ms:55%", 10 * time.Millisecond},
+ }
+ for _, tst := range tests {
+ if actual := generateDelay(tst.input); actual != tst.expected {
+ t.Errorf("Got %d, expected %d for generateStatus(%q)", actual, tst.expected, tst.input)
+ }
+ }
+}
+
+// TestGenerateStatusBasic covers the deterministic (non-probabilistic) subset
+// of generateStatus parsing without % suffixes.
+func TestGenerateStatusBasic(t *testing.T) {
+ var tests = []struct {
+ input string
+ expected int
+ }{
+ // Error cases
+ {"x", 400},
+ {"1::", 400},
+ {"x:10", 400},
+ {"555:x", 400},
+ {"555:-1", 400},
+ {"555:101", 400},
+ {"551:45,551:56", 400},
+ // Good cases
+ {"555", 555},
+ {"555:100", 555},
+ {"555:0", 200},
+ {"551:45,551:55", 551},
+ }
+ for _, tst := range tests {
+ if actual := generateStatus(tst.input); actual != tst.expected {
+ t.Errorf("Got %d, expected %d for generateStatus(%q)", actual, tst.expected, tst.input)
+ }
+ }
+}
+
+// TestGenerateStatusEdgeSum checks rounding at the 100% boundary: a sum of
+// exactly ~100 must still be accepted, while exceeding it yields 400.
+func TestGenerateStatusEdgeSum(t *testing.T) {
+ st := "503:99.0,503:1.00001"
+ // Gets 400 without rounding as it exceeds 100, another corner case is if you
+ // add 0.1 1000 times you get 0.99999... so you may get stray 200s without Rounding
+ if actual := generateStatus(st); actual != 503 {
+ t.Errorf("Got %d for generateStatus(%q)", actual, st)
+ }
+ st += ",500:0.0001"
+ if actual := generateStatus(st); actual != 400 {
+ t.Errorf("Got %d for long generateStatus(%q) when expecting 400 for > 100", actual, st)
+ }
+}
+
+// roundthousand rounds x to the nearest thousand and returns it expressed in
+// thousands (adding 500 before the integer divide makes this round-half-up,
+// not round-down as the previous comment said: 1499 -> 1, 1500 -> 2).
+func roundthousand(x int) int {
+	return int(float64(x)+500.) / 1000
+}
+
+// TestGenerateStatusDistribution draws 10000 statuses from
+// "501:20,502:30,503:0.5" and checks the empirical distribution:
+// ~20% 501, ~30% 502, ~0.5% 503, the remainder 200.
+func TestGenerateStatusDistribution(t *testing.T) {
+	log.SetLogLevel(log.Info)
+	str := "501:20,502:30,503:0.5"
+	m := make(map[int]int)
+	for i := 0; i < 10000; i++ {
+		m[generateStatus(str)]++
+	}
+	if len(m) != 4 {
+		t.Errorf("Unexpected result, expecting 4 statuses, got %+v", m)
+	}
+	if m[200]+m[501]+m[502]+m[503] != 10000 {
+		t.Errorf("Unexpected result, expecting 4 statuses summing to 10000 got %+v", m)
+	}
+	if m[503] <= 10 {
+		t.Errorf("Unexpected result, expecting at least 10 count for 0.5%% probability over 10000 got %+v", m)
+	}
+	// Round the data
+	f01 := roundthousand(m[501]) // 20% -> 2
+	f02 := roundthousand(m[502]) // 30% -> 3
+	fok := roundthousand(m[200]) // rest is 50% -> 5
+	f03 := roundthousand(m[503]) // 0.5% -> rounds down to 0 10s of %
+
+	if f01 != 2 || f02 != 3 || fok != 5 || (f03 != 0) {
+		// Fixed: the message previously omitted f03 even though the
+		// condition checks it; print all four rounded buckets.
+		t.Errorf("Unexpected distribution for %+v - wanted 2 3 5 0, got %d %d %d %d", m, f01, f02, fok, f03)
+	}
+}
+
+// TestRoundDuration checks RoundDuration rounds to the nearest 100ms
+// (1249ms -> 1200ms, 1250ms -> 1300ms per the table below).
+func TestRoundDuration(t *testing.T) {
+	var tests = []struct {
+		input    time.Duration
+		expected time.Duration
+	}{
+		{0, 0},
+		{1200 * time.Millisecond, 1200 * time.Millisecond},
+		{1201 * time.Millisecond, 1200 * time.Millisecond},
+		{1249 * time.Millisecond, 1200 * time.Millisecond},
+		{1250 * time.Millisecond, 1300 * time.Millisecond},
+		{1299 * time.Millisecond, 1300 * time.Millisecond},
+	}
+	for _, tst := range tests {
+		if actual := RoundDuration(tst.input); actual != tst.expected {
+			t.Errorf("Got %v, expected %v for RoundDuration(%v)", actual, tst.expected, tst.input)
+		}
+	}
+}
+
+// TestGenerateSize checks generateSize(): -1 for errors/empty/0% cases,
+// the explicit size otherwise, everything capped to fnet.MaxPayloadSize.
+func TestGenerateSize(t *testing.T) {
+	var tests = []struct {
+		input    string
+		expected int
+	}{
+		// Error cases
+		{"x", -1},
+		{"1::", -1},
+		{"x:10", -1},
+		{"555:x", -1},
+		{"555:-1", -1},
+		{"555:101", -1},
+		{"551:45,551:56", -1},
+		// Good cases
+		{"", -1},
+		{"512", 512},
+		{"512:100", 512},
+		{"512:0", -1},
+		{"512:45,512:55", 512},
+		{"0", 0}, // and not -1
+		{"262144", 262144},
+		{"262145", fnet.MaxPayloadSize}, // MaxSize test
+		{"1000000:10,2000000:90", 262144},
+	}
+	for _, tst := range tests {
+		if actual := generateSize(tst.input); actual != tst.expected {
+			t.Errorf("Got %d, expected %d for generateSize(%q)", actual, tst.expected, tst.input)
+		}
+	}
+}
+
+// TestPayloadWithEchoBack posts binary (embedded NUL) and utf-8 payloads
+// through both the fast and standard clients and verifies the echo handler
+// returns them verbatim after the header.
+func TestPayloadWithEchoBack(t *testing.T) {
+	var tests = []struct {
+		payload           []byte
+		disableFastClient bool
+	}{
+		{[]byte{44, 45, 00, 46, 47}, false},
+		{[]byte{44, 45, 00, 46, 47}, true},
+		{[]byte("groß"), false},
+		{[]byte("groß"), true},
+	}
+	m, a := DynamicHTTPServer(false)
+	m.HandleFunc("/", EchoHandler)
+	url := fmt.Sprintf("http://localhost:%d/", a.Port)
+	for _, test := range tests {
+		opts := NewHTTPOptions(url)
+		opts.DisableFastClient = test.disableFastClient
+		opts.Payload = test.payload
+		cli := NewClient(opts)
+		code, body, header := cli.Fetch()
+		if code != 200 {
+			t.Errorf("Unexpected error %d", code)
+		}
+		if !bytes.Equal(body[header:], test.payload) {
+			t.Errorf("Got %s, expected %q from echo", DebugSummary(body, 512), test.payload)
+		}
+		// only the fast client variant exposes a socket worth closing here
+		if !test.disableFastClient {
+			cli.Close()
+		}
+	}
+}
+
+// Many of the earlier http tests are through httprunner but new tests should go here
+
+// TestUnixDomainHttp serves on a unix domain socket and fetches through it,
+// checking the Host header taken from the URL (not the socket) reaches the
+// debug handler.
+func TestUnixDomainHttp(t *testing.T) {
+	uds := fnet.GetUniqueUnixDomainPath("fortio-http-test-uds")
+	_, addr := Serve(uds, "/debug1")
+	if addr == nil {
+		t.Fatalf("Error for Serve for %s", uds)
+	}
+	o := HTTPOptions{UnixDomainSocket: uds, URL: "http://foo.bar:123/debug1"}
+	client := NewClient(&o)
+	code, data, _ := client.Fetch()
+	if code != http.StatusOK {
+		t.Errorf("Got error %d fetching uds %s", code, uds)
+	}
+	if !strings.Contains(string(data), "Host: foo.bar:123") {
+		t.Errorf("Didn't find expected Host header in debug handler: %s", DebugSummary(data, 1024))
+	}
+}
+
+// TestEchoBack POSTs a form with delay=2s (exercising the max-delay cap)
+// and verifies the form body is echoed back unchanged.
+func TestEchoBack(t *testing.T) {
+	m, a := DynamicHTTPServer(false)
+	m.HandleFunc("/", EchoHandler)
+	v := url.Values{}
+	v.Add("foo", "bar")
+	url := fmt.Sprintf("http://localhost:%d/?delay=2s", a.Port) // trigger max delay
+	resp, err := http.PostForm(url, v)
+	if err != nil {
+		t.Fatalf("post form err %v", err)
+	}
+	defer resp.Body.Close()
+	b, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		t.Fatalf("readall err %v", err)
+	}
+	expected := "foo=bar"
+	if string(b) != expected {
+		t.Errorf("Got %s while expected %s", DebugSummary(b, 128), expected)
+	}
+}
+
+// TestH10Cli checks that an HTTP/1.0 fast client does not keep the socket
+// around after a fetch (no keepalive in 1.0).
+func TestH10Cli(t *testing.T) {
+	m, a := DynamicHTTPServer(false)
+	m.HandleFunc("/", EchoHandler)
+	url := fmt.Sprintf("http://localhost:%d/", a.Port)
+	opts := NewHTTPOptions(url)
+	opts.HTTP10 = true
+	opts.AddAndValidateExtraHeader("Host: mhostname")
+	cli := NewFastClient(opts)
+	code, _, _ := cli.Fetch()
+	if code != 200 {
+		t.Errorf("http 1.0 unexpected error %d", code)
+	}
+	s := cli.(*FastClient).socket
+	if s != nil {
+		t.Errorf("http 1.0 socket should be nil after fetch (no keepalive) %+v instead", s)
+	}
+	cli.Close()
+}
+
+// TestSmallBufferAndNoKeepAlive asks for a payload 1 byte larger than the
+// client buffer and expects a truncated read, both with and without keepalive.
+func TestSmallBufferAndNoKeepAlive(t *testing.T) {
+	m, a := DynamicHTTPServer(false)
+	m.HandleFunc("/", EchoHandler)
+	BufferSizeKb = 16
+	sz := BufferSizeKb * 1024
+	url := fmt.Sprintf("http://localhost:%d/?size=%d", a.Port, sz+1) // trigger buffer problem
+	opts := NewHTTPOptions(url)
+	cli := NewFastClient(opts)
+	_, data, _ := cli.Fetch()
+	recSz := len(data)
+	if recSz > sz {
+		t.Errorf("config1: was expecting truncated read, got %d", recSz)
+	}
+	cli.Close()
+	// Same test without keepalive (exercises a different path)
+	opts.DisableKeepAlive = true
+	cli = NewFastClient(opts)
+	_, data, _ = cli.Fetch()
+	recSz = len(data)
+	if recSz > sz {
+		t.Errorf("config2: was expecting truncated read, got %d", recSz)
+	}
+	cli.Close()
+}
+
+// TestBadUrl verifies NewFastClient returns nil both for an unparseable URL
+// and for a non-existent host.
+func TestBadUrl(t *testing.T) {
+	opts := NewHTTPOptions("not a valid url")
+	cli := NewFastClient(opts)
+	if cli != nil {
+		t.Errorf("config1: got a client %v despite bogus url %s", cli, opts.URL)
+		cli.Close()
+	}
+	opts.URL = "http://doesnotexist.istio.io"
+	cli = NewFastClient(opts)
+	if cli != nil {
+		t.Errorf("config2: got a client %v despite bogus url %s", cli, opts.URL)
+		cli.Close()
+	}
+}
+
+// TestDefaultPort checks port 80 is implied for http URLs, that the fast
+// client refuses https, and that the std client follows the https variant.
+// NOTE(review): depends on external connectivity to fortio.istio.io.
+func TestDefaultPort(t *testing.T) {
+	url := "http://fortio.istio.io/" // shall imply port 80
+	opts := NewHTTPOptions(url)
+	cli := NewFastClient(opts)
+	code, _, _ := cli.Fetch()
+	if code != 303 {
+		t.Errorf("unexpected code for %s: %d (expecting 303 redirect to https)", url, code)
+	}
+	conn := cli.(*FastClient).connect()
+	if conn != nil {
+		p := conn.RemoteAddr().(*net.TCPAddr).Port
+		if p != 80 {
+			t.Errorf("unexpected port for %s: %d", url, p)
+		}
+		conn.Close()
+	} else {
+		t.Errorf("unable to connect to %s", url)
+	}
+	cli.Close()
+	opts.URL = "https://fortio.istio.io" // will be https port 443
+	opts.Insecure = true                 // not needed as we have valid certs but to exercise that code
+	cli = NewFastClient(opts)
+	if cli != nil {
+		// If https support was added, remove this whitebox/for coverage purpose assertion
+		t.Errorf("fast client isn't supposed to support https (yet), got %v", cli)
+	}
+	cli = NewClient(opts)
+	if cli == nil {
+		t.Fatalf("Couldn't get a client using NewClient on modified opts.")
+	}
+	// currently fast client fails with https:
+	code, _, _ = cli.Fetch()
+	if code != 200 {
+		t.Errorf("Standard client http error code %d", code)
+	}
+	cli.Close()
+}
+
+// Test for bug #127
+
+var testBody = "delayedChunkedSize-body"
+
+// delayedChunkedSize writes the status line and flushes first (so the first
+// chunk size is not available in the initial read), sleeps 1s, then writes
+// the body chunk.
+func delayedChunkedSize(w http.ResponseWriter, r *http.Request) {
+	log.LogVf("delayedChunkedSize %v %v %v %v", r.Method, r.URL, r.Proto, r.RemoteAddr)
+	w.WriteHeader(http.StatusOK)
+	flusher, _ := w.(http.Flusher)
+	flusher.Flush()
+	time.Sleep(1 * time.Second)
+	w.Write([]byte(testBody))
+}
+
+// TestNoFirstChunkSizeInitially reproduces bug #127 (panic when the chunk
+// size isn't in the first read) and checks the raw chunked framing.
+func TestNoFirstChunkSizeInitially(t *testing.T) {
+	m, a := DynamicHTTPServer(false)
+	m.HandleFunc("/", delayedChunkedSize)
+	url := fmt.Sprintf("http://localhost:%d/delayedChunkedSize", a.Port)
+	o := HTTPOptions{URL: url}
+	client := NewClient(&o)
+	code, data, header := client.Fetch() // used to panic/bug #127
+	t.Logf("delayedChunkedSize result code %d, data len %d, headerlen %d", code, len(data), header)
+	if code != 200 {
+		t.Errorf("Got %d instead of 200", code)
+	}
+	expected := "17\r\n" + testBody + "\r\n0\r\n\r\n" // 17 is hex size of testBody
+	if string(data[header:]) != expected {
+		t.Errorf("Got %s not as expected %q at offset %d", DebugSummary(data, 256), expected, header)
+	}
+}
+
+// TestInvalidRequest checks option normalization (NumConnections) and that a
+// bad URL errors out (issue #93) instead of crashing.
+func TestInvalidRequest(t *testing.T) {
+	o := HTTPOptions{
+		URL:            "http://www.google.com/", // valid url
+		NumConnections: -3,                       // bogus NumConnections will get fixed
+		HTTPReqTimeOut: -1,
+	}
+	client := NewStdClient(&o)
+	if o.NumConnections <= 0 {
+		t.Errorf("Got %d NumConnections, was expecting normalization to 1", o.NumConnections)
+	}
+	client.ChangeURL(" http://bad.url.with.space.com/") // invalid url
+	// should not crash (issue #93), should error out
+	code, _, _ := client.Fetch()
+	if code != http.StatusBadRequest {
+		t.Errorf("Got %d code while expecting bad request (%d)", code, http.StatusBadRequest)
+	}
+	o.URL = client.url
+	c2 := NewStdClient(&o)
+	if c2 != nil {
+		t.Errorf("Got non nil client %+v code while expecting nil for bad request", c2)
+	}
+}
+
+// TestPayloadSizeSmall fetches /with-size for a few small sizes (including 0)
+// and checks the returned body length matches exactly.
+func TestPayloadSizeSmall(t *testing.T) {
+	m, a := DynamicHTTPServer(false)
+	m.HandleFunc("/", EchoHandler)
+	for _, size := range []int{768, 0, 1} {
+		url := fmt.Sprintf("http://localhost:%d/with-size?size=%d", a.Port, size)
+		o := HTTPOptions{URL: url}
+		client := NewClient(&o)
+		code, data, header := client.Fetch() // used to panic/bug #127
+		t.Logf("TestPayloadSize result code %d, data len %d, headerlen %d", code, len(data), header)
+		if code != http.StatusOK {
+			t.Errorf("Got %d instead of 200", code)
+		}
+		if len(data)-header != size {
+			t.Errorf("Got len(data)-header %d not as expected %d : got %s", len(data)-header, size, DebugSummary(data, 512))
+		}
+	}
+}
+
+// TODO: improve/unify/simplify those payload/POST tests: just go to /debug handler for both clients and check what is echoed back
+
+// TestPayloadForClient checks the std client's method, Content-Type and body
+// for json/xml/empty payloads (POST when a payload is set, GET otherwise).
+func TestPayloadForClient(t *testing.T) {
+	var tests = []struct {
+		contentType    string
+		payload        []byte
+		expectedMethod string
+	}{
+		{"application/json",
+			[]byte("{\"test\" : \"test\"}"),
+			"POST"},
+		{"application/xml",
+			[]byte(""),
+			"POST"},
+		{"",
+			nil,
+			"GET"},
+	}
+	for _, test := range tests {
+		hOptions := HTTPOptions{}
+		hOptions.URL = "www.google.com"
+		hOptions.ContentType = test.contentType
+		hOptions.Payload = test.payload
+		client := NewStdClient(&hOptions)
+		contentType := client.req.Header.Get("Content-Type")
+		if contentType != test.contentType {
+			t.Errorf("Got %s, expected %s as a content type", contentType, test.contentType)
+		}
+		method := client.req.Method
+		if method != test.expectedMethod {
+			t.Errorf("Got %s, expected %s as a method", method, test.expectedMethod)
+		}
+		body := client.req.Body
+		if body == nil {
+			if len(test.payload) > 0 {
+				t.Errorf("Got empty nil body, expected %s as a body", test.payload)
+			}
+			continue
+		}
+		buf := new(bytes.Buffer)
+		buf.ReadFrom(body)
+		payload := buf.Bytes()
+		if !bytes.Equal(payload, test.payload) {
+			t.Errorf("Got %s, expected %s as a body", string(payload), string(test.payload))
+		}
+	}
+}
+
+// TestPayloadForFastClient checks the exact raw request bytes the fast client
+// builds for json/xml/empty payloads.
+// NOTE(review): the xml case shows an empty payload literal but advertises
+// Content-Length: 18 — the literal looks truncated in this view; confirm
+// against upstream before editing.
+func TestPayloadForFastClient(t *testing.T) {
+	var tests = []struct {
+		contentType     string
+		payload         []byte
+		expectedReqBody string
+	}{
+		{"application/json",
+			[]byte("{\"test\" : \"test\"}"),
+			fmt.Sprintf("POST / HTTP/1.1\r\nHost: www.google.com\r\nContent-Length: 17\r\nContent-Type: "+
+				"application/json\r\nUser-Agent: %s\r\n\r\n{\"test\" : \"test\"}", userAgent)},
+		{"application/xml",
+			[]byte(""),
+			fmt.Sprintf("POST / HTTP/1.1\r\nHost: www.google.com\r\nContent-Length: 18\r\nContent-Type: "+
+				"application/xml\r\nUser-Agent: %s\r\n\r\n", userAgent)},
+		{"",
+			nil,
+			fmt.Sprintf("GET / HTTP/1.1\r\nHost: www.google.com\r\nUser-Agent: %s\r\n\r\n", userAgent)},
+	}
+	for _, test := range tests {
+		hOptions := HTTPOptions{}
+		hOptions.URL = "www.google.com"
+		hOptions.ContentType = test.contentType
+		hOptions.Payload = test.payload
+		client := NewFastClient(&hOptions)
+		body := string(client.(*FastClient).req)
+		if body != test.expectedReqBody {
+			t.Errorf("Got\n%s\nexpecting\n%s", body, test.expectedReqBody)
+		}
+	}
+}
+
+// TestPayloadSizeLarge uses the std client (the fast client's 128k buffer
+// can't hold 200k, and it errors out on non-200 codes) to fetch a 200000
+// byte body with status 888 and checks both the code and exact body length.
+func TestPayloadSizeLarge(t *testing.T) {
+	m, a := DynamicHTTPServer(false)
+	m.HandleFunc("/", EchoHandler)
+	size := 200000
+	url := fmt.Sprintf("http://localhost:%d/with-size?size=%d&status=888", a.Port, size)
+	o := HTTPOptions{URL: url, DisableFastClient: true}
+	client := NewClient(&o)
+	code, data, header := client.Fetch() // used to panic/bug #127
+	t.Logf("TestPayloadSize result code %d, data len %d, headerlen %d", code, len(data), header)
+	if code != 888 {
+		t.Errorf("Got %d instead of 888", code)
+	}
+	if len(data)-header != size {
+		t.Errorf("Got len(data)-header %d not as expected %d : got %s", len(data)-header, size, DebugSummary(data, 512))
+	}
+}
+
+// TestDebugHandlerSortedHeaders checks the /debug handler echoes extra
+// headers back in sorted (canonicalized) order regardless of insertion order.
+func TestDebugHandlerSortedHeaders(t *testing.T) {
+	m, a := DynamicHTTPServer(false)
+	m.HandleFunc("/debug", DebugHandler)
+	url := fmt.Sprintf("http://localhost:%d/debug", a.Port)
+	o := HTTPOptions{URL: url, DisableFastClient: true}
+	o.AddAndValidateExtraHeader("BBB: bbb")
+	o.AddAndValidateExtraHeader("CCC: ccc")
+	o.AddAndValidateExtraHeader("ZZZ: zzz")
+	o.AddAndValidateExtraHeader("AAA: aaa")
+	client := NewClient(&o)
+	code, data, header := client.Fetch() // used to panic/bug #127
+	t.Logf("TestDebugHandlerSortedHeaders result code %d, data len %d, headerlen %d", code, len(data), header)
+	if code != http.StatusOK {
+		t.Errorf("Got %d instead of 200", code)
+	}
+	// remove the first line ('Φορτίο version...') from the body
+	body := string(data)
+	i := strings.Index(body, "\n")
+	body = body[i+1:]
+	expected := fmt.Sprintf("\nGET /debug HTTP/1.1\n\n"+
+		"headers:\n\n"+
+		"Host: localhost:%d\n"+
+		"Aaa: aaa\n"+
+		"Bbb: bbb\n"+
+		"Ccc: ccc\n"+
+		"User-Agent: %s\n"+
+		"Zzz: zzz\n\n"+
+		"body:\n\n\n", a.Port, userAgent)
+	if body != expected {
+		t.Errorf("Get body: %s not as expected: %s", body, expected)
+	}
+}
+
+// TestEchoHeaders checks header=key:value query parameters (both manually
+// and properly encoded, including duplicate keys) are echoed back as
+// response headers.
+func TestEchoHeaders(t *testing.T) {
+	_, a := ServeTCP("0", "")
+	var headers = []struct {
+		key   string
+		value string
+	}{
+		{"Foo", "Bar1"},
+		{"Foo", "Bar2"}, // Test multiple same header
+		{"X", "Y"},
+		{"Z", "abc def:xyz"},
+	}
+	v := url.Values{}
+	for _, pair := range headers {
+		v.Add("header", pair.key+":"+pair.value)
+	}
+	// minimal manual encoding (only escape the space) + errors for coverage sake
+	var urls []string
+	urls = append(urls,
+		fmt.Sprintf("http://localhost:%d/echo?size=10&header=Foo:Bar1&header=Foo:Bar2&header=X:Y&header=Z:abc+def:xyz&header=&header=Foo",
+			a.Port))
+	// proper encoding
+	urls = append(urls, fmt.Sprintf("http://localhost:%d/echo?%s", a.Port, v.Encode()))
+	for _, url := range urls {
+		resp, err := http.Get(url)
+		if err != nil {
+			t.Fatalf("Failed get for %s : %v", url, err)
+		}
+		t.Logf("TestEchoHeaders url = %s : status %s", url, resp.Status)
+		if resp.StatusCode != http.StatusOK {
+			t.Errorf("Got %d instead of 200", resp.StatusCode)
+		}
+		for _, pair := range headers {
+			got := resp.Header[pair.key]
+			found := false
+			for _, v := range got {
+				if v == pair.value {
+					found = true
+					break // found == good
+				}
+			}
+			if !found {
+				t.Errorf("Mismatch: got %+v and didn't find \"%s\" for header %s (url %s)", got, pair.value, pair.key, url)
+			}
+		}
+	}
+}
+
+// TestPPROF checks the pprof endpoint 404s before SetupPPROF and serves
+// heap data after.
+func TestPPROF(t *testing.T) {
+	mux, addrN := HTTPServer("test pprof", "0")
+	addr := addrN.(*net.TCPAddr)
+	url := fmt.Sprintf("localhost:%d/debug/pprof/heap?debug=1", addr.Port)
+	code, _ := Fetch(&HTTPOptions{URL: url})
+	if code != http.StatusNotFound {
+		t.Errorf("Got %d instead of expected 404/not found for %s", code, url)
+	}
+	SetupPPROF(mux)
+	code, data := FetchURL(url)
+	if code != http.StatusOK {
+		t.Errorf("Got %d %s instead of ok for %s", code, DebugSummary(data, 256), url)
+	}
+	if !bytes.Contains(data, []byte("TotalAlloc")) {
+		t.Errorf("Result %s doesn't contain expected TotalAlloc", DebugSummary(data, 1024))
+	}
+}
+
+// TestFetchAndOnBehalfOf checks the /fetch/ proxy handler adds the
+// X-On-Behalf-Of header to the proxied request.
+func TestFetchAndOnBehalfOf(t *testing.T) {
+	mux, addr := ServeTCP("0", "/debug")
+	mux.Handle("/fetch/", http.StripPrefix("/fetch/", http.HandlerFunc(FetcherHandler)))
+	url := fmt.Sprintf("localhost:%d/fetch/localhost:%d/debug", addr.Port, addr.Port)
+	code, data := Fetch(&HTTPOptions{URL: url})
+	if code != http.StatusOK {
+		t.Errorf("Got %d %s instead of ok for %s", code, DebugSummary(data, 256), url)
+	}
+	// ideally we'd check more of the header but it can be 127.0.0.1:port or [::1]:port depending on ipv6 support etc...
+	if !bytes.Contains(data, []byte("X-On-Behalf-Of: ")) {
+		t.Errorf("Result %s doesn't contain expected On-Behalf-Of:", DebugSummary(data, 1024))
+	}
+}
+
+// TestServeError checks a second Serve() on an already-bound port fails
+// (returns nil mux and addr).
+func TestServeError(t *testing.T) {
+	_, addr := Serve("0", "")
+	port := fnet.GetPort(addr)
+	mux2, addr2 := Serve(port, "")
+	if mux2 != nil || addr2 != nil {
+		t.Errorf("2nd Serve() on same port %v should have failed: %v %v", port, mux2, addr2)
+	}
+}
+
+// testCacheHeaderHandler replies "cache me" with caching headers enabled
+// via CacheOn.
+func testCacheHeaderHandler(w http.ResponseWriter, r *http.Request) {
+	LogRequest(r, "testCacheHeader")
+	CacheOn(w)
+	w.Write([]byte("cache me"))
+}
+
+// TestCache checks Cache-Control is absent on the default handler and
+// present on the /cached handler above.
+func TestCache(t *testing.T) {
+	mux, addr := ServeTCP("0", "")
+	mux.HandleFunc("/cached", testCacheHeaderHandler)
+	baseURL := fmt.Sprintf("http://localhost:%d/", addr.Port)
+	o := NewHTTPOptions(baseURL)
+	code, data := Fetch(o)
+	if code != 200 {
+		t.Errorf("error fetching %s: %v %s", o.URL, code, DebugSummary(data, 256))
+	}
+	expectedWithCache := []byte("Cache-Control:")
+	if bytes.Contains(data, expectedWithCache) {
+		t.Errorf("Got %s when shouldn't have for %s: %v", expectedWithCache, o.URL, DebugSummary(data, 256))
+	}
+	o.URL += "cached"
+	code, data = Fetch(o)
+	if code != 200 {
+		t.Errorf("error fetching %s: %v %s", o.URL, code, DebugSummary(data, 256))
+	}
+	if !bytes.Contains(data, expectedWithCache) {
+		t.Errorf("Didn't get %s when should have for %s: %v", expectedWithCache, o.URL, DebugSummary(data, 256))
+	}
+}
+
+// TestRedirector checks the HTTPS redirector preserves the Host header and
+// relative URL in the Location, and that binding the same port twice fails.
+func TestRedirector(t *testing.T) {
+	addr := RedirectToHTTPS(":0")
+	relativeURL := "/foo/bar?some=param&anotherone"
+	port := fnet.GetPort(addr)
+	url := fmt.Sprintf("http://localhost:%s%s", port, relativeURL)
+	opts := NewHTTPOptions(url)
+	opts.AddAndValidateExtraHeader("Host: foo.istio.io")
+	code, data := Fetch(opts)
+	if code != http.StatusSeeOther {
+		t.Errorf("Got %d %s instead of %d for %s", code, DebugSummary(data, 256), http.StatusSeeOther, url)
+	}
+	if !bytes.Contains(data, []byte("Location: https://foo.istio.io"+relativeURL)) {
+		t.Errorf("Result %s doesn't contain Location: redirect", DebugSummary(data, 1024))
+	}
+	// 2nd one should fail
+	addr2 := RedirectToHTTPS(port)
+	if addr2 != nil {
+		t.Errorf("2nd RedirectToHTTPS() on same port %s should have failed: %v", port, addr2)
+
+	}
+}
+
+// NOTE(review): this literal appears truncated in this view (the test below
+// expects an <a href=...> anchor in the escaped output) — confirm against
+// upstream before editing.
+var testNeedEscape = "link "
+
+// escapeTestHandler writes testNeedEscape through the HTML-escaping writer.
+func escapeTestHandler(w http.ResponseWriter, r *http.Request) {
+	LogRequest(r, "escapeTestHandler")
+	out := NewHTMLEscapeWriter(w)
+	fmt.Fprintln(out, testNeedEscape)
+}
+
+// TestHTMLEscapeWriter checks html in the handler output gets escaped.
+func TestHTMLEscapeWriter(t *testing.T) {
+	mux, addr := HTTPServer("test escape", ":0")
+	mux.HandleFunc("/", escapeTestHandler)
+	url := fmt.Sprintf("http://localhost:%s/", fnet.GetPort(addr))
+	code, data := FetchURL(url)
+	if code != http.StatusOK {
+		t.Errorf("Got %d %s instead of ok for %s", code, DebugSummary(data, 256), url)
+	}
+	if !bytes.Contains(data, []byte("<a href='http://google.com'>link")) {
+		t.Errorf("Result %s doesn't contain expected escaped html:", DebugSummary(data, 1024))
+	}
+}
+
+// TestNewHTMLEscapeWriterError checks a non-http writer (os.Stdout) yields a
+// nil Flusher on the HTMLEscapeWriter.
+func TestNewHTMLEscapeWriterError(t *testing.T) {
+	log.Infof("Expect error complaining about not an http/flusher:")
+	out := NewHTMLEscapeWriter(os.Stdout) // should cause flusher to be null
+	hw := out.(*HTMLEscapeWriter)
+	if hw.Flusher != nil {
+		t.Errorf("Shouldn't have a flusher when not passing in an http: %+v", hw.Flusher)
+	}
+}
+
+// TestDefaultHeadersAndOptionsInit checks both clients send the default
+// fortio User-Agent when given uninitialized HTTPOptions.
+func TestDefaultHeadersAndOptionsInit(t *testing.T) {
+	_, addr := ServeTCP("0", "/debug")
+	// Un initialized http options:
+	o := HTTPOptions{URL: fmt.Sprintf("http://localhost:%d/debug", addr.Port)}
+	o1 := o
+	cli1 := NewStdClient(&o1)
+	code, data, _ := cli1.Fetch()
+	if code != 200 {
+		t.Errorf("Non ok code %d for debug default fetch1", code)
+	}
+	expected := []byte("User-Agent: fortio.org/fortio-")
+	if !bytes.Contains(data, expected) {
+		t.Errorf("Didn't find default header echoed back in std client1 %s (expecting %s)", DebugSummary(data, 512), expected)
+	}
+	o2 := o
+	cli2 := NewFastClient(&o2)
+	code, data, _ = cli2.Fetch()
+	if code != 200 {
+		t.Errorf("Non ok code %d for debug default fetch2", code)
+	}
+	if !bytes.Contains(data, expected) {
+		t.Errorf("Didn't find default header echoed back in fast client1 %s (expecting %s)", DebugSummary(data, 512), expected)
+	}
+}
+
+// TestAddHTTPS checks AddHTTPS forces an https:// scheme (case-insensitively)
+// while preserving host/path case.
+func TestAddHTTPS(t *testing.T) {
+	var tests = []struct {
+		input    string
+		expected string
+	}{
+		{"foo", "https://foo"},
+		{"foo.com", "https://foo.com"},
+		{"http://foo.com", "https://foo.com"},
+		{"https://foo.com", "https://foo.com"},
+		{"hTTps://foo.com", "https://foo.com"},
+		{"hTTp://foo.com", "https://foo.com"},
+		{"http://fOO.com", "https://fOO.com"},
+		{"hTTp://foo.com/about.html", "https://foo.com/about.html"},
+		{"hTTp://foo.com/ABOUT.html", "https://foo.com/ABOUT.html"},
+		{"hTTp://foo.com/BaR", "https://foo.com/BaR"},
+		{"https://foo.com/BaR", "https://foo.com/BaR"},
+		{"http", "https://http"},
+	}
+
+	for _, test := range tests {
+		output := AddHTTPS(test.input)
+		if output != test.expected {
+			t.Errorf("%s is received but %s was expected", output, test.expected)
+		}
+	}
+}
+
+// TestValidateAndAddBasicAuthentication checks credential validation and that
+// the Authorization header is only added for valid, non-empty credentials.
+func TestValidateAndAddBasicAuthentication(t *testing.T) {
+	var tests = []struct {
+		o                  HTTPOptions
+		isCredentialsValid bool
+		isAuthHeaderAdded  bool
+	}{
+		{HTTPOptions{UserCredentials: "foo:foo"}, true, true},
+		{HTTPOptions{UserCredentials: "foofoo"}, false, false},
+		{HTTPOptions{UserCredentials: ""}, true, false},
+	}
+
+	for _, test := range tests {
+		h := make(http.Header)
+		err := test.o.ValidateAndAddBasicAuthentication(h)
+		if err == nil && !test.isCredentialsValid {
+			t.Errorf("Error was not expected for %s", test.o.UserCredentials)
+		}
+		if test.isAuthHeaderAdded && len(h.Get("Authorization")) <= 0 {
+			t.Errorf("Authorization header was expected for %s credentials", test.o.UserCredentials)
+		}
+
+	}
+}
+
+// --- for bench mark/comparison
+
+// asciiFold0 is the naive baseline: stdlib ToUpper then []byte conversion.
+func asciiFold0(str string) []byte {
+	return []byte(strings.ToUpper(str))
+}
+
+var toLowerMaskRune = rune(toUpperMask)
+
+// NOTE(review): despite the name, masking with toUpperMask clears the ASCII
+// lowercase bit, i.e. this UPPER-cases letters — kept as-is since it exists
+// only for the benchmark comparison below.
+func toLower(r rune) rune {
+	return r & toLowerMaskRune
+}
+
+// asciiFold1 folds via strings.Map with the mask-based conversion above.
+func asciiFold1(str string) []byte {
+	return []byte(strings.Map(toLower, str))
+}
+
+// lw is a package-level sink so the compiler can't eliminate benchmark bodies.
+var lw []byte
+
+func BenchmarkASCIIFoldNormalToLower(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		lw = asciiFold0(utf8Str)
+	}
+}
+func BenchmarkASCIIFoldCustomToLowerMap(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		lw = asciiFold1(utf8Str)
+	}
+}
+
+// Package's version (3x fastest)
+func BenchmarkASCIIToUpper(b *testing.B) {
+	log.SetLogLevel(log.Warning)
+	for n := 0; n < b.N; n++ {
+		lw = ASCIIToUpper(utf8Str)
+	}
+}
+
+// Note: newline inserted in set-cookie line because of linter (line too long)
+var testHaystack = []byte(`HTTP/1.1 200 OK
+Date: Sun, 16 Jul 2017 21:00:29 GMT
+Expires: -1
+Cache-Control: private, max-age=0
+Content-Type: text/html; charset=ISO-8859-1
+P3P: CP="This is not a P3P policy! See https://www.google.com/support/accounts/answer/151657?hl=en for more info."
+Server: gws
+X-XSS-Protection: 1; mode=block
+X-Frame-Options: SAMEORIGIN
+Set-Cookie: NID=107=sne5itxJgY_4dD951psa7cyP_rQ3ju-J9p0QGmKYl0l0xUVSVmGVeX8smU0VV6FyfQnZ4kkhaZ9ozxLpUWH-77K_0W8aXzE3
+PDQxwAynvJgGGA9rMRB9bperOblUOQ3XilG6B5-8auMREgbc; expires=Mon, 15-Jan-2018 21:00:29 GMT; path=/; domain=.google.com; HttpOnly
+Accept-Ranges: none
+Vary: Accept-Encoding
+Transfer-Encoding: chunked
+`)
+
+// FoldFind0 is the naive baseline for FoldFind: ToUpper the whole haystack
+// then strings.Index (allocates; used only for benchmark comparison).
+func FoldFind0(haystack []byte, needle []byte) (bool, int) {
+	offset := strings.Index(strings.ToUpper(string(haystack)), string(needle))
+	found := (offset >= 0)
+	return found, offset
+}
+
+// -- benchmarks --
+
+func BenchmarkFoldFind0(b *testing.B) {
+	needle := []byte("VARY")
+	for n := 0; n < b.N; n++ {
+		FoldFind0(testHaystack, needle)
+	}
+}
+
+func BenchmarkFoldFind(b *testing.B) {
+	needle := []byte("VARY")
+	for n := 0; n < b.N; n++ {
+		FoldFind(testHaystack, needle)
+	}
+}
+
+// -- end of benchmark tests / end of this file
diff --git a/vendor/fortio.org/fortio/fhttp/http_utils.go b/vendor/fortio.org/fortio/fhttp/http_utils.go
new file mode 100644
index 0000000000..195461e8af
--- /dev/null
+++ b/vendor/fortio.org/fortio/fhttp/http_utils.go
@@ -0,0 +1,470 @@
+// Copyright 2017 Istio Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fhttp // import "fortio.org/fortio/fhttp"
+
+import (
+ "encoding/base64"
+ "fmt"
+ "html/template"
+ "io"
+ "math/rand"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "fortio.org/fortio/fnet"
+ "fortio.org/fortio/log"
+ "fortio.org/fortio/stats"
+)
+
+// Used for the fast case insensitive search
+const toUpperMask = ^byte('a' - 'A')
+
+// Slow but correct version
+func toUpper(b byte) byte {
+	if b >= 'a' && b <= 'z' {
+		b -= ('a' - 'A')
+	}
+	return b
+}
+
+// ASCIIToUpper returns a byte array equal to the input string but in uppercase
+// (previous comment said "lowercase" but the code calls toUpper).
+// Only works for ASCII, not meant for unicode.
+func ASCIIToUpper(str string) []byte {
+	numChars := utf8.RuneCountInString(str)
+	if numChars != len(str) && log.LogVerbose() {
+		log.Errf("ASCIIFold(\"%s\") contains %d characters, some non ascii (byte length %d): will mangle", str, numChars, len(str))
+	}
+	res := make([]byte, numChars)
+	// less surprising if we only mangle the extended characters
+	i := 0
+	for _, c := range str { // Attention: _ here != i for unicode characters
+		res[i] = toUpper(byte(c))
+		i++
+	}
+	return res
+}
+
+// FoldFind searches the bytes assuming ascii, ignoring the lowercase bit
+// for testing. Not intended to work with unicode, meant for http headers
+// and to be fast (see benchmark in test file).
+// Returns (true, offset of first match) or (false, -1).
+// ("hackstackLen" below is an upstream typo for haystackLen — kept as-is.)
+func FoldFind(haystack []byte, needle []byte) (bool, int) {
+	idx := 0
+	found := false
+	hackstackLen := len(haystack)
+	needleLen := len(needle)
+	if needleLen == 0 {
+		return true, 0
+	}
+	if needleLen > hackstackLen { // those 2 ifs also handles haystackLen == 0
+		return false, -1
+	}
+	needleOffset := 0
+	for {
+		h := haystack[idx]
+		n := needle[needleOffset]
+		// This line is quite performance sensitive. calling toUpper() for instance
+		// is a 30% hit, even if called only on the haystack. The XOR lets us be
+		// true for equality and the & with mask also true if the only difference
+		// between the 2 is the case bit.
+		xor := h ^ n // == 0 if strictly equal
+		if (xor&toUpperMask) != 0 || (((h < 32) || (n < 32)) && (xor != 0)) {
+			// mismatch: restart the needle one past the previous match start
+			idx -= (needleOffset - 1) // does ++ most of the time
+			needleOffset = 0
+			if idx >= hackstackLen {
+				break
+			}
+			continue
+		}
+		if needleOffset == needleLen-1 {
+			found = true
+			break
+		}
+		needleOffset++
+		idx++
+		if idx >= hackstackLen {
+			break
+		}
+	}
+	if !found {
+		return false, -1
+	}
+	return true, idx - needleOffset
+}
+
+// ParseDecimal extracts the first positive integer number from the input.
+// spaces are ignored.
+// any character that isn't a digit cause the parsing to stop
+// Returns -1 when no digit is found (spaces are only skipped before the
+// first digit).
+func ParseDecimal(inp []byte) int {
+	res := -1
+	for _, b := range inp {
+		if b == ' ' && res == -1 {
+			continue
+		}
+		if b < '0' || b > '9' {
+			break
+		}
+		digit := int(b - '0')
+		if res == -1 {
+			res = digit
+		} else {
+			res = 10*res + digit
+		}
+	}
+	return res
+}
+
+// ParseChunkSize extracts the chunk size and consumes the line.
+// Returns the offset of the data and the size of the chunk,
+// 0, -1 when not found.
+// Phase 1 accumulates hex digits; phase 2 scans forward to the terminating
+// CRLF and returns the offset just past it.
+func ParseChunkSize(inp []byte) (int, int) {
+	if log.LogDebug() {
+		log.Debugf("ParseChunkSize(%s)", DebugSummary(inp, 128))
+	}
+	res := -1
+	off := 0
+	end := len(inp)
+	inDigits := true
+	for {
+		if off >= end {
+			return off, -1
+		}
+		if inDigits {
+			b := toUpper(inp[off])
+			var digit int
+			if b >= 'A' && b <= 'F' {
+				digit = 10 + int(b-'A')
+			} else if b >= '0' && b <= '9' {
+				digit = int(b - '0')
+			} else {
+				// first non hex digit: require at least one digit seen
+				inDigits = false
+				if res == -1 {
+					log.Errf("Didn't find hex number %q", inp)
+					return off, res
+				}
+				continue
+			}
+			if res == -1 {
+				res = digit
+			} else {
+				res = 16*res + digit
+			}
+		} else {
+			// After digits, skipping ahead to find \r\n
+			if inp[off] == '\r' {
+				off++
+				if off >= end {
+					return off, -1
+				}
+				if inp[off] == '\n' {
+					// good case
+					return off + 1, res
+				}
+			}
+		}
+		off++
+	}
+}
+
+// EscapeBytes returns printable string. Same as %q format without the
+// surrounding/extra "".
+func EscapeBytes(buf []byte) string {
+	e := fmt.Sprintf("%q", buf)
+	return e[1 : len(e)-1]
+}
+
+// DebugSummary returns a string with the size and escaped first max/2 and
+// last max/2 bytes of a buffer (or the whole escaped buffer if small enough).
+func DebugSummary(buf []byte, max int) string {
+	l := len(buf)
+	if l <= max+3 { // no point in shortening to add ... if we could return those 3
+		return EscapeBytes(buf)
+	}
+	max /= 2
+	return fmt.Sprintf("%d: %s...%s", l, EscapeBytes(buf[:max]), EscapeBytes(buf[l-max:]))
+}
+
+// -- server utils
+
+// removeTrailingPercent strips one optional trailing '%' (e.g. "20%" -> "20").
+func removeTrailingPercent(s string) string {
+	if strings.HasSuffix(s, "%") {
+		return s[:len(s)-1]
+	}
+	return s
+}
+
+// generateStatus from string, format: status="503" for 100% 503s
+// status="503:20,404:10,403:0.5" for 20% 503s, 10% 404s, 0.5% 403s 69.5% 200s
+// Malformed input or weights summing past 100 return http.StatusBadRequest.
+func generateStatus(status string) int {
+	lst := strings.Split(status, ",")
+	log.Debugf("Parsing status %s -> %v", status, lst)
+	// Simple non probabilistic status case:
+	if len(lst) == 1 && !strings.ContainsRune(status, ':') {
+		s, err := strconv.Atoi(status)
+		if err != nil {
+			log.Warnf("Bad input status %v, not a number nor comma and colon separated %% list", status)
+			return http.StatusBadRequest
+		}
+		log.Debugf("Parsed status %s -> %d", status, s)
+		return s
+	}
+	// weights[] holds cumulative percentages, codes[] the matching statuses.
+	weights := make([]float32, len(lst))
+	codes := make([]int, len(lst))
+	lastPercent := float64(0)
+	i := 0
+	for _, entry := range lst {
+		l2 := strings.Split(entry, ":")
+		if len(l2) != 2 {
+			log.Warnf("Should have exactly 1 : in status list %s -> %v", status, entry)
+			return http.StatusBadRequest
+		}
+		s, err := strconv.Atoi(l2[0])
+		if err != nil {
+			log.Warnf("Bad input status %v -> %v, not a number before colon", status, l2[0])
+			return http.StatusBadRequest
+		}
+		percStr := removeTrailingPercent(l2[1])
+		p, err := strconv.ParseFloat(percStr, 32)
+		if err != nil || p < 0 || p > 100 {
+			log.Warnf("Percentage is not a [0. - 100.] number in %v -> %v : %v %f", status, percStr, err, p)
+			return http.StatusBadRequest
+		}
+		lastPercent += p
+		// Round() needed to cover 'exactly' 100% and not more or less because of rounding errors
+		p32 := float32(stats.Round(lastPercent))
+		if p32 > 100. {
+			log.Warnf("Sum of percentage is greater than 100 in %v %f %f %f", status, lastPercent, p, p32)
+			return http.StatusBadRequest
+		}
+		weights[i] = p32
+		codes[i] = s
+		i++
+	}
+	// roll in [0,100) and pick the first bucket whose cumulative weight covers it
+	res := 100. * rand.Float32()
+	for i, v := range weights {
+		if res <= v {
+			log.Debugf("[0.-100.[ for %s roll %f got #%d -> %d", status, res, i, codes[i])
+			return codes[i]
+		}
+	}
+	log.Debugf("[0.-100.[ for %s roll %f no hit, defaulting to OK", status, res)
+	return http.StatusOK // default/reminder of probability table
+}
+
// generateSize from string, format: "size=512" for 100% 512 bytes body replies,
// size="512:20,16384:10" for 20% 512 bytes, 10% 16k, 70% default echo back.
// returns -1 for the default case, so one can specify 0 and force no payload
// even if it's a post request with a payload (to test asymmetric large inbound
// small outbound). Malformed input also falls back to -1 (echo behavior).
// TODO: refactor similarities with status and delay
func generateSize(sizeInput string) (size int) {
	size = -1 // default value/behavior
	if len(sizeInput) == 0 {
		return size
	}
	lst := strings.Split(sizeInput, ",")
	log.Debugf("Parsing size %s -> %v", sizeInput, lst)
	// Simple non probabilistic status case:
	if len(lst) == 1 && !strings.ContainsRune(sizeInput, ':') {
		s, err := strconv.Atoi(sizeInput)
		if err != nil {
			log.Warnf("Bad input size %v, not a number nor comma and colon separated %% list", sizeInput)
			return size
		}
		size = s
		log.Debugf("Parsed size %s -> %d", sizeInput, size)
		// Clamps to [0, fnet.MaxPayloadSize].
		fnet.ValidatePayloadSize(&size)
		return size
	}
	// weights[i] holds the cumulative percentage up to and including sizes[i].
	weights := make([]float32, len(lst))
	sizes := make([]int, len(lst))
	lastPercent := float64(0)
	i := 0
	for _, entry := range lst {
		l2 := strings.Split(entry, ":")
		if len(l2) != 2 {
			log.Warnf("Should have exactly 1 : in size list %s -> %v", sizeInput, entry)
			return size
		}
		s, err := strconv.Atoi(l2[0])
		if err != nil {
			log.Warnf("Bad input size %v -> %v, not a number before colon", sizeInput, l2[0])
			return size
		}
		fnet.ValidatePayloadSize(&s)
		percStr := removeTrailingPercent(l2[1])
		p, err := strconv.ParseFloat(percStr, 32)
		if err != nil || p < 0 || p > 100 {
			log.Warnf("Percentage is not a [0. - 100.] number in %v -> %v : %v %f", sizeInput, percStr, err, p)
			return size
		}
		lastPercent += p
		// Round() needed to cover 'exactly' 100% and not more or less because of rounding errors
		p32 := float32(stats.Round(lastPercent))
		if p32 > 100. {
			log.Warnf("Sum of percentage is greater than 100 in %v %f %f %f", sizeInput, lastPercent, p, p32)
			return size
		}
		weights[i] = p32
		sizes[i] = s
		i++
	}
	// One roll in [0,100); the first cumulative weight at or above the roll wins.
	res := 100. * rand.Float32()
	for i, v := range weights {
		if res <= v {
			log.Debugf("[0.-100.[ for %s roll %f got #%d -> %d", sizeInput, res, i, sizes[i])
			return sizes[i]
		}
	}
	log.Debugf("[0.-100.[ for %s roll %f no hit, defaulting to -1", sizeInput, res)
	return size // default/reminder of probability table
}
+
// MaxDelay is the maximum delay allowed for the echoserver responses.
// 1.5s so we can test the default 1s timeout in envoy.
const MaxDelay = 1500 * time.Millisecond

// generateDelay from string, format: delay="100ms" for 100% 100ms delay
// delay="10ms:20,20ms:10,1s:0.5" for 20% 10ms, 10% 20ms, 0.5% 1s and 69.5% 0.
// Empty or malformed input returns -1; individual delays are capped at MaxDelay.
// TODO: very similar with generateStatus - refactor?
func generateDelay(delay string) time.Duration {
	lst := strings.Split(delay, ",")
	log.Debugf("Parsing delay %s -> %v", delay, lst)
	if len(delay) == 0 {
		return -1
	}
	// Simple non probabilistic status case:
	if len(lst) == 1 && !strings.ContainsRune(delay, ':') {
		d, err := time.ParseDuration(delay)
		if err != nil {
			log.Warnf("Bad input delay %v, not a duration nor comma and colon separated %% list", delay)
			return -1
		}
		log.Debugf("Parsed delay %s -> %d", delay, d)
		if d > MaxDelay {
			d = MaxDelay
		}
		return d
	}
	// weights[i] holds the cumulative percentage up to and including delays[i].
	weights := make([]float32, len(lst))
	delays := make([]time.Duration, len(lst))
	lastPercent := float64(0)
	i := 0
	for _, entry := range lst {
		l2 := strings.Split(entry, ":")
		if len(l2) != 2 {
			log.Warnf("Should have exactly 1 : in delay list %s -> %v", delay, entry)
			return -1
		}
		d, err := time.ParseDuration(l2[0])
		if err != nil {
			log.Warnf("Bad input delay %v -> %v, not a number before colon", delay, l2[0])
			return -1
		}
		if d > MaxDelay {
			d = MaxDelay
		}
		percStr := removeTrailingPercent(l2[1])
		p, err := strconv.ParseFloat(percStr, 32)
		if err != nil || p < 0 || p > 100 {
			log.Warnf("Percentage is not a [0. - 100.] number in %v -> %v : %v %f", delay, percStr, err, p)
			return -1
		}
		lastPercent += p
		// Round() needed to cover 'exactly' 100% and not more or less because of rounding errors
		p32 := float32(stats.Round(lastPercent))
		if p32 > 100. {
			log.Warnf("Sum of percentage is greater than 100 in %v %f %f %f", delay, lastPercent, p, p32)
			return -1
		}
		weights[i] = p32
		delays[i] = d
		i++
	}
	// One roll in [0,100); the first cumulative weight at or above the roll wins.
	res := 100. * rand.Float32()
	for i, v := range weights {
		if res <= v {
			log.Debugf("[0.-100.[ for %s roll %f got #%d -> %d", delay, res, i, delays[i])
			return delays[i]
		}
	}
	log.Debugf("[0.-100.[ for %s roll %f no hit, defaulting to 0", delay, res)
	return 0
}
+
+// RoundDuration rounds to 10th of second. Only for positive durations.
+// TODO: switch to Duration.Round once switched to go 1.9
+func RoundDuration(d time.Duration) time.Duration {
+ tenthSec := int64(100 * time.Millisecond)
+ r := int64(d+50*time.Millisecond) / tenthSec
+ return time.Duration(tenthSec * r)
+}
+
+// -- formerly in uihandler:
+
+// HTMLEscapeWriter is an io.Writer escaping the output for safe html inclusion.
+type HTMLEscapeWriter struct {
+ NextWriter io.Writer
+ Flusher http.Flusher
+}
+
+func (w *HTMLEscapeWriter) Write(p []byte) (int, error) {
+ template.HTMLEscape(w.NextWriter, p)
+ if w.Flusher != nil {
+ w.Flusher.Flush()
+ }
+ return len(p), nil
+}
+
+// NewHTMLEscapeWriter creates a io.Writer that can safely output
+// to an http.ResponseWrite with HTMLEscape-ing.
+func NewHTMLEscapeWriter(w io.Writer) io.Writer {
+ flusher, ok := w.(http.Flusher)
+ if !ok {
+ log.Errf("expected writer %+v to be an http.ResponseWriter and thus a http.Flusher", w)
+ flusher = nil
+ }
+ return &HTMLEscapeWriter{NextWriter: w, Flusher: flusher}
+}
+
// OnBehalfOf adds a header with the remote addr to an http options object.
// The AddAndValidateExtraHeader error is deliberately ignored: the header name
// is a known-good constant, only the caller-controlled value varies.
func OnBehalfOf(o *HTTPOptions, r *http.Request) {
	_ = o.AddAndValidateExtraHeader("X-On-Behalf-Of: " + r.RemoteAddr)
}
+
+// AddHTTPS replaces "http://" in url with "https://" or prepends "https://"
+// if url does not contain prefix "http://".
+func AddHTTPS(url string) string {
+ if len(url) > len(fnet.PrefixHTTP) {
+ if strings.EqualFold(url[:len(fnet.PrefixHTTP)], fnet.PrefixHTTP) {
+ log.Infof("Replacing http scheme with https for url: %s", url)
+ return fnet.PrefixHTTPS + url[len(fnet.PrefixHTTP):]
+ }
+ // returns url with normalized lowercase https prefix
+ if strings.EqualFold(url[:len(fnet.PrefixHTTPS)], fnet.PrefixHTTPS) {
+ return fnet.PrefixHTTPS + url[len(fnet.PrefixHTTPS):]
+ }
+ }
+ // url must not contain any prefix, so add https prefix
+ log.Infof("Prepending https:// to url: %s", url)
+ return fnet.PrefixHTTPS + url
+}
+
// generateBase64UserCredentials encodes the user credential to base64 and adds a Basic as prefix.
func generateBase64UserCredentials(userCredentials string) string {
	encoded := base64.StdEncoding.EncodeToString([]byte(userCredentials))
	return "Basic " + encoded
}
diff --git a/vendor/fortio.org/fortio/fhttp/httprunner.go b/vendor/fortio.org/fortio/fhttp/httprunner.go
new file mode 100644
index 0000000000..5e15671128
--- /dev/null
+++ b/vendor/fortio.org/fortio/fhttp/httprunner.go
@@ -0,0 +1,175 @@
+// Copyright 2017 Istio Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fhttp
+
+import (
+ "fmt"
+ "net/http"
+ "os"
+ "runtime"
+ "runtime/pprof"
+ "sort"
+
+ "fortio.org/fortio/log"
+ "fortio.org/fortio/periodic"
+ "fortio.org/fortio/stats"
+)
+
+// Most of the code in this file is the library-fication of code originally
+// in cmd/fortio/main.go
+
// HTTPRunnerResults is the aggregated result of an HTTPRunner.
// Also is the internal type used per thread/goroutine.
type HTTPRunnerResults struct {
	periodic.RunnerResults
	client Fetcher
	// RetCodes maps a return code (http status, or -1 for socket errors per
	// the AbortOn convention below) to the count of responses with that code.
	RetCodes map[int]int64
	// internal type/data
	sizes       *stats.Histogram
	headerSizes *stats.Histogram
	// exported result
	Sizes       *stats.HistogramData
	HeaderSizes *stats.HistogramData
	URL         string
	SocketCount int
	// http code to abort the run on (-1 for connection or other socket error)
	AbortOn int
	aborter *periodic.Aborter
}
+
// Run tests http request fetching. Main call being run at the target QPS.
// To be set as the Function in RunnerOptions.
// Each thread gets its own HTTPRunnerResults (see RunHTTPTest), so the map and
// histogram updates here need no locking.
func (httpstate *HTTPRunnerResults) Run(t int) {
	log.Debugf("Calling in %d", t)
	code, body, headerSize := httpstate.client.Fetch()
	size := len(body)
	log.Debugf("Got in %3d hsz %d sz %d - will abort on %d", code, headerSize, size, httpstate.AbortOn)
	httpstate.RetCodes[code]++
	httpstate.sizes.Record(float64(size))
	httpstate.headerSizes.Record(float64(headerSize))
	// Abort the whole run (all threads, via the shared aborter) on a match.
	if httpstate.AbortOn == code {
		httpstate.aborter.Abort()
		log.Infof("Aborted run because of code %d - data %s", code, DebugSummary(body, 1024))
	}
}
+
// HTTPRunnerOptions includes the base RunnerOptions plus http specific
// options. It embeds both the periodic runner knobs and the http client knobs.
type HTTPRunnerOptions struct {
	periodic.RunnerOptions
	HTTPOptions        // Need to call Init() to initialize
	Profiler           string // file to save profiles to. defaults to no profiling
	AllowInitialErrors bool   // whether initial errors don't cause an abort
	// Which status code cause an abort of the run (default 0 = don't abort; reminder -1 is returned for socket errors)
	AbortOn int
}
+
// RunHTTPTest runs an http test and returns the aggregated stats.
// It creates one client (connection) per thread, optionally writes cpu/mem
// profiles, then merges per-thread histograms and return-code counts into the
// aggregate result.
func RunHTTPTest(o *HTTPRunnerOptions) (*HTTPRunnerResults, error) {
	o.RunType = "HTTP"
	log.Infof("Starting http test for %s with %d threads at %.1f qps", o.URL, o.NumThreads, o.QPS)
	r := periodic.NewPeriodicRunner(&o.RunnerOptions)
	defer r.Options().Abort()
	numThreads := r.Options().NumThreads
	o.HTTPOptions.Init(o.URL)
	out := r.Options().Out // Important as the default value is set from nil to stdout inside NewPeriodicRunner
	total := HTTPRunnerResults{
		RetCodes:    make(map[int]int64),
		sizes:       stats.NewHistogram(0, 100),
		headerSizes: stats.NewHistogram(0, 5),
		URL:         o.URL,
		AbortOn:     o.AbortOn,
		aborter:     r.Options().Stop,
	}
	httpstate := make([]HTTPRunnerResults, numThreads)
	for i := 0; i < numThreads; i++ {
		r.Options().Runners[i] = &httpstate[i]
		// Create a client (and transport) and connect once for each 'thread'
		httpstate[i].client = NewClient(&o.HTTPOptions)
		if httpstate[i].client == nil {
			return nil, fmt.Errorf("unable to create client %d for %s", i, o.URL)
		}
		// Warm-up fetch; skipped in Exactly mode (would consume the request budget):
		if o.Exactly <= 0 {
			code, data, headerSize := httpstate[i].client.Fetch()
			if !o.AllowInitialErrors && code != http.StatusOK {
				return nil, fmt.Errorf("error %d for %s: %q", code, o.URL, string(data))
			}
			if i == 0 && log.LogVerbose() {
				log.LogVf("first hit of url %s: status %03d, headers %d, total %d\n%s\n", o.URL, code, headerSize, len(data), data)
			}
		}
		// Setup the stats for each 'thread'
		httpstate[i].sizes = total.sizes.Clone()
		httpstate[i].headerSizes = total.headerSizes.Clone()
		httpstate[i].RetCodes = make(map[int]int64)
		httpstate[i].AbortOn = total.AbortOn
		httpstate[i].aborter = total.aborter
	}

	if o.Profiler != "" {
		fc, err := os.Create(o.Profiler + ".cpu")
		if err != nil {
			log.Critf("Unable to create .cpu profile: %v", err)
			return nil, err
		}
		// NOTE(review): fc is never explicitly closed; presumably StopCPUProfile
		// flushing + process lifetime make it moot — confirm upstream.
		pprof.StartCPUProfile(fc) //nolint: gas,errcheck
	}
	total.RunnerResults = r.Run()
	if o.Profiler != "" {
		pprof.StopCPUProfile()
		fm, err := os.Create(o.Profiler + ".mem")
		if err != nil {
			log.Critf("Unable to create .mem profile: %v", err)
			return nil, err
		}
		runtime.GC() // get up-to-date statistics
		pprof.WriteHeapProfile(fm) // nolint:gas,errcheck
		fm.Close() // nolint:gas,errcheck
		_, _ = fmt.Fprintf(out, "Wrote profile data to %s.{cpu|mem}\n", o.Profiler)
	}
	// Numthreads may have reduced but it should be ok to accumulate 0s from
	// unused ones. We also must cleanup all the created clients.
	keys := []int{}
	for i := 0; i < numThreads; i++ {
		total.SocketCount += httpstate[i].client.Close()
		// Q: is there some copying each time stats[i] is used?
		for k := range httpstate[i].RetCodes {
			if _, exists := total.RetCodes[k]; !exists {
				keys = append(keys, k)
			}
			total.RetCodes[k] += httpstate[i].RetCodes[k]
		}
		total.sizes.Transfer(httpstate[i].sizes)
		total.headerSizes.Transfer(httpstate[i].headerSizes)
	}
	// Cleanup state:
	r.Options().ReleaseRunners()
	sort.Ints(keys)
	totalCount := float64(total.DurationHistogram.Count)
	_, _ = fmt.Fprintf(out, "Sockets used: %d (for perfect keepalive, would be %d)\n", total.SocketCount, r.Options().NumThreads)
	for _, k := range keys {
		_, _ = fmt.Fprintf(out, "Code %3d : %d (%.1f %%)\n", k, total.RetCodes[k], 100.*float64(total.RetCodes[k])/totalCount)
	}
	total.HeaderSizes = total.headerSizes.Export()
	total.Sizes = total.sizes.Export()
	if log.LogVerbose() {
		total.HeaderSizes.Print(out, "Response Header Sizes Histogram")
		total.Sizes.Print(out, "Response Body/Total Sizes Histogram")
	} else if log.Log(log.Warning) {
		total.headerSizes.Counter.Print(out, "Response Header Sizes")
		total.sizes.Counter.Print(out, "Response Body/Total Sizes")
	}
	return &total, nil
}
diff --git a/vendor/fortio.org/fortio/fhttp/httprunner_test.go b/vendor/fortio.org/fortio/fhttp/httprunner_test.go
new file mode 100644
index 0000000000..4f0ca71ee6
--- /dev/null
+++ b/vendor/fortio.org/fortio/fhttp/httprunner_test.go
@@ -0,0 +1,261 @@
+// Copyright 2017 Istio Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package fhttp
+
+import (
+ "fmt"
+ "net/http"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+
+ "fortio.org/fortio/log"
+)
+
// TestHTTPRunner runs a full load test against a local echo server: a partial
// url must error out, a full url must yield only 200s and one socket per
// thread; it also smoke-tests the raw fast and std clients.
func TestHTTPRunner(t *testing.T) {
	mux, addr := DynamicHTTPServer(false)
	mux.HandleFunc("/foo/", EchoHandler)
	baseURL := fmt.Sprintf("http://localhost:%d/", addr.Port)

	opts := HTTPRunnerOptions{}
	opts.QPS = 100
	opts.URL = baseURL
	opts.DisableFastClient = true
	_, err := RunHTTPTest(&opts)
	if err == nil {
		t.Error("Expecting an error but didn't get it when not using full url")
	}
	opts.DisableFastClient = false
	opts.URL = baseURL + "foo/bar?delay=20ms&status=200:100"
	opts.Profiler = "test.profile"
	res, err := RunHTTPTest(&opts)
	if err != nil {
		t.Error(err)
		return
	}
	totalReq := res.DurationHistogram.Count
	httpOk := res.RetCodes[http.StatusOK]
	if totalReq != httpOk {
		t.Errorf("Mismatch between requests %d and ok %v", totalReq, res.RetCodes)
	}
	if res.SocketCount != res.RunnerResults.NumThreads {
		t.Errorf("%d socket used, expected same as thread# %d", res.SocketCount, res.RunnerResults.NumThreads)
	}
	// Test raw client, should get warning about non init timeout:
	rawOpts := HTTPOptions{
		URL: opts.URL,
	}
	o1 := rawOpts
	if r, _, _ := NewFastClient(&o1).Fetch(); r != http.StatusOK {
		t.Errorf("Fast Client with raw option should still work with warning in logs")
	}
	// Bad url first, then ChangeURL back to the good one before fetching:
	o1 = rawOpts
	o1.URL = "http://www.doesnotexist.badtld/"
	c := NewStdClient(&o1)
	c.ChangeURL(rawOpts.URL)
	if r, _, _ := c.Fetch(); r != http.StatusOK {
		t.Errorf("Std Client with raw option should still work with warning in logs")
	}
}
+
// testHTTPNotLeaking runs the same load twice and checks the goroutine count
// stays roughly flat between runs — a regression test for goroutine leaks
// (see #167/#178 referenced below).
func testHTTPNotLeaking(t *testing.T, opts *HTTPRunnerOptions) {
	ngBefore1 := runtime.NumGoroutine()
	t.Logf("Number go routine before test %d", ngBefore1)
	mux, addr := DynamicHTTPServer(false)
	mux.HandleFunc("/echo100", EchoHandler)
	url := fmt.Sprintf("http://localhost:%d/echo100", addr.Port)
	numCalls := 100
	opts.NumThreads = numCalls / 2 // make 2 calls per thread
	opts.Exactly = int64(numCalls)
	opts.QPS = float64(numCalls) / 2 // take 1 second
	opts.URL = url
	// Warm up round 1
	res, err := RunHTTPTest(opts)
	if err != nil {
		t.Error(err)
		return
	}
	httpOk := res.RetCodes[http.StatusOK]
	if opts.Exactly != httpOk {
		t.Errorf("Run1: Mismatch between requested calls %d and ok %v", numCalls, res.RetCodes)
	}
	ngBefore2 := runtime.NumGoroutine()
	t.Logf("Number of go routine after warm up / before 2nd test %d", ngBefore2)
	// 2nd run, should be stable number of go routines after first, not keep growing:
	res, err = RunHTTPTest(opts)
	// it takes a while for the connections to close with std client (!) why isn't CloseIdleConnections() synchronous
	runtime.GC()
	runtime.GC() // 2x to clean up more... (#178)
	ngAfter := runtime.NumGoroutine()
	t.Logf("Number of go routine after 2nd test %d", ngAfter)
	if err != nil {
		t.Error(err)
		return
	}
	httpOk = res.RetCodes[http.StatusOK]
	if opts.Exactly != httpOk {
		t.Errorf("Run2: Mismatch between requested calls %d and ok %v", numCalls, res.RetCodes)
	}
	// allow for ~8 goroutine variance, as we use 50 if we leak it will show (was failing before #167)
	if ngAfter > ngBefore2+8 {
		t.Errorf("Goroutines after test %d, expected it to stay near %d", ngAfter, ngBefore2)
	}
	if !opts.DisableFastClient {
		// only fast client so far has a socket count
		if res.SocketCount != res.RunnerResults.NumThreads {
			t.Errorf("%d socket used, expected same as thread# %d", res.SocketCount, res.RunnerResults.NumThreads)
		}
	}
}
+
// TestHttpNotLeakingFastClient runs the leak check with the default (fast) client.
func TestHttpNotLeakingFastClient(t *testing.T) {
	testHTTPNotLeaking(t, &HTTPRunnerOptions{})
}
+
// TestHttpNotLeakingStdClient runs the leak check with the net/http based std client.
func TestHttpNotLeakingStdClient(t *testing.T) {
	testHTTPNotLeaking(t, &HTTPRunnerOptions{HTTPOptions: HTTPOptions{DisableFastClient: true}})
}
+
// TestHTTPRunnerClientRace runs two tests concurrently over copies of the same
// options to surface data races (meaningful under `go test -race`). The result
// of the background run is intentionally discarded.
func TestHTTPRunnerClientRace(t *testing.T) {
	mux, addr := DynamicHTTPServer(false)
	mux.HandleFunc("/echo1/", EchoHandler)
	URL := fmt.Sprintf("http://localhost:%d/echo1/", addr.Port)

	opts := HTTPRunnerOptions{}
	opts.Init(URL)
	opts.QPS = 100
	opts2 := opts
	go RunHTTPTest(&opts2)
	res, err := RunHTTPTest(&opts)
	if err != nil {
		t.Error(err)
		return
	}
	totalReq := res.DurationHistogram.Count
	httpOk := res.RetCodes[http.StatusOK]
	if totalReq != httpOk {
		t.Errorf("Mismatch between requests %d and ok %v", totalReq, res.RetCodes)
	}
}
+
// TestClosingAndSocketCount checks that with ?close=1 (server closes each
// connection) the runner reports exactly one socket per request.
func TestClosingAndSocketCount(t *testing.T) {
	mux, addr := DynamicHTTPServer(false)
	mux.HandleFunc("/echo42/", EchoHandler)
	URL := fmt.Sprintf("http://localhost:%d/echo42/?close=1", addr.Port)
	opts := HTTPRunnerOptions{}
	opts.Init(URL)
	opts.QPS = 10
	numReq := int64(50) // can't do too many without running out of fds on mac
	opts.Exactly = numReq
	opts.NumThreads = 5
	res, err := RunHTTPTest(&opts)
	if err != nil {
		t.Fatal(err)
	}
	totalReq := res.DurationHistogram.Count
	if totalReq != numReq {
		t.Errorf("Mismatch between requests %d and expected %d", totalReq, numReq)
	}
	httpOk := res.RetCodes[http.StatusOK]
	if totalReq != httpOk {
		t.Errorf("Mismatch between requests %d and ok %v", totalReq, res.RetCodes)
	}
	if int64(res.SocketCount) != numReq {
		t.Errorf("When closing, got %d while expected as many sockets as requests %d", res.SocketCount, numReq)
	}
}
+
// TestHTTPRunnerBadServer checks that a protocol mismatch (plain http against
// the dummy https server) makes the run fail fast with an error.
func TestHTTPRunnerBadServer(t *testing.T) {
	// Using http to an https server (or the current 'close all' dummy https server)
	// should fail:
	_, addr := DynamicHTTPServer(true)
	baseURL := fmt.Sprintf("http://localhost:%d/", addr.Port)

	opts := HTTPRunnerOptions{}
	opts.QPS = 10
	opts.Init(baseURL)
	_, err := RunHTTPTest(&opts)
	if err == nil {
		t.Fatal("Expecting an error but didn't get it when connecting to bad server")
	}
	log.Infof("Got expected error from mismatch/bad server: %v", err)
}
+
// need to be the last test as it installs Serve() which would make
// the error test for / url above fail:

// TestServe starts the debug server on a dynamic port and checks the debug
// endpoint echoes enough data, including multi-value extra headers.
func TestServe(t *testing.T) {
	_, addr := ServeTCP("0", "/debugx1")
	port := addr.Port
	log.Infof("On addr %s found port: %d", addr, port)
	url := fmt.Sprintf("http://localhost:%d/debugx1?env=dump", port)
	if port == 0 {
		t.Errorf("outport: %d must be different", port)
	}
	// Give the server goroutine a moment to be ready before fetching:
	time.Sleep(100 * time.Millisecond)
	o := NewHTTPOptions(url)
	o.AddAndValidateExtraHeader("X-Header: value1")
	o.AddAndValidateExtraHeader("X-Header: value2")
	code, data, _ := NewClient(o).Fetch()
	if code != http.StatusOK {
		t.Errorf("Unexpected non 200 ret code for debug url %s : %d", url, code)
	}
	if len(data) <= 100 {
		t.Errorf("Unexpected short data for debug url %s : %s", url, DebugSummary(data, 101))
	}
	if !strings.Contains(string(data), "X-Header: value1,value2") {
		t.Errorf("Multi header not found in %s", DebugSummary(data, 1024))
	}
}
+
// TestAbortOn covers the three AbortOn cases: matching 404 aborts early,
// a non-matching code lets all requests run, and matching 200 aborts early.
func TestAbortOn(t *testing.T) {
	mux, addr := DynamicHTTPServer(false)
	mux.HandleFunc("/foo/", EchoHandler)
	baseURL := fmt.Sprintf("http://localhost:%d/", addr.Port)
	o := HTTPRunnerOptions{}
	o.URL = baseURL
	o.AbortOn = 404
	o.Exactly = 40
	o.NumThreads = 4
	o.QPS = 10
	r, err := RunHTTPTest(&o)
	if err != nil {
		t.Errorf("Error while starting runner1: %v", err)
	}
	count := r.Result().DurationHistogram.Count
	// At most one in-flight request per thread can complete before the abort:
	if count > int64(o.NumThreads) {
		t.Errorf("Abort1 not working, did %d requests expecting ideally 1 and <= %d", count, o.NumThreads)
	}
	o.URL += "foo/"
	r, err = RunHTTPTest(&o)
	if err != nil {
		t.Errorf("Error while starting runner2: %v", err)
	}
	count = r.Result().DurationHistogram.Count
	if count != o.Exactly {
		t.Errorf("Did %d requests when expecting all %d (non matching AbortOn)", count, o.Exactly)
	}
	o.AbortOn = 200
	r, err = RunHTTPTest(&o)
	if err != nil {
		t.Errorf("Error while starting runner3: %v", err)
	}
	count = r.Result().DurationHistogram.Count
	if count > int64(o.NumThreads) {
		t.Errorf("Abort2 not working, did %d requests expecting ideally 1 and <= %d", count, o.NumThreads)
	}
}
diff --git a/vendor/fortio.org/fortio/fnet/network.go b/vendor/fortio.org/fortio/fnet/network.go
new file mode 100644
index 0000000000..66aa8477bd
--- /dev/null
+++ b/vendor/fortio.org/fortio/fnet/network.go
@@ -0,0 +1,321 @@
+// Copyright 2017 Istio Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fnet // import "fortio.org/fortio/fnet"
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+
+ "fortio.org/fortio/log"
+ "fortio.org/fortio/version"
+)
+
+const (
+ // DefaultGRPCPort is the Fortio gRPC server default port number.
+ DefaultGRPCPort = "8079"
+ // StandardHTTPPort is the Standard http port number.
+ StandardHTTPPort = "80"
+ // StandardHTTPSPort is the Standard https port number.
+ StandardHTTPSPort = "443"
+ // PrefixHTTP is a constant value for representing http protocol that can be added prefix of url
+ PrefixHTTP = "http://"
+ // PrefixHTTPS is a constant value for representing secure http protocol that can be added prefix of url
+ PrefixHTTPS = "https://"
+
+ // POST is a constant value that indicates http method as post
+ POST = "POST"
+ // GET is a constant value that indicates http method as get
+ GET = "GET"
+ // UnixDomainSocket type for network addresses.
+ UnixDomainSocket = "unix"
+)
+
+var (
+ // MaxPayloadSize is the maximum size of payload to be generated by the
+ // EchoHandler size= argument. In bytes.
+ MaxPayloadSize = 256 * 1024
+ // Payload that is returned during echo call
+ Payload []byte
+)
+
// init pre-fills the shared Payload buffer at package load time.
func init() {
	ChangeMaxPayloadSize(MaxPayloadSize)
}

// ChangeMaxPayloadSize is used to change max payload size and fill it with pseudorandom content.
// Negative input is clamped to 0 (empty payload).
func ChangeMaxPayloadSize(newMaxPayloadSize int) {
	if newMaxPayloadSize >= 0 {
		MaxPayloadSize = newMaxPayloadSize
	} else {
		MaxPayloadSize = 0
	}
	Payload = make([]byte, MaxPayloadSize)
	// One shared and 'constant' (over time) but pseudo random content for payload
	// (to defeat compression). We don't need crypto strength here, just low cpu
	// and speed:
	_, err := rand.Read(Payload)
	if err != nil {
		log.Errf("Error changing payload size, read for %d random payload failed: %v", newMaxPayloadSize, err)
	}
}
+
// NormalizePort parses port and returns host:port if port is in the form
// of host:port already or :port if port is only a port (doesn't contain :).
func NormalizePort(port string) string {
	// Contains (not ContainsAny): we test for a single character, not a set.
	if strings.Contains(port, ":") {
		return port
	}
	return ":" + port
}
+
// Listen returns a listener for the port. Port can be a port or a
// bind address and a port (e.g. "8080" or "[::1]:8080"...). If the
// port component is 0 a free port will be returned by the system.
// If the port is a pathname (contains a /) a unix domain socket listener
// will be used instead of regular tcp socket.
// This logs critical on error and returns nil (is meant for servers
// that must start).
func Listen(name string, port string) (net.Listener, net.Addr) {
	sockType := "tcp"
	nPort := port
	if strings.Contains(port, "/") {
		// Looks like a path: listen on a unix domain socket, address used verbatim.
		sockType = UnixDomainSocket
	} else {
		nPort = NormalizePort(port)
	}
	listener, err := net.Listen(sockType, nPort)
	if err != nil {
		log.Critf("Can't listen to %s socket %v (%v) for %s: %v", sockType, port, nPort, name, err)
		return nil, nil
	}
	lAddr := listener.Addr()
	// An empty name suppresses the startup banner.
	if len(name) > 0 {
		fmt.Printf("Fortio %s %s server listening on %s\n", version.Short(), name, lAddr)
	}
	return listener, lAddr
}
+
+// GetPort extracts the port for TCP sockets and the path for unix domain sockets.
+func GetPort(lAddr net.Addr) string {
+ var lPort string
+ // Note: might panic if called with something else than unix or tcp socket addr, it's ok.
+ if lAddr.Network() == UnixDomainSocket {
+ lPort = lAddr.(*net.UnixAddr).Name
+ } else {
+ lPort = strconv.Itoa(lAddr.(*net.TCPAddr).Port)
+ }
+ return lPort
+}
+
+// ResolveDestination returns the TCP address of the "host:port" suitable for net.Dial.
+// nil in case of errors.
+func ResolveDestination(dest string) net.Addr {
+ i := strings.LastIndex(dest, ":") // important so [::1]:port works
+ if i < 0 {
+ log.Errf("Destination '%s' is not host:port format", dest)
+ return nil
+ }
+ host := dest[0:i]
+ port := dest[i+1:]
+ return Resolve(host, port)
+}
+
// Resolve returns the TCP address of the host,port suitable for net.Dial.
// nil in case of errors. When DNS returns several addresses only the first
// one is used.
func Resolve(host string, port string) net.Addr {
	log.Debugf("Resolve() called with host=%s port=%s", host, port)
	dest := &net.TCPAddr{}
	if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
		log.Debugf("host %s looks like an IPv6, stripping []", host)
		host = host[1 : len(host)-1]
	}
	isAddr := net.ParseIP(host)
	var err error
	if isAddr != nil {
		log.Debugf("Host already an IP, will go to %s", isAddr)
		dest.IP = isAddr
	} else {
		var addrs []net.IP
		addrs, err = net.LookupIP(host)
		if err != nil {
			log.Errf("Unable to lookup '%s' : %v", host, err)
			return nil
		}
		if len(addrs) > 1 && log.LogDebug() {
			log.Debugf("Using only the first of the addresses for %s : %v", host, addrs)
		}
		log.Debugf("Will go to %s", addrs[0])
		dest.IP = addrs[0]
	}
	// port may be numeric or a service name ("http"); LookupPort handles both.
	dest.Port, err = net.LookupPort("tcp", port)
	if err != nil {
		log.Errf("Unable to resolve port '%s' : %v", port, err)
		return nil
	}
	return dest
}
+
// transfer copies all bytes from src to dst (one direction of a proxied
// connection), then half-closes that direction: read side of src, write side
// of dst, so the peer sees EOF while the opposite direction can still drain.
func transfer(wg *sync.WaitGroup, dst net.Conn, src net.Conn) {
	n, oErr := io.Copy(dst, src) // keep original error for logs below
	log.LogVf("Proxy: transferred %d bytes from %v to %v (err=%v)", n, src.RemoteAddr(), dst.RemoteAddr(), oErr)
	sTCP, ok := src.(*net.TCPConn)
	if ok {
		err := sTCP.CloseRead()
		if err != nil { // We got an eof so it's already half closed.
			log.LogVf("Proxy: semi expected error CloseRead on src %v: %v,%v", src.RemoteAddr(), err, oErr)
		}
	}
	dTCP, ok := dst.(*net.TCPConn)
	if ok {
		err := dTCP.CloseWrite()
		if err != nil {
			log.Errf("Proxy: error CloseWrite on dst %v: %v,%v", dst.RemoteAddr(), err, oErr)
		}
	}
	wg.Done()
}
+
// handleProxyRequest dials dest and shuttles bytes in both directions between
// conn and the destination until both directions complete, then closes both.
func handleProxyRequest(conn net.Conn, dest net.Addr) {
	// Pre-seeded error covers the dest == nil case without a separate branch.
	err := fmt.Errorf("nil destination")
	var d net.Conn
	if dest != nil {
		d, err = net.Dial(dest.Network(), dest.String())
	}
	if err != nil {
		log.Errf("Proxy: unable to connect to %v for %v : %v", dest, conn.RemoteAddr(), err)
		_ = conn.Close()
		return
	}
	var wg sync.WaitGroup
	wg.Add(2)
	// One direction on a new goroutine, the other inline on this one:
	go transfer(&wg, d, conn)
	transfer(&wg, conn, d)
	wg.Wait()
	log.LogVf("Proxy: both sides of transfer to %v for %v done", dest, conn.RemoteAddr())
	// Not checking as we are closing/ending anyway - note: bad side effect of coverage...
	_ = d.Close()
	_ = conn.Close()
}
+
// Proxy starts a tcp proxy.
// It returns the listening address (nil on listen failure) and keeps serving
// in a background goroutine, spawning one goroutine per accepted connection.
func Proxy(port string, dest net.Addr) net.Addr {
	listener, lAddr := Listen(fmt.Sprintf("proxy for %v", dest), port)
	if listener == nil {
		return nil // error already logged
	}
	go func() {
		for {
			conn, err := listener.Accept()
			if err != nil {
				log.Critf("Proxy: error accepting: %v", err) // will this loop with error?
			} else {
				log.LogVf("Proxy: Accepted proxy connection from %v -> %v (for listener %v)",
					conn.RemoteAddr(), conn.LocalAddr(), dest)
				// TODO limit number of go request, use worker pool, etc...
				go handleProxyRequest(conn, dest)
			}
		}
	}()
	return lAddr
}
+
// ProxyToDestination opens a proxy from the listenPort (or addr:port or unix domain socket path) and forwards
// all traffic to destination (host:port). Convenience wrapper around
// ResolveDestination + Proxy.
func ProxyToDestination(listenPort string, destination string) net.Addr {
	return Proxy(listenPort, ResolveDestination(destination))
}
+
+// NormalizeHostPort generates host:port string for the address or uses localhost instead of [::]
+// when the original port binding input didn't specify an address
+func NormalizeHostPort(inputPort string, addr net.Addr) string {
+ urlHostPort := addr.String()
+ if addr.Network() == UnixDomainSocket {
+ urlHostPort = fmt.Sprintf("-unix-socket=%s", urlHostPort)
+ } else {
+ if strings.HasPrefix(inputPort, ":") || !strings.Contains(inputPort, ":") {
+ urlHostPort = fmt.Sprintf("localhost:%d", addr.(*net.TCPAddr).Port)
+ }
+ }
+ return urlHostPort
+}
+
// ValidatePayloadSize compares input size with MaxPayLoadSize. If size exceeds the MaxPayloadSize
// size will set to MaxPayLoadSize. Negative sizes are normalized to 0.
// Mutates *size in place and logs a warning on any adjustment.
func ValidatePayloadSize(size *int) {
	if *size > MaxPayloadSize && *size > 0 {
		log.Warnf("Requested size %d greater than max size %d, using max instead (change max using -maxpayloadsizekb)",
			*size, MaxPayloadSize)
		*size = MaxPayloadSize
	} else if *size < 0 {
		log.Warnf("Requested size %d is negative, using 0 (no additional payload) instead.", *size)
		*size = 0
	}
}
+
// GenerateRandomPayload generates a random payload with given input size.
// It returns a view (not a copy) of the shared package-level Payload buffer,
// with the size clamped to [0, MaxPayloadSize] by ValidatePayloadSize.
func GenerateRandomPayload(payloadSize int) []byte {
	ValidatePayloadSize(&payloadSize)
	return Payload[:payloadSize]
}
+
+// ReadFileForPayload reads the file from given input path
+func ReadFileForPayload(payloadFilePath string) ([]byte, error) {
+ data, err := ioutil.ReadFile(payloadFilePath)
+ if err != nil {
+ return nil, err
+ }
+ return data, nil
+}
+
+// GeneratePayload generates a payload with given inputs.
+// First tries filePath, then random payload, at last payload
+func GeneratePayload(payloadFilePath string, payloadSize int, payload string) []byte {
+ if len(payloadFilePath) > 0 {
+ p, err := ReadFileForPayload(payloadFilePath)
+ if err != nil {
+ log.Warnf("File read operation is failed %v", err)
+ return nil
+ }
+ return p
+ } else if payloadSize > 0 {
+ return GenerateRandomPayload(payloadSize)
+ } else {
+ return []byte(payload)
+ }
+}
+
// GetUniqueUnixDomainPath returns a path to be used for unix domain socket.
// It reserves a unique name by creating then deleting a temp file: the path
// must not exist for a later bind() on it to succeed.
func GetUniqueUnixDomainPath(prefix string) string {
	if prefix == "" {
		prefix = "fortio-uds"
	}
	f, err := ioutil.TempFile(os.TempDir(), prefix)
	if err != nil {
		log.Errf("Unable to generate temp file with prefix %s: %v", prefix, err)
		// Fixed fallback path: not unique, but lets the caller proceed.
		return "/tmp/fortio-default-uds"
	}
	fname := f.Name()
	_ = f.Close()
	// for the bind to succeed we need the file to not pre exist:
	_ = os.Remove(fname)
	return fname
}
diff --git a/vendor/fortio.org/fortio/fnet/network_test.go b/vendor/fortio.org/fortio/fnet/network_test.go
new file mode 100644
index 0000000000..6486f5d85d
--- /dev/null
+++ b/vendor/fortio.org/fortio/fnet/network_test.go
@@ -0,0 +1,382 @@
+// Copyright 2017 Istio Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fnet
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "strings"
+ "testing"
+
+ "bytes"
+
+ "fortio.org/fortio/log"
+ "fortio.org/fortio/version"
+)
+
+func TestNormalizePort(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ output string
+ }{
+ {
+ "port number only",
+ "8080",
+ ":8080",
+ },
+ {
+ "IPv4 host:port",
+ "10.10.10.1:8080",
+ "10.10.10.1:8080",
+ },
+ {
+ "IPv6 [host]:port",
+ "[2001:db1::1]:8080",
+ "[2001:db1::1]:8080",
+ },
+ }
+
+ for _, tc := range tests {
+ port := NormalizePort(tc.input)
+ if port != tc.output {
+			t.Errorf("Test case %s failed to normalize port %s\n\texpected: %s\n\t actual: %s",
+ tc.name,
+ tc.input,
+ tc.output,
+ port,
+ )
+ }
+ }
+}
+
+func TestListen(t *testing.T) {
+ l, a := Listen("test listen1", "0")
+ if l == nil || a == nil {
+ t.Fatalf("Unexpected nil in Listen() %v %v", l, a)
+ }
+ if a.(*net.TCPAddr).Port == 0 {
+ t.Errorf("Unexpected 0 port after listen %+v", a)
+ }
+ _ = l.Close() // nolint: gas
+}
+
+func TestListenFailure(t *testing.T) {
+ _, a1 := Listen("test listen2", "0")
+ if a1.(*net.TCPAddr).Port == 0 {
+ t.Errorf("Unexpected 0 port after listen %+v", a1)
+ }
+ l, a := Listen("this should fail", GetPort(a1))
+ if l != nil || a != nil {
+ t.Errorf("listen that should error got %v %v instead of nil", l, a)
+ }
+}
+
+func TestResolveDestination(t *testing.T) {
+ tests := []struct {
+ name string
+ destination string
+ want string
+ }{
+ // Error cases:
+ {"missing :", "foo", ""},
+ {"using ip:bogussvc", "8.8.8.8:doesnotexisthopefully", ""},
+ {"using bogus hostname", "doesnotexist.istio.io:443", ""},
+ // Good cases:
+ {"using ip:portname", "8.8.8.8:http", "8.8.8.8:80"},
+ {"using ip:port", "8.8.8.8:12345", "8.8.8.8:12345"},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := ResolveDestination(tt.destination)
+ gotStr := ""
+ if got != nil {
+ gotStr = got.String()
+ }
+ if gotStr != tt.want {
+ t.Errorf("ResolveDestination(%s) = %v, want %s", tt.destination, got, tt.want)
+ }
+ })
+ }
+}
+
+func TestResolveDestinationMultipleIps(t *testing.T) {
+ addr := ResolveDestination("www.google.com:443")
+ t.Logf("Found google addr %+v", addr)
+ if addr == nil {
+ t.Error("got nil address for google")
+ }
+}
+
+func TestProxy(t *testing.T) {
+ addr := ProxyToDestination(":0", "www.google.com:80")
+ dAddr := net.TCPAddr{Port: addr.(*net.TCPAddr).Port}
+ d, err := net.DialTCP("tcp", nil, &dAddr)
+ if err != nil {
+ t.Fatalf("can't connect to our proxy: %v", err)
+ }
+ defer d.Close()
+ data := "HEAD / HTTP/1.0\r\nUser-Agent: fortio-unit-test-" + version.Long() + "\r\n\r\n"
+ d.Write([]byte(data))
+ d.CloseWrite()
+ res := make([]byte, 4096)
+ n, err := d.Read(res)
+ if err != nil {
+ t.Errorf("read error with proxy: %v", err)
+ }
+ resStr := string(res[:n])
+ expectedStart := "HTTP/1.0 200 OK\r\n"
+ if !strings.HasPrefix(resStr, expectedStart) {
+ t.Errorf("Unexpected reply '%q', expected starting with '%q'", resStr, expectedStart)
+ }
+}
+
+func TestBadGetUniqueUnixDomainPath(t *testing.T) {
+ badPath := []byte{0x41, 0, 0x42}
+ fname := GetUniqueUnixDomainPath(string(badPath))
+ if fname != "/tmp/fortio-default-uds" {
+ t.Errorf("Got %s when expecting default/error case for bad prefix", fname)
+ }
+}
+
+func TestDefaultGetUniqueUnixDomainPath(t *testing.T) {
+ n1 := GetUniqueUnixDomainPath("")
+ n2 := GetUniqueUnixDomainPath("")
+ if n1 == n2 {
+ t.Errorf("Got %s and %s when expecting unique names", n1, n2)
+ }
+}
+
+func TestUnixDomain(t *testing.T) {
+ // Test through the proxy as well (which indirectly tests Listen)
+ fname := GetUniqueUnixDomainPath("fortio-uds-test")
+ addr := ProxyToDestination(fname, "www.google.com:80")
+ defer os.Remove(fname) // to not leak the temp socket
+ if addr == nil {
+ t.Fatalf("Nil socket in unix socket proxy listen")
+ }
+ hp := NormalizeHostPort("", addr)
+ expected := fmt.Sprintf("-unix-socket=%s", fname)
+ if hp != expected {
+ t.Errorf("Got %s, expected %s from NormalizeHostPort(%v)", hp, expected, addr)
+ }
+ dAddr := net.UnixAddr{Name: fname, Net: UnixDomainSocket}
+ d, err := net.DialUnix(UnixDomainSocket, nil, &dAddr)
+ if err != nil {
+ t.Fatalf("can't connect to our proxy using unix socket %v: %v", fname, err)
+ }
+ defer d.Close()
+ data := "HEAD / HTTP/1.0\r\nUser-Agent: fortio-unit-test-" + version.Long() + "\r\n\r\n"
+ d.Write([]byte(data))
+ d.CloseWrite()
+ res := make([]byte, 4096)
+ n, err := d.Read(res)
+ if err != nil {
+ t.Errorf("read error with proxy: %v", err)
+ }
+ resStr := string(res[:n])
+ expectedStart := "HTTP/1.0 200 OK\r\n"
+ if !strings.HasPrefix(resStr, expectedStart) {
+ t.Errorf("Unexpected reply '%q', expected starting with '%q'", resStr, expectedStart)
+ }
+
+}
+func TestProxyErrors(t *testing.T) {
+ addr := ProxyToDestination(":0", "doesnotexist.istio.io:80")
+ dAddr := net.TCPAddr{Port: addr.(*net.TCPAddr).Port}
+ d, err := net.DialTCP("tcp", nil, &dAddr)
+ if err != nil {
+ t.Fatalf("can't connect to our proxy: %v", err)
+ }
+ defer d.Close()
+ res := make([]byte, 4096)
+ n, err := d.Read(res)
+ if err == nil {
+ t.Errorf("didn't get expected error with proxy %d", n)
+ }
+ // 2nd proxy on same port should fail
+ addr2 := ProxyToDestination(GetPort(addr), "www.google.com:80")
+ if addr2 != nil {
+ t.Errorf("Second proxy on same port should have failed, got %+v", addr2)
+ }
+}
+func TestResolveIpV6(t *testing.T) {
+ addr := Resolve("[::1]", "http")
+ addrStr := addr.String()
+ expected := "[::1]:80"
+ if addrStr != expected {
+ t.Errorf("Got '%s' instead of '%s'", addrStr, expected)
+ }
+}
+
+func TestJoinHostAndPort(t *testing.T) {
+ var tests = []struct {
+ inputPort string
+ addr *net.TCPAddr
+ expected string
+ }{
+ {"8080", &net.TCPAddr{
+ IP: []byte{192, 168, 2, 3},
+ Port: 8081,
+ }, "localhost:8081"},
+ {"192.168.30.14:8081", &net.TCPAddr{
+ IP: []byte{192, 168, 30, 15},
+ Port: 8080,
+ }, "192.168.30.15:8080"},
+ {":8080",
+ &net.TCPAddr{
+ IP: []byte{0, 0, 0, 1},
+ Port: 8080,
+ },
+ "localhost:8080"},
+ {"",
+ &net.TCPAddr{
+ IP: []byte{192, 168, 30, 14},
+ Port: 9090,
+ }, "localhost:9090"},
+ {"http",
+ &net.TCPAddr{
+ IP: []byte{192, 168, 30, 14},
+ Port: 9090,
+ }, "localhost:9090"},
+ {"192.168.30.14:9090",
+ &net.TCPAddr{
+ IP: []byte{192, 168, 30, 14},
+ Port: 9090,
+ }, "192.168.30.14:9090"},
+ }
+ for _, test := range tests {
+ urlHostPort := NormalizeHostPort(test.inputPort, test.addr)
+ if urlHostPort != test.expected {
+ t.Errorf("%s is received but %s was expected", urlHostPort, test.expected)
+ }
+ }
+}
+
+func TestChangeMaxPayloadSize(t *testing.T) {
+ var tests = []struct {
+ input int
+ expected int
+ }{
+ // negative test cases
+ {-1, 0},
+ // lesser than current default
+ {0, 0},
+ {64, 64},
+ // Greater than current default
+ {987 * 1024, 987 * 1024},
+ }
+ for _, tst := range tests {
+ ChangeMaxPayloadSize(tst.input)
+ actual := len(Payload)
+ if len(Payload) != tst.expected {
+ t.Errorf("Got %d, expected %d for ChangeMaxPayloadSize(%d)", actual, tst.expected, tst.input)
+ }
+ }
+}
+
+func TestValidatePayloadSize(t *testing.T) {
+ ChangeMaxPayloadSize(256 * 1024)
+ var tests = []struct {
+ input int
+ expected int
+ }{
+ {257 * 1024, MaxPayloadSize},
+ {10, 10},
+ {0, 0},
+ {-1, 0},
+ }
+ for _, test := range tests {
+ size := test.input
+ ValidatePayloadSize(&size)
+ if size != test.expected {
+ t.Errorf("Got %d, expected %d for ValidatePayloadSize(%d)", size, test.expected, test.input)
+ }
+ }
+}
+
+func TestGenerateRandomPayload(t *testing.T) {
+ ChangeMaxPayloadSize(256 * 1024)
+ var tests = []struct {
+ input int
+ expected int
+ }{
+ {257 * 1024, MaxPayloadSize},
+ {10, 10},
+ {0, 0},
+ {-1, 0},
+ }
+ for _, test := range tests {
+ text := GenerateRandomPayload(test.input)
+ if len(text) != test.expected {
+ t.Errorf("Got %d, expected %d for GenerateRandomPayload(%d) payload size", len(text), test.expected, test.input)
+ }
+ }
+}
+
+func TestReadFileForPayload(t *testing.T) {
+ var tests = []struct {
+ payloadFile string
+ expectedText []byte
+ }{
+ {payloadFile: "../.testdata/payloadTest1.txt", expectedText: []byte("{\"test\":\"test\"}")},
+ {payloadFile: "", expectedText: nil},
+ }
+
+ for _, test := range tests {
+ data, err := ReadFileForPayload(test.payloadFile)
+ if err != nil && len(test.expectedText) > 0 {
+			t.Errorf("ReadFileForPayload should not return an error")
+ }
+ if !bytes.Equal(data, test.expectedText) {
+ t.Errorf("Got %s, expected %s for ReadFileForPayload()", string(data), string(test.expectedText))
+ }
+ }
+}
+
+func TestGeneratePayload(t *testing.T) {
+ var tests = []struct {
+ payloadFile string
+ payloadSize int
+ payload string
+ expectedResLen int
+ }{
+ {payloadFile: "../.testdata/payloadTest1.txt", payloadSize: 123, payload: "",
+ expectedResLen: len("{\"test\":\"test\"}")},
+ {payloadFile: "nottestmock", payloadSize: 0, payload: "{\"test\":\"test1\"}",
+ expectedResLen: 0},
+ {payloadFile: "", payloadSize: 123, payload: "{\"test\":\"test1\"}",
+ expectedResLen: 123},
+ {payloadFile: "", payloadSize: 0, payload: "{\"test\":\"test1\"}",
+ expectedResLen: len("{\"test\":\"test1\"}")},
+ {payloadFile: "", payloadSize: 0, payload: "",
+ expectedResLen: 0},
+ }
+
+ for _, test := range tests {
+ payload := GeneratePayload(test.payloadFile, test.payloadSize, test.payload)
+ if len(payload) != test.expectedResLen {
+ t.Errorf("Got %d, expected %d for GeneratePayload() as payload length", len(payload),
+ test.expectedResLen)
+ }
+ }
+}
+
+// --- max logging for tests
+
+func init() {
+ log.SetLogLevel(log.Debug)
+}
diff --git a/vendor/fortio.org/fortio/fortio_main.go b/vendor/fortio.org/fortio/fortio_main.go
new file mode 100644
index 0000000000..f32b0f2dcd
--- /dev/null
+++ b/vendor/fortio.org/fortio/fortio_main.go
@@ -0,0 +1,378 @@
+// Copyright 2017 Istio Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+// Do not add any external dependencies we want to keep fortio minimal.
+
+import (
+ "encoding/json"
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "runtime"
+ "strings"
+ "time"
+
+ "fortio.org/fortio/bincommon"
+ "fortio.org/fortio/fnet"
+
+ "fortio.org/fortio/fgrpc"
+ "fortio.org/fortio/fhttp"
+ "fortio.org/fortio/log"
+ "fortio.org/fortio/periodic"
+ "fortio.org/fortio/stats"
+ "fortio.org/fortio/ui"
+ "fortio.org/fortio/version"
+)
+
+// -- Support for multiple proxies (-P) flags on cmd line:
+type proxiesFlagList struct {
+}
+
+func (f *proxiesFlagList) String() string {
+ return ""
+}
+func (f *proxiesFlagList) Set(value string) error {
+ proxies = append(proxies, value)
+ return nil
+}
+
+// -- end of functions for -P support
+
+// Usage to a writer
+func usage(w io.Writer, msgs ...interface{}) {
+ _, _ = fmt.Fprintf(w, "Φορτίο %s usage:\n\t%s command [flags] target\n%s\n%s\n%s\n%s\n",
+ version.Short(),
+ os.Args[0],
+ "where command is one of: load (load testing), server (starts grpc ping and",
+ "http echo/ui/redirect/proxy servers), grpcping (grpc client), report (report",
+ "only UI server), redirect (redirect only server), or curl (single URL debug).",
+ "where target is a url (http load tests) or host:port (grpc health test).")
+ bincommon.FlagsUsage(w, msgs...)
+}
+
+// Prints usage and error messages with StdErr writer
+func usageErr(msgs ...interface{}) {
+ usage(os.Stderr, msgs...)
+ os.Exit(1)
+}
+
+// Attention: every flag that is common to http client goes to bincommon/
+// for sharing between fortio and fcurl binaries
+
+const (
+ disabled = "disabled"
+)
+
+var (
+ defaults = &periodic.DefaultRunnerOptions
+ // Very small default so people just trying with random URLs don't affect the target
+ qpsFlag = flag.Float64("qps", defaults.QPS, "Queries Per Seconds or 0 for no wait/max qps")
+ numThreadsFlag = flag.Int("c", defaults.NumThreads, "Number of connections/goroutine/threads")
+ durationFlag = flag.Duration("t", defaults.Duration, "How long to run the test or 0 to run until ^C")
+ percentilesFlag = flag.String("p", "50,75,90,99,99.9", "List of pXX to calculate")
+ resolutionFlag = flag.Float64("r", defaults.Resolution, "Resolution of the histogram lowest buckets in seconds")
+ goMaxProcsFlag = flag.Int("gomaxprocs", 0, "Setting for runtime.GOMAXPROCS, <1 doesn't change the default")
+ profileFlag = flag.String("profile", "", "write .cpu and .mem profiles to `file`")
+ grpcFlag = flag.Bool("grpc", false, "Use GRPC (health check by default, add -ping for ping) for load testing")
+ httpsInsecureFlag = flag.Bool("https-insecure", false, "Long form of the -k flag")
+ certFlag = flag.String("cert", "", "`Path` to the certificate file to be used for GRPC server TLS")
+ keyFlag = flag.String("key", "", "`Path` to the key file used for GRPC server TLS")
+ caCertFlag = flag.String("cacert", "",
+ "`Path` to a custom CA certificate file to be used for the GRPC client TLS, "+
+ "if empty, use https:// prefix for standard internet CAs TLS")
+ echoPortFlag = flag.String("http-port", "8080",
+ "http echo server port. Can be in the form of host:port, ip:port, port or /unix/domain/path.")
+ grpcPortFlag = flag.String("grpc-port", fnet.DefaultGRPCPort,
+ "grpc server port. Can be in the form of host:port, ip:port or port or /unix/domain/path or \""+disabled+
+ "\" to not start the grpc server.")
+ echoDbgPathFlag = flag.String("echo-debug-path", "/debug",
+ "http echo server URI for debug, empty turns off that part (more secure)")
+ jsonFlag = flag.String("json", "",
+ "Json output to provided file `path` or '-' for stdout (empty = no json output, unless -a is used)")
+ uiPathFlag = flag.String("ui-path", "/fortio/", "http server URI for UI, empty turns off that part (more secure)")
+ curlFlag = flag.Bool("curl", false, "Just fetch the content once")
+ labelsFlag = flag.String("labels", "",
+ "Additional config data/labels to add to the resulting JSON, defaults to target URL and hostname")
+ staticDirFlag = flag.String("static-dir", "", "Absolute `path` to the dir containing the static files dir")
+ dataDirFlag = flag.String("data-dir", defaultDataDir, "`Directory` where JSON results are stored/read")
+ proxiesFlags proxiesFlagList
+ proxies = make([]string, 0)
+
+ defaultDataDir = "."
+
+ allowInitialErrorsFlag = flag.Bool("allow-initial-errors", false, "Allow and don't abort on initial warmup errors")
+ abortOnFlag = flag.Int("abort-on", 0, "Http code that if encountered aborts the run. e.g. 503 or -1 for socket errors.")
+ autoSaveFlag = flag.Bool("a", false, "Automatically save JSON result with filename based on labels & timestamp")
+ redirectFlag = flag.String("redirect-port", "8081", "Redirect all incoming traffic to https URL"+
+ " (need ingress to work properly). Can be in the form of host:port, ip:port, port or \""+disabled+"\" to disable the feature.")
+ exactlyFlag = flag.Int64("n", 0,
+ "Run for exactly this number of calls instead of duration. Default (0) is to use duration (-t). "+
+ "Default is 1 when used as grpc ping count.")
+ syncFlag = flag.String("sync", "", "index.tsv or s3/gcs bucket xml URL to fetch at startup for server modes.")
+ syncIntervalFlag = flag.Duration("sync-interval", 0, "Refresh the url every given interval (default, no refresh)")
+
+ baseURLFlag = flag.String("base-url", "",
+ "base URL used as prefix for data/index.tsv generation. (when empty, the url from the first request is used)")
+ newMaxPayloadSizeKb = flag.Int("maxpayloadsizekb", fnet.MaxPayloadSize/1024,
+ "MaxPayloadSize is the maximum size of payload to be generated by the EchoHandler size= argument. In Kbytes.")
+
+ // GRPC related flags
+ // To get most debugging/tracing:
+ // GODEBUG="http2debug=2" GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info fortio grpcping -loglevel debug
+ doHealthFlag = flag.Bool("health", false, "grpc ping client mode: use health instead of ping")
+ doPingLoadFlag = flag.Bool("ping", false, "grpc load test: use ping instead of health")
+ healthSvcFlag = flag.String("healthservice", "", "which service string to pass to health check")
+ pingDelayFlag = flag.Duration("grpc-ping-delay", 0, "grpc ping delay in response")
+ streamsFlag = flag.Int("s", 1, "Number of streams per grpc connection")
+
+ maxStreamsFlag = flag.Uint("grpc-max-streams", 0,
+ "MaxConcurrentStreams for the grpc server. Default (0) is to leave the option unset.")
+)
+
+func main() {
+ flag.Var(&proxiesFlags, "P", "Proxies to run, e.g -P \"localport1 dest_host1:dest_port1\" -P \"[::1]:0 www.google.com:443\" ...")
+ bincommon.SharedMain(usage)
+ if len(os.Args) < 2 {
+ usageErr("Error: need at least 1 command parameter")
+ }
+ command := os.Args[1]
+ os.Args = append([]string{os.Args[0]}, os.Args[2:]...)
+ flag.Parse()
+ fnet.ChangeMaxPayloadSize(*newMaxPayloadSizeKb * 1024)
+ if *bincommon.QuietFlag {
+ log.SetLogLevelQuiet(log.Error)
+ }
+ percList, err := stats.ParsePercentiles(*percentilesFlag)
+ if err != nil {
+ usageErr("Unable to extract percentiles from -p: ", err)
+ }
+ baseURL := strings.Trim(*baseURLFlag, " \t\n\r/") // remove trailing slash and other whitespace
+ sync := strings.TrimSpace(*syncFlag)
+ if sync != "" {
+ if !ui.Sync(os.Stdout, sync, *dataDirFlag) {
+ os.Exit(1)
+ }
+ }
+ isServer := false
+ switch command {
+ case "curl":
+ fortioLoad(true, nil)
+ case "load":
+ fortioLoad(*curlFlag, percList)
+ case "redirect":
+ isServer = true
+ fhttp.RedirectToHTTPS(*redirectFlag)
+ case "report":
+ isServer = true
+ if *redirectFlag != disabled {
+ fhttp.RedirectToHTTPS(*redirectFlag)
+ }
+ if !ui.Report(baseURL, *echoPortFlag, *staticDirFlag, *dataDirFlag) {
+ os.Exit(1) // error already logged
+ }
+ case "server":
+ isServer = true
+ if *grpcPortFlag != disabled {
+ fgrpc.PingServer(*grpcPortFlag, *certFlag, *keyFlag, fgrpc.DefaultHealthServiceName, uint32(*maxStreamsFlag))
+ }
+ if *redirectFlag != disabled {
+ fhttp.RedirectToHTTPS(*redirectFlag)
+ }
+ if !ui.Serve(baseURL, *echoPortFlag, *echoDbgPathFlag, *uiPathFlag, *staticDirFlag, *dataDirFlag, percList) {
+ os.Exit(1) // error already logged
+ }
+ for _, proxy := range proxies {
+ s := strings.SplitN(proxy, " ", 2)
+ if len(s) != 2 {
+ log.Errf("Invalid syntax for proxy \"%s\", should be \"localAddr destHost:destPort\"", proxy)
+ }
+ fnet.ProxyToDestination(s[0], s[1])
+ }
+ case "grpcping":
+ grpcClient()
+ default:
+ usageErr("Error: unknown command ", command)
+ }
+ if isServer {
+ // To get a start time log/timestamp in the logs
+ log.Infof("All fortio %s servers started!", version.Long())
+ d := *syncIntervalFlag
+ if sync != "" && d > 0 {
+ log.Infof("Will re-sync data dir every %s", d)
+ ticker := time.NewTicker(d)
+ defer ticker.Stop()
+ for range ticker.C {
+ ui.Sync(os.Stdout, sync, *dataDirFlag)
+ }
+ } else {
+ select {}
+ }
+ }
+}
+
+func fortioLoad(justCurl bool, percList []float64) {
+ if len(flag.Args()) != 1 {
+ usageErr("Error: fortio load/curl needs a url or destination")
+ }
+ httpOpts := bincommon.SharedHTTPOptions()
+ if *httpsInsecureFlag {
+ httpOpts.Insecure = true
+ }
+ if justCurl {
+ bincommon.FetchURL(httpOpts)
+ return
+ }
+ url := httpOpts.URL
+ prevGoMaxProcs := runtime.GOMAXPROCS(*goMaxProcsFlag)
+ out := os.Stderr
+ qps := *qpsFlag // TODO possibly use translated <=0 to "max" from results/options normalization in periodic/
+ _, _ = fmt.Fprintf(out, "Fortio %s running at %g queries per second, %d->%d procs",
+ version.Short(), qps, prevGoMaxProcs, runtime.GOMAXPROCS(0))
+ if *exactlyFlag > 0 {
+ _, _ = fmt.Fprintf(out, ", for %d calls: %s\n", *exactlyFlag, url)
+ } else {
+ if *durationFlag <= 0 {
+ // Infinite mode is determined by having a negative duration value
+ *durationFlag = -1
+ _, _ = fmt.Fprintf(out, ", until interrupted: %s\n", url)
+ } else {
+ _, _ = fmt.Fprintf(out, ", for %v: %s\n", *durationFlag, url)
+ }
+ }
+ if qps <= 0 {
+		qps = -1 // 0==uninitialized struct == default duration, -1 (0 for flag) is max
+ }
+ labels := *labelsFlag
+ if labels == "" {
+ hname, _ := os.Hostname()
+ shortURL := url
+ for _, p := range []string{"https://", "http://"} {
+ if strings.HasPrefix(url, p) {
+ shortURL = url[len(p):]
+ break
+ }
+ }
+ labels = shortURL + " , " + strings.SplitN(hname, ".", 2)[0]
+ log.LogVf("Generated Labels: %s", labels)
+ }
+ ro := periodic.RunnerOptions{
+ QPS: qps,
+ Duration: *durationFlag,
+ NumThreads: *numThreadsFlag,
+ Percentiles: percList,
+ Resolution: *resolutionFlag,
+ Out: out,
+ Labels: labels,
+ Exactly: *exactlyFlag,
+ }
+ var res periodic.HasRunnerResult
+ var err error
+ if *grpcFlag {
+ o := fgrpc.GRPCRunnerOptions{
+ RunnerOptions: ro,
+ Destination: url,
+ CACert: *caCertFlag,
+ Service: *healthSvcFlag,
+ Streams: *streamsFlag,
+ AllowInitialErrors: *allowInitialErrorsFlag,
+ Payload: httpOpts.PayloadString(),
+ Delay: *pingDelayFlag,
+ UsePing: *doPingLoadFlag,
+ UnixDomainSocket: httpOpts.UnixDomainSocket,
+ }
+ res, err = fgrpc.RunGRPCTest(&o)
+ } else {
+ o := fhttp.HTTPRunnerOptions{
+ HTTPOptions: *httpOpts,
+ RunnerOptions: ro,
+ Profiler: *profileFlag,
+ AllowInitialErrors: *allowInitialErrorsFlag,
+ AbortOn: *abortOnFlag,
+ }
+ res, err = fhttp.RunHTTPTest(&o)
+ }
+ if err != nil {
+ _, _ = fmt.Fprintf(out, "Aborting because %v\n", err)
+ os.Exit(1)
+ }
+ rr := res.Result()
+ warmup := *numThreadsFlag
+ if ro.Exactly > 0 {
+ warmup = 0
+ }
+ _, _ = fmt.Fprintf(out, "All done %d calls (plus %d warmup) %.3f ms avg, %.1f qps\n",
+ rr.DurationHistogram.Count,
+ warmup,
+ 1000.*rr.DurationHistogram.Avg,
+ rr.ActualQPS)
+ jsonFileName := *jsonFlag
+ if *autoSaveFlag || len(jsonFileName) > 0 {
+ var j []byte
+ j, err = json.MarshalIndent(res, "", " ")
+ if err != nil {
+ log.Fatalf("Unable to json serialize result: %v", err)
+ }
+ var f *os.File
+ if jsonFileName == "-" {
+ f = os.Stdout
+ jsonFileName = "stdout"
+ } else {
+ if len(jsonFileName) == 0 {
+ jsonFileName = path.Join(*dataDirFlag, rr.ID()+".json")
+ }
+ f, err = os.Create(jsonFileName)
+ if err != nil {
+ log.Fatalf("Unable to create %s: %v", jsonFileName, err)
+ }
+ }
+ n, err := f.Write(append(j, '\n'))
+ if err != nil {
+ log.Fatalf("Unable to write json to %s: %v", jsonFileName, err)
+ }
+ if f != os.Stdout {
+ err := f.Close()
+ if err != nil {
+ log.Fatalf("Close error for %s: %v", jsonFileName, err)
+ }
+ }
+ _, _ = fmt.Fprintf(out, "Successfully wrote %d bytes of Json data to %s\n", n, jsonFileName)
+ }
+}
+
+func grpcClient() {
+ if len(flag.Args()) != 1 {
+ usageErr("Error: fortio grpcping needs host argument in the form of host, host:port or ip:port")
+ }
+ host := flag.Arg(0)
+ count := int(*exactlyFlag)
+ if count <= 0 {
+ count = 1
+ }
+ cert := *caCertFlag
+ var err error
+ if *doHealthFlag {
+ _, err = fgrpc.GrpcHealthCheck(host, cert, *healthSvcFlag, count)
+ } else {
+ httpOpts := bincommon.SharedHTTPOptions()
+ _, err = fgrpc.PingClientCall(host, cert, count, httpOpts.PayloadString(), *pingDelayFlag)
+ }
+ if err != nil {
+ // already logged
+ os.Exit(1)
+ }
+}
diff --git a/vendor/fortio.org/fortio/histogram/main.go b/vendor/fortio.org/fortio/histogram/main.go
new file mode 100644
index 0000000000..ce6abde898
--- /dev/null
+++ b/vendor/fortio.org/fortio/histogram/main.go
@@ -0,0 +1,68 @@
+// Copyright 2017 Istio Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// histogram : reads values from stdin and outputs an histogram
+
+package main
+
+import (
+ "bufio"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "os"
+ "strconv"
+
+ "fortio.org/fortio/log"
+ "fortio.org/fortio/stats"
+)
+
+func main() {
+ var (
+ offsetFlag = flag.Float64("offset", 0.0, "Offset for the data")
+ dividerFlag = flag.Float64("divider", 1, "Divider/scaling for the data")
+ percentilesFlag = flag.String("p", "50,75,99,99.9", "List of pXX to calculate")
+ jsonFlag = flag.Bool("json", false, "Json output")
+ )
+ flag.Parse()
+ h := stats.NewHistogram(*offsetFlag, *dividerFlag)
+ percList, err := stats.ParsePercentiles(*percentilesFlag)
+ if err != nil {
+ log.Fatalf("Unable to extract percentiles from -p: %v", err)
+ }
+
+ scanner := bufio.NewScanner(os.Stdin)
+ linenum := 1
+ for scanner.Scan() {
+ line := scanner.Text()
+ v, err := strconv.ParseFloat(line, 64)
+ if err != nil {
+ log.Fatalf("Can't parse line %d: %v", linenum, err)
+ }
+ h.Record(v)
+ linenum++
+ }
+ if err := scanner.Err(); err != nil {
+ log.Fatalf("Err reading standard input %v", err)
+ }
+ if *jsonFlag {
+ b, err := json.MarshalIndent(h.Export().CalcPercentiles(percList), "", " ")
+ if err != nil {
+ log.Fatalf("Unable to create Json: %v", err)
+ }
+ fmt.Print(string(b))
+ } else {
+ h.Print(os.Stdout, "Histogram", percList)
+ }
+}
diff --git a/vendor/fortio.org/fortio/log/logger.go b/vendor/fortio.org/fortio/log/logger.go
new file mode 100644
index 0000000000..c0844524b9
--- /dev/null
+++ b/vendor/fortio.org/fortio/log/logger.go
@@ -0,0 +1,219 @@
+// Copyright 2017 Istio Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log // import "fortio.org/fortio/log"
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "runtime"
+ "strings"
+)
+
+// Level is the level of logging (0 Debug -> 6 Fatal).
+type Level int
+
+// Log levels. Go can't have variable and function of the same name so we keep
+// medium length (Dbg,Info,Warn,Err,Crit,Fatal) names for the functions.
+const (
+ Debug Level = iota
+ Verbose
+ Info
+ Warning
+ Error
+ Critical
+ Fatal
+)
+
+var (
+ level = Info // default is Info and up
+ levelToStrA []string
+ levelToStrM map[string]Level
+ // LogPrefix is a prefix to include in each log line.
+ LogPrefix = flag.String("logprefix", "> ", "Prefix to log lines before logged messages")
+ // LogFileAndLine determines if the log lines will contain caller file name and line number.
+ LogFileAndLine = flag.Bool("logcaller", true, "Logs filename and line number of callers to log")
+)
+
+func init() {
+ levelToStrA = []string{
+ "Debug",
+ "Verbose",
+ "Info",
+ "Warning",
+ "Error",
+ "Critical",
+ "Fatal",
+ }
+ levelToStrM = make(map[string]Level, 2*len(levelToStrA))
+ for l, name := range levelToStrA {
+ // Allow both -loglevel Verbose and -loglevel verbose ...
+ levelToStrM[name] = Level(l)
+ levelToStrM[strings.ToLower(name)] = Level(l)
+ }
+ flag.Var(&level, "loglevel", fmt.Sprintf("loglevel, one of %v", levelToStrA))
+ log.SetFlags(log.Ltime)
+}
+
+// String returns the string representation of the level.
+// Needed for flag Var interface.
+func (l *Level) String() string {
+ return (*l).ToString()
+}
+
+// ToString returns the string representation of the level.
+// (this can't be the same name as the pointer receiver version)
+func (l Level) ToString() string {
+ return levelToStrA[l]
+}
+
+// Set is called by the flags.
+func (l *Level) Set(str string) error {
+ var lvl Level
+ var ok bool
+ if lvl, ok = levelToStrM[str]; !ok {
+ // flag processing already logs the value
+ return fmt.Errorf("should be one of %v", levelToStrA)
+ }
+ SetLogLevel(lvl)
+ return nil
+}
+
+// SetLogLevel sets the log level and returns the previous one.
+func SetLogLevel(lvl Level) Level {
+ return setLogLevel(lvl, true)
+}
+
+// SetLogLevelQuiet sets the log level and returns the previous one but does
+// not log the change of level itself.
+func SetLogLevelQuiet(lvl Level) Level {
+ return setLogLevel(lvl, false)
+}
+
+// setLogLevel sets the log level and returns the previous one.
+// if logChange is true the level change is logged.
+func setLogLevel(lvl Level, logChange bool) Level {
+ prev := level
+ if lvl < Debug {
+ log.Printf("SetLogLevel called with level %d lower than Debug!", lvl)
+ return -1
+ }
+ if lvl > Critical {
+ log.Printf("SetLogLevel called with level %d higher than Critical!", lvl)
+ return -1
+ }
+ if lvl != prev {
+ if logChange {
+ logPrintf(Info, "Log level is now %d %s (was %d %s)\n", lvl, lvl.ToString(), prev, prev.ToString())
+ }
+ level = lvl
+ }
+ return prev
+}
+
+// GetLogLevel returns the currently configured LogLevel.
+func GetLogLevel() Level {
+ return level
+}
+
+// Log returns true if a given level is currently logged.
+func Log(lvl Level) bool {
+ return lvl >= level
+}
+
+// LevelByName returns the LogLevel by its name.
+func LevelByName(str string) Level {
+ return levelToStrM[str]
+}
+
+// Logf logs with format at the given level.
+// 2 level of calls so it's always same depth for extracting caller file/line
+func Logf(lvl Level, format string, rest ...interface{}) {
+ logPrintf(lvl, format, rest...)
+}
+
+func logPrintf(lvl Level, format string, rest ...interface{}) {
+ if !Log(lvl) {
+ return
+ }
+ if *LogFileAndLine {
+ _, file, line, _ := runtime.Caller(2)
+ file = file[strings.LastIndex(file, "/")+1:]
+ log.Print(levelToStrA[lvl][0:1], " ", file, ":", line, *LogPrefix, fmt.Sprintf(format, rest...))
+ } else {
+ log.Print(levelToStrA[lvl][0:1], " ", *LogPrefix, fmt.Sprintf(format, rest...))
+ }
+ if lvl == Fatal {
+ panic("aborting...")
+ }
+}
+
+// SetOutput sets the output to a different writer (forwards to system logger).
+func SetOutput(w io.Writer) {
+ log.SetOutput(w)
+}
+
+// SetFlags forwards flags to the system logger.
+func SetFlags(f int) {
+ log.SetFlags(f)
+}
+
+// -- would be nice to be able to create those in a loop instead of copypasta:
+
+// Debugf logs if Debug level is on.
+func Debugf(format string, rest ...interface{}) {
+ logPrintf(Debug, format, rest...)
+}
+
+// LogVf logs if Verbose level is on.
+func LogVf(format string, rest ...interface{}) { //nolint: golint
+ logPrintf(Verbose, format, rest...)
+}
+
+// Infof logs if Info level is on.
+func Infof(format string, rest ...interface{}) {
+ logPrintf(Info, format, rest...)
+}
+
+// Warnf logs if Warning level is on.
+func Warnf(format string, rest ...interface{}) {
+ logPrintf(Warning, format, rest...)
+}
+
+// Errf logs if Error level is on.
+func Errf(format string, rest ...interface{}) {
+ logPrintf(Error, format, rest...)
+}
+
+// Critf logs if Critical level is on.
+func Critf(format string, rest ...interface{}) {
+ logPrintf(Critical, format, rest...)
+}
+
+// Fatalf logs at Fatal level and then panics.
+func Fatalf(format string, rest ...interface{}) {
+ logPrintf(Fatal, format, rest...)
+}
+
+// LogDebug shortcut for fortio.Log(fortio.Debug)
+func LogDebug() bool { //nolint: golint
+ return Log(Debug)
+}
+
+// LogVerbose shortcut for fortio.Log(fortio.Verbose)
+func LogVerbose() bool { //nolint: golint
+ return Log(Verbose)
+}
diff --git a/vendor/fortio.org/fortio/log/logger_test.go b/vendor/fortio.org/fortio/log/logger_test.go
new file mode 100644
index 0000000000..e1c49d3e5c
--- /dev/null
+++ b/vendor/fortio.org/fortio/log/logger_test.go
@@ -0,0 +1,135 @@
+// Copyright 2017 Istio Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log // import "fortio.org/fortio/log"
+
+import (
+ "bufio"
+ "bytes"
+ "log"
+ "testing"
+)
+
+// leave this test first as it relies on line number not changing
+func TestLoggerFilenameLine(t *testing.T) {
+ SetLogLevel(Debug) // make sure it's already debug when we capture
+ on := true
+ LogFileAndLine = &on
+ *LogPrefix = "-prefix-"
+ var b bytes.Buffer
+ w := bufio.NewWriter(&b)
+ SetOutput(w)
+ SetFlags(0)
+ SetLogLevel(Debug)
+ if LogDebug() {
+ Debugf("test") // line 36
+ }
+ w.Flush()
+ actual := b.String()
+ expected := "D logger_test.go:36-prefix-test\n"
+ if actual != expected {
+ t.Errorf("unexpected:\n%s\nvs:\n%s\n", actual, expected)
+ }
+}
+
+func TestSetLevel(t *testing.T) {
+ prev := SetLogLevel(Info)
+ err := prev.Set("debug")
+ if err != nil {
+ t.Errorf("unexpected error for valid level %v", err)
+ }
+ prev = SetLogLevel(Info)
+ if prev != Debug {
+ t.Errorf("unexpected level after setting debug %v", prev)
+ }
+ err = prev.Set("bogus")
+ if err == nil {
+ t.Errorf("Didn't get an error setting bogus level")
+ }
+}
+
+func TestLogger1(t *testing.T) {
+ // Setup
+ var b bytes.Buffer
+ w := bufio.NewWriter(&b)
+ SetLogLevel(Info) // reset from other tests
+ *LogFileAndLine = false
+ *LogPrefix = ""
+ log.SetOutput(w)
+ log.SetFlags(0)
+ // Start of the actual test
+ SetLogLevel(LevelByName("Verbose"))
+ expected := "I Log level is now 1 Verbose (was 2 Info)\n"
+ i := 0
+ if LogVerbose() {
+ LogVf("test Va %d", i) // Should show
+ }
+ i++
+ expected += "V test Va 0\n"
+ Warnf("test Wa %d", i) // Should show
+ i++
+ expected += "W test Wa 1\n"
+ SetLogLevelQuiet(Debug) // no additional logging about level change
+ prevLevel := SetLogLevel(LevelByName("error")) // works with lowercase too
+ expected += "I Log level is now 4 Error (was 0 Debug)\n"
+ LogVf("test Vb %d", i) // Should not show
+ Infof("test info when level is error %d", i) // Should not show
+ i++
+ Warnf("test Wb %d", i) // Should not show
+ i++
+ Errf("test E %d", i) // Should show
+ i++
+ expected += "E test E 4\n"
+ // test the rest of the api
+ Logf(LevelByName("Critical"), "test %d level str %s, cur %s", i, prevLevel.String(), GetLogLevel().ToString())
+ expected += "C test 5 level str Debug, cur Error\n"
+ i++
+ SetLogLevel(Debug) // should be fine and invisible change
+ SetLogLevel(Debug - 1)
+ expected += "SetLogLevel called with level -1 lower than Debug!\n"
+ SetLogLevel(Fatal) // Hiding critical level is not allowed
+ expected += "SetLogLevel called with level 6 higher than Critical!\n"
+ SetLogLevel(Critical) // should be fine
+ expected += "I Log level is now 5 Critical (was 0 Debug)\n"
+ Critf("testing crit %d", i) // should show
+ expected += "C testing crit 6\n"
+ w.Flush() // nolint: errcheck
+ actual := b.String()
+ if actual != expected {
+ t.Errorf("unexpected:\n%s\nvs:\n%s\n", actual, expected)
+ }
+}
+
+func TestLogFatal(t *testing.T) {
+ defer func() {
+ if r := recover(); r == nil {
+ t.Errorf("expected a panic from log.Fatalf, didn't get one")
+ }
+ }()
+ Fatalf("test of log fatal")
+}
+
+func BenchmarkLogDirect1(b *testing.B) {
+ level = Error
+ for n := 0; n < b.N; n++ {
+ Debugf("foo bar %d", n)
+ }
+}
+
+func BenchmarkLogDirect2(b *testing.B) {
+ level = Error
+ for n := 0; n < b.N; n++ {
+ Logf(Debug, "foo bar %d", n)
+ }
+}
diff --git a/vendor/fortio.org/fortio/periodic/periodic.go b/vendor/fortio.org/fortio/periodic/periodic.go
new file mode 100644
index 0000000000..4c55b1f525
--- /dev/null
+++ b/vendor/fortio.org/fortio/periodic/periodic.go
@@ -0,0 +1,577 @@
+// Copyright 2017 Istio Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package periodic for fortio (from greek for load) is a set of utilities to
+// run a given task at a target rate (qps) and gather statistics - for instance
+// http requests.
+//
+// The main executable using the library is fortio but there
+// is also ../histogram to use the stats from the command line and ../echosrv
+// as a very light http server that can be used to test proxies etc like
+// the Istio components.
+package periodic // import "fortio.org/fortio/periodic"
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "os/signal"
+ "runtime"
+ "sync"
+ "time"
+
+ "fortio.org/fortio/log"
+ "fortio.org/fortio/stats"
+ "fortio.org/fortio/version"
+)
+
+// DefaultRunnerOptions are the default values for options (do not mutate!).
+// This is only useful for initializing flag default values.
+// You do not need to use this directly, you can pass a newly created
+// RunnerOptions and 0 valued fields will be reset to these defaults.
+var DefaultRunnerOptions = RunnerOptions{
+ QPS: 8,
+ Duration: 5 * time.Second,
+ NumThreads: 4,
+ Percentiles: []float64{90.0},
+ Resolution: 0.001, // milliseconds
+}
+
+// Runnable are the function to run periodically.
+type Runnable interface {
+ Run(tid int)
+}
+
+// MakeRunners creates an array of NumThreads identical Runnable instances.
+// (for the (rare/test) cases where there is no unique state needed)
+func (r *RunnerOptions) MakeRunners(rr Runnable) {
+ log.Infof("Making %d clone of %+v", r.NumThreads, rr)
+ if len(r.Runners) < r.NumThreads {
+ log.Infof("Resizing runners from %d to %d", len(r.Runners), r.NumThreads)
+ r.Runners = make([]Runnable, r.NumThreads)
+ }
+ for i := 0; i < r.NumThreads; i++ {
+ r.Runners[i] = rr
+ }
+}
+
+// ReleaseRunners clears the runners state.
+func (r *RunnerOptions) ReleaseRunners() {
+ for idx := range r.Runners {
+ r.Runners[idx] = nil
+ }
+}
+
+// Aborter is the object controlling Abort() of the runs.
+type Aborter struct {
+ sync.Mutex
+ StopChan chan struct{}
+}
+
+// Abort signals all the goroutines of this run to stop.
+// Implemented by closing the shared channel. The lock is to make sure
+// we close it exactly once to avoid go panic.
+func (a *Aborter) Abort() {
+ a.Lock()
+ if a.StopChan != nil {
+ log.LogVf("Closing %v", a.StopChan)
+ close(a.StopChan)
+ a.StopChan = nil
+ }
+ a.Unlock()
+}
+
+// NewAborter makes a new Aborter and initialize its StopChan.
+// The pointer should be shared. The structure is NoCopy.
+func NewAborter() *Aborter {
+ return &Aborter{StopChan: make(chan struct{}, 1)}
+}
+
+// RunnerOptions are the parameters to the PeriodicRunner.
+type RunnerOptions struct {
+ // Type of run (to be copied into results)
+ RunType string
+ // Array of objects to run in each thread (use MakeRunners() to clone the same one)
+ Runners []Runnable
+ // At which (target) rate to run the Runners across NumThreads.
+ QPS float64
+ // How long to run the test for. Unless Exactly is specified.
+ Duration time.Duration
+ // Note that this actually maps to gorountines and not actual threads
+ // but threads seems like a more familiar name to use for non go users
+ // and in a benchmarking context
+ NumThreads int
+ Percentiles []float64
+ Resolution float64
+ // Where to write the textual version of the results, defaults to stdout
+ Out io.Writer
+ // Extra data to be copied back to the results (to be saved/JSON serialized)
+ Labels string
+ // Aborter to interrupt a run. Will be created if not set/left nil. Or you
+ // can pass your own. It is very important this is a pointer and not a field
+ // as RunnerOptions themselves get copied while the channel and lock must
+ // stay unique (per run).
+ Stop *Aborter
+ // Mode where an exact number of iterations is requested. Default (0) is
+ // to not use that mode. If specified Duration is not used.
+ Exactly int64
+}
+
+// RunnerResults encapsulates the actual QPS observed and duration histogram.
+type RunnerResults struct {
+ RunType string
+ Labels string
+ StartTime time.Time
+ RequestedQPS string
+ RequestedDuration string // String version of the requested duration or exact count
+ ActualQPS float64
+ ActualDuration time.Duration
+ NumThreads int
+ Version string
+ DurationHistogram *stats.HistogramData
+ Exactly int64 // Echo back the requested count
+}
+
+// HasRunnerResult is the interface implicitly implemented by HTTPRunnerResults
+// and GrpcRunnerResults so the common results can be extracted irrespective
+// of the type.
+type HasRunnerResult interface {
+ Result() *RunnerResults
+}
+
+// Result returns the common RunnerResults.
+func (r *RunnerResults) Result() *RunnerResults {
+ return r
+}
+
+// PeriodicRunner let's you exercise the Function at the given QPS and collect
+// statistics and histogram about the run.
+type PeriodicRunner interface { // nolint: golint
+ // Starts the run. Returns actual QPS and Histogram of function durations.
+ Run() RunnerResults
+ // Returns the options normalized by constructor - do not mutate
+ // (where is const when you need it...)
+ Options() *RunnerOptions
+}
+
+// Unexposed implementation details for PeriodicRunner.
+type periodicRunner struct {
+ RunnerOptions
+}
+
+var (
+ gAbortChan chan os.Signal
+ gOutstandingRuns int64
+ gAbortMutex sync.Mutex
+)
+
+// Normalize initializes and normalizes the runner options. In particular it sets
+// up the channel that can be used to interrupt the run later.
+// Once Normalize is called, if Run() is skipped, Abort() must be called to
+// cleanup the watchers.
+func (r *RunnerOptions) Normalize() {
+ if r.QPS == 0 {
+ r.QPS = DefaultRunnerOptions.QPS
+ } else if r.QPS < 0 {
+ log.LogVf("Negative qps %f means max speed mode/no wait between calls", r.QPS)
+ r.QPS = -1
+ }
+ if r.Out == nil {
+ r.Out = os.Stdout
+ }
+ if r.NumThreads == 0 {
+ r.NumThreads = DefaultRunnerOptions.NumThreads
+ }
+ if r.NumThreads < 1 {
+ r.NumThreads = 1
+ }
+ if r.Percentiles == nil {
+ r.Percentiles = make([]float64, len(DefaultRunnerOptions.Percentiles))
+ copy(r.Percentiles, DefaultRunnerOptions.Percentiles)
+ }
+ if r.Resolution <= 0 {
+ r.Resolution = DefaultRunnerOptions.Resolution
+ }
+ if r.Duration == 0 {
+ r.Duration = DefaultRunnerOptions.Duration
+ }
+ if r.Runners == nil {
+ r.Runners = make([]Runnable, r.NumThreads)
+ }
+ if r.Stop == nil {
+ r.Stop = NewAborter()
+ runnerChan := r.Stop.StopChan // need a copy to not race with assignement to nil
+ go func() {
+ gAbortMutex.Lock()
+ gOutstandingRuns++
+ n := gOutstandingRuns
+ if gAbortChan == nil {
+ log.LogVf("WATCHER %d First outstanding run starting, catching signal", n)
+ gAbortChan = make(chan os.Signal, 1)
+ signal.Notify(gAbortChan, os.Interrupt)
+ }
+ abortChan := gAbortChan
+ gAbortMutex.Unlock()
+ log.LogVf("WATCHER %d starting new watcher for signal! chan g %v r %v (%d)", n, abortChan, runnerChan, runtime.NumGoroutine())
+ select {
+ case _, ok := <-abortChan:
+ log.LogVf("WATCHER %d got interrupt signal! %v", n, ok)
+ if ok {
+ gAbortMutex.Lock()
+ if gAbortChan != nil {
+ log.LogVf("WATCHER %d closing %v to notify all", n, gAbortChan)
+ close(gAbortChan)
+ gAbortChan = nil
+ }
+ gAbortMutex.Unlock()
+ }
+ r.Abort()
+ case <-runnerChan:
+ log.LogVf("WATCHER %d r.Stop readable", n)
+ // nothing to do, stop happened
+ }
+ log.LogVf("WATCHER %d End of go routine", n)
+ gAbortMutex.Lock()
+ gOutstandingRuns--
+ if gOutstandingRuns == 0 {
+ log.LogVf("WATCHER %d Last watcher: resetting signal handler", n)
+ gAbortChan = nil
+ signal.Reset(os.Interrupt)
+ } else {
+ log.LogVf("WATCHER %d isn't the last one, %d left", n, gOutstandingRuns)
+ }
+ gAbortMutex.Unlock()
+ }()
+ }
+}
+
+// Abort safely aborts the run by closing the channel and resetting that channel
+// to nil under lock so it can be called multiple times and not create panic for
+// already closed channel.
+func (r *RunnerOptions) Abort() {
+ log.LogVf("Abort called for %p %+v", r, r)
+ if r.Stop != nil {
+ r.Stop.Abort()
+ }
+}
+
+// internal version, returning the concrete implementation. logical std::move
+func newPeriodicRunner(opts *RunnerOptions) *periodicRunner {
+ r := &periodicRunner{*opts} // by default just copy the input params
+ opts.ReleaseRunners()
+ opts.Stop = nil
+ r.Normalize()
+ return r
+}
+
+// NewPeriodicRunner constructs a runner from input parameters/options.
+// The options will be moved and normalized to the returned object, do
+// not use the original options after this call, call Options() instead.
+// Abort() must be called if Run() is not called.
+func NewPeriodicRunner(params *RunnerOptions) PeriodicRunner {
+ return newPeriodicRunner(params)
+}
+
+// Options returns the options pointer.
+func (r *periodicRunner) Options() *RunnerOptions {
+ return &r.RunnerOptions // sort of returning this here
+}
+
+// Run starts the runner.
+func (r *periodicRunner) Run() RunnerResults {
+ r.Stop.Lock()
+ runnerChan := r.Stop.StopChan // need a copy to not race with assignement to nil
+ r.Stop.Unlock()
+ useQPS := (r.QPS > 0)
+ // r.Duration will be 0 if endless flag has been provided. Otherwise it will have the provided duration time.
+ hasDuration := (r.Duration > 0)
+ // r.Exactly is > 0 if we use Exactly iterations instead of the duration.
+ useExactly := (r.Exactly > 0)
+ var numCalls int64
+ var leftOver int64 // left over from r.Exactly / numThreads
+ requestedQPS := "max"
+ requestedDuration := "until stop"
+ if useQPS {
+ requestedQPS = fmt.Sprintf("%.9g", r.QPS)
+ if hasDuration || useExactly {
+ requestedDuration = fmt.Sprint(r.Duration)
+ numCalls = int64(r.QPS * r.Duration.Seconds())
+ if useExactly {
+ numCalls = r.Exactly
+ requestedDuration = fmt.Sprintf("exactly %d calls", numCalls)
+ }
+ if numCalls < 2 {
+ log.Warnf("Increasing the number of calls to the minimum of 2 with 1 thread. total duration will increase")
+ numCalls = 2
+ r.NumThreads = 1
+ }
+ if int64(2*r.NumThreads) > numCalls {
+ newN := int(numCalls / 2)
+ log.Warnf("Lowering number of threads - total call %d -> lowering from %d to %d threads", numCalls, r.NumThreads, newN)
+ r.NumThreads = newN
+ }
+ numCalls /= int64(r.NumThreads)
+ totalCalls := numCalls * int64(r.NumThreads)
+ if useExactly {
+ leftOver = r.Exactly - totalCalls
+ if log.Log(log.Warning) {
+ // nolint: gas
+ _, _ = fmt.Fprintf(r.Out, "Starting at %g qps with %d thread(s) [gomax %d] : exactly %d, %d calls each (total %d + %d)\n",
+ r.QPS, r.NumThreads, runtime.GOMAXPROCS(0), r.Exactly, numCalls, totalCalls, leftOver)
+ }
+ } else {
+ if log.Log(log.Warning) {
+ // nolint: gas
+ _, _ = fmt.Fprintf(r.Out, "Starting at %g qps with %d thread(s) [gomax %d] for %v : %d calls each (total %d)\n",
+ r.QPS, r.NumThreads, runtime.GOMAXPROCS(0), r.Duration, numCalls, totalCalls)
+ }
+ }
+ } else {
+ // Always print that as we need ^C to interrupt, in that case the user need to notice
+ // nolint: gas
+ _, _ = fmt.Fprintf(r.Out, "Starting at %g qps with %d thread(s) [gomax %d] until interrupted\n",
+ r.QPS, r.NumThreads, runtime.GOMAXPROCS(0))
+ numCalls = 0
+ }
+ } else {
+ if !useExactly && !hasDuration {
+ // Always log something when waiting for ^C
+ // nolint: gas
+ _, _ = fmt.Fprintf(r.Out, "Starting at max qps with %d thread(s) [gomax %d] until interrupted\n",
+ r.NumThreads, runtime.GOMAXPROCS(0))
+ } else {
+ if log.Log(log.Warning) {
+ // nolint: gas
+ _, _ = fmt.Fprintf(r.Out, "Starting at max qps with %d thread(s) [gomax %d] ",
+ r.NumThreads, runtime.GOMAXPROCS(0))
+ }
+ if useExactly {
+ requestedDuration = fmt.Sprintf("exactly %d calls", r.Exactly)
+ numCalls = r.Exactly / int64(r.NumThreads)
+ leftOver = r.Exactly % int64(r.NumThreads)
+ if log.Log(log.Warning) {
+ // nolint: gas
+ _, _ = fmt.Fprintf(r.Out, "for %s (%d per thread + %d)\n", requestedDuration, numCalls, leftOver)
+ }
+ } else {
+ requestedDuration = fmt.Sprint(r.Duration)
+ if log.Log(log.Warning) {
+ // nolint: gas
+ _, _ = fmt.Fprintf(r.Out, "for %s\n", requestedDuration)
+ }
+ }
+ }
+ }
+ runnersLen := len(r.Runners)
+ if runnersLen == 0 {
+ log.Fatalf("Empty runners array !")
+ }
+ if r.NumThreads > runnersLen {
+ r.MakeRunners(r.Runners[0])
+ log.Warnf("Context array was of %d len, replacing with %d clone of first one", runnersLen, len(r.Runners))
+ }
+ start := time.Now()
+ // Histogram and stats for Function duration - millisecond precision
+ functionDuration := stats.NewHistogram(0, r.Resolution)
+ // Histogram and stats for Sleep time (negative offset to capture <0 sleep in their own bucket):
+ sleepTime := stats.NewHistogram(-0.001, 0.001)
+ if r.NumThreads <= 1 {
+ log.LogVf("Running single threaded")
+ runOne(0, runnerChan, functionDuration, sleepTime, numCalls+leftOver, start, r)
+ } else {
+ var wg sync.WaitGroup
+ var fDs []*stats.Histogram
+ var sDs []*stats.Histogram
+ for t := 0; t < r.NumThreads; t++ {
+ durP := functionDuration.Clone()
+ sleepP := sleepTime.Clone()
+ fDs = append(fDs, durP)
+ sDs = append(sDs, sleepP)
+ wg.Add(1)
+ thisNumCalls := numCalls
+ if (leftOver > 0) && (t == 0) {
+ // The first thread gets to do the additional work
+ thisNumCalls += leftOver
+ }
+ go func(t int, durP *stats.Histogram, sleepP *stats.Histogram) {
+ runOne(t, runnerChan, durP, sleepP, thisNumCalls, start, r)
+ wg.Done()
+ }(t, durP, sleepP)
+ }
+ wg.Wait()
+ for t := 0; t < r.NumThreads; t++ {
+ functionDuration.Transfer(fDs[t])
+ sleepTime.Transfer(sDs[t])
+ }
+ }
+ elapsed := time.Since(start)
+ actualQPS := float64(functionDuration.Count) / elapsed.Seconds()
+ if log.Log(log.Warning) {
+ // nolint: gas
+ _, _ = fmt.Fprintf(r.Out, "Ended after %v : %d calls. qps=%.5g\n", elapsed, functionDuration.Count, actualQPS)
+ }
+ if useQPS {
+ percentNegative := 100. * float64(sleepTime.Hdata[0]) / float64(sleepTime.Count)
+ // Somewhat arbitrary percentage of time the sleep was behind so we
+ // may want to know more about the distribution of sleep time and warn the
+ // user.
+ if percentNegative > 5 {
+ sleepTime.Print(r.Out, "Aggregated Sleep Time", []float64{50})
+ _, _ = fmt.Fprintf(r.Out, "WARNING %.2f%% of sleep were falling behind\n", percentNegative) // nolint: gas
+ } else {
+ if log.Log(log.Verbose) {
+ sleepTime.Print(r.Out, "Aggregated Sleep Time", []float64{50})
+ } else if log.Log(log.Warning) {
+ sleepTime.Counter.Print(r.Out, "Sleep times")
+ }
+ }
+ }
+ actualCount := functionDuration.Count
+ if useExactly && actualCount != r.Exactly {
+ requestedDuration += fmt.Sprintf(", interrupted after %d", actualCount)
+ }
+ result := RunnerResults{r.RunType, r.Labels, start, requestedQPS, requestedDuration,
+ actualQPS, elapsed, r.NumThreads, version.Short(), functionDuration.Export().CalcPercentiles(r.Percentiles), r.Exactly}
+ if log.Log(log.Warning) {
+ result.DurationHistogram.Print(r.Out, "Aggregated Function Time")
+ } else {
+ functionDuration.Counter.Print(r.Out, "Aggregated Function Time")
+ for _, p := range result.DurationHistogram.Percentiles {
+ _, _ = fmt.Fprintf(r.Out, "# target %g%% %.6g\n", p.Percentile, p.Value) // nolint: gas
+ }
+ }
+ select {
+ case <-runnerChan: // nothing
+ log.LogVf("RUNNER r.Stop already closed")
+ default:
+ log.LogVf("RUNNER r.Stop not already closed, closing")
+ r.Abort()
+ }
+ return result
+}
+
+// runOne runs in 1 go routine.
+func runOne(id int, runnerChan chan struct{},
+ funcTimes *stats.Histogram, sleepTimes *stats.Histogram, numCalls int64, start time.Time, r *periodicRunner) {
+ var i int64
+ endTime := start.Add(r.Duration)
+ tIDStr := fmt.Sprintf("T%03d", id)
+ perThreadQPS := r.QPS / float64(r.NumThreads)
+ useQPS := (perThreadQPS > 0)
+ hasDuration := (r.Duration > 0)
+ useExactly := (r.Exactly > 0)
+ f := r.Runners[id]
+
+MainLoop:
+ for {
+ fStart := time.Now()
+ if !useExactly && (hasDuration && fStart.After(endTime)) {
+ if !useQPS {
+ // max speed test reached end:
+ break
+ }
+ // QPS mode:
+ // Do least 2 iterations, and the last one before bailing because of time
+ if (i >= 2) && (i != numCalls-1) {
+ log.Warnf("%s warning only did %d out of %d calls before reaching %v", tIDStr, i, numCalls, r.Duration)
+ break
+ }
+ }
+ f.Run(id)
+ funcTimes.Record(time.Since(fStart).Seconds())
+ i++
+ // if using QPS / pre calc expected call # mode:
+ if useQPS {
+ if (useExactly || hasDuration) && i >= numCalls {
+ break // expected exit for that mode
+ }
+ elapsed := time.Since(start)
+ var targetElapsedInSec float64
+ if hasDuration {
+ // This next line is tricky - such as for 2s duration and 1qps there is 1
+ // sleep of 2s between the 2 calls and for 3qps in 1sec 2 sleep of 1/2s etc
+ targetElapsedInSec = (float64(i) + float64(i)/float64(numCalls-1)) / perThreadQPS
+ } else {
+ // Calculate the target elapsed when in endless execution
+ targetElapsedInSec = float64(i) / perThreadQPS
+ }
+ targetElapsedDuration := time.Duration(int64(targetElapsedInSec * 1e9))
+ sleepDuration := targetElapsedDuration - elapsed
+ log.Debugf("%s target next dur %v - sleep %v", tIDStr, targetElapsedDuration, sleepDuration)
+ sleepTimes.Record(sleepDuration.Seconds())
+ select {
+ case <-runnerChan:
+ break MainLoop
+ case <-time.After(sleepDuration):
+ // continue normal execution
+ }
+ } else { // Not using QPS
+ if useExactly && i >= numCalls {
+ break
+ }
+ select {
+ case <-runnerChan:
+ break MainLoop
+ default:
+ // continue to the next iteration
+ }
+ }
+ }
+ elapsed := time.Since(start)
+ actualQPS := float64(i) / elapsed.Seconds()
+ log.Infof("%s ended after %v : %d calls. qps=%g", tIDStr, elapsed, i, actualQPS)
+ if (numCalls > 0) && log.Log(log.Verbose) {
+ funcTimes.Log(tIDStr+" Function duration", []float64{99})
+ if log.Log(log.Debug) {
+ sleepTimes.Log(tIDStr+" Sleep time", []float64{50})
+ } else {
+ sleepTimes.Counter.Log(tIDStr + " Sleep time")
+ }
+ }
+}
+
+func formatDate(d *time.Time) string {
+ return fmt.Sprintf("%d-%02d-%02d-%02d%02d%02d", d.Year(), d.Month(), d.Day(),
+ d.Hour(), d.Minute(), d.Second())
+}
+
+// ID returns an id for the result: 64 bytes YYYY-MM-DD-HHmmSS_{alpha_labels}
+// where alpha_labels is the filtered labels with only alphanumeric characters
+// and all non alpha num replaced by _; truncated to 64 bytes.
+func (r *RunnerResults) ID() string {
+ base := formatDate(&r.StartTime)
+ if r.Labels == "" {
+ return base
+ }
+ last := '_'
+ base += string(last)
+ for _, rune := range r.Labels {
+ if (rune >= 'a' && rune <= 'z') || (rune >= 'A' && rune <= 'Z') || (rune >= '0' && rune <= '9') {
+ last = rune
+ } else {
+ if last == '_' {
+ continue // only 1 _ separator at a time
+ }
+ last = '_'
+ }
+ base += string(last)
+ }
+ if last == '_' {
+ base = base[:len(base)-1]
+ }
+ if len(base) > 64 {
+ return base[:64]
+ }
+ return base
+}
diff --git a/vendor/fortio.org/fortio/periodic/periodic_loglevel_test.go b/vendor/fortio.org/fortio/periodic/periodic_loglevel_test.go
new file mode 100644
index 0000000000..f6c9c81507
--- /dev/null
+++ b/vendor/fortio.org/fortio/periodic/periodic_loglevel_test.go
@@ -0,0 +1,34 @@
+// Copyright 2017 Istio Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !race
+
+package periodic
+
+import (
+ "testing"
+
+ "fortio.org/fortio/log"
+)
+
+// Rerun some test with various log level for coverage of the print statements
+// TODO: golden copy type check of output ?
+
+func TestQuietMode(t *testing.T) {
+ log.SetLogLevel(log.Error)
+ TestStart(t)
+ TestExactlyMaxQps(t)
+ log.SetLogLevel(log.Verbose)
+ TestStart(t)
+}
diff --git a/vendor/fortio.org/fortio/periodic/periodic_test.go b/vendor/fortio.org/fortio/periodic/periodic_test.go
new file mode 100644
index 0000000000..2a3c36c48c
--- /dev/null
+++ b/vendor/fortio.org/fortio/periodic/periodic_test.go
@@ -0,0 +1,369 @@
+// Copyright 2017 Istio Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package periodic
+
+import (
+ "os"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "fortio.org/fortio/log"
+)
+
+type Noop struct{}
+
+func (n *Noop) Run(t int) {
+}
+
+// used for when we don't actually run periodic test/want to initialize
+// watchers
+var bogusTestChan = NewAborter()
+
+func TestNewPeriodicRunner(t *testing.T) {
+ var tests = []struct {
+ qps float64 // input
+ numThreads int // input
+ expectedQPS float64 // expected
+ expectedNumThreads int // expected
+ }{
+ {qps: 0.1, numThreads: 1, expectedQPS: 0.1, expectedNumThreads: 1},
+ {qps: 1, numThreads: 3, expectedQPS: 1, expectedNumThreads: 3},
+ {qps: 10, numThreads: 10, expectedQPS: 10, expectedNumThreads: 10},
+ {qps: 100000, numThreads: 10, expectedQPS: 100000, expectedNumThreads: 10},
+ {qps: 0.5, numThreads: 1, expectedQPS: 0.5, expectedNumThreads: 1},
+ // Error cases negative qps same as -1 qps == max speed
+ {qps: -10, numThreads: 0, expectedQPS: -1, expectedNumThreads: 4},
+ // Need at least 1 thread
+ {qps: 0, numThreads: -6, expectedQPS: DefaultRunnerOptions.QPS, expectedNumThreads: 1},
+ }
+ for _, tst := range tests {
+ o := RunnerOptions{
+ QPS: tst.qps,
+ NumThreads: tst.numThreads,
+ Stop: bogusTestChan, //TODO: use bogusTestChan so gOutstandingRuns does reach 0
+ }
+ r := newPeriodicRunner(&o)
+ r.MakeRunners(&Noop{})
+ if r.QPS != tst.expectedQPS {
+ t.Errorf("qps: got %f, not as expected %f", r.QPS, tst.expectedQPS)
+ }
+ if r.NumThreads != tst.expectedNumThreads {
+ t.Errorf("threads: with %d input got %d, not as expected %d",
+ tst.numThreads, r.NumThreads, tst.expectedNumThreads)
+ }
+ r.ReleaseRunners()
+ }
+}
+
+type TestCount struct {
+ count *int64
+ lock *sync.Mutex
+}
+
+func (c *TestCount) Run(i int) {
+ c.lock.Lock()
+ (*c.count)++
+ c.lock.Unlock()
+ time.Sleep(50 * time.Millisecond)
+}
+
+func TestStart(t *testing.T) {
+ var count int64
+ var lock sync.Mutex
+ c := TestCount{&count, &lock}
+ o := RunnerOptions{
+ QPS: 11.4,
+ NumThreads: 1,
+ Duration: 1 * time.Second,
+ }
+ r := NewPeriodicRunner(&o)
+ r.Options().MakeRunners(&c)
+ count = 0
+ r.Run()
+ if count != 11 {
+ t.Errorf("Test executed unexpected number of times %d instead %d", count, 11)
+ }
+ count = 0
+ oo := r.Options()
+ oo.NumThreads = 10 // will be lowered to 5 so 10 calls (2 in each thread)
+ r.Run()
+ if count != 10 {
+ t.Errorf("MT Test executed unexpected number of times %d instead %d", count, 10)
+ }
+ // note: it's kind of a bug this only works after Run() and not before
+ if oo.NumThreads != 5 {
+ t.Errorf("Lowering of thread count broken, got %d instead of 5", oo.NumThreads)
+ }
+ count = 0
+ oo.Duration = 1 * time.Nanosecond
+ r.Run()
+ if count != 2 {
+ t.Errorf("Test executed unexpected number of times %d instead minimum 2", count)
+ }
+ r.Options().ReleaseRunners()
+}
+
+func TestStartMaxQps(t *testing.T) {
+ var count int64
+ var lock sync.Mutex
+ c := TestCount{&count, &lock}
+ o := RunnerOptions{
+ QPS: -1, // max speed (0 is default qps, not max)
+ NumThreads: 4,
+ Duration: 140 * time.Millisecond,
+ }
+ r := NewPeriodicRunner(&o)
+ r.Options().MakeRunners(&c)
+ count = 0
+ var res1 HasRunnerResult // test that interface
+ res := r.Run()
+ res1 = res.Result()
+ expected := int64(3 * 4) // can start 3 50ms in 140ms * 4 threads
+ // Check the count both from the histogram and from our own test counter:
+ actual := res1.Result().DurationHistogram.Count
+ if actual != expected {
+ t.Errorf("MaxQpsTest executed unexpected number of times %d instead %d", actual, expected)
+ }
+ if count != expected {
+ t.Errorf("MaxQpsTest executed unexpected number of times %d instead %d", count, expected)
+ }
+ r.Options().ReleaseRunners()
+}
+
+func TestExactlyLargeDur(t *testing.T) {
+ var count int64
+ var lock sync.Mutex
+ c := TestCount{&count, &lock}
+ o := RunnerOptions{
+ QPS: 3,
+ NumThreads: 4,
+ Duration: 100 * time.Hour, // will not be used, large to catch if it would
+ Exactly: 9, // exactly 9 times, so 2 per thread + 1
+ }
+ r := NewPeriodicRunner(&o)
+ r.Options().MakeRunners(&c)
+ count = 0
+ res := r.Run()
+ expected := o.Exactly
+ // Check the count both from the histogram and from our own test counter:
+ actual := res.DurationHistogram.Count
+ if actual != expected {
+ t.Errorf("Exact count executed unexpected number of times %d instead %d", actual, expected)
+ }
+ if count != expected {
+ t.Errorf("Exact count executed unexpected number of times %d instead %d", count, expected)
+ }
+ r.Options().ReleaseRunners()
+}
+
+func TestExactlySmallDur(t *testing.T) {
+ var count int64
+ var lock sync.Mutex
+ c := TestCount{&count, &lock}
+ expected := int64(11)
+ o := RunnerOptions{
+ QPS: 3,
+ NumThreads: 4,
+ Duration: 1 * time.Second, // would do only 3 calls without Exactly
+ Exactly: expected, // exactly 11 times, so 2 per thread + 3
+ }
+ r := NewPeriodicRunner(&o)
+ r.Options().MakeRunners(&c)
+ count = 0
+ res := r.Run()
+ // Check the count both from the histogram and from our own test counter:
+ actual := res.DurationHistogram.Count
+ if actual != expected {
+ t.Errorf("Exact count executed unexpected number of times %d instead %d", actual, expected)
+ }
+ if count != expected {
+ t.Errorf("Exact count executed unexpected number of times %d instead %d", count, expected)
+ }
+ r.Options().ReleaseRunners()
+}
+
+func TestExactlyMaxQps(t *testing.T) {
+ var count int64
+ var lock sync.Mutex
+ c := TestCount{&count, &lock}
+ expected := int64(503)
+ o := RunnerOptions{
+ QPS: -1, // max qps
+ NumThreads: 4,
+ Duration: -1, // infinite but should not be used
+ Exactly: expected, // exactly 503 times, so 125 per thread + 3
+ }
+ r := NewPeriodicRunner(&o)
+ r.Options().MakeRunners(&c)
+ count = 0
+ res := r.Run()
+ // Check the count both from the histogram and from our own test counter:
+ actual := res.DurationHistogram.Count
+ if actual != expected {
+ t.Errorf("Exact count executed unexpected number of times %d instead %d", actual, expected)
+ }
+ if count != expected {
+ t.Errorf("Exact count executed unexpected number of times %d instead %d", count, expected)
+ }
+ r.Options().ReleaseRunners()
+}
+
+func TestID(t *testing.T) {
+ var tests = []struct {
+ labels string // input
+ id string // expected suffix after the date
+ }{
+ {"", ""},
+ {"abcDEF123", "_abcDEF123"},
+ {"A!@#$%^&*()-+=/'B", "_A_B"},
+ // Ends with non alpha, skip last _
+ {"A ", "_A"},
+ {" ", ""},
+ // truncated to fit 64 (17 from date/time + _ + 46 from labels)
+ {"123456789012345678901234567890123456789012345678901234567890", "_1234567890123456789012345678901234567890123456"},
+ }
+ startTime := time.Date(2001, time.January, 2, 3, 4, 5, 0, time.Local)
+ prefix := "2001-01-02-030405"
+ for _, tst := range tests {
+ o := RunnerResults{
+ StartTime: startTime,
+ Labels: tst.labels,
+ }
+ id := o.ID()
+ expected := prefix + tst.id
+ if id != expected {
+ t.Errorf("id: got %s, not as expected %s", id, expected)
+ }
+ }
+}
+
+func TestInfiniteDurationAndAbort(t *testing.T) {
+ var count int64
+ var lock sync.Mutex
+ c := TestCount{&count, &lock}
+ o := RunnerOptions{
+ QPS: 10,
+ NumThreads: 1,
+ Duration: -1, // infinite but we'll abort after 1sec
+ }
+ r := NewPeriodicRunner(&o)
+ r.Options().MakeRunners(&c)
+ count = 0
+ go func() {
+ time.Sleep(1 * time.Second)
+ log.LogVf("Calling abort after 1 sec")
+ r.Options().Abort()
+ }()
+ r.Run()
+ if count < 9 || count > 13 {
+ t.Errorf("Test executed unexpected number of times %d instead of 9-13", count)
+ }
+ // Same with infinite qps
+ count = 0
+ o.QPS = -1 // infinite qps
+ r.Options().ReleaseRunners()
+ r = NewPeriodicRunner(&o)
+ r.Options().MakeRunners(&c)
+ go func() {
+ time.Sleep(140 * time.Millisecond)
+ log.LogVf("Sending global interrupt after 0.14 sec")
+ gAbortMutex.Lock()
+ gAbortChan <- os.Interrupt
+ gAbortMutex.Unlock()
+ }()
+ r.Run()
+ r.Options().ReleaseRunners()
+ if count < 2 || count > 4 { // should get 3 in 140ms
+ t.Errorf("Test executed unexpected number of times %d instead of 3 (2-4)", count)
+ }
+}
+
+func TestExactlyAndAbort(t *testing.T) {
+ var count int64
+ var lock sync.Mutex
+ c := TestCount{&count, &lock}
+ o := RunnerOptions{
+ QPS: 10,
+ NumThreads: 1,
+ Exactly: 100, // would take 10s we'll abort after 1sec
+ }
+ r := NewPeriodicRunner(&o)
+ r.Options().MakeRunners(&c)
+ count = 0
+ go func() {
+ time.Sleep(1 * time.Second)
+ log.LogVf("Calling abort after 1 sec")
+ r.Options().Abort()
+ }()
+ res := r.Run()
+ r.Options().ReleaseRunners()
+ if count < 9 || count > 13 {
+ t.Errorf("Test executed unexpected number of times %d instead of 9-13", count)
+ }
+ if !strings.Contains(res.RequestedDuration, "exactly 100 calls, interrupted after") {
+ t.Errorf("Got '%s' and didn't find expected aborted", res.RequestedDuration)
+ }
+}
+
+func TestSleepFallingBehind(t *testing.T) {
+ var count int64
+ var lock sync.Mutex
+ c := TestCount{&count, &lock}
+ o := RunnerOptions{
+ QPS: 1000000, // similar to max qps but with sleep falling behind
+ NumThreads: 4,
+ Duration: 140 * time.Millisecond,
+ }
+ r := NewPeriodicRunner(&o)
+ r.Options().MakeRunners(&c)
+ count = 0
+ res := r.Run()
+ r.Options().ReleaseRunners()
+ expected := int64(3 * 4) // can start 3 50ms in 140ms * 4 threads
+ // Check the count both from the histogram and from our own test counter:
+ actual := res.DurationHistogram.Count
+ if actual > expected+2 || actual < expected-2 {
+ t.Errorf("Extra high qps executed unexpected number of times %d instead %d", actual, expected)
+ }
+ // check histogram and our counter got same result
+ if count != actual {
+ t.Errorf("Extra high qps internal counter %d doesn't match histogram %d for expected %d", count, actual, expected)
+ }
+}
+
+func Test2Watchers(t *testing.T) {
+ // Wait for previous test to cleanup watchers
+ time.Sleep(200 * time.Millisecond)
+ o1 := RunnerOptions{}
+ r1 := newPeriodicRunner(&o1)
+ o2 := RunnerOptions{}
+ r2 := newPeriodicRunner(&o2)
+ time.Sleep(200 * time.Millisecond)
+ gAbortMutex.Lock()
+ if gOutstandingRuns != 2 {
+ t.Errorf("found %d watches while expecting 2 for (%v %v)", gOutstandingRuns, r1, r2)
+ }
+ gAbortMutex.Unlock()
+ gAbortChan <- os.Interrupt
+ // wait for interrupt to propagate
+ time.Sleep(200 * time.Millisecond)
+ gAbortMutex.Lock()
+ if gOutstandingRuns != 0 {
+ t.Errorf("found %d watches while expecting 0", gOutstandingRuns)
+ }
+ gAbortMutex.Unlock()
+}
diff --git a/vendor/fortio.org/fortio/release/.gitignore b/vendor/fortio.org/fortio/release/.gitignore
new file mode 100644
index 0000000000..9393f6553e
--- /dev/null
+++ b/vendor/fortio.org/fortio/release/.gitignore
@@ -0,0 +1,8 @@
+tgz/
+*.tar.gz
+*.tgz
+*.deb
+*.rpm
+Dockerfile
+# Makefile is generated by make dist in parent
+Makefile
diff --git a/vendor/fortio.org/fortio/release/Dockerfile.in b/vendor/fortio.org/fortio/release/Dockerfile.in
new file mode 100644
index 0000000000..d6b862199f
--- /dev/null
+++ b/vendor/fortio.org/fortio/release/Dockerfile.in
@@ -0,0 +1,16 @@
+# Concatenated after ../Dockerfile to create the tgz
+FROM docker.io/fortio/fortio.build:v12 as stage
+WORKDIR /stage
+COPY --from=release /usr/bin/fortio usr/bin/fortio
+COPY --from=release /usr/share/fortio usr/share/fortio
+COPY docs/fortio.1 usr/share/man/man1/fortio.1
+RUN mkdir /tgz
+# Make sure the list here is both minimal and complete
+# we could take all of * but that adds system directories to the tar
+RUN tar cvf - usr/share/fortio/* usr/share/man/man1/fortio.1 usr/bin/fortio | gzip --best > /tgz/fortio-linux_x64-$(./usr/bin/fortio version -s).tgz
+WORKDIR /tgz
+COPY release/ffpm.sh /
+RUN bash -x /ffpm.sh deb
+RUN bash -x /ffpm.sh rpm
+FROM scratch
+COPY --from=stage /tgz/ /tgz/
diff --git a/vendor/fortio.org/fortio/release/Makefile.dist b/vendor/fortio.org/fortio/release/Makefile.dist
new file mode 100644
index 0000000000..570f37178f
--- /dev/null
+++ b/vendor/fortio.org/fortio/release/Makefile.dist
@@ -0,0 +1,15 @@
+# template for top level Makefile in tar ball distribution. used by "make dist" with variables prepended
+# so the rest of the build works for debian without a git repo / from a tar ball
+
+# dh_auto_build calls the default target, we don't need one
+default:
+ @echo "This is the build dist for fortio $(GIT_TAG)"
+ @echo "please use the Makefile in src/fortio.org/fortio"
+
+ifeq ($(FORTIO_SKIP_TESTS),Y)
+test:
+ @echo "FORTIO_SKIP_TESTS set to Y, skipping running tests."
+endif
+
+%:
+ cd src/fortio.org/fortio; GOPATH=$(CURDIR) $(MAKE) $@ GIT_TAG=$(GIT_TAG) GIT_STATUS=$(GIT_STATUS) GIT_SHA=$(GIT_SHA)
diff --git a/vendor/fortio.org/fortio/release/README.md b/vendor/fortio.org/fortio/release/README.md
new file mode 100644
index 0000000000..5b99bd49af
--- /dev/null
+++ b/vendor/fortio.org/fortio/release/README.md
@@ -0,0 +1,67 @@
+# How to make a fortio release
+
+- Make sure `version/version.go`'s `major`/`minor`/`patch` is newer than the most recent [release](https://github.com/fortio/fortio/releases)
+
+- Update debian/changelog to match said upcoming release
+
+- Make a release there and document the changes since the previous release
+
+- Make sure to use the same git tag format (e.g "v0.7.1" - note that there is `v` prefix in the tag, like many projects but unlike the rest of istio). Docker and internal version/tag is "0.7.1", the `v` is only for git tags.
+
+- Make sure your git status is clean, and the tag is present (git pull) before the next step or it will get marked dirty/pre
+
+- Create the binary tgz, deb and rpm packages: `make release` (from/in the toplevel directory)
+
+- Upload the release/fortio-\*.tgz, .orig.tar.gz, .deb and .rpm to GitHub
+
+- Push the dist to Debian/Ubuntu
+
+- The docker official builds are done automatically based on tag, check [fortio's cloud docker build page](https://cloud.docker.com/app/fortio/repository/docker/fortio/fortio/builds)
+
+- Increment the `patch` and commit that right away so the first point is true next time and so master/latest docker images have the correct next-pre version.
+
+- Once the release is deemed good/stable: move the git tag `latest_release` to the same as the release.
+
+ ```Shell
+ # for instance for 0.11.0:
+ git fetch
+ git checkout v0.11.0
+ git tag -f latest_release
+ git push -f --tags
+ ```
+
+- Also push `latest_release` docker tag/image: wait for the autobuild to make it and then:
+
+ ```Shell
+  # for instance for 1.1.1:
+ docker image pull fortio/fortio:1.1.1
+ docker tag fortio/fortio:1.1.1 fortio/fortio:latest_release
+ docker push fortio/fortio:latest_release
+ ```
+
+- To update the command line flags in the ../README.md; go install the right version of fortio so it is in your path and run updateFlags.sh
+
+- Update the homebrew tap
+
+## How to change the build image
+
+Update [../Dockerfile.build](../Dockerfile.build)
+
+Edit the `BUILD_IMAGE_TAG := v5` line in the Makefile, set it to `v6`
+for instance (replace `v6` by whichever is the next one at the time)
+
+run
+
+```Shell
+make update-build-image
+```
+
+Make sure it gets successfully pushed to the fortio registry (requires org access)
+
+run
+
+```Shell
+make update-build-image-tag
+```
+
+Check the diff and make lint, webtest, etc and PR
diff --git a/vendor/fortio.org/fortio/release/ffpm.sh b/vendor/fortio.org/fortio/release/ffpm.sh
new file mode 100755
index 0000000000..20ddc5b7c6
--- /dev/null
+++ b/vendor/fortio.org/fortio/release/ffpm.sh
@@ -0,0 +1,21 @@
+#! /bin/bash
+# Copyright 2017 Istio Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Ran from the release/Dockerfile[.in] to invoke fpm with common arguments
+# Assumes the layout from the Dockerfiles (/stage source, /tgz destination etc)
+fpm -v $(/stage/usr/bin/fortio version -s) -n fortio --license "Apache License, Version 2.0" \
+ --category utils --url https://fortio.org/ --maintainer fortio@fortio.org \
+ --description "Fortio is a load testing library, command line tool, advanced echo server and web UI in go (golang)." \
+ -s tar -t $1 /tgz/*.tgz
diff --git a/vendor/fortio.org/fortio/release/release.sh b/vendor/fortio.org/fortio/release/release.sh
new file mode 100755
index 0000000000..7c5850474d
--- /dev/null
+++ b/vendor/fortio.org/fortio/release/release.sh
@@ -0,0 +1,36 @@
+#! /bin/bash
+# Copyright 2017 Istio Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# To be run by ../Makefile as release/release.sh
+set -x
+set -e
+# Release tgz Dockerfile is based on the normal docker one
+cat Dockerfile release/Dockerfile.in > release/Dockerfile
+docker build -f release/Dockerfile -t fortio/fortio:release .
+DOCKERID=$(docker create --name fortio_release fortio/fortio:release x)
+function cleanup {
+ docker rm fortio_release
+}
+trap cleanup EXIT
+set -o pipefail
+# docker cp will create 2 levels of dir if the first one exists, make sure it doesn't
+rm -f release/tgz/*
+rmdir release/tgz || true
+docker cp -a fortio_release:/tgz/ release/tgz
+# Check the tar ball
+tar tvfz release/tgz/*.tgz
+# then save the results 1 level up
+mv release/tgz/* release/
+rmdir release/tgz
diff --git a/vendor/fortio.org/fortio/release/updateFlags.sh b/vendor/fortio.org/fortio/release/updateFlags.sh
new file mode 100755
index 0000000000..c0d231f4e0
--- /dev/null
+++ b/vendor/fortio.org/fortio/release/updateFlags.sh
@@ -0,0 +1,18 @@
+#! /bin/bash
+# Copyright 2017 Istio Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Extract fortio's help and rewrap it to 80 cols
+# TODO: do like fmt does to keep leading indentation
+fortio help | expand | fold -s | sed -e "s/ $//"
diff --git a/vendor/fortio.org/fortio/stats/stats.go b/vendor/fortio.org/fortio/stats/stats.go
new file mode 100644
index 0000000000..ea02151bf3
--- /dev/null
+++ b/vendor/fortio.org/fortio/stats/stats.go
@@ -0,0 +1,530 @@
+// Copyright 2017 Istio Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stats // import "fortio.org/fortio/stats"
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+
+ "fortio.org/fortio/log"
+)
+
+// Counter is a type whose instances record values
+// and calculate stats (count,average,min,max,stddev).
+type Counter struct {
+ Count int64
+ Min float64
+ Max float64
+ Sum float64
+ sumOfSquares float64
+}
+
+// Record records a data point.
+func (c *Counter) Record(v float64) {
+ c.RecordN(v, 1)
+}
+
+// RecordN efficiently records the same value N times
+func (c *Counter) RecordN(v float64, n int) {
+ isFirst := (c.Count == 0)
+ c.Count += int64(n)
+ if isFirst {
+ c.Min = v
+ c.Max = v
+ } else if v < c.Min {
+ c.Min = v
+ } else if v > c.Max {
+ c.Max = v
+ }
+ s := v * float64(n)
+ c.Sum += s
+ c.sumOfSquares += (s * s)
+}
+
+// Avg returns the average.
+func (c *Counter) Avg() float64 {
+ return c.Sum / float64(c.Count)
+}
+
+// StdDev returns the standard deviation.
+func (c *Counter) StdDev() float64 {
+ fC := float64(c.Count)
+ sigma := (c.sumOfSquares - c.Sum*c.Sum/fC) / fC
+ // should never happen but it does
+ if sigma < 0 {
+ log.Warnf("Unexpected negative sigma for %+v: %g", c, sigma)
+ return 0
+ }
+ return math.Sqrt(sigma)
+}
+
+// Print prints stats.
+func (c *Counter) Print(out io.Writer, msg string) {
+ _, _ = fmt.Fprintf(out, "%s : count %d avg %.8g +/- %.4g min %g max %g sum %.9g\n", // nolint(errorcheck)
+ msg, c.Count, c.Avg(), c.StdDev(), c.Min, c.Max, c.Sum)
+}
+
+// Log outputs the stats to the logger.
+func (c *Counter) Log(msg string) {
+ log.Infof("%s : count %d avg %.8g +/- %.4g min %g max %g sum %.9g",
+ msg, c.Count, c.Avg(), c.StdDev(), c.Min, c.Max, c.Sum)
+}
+
+// Reset clears the counter to reset it to original 'no data' state.
+func (c *Counter) Reset() {
+ var empty Counter
+ *c = empty
+}
+
+// Transfer merges the data from src into this Counter and clears src.
+func (c *Counter) Transfer(src *Counter) {
+ if src.Count == 0 {
+ return // nothing to do
+ }
+ if c.Count == 0 {
+ *c = *src // copy everything at once
+ src.Reset()
+ return
+ }
+ c.Count += src.Count
+ if src.Min < c.Min {
+ c.Min = src.Min
+ }
+ if src.Max > c.Max {
+ c.Max = src.Max
+ }
+ c.Sum += src.Sum
+ c.sumOfSquares += src.sumOfSquares
+ src.Reset()
+}
+
+// Histogram - written in go with inspiration from https://github.com/facebook/wdt/blob/master/util/Stats.h
+
+// The intervals are ]prev,current] so for "90" (previous is 80) the values in that bucket are >80 and <=90
+// that way a cumulative % up to that bucket means X% of the data <= 90 (or 100-X% > 90), works well for max too
+// There are 2 special buckets - the first one is from min to and including 0,
+// one after the last for value > last and up to max
+var (
+ histogramBucketValues = []int32{
+ 0, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, // initially increment buckets by 1, my amp goes to 11 !
+ 12, 14, 16, 18, 20, // then by 2
+ 25, 30, 35, 40, 45, 50, // then by 5
+ 60, 70, 80, 90, 100, // then by 10
+ 120, 140, 160, 180, 200, // line3 *10
+ 250, 300, 350, 400, 450, 500, // line4 *10
+ 600, 700, 800, 900, 1000, // line5 *10
+ 2000, 3000, 4000, 5000, 7500, 10000, // another order of magnitude coarsly covered
+ 20000, 30000, 40000, 50000, 75000, 100000, // ditto, the end
+ }
+ numValues = len(histogramBucketValues)
+ numBuckets = numValues + 1 // 1 special first bucket is <= 0; and 1 extra last bucket is > 100000
+ firstValue = float64(histogramBucketValues[0])
+ lastValue = float64(histogramBucketValues[numValues-1])
+ val2Bucket []int // ends at 1000. Remaining values will not be received in constant time.
+
+ maxArrayValue = int32(1000) // Last value looked up as O(1) array, the rest is linear search
+ maxArrayValueIndex = -1 // Index of maxArrayValue
+)
+
+// Histogram extends Counter and adds an histogram.
+// Must be created using NewHistogram or anotherHistogram.Clone()
+// and not directly.
+type Histogram struct {
+ Counter
+ Offset float64 // offset applied to data before fitting into buckets
+ Divider float64 // divider applied to data before fitting into buckets
+ // Don't access directly (outside of this package):
+ Hdata []int32 // numValues buckets (one more than values, for last one)
+}
+
+// For export of the data:
+
+// Interval is a range from start to end.
+// Intervals are left closed, open right except the last one which includes Max.
+// ie [Start, End[ with the next one being [PrevEnd, NextEnd[.
+type Interval struct {
+ Start float64
+ End float64
+}
+
+// Bucket is the data for 1 bucket: an Interval and the occurrence Count for
+// that interval.
+type Bucket struct {
+ Interval
+ Percent float64 // Cumulative percentile
+ Count int64 // How many in this bucket
+}
+
+// Percentile value for the percentile
+type Percentile struct {
+ Percentile float64 // For this Percentile
+ Value float64 // value at that Percentile
+}
+
+// HistogramData is the exported Histogram data, a sorted list of intervals
+// covering [Min, Max]. Pure data, so Counter for instance is flattened
+type HistogramData struct {
+ Count int64
+ Min float64
+ Max float64
+ Sum float64
+ Avg float64
+ StdDev float64
+ Data []Bucket
+ Percentiles []Percentile
+}
+
+// NewHistogram creates a new histogram (sets up the buckets).
+// Divider value can not be zero, otherwise returns zero
+func NewHistogram(Offset float64, Divider float64) *Histogram {
+ h := new(Histogram)
+ h.Offset = Offset
+ if Divider == 0 {
+ return nil
+ }
+ h.Divider = Divider
+ h.Hdata = make([]int32, numBuckets)
+ return h
+}
+
+// Val2Bucket values are kept in two different structures:
+// val2Bucket allows you to reach between 0 and 1000 in constant time
+func init() {
+ val2Bucket = make([]int, maxArrayValue)
+ maxArrayValueIndex = -1
+ for i, value := range histogramBucketValues {
+ if value == maxArrayValue {
+ maxArrayValueIndex = i
+ break
+ }
+ }
+ if maxArrayValueIndex == -1 {
+ log.Fatalf("Bug boundary maxArrayValue=%d not found in bucket list %v", maxArrayValue, histogramBucketValues)
+ }
+ idx := 0
+ for i := int32(0); i < maxArrayValue; i++ {
+ if i >= histogramBucketValues[idx] {
+ idx++
+ }
+ val2Bucket[i] = idx
+ }
+ // coding bug detection (aka impossible if it works once) until 1000
+ if idx != maxArrayValueIndex {
+ log.Fatalf("Bug in creating histogram index idx %d vs index %d up to %d", idx, int(maxArrayValue), maxArrayValue)
+ }
+}
+
+// lookUpIdx looks for scaledValue's index in histogramBucketValues
+// TODO: change linear time to O(log(N)) with binary search
+func lookUpIdx(scaledValue int) int {
+ scaledValue32 := int32(scaledValue)
+ if scaledValue32 < maxArrayValue { //constant
+ return val2Bucket[scaledValue]
+ }
+ for i := maxArrayValueIndex; i < numValues; i++ {
+ if histogramBucketValues[i] > scaledValue32 {
+ return i
+ }
+ }
+ log.Fatalf("never reached/bug")
+ return 0
+}
+
+// Record records a data point.
+func (h *Histogram) Record(v float64) {
+ h.RecordN(v, 1)
+}
+
+// RecordN efficiently records a data point N times.
+func (h *Histogram) RecordN(v float64, n int) {
+ h.Counter.RecordN(v, n)
+ h.record(v, n)
+}
+
+// Records v value to count times
+func (h *Histogram) record(v float64, count int) {
+ // Scaled value to bucketize - we subtract epsilon because the interval
+ // is open to the left ] start, end ] so when exactly on start it has
+ // to fall on the previous bucket. TODO add boundary tests
+ scaledVal := (v-h.Offset)/h.Divider - 0.0001
+ var idx int
+ if scaledVal <= firstValue {
+ idx = 0
+ } else if scaledVal > lastValue {
+ idx = numBuckets - 1 // last bucket is for > last value
+ } else {
+ // else we look it up
+ idx = lookUpIdx(int(scaledVal))
+ }
+ h.Hdata[idx] += int32(count)
+}
+
+// CalcPercentile returns the value for an input percentile
+// e.g. for 90. as input returns an estimate of the original value threshold
+// where 90.0% of the data is below said threshold.
+// with 3 data points 10, 20, 30; p0-p33.33 == 10, p 66.666 = 20, p100 = 30
+// p33.333 - p66.666 = linear between 10 and 20; so p50 = 15
+// TODO: consider spreading the count of the bucket evenly from start to end
+// so the % grows by at least to 1/N on start of range, and for last range
+// when start == end we should get to that % faster
+func (e *HistogramData) CalcPercentile(percentile float64) float64 {
+ if len(e.Data) == 0 {
+ log.Errf("Unexpected call to CalcPercentile(%g) with no data", percentile)
+ return 0
+ }
+ if percentile >= 100 {
+ return e.Max
+ }
+ // We assume Min is at least a single point so at least covers 1/Count %
+ pp := 100. / float64(e.Count) // previous percentile
+ if percentile <= pp {
+ return e.Min
+ }
+ for _, cur := range e.Data {
+ if percentile <= cur.Percent {
+ return cur.Start + (percentile-pp)/(cur.Percent-pp)*(cur.End-cur.Start)
+ }
+ pp = cur.Percent
+ }
+ return e.Max // not reached
+}
+
+// Export translate the internal representation of the histogram data in
+// an externally usable one. Calculates the requested Percentiles.
+func (h *Histogram) Export() *HistogramData {
+ var res HistogramData
+ res.Count = h.Counter.Count
+ res.Min = h.Counter.Min
+ res.Max = h.Counter.Max
+ res.Sum = h.Counter.Sum
+ res.Avg = h.Counter.Avg()
+ res.StdDev = h.Counter.StdDev()
+ multiplier := h.Divider
+ offset := h.Offset
+ // calculate the last bucket index
+ lastIdx := -1
+ for i := numBuckets - 1; i >= 0; i-- {
+ if h.Hdata[i] > 0 {
+ lastIdx = i
+ break
+ }
+ }
+ if lastIdx == -1 {
+ return &res
+ }
+
+ // previous bucket value:
+ prev := histogramBucketValues[0]
+ var total int64
+ ctrTotal := float64(h.Count)
+ // export the data of each bucket of the histogram
+ for i := 0; i <= lastIdx; i++ {
+ if h.Hdata[i] == 0 {
+ // empty bucket: skip it but update prev which is needed for next iter
+ if i < numValues {
+ prev = histogramBucketValues[i]
+ }
+ continue
+ }
+ var b Bucket
+ total += int64(h.Hdata[i])
+ if len(res.Data) == 0 {
+ // First entry, start is min
+ b.Start = h.Min
+ } else {
+ b.Start = multiplier*float64(prev) + offset
+ }
+ b.Percent = 100. * float64(total) / ctrTotal
+ if i < numValues {
+ cur := histogramBucketValues[i]
+ b.End = multiplier*float64(cur) + offset
+ prev = cur
+ } else {
+ // Last Entry
+ b.Start = multiplier*float64(prev) + offset
+ b.End = h.Max
+ }
+ b.Count = int64(h.Hdata[i])
+ res.Data = append(res.Data, b)
+ }
+ res.Data[len(res.Data)-1].End = h.Max
+ return &res
+}
+
+// CalcPercentiles calculates the requested percentile and add them to the
+// HistogramData. Potential TODO: sort or assume sorting and calculate all
+// the percentiles in 1 pass (greater and greater values).
+func (e *HistogramData) CalcPercentiles(percentiles []float64) *HistogramData {
+ if e.Count == 0 {
+ return e
+ }
+ for _, p := range percentiles {
+ e.Percentiles = append(e.Percentiles, Percentile{p, e.CalcPercentile(p)})
+ }
+ return e
+}
+
+// Print dumps the histogram (and counter) to the provided writer.
+// Also calculates the percentile.
+func (e *HistogramData) Print(out io.Writer, msg string) {
+ if len(e.Data) == 0 {
+ _, _ = fmt.Fprintf(out, "%s : no data\n", msg) // nolint: gas
+ return
+ }
+ // the base counter part:
+ _, _ = fmt.Fprintf(out, "%s : count %d avg %.8g +/- %.4g min %g max %g sum %.9g\n",
+ msg, e.Count, e.Avg, e.StdDev, e.Min, e.Max, e.Sum)
+ _, _ = fmt.Fprintln(out, "# range, mid point, percentile, count")
+ sep := ">="
+ for i, b := range e.Data {
+ if i > 0 {
+ sep = ">" // last interval is inclusive (of max value)
+ }
+ _, _ = fmt.Fprintf(out, "%s %.6g <= %.6g , %.6g , %.2f, %d\n", sep, b.Start, b.End, (b.Start+b.End)/2., b.Percent, b.Count)
+ }
+
+ // print the information of target percentiles
+ for _, p := range e.Percentiles {
+ _, _ = fmt.Fprintf(out, "# target %g%% %.6g\n", p.Percentile, p.Value) // nolint: gas
+ }
+}
+
+// Print dumps the histogram (and counter) to the provided writer.
+// Also calculates the percentiles. Use Export() once and Print if you
+// are going to need the Export results too.
+func (h *Histogram) Print(out io.Writer, msg string, percentiles []float64) {
+ h.Export().CalcPercentiles(percentiles).Print(out, msg)
+}
+
+// Log Logs the histogram to the counter.
+func (h *Histogram) Log(msg string, percentiles []float64) {
+ var b bytes.Buffer
+ w := bufio.NewWriter(&b)
+ h.Print(w, msg, percentiles)
+ w.Flush() // nolint: gas,errcheck
+ log.Infof("%s", b.Bytes())
+}
+
+// Reset clears the data. Reset it to NewHistogram state.
+func (h *Histogram) Reset() {
+ h.Counter.Reset()
+ // Leave Offset and Divider alone
+ for i := 0; i < len(h.Hdata); i++ {
+ h.Hdata[i] = 0
+ }
+}
+
+// Clone returns a copy of the histogram.
+func (h *Histogram) Clone() *Histogram {
+ copy := NewHistogram(h.Offset, h.Divider)
+ copy.CopyFrom(h)
+ return copy
+}
+
+// CopyFrom sets the content of this object to a copy of the src.
+func (h *Histogram) CopyFrom(src *Histogram) {
+ h.Counter = src.Counter
+ h.copyHDataFrom(src)
+}
+
+// copyHDataFrom appends histogram data values to this object from the src.
+// Src histogram data values will be appended according to this object's
+// offset and divider
+func (h *Histogram) copyHDataFrom(src *Histogram) {
+ if h.Divider == src.Divider && h.Offset == src.Offset {
+ for i := 0; i < len(h.Hdata); i++ {
+ h.Hdata[i] += src.Hdata[i]
+ }
+ return
+ }
+
+ hData := src.Export()
+ for _, data := range hData.Data {
+ h.record((data.Start+data.End)/2, int(data.Count))
+ }
+}
+
+// Merge two different histogram with different scale parameters
+// Lowest offset and highest divider value will be selected on new Histogram as scale parameters
+func Merge(h1 *Histogram, h2 *Histogram) *Histogram {
+ divider := h1.Divider
+ offset := h1.Offset
+ if h2.Divider > h1.Divider {
+ divider = h2.Divider
+ }
+ if h2.Offset < h1.Offset {
+ offset = h2.Offset
+ }
+ newH := NewHistogram(offset, divider)
+ newH.Transfer(h1)
+ newH.Transfer(h2)
+ return newH
+}
+
+// Transfer merges the data from src into this Histogram and clears src.
+func (h *Histogram) Transfer(src *Histogram) {
+ if src.Count == 0 {
+ return
+ }
+ if h.Count == 0 {
+ h.CopyFrom(src)
+ src.Reset()
+ return
+ }
+ h.copyHDataFrom(src)
+ h.Counter.Transfer(&src.Counter)
+ src.Reset()
+}
+
+// ParsePercentiles extracts the percentiles from string (flag).
+func ParsePercentiles(percentiles string) ([]float64, error) {
+ percs := strings.Split(percentiles, ",") // will make a size 1 array for empty input!
+ res := make([]float64, 0, len(percs))
+ for _, pStr := range percs {
+ pStr = strings.TrimSpace(pStr)
+ if len(pStr) == 0 {
+ continue
+ }
+ p, err := strconv.ParseFloat(pStr, 64)
+ if err != nil {
+ return res, err
+ }
+ res = append(res, p)
+ }
+ if len(res) == 0 {
+ return res, errors.New("list can't be empty")
+ }
+ log.LogVf("Will use %v for percentiles", res)
+ return res, nil
+}
+
+// RoundToDigits rounds the input to digits number of digits after decimal point.
+// Note this incorrectly rounds the last digit of negative numbers.
+func RoundToDigits(v float64, digits int) float64 {
+ p := math.Pow(10, float64(digits))
+ return math.Floor(v*p+0.5) / p
+}
+
+// Round rounds to 4 digits after the decimal point.
+func Round(v float64) float64 {
+ return RoundToDigits(v, 4)
+}
diff --git a/vendor/fortio.org/fortio/stats/stats_test.go b/vendor/fortio.org/fortio/stats/stats_test.go
new file mode 100644
index 0000000000..0a2aff2840
--- /dev/null
+++ b/vendor/fortio.org/fortio/stats/stats_test.go
@@ -0,0 +1,814 @@
+// Copyright 2017 Istio Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stats
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "math/rand"
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+ "testing"
+
+ "fortio.org/fortio/log"
+)
+
+func TestCounter(t *testing.T) {
+ c := NewHistogram(22, 0.1)
+ var b bytes.Buffer
+ w := bufio.NewWriter(&b)
+ c.Counter.Print(w, "test1c")
+ expected := "test1c : count 0 avg NaN +/- NaN min 0 max 0 sum 0\n"
+ c.Print(w, "test1h", []float64{50.0})
+ expected += "test1h : no data\n"
+ *log.LogFileAndLine = false
+ log.SetFlags(0)
+ log.SetOutput(w)
+ c.Export().CalcPercentile(50)
+ expected += "E > Unexpected call to CalcPercentile(50) with no data\n"
+ c.Record(23.1)
+ c.Counter.Print(w, "test2")
+ expected += "test2 : count 1 avg 23.1 +/- 0 min 23.1 max 23.1 sum 23.1\n"
+ c.Record(22.9)
+ c.Counter.Print(w, "test3")
+ expected += "test3 : count 2 avg 23 +/- 0.1 min 22.9 max 23.1 sum 46\n"
+ c.Record(23.1)
+ c.Record(22.9)
+ c.Counter.Print(w, "test4")
+ expected += "test4 : count 4 avg 23 +/- 0.1 min 22.9 max 23.1 sum 92\n"
+ c.Record(1023)
+ c.Record(-977)
+ c.Counter.Print(w, "test5")
+ // note that stddev of 577.4 below is... whatever the code said
+ finalExpected := " : count 6 avg 23 +/- 577.4 min -977 max 1023 sum 138\n"
+ expected += "test5" + finalExpected
+ // Try the Log() function too:
+ log.SetOutput(w)
+ log.SetFlags(0)
+ *log.LogFileAndLine = false
+ *log.LogPrefix = ""
+ c.Counter.Log("testLogC")
+ expected += "I testLogC" + finalExpected
+ w.Flush() // nolint: errcheck
+ actual := b.String()
+ if actual != expected {
+ t.Errorf("unexpected1:\n%s\nvs:\n%s\n", actual, expected)
+ }
+ b.Reset()
+ c.Log("testLogH", nil)
+ w.Flush() // nolint: errcheck
+ actual = b.String()
+ expected = "I testLogH" + finalExpected + `# range, mid point, percentile, count
+>= -977 <= 22 , -477.5 , 16.67, 1
+> 22.8 <= 22.9 , 22.85 , 50.00, 2
+> 23 <= 23.1 , 23.05 , 83.33, 2
+> 1022 <= 1023 , 1022.5 , 100.00, 1
+`
+ if actual != expected {
+ t.Errorf("unexpected2:\n%s\nvs:\n%s\n", actual, expected)
+ }
+ log.SetOutput(os.Stderr)
+}
+
+func TestTransferCounter(t *testing.T) {
+ var b bytes.Buffer
+ w := bufio.NewWriter(&b)
+ var c1 Counter
+ c1.Record(10)
+ c1.Record(20)
+ var c2 Counter
+ c2.Record(80)
+ c2.Record(90)
+ c1a := c1
+ c2a := c2
+ var c3 Counter
+ c1.Print(w, "c1 before merge")
+ c2.Print(w, "c2 before merge")
+ c1.Transfer(&c2)
+ c1.Print(w, "mergedC1C2")
+ c2.Print(w, "c2 after merge")
+ // reverse (exercise min if)
+ c2a.Transfer(&c1a)
+ c2a.Print(w, "mergedC2C1")
+ // test transfer into empty - min should be set
+ c3.Transfer(&c1)
+ c1.Print(w, "c1 should now be empty")
+ c3.Print(w, "c3 after merge - 1")
+ // test empty transfer - shouldn't reset min/no-op
+ c3.Transfer(&c2)
+ c3.Print(w, "c3 after merge - 2")
+ w.Flush() // nolint: errcheck
+ actual := b.String()
+ expected := `c1 before merge : count 2 avg 15 +/- 5 min 10 max 20 sum 30
+c2 before merge : count 2 avg 85 +/- 5 min 80 max 90 sum 170
+mergedC1C2 : count 4 avg 50 +/- 35.36 min 10 max 90 sum 200
+c2 after merge : count 0 avg NaN +/- NaN min 0 max 0 sum 0
+mergedC2C1 : count 4 avg 50 +/- 35.36 min 10 max 90 sum 200
+c1 should now be empty : count 0 avg NaN +/- NaN min 0 max 0 sum 0
+c3 after merge - 1 : count 4 avg 50 +/- 35.36 min 10 max 90 sum 200
+c3 after merge - 2 : count 4 avg 50 +/- 35.36 min 10 max 90 sum 200
+`
+ if actual != expected {
+ t.Errorf("unexpected:\n%s\tvs:\n%s", actual, expected)
+ }
+}
+
+func TestZeroDivider(t *testing.T) {
+ h := NewHistogram(0, 0)
+ if h != nil {
+ t.Errorf("Histogram can not be created when divider is zero")
+ }
+}
+
+func TestHistogram(t *testing.T) {
+ h := NewHistogram(0, 10)
+ h.Record(1)
+ h.Record(251)
+ h.Record(501)
+ h.Record(751)
+ h.Record(1001)
+ h.Print(os.Stdout, "TestHistogram", []float64{50})
+ e := h.Export()
+ for i := 25; i <= 100; i += 25 {
+ fmt.Printf("%d%% at %g\n", i, e.CalcPercentile(float64(i)))
+ }
+ var tests = []struct {
+ actual float64
+ expected float64
+ msg string
+ }{
+ {h.Avg(), 501, "avg"},
+ {e.CalcPercentile(-1), 1, "p-1"}, // not valid but should return min
+ {e.CalcPercentile(0), 1, "p0"},
+ {e.CalcPercentile(0.1), 1, "p0.1"},
+ {e.CalcPercentile(1), 1, "p1"},
+ {e.CalcPercentile(20), 1, "p20"}, // 20% = first point, 1st bucket is 1-10
+ {e.CalcPercentile(20.01), 250.025, "p20.01"}, // near beginning of bucket of 2nd pt
+ {e.CalcPercentile(39.99), 299.975, "p39.99"},
+ {e.CalcPercentile(40), 300, "p40"},
+ {e.CalcPercentile(50), 550, "p50"},
+ {e.CalcPercentile(75), 775, "p75"},
+ {e.CalcPercentile(90), 1000.5, "p90"},
+ {e.CalcPercentile(99), 1000.95, "p99"},
+ {e.CalcPercentile(99.9), 1000.995, "p99.9"},
+ {e.CalcPercentile(100), 1001, "p100"},
+ {e.CalcPercentile(101), 1001, "p101"},
+ }
+ for _, tst := range tests {
+ if tst.actual != tst.expected {
+ t.Errorf("%s: got %g, not as expected %g", tst.msg, tst.actual, tst.expected)
+ }
+ }
+}
+
+func TestPercentiles1(t *testing.T) {
+ h := NewHistogram(0, 10)
+ h.Record(10)
+ h.Record(20)
+ h.Record(30)
+ h.Print(os.Stdout, "TestPercentiles1", []float64{50})
+ e := h.Export()
+ for i := 0; i <= 100; i += 10 {
+ fmt.Printf("%d%% at %g\n", i, e.CalcPercentile(float64(i)))
+ }
+ var tests = []struct {
+ actual float64
+ expected float64
+ msg string
+ }{
+ {h.Avg(), 20, "avg"},
+ {e.CalcPercentile(-1), 10, "p-1"}, // not valid but should return min
+ {e.CalcPercentile(0), 10, "p0"},
+ {e.CalcPercentile(0.1), 10, "p0.1"},
+ {e.CalcPercentile(1), 10, "p1"},
+ {e.CalcPercentile(20), 10, "p20"}, // 20% = first point, 1st bucket is 10
+ {e.CalcPercentile(33.33), 10, "p33.33"}, // near beginning of bucket of 2nd pt
+ {e.CalcPercentile(100 / 3), 10, "p100/3"}, // near beginning of bucket of 2nd pt
+ {e.CalcPercentile(33.34), 10.002, "p33.34"}, // near beginning of bucket of 2nd pt
+ {e.CalcPercentile(50), 15, "p50"},
+ {e.CalcPercentile(66.66), 19.998, "p66.66"},
+ {e.CalcPercentile(100. * 2 / 3), 20, "p100*2/3"},
+ {e.CalcPercentile(66.67), 20.001, "p66.67"},
+ {e.CalcPercentile(75), 22.5, "p75"},
+ {e.CalcPercentile(99), 29.7, "p99"},
+ {e.CalcPercentile(99.9), 29.97, "p99.9"},
+ {e.CalcPercentile(100), 30, "p100"},
+ {e.CalcPercentile(101), 30, "p101"},
+ }
+ for _, tst := range tests {
+ actualRounded := float64(int64(tst.actual*100000+0.5)) / 100000.
+ if actualRounded != tst.expected {
+ t.Errorf("%s: got %g (%g), not as expected %g", tst.msg, actualRounded, tst.actual, tst.expected)
+ }
+ }
+}
+
+// TestHistogramData exports a 10-point histogram (offset 0, divider 1,
+// including a negative value and repeated values via RecordN) and checks
+// Count, Avg and each requested percentile of the exported data.
+func TestHistogramData(t *testing.T) {
+	h := NewHistogram(0, 1)
+	h.Record(-1)
+	h.RecordN(0, 3)
+	h.Record(1)
+	h.Record(2)
+	h.Record(3)
+	h.Record(4)
+	h.RecordN(5, 2)
+	percs := []float64{0, 1, 10, 25, 40, 50, 60, 70, 80, 90, 99, 100}
+	e := h.Export().CalcPercentiles(percs)
+	e.Print(os.Stdout, "TestHistogramData")
+	CheckEquals(t, int64(10), e.Count, "10 data points")
+	// Sum is -1+0*3+1+2+3+4+5*2 = 19 over 10 points.
+	CheckEquals(t, 1.9, e.Avg, "avg should be 1.9")
+	CheckEquals(t, e.Percentiles[0], Percentile{0, -1}, "p0 should be -1 (min)")
+	CheckEquals(t, e.Percentiles[1], Percentile{1, -1}, "p1 should be -1 (min)")
+	CheckEquals(t, e.Percentiles[2], Percentile{10, -1}, "p10 should be -1 (1/10 at min)")
+	CheckEquals(t, e.Percentiles[3], Percentile{25, -0.5}, "p25 should be half between -1 and 0")
+	CheckEquals(t, e.Percentiles[4], Percentile{40, 0}, "p40 should still be 0 (4/10 data pts at 0)")
+	CheckEquals(t, e.Percentiles[5], Percentile{50, 1}, "p50 should 1 (5th/10 point is 1)")
+	CheckEquals(t, e.Percentiles[6], Percentile{60, 2}, "p60 should 2 (6th/10 point is 2)")
+	CheckEquals(t, e.Percentiles[7], Percentile{70, 3}, "p70 should 3 (7th/10 point is 3)")
+	CheckEquals(t, e.Percentiles[8], Percentile{80, 4}, "p80 should 4 (8th/10 point is 4)")
+	CheckEquals(t, e.Percentiles[9], Percentile{90, 4.5}, "p90 should between 4 and 5 (2 points in bucket)")
+	CheckEquals(t, e.Percentiles[10], Percentile{99, 4.95}, "p99")
+	CheckEquals(t, e.Percentiles[11], Percentile{100, 5}, "p100 should 5 (10th/10 point is 5 and max is 5)")
+	h.Log("test multi count", percs)
+}
+
+// CheckEquals fails the test (and prints the caller's filename:linenum
+// together with both values) when actual and expected are not equal.
+func CheckEquals(t *testing.T, actual interface{}, expected interface{}, msg interface{}) {
+	if expected == actual {
+		return
+	}
+	// Report the caller's location, not this helper's.
+	_, caller, lineNum, _ := runtime.Caller(1)
+	if idx := strings.LastIndex(caller, "/"); idx >= 0 {
+		caller = caller[idx+1:]
+	}
+	fmt.Printf("%s:%d mismatch!\nactual:\n%+v\nexpected:\n%+v\nfor %+v\n", caller, lineNum, actual, expected, msg)
+	t.Fail()
+}
+
+// Assert fails the test (and prints the caller's filename:linenum and msg)
+// when cond is false.
+func Assert(t *testing.T, cond bool, msg interface{}) {
+	if cond {
+		return
+	}
+	// Report the caller's location, not this helper's.
+	_, caller, lineNum, _ := runtime.Caller(1)
+	if idx := strings.LastIndex(caller, "/"); idx >= 0 {
+		caller = caller[idx+1:]
+	}
+	fmt.Printf("%s:%d assert failure: %+v\n", caller, lineNum, msg)
+	t.Fail()
+}
+
+// CheckGenericHistogramDataProperties checks invariants that should hold
+// for the exported data of any non empty histogram: the first bucket
+// starts at Min, the last ends at Max with 100% cumulative percentage,
+// buckets are ordered and non-empty, percentages strictly increase, and
+// the bucket counts add up to the total Count.
+func CheckGenericHistogramDataProperties(t *testing.T, e *HistogramData) {
+	n := len(e.Data)
+	if n <= 0 {
+		t.Error("Unexpected empty histogram")
+		return
+	}
+	CheckEquals(t, e.Data[0].Start, e.Min, "first bucket starts at min")
+	CheckEquals(t, e.Data[n-1].End, e.Max, "end of last bucket is max")
+	CheckEquals(t, e.Data[n-1].Percent, 100., "last bucket is 100%")
+	// All buckets in order
+	var prev Bucket
+	var sum int64
+	for i := 0; i < n; i++ {
+		b := e.Data[i]
+		Assert(t, b.Start <= b.End, "End should always be after Start")
+		Assert(t, b.Count > 0, "Every exported bucket should have data")
+		Assert(t, b.Percent > 0, "Percentage should always be positive")
+		sum += b.Count
+		if i > 0 {
+			Assert(t, b.Start >= prev.End, "Start of next bucket >= end of previous")
+			Assert(t, b.Percent > prev.Percent, "Percentage should be ever increasing")
+		}
+		prev = b
+	}
+	CheckEquals(t, sum, e.Count, "Sum in buckets should add up to Counter's count")
+}
+
+// TestHistogramExport1 checks Export/CalcPercentiles first on an empty
+// histogram, then on 5 recorded values, including the exact JSON
+// serialization of the exported data as a golden string.
+func TestHistogramExport1(t *testing.T) {
+	h := NewHistogram(0, 10)
+	e := h.Export()
+	CheckEquals(t, e.Count, int64(0), "empty is 0 count")
+	CheckEquals(t, len(e.Data), 0, "empty is no bucket data")
+	h.Record(-137.4)
+	h.Record(251)
+	h.Record(501)
+	h.Record(751)
+	h.Record(1001.67)
+	e = h.Export().CalcPercentiles([]float64{50, 99, 99.9})
+	CheckEquals(t, e.Count, int64(5), "count")
+	CheckEquals(t, e.Min, -137.4, "min")
+	CheckEquals(t, e.Max, 1001.67, "max")
+	n := len(e.Data)
+	CheckEquals(t, n, 5, "number of buckets")
+	CheckGenericHistogramDataProperties(t, e)
+	data, err := json.MarshalIndent(e, "", "  ")
+	if err != nil {
+		t.Error(err)
+	}
+	// Golden JSON: one bucket per recorded value plus the 3 requested
+	// percentiles, serialized with two-space indentation.
+	CheckEquals(t, string(data), `{
+  "Count": 5,
+  "Min": -137.4,
+  "Max": 1001.67,
+  "Sum": 2367.27,
+  "Avg": 473.454,
+  "StdDev": 394.8242896074151,
+  "Data": [
+    {
+      "Start": -137.4,
+      "End": 0,
+      "Percent": 20,
+      "Count": 1
+    },
+    {
+      "Start": 250,
+      "End": 300,
+      "Percent": 40,
+      "Count": 1
+    },
+    {
+      "Start": 500,
+      "End": 600,
+      "Percent": 60,
+      "Count": 1
+    },
+    {
+      "Start": 700,
+      "End": 800,
+      "Percent": 80,
+      "Count": 1
+    },
+    {
+      "Start": 1000,
+      "End": 1001.67,
+      "Percent": 100,
+      "Count": 1
+    }
+  ],
+  "Percentiles": [
+    {
+      "Percentile": 50,
+      "Value": 550
+    },
+    {
+      "Percentile": 99,
+      "Value": 1001.5865
+    },
+    {
+      "Percentile": 99.9,
+      "Value": 1001.66165
+    }
+  ]
+}`, "Json output")
+}
+
+const (
+	// NumRandomHistogram is how many randomized histograms
+	// TestHistogramExportRandom generates and checks.
+	NumRandomHistogram = 2000
+)
+
+// TestHistogramExportRandom builds NumRandomHistogram histograms with
+// random offset, divider and number of entries, then checks the generic
+// exported-data invariants plus count/min/max and the p0/p100 percentiles.
+func TestHistogramExportRandom(t *testing.T) {
+	for i := 0; i < NumRandomHistogram; i++ {
+		// offset [-500,500[ divisor ]0,100]
+		offset := (rand.Float64() - 0.5) * 1000
+		div := 100 * (1 - rand.Float64())
+		numEntries := 1 + rand.Int31n(10000)
+		//fmt.Printf("new histogram with offset %g, div %g - will insert %d entries\n", offset, div, numEntries)
+		h := NewHistogram(offset, div)
+		var n int32
+		var min float64
+		var max float64
+		for ; n < numEntries; n++ {
+			v := 3000 * (rand.Float64() - 0.25)
+			// Track the expected min/max independently of the histogram.
+			if n == 0 {
+				min = v
+				max = v
+			} else {
+				if v < min {
+					min = v
+				} else if v > max {
+					max = v
+				}
+			}
+			h.Record(v)
+		}
+		e := h.Export().CalcPercentiles([]float64{0, 50, 100})
+		CheckGenericHistogramDataProperties(t, e)
+		CheckEquals(t, h.Count, int64(numEntries), "num entries should match")
+		CheckEquals(t, h.Min, min, "Min should match")
+		CheckEquals(t, h.Max, max, "Max should match")
+		CheckEquals(t, e.Percentiles[0].Value, min, "p0 should be min")
+		CheckEquals(t, e.Percentiles[2].Value, max, "p100 should be max")
+	}
+}
+
+// TestHistogramLastBucket verifies that values past the highest regular
+// bucket boundary all land in the catch-all last bucket, comparing the
+// printed output against a golden string.
+func TestHistogramLastBucket(t *testing.T) {
+	// Use -1 offset so first bucket is negative values
+	h := NewHistogram( /* offset */ -1 /*scale */, 1)
+	h.Record(-1)
+	h.Record(0)
+	h.Record(1)
+	h.Record(3)
+	h.Record(10)
+	h.Record(99999)  // last value of one before last bucket 100k-offset
+	h.Record(100000) // first value of the extra bucket
+	h.Record(200000)
+	var b bytes.Buffer
+	w := bufio.NewWriter(&b)
+	h.Print(w, "testLastBucket", []float64{90})
+	w.Flush() // nolint: errcheck
+	actual := b.String()
+	// stdev part is not verified/could be brittle
+	expected := `testLastBucket : count 8 avg 50001.5 +/- 7.071e+04 min -1 max 200000 sum 400012
+# range, mid point, percentile, count
+>= -1 <= -1 , -1 , 12.50, 1
+> -1 <= 0 , -0.5 , 25.00, 1
+> 0 <= 1 , 0.5 , 37.50, 1
+> 2 <= 3 , 2.5 , 50.00, 1
+> 9 <= 10 , 9.5 , 62.50, 1
+> 74999 <= 99999 , 87499 , 75.00, 1
+> 99999 <= 200000 , 150000 , 100.00, 2
+# target 90% 160000
+`
+	if actual != expected {
+		t.Errorf("unexpected:\n%s\tvs:\n%s", actual, expected)
+	}
+}
+
+// TestHistogramNegativeNumbers records one negative and one positive value
+// (offset -10) and checks the printed buckets and percentile targets
+// against a golden string.
+func TestHistogramNegativeNumbers(t *testing.T) {
+	h := NewHistogram( /* offset */ -10 /*scale */, 1)
+	h.Record(-10)
+	h.Record(10)
+	var b bytes.Buffer
+	w := bufio.NewWriter(&b)
+	h.Print(w, "testHistogramWithNegativeNumbers", []float64{1, 50, 75})
+	w.Flush() // nolint: errcheck
+	actual := b.String()
+	// stdev part is not verified/could be brittle
+	expected := `testHistogramWithNegativeNumbers : count 2 avg 0 +/- 10 min -10 max 10 sum 0
+# range, mid point, percentile, count
+>= -10 <= -10 , -10 , 50.00, 1
+> 8 <= 10 , 9 , 100.00, 1
+# target 1% -10
+# target 50% -10
+# target 75% 9
+`
+	if actual != expected {
+		t.Errorf("unexpected:\n%s\tvs:\n%s", actual, expected)
+	}
+}
+
+// TestMergeHistogramsWithDifferentScales merges pairs of histograms with
+// different offset/divider and checks (per the assertions below) that the
+// merged histogram adopts the larger Divider and the smaller Offset of the
+// two inputs, and that the printed output matches the golden string.
+func TestMergeHistogramsWithDifferentScales(t *testing.T) {
+	tP := []float64{100.}
+	var b bytes.Buffer
+	w := bufio.NewWriter(&b)
+	h1 := NewHistogram(0, 10)
+	h1.Record(20)
+	h1.Record(50)
+	h1.Record(90)
+	h2 := NewHistogram(2, 100)
+	h2.Record(30)
+	h2.Record(40)
+	h2.Record(50)
+	newH := Merge(h1, h2)
+	newH.Print(w, "h1 and h2 merged", tP)
+	w.Flush()
+	actual := b.String()
+	expected := `h1 and h2 merged : count 6 avg 46.666667 +/- 22.11 min 20 max 90 sum 280
+# range, mid point, percentile, count
+>= 20 <= 90 , 55 , 100.00, 6
+# target 100% 90
+`
+	if newH.Divider != h2.Divider {
+		t.Errorf("unexpected:\n%f\tvs:\n%f", newH.Divider, h2.Divider)
+	}
+	if newH.Offset != h1.Offset {
+		t.Errorf("unexpected:\n%f\tvs:\n%f", newH.Offset, h1.Offset)
+	}
+	if actual != expected {
+		t.Errorf("unexpected:\n%s\tvs:\n%s", actual, expected)
+	}
+
+	// Second pair: h3 has both the larger divider and the larger offset,
+	// so this time the merged divider comes from h3 and the offset from h4.
+	b.Reset()
+	h3 := NewHistogram(5, 200)
+	h3.Record(10000)
+	h3.Record(5000)
+	h3.Record(9000)
+	h4 := NewHistogram(2, 100)
+	h4.Record(300)
+	h4.Record(400)
+	h4.Record(50)
+	newH = Merge(h3, h4)
+	newH.Print(w, "h3 and h4 merged", tP)
+	w.Flush()
+	actual = b.String()
+	expected = `h3 and h4 merged : count 6 avg 4125 +/- 4167 min 50 max 10000 sum 24750
+# range, mid point, percentile, count
+>= 50 <= 202 , 126 , 16.67, 1
+> 202 <= 402 , 302 , 50.00, 2
+> 5002 <= 6002 , 5502 , 66.67, 1
+> 8002 <= 9002 , 8502 , 83.33, 1
+> 9002 <= 10000 , 9501 , 100.00, 1
+# target 100% 10000
+`
+	if newH.Divider != h3.Divider {
+		t.Errorf("unexpected:\n%f\tvs:\n%f", newH.Divider, h3.Divider)
+	}
+	if newH.Offset != h4.Offset {
+		t.Errorf("unexpected:\n%f\tvs:\n%f", newH.Offset, h4.Offset)
+	}
+	if actual != expected {
+		t.Errorf("unexpected:\n%s\tvs:\n%s", actual, expected)
+	}
+}
+
+// TestTransferHistogramWithDifferentScales transfers h2 (different
+// offset/divider) into h1 and checks, via golden printed output, that h1
+// contains the combined data and h2 is left empty.
+func TestTransferHistogramWithDifferentScales(t *testing.T) {
+	tP := []float64{75.}
+	var b bytes.Buffer
+	w := bufio.NewWriter(&b)
+	h1 := NewHistogram(2, 15)
+	h1.Record(30)
+	h1.Record(40)
+	h1.Record(50)
+	h2 := NewHistogram(0, 10)
+	h2.Record(20)
+	h2.Record(23)
+	h2.Record(90)
+	h1.Print(w, "h1 before merge", tP)
+	h2.Print(w, "h2 before merge", tP)
+	h1.Transfer(h2)
+	h1.Print(w, "merged h2 -> h1", tP)
+	h2.Print(w, "h2 should now be empty", tP)
+	w.Flush()
+	actual := b.String()
+	expected := `h1 before merge : count 3 avg 40 +/- 8.165 min 30 max 50 sum 120
+# range, mid point, percentile, count
+>= 30 <= 32 , 31 , 33.33, 1
+> 32 <= 47 , 39.5 , 66.67, 1
+> 47 <= 50 , 48.5 , 100.00, 1
+# target 75% 47.75
+h2 before merge : count 3 avg 44.333333 +/- 32.31 min 20 max 90 sum 133
+# range, mid point, percentile, count
+>= 20 <= 20 , 20 , 33.33, 1
+> 20 <= 30 , 25 , 66.67, 1
+> 80 <= 90 , 85 , 100.00, 1
+# target 75% 82.5
+merged h2 -> h1 : count 6 avg 42.166667 +/- 23.67 min 20 max 90 sum 253
+# range, mid point, percentile, count
+>= 20 <= 32 , 26 , 50.00, 3
+> 32 <= 47 , 39.5 , 66.67, 1
+> 47 <= 62 , 54.5 , 83.33, 1
+> 77 <= 90 , 83.5 , 100.00, 1
+# target 75% 54.5
+h2 should now be empty : no data
+`
+	if actual != expected {
+		t.Errorf("unexpected:\n%s\tvs:\n%s", actual, expected)
+	}
+}
+
+// TestTransferHistogram exercises Transfer in several configurations:
+// normal transfer, the reverse direction (to exercise the min update
+// branch), transfer into an empty histogram, and transfer of an
+// already-emptied histogram (which must be a no-op). All results are
+// compared against one golden printed output.
+func TestTransferHistogram(t *testing.T) {
+	tP := []float64{75}
+	var b bytes.Buffer
+	w := bufio.NewWriter(&b)
+	h1 := NewHistogram(0, 10)
+	h1.Record(10)
+	h1.Record(20)
+	h2 := NewHistogram(0, 10)
+	h2.Record(80)
+	h2.Record(90)
+	h1a := h1.Clone()
+	h1a.Record(50) // add extra pt to make sure h1a and h1 are distinct
+	h2a := h2.Clone()
+	h3 := NewHistogram(0, 10)
+	h1.Print(w, "h1 before merge", tP)
+	h2.Print(w, "h2 before merge", tP)
+	h1.Transfer(h2)
+	h1.Print(w, "merged h2 -> h1", tP)
+	h2.Print(w, "h2 after merge", tP)
+	// reverse (exercise min if)
+	h2a.Transfer(h1a)
+	h2a.Print(w, "merged h1a -> h2a", tP)
+	// test transfer into empty - min should be set
+	h3.Transfer(h1)
+	h1.Print(w, "h1 should now be empty", tP)
+	h3.Print(w, "h3 after merge - 1", tP)
+	// test empty transfer - shouldn't reset min/no-op
+	h3.Transfer(h2)
+	h3.Print(w, "h3 after merge - 2", tP)
+	w.Flush() // nolint: errcheck
+	actual := b.String()
+	expected := `h1 before merge : count 2 avg 15 +/- 5 min 10 max 20 sum 30
+# range, mid point, percentile, count
+>= 10 <= 10 , 10 , 50.00, 1
+> 10 <= 20 , 15 , 100.00, 1
+# target 75% 15
+h2 before merge : count 2 avg 85 +/- 5 min 80 max 90 sum 170
+# range, mid point, percentile, count
+>= 80 <= 80 , 80 , 50.00, 1
+> 80 <= 90 , 85 , 100.00, 1
+# target 75% 85
+merged h2 -> h1 : count 4 avg 50 +/- 35.36 min 10 max 90 sum 200
+# range, mid point, percentile, count
+>= 10 <= 10 , 10 , 25.00, 1
+> 10 <= 20 , 15 , 50.00, 1
+> 70 <= 80 , 75 , 75.00, 1
+> 80 <= 90 , 85 , 100.00, 1
+# target 75% 80
+h2 after merge : no data
+merged h1a -> h2a : count 5 avg 50 +/- 31.62 min 10 max 90 sum 250
+# range, mid point, percentile, count
+>= 10 <= 10 , 10 , 20.00, 1
+> 10 <= 20 , 15 , 40.00, 1
+> 40 <= 50 , 45 , 60.00, 1
+> 70 <= 80 , 75 , 80.00, 1
+> 80 <= 90 , 85 , 100.00, 1
+# target 75% 77.5
+h1 should now be empty : no data
+h3 after merge - 1 : count 4 avg 50 +/- 35.36 min 10 max 90 sum 200
+# range, mid point, percentile, count
+>= 10 <= 10 , 10 , 25.00, 1
+> 10 <= 20 , 15 , 50.00, 1
+> 70 <= 80 , 75 , 75.00, 1
+> 80 <= 90 , 85 , 100.00, 1
+# target 75% 80
+h3 after merge - 2 : count 4 avg 50 +/- 35.36 min 10 max 90 sum 200
+# range, mid point, percentile, count
+>= 10 <= 10 , 10 , 25.00, 1
+> 10 <= 20 , 15 , 50.00, 1
+> 70 <= 80 , 75 , 75.00, 1
+> 80 <= 90 , 85 , 100.00, 1
+# target 75% 80
+`
+	if actual != expected {
+		t.Errorf("unexpected:\n%s\tvs:\n%s", actual, expected)
+	}
+}
+
+// TestParsePercentiles checks ParsePercentiles on valid comma-separated
+// lists (including whitespace and negative values) and on invalid inputs,
+// verifying both the parsed list and the error flag.
+func TestParsePercentiles(t *testing.T) {
+	var tests = []struct {
+		str  string    // input
+		list []float64 // expected
+		err  bool
+	}{
+		// Good cases
+		{str: "99.9", list: []float64{99.9}},
+		{str: "1,2,3", list: []float64{1, 2, 3}},
+		{str: " 17, -5.3, 78 ", list: []float64{17, -5.3, 78}},
+		// Errors
+		{str: "", list: []float64{}, err: true},
+		{str: "   ", list: []float64{}, err: true},
+		{str: "23,a,46", list: []float64{23}, err: true}, // parses up to the bad token
+	}
+	log.SetLogLevel(log.Debug) // for coverage
+	for _, tst := range tests {
+		actual, err := ParsePercentiles(tst.str)
+		if !reflect.DeepEqual(actual, tst.list) {
+			t.Errorf("ParsePercentiles got %#v expected %#v", actual, tst.list)
+		}
+		if (err != nil) != tst.err {
+			t.Errorf("ParsePercentiles got %v error while expecting err:%v for %s",
+				err, tst.err, tst.str)
+		}
+	}
+}
+
+// TestRound checks Round against a table of expected values, including
+// values very close to round numbers, small fractions and negatives.
+func TestRound(t *testing.T) {
+	var tests = []struct {
+		input    float64
+		expected float64
+	}{
+		{100.00001, 100},
+		{100.0001, 100.0001},
+		{99.9999999999, 100},
+		{100, 100},
+		{0.1234499, 0.1234},
+		{0.1234567, 0.1235},
+		{-0.0000049, 0},
+		{-0.499999, -0.5},
+		{-0.500001, -0.5},
+		{-0.999999, -1},
+		{-0.123449, -0.1234},
+		{-0.123450, -0.1234}, // should be -0.1235 but we don't deal with <0 specially
+	}
+	for _, tst := range tests {
+		if actual := Round(tst.input); actual != tst.expected {
+			t.Errorf("Got %f, expected %f for Round(%.10f)", actual, tst.expected, tst.input)
+		}
+	}
+}
+
+// TestNaN is a regression test for issue #97: recording the same value
+// 599713 times must yield a standard deviation of exactly 0 (presumably
+// guarding against floating-point rounding producing a NaN — see issue).
+func TestNaN(t *testing.T) {
+	var c Counter
+	for i := 599713; i > 0; i-- {
+		c.Record(1281)
+	}
+	c.Log("counter with 599713 times 1281 - issue #97")
+	actual := c.StdDev()
+	if actual != 0.0 {
+		t.Errorf("Got %g, expected 0 for stddev/issue #97 c is %+v", actual, c)
+	}
+}
+
+// TestBucketLookUp checks which bucket a recorded value falls into: the
+// sentinel records of 1 and 1000000 pin the first and last buckets, so the
+// value under test always shows up as the middle exported bucket (Data[1]),
+// whose Start/End are compared to the expected boundaries.
+func TestBucketLookUp(t *testing.T) {
+	var tests = []struct {
+		input float64 // input
+		start float64 // start
+		end   float64 // end
+
+	}{
+		{input: 11, start: 10, end: 11},
+		{input: 171, start: 160, end: 180},
+		{input: 180, start: 160, end: 180},
+		{input: 801, start: 800, end: 900},
+		{input: 900, start: 800, end: 900},
+		{input: 999, start: 900, end: 1000},
+		{input: 999.99, start: 900, end: 1000},
+		{input: 1000, start: 900, end: 1000},
+		{input: 1000.01, start: 1000, end: 2000},
+		{input: 1001, start: 1000, end: 2000},
+		{input: 1001.1, start: 1000, end: 2000},
+		{input: 1999.99, start: 1000, end: 2000},
+		{input: 2000, start: 1000, end: 2000},
+		{input: 2001, start: 2000, end: 3000},
+		{input: 2001.1, start: 2000, end: 3000},
+		{input: 75000, start: 50000, end: 75000},
+		{input: 75000.01, start: 75000, end: 100000},
+		{input: 99999.99, start: 75000, end: 100000},
+		{input: 100000, start: 75000, end: 100000},
+		{input: 100000.01, start: 100000, end: 1e+06},
+		{input: 100000.99, start: 100000, end: 1e+06},
+		{input: 100001, start: 100000, end: 1e+06},
+		{input: 523477, start: 100000, end: 1e+06},
+	}
+	h := NewHistogram(0, 1)
+	for _, test := range tests {
+		h.Reset()
+		h.Record(1)
+		h.Record(1000000)
+		h.Record(test.input)
+		hData := h.Export()
+		hd := hData.Data[1]
+		if hd.Start != test.start || hd.End != test.end {
+			t.Errorf("Got %+v while expected %+v", hd, test)
+		}
+	}
+}
+
+// TestAllBucketBoundaries records values just below, exactly at, and just
+// above every histogram bucket boundary and checks that the counts land on
+// the expected side of each boundary (<= v in the lower interval, > v in
+// the next one). The first and last boundaries get one extra count from
+// the -1 and 1e6 sentinel records.
+func TestAllBucketBoundaries(t *testing.T) {
+	h := NewHistogram(0, 1)
+
+	for i, value := range histogramBucketValues {
+		v := float64(value)
+		h.Reset()
+		h.Record(-1)
+		h.RecordN(v-0.01, 50)
+		h.RecordN(v, 700)       // so first interval gets a count of 750 + 1 for first
+		h.RecordN(v+0.01, 1003) // second interval gets a count of 1003 + 1 for last
+		h.Record(1e6)
+		hData := h.Export()
+		var firstInterval int64
+		if i == 0 {
+			firstInterval = 1 // first interval is [min, 0[
+		}
+		var lastInterval int64
+		if i == len(histogramBucketValues)-1 {
+			lastInterval = 1
+		}
+		if hData.Data[1-firstInterval].End != v || hData.Data[1-firstInterval].Count != 750+firstInterval {
+			t.Errorf("= Boundary, got %+v unexpectedly for %d %g", hData, value, v)
+		}
+		if hData.Data[2-firstInterval].Start != v || hData.Data[2-firstInterval].Count != 1003+lastInterval {
+			t.Errorf("> Boundary, got %+v unexpectedly for %d %g", hData, value, v)
+		}
+	}
+}
+
+// TODO: add a test with the data 1.0 1.0001 1.999 2.0 2.5;
+// it should produce 3 buckets: 0-1 with count 1,
+//                              1-2 with count 3,
+//                              2-2.5 with count 1.
+
+// BenchmarkBucketLookUpWithHighestValue measures Record for a value that
+// always maps to the highest regular bucket (worst case for the lookup).
+func BenchmarkBucketLookUpWithHighestValue(b *testing.B) {
+	testHistogram := NewHistogram(0, 1)
+	for i := 0; i < b.N; i++ {
+		testHistogram.Record(99999)
+	}
+}
+
+// BenchmarkBucketLookUpWithRandomValues measures Record for uniformly
+// random values in [0,100000) (average case for the bucket lookup).
+func BenchmarkBucketLookUpWithRandomValues(b *testing.B) {
+	testHistogram := NewHistogram(0, 1)
+	for i := 0; i < b.N; i++ {
+		testHistogram.Record(float64(rand.Intn(100000)))
+	}
+}
diff --git a/vendor/fortio.org/fortio/ui/static/img/favicon.ico b/vendor/fortio.org/fortio/ui/static/img/favicon.ico
new file mode 100644
index 0000000000..3c7300bd4d
Binary files /dev/null and b/vendor/fortio.org/fortio/ui/static/img/favicon.ico differ
diff --git a/vendor/fortio.org/fortio/ui/static/img/logo.svg b/vendor/fortio.org/fortio/ui/static/img/logo.svg
new file mode 100644
index 0000000000..8f2d3dcc9b
--- /dev/null
+++ b/vendor/fortio.org/fortio/ui/static/img/logo.svg
@@ -0,0 +1 @@
+
diff --git a/vendor/fortio.org/fortio/ui/static/js/Chart.min.js b/vendor/fortio.org/fortio/ui/static/js/Chart.min.js
new file mode 100644
index 0000000000..9b7cd9a263
--- /dev/null
+++ b/vendor/fortio.org/fortio/ui/static/js/Chart.min.js
@@ -0,0 +1,11 @@
+/*!
+ * Chart.js
+ * http://chartjs.org/
+ * Version: https://github.com/ldemailly/Chart.js/tree/fortio_min
+ * (Nov 25, 2017 master + PR 4198 - unused modules)
+ *
+ * Copyright 2017 Nick Downie
+ * Released under the MIT license
+ * https://github.com/chartjs/Chart.js/blob/master/LICENSE.md
+ */
+!function(t){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=t();else if("function"==typeof define&&define.amd)define([],t);else{("undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:this).Chart=t()}}(function(){return function t(e,i,n){function o(r,s){if(!i[r]){if(!e[r]){var l="function"==typeof require&&require;if(!s&&l)return l(r,!0);if(a)return a(r,!0);var u=new Error("Cannot find module '"+r+"'");throw u.code="MODULE_NOT_FOUND",u}var d=i[r]={exports:{}};e[r][0].call(d.exports,function(t){var i=e[r][1][t];return o(i||t)},d,d.exports,t,e,i,n)}return i[r].exports}for(var a="function"==typeof require&&require,r=0;ri?(e+.05)/(i+.05):(i+.05)/(e+.05)},level:function(t){var e=this.contrast(t);return e>=7.1?"AAA":e>=4.5?"AA":""},dark:function(){var t=this.values.rgb;return(299*t[0]+587*t[1]+114*t[2])/1e3<128},light:function(){return!this.dark()},negate:function(){for(var t=[],e=0;e<3;e++)t[e]=255-this.values.rgb[e];return this.setValues("rgb",t),this},lighten:function(t){var e=this.values.hsl;return e[2]+=e[2]*t,this.setValues("hsl",e),this},darken:function(t){var e=this.values.hsl;return e[2]-=e[2]*t,this.setValues("hsl",e),this},saturate:function(t){var e=this.values.hsl;return e[1]+=e[1]*t,this.setValues("hsl",e),this},desaturate:function(t){var e=this.values.hsl;return e[1]-=e[1]*t,this.setValues("hsl",e),this},whiten:function(t){var e=this.values.hwb;return e[1]+=e[1]*t,this.setValues("hwb",e),this},blacken:function(t){var e=this.values.hwb;return e[2]+=e[2]*t,this.setValues("hwb",e),this},greyscale:function(){var t=this.values.rgb,e=.3*t[0]+.59*t[1]+.11*t[2];return this.setValues("rgb",[e,e,e]),this},clearer:function(t){var e=this.values.alpha;return this.setValues("alpha",e-e*t),this},opaquer:function(t){var e=this.values.alpha;return this.setValues("alpha",e+e*t),this},rotate:function(t){var e=this.values.hsl,i=(e[0]+t)%360;return 
e[0]=i<0?360+i:i,this.setValues("hsl",e),this},mix:function(t,e){var i=t,n=void 0===e?.5:e,o=2*n-1,a=this.alpha()-i.alpha(),r=((o*a==-1?o:(o+a)/(1+o*a))+1)/2,s=1-r;return this.rgb(r*this.red()+s*i.red(),r*this.green()+s*i.green(),r*this.blue()+s*i.blue()).alpha(this.alpha()*n+i.alpha()*(1-n))},toJSON:function(){return this.rgb()},clone:function(){var t,e,i=new a,n=this.values,o=i.values;for(var r in n)n.hasOwnProperty(r)&&(t=n[r],"[object Array]"===(e={}.toString.call(t))?o[r]=t.slice(0):"[object Number]"===e?o[r]=t:console.error("unexpected color value:",t));return i}},a.prototype.spaces={rgb:["red","green","blue"],hsl:["hue","saturation","lightness"],hsv:["hue","saturation","value"],hwb:["hue","whiteness","blackness"],cmyk:["cyan","magenta","yellow","black"]},a.prototype.maxes={rgb:[255,255,255],hsl:[360,100,100],hsv:[360,100,100],hwb:[360,100,100],cmyk:[100,100,100,100]},a.prototype.getValues=function(t){for(var e=this.values,i={},n=0;n.04045?Math.pow((e+.055)/1.055,2.4):e/12.92)+.3576*(i=i>.04045?Math.pow((i+.055)/1.055,2.4):i/12.92)+.1805*(n=n>.04045?Math.pow((n+.055)/1.055,2.4):n/12.92)),100*(.2126*e+.7152*i+.0722*n),100*(.0193*e+.1192*i+.9505*n)]}function d(t){var e,i,n,o=u(t),a=o[0],r=o[1],s=o[2];return a/=95.047,r/=100,s/=108.883,a=a>.008856?Math.pow(a,1/3):7.787*a+16/116,r=r>.008856?Math.pow(r,1/3):7.787*r+16/116,s=s>.008856?Math.pow(s,1/3):7.787*s+16/116,e=116*r-16,i=500*(a-r),n=200*(r-s),[e,i,n]}function c(t){var e,i,n,o,a,r=t[0]/360,s=t[1]/100,l=t[2]/100;if(0==s)return a=255*l,[a,a,a];e=2*l-(i=l<.5?l*(1+s):l+s-l*s),o=[0,0,0];for(var u=0;u<3;u++)(n=r+1/3*-(u-1))<0&&n++,n>1&&n--,a=6*n<1?e+6*(i-e)*n:2*n<1?i:3*n<2?e+(i-e)*(2/3-n)*6:e,o[u]=255*a;return o}function h(t){var e=t[0]/60,i=t[1]/100,n=t[2]/100,o=Math.floor(e)%6,a=e-Math.floor(e),r=255*n*(1-i),s=255*n*(1-i*a),l=255*n*(1-i*(1-a)),n=255*n;switch(o){case 0:return[n,l,r];case 1:return[s,n,r];case 2:return[r,n,l];case 3:return[r,s,n];case 4:return[l,r,n];case 5:return[n,r,s]}}function f(t){var 
e,i,n,o,a=t[0]/360,s=t[1]/100,l=t[2]/100,u=s+l;switch(u>1&&(s/=u,l/=u),e=Math.floor(6*a),i=1-l,n=6*a-e,0!=(1&e)&&(n=1-n),o=s+n*(i-s),e){default:case 6:case 0:r=i,g=o,b=s;break;case 1:r=o,g=i,b=s;break;case 2:r=s,g=i,b=o;break;case 3:r=s,g=o,b=i;break;case 4:r=o,g=s,b=i;break;case 5:r=i,g=s,b=o}return[255*r,255*g,255*b]}function p(t){var e,i,n,o=t[0]/100,a=t[1]/100,r=t[2]/100,s=t[3]/100;return e=1-Math.min(1,o*(1-s)+s),i=1-Math.min(1,a*(1-s)+s),n=1-Math.min(1,r*(1-s)+s),[255*e,255*i,255*n]}function v(t){var e,i,n,o=t[0]/100,a=t[1]/100,r=t[2]/100;return e=3.2406*o+-1.5372*a+-.4986*r,i=-.9689*o+1.8758*a+.0415*r,n=.0557*o+-.204*a+1.057*r,e=e>.0031308?1.055*Math.pow(e,1/2.4)-.055:e*=12.92,i=i>.0031308?1.055*Math.pow(i,1/2.4)-.055:i*=12.92,n=n>.0031308?1.055*Math.pow(n,1/2.4)-.055:n*=12.92,e=Math.min(Math.max(0,e),1),i=Math.min(Math.max(0,i),1),n=Math.min(Math.max(0,n),1),[255*e,255*i,255*n]}function m(t){var e,i,n,o=t[0],a=t[1],r=t[2];return o/=95.047,a/=100,r/=108.883,o=o>.008856?Math.pow(o,1/3):7.787*o+16/116,a=a>.008856?Math.pow(a,1/3):7.787*a+16/116,r=r>.008856?Math.pow(r,1/3):7.787*r+16/116,e=116*a-16,i=500*(o-a),n=200*(a-r),[e,i,n]}function x(t){var e,i,n,o,a=t[0],r=t[1],s=t[2];return a<=8?o=(i=100*a/903.3)/100*7.787+16/116:(i=100*Math.pow((a+16)/116,3),o=Math.pow(i/100,1/3)),e=e/95.047<=.008856?e=95.047*(r/500+o-16/116)/7.787:95.047*Math.pow(r/500+o,3),n=n/108.883<=.008859?n=108.883*(o-s/200-16/116)/7.787:108.883*Math.pow(o-s/200,3),[e,i,n]}function y(t){var e,i,n,o=t[0],a=t[1],r=t[2];return e=Math.atan2(r,a),(i=360*e/2/Math.PI)<0&&(i+=360),n=Math.sqrt(a*a+r*r),[o,n,i]}function k(t){return v(x(t))}function w(t){var e,i,n,o=t[0],a=t[1];return n=t[2]/360*2*Math.PI,e=a*Math.cos(n),i=a*Math.sin(n),[o,e,i]}function S(t){return M[t]}e.exports={rgb2hsl:n,rgb2hsv:o,rgb2hwb:a,rgb2cmyk:s,rgb2keyword:l,rgb2xyz:u,rgb2lab:d,rgb2lch:function(t){return y(d(t))},hsl2rgb:c,hsl2hsv:function(t){var e,i,n=t[0],o=t[1]/100,a=t[2]/100;return 
0===a?[0,0,0]:(a*=2,o*=a<=1?a:2-a,i=(a+o)/2,e=2*o/(a+o),[n,100*e,100*i])},hsl2hwb:function(t){return a(c(t))},hsl2cmyk:function(t){return s(c(t))},hsl2keyword:function(t){return l(c(t))},hsv2rgb:h,hsv2hsl:function(t){var e,i,n=t[0],o=t[1]/100,a=t[2]/100;return i=(2-o)*a,e=o*a,e/=i<=1?i:2-i,e=e||0,i/=2,[n,100*e,100*i]},hsv2hwb:function(t){return a(h(t))},hsv2cmyk:function(t){return s(h(t))},hsv2keyword:function(t){return l(h(t))},hwb2rgb:f,hwb2hsl:function(t){return n(f(t))},hwb2hsv:function(t){return o(f(t))},hwb2cmyk:function(t){return s(f(t))},hwb2keyword:function(t){return l(f(t))},cmyk2rgb:p,cmyk2hsl:function(t){return n(p(t))},cmyk2hsv:function(t){return o(p(t))},cmyk2hwb:function(t){return a(p(t))},cmyk2keyword:function(t){return l(p(t))},keyword2rgb:S,keyword2hsl:function(t){return n(S(t))},keyword2hsv:function(t){return o(S(t))},keyword2hwb:function(t){return a(S(t))},keyword2cmyk:function(t){return s(S(t))},keyword2lab:function(t){return d(S(t))},keyword2xyz:function(t){return u(S(t))},xyz2rgb:v,xyz2lab:m,xyz2lch:function(t){return y(m(t))},lab2xyz:x,lab2rgb:k,lab2lch:y,lch2lab:w,lch2xyz:function(t){return x(w(t))},lch2rgb:function(t){return k(w(t))}};var 
M={aliceblue:[240,248,255],antiquewhite:[250,235,215],aqua:[0,255,255],aquamarine:[127,255,212],azure:[240,255,255],beige:[245,245,220],bisque:[255,228,196],black:[0,0,0],blanchedalmond:[255,235,205],blue:[0,0,255],blueviolet:[138,43,226],brown:[165,42,42],burlywood:[222,184,135],cadetblue:[95,158,160],chartreuse:[127,255,0],chocolate:[210,105,30],coral:[255,127,80],cornflowerblue:[100,149,237],cornsilk:[255,248,220],crimson:[220,20,60],cyan:[0,255,255],darkblue:[0,0,139],darkcyan:[0,139,139],darkgoldenrod:[184,134,11],darkgray:[169,169,169],darkgreen:[0,100,0],darkgrey:[169,169,169],darkkhaki:[189,183,107],darkmagenta:[139,0,139],darkolivegreen:[85,107,47],darkorange:[255,140,0],darkorchid:[153,50,204],darkred:[139,0,0],darksalmon:[233,150,122],darkseagreen:[143,188,143],darkslateblue:[72,61,139],darkslategray:[47,79,79],darkslategrey:[47,79,79],darkturquoise:[0,206,209],darkviolet:[148,0,211],deeppink:[255,20,147],deepskyblue:[0,191,255],dimgray:[105,105,105],dimgrey:[105,105,105],dodgerblue:[30,144,255],firebrick:[178,34,34],floralwhite:[255,250,240],forestgreen:[34,139,34],fuchsia:[255,0,255],gainsboro:[220,220,220],ghostwhite:[248,248,255],gold:[255,215,0],goldenrod:[218,165,32],gray:[128,128,128],green:[0,128,0],greenyellow:[173,255,47],grey:[128,128,128],honeydew:[240,255,240],hotpink:[255,105,180],indianred:[205,92,92],indigo:[75,0,130],ivory:[255,255,240],khaki:[240,230,140],lavender:[230,230,250],lavenderblush:[255,240,245],lawngreen:[124,252,0],lemonchiffon:[255,250,205],lightblue:[173,216,230],lightcoral:[240,128,128],lightcyan:[224,255,255],lightgoldenrodyellow:[250,250,210],lightgray:[211,211,211],lightgreen:[144,238,144],lightgrey:[211,211,211],lightpink:[255,182,193],lightsalmon:[255,160,122],lightseagreen:[32,178,170],lightskyblue:[135,206,250],lightslategray:[119,136,153],lightslategrey:[119,136,153],lightsteelblue:[176,196,222],lightyellow:[255,255,224],lime:[0,255,0],limegreen:[50,205,50],linen:[250,240,230],magenta:[255,0,255],maroon:[128,0,0],m
ediumaquamarine:[102,205,170],mediumblue:[0,0,205],mediumorchid:[186,85,211],mediumpurple:[147,112,219],mediumseagreen:[60,179,113],mediumslateblue:[123,104,238],mediumspringgreen:[0,250,154],mediumturquoise:[72,209,204],mediumvioletred:[199,21,133],midnightblue:[25,25,112],mintcream:[245,255,250],mistyrose:[255,228,225],moccasin:[255,228,181],navajowhite:[255,222,173],navy:[0,0,128],oldlace:[253,245,230],olive:[128,128,0],olivedrab:[107,142,35],orange:[255,165,0],orangered:[255,69,0],orchid:[218,112,214],palegoldenrod:[238,232,170],palegreen:[152,251,152],paleturquoise:[175,238,238],palevioletred:[219,112,147],papayawhip:[255,239,213],peachpuff:[255,218,185],peru:[205,133,63],pink:[255,192,203],plum:[221,160,221],powderblue:[176,224,230],purple:[128,0,128],rebeccapurple:[102,51,153],red:[255,0,0],rosybrown:[188,143,143],royalblue:[65,105,225],saddlebrown:[139,69,19],salmon:[250,128,114],sandybrown:[244,164,96],seagreen:[46,139,87],seashell:[255,245,238],sienna:[160,82,45],silver:[192,192,192],skyblue:[135,206,235],slateblue:[106,90,205],slategray:[112,128,144],slategrey:[112,128,144],snow:[255,250,250],springgreen:[0,255,127],steelblue:[70,130,180],tan:[210,180,140],teal:[0,128,128],thistle:[216,191,216],tomato:[255,99,71],turquoise:[64,224,208],violet:[238,130,238],wheat:[245,222,179],white:[255,255,255],whitesmoke:[245,245,245],yellow:[255,255,0],yellowgreen:[154,205,50]},C={};for(var _ in M)C[JSON.stringify(M[_])]=_},{}],4:[function(t,e,i){var n=t(3),o=function(){return new u};for(var a in n){o[a+"Raw"]=function(t){return function(e){return"number"==typeof e&&(e=Array.prototype.slice.call(arguments)),n[t](e)}}(a);var r=/(\w+)2(\w+)/.exec(a),s=r[1],l=r[2];(o[s]=o[s]||{})[l]=o[a]=function(t){return function(e){"number"==typeof e&&(e=Array.prototype.slice.call(arguments));var i=n[t](e);if("string"==typeof i||void 0===i)return i;for(var o=0;o0&&(t[0].yLabel?i=t[0].yLabel:e.labels.length>0&&t[0].index=0&&o>0)&&(p+=o));return 
a=d.getPixelForValue(p),r=d.getPixelForValue(p+h),s=(r-a)/2,{size:s,base:a,head:r,center:r+s/2}},calculateBarIndexPixels:function(t,e,i){var n,o,r,s,l,u,d=i.scale.options,c=this.getMeta(),h=this.getStackIndex(t,c.stack),f=i.pixels,g=f[e],p=f.length,v=i.start,m=i.end;return 1===p?(n=g>v?g-v:m-g,o=g0&&(n=(g-f[e-1])/2,e===p-1&&(o=n)),e1&&(i=Math.floor(t.dropFrames),t.dropFrames=t.dropFrames%1),t.advance(1+i);var n=Date.now();t.dropFrames+=(n-e)/t.frameDuration,t.animations.length>0&&t.requestAnimationFrame()},advance:function(t){for(var e,i,n=this.animations,o=0;o=e.numSteps?(a.callback(e.onAnimationComplete,[e],i),i.animating=!1,n.splice(o,1)):++o}},Object.defineProperty(t.Animation.prototype,"animationObject",{get:function(){return this}}),Object.defineProperty(t.Animation.prototype,"chartInstance",{get:function(){return this.chart},set:function(t){this.chart=t}})}},{14:14,15:15,34:34}],12:[function(t,e,i){"use strict";var n=t(14),o=t(34),a=t(17),r=t(37);e.exports=function(t){function e(t){return"top"===t||"bottom"===t}var i=t.plugins;t.types={},t.instances={},t.controllers={},o.extend(t.prototype,{construct:function(e,i){var a=this;i=function(t){var e=(t=t||{}).data=t.data||{};return e.datasets=e.datasets||[],e.labels=e.labels||[],t.options=o.configMerge(n.global,n[t.type],t.options||{}),t}(i);var s=r.acquireContext(e,i),l=s&&s.canvas,u=l&&l.height,d=l&&l.width;a.id=o.uid(),a.ctx=s,a.canvas=l,a.config=i,a.width=d,a.height=u,a.aspectRatio=u?d/u:null,a.options=i.options,a._bufferedRender=!1,a.chart=a,a.controller=a,t.instances[a.id]=a,Object.defineProperty(a,"data",{get:function(){return a.config.data},set:function(t){a.config.data=t}}),s&&l?(a.initialize(),a.update()):console.error("Failed to create chart: can't acquire context from the given item")},initialize:function(){var t=this;return 
i.notify(t,"beforeInit"),o.retinaScale(t,t.options.devicePixelRatio),t.bindEvents(),t.options.responsive&&t.resize(!0),t.ensureScalesHaveIDs(),t.buildOrUpdateScales(),t.initToolTip(),i.notify(t,"afterInit"),t},clear:function(){return o.canvas.clear(this),this},stop:function(){return t.animationService.cancelAnimation(this),this},resize:function(t){var e=this,n=e.options,a=e.canvas,r=n.maintainAspectRatio&&e.aspectRatio||null,s=Math.max(0,Math.floor(o.getMaximumWidth(a))),l=Math.max(0,Math.floor(r?s/r:o.getMaximumHeight(a)));if((e.width!==s||e.height!==l)&&(a.width=e.width=s,a.height=e.height=l,a.style.width=s+"px",a.style.height=l+"px",o.retinaScale(e,n.devicePixelRatio),!t)){var u={width:s,height:l};i.notify(e,"resize",[u]),e.options.onResize&&e.options.onResize(e,u),e.stop(),e.update(e.options.responsiveAnimationDuration)}},ensureScalesHaveIDs:function(){var t=this.options,e=t.scales||{},i=t.scale;o.each(e.xAxes,function(t,e){t.id=t.id||"x-axis-"+e}),o.each(e.yAxes,function(t,e){t.id=t.id||"y-axis-"+e}),i&&(i.id=i.id||"scale")},buildOrUpdateScales:function(){var i=this,n=i.options,a=i.scales||{},r=[],s=Object.keys(a).reduce(function(t,e){return t[e]=!1,t},{});n.scales&&(r=r.concat((n.scales.xAxes||[]).map(function(t){return{options:t,dtype:"category",dposition:"bottom"}}),(n.scales.yAxes||[]).map(function(t){return{options:t,dtype:"linear",dposition:"left"}}))),n.scale&&r.push({options:n.scale,dtype:"radialLinear",isDefault:!0,dposition:"chartArea"}),o.each(r,function(n){var r=n.options,l=r.id,u=o.valueOrDefault(r.type,n.dtype);e(r.position)!==e(n.dposition)&&(r.position=n.dposition),s[l]=!0;var d=null;if(l in a&&a[l].type===u)(d=a[l]).options=r,d.ctx=i.ctx,d.chart=i;else{var c=t.scaleService.getScaleConstructor(u);if(!c)return;d=new c({id:l,type:u,options:r,ctx:i.ctx,chart:i}),a[d.id]=d}d.mergeTicksOptions(),n.isDefault&&(i.scale=d)}),o.each(s,function(t,e){t||delete 
a[e]}),i.scales=a,t.scaleService.addScalesToLayout(this)},buildOrUpdateControllers:function(){var e=this,i=[],n=[];return o.each(e.data.datasets,function(o,a){var r=e.getDatasetMeta(a),s=o.type||e.config.type;if(r.type&&r.type!==s&&(e.destroyDatasetMeta(a),r=e.getDatasetMeta(a)),r.type=s,i.push(r.type),r.controller)r.controller.updateIndex(a),r.controller.linkScales();else{var l=t.controllers[r.type];if(void 0===l)throw new Error('"'+r.type+'" is not a chart type.');r.controller=new l(e,a),n.push(r.controller)}},e),n},resetElements:function(){var t=this;o.each(t.data.datasets,function(e,i){t.getDatasetMeta(i).controller.reset()},t)},reset:function(){this.resetElements(),this.tooltip.initialize()},update:function(e){var n=this;if(e&&"object"==typeof e||(e={duration:e,lazy:arguments[1]}),function(e){var i=e.options;o.each(e.scales,function(i){t.layoutService.removeBox(e,i)}),i=o.configMerge(t.defaults.global,t.defaults[e.config.type],i),e.options=e.config.options=i,e.ensureScalesHaveIDs(),e.buildOrUpdateScales(),e.tooltip._options=i.tooltips,e.tooltip.initialize()}(n),!1!==i.notify(n,"beforeUpdate")){n.tooltip._data=n.data;var a=n.buildOrUpdateControllers();o.each(n.data.datasets,function(t,e){n.getDatasetMeta(e).controller.buildOrUpdateElements()},n),n.updateLayout(),n.options.animation&&n.options.animation.duration&&o.each(a,function(t){t.reset()}),n.updateDatasets(),n.tooltip.initialize(),n.lastActive=[],i.notify(n,"afterUpdate"),n._bufferedRender?n._bufferedRequest={duration:e.duration,easing:e.easing,lazy:e.lazy}:n.render(e)}},updateLayout:function(){!1!==i.notify(this,"beforeLayout")&&(t.layoutService.update(this,this.width,this.height),i.notify(this,"afterScaleUpdate"),i.notify(this,"afterLayout"))},updateDatasets:function(){if(!1!==i.notify(this,"beforeDatasetsUpdate")){for(var t=0,e=this.data.datasets.length;t=0;--n)e.isDatasetVisible(n)&&e.drawDataset(n,t);i.notify(e,"afterDatasetsDraw",[t])}},drawDataset:function(t,e){var 
n=this.getDatasetMeta(t),o={meta:n,index:t,easingValue:e};!1!==i.notify(this,"beforeDatasetDraw",[o])&&(n.controller.draw(e),i.notify(this,"afterDatasetDraw",[o]))},_drawTooltip:function(t){var e=this.tooltip,n={tooltip:e,easingValue:t};!1!==i.notify(this,"beforeTooltipDraw",[n])&&(e.draw(),i.notify(this,"afterTooltipDraw",[n]))},getElementAtEvent:function(t){return a.modes.single(this,t)},getElementsAtEvent:function(t){return a.modes.label(this,t,{intersect:!0})},getElementsAtXAxis:function(t){return a.modes["x-axis"](this,t,{intersect:!0})},getElementsAtEventForMode:function(t,e,i){var n=a.modes[e];return"function"==typeof n?n(this,t,i):[]},getDatasetAtEvent:function(t){return a.modes.dataset(this,t,{intersect:!0})},getDatasetMeta:function(t){var e=this.data.datasets[t];e._meta||(e._meta={});var i=e._meta[this.id];return i||(i=e._meta[this.id]={type:null,data:[],dataset:null,controller:null,hidden:null,xAxisID:null,yAxisID:null}),i},getVisibleDatasetCount:function(){for(var t=0,e=0,i=this.data.datasets.length;e0||(i.forEach(function(e){delete t[e]}),delete t._chartjs)}}var i=["push","pop","shift","splice","unshift"];t.DatasetController=function(t,e){this.initialize(t,e)},n.extend(t.DatasetController.prototype,{datasetElementType:null,dataElementType:null,initialize:function(t,e){this.chart=t,this.index=e,this.linkScales(),this.addElements()},updateIndex:function(t){this.index=t},linkScales:function(){var t=this,e=t.getMeta(),i=t.getDataset();null!==e.xAxisID&&e.xAxisID in t.chart.scales||(e.xAxisID=i.xAxisID||t.chart.options.scales.xAxes[0].id),null!==e.yAxisID&&e.yAxisID in t.chart.scales||(e.yAxisID=i.yAxisID||t.chart.options.scales.yAxes[0].id)},getDataset:function(){return this.chart.data.datasets[this.index]},getMeta:function(){return this.chart.getDatasetMeta(this.index)},getScaleForId:function(t){return this.chart.scales[t]},reset:function(){this.update(!0)},destroy:function(){this._data&&e(this._data,this)},createMetaDataset:function(){var 
t=this.datasetElementType;return t&&new t({_chart:this.chart,_datasetIndex:this.index})},createMetaData:function(t){var e=this.dataElementType;return e&&new e({_chart:this.chart,_datasetIndex:this.index,_index:t})},addElements:function(){var t,e,i=this.getMeta(),n=this.getDataset().data||[],o=i.data;for(t=0,e=n.length;ti&&this.insertElements(i,n-i)},insertElements:function(t,e){for(var i=0;i=i[e].length&&i[e].push({}),!i[e][r].type||l.type&&l.type!==i[e][r].type?a.merge(i[e][r],[t.scaleService.getScaleDefaults(s),l]):a.merge(i[e][r],l)}else a._merger(e,i,n,o)}})},a.where=function(t,e){if(a.isArray(t)&&Array.prototype.filter)return t.filter(e);var i=[];return a.each(t,function(t){e(t)&&i.push(t)}),i},a.findIndex=Array.prototype.findIndex?function(t,e,i){return t.findIndex(e,i)}:function(t,e,i){i=void 0===i?t:i;for(var n=0,o=t.length;n=0;n--){var o=t[n];if(e(o))return o}},a.isNumber=function(t){return!isNaN(parseFloat(t))&&isFinite(t)},a.almostEquals=function(t,e,i){return Math.abs(t-e)t},a.max=function(t){return t.reduce(function(t,e){return isNaN(e)?t:Math.max(t,e)},Number.NEGATIVE_INFINITY)},a.min=function(t){return t.reduce(function(t,e){return isNaN(e)?t:Math.min(t,e)},Number.POSITIVE_INFINITY)},a.sign=Math.sign?function(t){return Math.sign(t)}:function(t){return 0==(t=+t)||isNaN(t)?t:t>0?1:-1},a.log10=Math.log10?function(t){return Math.log10(t)}:function(t){return Math.log(t)/Math.LN10},a.toRadians=function(t){return t*(Math.PI/180)},a.toDegrees=function(t){return t*(180/Math.PI)},a.getAngleFromPoint=function(t,e){var i=e.x-t.x,n=e.y-t.y,o=Math.sqrt(i*i+n*n),a=Math.atan2(n,i);return a<-.5*Math.PI&&(a+=2*Math.PI),{angle:a,distance:o}},a.distanceBetweenPoints=function(t,e){return Math.sqrt(Math.pow(e.x-t.x,2)+Math.pow(e.y-t.y,2))},a.aliasPixel=function(t){return t%2==0?0:.5},a.splineCurve=function(t,e,i,n){var 
o=t.skip?e:t,a=e,r=i.skip?e:i,s=Math.sqrt(Math.pow(a.x-o.x,2)+Math.pow(a.y-o.y,2)),l=Math.sqrt(Math.pow(r.x-a.x,2)+Math.pow(r.y-a.y,2)),u=s/(s+l),d=l/(s+l),c=n*(u=isNaN(u)?0:u),h=n*(d=isNaN(d)?0:d);return{previous:{x:a.x-c*(r.x-o.x),y:a.y-c*(r.y-o.y)},next:{x:a.x+h*(r.x-o.x),y:a.y+h*(r.y-o.y)}}},a.EPSILON=Number.EPSILON||1e-14,a.splineCurveMonotone=function(t){var e,i,n,o,r=(t||[]).map(function(t){return{model:t._model,deltaK:0,mK:0}}),s=r.length;for(e=0;e0?r[e-1]:null,(o=e0?r[e-1]:null,o=e=t.length-1?t[0]:t[e+1]:e>=t.length-1?t[t.length-1]:t[e+1]},a.previousItem=function(t,e,i){return i?e<=0?t[t.length-1]:t[e-1]:e<=0?t[0]:t[e-1]},a.niceNum=function(t,e){var i=Math.floor(a.log10(t)),n=t/Math.pow(10,i);return(e?n<1.5?1:n<3?2:n<7?5:10:n<=1?1:n<=2?2:n<=5?5:10)*Math.pow(10,i)},a.requestAnimFrame="undefined"==typeof window?function(t){t()}:window.requestAnimationFrame||window.webkitRequestAnimationFrame||window.mozRequestAnimationFrame||window.oRequestAnimationFrame||window.msRequestAnimationFrame||function(t){return window.setTimeout(t,1e3/60)},a.getRelativePosition=function(t,e){var i,n,o=t.originalEvent||t,r=t.currentTarget||t.srcElement,s=r.getBoundingClientRect(),l=o.touches;l&&l.length>0?(i=l[0].clientX,n=l[0].clientY):(i=o.clientX,n=o.clientY);var u=parseFloat(a.getStyle(r,"padding-left")),d=parseFloat(a.getStyle(r,"padding-top")),c=parseFloat(a.getStyle(r,"padding-right")),h=parseFloat(a.getStyle(r,"padding-bottom")),f=s.right-s.left-u-c,g=s.bottom-s.top-d-h;return i=Math.round((i-s.left-u)/f*r.width/e.currentDevicePixelRatio),n=Math.round((n-s.top-d)/g*r.height/e.currentDevicePixelRatio),{x:i,y:n}},a.getConstraintWidth=function(t){return r(t,"max-width","clientWidth")},a.getConstraintHeight=function(t){return r(t,"max-height","clientHeight")},a.getMaximumWidth=function(t){var e=t.parentNode;if(!e)return t.clientWidth;var i=parseInt(a.getStyle(e,"padding-left"),10),n=parseInt(a.getStyle(e,"padding-right"),10),o=e.clientWidth-i-n,r=a.getConstraintWidth(t);return 
isNaN(r)?o:Math.min(o,r)},a.getMaximumHeight=function(t){var e=t.parentNode;if(!e)return t.clientHeight;var i=parseInt(a.getStyle(e,"padding-top"),10),n=parseInt(a.getStyle(e,"padding-bottom"),10),o=e.clientHeight-i-n,r=a.getConstraintHeight(t);return isNaN(r)?o:Math.min(o,r)},a.getStyle=function(t,e){return t.currentStyle?t.currentStyle[e]:document.defaultView.getComputedStyle(t,null).getPropertyValue(e)},a.retinaScale=function(t,e){var i=t.currentDevicePixelRatio=e||window.devicePixelRatio||1;if(1!==i){var n=t.canvas,o=t.height,a=t.width;n.height=o*i,n.width=a*i,t.ctx.scale(i,i),n.style.height||n.style.width||(n.style.height=o+"px",n.style.width=a+"px")}},a.fontString=function(t,e,i){return e+" "+t+"px "+i},a.longestText=function(t,e,i,n){var o=(n=n||{}).data=n.data||{},r=n.garbageCollect=n.garbageCollect||[];n.font!==e&&(o=n.data={},r=n.garbageCollect=[],n.font=e),t.font=e;var s=0;a.each(i,function(e){void 0!==e&&null!==e&&!0!==a.isArray(e)?s=a.measureText(t,o,r,s,e):a.isArray(e)&&a.each(e,function(e){void 0===e||null===e||a.isArray(e)||(s=a.measureText(t,o,r,s,e))})});var l=r.length/2;if(l>i.length){for(var u=0;un&&(n=a),n},a.numberOfLabelLines=function(t){var e=1;return a.each(t,function(t){a.isArray(t)&&t.length>e&&(e=t.length)}),e},a.color=n?function(t){return t instanceof CanvasGradient&&(t=o.global.defaultColor),n(t)}:function(t){return console.error("Color.js not found!"),t},a.getHoverColor=function(t){return t instanceof CanvasPattern?t:a.color(t).saturate(.5).darken(.1).rgbString()}}},{14:14,2:2,34:34}],17:[function(t,e,i){"use strict";function n(t,e){return t.native?{x:t.x,y:t.y}:u.getRelativePosition(t,e)}function o(t,e){var i,n,o,a,r;for(n=0,a=t.data.datasets.length;n0&&(u=t.getDatasetMeta(u[0]._datasetIndex).data),u},"x-axis":function(t,e){return l(t,e,{intersect:!1})},point:function(t,e){return a(t,n(e,t))},nearest:function(t,e,i){var o=n(e,t);i.axis=i.axis||"xy";var a=s(i.axis),l=r(t,o,i.intersect,a);return l.length>1&&l.sort(function(t,e){var 
i=t.getArea()-e.getArea();return 0===i&&(i=t._datasetIndex-e._datasetIndex),i}),l.slice(0,1)},x:function(t,e,i){var a=n(e,t),r=[],s=!1;return o(t,function(t){t.inXRange(a.x)&&r.push(t),t.inRange(a.x,a.y)&&(s=!0)}),i.intersect&&!s&&(r=[]),r},y:function(t,e,i){var a=n(e,t),r=[],s=!1;return o(t,function(t){t.inYRange(a.y)&&r.push(t),t.inRange(a.x,a.y)&&(s=!0)}),i.intersect&&!s&&(r=[]),r}}}},{34:34}],18:[function(t,e,i){"use strict";t(14)._set("global",{responsive:!0,responsiveAnimationDuration:0,maintainAspectRatio:!0,events:["mousemove","mouseout","click","touchstart","touchmove"],hover:{onHover:null,mode:"nearest",intersect:!0,animationDuration:400},onClick:null,defaultColor:"rgba(0,0,0,0.1)",defaultFontColor:"#666",defaultFontFamily:"'Helvetica Neue', 'Helvetica', 'Arial', sans-serif",defaultFontSize:12,defaultFontStyle:"normal",showLines:!0,elements:{},layout:{padding:{top:0,right:0,bottom:0,left:0}}}),e.exports=function(){var t=function(t,e){return this.construct(t,e),this};return t.Chart=t,t}},{14:14}],19:[function(t,e,i){"use strict";var n=t(34);e.exports=function(t){function e(t,e){return n.where(t,function(t){return t.position===e})}function i(t,e){t.forEach(function(t,e){return t._tmpIndex_=e,t}),t.sort(function(t,i){var n=e?i:t,o=e?t:i;return n.weight===o.weight?n._tmpIndex_-o._tmpIndex_:n.weight-o.weight}),t.forEach(function(t){delete t._tmpIndex_})}t.layoutService={defaults:{},addBox:function(t,e){t.boxes||(t.boxes=[]),e.fullWidth=e.fullWidth||!1,e.position=e.position||"top",e.weight=e.weight||0,t.boxes.push(e)},removeBox:function(t,e){var i=t.boxes?t.boxes.indexOf(e):-1;-1!==i&&t.boxes.splice(i,1)},configure:function(t,e,i){for(var n,o=["fullWidth","position","weight"],a=o.length,r=0;r h&<.maxHeight){l--;break}l++,c=u*d}t.labelRotation=l},afterCalculateTickRotation:function(){s.callback(this.options.afterCalculateTickRotation,[this])},beforeFit:function(){s.callback(this.options.beforeFit,[this])},fit:function(){var 
t=this,o=t.minSize={width:0,height:0},a=n(t._ticks),r=t.options,u=r.ticks,d=r.scaleLabel,c=r.gridLines,h=r.display,f=t.isHorizontal(),g=i(u),p=r.gridLines.tickMarkLength;if(o.width=f?t.isFullWidth()?t.maxWidth-t.margins.left-t.margins.right:t.maxWidth:h&&c.drawTicks?p:0,o.height=f?h&&c.drawTicks?p:0:t.maxHeight,d.display&&h){var v=l(d)+s.options.toPadding(d.padding).height;f?o.height+=v:o.width+=v}if(u.display&&h){var m=s.longestText(t.ctx,g.font,a,t.longestTextCache),b=s.numberOfLabelLines(a),x=.5*g.size,y=t.options.ticks.padding;if(f){t.longestLabelWidth=m;var k=s.toRadians(t.labelRotation),w=Math.cos(k),S=Math.sin(k)*m+g.size*b+x*(b-1)+x;o.height=Math.min(t.maxHeight,o.height+S+y),t.ctx.font=g.font;var M=e(t.ctx,a[0],g.font),C=e(t.ctx,a[a.length-1],g.font);0!==t.labelRotation?(t.paddingLeft="bottom"===r.position?w*M+3:w*x+3,t.paddingRight="bottom"===r.position?w*x+3:w*C+3):(t.paddingLeft=M/2+3,t.paddingRight=C/2+3)}else u.mirror?m=0:m+=y+x,o.width=Math.min(t.maxWidth,o.width+m),t.paddingTop=g.size/2,t.paddingBottom=g.size/2}t.handleMargins(),t.width=o.width,t.height=o.height},handleMargins:function(){var t=this;t.margins&&(t.paddingLeft=Math.max(t.paddingLeft-t.margins.left,0),t.paddingTop=Math.max(t.paddingTop-t.margins.top,0),t.paddingRight=Math.max(t.paddingRight-t.margins.right,0),t.paddingBottom=Math.max(t.paddingBottom-t.margins.bottom,0))},afterFit:function(){s.callback(this.options.afterFit,[this])},isHorizontal:function(){return"top"===this.options.position||"bottom"===this.options.position},isFullWidth:function(){return this.options.fullWidth},getRightValue:function(t){if(s.isNullOrUndef(t))return NaN;if("number"==typeof t&&!isFinite(t))return NaN;if(t)if(this.isHorizontal()){if(void 0!==t.x)return this.getRightValue(t.x)}else if(void 0!==t.y)return this.getRightValue(t.y);return t},getLabelForIndex:s.noop,getPixelForValue:s.noop,getValueForPixel:s.noop,getPixelForTick:function(t){var e=this,i=e.options.offset;if(e.isHorizontal()){var 
n=(e.width-(e.paddingLeft+e.paddingRight))/Math.max(e._ticks.length-(i?0:1),1),o=n*t+e.paddingLeft;i&&(o+=n/2);var a=e.left+Math.round(o);return a+=e.isFullWidth()?e.margins.left:0}var r=e.height-(e.paddingTop+e.paddingBottom);return e.top+t*(r/(e._ticks.length-1))},getPixelForDecimal:function(t){var e=this;if(e.isHorizontal()){var i=(e.width-(e.paddingLeft+e.paddingRight))*t+e.paddingLeft,n=e.left+Math.round(i);return n+=e.isFullWidth()?e.margins.left:0}return e.top+t*e.height},getBasePixel:function(){return this.getPixelForValue(this.getBaseValue())},getBaseValue:function(){var t=this.min,e=this.max;return this.beginAtZero?0:t<0&&e<0?e:t>0&&e>0?t:0},_autoSkip:function(t){var e,i,n,o,a=this,r=a.isHorizontal(),l=a.options.ticks.minor,u=t.length,d=s.toRadians(a.labelRotation),c=Math.cos(d),h=a.longestLabelWidth*c,f=[];for(l.maxTicksLimit&&(o=l.maxTicksLimit),r&&(e=!1,(h+l.autoSkipPadding)*u>a.width-(a.paddingLeft+a.paddingRight)&&(e=1+Math.floor((h+l.autoSkipPadding)*u/(a.width-(a.paddingLeft+a.paddingRight)))),o&&u>o&&(e=Math.max(e,Math.floor(u/o)))),i=0;i1&&i%e>0||i%e==0&&i+e>=u)&&i!==u-1&&delete n.label,f.push(n);return f},draw:function(t){var e=this,n=e.options;if(n.display){var r=e.ctx,u=a.global,d=n.ticks.minor,c=n.ticks.major||d,h=n.gridLines,f=n.scaleLabel,g=0!==e.labelRotation,p=e.isHorizontal(),v=d.autoSkip?e._autoSkip(e.getTicks()):e.getTicks(),m=s.valueOrDefault(d.fontColor,u.defaultFontColor),b=i(d),x=s.valueOrDefault(c.fontColor,u.defaultFontColor),y=i(c),k=h.drawTicks?h.tickMarkLength:0,w=s.valueOrDefault(f.fontColor,u.defaultFontColor),S=i(f),M=s.options.toPadding(f.padding),C=s.toRadians(e.labelRotation),_=[],D="right"===n.position?e.left:e.right-k,A="right"===n.position?e.left+k:e.right,I="bottom"===n.position?e.top:e.bottom-k,P="bottom"===n.position?e.top+k:e.bottom;if(s.each(v,function(i,a){if(!s.isNullOrUndef(i.label)){var 
r,l,c,f,m=i.label;a===e.zeroLineIndex&&n.offset===h.offsetGridLines?(r=h.zeroLineWidth,l=h.zeroLineColor,c=h.zeroLineBorderDash,f=h.zeroLineBorderDashOffset):(r=s.valueAtIndexOrDefault(h.lineWidth,a),l=s.valueAtIndexOrDefault(h.color,a),c=s.valueOrDefault(h.borderDash,u.borderDash),f=s.valueOrDefault(h.borderDashOffset,u.borderDashOffset));var b,x,y,w,S,M,T,F,O,L,z="middle",R="middle",B=d.padding;if(p){var N=k+B;"bottom"===n.position?(R=g?"middle":"top",z=g?"right":"center",L=e.top+N):(R=g?"middle":"bottom",z=g?"left":"center",L=e.bottom-N);var W=o(e,a,h.offsetGridLines&&v.length>1);W1);E0)i=t.stepSize;else{var a=n.niceNum(e.max-e.min,!1);i=n.niceNum(a/(t.maxTicks-1),!0)}var r=Math.floor(e.min/i)*i,s=Math.ceil(e.max/i)*i;t.min&&t.max&&t.stepSize&&n.almostWhole((t.max-t.min)/t.stepSize,i/1e3)&&(r=t.min,s=t.max);var l=(s-r)/i;l=n.almostEquals(l,Math.round(l),i/1e3)?Math.round(l):Math.ceil(l),o.push(void 0!==t.min?t.min:r);for(var u=1;u3?i[2]-i[1]:i[1]-i[0];Math.abs(o)>1&&t!==Math.floor(t)&&(o=t-Math.floor(t));var a=n.log10(Math.abs(o)),r="";if(0!==t){var s=-1*Math.floor(a);s=Math.max(Math.min(s,20),0),r=t.toFixed(s)}else r="0";return r},logarithmic:function(t,e,i){var o=t/Math.pow(10,Math.floor(n.log10(t)));return 0===t?"0":1===o||2===o||5===o||0===e||e===i.length-1?t.toExponential():""}}}},{34:34}],24:[function(t,e,i){"use strict";var n=t(14),o=t(15),a=t(34);n._set("global",{tooltips:{enabled:!0,custom:null,mode:"nearest",position:"average",intersect:!0,backgroundColor:"rgba(0,0,0,0.8)",titleFontStyle:"bold",titleSpacing:2,titleMarginBottom:6,titleFontColor:"#fff",titleAlign:"left",bodySpacing:2,bodyFontColor:"#fff",bodyAlign:"left",footerFontStyle:"bold",footerSpacing:2,footerMarginTop:6,footerFontColor:"#fff",footerAlign:"left",yPadding:6,xPadding:6,caretPadding:2,caretSize:5,cornerRadius:6,multiKeyBackground:"#fff",displayColors:!0,borderColor:"rgba(0,0,0,0)",borderWidth:0,callbacks:{beforeTitle:a.noop,title:function(t,e){var 
i="",n=e.labels,o=n?n.length:0;if(t.length>0){var a=t[0];a.xLabel?i=a.xLabel:o>0&&a.indexn.width&&(o=n.width-e.width),o<0&&(o=0)),"top"===d?a+=c:a-="bottom"===d?e.height+c:e.height/2,"center"===d?"left"===u?o+=c:"right"===u&&(o-=c):"left"===u?o-=h:"right"===u&&(o+=h),{x:o,y:a}}(d,p=function(t,e){var i=t._chart.ctx,n=2*e.yPadding,o=0,r=e.body,s=r.reduce(function(t,e){return t+e.before.length+e.lines.length+e.after.length},0);s+=e.beforeBody.length+e.afterBody.length;var l=e.title.length,u=e.footer.length,d=e.titleFontSize,c=e.bodyFontSize,h=e.footerFontSize;n+=l*d,n+=l?(l-1)*e.titleSpacing:0,n+=l?e.titleMarginBottom:0,n+=s*c,n+=s?(s-1)*e.bodySpacing:0,n+=u?e.footerMarginTop:0,n+=u*h,n+=u?(u-1)*e.footerSpacing:0;var f=0,g=function(t){o=Math.max(o,i.measureText(t).width+f)};return i.font=a.fontString(d,e._titleFontStyle,e._titleFontFamily),a.each(e.title,g),i.font=a.fontString(c,e._bodyFontStyle,e._bodyFontFamily),a.each(e.beforeBody.concat(e.afterBody),g),f=e.displayColors?c+2:0,a.each(r,function(t){a.each(t.before,g),a.each(t.lines,g),a.each(t.after,g)}),f=0,i.font=a.fontString(h,e._footerFontStyle,e._footerFontFamily),a.each(e.footer,g),o+=2*e.xPadding,{width:o,height:n}}(this,d),f=function(t,e){var i=t._model,n=t._chart,o=t._chart.chartArea,a="center",r="center";i.yn.height-e.height&&(r="bottom");var s,l,u,d,c,h=(o.left+o.right)/2,f=(o.top+o.bottom)/2;"center"===r?(s=function(t){return t<=h},l=function(t){return t>h}):(s=function(t){return t<=e.width/2},l=function(t){return t>=n.width-e.width/2}),u=function(t){return t+e.width+i.caretSize+i.caretPadding>n.width},d=function(t){return t-e.width-i.caretSize-i.caretPadding<0},c=function(t){return t<=f?"top":"bottom"},s(i.x)?(a="left",u(i.x)&&(a="center",r=c(i.y))):l(i.x)&&(a="right",d(i.x)&&(a="center",r=c(i.y)));var g=t._options;return{xAlign:g.xAlign?g.xAlign:a,yAlign:g.yAlign?g.yAlign:r}}(this,p),o._chart)}else d.opacity=0;return 
d.xAlign=f.xAlign,d.yAlign=f.yAlign,d.x=g.x,d.y=g.y,d.width=p.width,d.height=p.height,d.caretX=v.x,d.caretY=v.y,o._model=d,e&&l.custom&&l.custom.call(o,d),o},drawCaret:function(t,e){var i=this._chart.ctx,n=this._view,o=this.getCaretPosition(t,e,n);i.lineTo(o.x1,o.y1),i.lineTo(o.x2,o.y2),i.lineTo(o.x3,o.y3)},getCaretPosition:function(t,e,i){var n,o,a,r,s,l,u=i.caretSize,d=i.cornerRadius,c=i.xAlign,h=i.yAlign,f=t.x,g=t.y,p=e.width,v=e.height;if("center"===h)s=g+v/2,"left"===c?(o=(n=f)-u,a=n,r=s+u,l=s-u):(o=(n=f+p)+u,a=n,r=s-u,l=s+u);else if("left"===c?(n=(o=f+d+u)-u,a=o+u):"right"===c?(n=(o=f+p-d-u)-u,a=o+u):(n=(o=i.caretX)-u,a=o+u),"top"===h)s=(r=g)-u,l=r;else{s=(r=g+v)+u,l=r;var m=a;a=n,n=m}return{x1:n,x2:o,x3:a,y1:r,y2:s,y3:l}},drawTitle:function(t,i,n,o){var r=i.title;if(r.length){n.textAlign=i._titleAlign,n.textBaseline="top";var s=i.titleFontSize,l=i.titleSpacing;n.fillStyle=e(i.titleFontColor,o),n.font=a.fontString(s,i._titleFontStyle,i._titleFontFamily);var u,d;for(u=0,d=r.length;u0&&n.stroke()},draw:function(){var t=this._chart.ctx,e=this._view;if(0!==e.opacity){var i={width:e.width,height:e.height},n={x:e.x,y:e.y},o=Math.abs(e.opacity<.001)?0:e.opacity,a=e.title.length||e.beforeBody.length||e.body.length||e.afterBody.length||e.footer.length;this._options.enabled&&a&&(this.drawBackground(n,e,t,i,o),n.x+=e.xPadding,n.y+=e.yPadding,this.drawTitle(n,e,t,o),this.drawBody(n,e,t,o),this.drawFooter(n,e,t,o))}},handleEvent:function(t){var e=this,i=e._options,n=!1;if(e._lastActive=e._lastActive||[],"mouseout"===t.type?e._active=[]:e._active=e._chart.getElementsAtEventForMode(t,i.mode,i),!(n=!a.arrayEquals(e._active,e._lastActive)))return!1;if(e._lastActive=e._active,i.enabled||i.custom){e._eventPosition={x:t.x,y:t.y};var o=e._model;e.update(!0),e.pivot(),n|=o.x!==e._model.x||o.y!==e._model.y}return n}}),t.Tooltip.positioners={average:function(t){if(!t.length)return!1;var 
e,i,n=0,o=0,a=0;for(e=0,i=t.length;el;)o-=2*Math.PI;for(;o=s&&o<=l,d=r>=i.innerRadius&&r<=i.outerRadius;return u&&d}return!1},getCenterPoint:function(){var t=this._view,e=(t.startAngle+t.endAngle)/2,i=(t.innerRadius+t.outerRadius)/2;return{x:t.x+Math.cos(e)*i,y:t.y+Math.sin(e)*i}},getArea:function(){var t=this._view;return Math.PI*((t.endAngle-t.startAngle)/(2*Math.PI))*(Math.pow(t.outerRadius,2)-Math.pow(t.innerRadius,2))},tooltipPosition:function(){var t=this._view,e=t.startAngle+(t.endAngle-t.startAngle)/2,i=(t.outerRadius-t.innerRadius)/2+t.innerRadius;return{x:t.x+Math.cos(e)*i,y:t.y+Math.sin(e)*i}},draw:function(){var t=this._chart.ctx,e=this._view,i=e.startAngle,n=e.endAngle;t.beginPath(),t.arc(e.x,e.y,e.outerRadius,i,n),t.arc(e.x,e.y,e.innerRadius,n,i,!0),t.closePath(),t.strokeStyle=e.borderColor,t.lineWidth=e.borderWidth,t.fillStyle=e.backgroundColor,t.fill(),t.lineJoin="bevel",e.borderWidth&&t.stroke()}})},{14:14,15:15,34:34}],26:[function(t,e,i){"use strict";var n=t(14),o=t(15),a=t(34),r=n.global;n._set("global",{elements:{line:{tension:.4,backgroundColor:r.defaultColor,borderWidth:3,borderColor:r.defaultColor,borderCapStyle:"butt",borderDash:[],borderDashOffset:0,borderJoinStyle:"miter",capBezierPoints:!0,fill:!0}}}),e.exports=o.extend({draw:function(){var t,e,i,n,o=this._view,s=this._chart.ctx,l=o.spanGaps,u=this._children.slice(),d=r.elements.line,c=-1;for(this._loop&&u.length&&u.push(u[0]),s.save(),s.lineCap=o.borderCapStyle||d.borderCapStyle,s.setLineDash&&s.setLineDash(o.borderDash||d.borderDash),s.lineDashOffset=o.borderDashOffset||d.borderDashOffset,s.lineJoin=o.borderJoinStyle||d.borderJoinStyle,s.lineWidth=o.borderWidth||d.borderWidth,s.strokeStyle=o.borderColor||r.defaultColor,s.beginPath(),c=-1,t=0;te?1:-1,r=1,s=u.borderSkipped||"left"):(e=u.x-u.width/2,i=u.x+u.width/2,n=u.y,a=1,r=(o=u.base)>n?1:-1,s=u.borderSkipped||"bottom"),d){var 
c=Math.min(Math.abs(e-i),Math.abs(n-o)),h=(d=d>c?c:d)/2,f=e+("left"!==s?h*a:0),g=i+("right"!==s?-h*a:0),p=n+("top"!==s?h*r:0),v=o+("bottom"!==s?-h*r:0);f!==g&&(n=p,o=v),p!==v&&(e=f,i=g)}l.beginPath(),l.fillStyle=u.backgroundColor,l.strokeStyle=u.borderColor,l.lineWidth=d;var m=[[e,o],[e,n],[i,n],[i,o]],b=["bottom","left","top","right"].indexOf(s,0);-1===b&&(b=0);var x=t(0);l.moveTo(x[0],x[1]);for(var y=1;y<4;y++)x=t(y),l.lineTo(x[0],x[1]);l.fill(),d&&l.stroke()},height:function(){var t=this._view;return t.base-t.y},inRange:function(t,e){var i=!1;if(this._view){var n=o(this);i=t>=n.left&&t<=n.right&&e>=n.top&&e<=n.bottom}return i},inLabelRange:function(t,e){if(!this._view)return!1;var i=o(this);return n(this)?t>=i.left&&t<=i.right:e>=i.top&&e<=i.bottom},inXRange:function(t){var e=o(this);return t>=e.left&&t<=e.right},inYRange:function(t){var e=o(this);return t>=e.top&&t<=e.bottom},getCenterPoint:function(){var t,e,i=this._view;return n(this)?(t=i.x,e=(i.y+i.base)/2):(t=(i.x+i.base)/2,e=i.y),{x:t,y:e}},getArea:function(){var t=this._view;return t.width*Math.abs(t.y-t.base)},tooltipPosition:function(){var t=this._view;return{x:t.x,y:t.y}}})},{14:14,15:15}],29:[function(t,e,i){"use strict";e.exports={},e.exports.Arc=t(25),e.exports.Line=t(26),e.exports.Point=t(27),e.exports.Rectangle=t(28)},{25:25,26:26,27:27,28:28}],30:[function(t,e,i){"use strict";var n=t(31),i=e.exports={clear:function(t){t.ctx.clearRect(0,0,t.width,t.height)},roundedRect:function(t,e,i,n,o,a){if(a){var r=Math.min(a,n/2),s=Math.min(a,o/2);t.moveTo(e+r,i),t.lineTo(e+n-r,i),t.quadraticCurveTo(e+n,i,e+n,i+s),t.lineTo(e+n,i+o-s),t.quadraticCurveTo(e+n,i+o,e+n-r,i+o),t.lineTo(e+r,i+o),t.quadraticCurveTo(e,i+o,e,i+o-s),t.lineTo(e,i+s),t.quadraticCurveTo(e,i,e+r,i)}else t.rect(e,i,n,o)},drawPoint:function(t,e,i,n,o){var a,r,s,l,u,d;if(!e||"object"!=typeof e||"[object HTMLImageElement]"!==(a=e.toString())&&"[object 
HTMLCanvasElement]"!==a){if(!(isNaN(i)||i<=0)){switch(e){default:t.beginPath(),t.arc(n,o,i,0,2*Math.PI),t.closePath(),t.fill();break;case"triangle":t.beginPath(),u=(r=3*i/Math.sqrt(3))*Math.sqrt(3)/2,t.moveTo(n-r/2,o+u/3),t.lineTo(n+r/2,o+u/3),t.lineTo(n,o-2*u/3),t.closePath(),t.fill();break;case"rect":d=1/Math.SQRT2*i,t.beginPath(),t.fillRect(n-d,o-d,2*d,2*d),t.strokeRect(n-d,o-d,2*d,2*d);break;case"rectRounded":var c=i/Math.SQRT2,h=n-c,f=o-c,g=Math.SQRT2*i;t.beginPath(),this.roundedRect(t,h,f,g,g,i/2),t.closePath(),t.fill();break;case"rectRot":d=1/Math.SQRT2*i,t.beginPath(),t.moveTo(n-d,o),t.lineTo(n,o+d),t.lineTo(n+d,o),t.lineTo(n,o-d),t.closePath(),t.fill();break;case"cross":t.beginPath(),t.moveTo(n,o+i),t.lineTo(n,o-i),t.moveTo(n-i,o),t.lineTo(n+i,o),t.closePath();break;case"crossRot":t.beginPath(),s=Math.cos(Math.PI/4)*i,l=Math.sin(Math.PI/4)*i,t.moveTo(n-s,o-l),t.lineTo(n+s,o+l),t.moveTo(n-s,o+l),t.lineTo(n+s,o-l),t.closePath();break;case"star":t.beginPath(),t.moveTo(n,o+i),t.lineTo(n,o-i),t.moveTo(n-i,o),t.lineTo(n+i,o),s=Math.cos(Math.PI/4)*i,l=Math.sin(Math.PI/4)*i,t.moveTo(n-s,o-l),t.lineTo(n+s,o+l),t.moveTo(n-s,o+l),t.lineTo(n+s,o-l),t.closePath();break;case"line":t.beginPath(),t.moveTo(n-i,o),t.lineTo(n+i,o),t.closePath();break;case"dash":t.beginPath(),t.moveTo(n,o),t.lineTo(n+i,o),t.closePath()}t.stroke()}}else t.drawImage(e,n-e.width/2,o-e.height/2,e.width,e.height)},clipArea:function(t,e){t.save(),t.beginPath(),t.rect(e.left,e.top,e.right-e.left,e.bottom-e.top),t.clip()},unclipArea:function(t){t.restore()},lineTo:function(t,e,i,n){if(i.steppedLine)return"after"===i.steppedLine&&!n||"after"!==i.steppedLine&&n?t.lineTo(e.x,i.y):t.lineTo(i.x,e.y),void 
t.lineTo(i.x,i.y);i.tension?t.bezierCurveTo(n?e.controlPointPreviousX:e.controlPointNextX,n?e.controlPointPreviousY:e.controlPointNextY,n?i.controlPointNextX:i.controlPointPreviousX,n?i.controlPointNextY:i.controlPointPreviousY,i.x,i.y):t.lineTo(i.x,i.y)}};n.clear=i.clear,n.drawRoundedRectangle=function(t){t.beginPath(),i.roundedRect.apply(i,arguments),t.closePath()}},{31:31}],31:[function(t,e,i){"use strict";var n={noop:function(){},uid:function(){var t=0;return function(){return t++}}(),isNullOrUndef:function(t){return null===t||void 0===t},isArray:Array.isArray?Array.isArray:function(t){return"[object Array]"===Object.prototype.toString.call(t)},isObject:function(t){return null!==t&&"[object Object]"===Object.prototype.toString.call(t)},valueOrDefault:function(t,e){return void 0===t?e:t},valueAtIndexOrDefault:function(t,e,i){return n.valueOrDefault(n.isArray(t)?t[e]:t,i)},callback:function(t,e,i){if(t&&"function"==typeof t.call)return t.apply(i,e)},each:function(t,e,i,o){var a,r,s;if(n.isArray(t))if(r=t.length,o)for(a=r-1;a>=0;a--)e.call(i,t[a],a);else for(a=0;a=1?t:-(Math.sqrt(1-t*t)-1)},easeOutCirc:function(t){return Math.sqrt(1-(t-=1)*t)},easeInOutCirc:function(t){return(t/=.5)<1?-.5*(Math.sqrt(1-t*t)-1):.5*(Math.sqrt(1-(t-=2)*t)+1)},easeInElastic:function(t){var e=1.70158,i=0,n=1;return 0===t?0:1===t?1:(i||(i=.3),n<1?(n=1,e=i/4):e=i/(2*Math.PI)*Math.asin(1/n),-n*Math.pow(2,10*(t-=1))*Math.sin((t-e)*(2*Math.PI)/i))},easeOutElastic:function(t){var e=1.70158,i=0,n=1;return 0===t?0:1===t?1:(i||(i=.3),n<1?(n=1,e=i/4):e=i/(2*Math.PI)*Math.asin(1/n),n*Math.pow(2,-10*t)*Math.sin((t-e)*(2*Math.PI)/i)+1)},easeInOutElastic:function(t){var e=1.70158,i=0,n=1;return 0===t?0:2==(t/=.5)?1:(i||(i=.45),n<1?(n=1,e=i/4):e=i/(2*Math.PI)*Math.asin(1/n),t<1?n*Math.pow(2,10*(t-=1))*Math.sin((t-e)*(2*Math.PI)/i)*-.5:n*Math.pow(2,-10*(t-=1))*Math.sin((t-e)*(2*Math.PI)/i)*.5+1)},easeInBack:function(t){var e=1.70158;return t*t*((e+1)*t-e)},easeOutBack:function(t){var 
e=1.70158;return(t-=1)*t*((e+1)*t+e)+1},easeInOutBack:function(t){var e=1.70158;return(t/=.5)<1?t*t*((1+(e*=1.525))*t-e)*.5:.5*((t-=2)*t*((1+(e*=1.525))*t+e)+2)},easeInBounce:function(t){return 1-o.easeOutBounce(1-t)},easeOutBounce:function(t){return t<1/2.75?7.5625*t*t:t<2/2.75?7.5625*(t-=1.5/2.75)*t+.75:t<2.5/2.75?7.5625*(t-=2.25/2.75)*t+.9375:7.5625*(t-=2.625/2.75)*t+.984375},easeInOutBounce:function(t){return t<.5?.5*o.easeInBounce(2*t):.5*o.easeOutBounce(2*t-1)+.5}};e.exports={effects:o},n.easingEffects=o},{31:31}],33:[function(t,e,i){"use strict";var n=t(31);e.exports={toLineHeight:function(t,e){var i=(""+t).match(/^(normal|(\d+(?:\.\d+)?)(px|em|%)?)$/);if(!i||"normal"===i[1])return 1.2*e;switch(t=+i[2],i[3]){case"px":return t;case"%":t/=100}return e*t},toPadding:function(t){var e,i,o,a;return n.isObject(t)?(e=+t.top||0,i=+t.right||0,o=+t.bottom||0,a=+t.left||0):e=i=o=a=+t||0,{top:e,right:i,bottom:o,left:a,height:e+o,width:a+i}},resolve:function(t,e,i){var o,a,r;for(o=0,a=t.length;o
';var a=e.childNodes[0],r=e.childNodes[1];e._reset=function(){a.scrollLeft=1e6,a.scrollTop=1e6,r.scrollLeft=1e6,r.scrollTop=1e6};var s=function(){e._reset(),t()};return o(a,"scroll",s.bind(a,"expand")),o(r,"scroll",s.bind(r,"shrink")),e}(function(t,e){var i=!1,n=[];return function(){n=Array.prototype.slice.call(arguments),e=e||this,i||(i=!0,u.requestAnimFrame.call(window,function(){i=!1,t.apply(e,n)}))}}(function(){if(n.resizer)return e(r("resize",i))}));!function(t,e){var i=t[d]||(t[d]={}),n=i.renderProxy=function(t){t.animationName===f&&e()};u.each(g,function(e){o(t,e,n)}),i.reflow=!!t.offsetParent,t.classList.add(h)}(t,function(){if(n.resizer){var e=t.parentNode;e&&e!==a.parentNode&&e.insertBefore(a,e.firstChild),a._reset()}})}function l(t){var e=t[d]||{},i=e.resizer;delete e.resizer,function(t){var e=t[d]||{},i=e.renderProxy;i&&(u.each(g,function(e){a(t,e,i)}),delete e.renderProxy),t.classList.remove(h)}(t),i&&i.parentNode&&i.parentNode.removeChild(i)}var u=t(34),d="$chartjs",c="chartjs-",h=c+"render-monitor",f=c+"render-animation",g=["animationstart","webkitAnimationStart"],p={touchstart:"mousedown",touchmove:"mousemove",touchend:"mouseup",pointerenter:"mouseenter",pointerdown:"mousedown",pointermove:"mousemove",pointerup:"mouseup",pointerleave:"mouseout",pointerout:"mouseout"},v=!!function(){var t=!1;try{var e=Object.defineProperty({},"passive",{get:function(){t=!0}});window.addEventListener("e",null,e)}catch(t){}return t}()&&{passive:!0};e.exports={_enabled:"undefined"!=typeof window&&"undefined"!=typeof document,initialize:function(){var t="from{opacity:0.99}to{opacity:1}";!function(t,e){var i=t._style||document.createElement("style");t._style||(t._style=i,e="/* Chart.js */\n"+e,i.setAttribute("type","text/css"),document.getElementsByTagName("head")[0].appendChild(i)),i.appendChild(document.createTextNode(e))}(this,"@-webkit-keyframes "+f+"{"+t+"}@keyframes "+f+"{"+t+"}."+h+"{-webkit-animation:"+f+" 0.001s;animation:"+f+" 
0.001s;}")},acquireContext:function(t,e){"string"==typeof t?t=document.getElementById(t):t.length&&(t=t[0]),t&&t.canvas&&(t=t.canvas);var i=t&&t.getContext&&t.getContext("2d");return i&&i.canvas===t?(function(t,e){var i=t.style,o=t.getAttribute("height"),a=t.getAttribute("width");if(t[d]={initial:{height:o,width:a,style:{display:i.display,height:i.height,width:i.width}}},i.display=i.display||"block",null===a||""===a){var r=n(t,"width");void 0!==r&&(t.width=r)}if(null===o||""===o)if(""===t.style.height)t.height=t.width/(e.options.aspectRatio||2);else{var s=n(t,"height");void 0!==r&&(t.height=s)}}(t,e),i):null},releaseContext:function(t){var e=t.canvas;if(e[d]){var i=e[d].initial;["height","width"].forEach(function(t){var n=i[t];u.isNullOrUndef(n)?e.removeAttribute(t):e.setAttribute(t,n)}),u.each(i.style||{},function(t,i){e.style[i]=t}),e.width=e.width,delete e[d]}},addEventListener:function(t,e,i){var n=t.canvas;if("resize"!==e){var a=i[d]||(i[d]={});o(n,e,(a.proxies||(a.proxies={}))[t.id+"_"+e]=function(e){i(function(t,e){var i=p[t.type]||t.type,n=u.getRelativePosition(t,e);return r(i,e,n.x,n.y,t)}(e,t))})}else s(n,i,t)},removeEventListener:function(t,e,i){var n=t.canvas;if("resize"!==e){var o=((i[d]||{}).proxies||{})[t.id+"_"+e];o&&a(n,e,o)}else l(n)}},u.addEvent=o,u.removeEvent=a},{34:34}],37:[function(t,e,i){"use strict";var n=t(34),o=t(35),a=t(36),r=a._enabled?a:o;e.exports=n.extend({initialize:function(){},acquireContext:function(){},releaseContext:function(){},addEventListener:function(){},removeEventListener:function(){}},r)},{34:34,35:35,36:36}],38:[function(t,e,i){"use strict";var n=t(14),o=t(29),a=t(34);n._set("global",{plugins:{filler:{propagate:!0}}}),e.exports=function(){function t(t,e,i){var n,o=t._model||{},a=o.fill;if(void 
0===a&&(a=!!o.backgroundColor),!1===a||null===a)return!1;if(!0===a)return"origin";if(n=parseFloat(a,10),isFinite(n)&&Math.floor(n)===n)return"-"!==a[0]&&"+"!==a[0]||(n=e+n),!(n===e||n<0||n>=i)&&n;switch(a){case"bottom":return"start";case"top":return"end";case"zero":return"origin";case"origin":case"start":case"end":return a;default:return!1}}function e(t){var e,i=t.el._model||{},n=t.el._scale||{},o=t.fill,a=null;if(isFinite(o))return null;if("start"===o?a=void 0===i.scaleBottom?n.bottom:i.scaleBottom:"end"===o?a=void 0===i.scaleTop?n.top:i.scaleTop:void 0!==i.scaleZero?a=i.scaleZero:n.getBasePosition?a=n.getBasePosition():n.getBasePixel&&(a=n.getBasePixel()),void 0!==a&&null!==a){if(void 0!==a.x&&void 0!==a.y)return a;if("number"==typeof a&&isFinite(a))return e=n.isHorizontal(),{x:e?a:null,y:e?null:a}}return null}function i(t,e,i){var n,o=t[e].fill,a=[e];if(!i)return o;for(;!1!==o&&-1===a.indexOf(o);){if(!isFinite(o))return o;if(!(n=t[o]))return!1;if(n.visible)return o;a.push(o),o=n.fill}return!1}function r(t){var e=t.fill,i="dataset";return!1===e?null:(isFinite(e)||(i="boundary"),u[i](t))}function s(t){return t&&!t.skip}function l(t,e,i,n,o){var r;if(n&&o){for(t.moveTo(e[0].x,e[0].y),r=1;r0;--r)a.canvas.lineTo(t,i[r],i[r-1],!0)}}var u={dataset:function(t){var e=t.fill,i=t.chart,n=i.getDatasetMeta(e),o=n&&i.isDatasetVisible(e)&&n.dataset._children||[],a=o.length||0;return a?function(t,e){return e');for(var i=0;i '),t.data.datasets[i].label&&e.push(t.data.datasets[i].label),e.push("");return e.push(""),e.join("")}}),e.exports=function(t){function e(t,e){return t.usePointStyle?e*Math.SQRT2:t.boxWidth}function i(e,i){var n=new t.Legend({ctx:e.ctx,options:i,chart:e});r.configure(e,n,i),r.addBox(e,n),e.legend=n}var r=t.layoutService,s=a.noop;return t.Legend=o.extend({initialize:function(t){a.extend(this,t),this.legendHitBoxes=[],this.doughnutMode=!1},beforeUpdate:s,update:function(t,e,i){var n=this;return 
n.beforeUpdate(),n.maxWidth=t,n.maxHeight=e,n.margins=i,n.beforeSetDimensions(),n.setDimensions(),n.afterSetDimensions(),n.beforeBuildLabels(),n.buildLabels(),n.afterBuildLabels(),n.beforeFit(),n.fit(),n.afterFit(),n.afterUpdate(),n.minSize},afterUpdate:s,beforeSetDimensions:s,setDimensions:function(){var t=this;t.isHorizontal()?(t.width=t.maxWidth,t.left=0,t.right=t.width):(t.height=t.maxHeight,t.top=0,t.bottom=t.height),t.paddingLeft=0,t.paddingTop=0,t.paddingRight=0,t.paddingBottom=0,t.minSize={width:0,height:0}},afterSetDimensions:s,beforeBuildLabels:s,buildLabels:function(){var t=this,e=t.options.labels||{},i=a.callback(e.generateLabels,[t.chart],t)||[];e.filter&&(i=i.filter(function(i){return e.filter(i,t.chart.data)})),t.options.reverse&&i.reverse(),t.legendItems=i},afterBuildLabels:s,beforeFit:s,fit:function(){var t=this,i=t.options,o=i.labels,r=i.display,s=t.ctx,l=n.global,u=a.valueOrDefault,d=u(o.fontSize,l.defaultFontSize),c=u(o.fontStyle,l.defaultFontStyle),h=u(o.fontFamily,l.defaultFontFamily),f=a.fontString(d,c,h),g=t.legendHitBoxes=[],p=t.minSize,v=t.isHorizontal();if(v?(p.width=t.maxWidth,p.height=r?10:0):(p.width=r?10:0,p.height=t.maxHeight),r)if(s.font=f,v){var m=t.lineWidths=[0],b=t.legendItems.length?d+o.padding:0;s.textAlign="left",s.textBaseline="top",a.each(t.legendItems,function(i,n){var a=e(o,d)+d/2+s.measureText(i.text).width;m[m.length-1]+a+o.padding>=t.width&&(b+=d+o.padding,m[m.length]=t.left),g[n]={left:0,top:0,width:a,height:d},m[m.length-1]+=a+o.padding}),p.height+=b}else{var x=o.padding,y=t.columnWidths=[],k=o.padding,w=0,S=0,M=d+x;a.each(t.legendItems,function(t,i){var n=e(o,d)+d/2+s.measureText(t.text).width;S+M>p.height&&(k+=w+o.padding,y.push(w),w=0,S=0),w=Math.max(w,n),S+=M,g[i]={left:0,top:0,width:n,height:d}}),k+=w,y.push(w),p.width+=k}t.width=p.width,t.height=p.height},afterFit:s,isHorizontal:function(){return"top"===this.options.position||"bottom"===this.options.position},draw:function(){var 
t=this,i=t.options,o=i.labels,r=n.global,s=r.elements.line,l=t.width,u=t.lineWidths;if(i.display){var d,c=t.ctx,h=a.valueOrDefault,f=h(o.fontColor,r.defaultFontColor),g=h(o.fontSize,r.defaultFontSize),p=h(o.fontStyle,r.defaultFontStyle),v=h(o.fontFamily,r.defaultFontFamily),m=a.fontString(g,p,v);c.textAlign="left",c.textBaseline="middle",c.lineWidth=.5,c.strokeStyle=f,c.fillStyle=f,c.font=m;var b=e(o,g),x=t.legendHitBoxes,y=t.isHorizontal();d=y?{x:t.left+(l-u[0])/2,y:t.top+o.padding,line:0}:{x:t.left+o.padding,y:t.top+o.padding,line:0};var k=g+o.padding;a.each(t.legendItems,function(e,n){var f=c.measureText(e.text).width,p=b+g/2+f,v=d.x,m=d.y;y?v+p>=l&&(m=d.y+=k,d.line++,v=d.x=t.left+(l-u[d.line])/2):m+k>t.bottom&&(v=d.x=v+t.columnWidths[d.line]+o.padding,m=d.y=t.top+o.padding,d.line++),function(t,e,n){if(!(isNaN(b)||b<=0)){c.save(),c.fillStyle=h(n.fillStyle,r.defaultColor),c.lineCap=h(n.lineCap,s.borderCapStyle),c.lineDashOffset=h(n.lineDashOffset,s.borderDashOffset),c.lineJoin=h(n.lineJoin,s.borderJoinStyle),c.lineWidth=h(n.lineWidth,s.borderWidth),c.strokeStyle=h(n.strokeStyle,r.defaultColor);var o=0===h(n.lineWidth,s.borderWidth);if(c.setLineDash&&c.setLineDash(h(n.lineDash,s.borderDash)),i.labels&&i.labels.usePointStyle){var l=g*Math.SQRT2/2,u=l/Math.SQRT2,d=t+u,f=e+u;a.canvas.drawPoint(c,n.pointStyle,l,d,f)}else o||c.strokeRect(t,e,b,g),c.fillRect(t,e,b,g);c.restore()}}(v,m,e),x[n].left=v,x[n].top=m,function(t,e,i,n){var o=g/2,a=b+o+t,r=e+o;c.fillText(i.text,a,r),i.hidden&&(c.beginPath(),c.lineWidth=2,c.moveTo(a,r),c.lineTo(a+n,r),c.stroke())}(v,m,e,f),y?d.x+=p+o.padding:d.y+=k})}},handleEvent:function(t){var e=this,i=e.options,n="mouseup"===t.type?"click":t.type,o=!1;if("mousemove"===n){if(!i.onHover)return}else{if("click"!==n)return;if(!i.onClick)return}var a=t.x,r=t.y;if(a>=e.left&&a<=e.right&&r>=e.top&&r<=e.bottom)for(var 
s=e.legendHitBoxes,l=0;l=u.left&&a<=u.left+u.width&&r>=u.top&&r<=u.top+u.height){if("click"===n){i.onClick.call(e,t.native,e.legendItems[l]),o=!0;break}if("mousemove"===n){i.onHover.call(e,t.native,e.legendItems[l]),o=!0;break}}}return o}}),{id:"legend",beforeInit:function(t){var e=t.options.legend;e&&i(t,e)},beforeUpdate:function(t){var e=t.options.legend,o=t.legend;e?(a.mergeIf(e,n.global.legend),o?(r.configure(t,o,e),o.options=e):i(t,e)):o&&(r.removeBox(t,o),delete t.legend)},afterEvent:function(t,e){var i=t.legend;i&&i.handleEvent(e)}}}},{14:14,15:15,34:34}],40:[function(t,e,i){"use strict";var n=t(14),o=t(15),a=t(34);n._set("global",{title:{display:!1,fontStyle:"bold",fullWidth:!0,lineHeight:1.2,padding:10,position:"top",text:"",weight:2e3}}),e.exports=function(t){function e(e,n){var o=new t.Title({ctx:e.ctx,options:n,chart:e});i.configure(e,o,n),i.addBox(e,o),e.titleBlock=o}var i=t.layoutService,r=a.noop;return t.Title=o.extend({initialize:function(t){a.extend(this,t),this.legendHitBoxes=[]},beforeUpdate:r,update:function(t,e,i){var n=this;return n.beforeUpdate(),n.maxWidth=t,n.maxHeight=e,n.margins=i,n.beforeSetDimensions(),n.setDimensions(),n.afterSetDimensions(),n.beforeBuildLabels(),n.buildLabels(),n.afterBuildLabels(),n.beforeFit(),n.fit(),n.afterFit(),n.afterUpdate(),n.minSize},afterUpdate:r,beforeSetDimensions:r,setDimensions:function(){var t=this;t.isHorizontal()?(t.width=t.maxWidth,t.left=0,t.right=t.width):(t.height=t.maxHeight,t.top=0,t.bottom=t.height),t.paddingLeft=0,t.paddingTop=0,t.paddingRight=0,t.paddingBottom=0,t.minSize={width:0,height:0}},afterSetDimensions:r,beforeBuildLabels:r,buildLabels:r,afterBuildLabels:r,beforeFit:r,fit:function(){var 
t=this,e=a.valueOrDefault,i=t.options,o=i.display,r=e(i.fontSize,n.global.defaultFontSize),s=t.minSize,l=a.isArray(i.text)?i.text.length:1,u=a.options.toLineHeight(i.lineHeight,r),d=o?l*u+2*i.padding:0;t.isHorizontal()?(s.width=t.maxWidth,s.height=d):(s.width=d,s.height=t.maxHeight),t.width=s.width,t.height=s.height},afterFit:r,isHorizontal:function(){var t=this.options.position;return"top"===t||"bottom"===t},draw:function(){var t=this,e=t.ctx,i=a.valueOrDefault,o=t.options,r=n.global;if(o.display){var s,l,u,d=i(o.fontSize,r.defaultFontSize),c=i(o.fontStyle,r.defaultFontStyle),h=i(o.fontFamily,r.defaultFontFamily),f=a.fontString(d,c,h),g=a.options.toLineHeight(o.lineHeight,d),p=g/2+o.padding,v=0,m=t.top,b=t.left,x=t.bottom,y=t.right;e.fillStyle=i(o.fontColor,r.defaultFontColor),e.font=f,t.isHorizontal()?(l=b+(y-b)/2,u=m+p,s=y-b):(l="left"===o.position?b+p:y-p,u=m+(x-m)/2,s=x-m,v=Math.PI*("left"===o.position?-.5:.5)),e.save(),e.translate(l,u),e.rotate(v),e.textAlign="center",e.textBaseline="middle";var k=o.text;if(a.isArray(k))for(var w=0,S=0;Se.max&&(e.max=n))})});e.min=isFinite(e.min)&&!isNaN(e.min)?e.min:0,e.max=isFinite(e.max)&&!isNaN(e.max)?e.max:1,this.handleTickRangeOptions()},getTickLimit:function(){var t,e=this.options.ticks;if(this.isHorizontal())t=Math.min(e.maxTicksLimit?e.maxTicksLimit:11,Math.ceil(this.width/50));else{var i=o.valueOrDefault(e.fontSize,n.global.defaultFontSize);t=Math.min(e.maxTicksLimit?e.maxTicksLimit:11,Math.ceil(this.height/(2*i)))}return t},handleDirectionalChanges:function(){this.isHorizontal()||this.ticks.reverse()},getLabelForIndex:function(t,e){return+this.getRightValue(this.chart.data.datasets[e].data[t])},getPixelForValue:function(t){var e=this,i=e.start,n=+e.getRightValue(t),o=e.end-i;return e.isHorizontal()?e.left+e.width/o*(n-i):e.bottom-e.height/o*(n-i)},getValueForPixel:function(t){var e=this,i=e.isHorizontal(),n=i?e.width:e.height,o=(i?t-e.left:e.bottom-t)/n;return 
e.start+(e.end-e.start)*o},getPixelForTick:function(t){return this.getPixelForValue(this.ticksAsNumbers[t])}});t.scaleService.registerScaleType("linear",i,e)}},{14:14,23:23,34:34}],43:[function(t,e,i){"use strict";var n=t(34),o=t(23);e.exports=function(t){var e=n.noop;t.LinearScaleBase=t.Scale.extend({getRightValue:function(e){return"string"==typeof e?+e:t.Scale.prototype.getRightValue.call(this,e)},handleTickRangeOptions:function(){var t=this,e=t.options.ticks;if(e.beginAtZero){var i=n.sign(t.min),o=n.sign(t.max);i<0&&o<0?t.max=0:i>0&&o>0&&(t.min=0)}var a=void 0!==e.min||void 0!==e.suggestedMin,r=void 0!==e.max||void 0!==e.suggestedMax;void 0!==e.min?t.min=e.min:void 0!==e.suggestedMin&&(null===t.min?t.min=e.suggestedMin:t.min=Math.min(t.min,e.suggestedMin)),void 0!==e.max?t.max=e.max:void 0!==e.suggestedMax&&(null===t.max?t.max=e.suggestedMax:t.max=Math.max(t.max,e.suggestedMax)),a!==r&&t.min>=t.max&&(a?t.max=t.min+1:t.min=t.max-1),t.min===t.max&&(t.max++,e.beginAtZero||t.min--)},getTickLimit:e,handleDirectionalChanges:e,buildTicks:function(){var t=this,e=t.options.ticks,i=t.getTickLimit(),a={maxTicks:i=Math.max(2,i),min:e.min,max:e.max,stepSize:n.valueOrDefault(e.fixedStepSize,e.stepSize)},r=t.ticks=o.generators.linear(a,t);t.handleDirectionalChanges(),t.max=n.max(r),t.min=n.min(r),e.reverse?(r.reverse(),t.start=t.max,t.end=t.min):(t.start=t.min,t.end=t.max)},convertTicksToLabels:function(){var e=this;e.ticksAsNumbers=e.ticks.slice(),e.zeroLineIndex=e.ticks.indexOf(0),t.Scale.prototype.convertTicksToLabels.call(e)}})}},{23:23,34:34}],44:[function(t,e,i){"use strict";var n=t(34),o=t(23);e.exports=function(t){var e={position:"left",ticks:{callback:o.formatters.logarithmic}},i=t.Scale.extend({determineDataLimits:function(){function t(t){return l?t.xAxisID===e.id:t.yAxisID===e.id}var e=this,i=e.options,o=i.ticks,a=e.chart,r=a.data.datasets,s=n.valueOrDefault,l=e.isHorizontal();e.min=null,e.max=null,e.minNotZero=null;var u=i.stacked;if(void 
0===u&&n.each(r,function(e,i){if(!u){var n=a.getDatasetMeta(i);a.isDatasetVisible(i)&&t(n)&&void 0!==n.stack&&(u=!0)}}),i.stacked||u){var d={};n.each(r,function(o,r){var s=a.getDatasetMeta(r),l=[s.type,void 0===i.stacked&&void 0===s.stack?r:"",s.stack].join(".");a.isDatasetVisible(r)&&t(s)&&(void 0===d[l]&&(d[l]=[]),n.each(o.data,function(t,n){var o=d[l],a=+e.getRightValue(t);isNaN(a)||s.data[n].hidden||(o[n]=o[n]||0,i.relativePoints?o[n]=100:o[n]+=a)}))}),n.each(d,function(t){var i=n.min(t),o=n.max(t);e.min=null===e.min?i:Math.min(e.min,i),e.max=null===e.max?o:Math.max(e.max,o)})}else n.each(r,function(i,o){var r=a.getDatasetMeta(o);a.isDatasetVisible(o)&&t(r)&&n.each(i.data,function(t,i){var n=+e.getRightValue(t);isNaN(n)||r.data[i].hidden||(null===e.min?e.min=n:ne.max&&(e.max=n),0!==n&&(null===e.minNotZero||n 0
+}
+
+function getCurrentChart () {
+ var currentChart
+ if (objHasProps(chart)) {
+ currentChart = chart
+ } else if (objHasProps(overlayChart)) {
+ currentChart = overlayChart
+ } else if (objHasProps(mchart)) {
+ currentChart = mchart
+ } else {
+ currentChart = undefined
+ }
+ return currentChart
+}
+
+var timeoutID = 0
+function updateChart (chart = getCurrentChart()) {
+ updateChartOptions(chart)
+ if (timeoutID > 0) {
+ clearTimeout(timeoutID)
+ }
+ timeoutID = setTimeout("updateQueryString()", 750)
+}
+
+function multiLabel (res) {
+ var l = formatDate(res.StartTime)
+ if (res.Labels !== '') {
+ l += ' - ' + res.Labels
+ }
+ return l
+}
+
+function findData (slot, idx, res, p) {
+ // Not very efficient but there are only a handful of percentiles
+ var pA = res.DurationHistogram.Percentiles
+ if (!pA) {
+// console.log('No percentiles in res', res)
+ return
+ }
+ var pN = Number(p)
+ for (var i = 0; i < pA.length; i++) {
+ if (pA[i].Percentile === pN) {
+ mchart.data.datasets[slot].data[idx] = 1000.0 * pA[i].Value
+ return
+ }
+ }
+ console.log('Not Found', p, pN, pA)
+ // not found, not set
+}
+
+function fortioAddToMultiResult (i, res) {
+ mchart.data.labels[i] = multiLabel(res)
+ mchart.data.datasets[0].data[i] = 1000.0 * res.DurationHistogram.Min
+ findData(1, i, res, '50')
+ mchart.data.datasets[2].data[i] = 1000.0 * res.DurationHistogram.Avg
+ findData(3, i, res, '75')
+ findData(4, i, res, '90')
+ findData(5, i, res, '99')
+ findData(6, i, res, '99.9')
+ mchart.data.datasets[7].data[i] = 1000.0 * res.DurationHistogram.Max
+ mchart.data.datasets[8].data[i] = res.ActualQPS
+}
+
+function endMultiChart (len) {
+ mchart.data.labels = mchart.data.labels.slice(0, len)
+ for (var i = 0; i < mchart.data.datasets.length; i++) {
+ mchart.data.datasets[i].data = mchart.data.datasets[i].data.slice(0, len)
+ }
+ mchart.update()
+}
+
+function deleteOverlayChart () {
+ if (Object.keys(overlayChart).length === 0) {
+ return
+ }
+ overlayChart.destroy()
+ overlayChart = {}
+}
+
+function deleteMultiChart () {
+ if (Object.keys(mchart).length === 0) {
+ return
+ }
+ mchart.destroy()
+ mchart = {}
+}
+
+function deleteSingleChart () {
+ if (Object.keys(chart).length === 0) {
+ return
+ }
+ chart.destroy()
+ chart = {}
+}
+
+function makeMultiChart () {
+ document.getElementById('running').style.display = 'none'
+ document.getElementById('update').style.visibility = 'hidden'
+ var chartEl = document.getElementById('chart1')
+ chartEl.style.visibility = 'visible'
+ if (Object.keys(mchart).length !== 0) {
+ return
+ }
+ deleteSingleChart()
+ deleteOverlayChart()
+ var ctx = chartEl.getContext('2d')
+ mchart = new Chart(ctx, {
+ type: 'line',
+ data: {
+ labels: [],
+ datasets: [
+ {
+ label: 'Min',
+ fill: false,
+ stepped: true,
+ borderColor: 'hsla(111, 100%, 40%, .8)',
+ backgroundColor: 'hsla(111, 100%, 40%, .8)'
+ },
+ {
+ label: 'Median',
+ fill: false,
+ stepped: true,
+ borderDash: [5, 5],
+ borderColor: 'hsla(220, 100%, 40%, .8)',
+ backgroundColor: 'hsla(220, 100%, 40%, .8)'
+ },
+ {
+ label: 'Avg',
+ fill: false,
+ stepped: true,
+ backgroundColor: 'hsla(266, 100%, 40%, .8)',
+ borderColor: 'hsla(266, 100%, 40%, .8)'
+ },
+ {
+ label: 'p75',
+ fill: false,
+ stepped: true,
+ backgroundColor: 'hsla(60, 100%, 40%, .8)',
+ borderColor: 'hsla(60, 100%, 40%, .8)'
+ },
+ {
+ label: 'p90',
+ fill: false,
+ stepped: true,
+ backgroundColor: 'hsla(45, 100%, 40%, .8)',
+ borderColor: 'hsla(45, 100%, 40%, .8)'
+ },
+ {
+ label: 'p99',
+ fill: false,
+ stepped: true,
+ backgroundColor: 'hsla(30, 100%, 40%, .8)',
+ borderColor: 'hsla(30, 100%, 40%, .8)'
+ },
+ {
+ label: 'p99.9',
+ fill: false,
+ stepped: true,
+ backgroundColor: 'hsla(15, 100%, 40%, .8)',
+ borderColor: 'hsla(15, 100%, 40%, .8)'
+ },
+ {
+ label: 'Max',
+ fill: false,
+ stepped: true,
+ borderColor: 'hsla(0, 100%, 40%, .8)',
+ backgroundColor: 'hsla(0, 100%, 40%, .8)'
+ },
+ {
+ label: 'QPS',
+ yAxisID: 'qps',
+ fill: false,
+ stepped: true,
+ borderColor: 'rgba(0, 0, 0, .8)',
+ backgroundColor: 'rgba(0, 0, 0, .8)'
+ }
+ ]
+ },
+ options: {
+ responsive: true,
+ maintainAspectRatio: false,
+ title: {
+ display: true,
+ fontStyle: 'normal',
+ text: ['Latency in milliseconds']
+ },
+ elements: {
+ line: {
+ tension: 0 // disables bezier curves
+ }
+ },
+ scales: {
+ yAxes: [{
+ id: 'ms',
+ ticks: {
+ beginAtZero: true
+ },
+ scaleLabel: {
+ display: true,
+ labelString: 'ms'
+ }
+ }, {
+ id: 'qps',
+ position: 'right',
+ ticks: {
+ beginAtZero: true
+ },
+ scaleLabel: {
+ display: true,
+ labelString: 'QPS'
+ }
+ }]
+ }
+ }
+ })
+ // Hide QPS axis on clicking QPS dataset.
+ mchart.options.legend.onClick = (event, legendItem) => {
+ // Toggle dataset hidden (default behavior).
+ var dataset = mchart.data.datasets[legendItem.datasetIndex]
+ dataset.hidden = !dataset.hidden
+ if (dataset.label === 'QPS') {
+ // Toggle QPS y-axis.
+ var qpsYAxis = mchart.options.scales.yAxes[1]
+ qpsYAxis.display = !qpsYAxis.display
+ }
+ mchart.update()
+ }
+}
+
+function runTestForDuration (durationInSeconds) {
+ var progressBar = document.getElementById('progressBar')
+ if (durationInSeconds <= 0) {
+ // infinite case
+ progressBar.removeAttribute('value')
+ return
+ }
+ var startTimeMillis = Date.now()
+ var updatePercentage = function () {
+ var barPercentage = Math.min(100, (Date.now() - startTimeMillis) / (10 * durationInSeconds))
+ progressBar.value = barPercentage
+ if (barPercentage < 100) {
+ setTimeout(updatePercentage, 50 /* milliseconds */) // 20fps
+ }
+ }
+ updatePercentage()
+}
+
+var lastDuration = ''
+
+function toggleDuration (el) {
+ var d = document.getElementById('duration')
+ if (el.checked) {
+ lastDuration = d.value
+ d.value = ''
+ } else {
+ d.value = lastDuration
+ }
+}
+
+let customHeaderElement = ' ';
+
+function addCustomHeader() {
+ let customHeaderElements = document.getElementsByName("H");
+ let lastElement = customHeaderElements[customHeaderElements.length - 1];
+ lastElement.nextElementSibling.insertAdjacentHTML('afterend', customHeaderElement)
+}
+
diff --git a/vendor/fortio.org/fortio/ui/templates/browse.html b/vendor/fortio.org/fortio/ui/templates/browse.html
new file mode 100644
index 0000000000..c610a0df6b
--- /dev/null
+++ b/vendor/fortio.org/fortio/ui/templates/browse.html
@@ -0,0 +1,155 @@
+Φορτίο v{{.Version}}
+
+
+
+
+
+
+
+
+Φορτίο (fortio) v{{.Version}}{{.Extra}}
+
+
+{{if .DoRender}}
+
Loading {{.URL}}...
+
+
+
+
+
+{{else}}
+
+List of saved results:
+
+Filter:
+
+
+{{range .PreselectedDataList}}
+ {{.Value}}
+{{end}}
+
+
+Graph link: ...
+
+
+{{end}}
+
+
+
+
+
+Select or multi select to graph...
+
+
+
+
+{{if .DoSearch}}
+
+{{end}}
+{{if .DoLoadSelected}}
+
+{{end}}
+Go to Top .
+
+
diff --git a/vendor/fortio.org/fortio/ui/templates/main.html b/vendor/fortio.org/fortio/ui/templates/main.html
new file mode 100644
index 0000000000..ec14833eb3
--- /dev/null
+++ b/vendor/fortio.org/fortio/ui/templates/main.html
@@ -0,0 +1,96 @@
+Φορτίο v{{.Version}}
+
+
+
+
+
+
+
+
+Φορτίο (fortio) v{{.Version}}{{if not .DoLoad}} control UI{{end}}
+Up for {{.UpTime}} (since {{.StartTime}}).
+{{if .DoLoad}}
+
{{.Labels}} {{.TargetURL}}
+
+Running load test... Results pending...
+
+
+
+
Interrupt
+
+
+
+
+
+
+
+
+{{else}}
+{{if .DoStop}}
+Stopping runs as per request.
+{{else}}
+
+Or
+
+Or
+
+Or
+debug and debug with env dump and Internal PPROF
+Or
+
+Or
+
+{{end}}
+
+
+{{end}}
diff --git a/vendor/fortio.org/fortio/ui/templates/sync.html b/vendor/fortio.org/fortio/ui/templates/sync.html
new file mode 100644
index 0000000000..6565002656
--- /dev/null
+++ b/vendor/fortio.org/fortio/ui/templates/sync.html
@@ -0,0 +1,28 @@
+Φορτίο v{{.Version}}
+
+
+
+
+
+
+
+
+Φορτίο (fortio) v{{.Version}} syncing
+Syncing {{.URL}}
+
+
+
diff --git a/vendor/fortio.org/fortio/ui/uihandler.go b/vendor/fortio.org/fortio/ui/uihandler.go
new file mode 100644
index 0000000000..2e622111af
--- /dev/null
+++ b/vendor/fortio.org/fortio/ui/uihandler.go
@@ -0,0 +1,977 @@
+// Copyright 2017 Istio Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package ui // import "fortio.org/fortio/ui"
+
+import (
+ "bytes"
+ // md5 is mandated, not our choice
+ "crypto/md5" // nolint: gas
+ "encoding/base64"
+ "encoding/json"
+ "encoding/xml"
+ "fmt"
+ "html"
+ "html/template"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "fortio.org/fortio/fgrpc"
+ "fortio.org/fortio/fhttp"
+ "fortio.org/fortio/fnet"
+ "fortio.org/fortio/log"
+ "fortio.org/fortio/periodic"
+ "fortio.org/fortio/stats"
+ "fortio.org/fortio/version"
+)
+
+// TODO: move some of those in their own files/package (e.g data transfer TSV)
+// and add unit tests.
+
+var (
+ // UI and Debug prefix/paths (read in ui handler).
+ uiPath string // absolute (base)
+ logoPath string // relative
+ chartJSPath string // relative
+ debugPath string // mostly relative
+ fetchPath string // this one is absolute
+ // Used to construct default URL to self.
+ urlHostPort string
+ // Start time of the UI Server (for uptime info).
+ startTime time.Time
+ // Directory where the static content and templates are to be loaded from.
+ // This is replaced at link time to the packaged directory (e.g /usr/share/fortio/)
+ // but when fortio is installed with go get we use RunTime to find that directory.
+ // (see Dockerfile for how to set it)
+ resourcesDir string
+ extraBrowseLabel string // Extra label for report only
+ // Directory where results are written to/read from
+ dataDir string
+ // Parsed html templates for the UI pages.
+ mainTemplate *template.Template
+ browseTemplate *template.Template
+ syncTemplate *template.Template
+ // Guards id and runs below.
+ uiRunMapMutex = &sync.Mutex{}
+ // Monotonically increasing run id; starts at 1 because 0 is
+ // reserved to mean "interrupt all runs" (see the stop handling).
+ id int64
+ // In-flight runs keyed by run id, kept so a stop request can Abort them.
+ runs = make(map[int64]*periodic.RunnerOptions)
+ // Base URL used for index - useful when running under an ingress with prefix
+ baseURL string
+
+ // Percentile list used when the request doesn't specify "p=".
+ defaultPercentileList []float64
+)
+
+const (
+ // fetchURI is the relative fetch path (fetchPath above is the absolute one).
+ fetchURI = "fetch/"
+ faviconPath = "/favicon.ico"
+ // Value of the "runner" form field selecting the gRPC runner.
+ modegrpc = "grpc"
+)
+
+// getResourcesDir returns the directory holding the static content and
+// templates, taken from one of 3 sources, in priority order:
+// 1. the caller-provided override, when non empty;
+// 2. the resourcesDir package variable (set at link time, see Dockerfile);
+// 3. the source location of this file as reported by runtime.Caller.
+// Returns "" (after logging an error) when none of the 3 is available,
+// in which case static contents cannot be served.
+func getResourcesDir(override string) string {
+ if override != "" {
+ log.Infof("Using resources directory from override: %s", override)
+ return override
+ }
+ if resourcesDir != "" {
+ log.LogVf("Using resources directory set at link time: %s", resourcesDir)
+ return resourcesDir
+ }
+ _, filename, _, ok := runtime.Caller(0)
+ log.LogVf("Guessing resources directory from runtime source location: %v - %s", ok, filename)
+ if ok {
+ return path.Dir(filename)
+ }
+ log.Errf("Unable to get source tree location. Failing to serve static contents.")
+ return ""
+}
+
+// TODO: auto map from (Http)RunnerOptions to form generation and/or accept
+// JSON serialized options as input.
+
+// TODO: unit tests, allow additional data sets.
+
+// mode identifies which of the 3 principal actions the main html
+// Handler performs for a given request.
+type mode int
+
+// The main html has 3 principal modes:
+const (
+ // Default: renders the forms/menus
+ menu mode = iota
+ // Trigger a run
+ run
+ // Request abort
+ stop
+)
+
+// Handler is the main UI handler creating the web forms and processing them.
+func Handler(w http.ResponseWriter, r *http.Request) {
+ fhttp.LogRequest(r, "UI")
+ mode := menu
+ JSONOnly := false
+ DoSave := (r.FormValue("save") == "on")
+ url := r.FormValue("url")
+ runid := int64(0)
+ runner := r.FormValue("runner")
+ if r.FormValue("load") == "Start" {
+ mode = run
+ if r.FormValue("json") == "on" {
+ JSONOnly = true
+ log.Infof("Starting JSON only %s load request from %v for %s", runner, r.RemoteAddr, url)
+ } else {
+ log.Infof("Starting %s load request from %v for %s", runner, r.RemoteAddr, url)
+ }
+ } else {
+ if r.FormValue("stop") == "Stop" {
+ runid, _ = strconv.ParseInt(r.FormValue("runid"), 10, 64) // nolint: gas
+ log.Critf("Stop request from %v for %d", r.RemoteAddr, runid)
+ mode = stop
+ }
+ }
+ // Those only exist/make sense on run mode but go variable declaration...
+ labels := r.FormValue("labels")
+ resolution, _ := strconv.ParseFloat(r.FormValue("r"), 64) // nolint: gas
+ percList, _ := stats.ParsePercentiles(r.FormValue("p")) // nolint: gas
+ qps, _ := strconv.ParseFloat(r.FormValue("qps"), 64) // nolint: gas
+ durStr := r.FormValue("t")
+ grpcSecure := (r.FormValue("grpc-secure") == "on")
+ grpcPing := (r.FormValue("ping") == "on")
+ grpcPingDelay, _ := time.ParseDuration(r.FormValue("grpc-ping-delay"))
+
+ stdClient := (r.FormValue("stdclient") == "on")
+ var dur time.Duration
+ if durStr == "on" || ((len(r.Form["t"]) > 1) && r.Form["t"][1] == "on") {
+ dur = -1
+ } else {
+ var err error
+ dur, err = time.ParseDuration(durStr)
+ if mode == run && err != nil {
+ log.Errf("Error parsing duration '%s': %v", durStr, err)
+ }
+ }
+ c, _ := strconv.Atoi(r.FormValue("c")) // nolint: gas
+ flusher, ok := w.(http.Flusher)
+ if !ok {
+ log.Fatalf("expected http.ResponseWriter to be an http.Flusher")
+ }
+ out := io.Writer(os.Stderr)
+ if len(percList) == 0 && !strings.Contains(r.URL.RawQuery, "p=") {
+ percList = defaultPercentileList
+ }
+ if !JSONOnly {
+ out = fhttp.NewHTMLEscapeWriter(w)
+ }
+ n, _ := strconv.ParseInt(r.FormValue("n"), 10, 64) // nolint: gas
+ if strings.TrimSpace(url) == "" {
+ url = "http://url.needed" // just because url validation doesn't like empty urls
+ }
+ ro := periodic.RunnerOptions{
+ QPS: qps,
+ Duration: dur,
+ Out: out,
+ NumThreads: c,
+ Resolution: resolution,
+ Percentiles: percList,
+ Labels: labels,
+ Exactly: n,
+ }
+ if mode == run {
+ ro.Normalize()
+ uiRunMapMutex.Lock()
+ id++ // start at 1 as 0 means interrupt all
+ runid = id
+ runs[runid] = &ro
+ uiRunMapMutex.Unlock()
+ log.Infof("New run id %d", runid)
+ }
+ httpopts := fhttp.NewHTTPOptions(url)
+ httpopts.DisableFastClient = stdClient
+ if !JSONOnly {
+ // Normal html mode
+ if mainTemplate == nil {
+ w.WriteHeader(http.StatusInternalServerError)
+ log.Critf("Nil template")
+ return
+ }
+ w.Header().Set("Content-Type", "text/html; charset=UTF-8")
+ durSeconds := dur.Seconds()
+ if n > 0 {
+ if qps > 0 {
+ durSeconds = float64(n) / qps
+ } else {
+ durSeconds = -1
+ }
+ log.Infof("Estimating fixed #call %d duration to %g seconds %g", n, durSeconds, qps)
+ }
+ err := mainTemplate.Execute(w, &struct {
+ R *http.Request
+ Headers http.Header
+ Version string
+ LogoPath string
+ DebugPath string
+ ChartJSPath string
+ StartTime string
+ TargetURL string
+ Labels string
+ RunID int64
+ UpTime time.Duration
+ TestExpectedDurationSeconds float64
+ URLHostPort string
+ DoStop bool
+ DoLoad bool
+ }{r, httpopts.AllHeaders(), version.Short(), logoPath, debugPath, chartJSPath,
+ startTime.Format(time.ANSIC), url, labels, runid,
+ fhttp.RoundDuration(time.Since(startTime)), durSeconds, urlHostPort, mode == stop, mode == run})
+ if err != nil {
+ log.Critf("Template execution failed: %v", err)
+ }
+ }
+ switch mode {
+ case menu:
+ // nothing more to do
+ case stop:
+ if runid <= 0 { // Stop all
+ i := 0
+ uiRunMapMutex.Lock()
+ for _, v := range runs {
+ v.Abort()
+ i++
+ }
+ uiRunMapMutex.Unlock()
+ log.Infof("Interrupted all %d runs", i)
+ } else { // Stop one
+ uiRunMapMutex.Lock()
+ v, found := runs[runid]
+ if found {
+ v.Abort()
+ }
+ uiRunMapMutex.Unlock()
+ }
+ case run:
+ // mode == run case:
+ firstHeader := true
+ for _, header := range r.Form["H"] {
+ if len(header) == 0 {
+ continue
+ }
+ log.LogVf("adding header %v", header)
+ if firstHeader {
+ // If there is at least 1 non empty H passed, reset the header list
+ httpopts.ResetHeaders()
+ firstHeader = false
+ }
+ err := httpopts.AddAndValidateExtraHeader(header)
+ if err != nil {
+ log.Errf("Error adding custom headers: %v", err)
+ }
+ }
+ fhttp.OnBehalfOf(httpopts, r)
+ if !JSONOnly {
+ flusher.Flush()
+ }
+ var res periodic.HasRunnerResult
+ var err error
+ if runner == modegrpc {
+ o := fgrpc.GRPCRunnerOptions{
+ RunnerOptions: ro,
+ Destination: url,
+ UsePing: grpcPing,
+ Delay: grpcPingDelay,
+ }
+ if grpcSecure {
+ o.Destination = fhttp.AddHTTPS(url)
+ }
+ res, err = fgrpc.RunGRPCTest(&o)
+ } else {
+ o := fhttp.HTTPRunnerOptions{
+ HTTPOptions: *httpopts,
+ RunnerOptions: ro,
+ AllowInitialErrors: true,
+ }
+ res, err = fhttp.RunHTTPTest(&o)
+ }
+ if err != nil {
+ log.Errf("Init error for %s mode with url %s and options %+v : %v", runner, url, ro, err)
+ // nolint: errcheck,gas
+ w.Write([]byte(fmt.Sprintf(
+ "Aborting because %s\n\n",
+ html.EscapeString(err.Error()))))
+ return
+ }
+ json, err := json.MarshalIndent(res, "", " ")
+ if err != nil {
+ log.Fatalf("Unable to json serialize result: %v", err)
+ }
+ savedAs := ""
+ id := res.Result().ID()
+ if DoSave {
+ savedAs = SaveJSON(id, json)
+ }
+ if JSONOnly {
+ w.Header().Set("Content-Type", "application/json")
+ _, err = w.Write(json)
+ if err != nil {
+ log.Errf("Unable to write json output for %v: %v", r.RemoteAddr, err)
+ }
+ return
+ }
+ if savedAs != "" {
+ // nolint: errcheck, gas
+ w.Write([]byte(fmt.Sprintf("Saved result to %s "+
+ " (graph link )\n", savedAs, savedAs, id)))
+ }
+ // nolint: errcheck, gas
+ w.Write([]byte(fmt.Sprintf("All done %d calls %.3f ms avg, %.1f qps\n\n
Go to Top .