diff --git a/.bazelrc b/.bazelrc index f39cde75ecc8..eff468555e2e 100644 --- a/.bazelrc +++ b/.bazelrc @@ -277,22 +277,18 @@ build:remote-ci --remote_cache=grpcs://remotebuildexecution.googleapis.com build:remote-ci --remote_executor=grpcs://remotebuildexecution.googleapis.com # Fuzz builds -# -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION is passed in in the bazel build target -# rules for fuzz tests. Passing it in the CLI will cause dependencies to be build -# with the macro. Causing issues in RouteMatcherTest.TestRoutes that expect prod -# behavior from RE2 library. -build:asan-fuzzer --config=asan -build:asan-fuzzer --define=FUZZING_ENGINE=libfuzzer -build:asan-fuzzer --copt=-fsanitize=fuzzer-no-link -build:asan-fuzzer --copt=-fno-omit-frame-pointer -# Remove UBSAN halt_on_error to avoid crashing on protobuf errors. -build:asan-fuzzer --test_env=UBSAN_OPTIONS=print_stacktrace=1 - # Fuzzing without ASAN. This is useful for profiling fuzzers without any ASAN artifacts. build:plain-fuzzer --define=FUZZING_ENGINE=libfuzzer build:plain-fuzzer --define ENVOY_CONFIG_ASAN=1 build:plain-fuzzer --copt=-fsanitize=fuzzer-no-link build:plain-fuzzer --linkopt=-fsanitize=fuzzer-no-link +build:plain-fuzzer --copt=-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + +build:asan-fuzzer --config=plain-fuzzer +build:asan-fuzzer --config=asan +build:asan-fuzzer --copt=-fno-omit-frame-pointer +# Remove UBSAN halt_on_error to avoid crashing on protobuf errors. +build:asan-fuzzer --test_env=UBSAN_OPTIONS=print_stacktrace=1 # Compile database generation config build:compdb --build_tag_filters=-nocompdb diff --git a/api/envoy/api/v2/route/route_components.proto b/api/envoy/api/v2/route/route_components.proto index c1e84a5618a7..d73fbb8674c9 100644 --- a/api/envoy/api/v2/route/route_components.proto +++ b/api/envoy/api/v2/route/route_components.proto @@ -1133,13 +1133,18 @@ message HedgePolicy { // [#not-implemented-hide:] type.FractionalPercent additional_request_chance = 2; - // Indicates that a hedged request should be sent when the per-try timeout - // is hit. This will only occur if the retry policy also indicates that a - // timed out request should be retried. - // Once a timed out request is retried due to per try timeout, the router - // filter will ensure that it is not retried again even if the returned - // response headers would otherwise be retried according the specified - // :ref:`RetryPolicy `. + // Indicates that a hedged request should be sent when the per-try timeout is hit. + // This means that a retry will be issued without resetting the original request, leaving multiple upstream requests in flight. + // The first request to complete successfully will be the one returned to the caller. + // + // * At any time, a successful response (i.e. not triggering any of the retry-on conditions) would be returned to the client. + // * Before per-try timeout, an error response (per retry-on conditions) would be retried immediately or returned to the client + // if there are no more retries left. + // * After per-try timeout, an error response would be discarded, as a retry in the form of a hedged request is already in progress. + // + // Note: For this to have effect, you must have a :ref:`RetryPolicy ` that retries at least + // one error code and specifies a maximum number of retries. + // // Defaults to false.
bool hedge_on_per_try_timeout = 3; } diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index 62633012cf47..6915c62922fa 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -1310,13 +1310,18 @@ message HedgePolicy { // [#not-implemented-hide:] type.v3.FractionalPercent additional_request_chance = 2; - // Indicates that a hedged request should be sent when the per-try timeout - // is hit. This will only occur if the retry policy also indicates that a - // timed out request should be retried. - // Once a timed out request is retried due to per try timeout, the router - // filter will ensure that it is not retried again even if the returned - // response headers would otherwise be retried according the specified - // :ref:`RetryPolicy `. + // Indicates that a hedged request should be sent when the per-try timeout is hit. + // This means that a retry will be issued without resetting the original request, leaving multiple upstream requests in flight. + // The first request to complete successfully will be the one returned to the caller. + // + // * At any time, a successful response (i.e. not triggering any of the retry-on conditions) would be returned to the client. + // * Before per-try timeout, an error response (per retry-on conditions) would be retried immediately or returned to the client + // if there are no more retries left. + // * After per-try timeout, an error response would be discarded, as a retry in the form of a hedged request is already in progress. + // + // Note: For this to have effect, you must have a :ref:`RetryPolicy ` that retries at least + // one error code and specifies a maximum number of retries. + // // Defaults to false. bool hedge_on_per_try_timeout = 3; } diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto index 9f3da5376ae0..3d6dbfd41130 100644 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -1257,13 +1257,18 @@ message HedgePolicy { // [#not-implemented-hide:] type.v3.FractionalPercent additional_request_chance = 2; - // Indicates that a hedged request should be sent when the per-try timeout - // is hit. This will only occur if the retry policy also indicates that a - // timed out request should be retried. - // Once a timed out request is retried due to per try timeout, the router - // filter will ensure that it is not retried again even if the returned - // response headers would otherwise be retried according the specified - // :ref:`RetryPolicy `. + // Indicates that a hedged request should be sent when the per-try timeout is hit. + // This means that a retry will be issued without resetting the original request, leaving multiple upstream requests in flight. + // The first request to complete successfully will be the one returned to the caller. + // + // * At any time, a successful response (i.e. not triggering any of the retry-on conditions) would be returned to the client. + // * Before per-try timeout, an error response (per retry-on conditions) would be retried immediately or returned to the client + // if there are no more retries left. + // * After per-try timeout, an error response would be discarded, as a retry in the form of a hedged request is already in progress.
+ // + // Note: For this to have effect, you must have a :ref:`RetryPolicy ` that retries at least + // one error code and specifies a maximum number of retries. + // // Defaults to false. bool hedge_on_per_try_timeout = 3; } diff --git a/bazel/PPROF.md b/bazel/PPROF.md index 97e1c0541181..fa0a4f012555 100644 --- a/bazel/PPROF.md +++ b/bazel/PPROF.md @@ -1,7 +1,8 @@ # CPU or memory consumption testing with `pprof` To use `pprof` to analyze performance and memory consumption in Envoy, you can -use the built-in statically linked profiler, or dynamically link it in to a +use the built-in statically linked profiler provided by +[gperftools](https://github.com/gperftools/gperftools), or dynamically link it into a specific place yourself. ## Collecting CPU or heap profile for a full execution of envoy @@ -14,7 +15,7 @@ inside Build the static binary using bazel: - $ bazel build //source/exe:envoy-static + $ bazel build --define tcmalloc=gperftools //source/exe:envoy-static ### Collecting the profile @@ -41,15 +42,15 @@ The profiler library is automatically linked into envoy_cc_test targets. Run a test with heap profiling enabled, like so: - $ bazel test --test_env=HEAPPROFILE=/tmp/heapprof + $ bazel test --test_env=HEAPPROFILE=/tmp/heapprof --define tcmalloc=gperftools Run a test with CPU profiling enabled, like so: - $ bazel test --test_env=CPUPROFILE=/tmp/cpuprof + $ bazel test --test_env=CPUPROFILE=/tmp/cpuprof --define tcmalloc=gperftools Note that heap checks and heap profile collection in tests have noticiable performance implications. Use the following command to collect a CPU profile from a test target with heap check and heap profile collection disabled: - $ bazel test --test_env=CPUPROFILE=/tmp/cpuprof --test_env=HEAPPROFILE= --test_env=HEAPCHECK= + $ bazel test --test_env=CPUPROFILE=/tmp/cpuprof --test_env=HEAPPROFILE= --test_env=HEAPCHECK= --define tcmalloc=gperftools ## Starting and stopping profile programmatically diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index 43dbfe3459ae..a374a7d905de 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -91,8 +91,8 @@ envoy_cmake_external( "//conditions:default": [], }), postfix_script = select({ - "//bazel:windows_x86_64": "cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/src/lib/nameser.h $INSTALLDIR/include/nameser.h && cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/src/lib/ares_dns.h $INSTALLDIR/include/ares_dns.h", - "//conditions:default": "cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/src/lib/ares_dns.h $INSTALLDIR/include/ares_dns.h", + "//bazel:windows_x86_64": "cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/src/lib/nameser.h $INSTALLDIR/include/nameser.h && cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/include/ares_dns.h $INSTALLDIR/include/ares_dns.h", + "//conditions:default": "cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/include/ares_dns.h $INSTALLDIR/include/ares_dns.h", }), static_libraries = select({ "//bazel:windows_x86_64": ["cares.lib"], diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index e3bca8398e0d..aeaf9de4bfae 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -89,22 +89,22 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Abseil", project_desc = "Open source collection of C++ libraries drawn from the most fundamental pieces of Google’s internal codebase", project_url = "https://abseil.io/", - version = "8f1c34a77a2ba04512b7f9cbc6013d405e6a0b31", - sha256 =
"635367c5cac4bbab95d0485ba9e68fa422546b06ce050190c99be7e23aba3ce3", + version = "5d8fc9192245f0ea67094af57399d7931d6bd53f", + sha256 = "e3812f256dd7347a33bf9d93a950cf356c61c0596842ff07d8154cd415145d83", strip_prefix = "abseil-cpp-{version}", urls = ["https://github.com/abseil/abseil-cpp/archive/{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], - release_date = "2020-10-17", + release_date = "2020-11-24", cpe = "N/A", ), com_github_c_ares_c_ares = dict( project_name = "c-ares", project_desc = "C library for asynchronous DNS requests", project_url = "https://c-ares.haxx.se/", - version = "c15f403875ababb1149215d49683d720b3d035c7", - sha256 = "0ad4f9748752909b00a0ca8d2b6a075e0a7a06ee922d9dcf9625d2562d7c894a", + version = "1.17.1", + sha256 = "d73dd0f6de824afd407ce10750ea081af47eba52b8a6cb307d220131ad93fc40", strip_prefix = "c-ares-{version}", - urls = ["https://github.com/c-ares/c-ares/archive/{version}.tar.gz"], + urls = ["https://github.com/c-ares/c-ares/releases/download/cares-{underscore_version}/c-ares-{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], release_date = "2020-10-21", cpe = "cpe:2.3:a:c-ares_project:c-ares:*", diff --git a/ci/repokitteh/modules/ownerscheck.star b/ci/repokitteh/modules/ownerscheck.star index e93010f89a7f..08b728a9dacc 100644 --- a/ci/repokitteh/modules/ownerscheck.star +++ b/ci/repokitteh/modules/ownerscheck.star @@ -8,7 +8,8 @@ # "path": "api/", # "label": "api", # "allow_global_approval": True, -# "github_status_label" = "any API change", +# "github_status_label": "any API change", +# "auto_assign": True, # }, # ], # ) @@ -27,8 +28,13 @@ # # 'label' refers to a GitHub label applied to any matching PR. The GitHub check status # can be customized with `github_status_label`. +# +# If 'auto_assign' is set True, a randomly selected reviwer from the owner team will +# be selected and set as a reviewer on the PR if there is not already a member of the +# owner team set as reviewer or assignee for the PR. load("text", "match") +load("time", "now") load("github.com/repokitteh/modules/lib/utils.star", "react") def _store_partial_approval(who, files): @@ -64,7 +70,8 @@ def _get_relevant_specs(specs, changed_files): label=spec.get("label", None), path_match=path_match, allow_global_approval=allow_global_approval, - status_label=status_label)) + status_label=status_label, + auto_assign=spec.get("auto_assign", False))) print("specs: %s" % relevant) @@ -152,20 +159,19 @@ def _reconcile(config, specs=None): return results -def _comment(config, results, force=False): +def _comment(config, results, assignees, sender, force=False): lines = [] for spec, approved in results: if approved: continue - mention = spec.owner + owner = spec.owner - if mention[0] != '@': - mention = '@' + mention + if owner[-1] == '!': + owner = owner[:-1] - if mention[-1] == '!': - mention = mention[:-1] + mention = '@' + owner match_description = spec.path_match if match_description: @@ -185,21 +191,40 @@ def _comment(config, results, force=False): elif mode == 'fyi': lines.append('CC %s: FYI only%s.' % (mention, match_description)) + if mode != 'skip' and spec.auto_assign: + api_assignee = None + # Find owners via github.team_get_by_name, github.team_list_members + team_name = owner.split('/')[1] + team = github.team_get_by_name(team_name) + # Exclude author from assignment. + members = [m['login'] for m in github.team_list_members(team['id']) if m['login'] != sender] + # Is a team member already assigned? The first assigned team member is picked. 
Bad O(n^2) as + # Starlark doesn't have sets, n is small. + for assignee in assignees: + if assignee in members: + api_assignee = assignee + break + # Otherwise, pick at "random" (we just use timestamp). + if not api_assignee: + api_assignee = members[now().second % len(members)] + lines.append('API shepherd assignee is @%s' % api_assignee) + github.issue_assign(api_assignee) + if lines: github.issue_create_comment('\n'.join(lines)) -def _reconcile_and_comment(config): - _comment(config, _reconcile(config)) +def _reconcile_and_comment(config, assignees, sender): + _comment(config, _reconcile(config), assignees, sender) -def _force_reconcile_and_comment(config): - _comment(config, _reconcile(config), force=True) +def _force_reconcile_and_comment(config, assignees, sender): + _comment(config, _reconcile(config), assignees, sender, force=True) -def _pr(action, config): +def _pr(action, config, assignees, sender): if action in ['synchronize', 'opened']: - _reconcile_and_comment(config) + _reconcile_and_comment(config, assignees, sender) def _pr_review(action, review_state, config): diff --git a/docs/root/faq/extensions/contract.rst b/docs/root/faq/extensions/contract.rst index 314701aca805..755e15eb080b 100644 --- a/docs/root/faq/extensions/contract.rst +++ b/docs/root/faq/extensions/contract.rst @@ -5,12 +5,13 @@ Is there a contract my HTTP filter must adhere to? * Headers encoding/decoding - * During encoding/decoding of headers if a filter returns ``FilterHeadersStatus::StopIteration``, - the processing can be resumed if ``encodeData()``/``decodeData()`` return + * During encoding/decoding of headers if a local reply wasn't sent and a filter + returns ``FilterHeadersStatus::StopIteration``, the processing can be resumed + if ``encodeData()``/``decodeData()`` return ``FilterDataStatus::Continue`` or by explicitly calling ``continueEncoding()``/``continueDecoding()``. - * During encoding/decoding of headers if a filter returns + * During encoding/decoding of headers if a local reply wasn't sent and a filter returns ``FilterHeadersStatus::StopAllIterationAndBuffer`` or ``FilterHeadersStatus::StopAllIterationAndWatermark``, the processing can be resumed by calling ``continueEncoding()``/``continueDecoding()``. @@ -24,7 +25,7 @@ Is there a contract my HTTP filter must adhere to? * Data encoding/decoding - * During encoding/decoding of data if a filter returns + * During encoding/decoding of data if a local reply wasn't sent and a filter returns ``FilterDataStatus::StopIterationAndBuffer``, ``FilterDataStatus::StopIterationAndWatermark``, or ``FilterDataStatus::StopIterationNoBuffer``, the processing can be resumed if ``encodeData()``/``decodeData()`` return ``FilterDataStatus::Continue`` or by explicitly @@ -32,7 +33,8 @@ Is there a contract my HTTP filter must adhere to? * Trailers encoding/decoding - * During encoding/decoding of trailers if a filter returns ``FilterTrailersStatus::StopIteration``, + * During encoding/decoding of trailers if a local reply wasn't sent and a filter + returns ``FilterTrailersStatus::StopIteration``, the processing can be resumed by explicitly calling ``continueEncoding()``/``continueDecoding()``. Are there well-known headers that will appear in the given headers map of ``decodeHeaders()``? 
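The filter contract wording above is easiest to see in code. Below is a minimal sketch of the pause/resume pattern it describes; the filter class and the async hook are hypothetical, while `PassThroughDecoderFilter`, `FilterHeadersStatus`, and `continueDecoding()` are the Envoy APIs the contract names:

```cpp
#include "envoy/http/filter.h"

#include "extensions/filters/http/common/pass_through_filter.h"

namespace Envoy {
namespace Http {

// Hypothetical filter illustrating the contract: returning StopIteration from
// decodeHeaders() without sending a local reply pauses the filter chain, and
// processing may later be resumed with continueDecoding().
class PausingFilter : public PassThroughDecoderFilter {
public:
  FilterHeadersStatus decodeHeaders(RequestHeaderMap&, bool) override {
    // Hold the request until some asynchronous work completes. No local reply
    // is sent here, so the resumption below remains legal per the contract.
    return FilterHeadersStatus::StopIteration;
  }

  // Invoked when the asynchronous work finishes (wiring omitted for brevity).
  void onAsyncWorkDone() { decoder_callbacks_->continueDecoding(); }
};

} // namespace Http
} // namespace Envoy
```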
diff --git a/docs/root/operations/admin.rst index ae2dd425a28e..514c25b51e09 100644 --- a/docs/root/operations/admin.rst +++ b/docs/root/operations/admin.rst @@ -511,9 +511,6 @@ modify different aspects of the server: but in response to user requests on high core-count machines, this can cause performance issues due to mutex contention. - This admin endpoint requires Envoy to be started with option - `--use-fake-symbol-table 0`. - See :repo:`source/docs/stats.md` for more details. Note also that actual mutex contention can be tracked via :http:get:`/contention`. diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 5bad3f54a38a..a19010abf84d 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -85,3 +85,4 @@ Deprecated * gzip: :ref:`HTTP Gzip filter ` is rejected now unless explicitly allowed with :ref:`runtime override ` `envoy.deprecated_features.allow_deprecated_gzip_http_filter` set to `true`. * logging: the `--log-format-prefix-with-location` option is removed. * ratelimit: the :ref:`dynamic metadata ` action is deprecated in favor of the more generic :ref:`metadata ` action. +* stats: the `--use-fake-symbol-table` option is removed. diff --git a/generated_api_shadow/envoy/api/v2/route/route_components.proto b/generated_api_shadow/envoy/api/v2/route/route_components.proto index c1e84a5618a7..d73fbb8674c9 100644 --- a/generated_api_shadow/envoy/api/v2/route/route_components.proto +++ b/generated_api_shadow/envoy/api/v2/route/route_components.proto @@ -1133,13 +1133,18 @@ message HedgePolicy { // [#not-implemented-hide:] type.FractionalPercent additional_request_chance = 2; - // Indicates that a hedged request should be sent when the per-try timeout - // is hit. This will only occur if the retry policy also indicates that a - // timed out request should be retried. - // Once a timed out request is retried due to per try timeout, the router - // filter will ensure that it is not retried again even if the returned - // response headers would otherwise be retried according the specified - // :ref:`RetryPolicy `. + // Indicates that a hedged request should be sent when the per-try timeout is hit. + // This means that a retry will be issued without resetting the original request, leaving multiple upstream requests in flight. + // The first request to complete successfully will be the one returned to the caller. + // + // * At any time, a successful response (i.e. not triggering any of the retry-on conditions) would be returned to the client. + // * Before per-try timeout, an error response (per retry-on conditions) would be retried immediately or returned to the client + // if there are no more retries left. + // * After per-try timeout, an error response would be discarded, as a retry in the form of a hedged request is already in progress. + // + // Note: For this to have effect, you must have a :ref:`RetryPolicy ` that retries at least + // one error code and specifies a maximum number of retries. + // // Defaults to false.
bool hedge_on_per_try_timeout = 3; } diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index 6b97d3ff4a12..7813038930a3 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -1317,13 +1317,18 @@ message HedgePolicy { // [#not-implemented-hide:] type.v3.FractionalPercent additional_request_chance = 2; - // Indicates that a hedged request should be sent when the per-try timeout - // is hit. This will only occur if the retry policy also indicates that a - // timed out request should be retried. - // Once a timed out request is retried due to per try timeout, the router - // filter will ensure that it is not retried again even if the returned - // response headers would otherwise be retried according the specified - // :ref:`RetryPolicy `. + // Indicates that a hedged request should be sent when the per-try timeout is hit. + // This means that a retry will be issued without resetting the original request, leaving multiple upstream requests in flight. + // The first request to complete successfully will be the one returned to the caller. + // + // * At any time, a successful response (i.e. not triggering any of the retry-on conditions) would be returned to the client. + // * Before per-try timeout, an error response (per retry-on conditions) would be retried immediately or returned to the client + // if there are no more retries left. + // * After per-try timeout, an error response would be discarded, as a retry in the form of a hedged request is already in progress. + // + // Note: For this to have effect, you must have a :ref:`RetryPolicy ` that retries at least + // one error code and specifies a maximum number of retries. + // // Defaults to false. bool hedge_on_per_try_timeout = 3; } diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto index 5c9c2c46a202..0860c9d56700 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -1324,13 +1324,18 @@ message HedgePolicy { // [#not-implemented-hide:] type.v3.FractionalPercent additional_request_chance = 2; - // Indicates that a hedged request should be sent when the per-try timeout - // is hit. This will only occur if the retry policy also indicates that a - // timed out request should be retried. - // Once a timed out request is retried due to per try timeout, the router - // filter will ensure that it is not retried again even if the returned - // response headers would otherwise be retried according the specified - // :ref:`RetryPolicy `. + // Indicates that a hedged request should be sent when the per-try timeout is hit. + // This means that a retry will be issued without resetting the original request, leaving multiple upstream requests in flight. + // The first request to complete successfully will be the one returned to the caller. + // + // * At any time, a successful response (i.e. not triggering any of the retry-on conditions) would be returned to the client. + // * Before per-try timeout, an error response (per retry-on conditions) would be retried immediately or returned to the client + // if there are no more retries left.
+ // * After per-try timeout, an error response would be discarded, as a retry in the form of a hedged request is already in progress. + // + // Note: For this to have effect, you must have a :ref:`RetryPolicy ` that retries at least + // one error code and specifies a maximum number of retries. + // // Defaults to false. bool hedge_on_per_try_timeout = 3; } diff --git a/include/envoy/server/options.h b/include/envoy/server/options.h index 362857899bc2..9b3709af846b 100644 --- a/include/envoy/server/options.h +++ b/include/envoy/server/options.h @@ -234,11 +234,6 @@ class Options { */ virtual bool mutexTracingEnabled() const PURE; - /** - * @return whether to use the fake symbol table implementation. - */ - virtual bool fakeSymbolTableEnabled() const PURE; - /** * @return bool indicating whether cpuset size should determine the number of worker threads. */ diff --git a/include/envoy/stats/symbol_table.h b/include/envoy/stats/symbol_table.h index b84d340f79d1..e7f171b6f8c9 100644 --- a/include/envoy/stats/symbol_table.h +++ b/include/envoy/stats/symbol_table.h @@ -136,25 +136,6 @@ class SymbolTable { virtual void debugPrint() const PURE; #endif - /** - * Calls the provided function with a string-view representation of the - * elaborated name. This is useful during the interim period when we - * are using FakeSymbolTableImpl, to avoid an extra allocation. Once - * we migrate to using SymbolTableImpl, this interface will no longer - * be helpful and can be removed. The reason it's useful now is that - * it makes up, in part, for some extra runtime overhead that is spent - * on the SymbolTable abstraction and API, without getting full benefit - * from the improved representation. - * - * TODO(#6307): Remove this when the transition from FakeSymbolTableImpl to - * SymbolTableImpl is complete. - * - * @param stat_name The stat name. - * @param fn The function to call with the elaborated stat name as a string_view. - */ - virtual void callWithStringView(StatName stat_name, - const std::function& fn) const PURE; - using RecentLookupsFn = std::function; /** diff --git a/include/envoy/upstream/host_description.h b/include/envoy/upstream/host_description.h index bd0714bae455..71f91cde11dc 100644 --- a/include/envoy/upstream/host_description.h +++ b/include/envoy/upstream/host_description.h @@ -4,6 +4,7 @@ #include #include +#include "envoy/common/time.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/network/address.h" #include "envoy/network/transport_socket.h" @@ -148,6 +149,11 @@ class HostDescription { * Set the current priority. */ virtual void priority(uint32_t) PURE; + + /** + * @return the monotonic time at which the host was created. + */ + virtual MonotonicTime creationTime() const PURE; }; using HostDescriptionConstSharedPtr = std::shared_ptr; diff --git a/repokitteh.star b/repokitteh.star index bf4c78b59291..5181346d560a 100644 --- a/repokitteh.star +++ b/repokitteh.star @@ -21,6 +21,7 @@ use( "path": "api/envoy/", "label": "api", "github_status_label": "any API change", + "auto_assign": True, }, { "owner": "envoyproxy/api-watchers", diff --git a/source/common/grpc/async_client_manager_impl.cc b/source/common/grpc/async_client_manager_impl.cc index 5f809755f89d..14866aa25d0e 100644 --- a/source/common/grpc/async_client_manager_impl.cc +++ b/source/common/grpc/async_client_manager_impl.cc @@ -11,6 +11,20 @@ namespace Envoy { namespace Grpc { +namespace { + +// Validates a string for gRPC header key compliance. This is a subset of legal HTTP characters.
+// See https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md +bool validateGrpcHeaderChars(absl::string_view key) { + for (auto ch : key) { + if (!(absl::ascii_isalnum(ch) || ch == '_' || ch == '.' || ch == '-')) { + return false; + } + } + return true; +} + +} // namespace AsyncClientFactoryImpl::AsyncClientFactoryImpl(Upstream::ClusterManager& cm, const envoy::config::core::v3::GrpcService& config, @@ -66,6 +80,14 @@ GoogleAsyncClientFactoryImpl::GoogleAsyncClientFactoryImpl( #else ASSERT(google_tls_slot_ != nullptr); #endif + + // Check metadata for gRPC API compliance. Uppercase characters are lowered in the HeaderParser. + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md + for (const auto& header : config.initial_metadata()) { + if (!validateGrpcHeaderChars(header.key()) || !validateGrpcHeaderChars(header.value())) { + throw EnvoyException("Illegal characters in gRPC initial metadata."); + } + } } RawAsyncClientPtr GoogleAsyncClientFactoryImpl::create() { diff --git a/source/common/signal/fatal_action.h b/source/common/signal/fatal_action.h index 4ecaf1461635..96e843dc1877 100644 --- a/source/common/signal/fatal_action.h +++ b/source/common/signal/fatal_action.h @@ -17,7 +17,7 @@ enum class Status { // We either haven't set up the Fatal Action manager, or we unregistered it // as the server terminated. - ActionManangerUnset, + ActionManagerUnset, // Another thread beat us to running the Fatal Actions. RunningOnAnotherThread, diff --git a/source/common/signal/fatal_error_handler.cc b/source/common/signal/fatal_error_handler.cc index e7099e6f0254..627c352415b1 100644 --- a/source/common/signal/fatal_error_handler.cc +++ b/source/common/signal/fatal_error_handler.cc @@ -67,7 +67,7 @@ FatalAction::Status runFatalActions(FatalActionType action_type) { FatalAction::FatalActionManager* action_manager = fatal_action_manager.load(); if (action_manager == nullptr) { - return FatalAction::Status::ActionManangerUnset; + return FatalAction::Status::ActionManagerUnset; } int64_t my_tid = action_manager->getThreadFactory().currentThreadId().getId(); diff --git a/source/common/signal/signal_action.cc b/source/common/signal/signal_action.cc index 143b95a81f6c..dea90f1cc1f4 100644 --- a/source/common/signal/signal_action.cc +++ b/source/common/signal/signal_action.cc @@ -32,7 +32,7 @@ void SignalAction::sigHandler(int sig, siginfo_t* info, void* context) { FatalErrorHandler::callFatalErrorHandlers(std::cerr); FatalErrorHandler::runUnsafeActions(); break; - case FatalAction::Status::ActionManangerUnset: + case FatalAction::Status::ActionManagerUnset: FatalErrorHandler::callFatalErrorHandlers(std::cerr); break; case FatalAction::Status::RunningOnAnotherThread: { diff --git a/source/common/stats/symbol_table_impl.cc b/source/common/stats/symbol_table_impl.cc index 5a9a6df7461d..8b6479c6d840 100644 --- a/source/common/stats/symbol_table_impl.cc +++ b/source/common/stats/symbol_table_impl.cc @@ -236,11 +236,6 @@ std::string SymbolTableImpl::toString(const StatName& stat_name) const { return absl::StrJoin(decodeStrings(stat_name.data(), stat_name.dataSize()), "."); } -void SymbolTableImpl::callWithStringView(StatName stat_name, - const std::function& fn) const { - fn(toString(stat_name)); -} - void SymbolTableImpl::incRefCount(const StatName& stat_name) { // Before taking the lock, decode the array of symbols from the SymbolTable::Storage. 
const SymbolVec symbols = Encoding::decodeSymbols(stat_name.data(), stat_name.dataSize()); @@ -615,7 +610,7 @@ void StatNameList::clear(SymbolTable& symbol_table) { } StatNameSet::StatNameSet(SymbolTable& symbol_table, absl::string_view name) - : name_(std::string(name)), symbol_table_(symbol_table), pool_(symbol_table) { + : name_(std::string(name)), pool_(symbol_table) { builtin_stat_names_[""] = StatName(); } diff --git a/source/common/stats/symbol_table_impl.h b/source/common/stats/symbol_table_impl.h index 816799461803..97c457c0e0a9 100644 --- a/source/common/stats/symbol_table_impl.h +++ b/source/common/stats/symbol_table_impl.h @@ -186,8 +186,6 @@ class SymbolTableImpl : public SymbolTable { void populateList(const StatName* names, uint32_t num_names, StatNameList& list) override; StoragePtr encode(absl::string_view name) override; StoragePtr makeDynamicStorage(absl::string_view name) override; - void callWithStringView(StatName stat_name, - const std::function& fn) const override; #ifndef ENVOY_CONFIG_COVERAGE void debugPrint() const override; @@ -585,7 +583,7 @@ class StatNamePool { * SymbolTable lock, but tokens are not shared across StatNames. * * The SymbolTable is required as a constructor argument to assist in encoding - * the stat-names, which differs between FakeSymbolTableImpl and SymbolTableImpl. + * the stat-names. * * Example usage: * StatNameDynamicPool pool(symbol_table); @@ -652,7 +650,6 @@ class StatNameList { void clear(SymbolTable& symbol_table); private: - friend class FakeSymbolTableImpl; friend class SymbolTableImpl; /** @@ -666,10 +663,8 @@ class StatNameList { * ... * * - * For FakeSymbolTableImpl, each symbol is a single char, casted into a - * uint8_t. For SymbolTableImpl, each symbol is 1 or more bytes, in a - * variable-length encoding. See SymbolTableImpl::Encoding::addSymbol for - * details. + * For SymbolTableImpl, each symbol is 1 or more bytes, in a variable-length + * encoding. See SymbolTableImpl::Encoding::addSymbol for details. 
*/ void moveStorageIntoList(SymbolTable::StoragePtr&& storage) { storage_ = std::move(storage); } @@ -841,13 +836,11 @@ class StatNameSet { } private: - friend class FakeSymbolTableImpl; friend class SymbolTableImpl; StatNameSet(SymbolTable& symbol_table, absl::string_view name); const std::string name_; - Stats::SymbolTable& symbol_table_; Stats::StatNamePool pool_ ABSL_GUARDED_BY(mutex_); mutable absl::Mutex mutex_; using StringStatNameMap = absl::flat_hash_map; diff --git a/source/common/stats/thread_local_store.cc b/source/common/stats/thread_local_store.cc index 11b977440837..e936a8654371 100644 --- a/source/common/stats/thread_local_store.cc +++ b/source/common/stats/thread_local_store.cc @@ -349,9 +349,8 @@ class StatNameTagHelper { : pool_(tls.symbolTable()), stat_name_tags_(stat_name_tags.value_or(StatNameTagVector())) { if (!stat_name_tags) { TagVector tags; - tls.symbolTable().callWithStringView(name, [&tags, &tls, this](absl::string_view name_str) { - tag_extracted_name_ = pool_.add(tls.tagProducer().produceTags(name_str, tags)); - }); + tag_extracted_name_ = + pool_.add(tls.tagProducer().produceTags(tls.symbolTable().toString(name), tags)); StatName empty; for (const auto& tag : tags) { StatName tag_name = tls.wellKnownTags().getBuiltin(tag.name_, empty); @@ -603,10 +602,7 @@ Histogram& ThreadLocalStoreImpl::ScopeImpl::histogramFromStatNameWithTags( StatNameTagHelper tag_helper(parent_, joiner.tagExtractedName(), stat_name_tags); ConstSupportedBuckets* buckets = nullptr; - symbolTable().callWithStringView(final_stat_name, - [&buckets, this](absl::string_view stat_name) { - buckets = &parent_.histogram_settings_->buckets(stat_name); - }); + buckets = &parent_.histogram_settings_->buckets(symbolTable().toString(final_stat_name)); RefcountPtr stat; { diff --git a/source/common/upstream/eds.cc b/source/common/upstream/eds.cc index 124ea3c5489d..15d2409f4d09 100644 --- a/source/common/upstream/eds.cc +++ b/source/common/upstream/eds.cc @@ -20,7 +20,7 @@ EdsClusterImpl::EdsClusterImpl( Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) : BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope), - added_via_api), + added_via_api, factory_context.dispatcher().timeSource()), Envoy::Config::SubscriptionBase( cluster.eds_cluster_config().eds_config().resource_api_version(), factory_context.messageValidationVisitor(), "cluster_name"), @@ -58,7 +58,7 @@ void EdsClusterImpl::BatchUpdateHelper::batchUpdate(PrioritySet::HostUpdateCb& h priority_state_manager.registerHostForPriority( lb_endpoint.endpoint().hostname(), parent_.resolveProtoAddress(lb_endpoint.endpoint().address()), locality_lb_endpoint, - lb_endpoint); + lb_endpoint, parent_.time_source_); } } diff --git a/source/common/upstream/health_discovery_service.cc b/source/common/upstream/health_discovery_service.cc index 12c92979f418..7b7c94ad24fe 100644 --- a/source/common/upstream/health_discovery_service.cc +++ b/source/common/upstream/health_discovery_service.cc @@ -348,7 +348,8 @@ HdsCluster::HdsCluster(Server::Admin& admin, Runtime::Loader& runtime, ProtobufMessage::ValidationVisitor& validation_visitor, Api::Api& api) : runtime_(runtime), cluster_(std::move(cluster)), bind_config_(bind_config), stats_(stats), ssl_context_manager_(ssl_context_manager), added_via_api_(added_via_api), - hosts_(new HostVector()), validation_visitor_(validation_visitor) { + hosts_(new HostVector()), validation_visitor_(validation_visitor), + 
time_source_(dispatcher.timeSource()) { ENVOY_LOG(debug, "Creating an HdsCluster"); priority_set_.getOrCreateHostSet(0); // Set initial hashes for possible delta updates. @@ -377,7 +378,7 @@ HdsCluster::HdsCluster(Server::Admin& admin, Runtime::Loader& runtime, info_, "", Network::Address::resolveProtoAddress(host.endpoint().address()), nullptr, 1, locality_endpoints.locality(), envoy::config::endpoint::v3::Endpoint::HealthCheckConfig().default_instance(), 0, - envoy::config::core::v3::UNKNOWN); + envoy::config::core::v3::UNKNOWN, time_source_); // Add this host/endpoint pointer to our flat list of endpoints for health checking. hosts_->push_back(endpoint); // Add this host/endpoint pointer to our structured list by locality so results can be @@ -489,7 +490,7 @@ void HdsCluster::updateHosts( info_, "", Network::Address::resolveProtoAddress(endpoint.endpoint().address()), nullptr, 1, endpoints.locality(), envoy::config::endpoint::v3::Endpoint::HealthCheckConfig().default_instance(), 0, - envoy::config::core::v3::UNKNOWN); + envoy::config::core::v3::UNKNOWN, time_source_); // Set the initial health status as in HdsCluster::initialize. host->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); diff --git a/source/common/upstream/health_discovery_service.h b/source/common/upstream/health_discovery_service.h index a3ecbb7c428e..cb3cf1479368 100644 --- a/source/common/upstream/health_discovery_service.h +++ b/source/common/upstream/health_discovery_service.h @@ -108,6 +108,7 @@ class HdsCluster : public Cluster, Logger::Loggable { std::vector health_checkers_; HealthCheckerMap health_checkers_map_; ProtobufMessage::ValidationVisitor& validation_visitor_; + TimeSource& time_source_; void updateHealthchecks( const Protobuf::RepeatedPtrField& health_checks, diff --git a/source/common/upstream/logical_dns_cluster.cc b/source/common/upstream/logical_dns_cluster.cc index fec807c928e4..019432e68502 100644 --- a/source/common/upstream/logical_dns_cluster.cc +++ b/source/common/upstream/logical_dns_cluster.cc @@ -48,7 +48,8 @@ LogicalDnsCluster::LogicalDnsCluster( Network::DnsResolverSharedPtr dns_resolver, Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) - : ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), added_via_api), + : ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), added_via_api, + factory_context.dispatcher().timeSource()), dns_resolver_(dns_resolver), dns_refresh_rate_ms_( std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(cluster, dns_refresh_rate, 5000))), @@ -121,8 +122,9 @@ void LogicalDnsCluster::startResolve() { Network::Utility::portFromTcpUrl(dns_url_)); if (!logical_host_) { - logical_host_ = std::make_shared( - info_, hostname_, new_address, localityLbEndpoint(), lbEndpoint(), nullptr); + logical_host_ = + std::make_shared(info_, hostname_, new_address, localityLbEndpoint(), + lbEndpoint(), nullptr, time_source_); const auto& locality_lb_endpoint = localityLbEndpoint(); PriorityStateManager priority_state_manager(*this, local_info_, nullptr); diff --git a/source/common/upstream/logical_host.h b/source/common/upstream/logical_host.h index 3e296b7e7548..574728fe92bb 100644 --- a/source/common/upstream/logical_host.h +++ b/source/common/upstream/logical_host.h @@ -1,5 +1,6 @@ #pragma once +#include "envoy/common/time.h" #include "envoy/config/core/v3/base.pb.h" #include "envoy/config/endpoint/v3/endpoint_components.pb.h" @@ -18,13 +19,14 @@ class LogicalHost 
: public HostImpl { const Network::Address::InstanceConstSharedPtr& address, const envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint, const envoy::config::endpoint::v3::LbEndpoint& lb_endpoint, - const Network::TransportSocketOptionsSharedPtr& override_transport_socket_options) + const Network::TransportSocketOptionsSharedPtr& override_transport_socket_options, + TimeSource& time_source) : HostImpl(cluster, hostname, address, // TODO(zyfjeff): Created through metadata shared pool std::make_shared(lb_endpoint.metadata()), lb_endpoint.load_balancing_weight().value(), locality_lb_endpoint.locality(), lb_endpoint.endpoint().health_check_config(), locality_lb_endpoint.priority(), - lb_endpoint.health_status()), + lb_endpoint.health_status(), time_source), override_transport_socket_options_(override_transport_socket_options) {} // Set the new address. Updates are typically rare so a R/W lock is used for address updates. @@ -104,6 +106,7 @@ class RealHostDescription : public HostDescription { // checking. NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } + MonotonicTime creationTime() const override { return logical_host_->creationTime(); } uint32_t priority() const override { return logical_host_->priority(); } void priority(uint32_t) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } diff --git a/source/common/upstream/original_dst_cluster.cc b/source/common/upstream/original_dst_cluster.cc index 2f2eec5e98ef..51c49c1340a2 100644 --- a/source/common/upstream/original_dst_cluster.cc +++ b/source/common/upstream/original_dst_cluster.cc @@ -57,7 +57,7 @@ HostConstSharedPtr OriginalDstCluster::LoadBalancer::chooseHost(LoadBalancerCont info, info->name() + dst_addr.asString(), std::move(host_ip_port), nullptr, 1, envoy::config::core::v3::Locality().default_instance(), envoy::config::endpoint::v3::Endpoint::HealthCheckConfig().default_instance(), 0, - envoy::config::core::v3::UNKNOWN)); + envoy::config::core::v3::UNKNOWN, parent_->time_source_)); ENVOY_LOG(debug, "Created host {}.", host->address()->asString()); // Tell the cluster about the new host @@ -107,7 +107,8 @@ OriginalDstCluster::OriginalDstCluster( const envoy::config::cluster::v3::Cluster& config, Runtime::Loader& runtime, Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) - : ClusterImplBase(config, runtime, factory_context, std::move(stats_scope), added_via_api), + : ClusterImplBase(config, runtime, factory_context, std::move(stats_scope), added_via_api, + factory_context.dispatcher().timeSource()), dispatcher_(factory_context.dispatcher()), cleanup_interval_ms_( std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(config, cleanup_interval, 5000))), diff --git a/source/common/upstream/static_cluster.cc b/source/common/upstream/static_cluster.cc index 2657b809465f..12fa3999b552 100644 --- a/source/common/upstream/static_cluster.cc +++ b/source/common/upstream/static_cluster.cc @@ -11,7 +11,8 @@ StaticClusterImpl::StaticClusterImpl( const envoy::config::cluster::v3::Cluster& cluster, Runtime::Loader& runtime, Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) - : ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), added_via_api), + : ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), added_via_api, + factory_context.dispatcher().timeSource()), priority_state_manager_( new PriorityStateManager(*this, factory_context.localInfo(), nullptr)) { // 
TODO(dio): Use by-reference when cluster.hosts() is removed. @@ -23,13 +24,15 @@ StaticClusterImpl::StaticClusterImpl( overprovisioning_factor_ = PROTOBUF_GET_WRAPPED_OR_DEFAULT( cluster_load_assignment.policy(), overprovisioning_factor, kDefaultOverProvisioningFactor); + Event::Dispatcher& dispatcher = factory_context.dispatcher(); + for (const auto& locality_lb_endpoint : cluster_load_assignment.endpoints()) { validateEndpointsForZoneAwareRouting(locality_lb_endpoint); priority_state_manager_->initializePriorityFor(locality_lb_endpoint); for (const auto& lb_endpoint : locality_lb_endpoint.lb_endpoints()) { priority_state_manager_->registerHostForPriority( lb_endpoint.endpoint().hostname(), resolveProtoAddress(lb_endpoint.endpoint().address()), - locality_lb_endpoint, lb_endpoint); + locality_lb_endpoint, lb_endpoint, dispatcher.timeSource()); } } } diff --git a/source/common/upstream/strict_dns_cluster.cc b/source/common/upstream/strict_dns_cluster.cc index 328f2f7a79f8..70f1dd37e00b 100644 --- a/source/common/upstream/strict_dns_cluster.cc +++ b/source/common/upstream/strict_dns_cluster.cc @@ -14,7 +14,7 @@ StrictDnsClusterImpl::StrictDnsClusterImpl( Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) : BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope), - added_via_api), + added_via_api, factory_context.dispatcher().timeSource()), local_info_(factory_context.localInfo()), dns_resolver_(dns_resolver), dns_refresh_rate_ms_( std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(cluster, dns_refresh_rate, 5000))), @@ -134,7 +134,7 @@ void StrictDnsClusterImpl::ResolveTarget::startResolve() { std::make_shared(lb_endpoint_.metadata()), lb_endpoint_.load_balancing_weight().value(), locality_lb_endpoint_.locality(), lb_endpoint_.endpoint().health_check_config(), locality_lb_endpoint_.priority(), - lb_endpoint_.health_status())); + lb_endpoint_.health_status(), parent_.time_source_)); ttl_refresh_rate = min(ttl_refresh_rate, resp.ttl_); } diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 8e6a6db3c507..a9fd6f2be38f 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -254,7 +254,7 @@ HostDescriptionImpl::HostDescriptionImpl( Network::Address::InstanceConstSharedPtr dest_address, MetadataConstSharedPtr metadata, const envoy::config::core::v3::Locality& locality, const envoy::config::endpoint::v3::Endpoint::HealthCheckConfig& health_check_config, - uint32_t priority) + uint32_t priority, TimeSource& time_source) : cluster_(cluster), hostname_(hostname), health_checks_hostname_(health_check_config.hostname()), address_(dest_address), canary_(Config::Metadata::metadataValue(metadata.get(), @@ -264,7 +264,8 @@ HostDescriptionImpl::HostDescriptionImpl( metadata_(metadata), locality_(locality), locality_zone_stat_name_(locality.zone(), cluster->statsScope().symbolTable()), priority_(priority), - socket_factory_(resolveTransportSocketFactory(dest_address, metadata_.get())) { + socket_factory_(resolveTransportSocketFactory(dest_address, metadata_.get())), + creation_time_(time_source.monotonicTime()) { if (health_check_config.port_value() != 0 && dest_address->type() != Network::Address::Type::Ip) { // Setting the health check port to non-0 only works for IP-type addresses. Setting the port // for a pipe address is a misconfiguration. Throw an exception. 
@@ -889,9 +890,10 @@ ClusterInfoImpl::upstreamHttpProtocol(absl::optional downstream_ ClusterImplBase::ClusterImplBase( const envoy::config::cluster::v3::Cluster& cluster, Runtime::Loader& runtime, Server::Configuration::TransportSocketFactoryContextImpl& factory_context, - Stats::ScopePtr&& stats_scope, bool added_via_api) + Stats::ScopePtr&& stats_scope, bool added_via_api, TimeSource& time_source) : init_manager_(fmt::format("Cluster {}", cluster.name())), init_watcher_("ClusterImplBase", [this]() { onInitDone(); }), runtime_(runtime), + time_source_(time_source), local_cluster_(factory_context.clusterManager().localClusterName().value_or("") == cluster.name()), const_metadata_shared_pool_(Config::Metadata::getConstMetadataSharedPool( @@ -1234,14 +1236,14 @@ void PriorityStateManager::initializePriorityFor( void PriorityStateManager::registerHostForPriority( const std::string& hostname, Network::Address::InstanceConstSharedPtr address, const envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint, - const envoy::config::endpoint::v3::LbEndpoint& lb_endpoint) { + const envoy::config::endpoint::v3::LbEndpoint& lb_endpoint, TimeSource& time_source) { auto metadata = lb_endpoint.has_metadata() ? parent_.constMetadataSharedPool()->getObject(lb_endpoint.metadata()) : nullptr; const HostSharedPtr host(new HostImpl( parent_.info(), hostname, address, metadata, lb_endpoint.load_balancing_weight().value(), locality_lb_endpoint.locality(), lb_endpoint.endpoint().health_check_config(), - locality_lb_endpoint.priority(), lb_endpoint.health_status())); + locality_lb_endpoint.priority(), lb_endpoint.health_status(), time_source)); registerHostForPriority(host, locality_lb_endpoint); } diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index c74e489384f0..0b327a1163fa 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -11,6 +11,7 @@ #include #include +#include "envoy/common/time.h" #include "envoy/config/cluster/v3/cluster.pb.h" #include "envoy/config/core/v3/address.pb.h" #include "envoy/config/core/v3/base.pb.h" @@ -80,7 +81,7 @@ class HostDescriptionImpl : virtual public HostDescription, Network::Address::InstanceConstSharedPtr dest_address, MetadataConstSharedPtr metadata, const envoy::config::core::v3::Locality& locality, const envoy::config::endpoint::v3::Endpoint::HealthCheckConfig& health_check_config, - uint32_t priority); + uint32_t priority, TimeSource& time_source); Network::TransportSocketFactory& transportSocketFactory() const override { return socket_factory_; @@ -139,6 +140,7 @@ class HostDescriptionImpl : virtual public HostDescription, Network::TransportSocketFactory& resolveTransportSocketFactory(const Network::Address::InstanceConstSharedPtr& dest_address, const envoy::config::core::v3::Metadata* metadata) const; + MonotonicTime creationTime() const override { return creation_time_; } protected: ClusterInfoConstSharedPtr cluster_; @@ -156,6 +158,7 @@ class HostDescriptionImpl : virtual public HostDescription, HealthCheckHostMonitorPtr health_checker_; std::atomic priority_; Network::TransportSocketFactory& socket_factory_; + const MonotonicTime creation_time_; }; /** @@ -169,9 +172,10 @@ class HostImpl : public HostDescriptionImpl, Network::Address::InstanceConstSharedPtr address, MetadataConstSharedPtr metadata, uint32_t initial_weight, const envoy::config::core::v3::Locality& locality, const envoy::config::endpoint::v3::Endpoint::HealthCheckConfig& health_check_config, - 
uint32_t priority, const envoy::config::core::v3::HealthStatus health_status) + uint32_t priority, const envoy::config::core::v3::HealthStatus health_status, + TimeSource& time_source) : HostDescriptionImpl(cluster, hostname, address, metadata, locality, health_check_config, - priority), + priority, time_source), used_(true) { setEdsHealthFlag(health_status); HostImpl::weight(initial_weight); @@ -772,7 +776,7 @@ class ClusterImplBase : public Cluster, protected Logger::Loggable { void registerHostForPriority( const std::string& hostname, Network::Address::InstanceConstSharedPtr address, const envoy::config::endpoint::v3::LocalityLbEndpoints& locality_lb_endpoint, - const envoy::config::endpoint::v3::LbEndpoint& lb_endpoint); + const envoy::config::endpoint::v3::LbEndpoint& lb_endpoint, TimeSource& time_source); void registerHostForPriority( const HostSharedPtr& host, diff --git a/source/docs/stats.md b/source/docs/stats.md index 418a04f628d7..8b67355e24c8 100644 --- a/source/docs/stats.md +++ b/source/docs/stats.md @@ -193,11 +193,6 @@ with a format-check, but we can determine whether symbol-table lookups are occurring during via an admin endpoint that shows 20 recent lookups by name, at `ENVOY_HOST:ADMIN_PORT/stats?recentlookups`. -As of October 6, 2020, the "fake" symbol table implementation has been removed -from the system, and the "--use-fake-symbol-table" option is now a no-op, -triggering a warning if set to "1". The option will be removed in a later -release. - ### Symbol Table Class Overview Class | Superclass | Description @@ -205,7 +200,7 @@ Class | Superclass | Description SymbolTable | | Abstract class providing an interface for symbol tables SymbolTableImpl | SymbolTable | Implementation of SymbolTable API where StatName share symbols held in a table SymbolTableImpl::Encoding | | Helper class for incrementally encoding strings into symbols -StatName | | Provides an API and a view into a StatName (dynamic orsymbolized). Like absl::string_view, the backing store must be separately maintained. +StatName | | Provides an API and a view into a StatName (dynamic or symbolized). Like absl::string_view, the backing store must be separately maintained. StatNameStorageBase | | Holds storage (an array of bytes) for a dynamic or symbolized StatName StatNameStorage | StatNameStorageBase | Holds storage for a symbolized StatName. Must be explicitly freed (not just destructed). StatNameManagedStorage | StatNameStorage | Like StatNameStorage, but is 8 bytes larger, and can be destructed without free(). 
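Since `callWithStringView` is removed above in favor of plain `toString()`, a minimal sketch of the surviving lookup path may help; the classes are the real `Stats` types from this diff, and the stat name string is illustrative:

```cpp
#include "common/stats/symbol_table_impl.h"

namespace Envoy {

void symbolTableSketch() {
  // SymbolTableImpl interns each "."-separated token; StatNamePool owns the
  // backing storage for the StatNames it hands out.
  Stats::SymbolTableImpl table;
  Stats::StatNamePool pool(table);

  // Encode once; StatName is a cheap view over pool-owned bytes.
  const Stats::StatName name = pool.add("cluster.upstream_rq_total");

  // Decode with toString(), the replacement for callWithStringView(). This
  // allocates a std::string, so hot paths should keep passing StatName around.
  const std::string elaborated = table.toString(name);
}

} // namespace Envoy
```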
diff --git a/source/extensions/clusters/aggregate/cluster.cc b/source/extensions/clusters/aggregate/cluster.cc index c630a580a49a..595c918b4da7 100644 --- a/source/extensions/clusters/aggregate/cluster.cc +++ b/source/extensions/clusters/aggregate/cluster.cc @@ -18,7 +18,7 @@ Cluster::Cluster(const envoy::config::cluster::v3::Cluster& cluster, Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, ThreadLocal::SlotAllocator& tls, bool added_via_api) : Upstream::ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), - added_via_api), + added_via_api, factory_context.dispatcher().timeSource()), cluster_manager_(cluster_manager), runtime_(runtime), random_(random), tls_(tls), clusters_(config.clusters().begin(), config.clusters().end()) {} diff --git a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc index c5215d894b57..022516a83708 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/cluster.cc +++ b/source/extensions/clusters/dynamic_forward_proxy/cluster.cc @@ -22,7 +22,7 @@ Cluster::Cluster( Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api) : Upstream::BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope), - added_via_api), + added_via_api, factory_context.dispatcher().timeSource()), dns_cache_manager_(cache_manager_factory.get()), dns_cache_(dns_cache_manager_->getCache(config.dns_cache_config())), update_callbacks_handle_(dns_cache_->addUpdateCallbacks(*this)), local_info_(local_info), @@ -107,7 +107,7 @@ void Cluster::addOrUpdateWorker( new_host_map->try_emplace(host, host_info, std::make_shared( info(), host, host_info->address(), dummy_locality_lb_endpoint_, - dummy_lb_endpoint_, nullptr)); + dummy_lb_endpoint_, nullptr, time_source_)); if (hosts_added == nullptr) { hosts_added = std::make_unique(); } diff --git a/source/extensions/clusters/redis/redis_cluster.cc b/source/extensions/clusters/redis/redis_cluster.cc index d7a7fc9c7336..673bffa67705 100644 --- a/source/extensions/clusters/redis/redis_cluster.cc +++ b/source/extensions/clusters/redis/redis_cluster.cc @@ -25,7 +25,7 @@ RedisCluster::RedisCluster( Stats::ScopePtr&& stats_scope, bool added_via_api, ClusterSlotUpdateCallBackSharedPtr lb_factory) : Upstream::BaseDynamicClusterImpl(cluster, runtime, factory_context, std::move(stats_scope), - added_via_api), + added_via_api, factory_context.dispatcher().timeSource()), cluster_manager_(cluster_manager), cluster_refresh_rate_(std::chrono::milliseconds( PROTOBUF_GET_MS_OR_DEFAULT(redis_cluster, cluster_refresh_rate, 5000))), @@ -96,9 +96,9 @@ void RedisCluster::onClusterSlotUpdate(ClusterSlotsPtr&& slots) { Upstream::HostVector new_hosts; for (const ClusterSlot& slot : *slots) { - new_hosts.emplace_back(new RedisHost(info(), "", slot.primary(), *this, true)); + new_hosts.emplace_back(new RedisHost(info(), "", slot.primary(), *this, true, time_source_)); for (auto const& replica : slot.replicas()) { - new_hosts.emplace_back(new RedisHost(info(), "", replica, *this, false)); + new_hosts.emplace_back(new RedisHost(info(), "", replica, *this, false, time_source_)); } } @@ -274,9 +274,9 @@ void RedisCluster::RedisDiscoverySession::startResolveRedis() { Upstream::HostSharedPtr host; if (parent_.hosts_.empty()) { const int rand_idx = parent_.random_.random() % discovery_address_list_.size(); - auto it = discovery_address_list_.begin(); 
+ auto it = std::next(discovery_address_list_.begin(), rand_idx); host = Upstream::HostSharedPtr{ - new RedisHost(parent_.info(), "", *std::next(it, rand_idx), parent_, true)}; + new RedisHost(parent_.info(), "", *it, parent_, true, parent_.timeSource())}; } else { const int rand_idx = parent_.random_.random() % parent_.hosts_.size(); host = parent_.hosts_[rand_idx]; diff --git a/source/extensions/clusters/redis/redis_cluster.h b/source/extensions/clusters/redis/redis_cluster.h index b3d842aa19de..9707954dedee 100644 --- a/source/extensions/clusters/redis/redis_cluster.h +++ b/source/extensions/clusters/redis/redis_cluster.h @@ -114,11 +114,14 @@ class RedisCluster : public Upstream::BaseDynamicClusterImpl { values[1].asString() = "SLOTS"; asArray().swap(values); } + static ClusterSlotsRequest instance_; }; InitializePhase initializePhase() const override { return InitializePhase::Primary; } + TimeSource& timeSource() const { return time_source_; } + private: friend class RedisClusterTest; @@ -145,7 +148,8 @@ class RedisCluster : public Upstream::BaseDynamicClusterImpl { class RedisHost : public Upstream::HostImpl { public: RedisHost(Upstream::ClusterInfoConstSharedPtr cluster, const std::string& hostname, - Network::Address::InstanceConstSharedPtr address, RedisCluster& parent, bool primary) + Network::Address::InstanceConstSharedPtr address, RedisCluster& parent, bool primary, + TimeSource& time_source) : Upstream::HostImpl( cluster, hostname, address, // TODO(zyfjeff): Created through metadata shared pool @@ -153,7 +157,8 @@ class RedisCluster : public Upstream::BaseDynamicClusterImpl { parent.lbEndpoint().load_balancing_weight().value(), parent.localityLbEndpoint().locality(), parent.lbEndpoint().endpoint().health_check_config(), - parent.localityLbEndpoint().priority(), parent.lbEndpoint().health_status()), + parent.localityLbEndpoint().priority(), parent.lbEndpoint().health_status(), + time_source), primary_(primary) {} bool isPrimary() const { return primary_; } diff --git a/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc b/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc index 8e9ac6f186a6..a9f5197b9b87 100644 --- a/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc +++ b/source/extensions/filters/network/redis_proxy/conn_pool_impl.cc @@ -335,7 +335,7 @@ Common::Redis::Client::PoolRequest* InstanceImpl::ThreadLocalPool::makeRequestTo Upstream::HostSharedPtr new_host{new Upstream::HostImpl( cluster_->info(), "", address_ptr, nullptr, 1, envoy::config::core::v3::Locality(), envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), 0, - envoy::config::core::v3::UNKNOWN)}; + envoy::config::core::v3::UNKNOWN, dispatcher_.timeSource())}; host_address_map_[host_address_map_key] = new_host; created_via_redirect_hosts_.push_back(new_host); it = host_address_map_.find(host_address_map_key); diff --git a/source/server/options_impl.cc b/source/server/options_impl.cc index 9a488b1fe2e7..d4623d5a4141 100644 --- a/source/server/options_impl.cc +++ b/source/server/options_impl.cc @@ -147,10 +147,6 @@ OptionsImpl::OptionsImpl(std::vector args, TCLAP::SwitchArg cpuset_threads( "", "cpuset-threads", "Get the default # of worker threads from cpuset size", cmd, false); - TCLAP::ValueArg use_fake_symbol_table("", "use-fake-symbol-table", - "Use fake symbol table implementation", false, false, - "bool", cmd); - TCLAP::ValueArg disable_extensions("", "disable-extensions", "Comma-separated list of extensions to disable", false, "", "string", 
cmd); @@ -181,11 +177,6 @@ OptionsImpl::OptionsImpl(std::vector<std::string> args, hot_restart_disabled_ = disable_hot_restart.getValue(); mutex_tracing_enabled_ = enable_mutex_tracing.getValue(); - fake_symbol_table_enabled_ = use_fake_symbol_table.getValue(); - if (fake_symbol_table_enabled_) { - ENVOY_LOG(warn, "Fake symbol tables have been removed. Please remove references to " - "--use-fake-symbol-table"); - } cpuset_threads_ = cpuset_threads.getValue(); @@ -423,8 +414,8 @@ OptionsImpl::OptionsImpl(const std::string& service_cluster, const std::string& service_zone_(service_zone), file_flush_interval_msec_(10000), drain_time_(600), parent_shutdown_time_(900), drain_strategy_(Server::DrainStrategy::Gradual), mode_(Server::Mode::Serve), hot_restart_disabled_(false), signal_handling_enabled_(true), - mutex_tracing_enabled_(false), cpuset_threads_(false), fake_symbol_table_enabled_(false), - socket_path_("@envoy_domain_socket"), socket_mode_(0) {} + mutex_tracing_enabled_(false), cpuset_threads_(false), socket_path_("@envoy_domain_socket"), + socket_mode_(0) {} void OptionsImpl::disableExtensions(const std::vector<std::string>& names) { for (const auto& name : names) { diff --git a/source/server/options_impl.h b/source/server/options_impl.h index cacf88685aa9..85897ac7d822 100644 --- a/source/server/options_impl.h +++ b/source/server/options_impl.h @@ -100,10 +100,6 @@ class OptionsImpl : public Server::Options, protected Logger::Loggable disabled_extensions_; uint32_t count_; diff --git a/source/server/server.cc b/source/server/server.cc index 214f9d045938..0e238f9825d8 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -246,6 +246,14 @@ void InstanceImpl::flushStatsInternal() { bool InstanceImpl::healthCheckFailed() { return !live_.load(); } +ProcessContextOptRef InstanceImpl::processContext() { + if (process_context_ == nullptr) { + return absl::nullopt; + } + + return *process_context_; +} + namespace { // Loads a bootstrap object, potentially at a specific version (upgrading if necessary).
void loadBootstrap(absl::optional bootstrap_version, diff --git a/source/server/server.h b/source/server/server.h index 1c06562b5a0e..0409b9db529b 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -251,7 +251,7 @@ class InstanceImpl final : Logger::Loggable, Stats::Store& stats() override { return stats_store_; } Grpc::Context& grpcContext() override { return grpc_context_; } Http::Context& httpContext() override { return http_context_; } - ProcessContextOptRef processContext() override { return *process_context_; } + ProcessContextOptRef processContext() override; ThreadLocal::Instance& threadLocal() override { return thread_local_; } const LocalInfo::LocalInfo& localInfo() const override { return *local_info_; } TimeSource& timeSource() override { return time_source_; } diff --git a/test/common/access_log/BUILD b/test/common/access_log/BUILD index 313fdbe0391e..485d2bd756f8 100644 --- a/test/common/access_log/BUILD +++ b/test/common/access_log/BUILD @@ -25,6 +25,7 @@ envoy_cc_test( "//test/mocks/server:factory_context_mocks", "//test/mocks/upstream:cluster_info_mocks", "//test/test_common:registry_lib", + "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/accesslog/v3:pkg_cc_proto", ], diff --git a/test/common/access_log/access_log_impl_test.cc b/test/common/access_log/access_log_impl_test.cc index 0f9d94467d5c..b7d27d0c850c 100644 --- a/test/common/access_log/access_log_impl_test.cc +++ b/test/common/access_log/access_log_impl_test.cc @@ -23,6 +23,7 @@ #include "test/mocks/upstream/cluster_info.h" #include "test/test_common/printers.h" #include "test/test_common/registry.h" +#include "test/test_common/simulated_time_system.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -44,7 +45,7 @@ envoy::config::accesslog::v3::AccessLog parseAccessLogFromV3Yaml(const std::stri return access_log; } -class AccessLogImplTest : public testing::Test { +class AccessLogImplTest : public Event::TestUsingSimulatedTime, public testing::Test { public: AccessLogImplTest() : file_(new MockAccessLogFile()) { ON_CALL(context_, runtime()).WillByDefault(ReturnRef(runtime_)); @@ -101,7 +102,8 @@ name: accesslog EXPECT_CALL(*file_, write(_)); auto cluster = std::make_shared>(); - stream_info_.upstream_host_ = Upstream::makeTestHostDescription(cluster, "tcp://10.0.0.5:1234"); + stream_info_.upstream_host_ = + Upstream::makeTestHostDescription(cluster, "tcp://10.0.0.5:1234", simTime()); stream_info_.response_flags_ = StreamInfo::ResponseFlag::DownstreamConnectionTermination; log->log(&request_headers_, &response_headers_, &response_trailers_, stream_info_); @@ -176,7 +178,8 @@ name: accesslog TEST_F(AccessLogImplTest, UpstreamHost) { auto cluster = std::make_shared>(); - stream_info_.upstream_host_ = Upstream::makeTestHostDescription(cluster, "tcp://10.0.0.5:1234"); + stream_info_.upstream_host_ = + Upstream::makeTestHostDescription(cluster, "tcp://10.0.0.5:1234", simTime()); const std::string yaml = R"EOF( name: accesslog diff --git a/test/common/conn_pool/conn_pool_base_test.cc b/test/common/conn_pool/conn_pool_base_test.cc index 0096c95f46e2..cc79fbea4572 100644 --- a/test/common/conn_pool/conn_pool_base_test.cc +++ b/test/common/conn_pool/conn_pool_base_test.cc @@ -79,8 +79,9 @@ class ConnPoolImplBaseTest : public testing::Test { std::shared_ptr> descr_{ new NiceMock()}; std::shared_ptr cluster_{new NiceMock()}; - Upstream::HostSharedPtr host_{Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:80")}; NiceMock dispatcher_; 
+ Upstream::HostSharedPtr host_{ + Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:80", dispatcher_.timeSource())}; TestConnPoolImplBase pool_; AttachContext context_; std::vector clients_; diff --git a/test/common/grpc/async_client_manager_impl_test.cc b/test/common/grpc/async_client_manager_impl_test.cc index 448f52aa6140..76c7c869a3b8 100644 --- a/test/common/grpc/async_client_manager_impl_test.cc +++ b/test/common/grpc/async_client_manager_impl_test.cc @@ -88,6 +88,44 @@ TEST_F(AsyncClientManagerImplTest, GoogleGrpc) { #endif } +TEST_F(AsyncClientManagerImplTest, GoogleGrpcIllegalChars) { + EXPECT_CALL(scope_, createScope_("grpc.foo.")); + envoy::config::core::v3::GrpcService grpc_service; + grpc_service.mutable_google_grpc()->set_stat_prefix("foo"); + + auto& metadata = *grpc_service.mutable_initial_metadata()->Add(); + metadata.set_key("illegalcharacter;"); + metadata.set_value("value"); + +#ifdef ENVOY_GOOGLE_GRPC + EXPECT_THROW_WITH_MESSAGE( + async_client_manager_.factoryForGrpcService(grpc_service, scope_, false), EnvoyException, + "Illegal characters in gRPC initial metadata."); +#else + EXPECT_THROW_WITH_MESSAGE( + async_client_manager_.factoryForGrpcService(grpc_service, scope_, false), EnvoyException, + "Google C++ gRPC client is not linked"); +#endif +} + +TEST_F(AsyncClientManagerImplTest, LegalGoogleGrpcChar) { + EXPECT_CALL(scope_, createScope_("grpc.foo.")); + envoy::config::core::v3::GrpcService grpc_service; + grpc_service.mutable_google_grpc()->set_stat_prefix("foo"); + + auto& metadata = *grpc_service.mutable_initial_metadata()->Add(); + metadata.set_key("_legal-character."); + metadata.set_value("value"); + +#ifdef ENVOY_GOOGLE_GRPC + EXPECT_NE(nullptr, async_client_manager_.factoryForGrpcService(grpc_service, scope_, false)); +#else + EXPECT_THROW_WITH_MESSAGE( + async_client_manager_.factoryForGrpcService(grpc_service, scope_, false), EnvoyException, + "Google C++ gRPC client is not linked"); +#endif +} + TEST_F(AsyncClientManagerImplTest, EnvoyGrpcUnknownOk) { envoy::config::core::v3::GrpcService grpc_service; grpc_service.mutable_envoy_grpc()->set_cluster_name("foo"); diff --git a/test/common/http/codec_client_test.cc b/test/common/http/codec_client_test.cc index 25ba456a87fe..0a16b2d31158 100644 --- a/test/common/http/codec_client_test.cc +++ b/test/common/http/codec_client_test.cc @@ -40,7 +40,7 @@ namespace Envoy { namespace Http { namespace { -class CodecClientTest : public testing::Test { +class CodecClientTest : public Event::TestUsingSimulatedTime, public testing::Test { public: CodecClientTest() { connection_ = new NiceMock(); @@ -72,7 +72,7 @@ class CodecClientTest : public testing::Test { std::shared_ptr cluster_{ new NiceMock()}; Upstream::HostDescriptionConstSharedPtr host_{ - Upstream::makeTestHostDescription(cluster_, "tcp://127.0.0.1:80")}; + Upstream::makeTestHostDescription(cluster_, "tcp://127.0.0.1:80", simTime())}; NiceMock stream_info_; }; @@ -279,7 +279,8 @@ TEST_F(CodecClientTest, SSLConnectionInfo) { } // Test the codec getting input from a real TCP connection. 
-class CodecNetworkTest : public testing::TestWithParam { +class CodecNetworkTest : public Event::TestUsingSimulatedTime, + public testing::TestWithParam { public: CodecNetworkTest() : api_(Api::createApiForTest()), stream_info_(api_->timeSource()) { dispatcher_ = api_->allocateDispatcher("test_thread"); @@ -355,7 +356,7 @@ class CodecNetworkTest : public testing::TestWithParam client_; std::shared_ptr cluster_{new NiceMock()}; Upstream::HostDescriptionConstSharedPtr host_{ - Upstream::makeTestHostDescription(cluster_, "tcp://127.0.0.1:80")}; + Upstream::makeTestHostDescription(cluster_, "tcp://127.0.0.1:80", simTime())}; Network::ConnectionPtr upstream_connection_; NiceMock upstream_callbacks_; Network::ClientConnection* client_connection_{}; diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index 0e6e2a54cc29..b86e9c426a31 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ -48,13 +48,13 @@ namespace { /** * A test version of ConnPoolImpl that allows for mocking beneath the codec clients. */ -class ConnPoolImplForTest : public FixedHttpConnPoolImpl { +class ConnPoolImplForTest : public Event::TestUsingSimulatedTime, public FixedHttpConnPoolImpl { public: ConnPoolImplForTest(Event::MockDispatcher& dispatcher, Upstream::ClusterInfoConstSharedPtr cluster, Random::RandomGenerator& random_generator) : FixedHttpConnPoolImpl( - Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), + Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000", dispatcher.timeSource()), Upstream::ResourcePriority::Default, dispatcher, nullptr, nullptr, random_generator, state_, [](HttpConnPoolImplBase* pool) { return std::make_unique(*pool); }, @@ -108,7 +108,8 @@ class ConnPoolImplForTest : public FixedHttpConnPoolImpl { } } }, - Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), *test_client.client_dispatcher_); + Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000", simTime()), + *test_client.client_dispatcher_); EXPECT_CALL(*test_client.connect_timer_, enableTimer(_, _)); EXPECT_CALL(mock_dispatcher_, createClientConnection_(_, _, _, _)) .WillOnce(Return(test_client.connection_)); diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index 47d6799a4c8d..d15076d5785c 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -62,7 +62,7 @@ class TestConnPoolImpl : public FixedHttpConnPoolImpl { class ActiveTestRequest; -class Http2ConnPoolImplTest : public testing::Test { +class Http2ConnPoolImplTest : public Event::TestUsingSimulatedTime, public testing::Test { public: struct TestCodecClient { Http::MockClientConnection* codec_; @@ -132,7 +132,8 @@ class Http2ConnPoolImplTest : public testing::Test { test_client.codec_client_ = new CodecClientForTest( CodecClient::Type::HTTP1, std::move(connection), test_client.codec_, [this](CodecClient*) -> void { onClientDestroy(); }, - Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), *test_client.client_dispatcher_); + Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000", simTime()), + *test_client.client_dispatcher_); if (buffer_limits) { EXPECT_CALL(*cluster_, perConnectionBufferLimitBytes()) .Times(num_clients) @@ -210,7 +211,7 @@ class Http2ConnPoolImplTest : public testing::Test { Api::ApiPtr api_; NiceMock dispatcher_; std::shared_ptr cluster_{new NiceMock()}; - Upstream::HostSharedPtr host_{Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:80")}; + 
Upstream::HostSharedPtr host_{Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:80", simTime())}; std::unique_ptr pool_; std::vector test_clients_; NiceMock runtime_; @@ -335,7 +336,7 @@ TEST_F(Http2ConnPoolImplTest, VerifyAlpnFallback) { // Recreate the conn pool so that the host re-evaluates the transport socket match, arriving at // our test transport socket factory. - host_ = Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:80"); + host_ = Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:80", simTime()); pool_ = std::make_unique( dispatcher_, random_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr, state_); diff --git a/test/common/http/mixed_conn_pool_test.cc b/test/common/http/mixed_conn_pool_test.cc index a3ba9b2e7b46..b826d2e29851 100644 --- a/test/common/http/mixed_conn_pool_test.cc +++ b/test/common/http/mixed_conn_pool_test.cc @@ -19,11 +19,11 @@ namespace Http { namespace { // TODO(alyssawilk) replace this with the MixedConnectionPool once it lands. -class ConnPoolImplForTest : public HttpConnPoolImplBase { +class ConnPoolImplForTest : public Event::TestUsingSimulatedTime, public HttpConnPoolImplBase { public: ConnPoolImplForTest(Event::MockDispatcher& dispatcher, Upstream::ClusterConnectivityState& state, Random::RandomGenerator& random, Upstream::ClusterInfoConstSharedPtr cluster) - : HttpConnPoolImplBase(Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), + : HttpConnPoolImplBase(Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000", simTime()), Upstream::ResourcePriority::Default, dispatcher, nullptr, nullptr, random, state, {Http::Protocol::Http2, Http::Protocol::Http11}) {} diff --git a/test/common/signal/fatal_action_test.cc b/test/common/signal/fatal_action_test.cc index 5105e8f49ffd..9a286c9f9fb1 100644 --- a/test/common/signal/fatal_action_test.cc +++ b/test/common/signal/fatal_action_test.cc @@ -62,8 +62,8 @@ class FatalActionTest : public ::testing::Test { TEST_F(FatalActionTest, ShouldNotBeAbleToRunActionsBeforeRegistration) { // Call the actions - EXPECT_EQ(FatalErrorHandler::runSafeActions(), Status::ActionManangerUnset); - EXPECT_EQ(FatalErrorHandler::runUnsafeActions(), Status::ActionManangerUnset); + EXPECT_EQ(FatalErrorHandler::runSafeActions(), Status::ActionManagerUnset); + EXPECT_EQ(FatalErrorHandler::runUnsafeActions(), Status::ActionManagerUnset); } TEST_F(FatalActionTest, ShouldOnlyBeAbleToRegisterFatalActionsOnce) { diff --git a/test/common/stats/symbol_table_impl_test.cc b/test/common/stats/symbol_table_impl_test.cc index 5ac09db06a4b..7fd540803a15 100644 --- a/test/common/stats/symbol_table_impl_test.cc +++ b/test/common/stats/symbol_table_impl_test.cc @@ -704,8 +704,7 @@ TEST_F(StatNameTest, SupportsAbslHash) { // Tests the memory savings realized from using symbol tables with 1k // clusters. This test shows the memory drops from almost 8M to less than -// 2M. Note that only SymbolTableImpl is tested for memory consumption, -// and not FakeSymbolTableImpl. +// 2M. TEST(SymbolTableTest, Memory) { // Tests a stat-name allocation strategy. 
auto test_memory_usage = [](std::function fn) -> size_t { diff --git a/test/common/tcp/BUILD b/test/common/tcp/BUILD index bac530252c14..2a6da2345a2e 100644 --- a/test/common/tcp/BUILD +++ b/test/common/tcp/BUILD @@ -23,6 +23,7 @@ envoy_cc_test( "//test/mocks/runtime:runtime_mocks", "//test/mocks/tcp:tcp_mocks", "//test/mocks/upstream:cluster_info_mocks", + "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", ], ) diff --git a/test/common/tcp/conn_pool_test.cc b/test/common/tcp/conn_pool_test.cc index 726edfed05b0..08e7f9fae2a6 100644 --- a/test/common/tcp/conn_pool_test.cc +++ b/test/common/tcp/conn_pool_test.cc @@ -15,6 +15,7 @@ #include "test/mocks/tcp/mocks.h" #include "test/mocks/upstream/cluster_info.h" #include "test/test_common/printers.h" +#include "test/test_common/simulated_time_system.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -241,14 +242,15 @@ void ConnPoolBase::expectEnableUpstreamReady(bool run) { /** * Test fixture for connection pool tests. */ -class TcpConnPoolImplTest : public testing::TestWithParam { +class TcpConnPoolImplTest : public Event::TestUsingSimulatedTime, + public testing::TestWithParam { public: TcpConnPoolImplTest() : test_new_connection_pool_(GetParam()), upstream_ready_cb_(test_new_connection_pool_ ? nullptr : new NiceMock(&dispatcher_)), - host_(Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000")), + host_(Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000", simTime())), conn_pool_(dispatcher_, host_, upstream_ready_cb_, test_new_connection_pool_) {} ~TcpConnPoolImplTest() override { @@ -268,14 +270,15 @@ class TcpConnPoolImplTest : public testing::TestWithParam { /** * Test fixture for connection pool destructor tests. */ -class TcpConnPoolImplDestructorTest : public testing::TestWithParam { +class TcpConnPoolImplDestructorTest : public Event::TestUsingSimulatedTime, + public testing::TestWithParam { public: TcpConnPoolImplDestructorTest() : test_new_connection_pool_(GetParam()), upstream_ready_cb_(test_new_connection_pool_ ? 
nullptr : new NiceMock(&dispatcher_)) { - host_ = Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000"); + host_ = Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000", simTime()); if (test_new_connection_pool_) { conn_pool_ = std::make_unique( dispatcher_, host_, Upstream::ResourcePriority::Default, nullptr, nullptr, state_); diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index 65eeb64c1070..5ba42650f5c4 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -219,6 +219,7 @@ envoy_cc_test( "//source/common/upstream:host_utility_lib", "//source/common/upstream:upstream_includes", "//source/common/upstream:upstream_lib", + "//test/mocks/event:event_mocks", "//test/mocks/upstream:cluster_info_mocks", ], ) @@ -239,6 +240,7 @@ envoy_cc_test( "//test/mocks/upstream:load_balancer_context_mock", "//test/mocks/upstream:priority_set_mocks", "//test/test_common:logging_lib", + "//test/test_common:simulated_time_system_lib", "//test/test_common:test_runtime_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", ], @@ -259,6 +261,7 @@ envoy_cc_test_library( "//test/mocks/upstream:host_set_mocks", "//test/mocks/upstream:load_balancer_context_mock", "//test/mocks/upstream:priority_set_mocks", + "//test/test_common:simulated_time_system_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", ], ) @@ -477,6 +480,7 @@ envoy_cc_test( "//test/mocks/upstream:cluster_info_mocks", "//test/mocks/upstream:host_set_mocks", "//test/mocks/upstream:priority_set_mocks", + "//test/test_common:simulated_time_system_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", ], ) @@ -491,6 +495,7 @@ envoy_cc_test( "//test/mocks/upstream:cluster_info_mocks", "//test/mocks/upstream:host_set_mocks", "//test/mocks/upstream:priority_set_mocks", + "//test/test_common:simulated_time_system_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", ], ) @@ -522,6 +527,7 @@ envoy_cc_benchmark_binary( "//test/common/upstream:utility_lib", "//test/mocks/upstream:cluster_info_mocks", "//test/test_common:printers_lib", + "//test/test_common:simulated_time_system_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", ], ) @@ -552,6 +558,7 @@ envoy_cc_test( "//test/mocks/upstream:host_set_mocks", "//test/mocks/upstream:load_balancer_mocks", "//test/mocks/upstream:priority_set_mocks", + "//test/test_common:simulated_time_system_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", ], diff --git a/test/common/upstream/bounded_load_hlb_test.cc b/test/common/upstream/bounded_load_hlb_test.cc index 0a29c97bf9d7..e059b69be0bd 100644 --- a/test/common/upstream/bounded_load_hlb_test.cc +++ b/test/common/upstream/bounded_load_hlb_test.cc @@ -44,7 +44,8 @@ class TestBoundedLoadHashingLoadBalancer } }; -class BoundedLoadHashingLoadBalancerTest : public testing::Test { +class BoundedLoadHashingLoadBalancerTest : public Event::TestUsingSimulatedTime, + public testing::Test { public: HostOverloadFactorPredicate getHostOverloadFactorPredicate(const std::vector& addresses) { @@ -65,7 +66,7 @@ class BoundedLoadHashingLoadBalancerTest : public testing::Test { const double equal_weight = static_cast(1.0 / num_hosts); for (uint32_t i = 0; i < num_hosts; i++) { normalized_host_weights.push_back( - {makeTestHost(info_, fmt::format("tcp://127.0.0.1{}:90", i)), equal_weight}); + {makeTestHost(info_, fmt::format("tcp://127.0.0.1{}:90", i), simTime()), equal_weight}); } } @@ -74,7 +75,7 @@ class BoundedLoadHashingLoadBalancerTest : public testing::Test { 
NormalizedHostWeightVector& ring) { const double equal_weight = static_cast(1.0 / num_hosts); for (uint32_t i = 0; i < num_hosts; i++) { - HostConstSharedPtr h = makeTestHost(info_, fmt::format("tcp://127.0.0.1{}:90", i)); + HostConstSharedPtr h = makeTestHost(info_, fmt::format("tcp://127.0.0.1{}:90", i), simTime()); ring.push_back({h, equal_weight}); ring.push_back({h, equal_weight}); hosts.push_back({h, equal_weight}); diff --git a/test/common/upstream/cluster_factory_impl_test.cc b/test/common/upstream/cluster_factory_impl_test.cc index 7196fe2c99be..d7d4ba78f9ef 100644 --- a/test/common/upstream/cluster_factory_impl_test.cc +++ b/test/common/upstream/cluster_factory_impl_test.cc @@ -34,7 +34,9 @@ namespace Upstream { namespace { // Test Cluster Factory without custom configuration -class TestStaticClusterFactory : public ClusterFactoryImplBase { +class TestStaticClusterFactory : public Event::TestUsingSimulatedTime, + public ClusterFactoryImplBase { + public: TestStaticClusterFactory() : ClusterFactoryImplBase("envoy.clusters.test_static") {} diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index 21217a421752..4289ef6662c4 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -747,7 +747,7 @@ class ClusterManagerImplThreadAwareLbTest : public ClusterManagerImplTest { EXPECT_EQ(nullptr, cluster_manager_->get("cluster_0")->loadBalancer().chooseHost(nullptr)); cluster1->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster1->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster1->info_, "tcp://127.0.0.1:80", time_system_)}; cluster1->prioritySet().getMockHostSet(0)->runCallbacks( cluster1->prioritySet().getMockHostSet(0)->hosts_, {}); cluster1->initialize_callback_(); @@ -1180,7 +1180,8 @@ TEST_F(ClusterManagerImplTest, DynamicRemoveWithLocalCluster) { // Fire a member callback on the local cluster, which should not call any update callbacks on // the deleted cluster. - foo->prioritySet().getMockHostSet(0)->hosts_ = {makeTestHost(foo->info_, "tcp://127.0.0.1:80")}; + foo->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(foo->info_, "tcp://127.0.0.1:80", time_system_)}; EXPECT_CALL(membership_updated, ready()); foo->prioritySet().getMockHostSet(0)->runCallbacks(foo->prioritySet().getMockHostSet(0)->hosts_, {}); @@ -1477,7 +1478,7 @@ TEST_F(ClusterManagerImplTest, DynamicAddRemove) { std::shared_ptr cluster2(new NiceMock()); cluster2->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster2->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster2->info_, "tcp://127.0.0.1:80", time_system_)}; EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _)) .WillOnce(Return(std::make_pair(cluster2, nullptr))); EXPECT_CALL(*cluster2, initializePhase()).Times(0); @@ -1588,11 +1589,11 @@ TEST_F(ClusterManagerImplTest, HostsPostedToTlsCluster) { cluster1->initialize_callback_(); // Set up the HostSet with 1 healthy, 1 degraded and 1 unhealthy. 
- HostSharedPtr host1 = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80"); + HostSharedPtr host1 = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80", time_system_); host1->healthFlagSet(HostImpl::HealthFlag::DEGRADED_ACTIVE_HC); - HostSharedPtr host2 = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80"); + HostSharedPtr host2 = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80", time_system_); host2->healthFlagSet(HostImpl::HealthFlag::FAILED_ACTIVE_HC); - HostSharedPtr host3 = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80"); + HostSharedPtr host3 = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80", time_system_); HostVector hosts{host1, host2, host3}; auto hosts_ptr = std::make_shared(hosts); @@ -1622,7 +1623,7 @@ TEST_F(ClusterManagerImplTest, CloseHttpConnectionsOnHealthFailure) { clustersJson({defaultStaticClusterJson("some_cluster")})); std::shared_ptr cluster1(new NiceMock()); cluster1->info_->name_ = "some_cluster"; - HostSharedPtr test_host = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80"); + HostSharedPtr test_host = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80", time_system_); cluster1->prioritySet().getMockHostSet(0)->hosts_ = {test_host}; ON_CALL(*cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary)); @@ -1685,7 +1686,7 @@ TEST_F(ClusterManagerImplTest, CloseTcpConnectionPoolsOnHealthFailure) { clustersJson({defaultStaticClusterJson("some_cluster")})); std::shared_ptr cluster1(new NiceMock()); cluster1->info_->name_ = "some_cluster"; - HostSharedPtr test_host = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80"); + HostSharedPtr test_host = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80", time_system_); cluster1->prioritySet().getMockHostSet(0)->hosts_ = {test_host}; ON_CALL(*cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary)); @@ -1755,7 +1756,7 @@ TEST_F(ClusterManagerImplTest, CloseTcpConnectionsOnHealthFailure) { EXPECT_CALL(*cluster1->info_, features()) .WillRepeatedly(Return(ClusterInfo::Features::CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE)); cluster1->info_->name_ = "some_cluster"; - HostSharedPtr test_host = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80"); + HostSharedPtr test_host = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80", time_system_); cluster1->prioritySet().getMockHostSet(0)->hosts_ = {test_host}; ON_CALL(*cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary)); @@ -1832,7 +1833,7 @@ TEST_F(ClusterManagerImplTest, DoNotCloseTcpConnectionsOnHealthFailure) { std::shared_ptr cluster1(new NiceMock()); EXPECT_CALL(*cluster1->info_, features()).WillRepeatedly(Return(0)); cluster1->info_->name_ = "some_cluster"; - HostSharedPtr test_host = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80"); + HostSharedPtr test_host = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80", time_system_); cluster1->prioritySet().getMockHostSet(0)->hosts_ = {test_host}; ON_CALL(*cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary)); @@ -3848,8 +3849,8 @@ TEST_F(ClusterManagerImplTest, ConnPoolsDrainedOnHostSetChange) { Cluster& cluster = cluster_manager_->activeClusters().begin()->second; // Set up the HostSet. 
- HostSharedPtr host1 = makeTestHost(cluster.info(), "tcp://127.0.0.1:80"); - HostSharedPtr host2 = makeTestHost(cluster.info(), "tcp://127.0.0.1:81"); + HostSharedPtr host1 = makeTestHost(cluster.info(), "tcp://127.0.0.1:80", time_system_); + HostSharedPtr host2 = makeTestHost(cluster.info(), "tcp://127.0.0.1:81", time_system_); HostVector hosts{host1, host2}; auto hosts_ptr = std::make_shared(hosts); @@ -3916,7 +3917,7 @@ TEST_F(ClusterManagerImplTest, ConnPoolsDrainedOnHostSetChange) { tcp1 = dynamic_cast( cluster_manager_->tcpConnPoolForCluster("cluster_1", ResourcePriority::Default, nullptr)); - HostSharedPtr host3 = makeTestHost(cluster.info(), "tcp://127.0.0.1:82"); + HostSharedPtr host3 = makeTestHost(cluster.info(), "tcp://127.0.0.1:82", time_system_); HostVector hosts_added; hosts_added.push_back(host3); @@ -3957,7 +3958,7 @@ TEST_F(ClusterManagerImplTest, ConnPoolsNotDrainedOnHostSetChange) { Cluster& cluster = cluster_manager_->activeClusters().begin()->second; // Set up the HostSet. - HostSharedPtr host1 = makeTestHost(cluster.info(), "tcp://127.0.0.1:80"); + HostSharedPtr host1 = makeTestHost(cluster.info(), "tcp://127.0.0.1:80", time_system_); HostVector hosts{host1}; auto hosts_ptr = std::make_shared(hosts); @@ -3983,7 +3984,7 @@ TEST_F(ClusterManagerImplTest, ConnPoolsNotDrainedOnHostSetChange) { Tcp::ConnectionPool::MockInstance* tcp1 = dynamic_cast( cluster_manager_->tcpConnPoolForCluster("cluster_1", ResourcePriority::Default, nullptr)); - HostSharedPtr host2 = makeTestHost(cluster.info(), "tcp://127.0.0.1:82"); + HostSharedPtr host2 = makeTestHost(cluster.info(), "tcp://127.0.0.1:82", time_system_); HostVector hosts_added; hosts_added.push_back(host2); @@ -4156,8 +4157,8 @@ class PrefetchTest : public ClusterManagerImplTest { cluster_ = &cluster_manager_->activeClusters().begin()->second.get(); // Set up the HostSet. 
- host1_ = makeTestHost(cluster_->info(), "tcp://127.0.0.1:80"); - host2_ = makeTestHost(cluster_->info(), "tcp://127.0.0.1:80"); + host1_ = makeTestHost(cluster_->info(), "tcp://127.0.0.1:80", time_system_); + host2_ = makeTestHost(cluster_->info(), "tcp://127.0.0.1:80", time_system_); HostVector hosts{host1_, host2_}; auto hosts_ptr = std::make_shared(hosts); diff --git a/test/common/upstream/health_check_fuzz.cc b/test/common/upstream/health_check_fuzz.cc index e564e2fb5a48..1d109d74bda4 100644 --- a/test/common/upstream/health_check_fuzz.cc +++ b/test/common/upstream/health_check_fuzz.cc @@ -102,8 +102,9 @@ void HttpHealthCheckFuzz::initialize(test::common::upstream::HealthCheckTestCase allocHttpHealthCheckerFromProto(input.health_check_config()); ON_CALL(runtime_.snapshot_, featureEnabled("health_check.verify_cluster", 100)) .WillByDefault(testing::Return(input.http_verify_cluster())); + auto time_source = std::make_unique>(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", *time_source)}; if (input.upstream_cx_success()) { cluster_->info_->stats().upstream_cx_total_.inc(); } @@ -213,8 +214,9 @@ void TcpHealthCheckFuzz::allocTcpHealthCheckerFromProto( void TcpHealthCheckFuzz::initialize(test::common::upstream::HealthCheckTestCase input) { allocTcpHealthCheckerFromProto(input.health_check_config()); + auto time_source = std::make_unique>(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", *time_source)}; if (input.upstream_cx_success()) { cluster_->info_->stats().upstream_cx_total_.inc(); } @@ -321,8 +323,9 @@ void GrpcHealthCheckFuzz::allocGrpcHealthCheckerFromProto( void GrpcHealthCheckFuzz::initialize(test::common::upstream::HealthCheckTestCase input) { test_session_ = std::make_unique(); allocGrpcHealthCheckerFromProto(input.health_check_config()); + auto time_source = std::make_unique>(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", *time_source)}; if (input.upstream_cx_success()) { cluster_->info_->stats().upstream_cx_total_.inc(); } @@ -338,11 +341,11 @@ void GrpcHealthCheckFuzz::initialize(test::common::upstream::HealthCheckTestCase std::shared_ptr cluster{ new NiceMock()}; Event::MockDispatcher dispatcher_; - + auto time_source = std::make_unique>(); test_session.codec_client_ = new CodecClientForTest( Http::CodecClient::Type::HTTP1, std::move(conn_data.connection_), test_session.codec_, nullptr, - Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), dispatcher_); + Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000", *time_source), dispatcher_); return test_session.codec_client_; })); expectStreamCreate(); diff --git a/test/common/upstream/health_check_fuzz_test_utils.cc b/test/common/upstream/health_check_fuzz_test_utils.cc index b1e8b78387d0..78b5feac68fd 100644 --- a/test/common/upstream/health_check_fuzz_test_utils.cc +++ b/test/common/upstream/health_check_fuzz_test_utils.cc @@ -54,7 +54,8 @@ void HttpHealthCheckerImplTestBase::expectClientCreate( return new CodecClientForTest( Http::CodecClient::Type::HTTP1, std::move(conn_data.connection_), test_session.codec_, nullptr, - Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), dispatcher_); + Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000", 
dispatcher_.timeSource()), + dispatcher_); })); } @@ -129,7 +130,8 @@ void GrpcHealthCheckerImplTestBaseUtils::expectClientCreate(size_t index) { test_session.codec_client_ = new CodecClientForTest( Http::CodecClient::Type::HTTP1, std::move(conn_data.connection_), test_session.codec_, nullptr, - Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), dispatcher_); + Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000", dispatcher_.timeSource()), + dispatcher_); return test_session.codec_client_; })); } diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc index 925e913fc699..f6a4653ed660 100644 --- a/test/common/upstream/health_checker_impl_test.cc +++ b/test/common/upstream/health_checker_impl_test.cc @@ -125,7 +125,9 @@ class TestHttpHealthCheckerImpl : public HttpHealthCheckerImpl { Http::CodecClient::Type codecClientType() { return codec_client_type_; } }; -class HttpHealthCheckerImplTest : public testing::Test, public HealthCheckerTestBase { +class HttpHealthCheckerImplTest : public Event::TestUsingSimulatedTime, + public testing::Test, + public HealthCheckerTestBase { public: struct TestSession { Event::MockTimer* interval_timer_{}; @@ -440,8 +442,8 @@ class HttpHealthCheckerImplTest : public testing::Test, public HealthCheckerTest const HostWithHealthCheckMap& hosts, const std::string& protocol = "tcp://", const uint32_t priority = 0) { for (const auto& host : hosts) { - cluster->prioritySet().getMockHostSet(priority)->hosts_.emplace_back( - makeTestHost(cluster->info_, fmt::format("{}{}", protocol, host.first), host.second)); + cluster->prioritySet().getMockHostSet(priority)->hosts_.emplace_back(makeTestHost( + cluster->info_, fmt::format("{}{}", protocol, host.first), host.second, simTime())); } } @@ -562,7 +564,7 @@ class HttpHealthCheckerImplTest : public testing::Test, public HealthCheckerTest return new CodecClientForTest( Http::CodecClient::Type::HTTP1, std::move(conn_data.connection_), test_session.codec_, nullptr, - Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), dispatcher_); + Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000", simTime()), dispatcher_); })); } @@ -614,7 +616,7 @@ class HttpHealthCheckerImplTest : public testing::Test, public HealthCheckerTest void expectSuccessStartFailedFailFirst( const absl::optional& health_checked_cluster = absl::optional()) { cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagSet( Host::HealthFlag::FAILED_ACTIVE_HC); expectSessionCreate(); @@ -673,7 +675,7 @@ TEST_F(HttpHealthCheckerImplTest, Success) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -695,7 +697,7 @@ TEST_F(HttpHealthCheckerImplTest, Degraded) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)).Times(2); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -730,7 +732,7 @@ 
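+732,7 @@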
TEST_F(HttpHealthCheckerImplTest, SuccessIntervalJitter) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(testing::AnyNumber()); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); @@ -759,7 +761,7 @@ TEST_F(HttpHealthCheckerImplTest, InitialJitterNoTraffic) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(testing::AnyNumber()); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->interval_timer_, enableTimer(_, _)); @@ -790,7 +792,7 @@ TEST_F(HttpHealthCheckerImplTest, SuccessIntervalJitterPercentNoTraffic) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(testing::AnyNumber()); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); @@ -819,7 +821,7 @@ TEST_F(HttpHealthCheckerImplTest, SuccessIntervalJitterPercent) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(testing::AnyNumber()); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -849,7 +851,7 @@ TEST_F(HttpHealthCheckerImplTest, SuccessWithSpurious100Continue) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -877,7 +879,7 @@ TEST_F(HttpHealthCheckerImplTest, SuccessWithSpuriousMetadata) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -905,8 +907,8 @@ TEST_F(HttpHealthCheckerImplTest, SuccessWithMultipleHosts) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(2); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80"), - makeTestHost(cluster_->info_, "tcp://127.0.0.1:81")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(cluster_->info_, "tcp://127.0.0.1:81", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); @@ -939,9 +941,9 @@ TEST_F(HttpHealthCheckerImplTest, SuccessWithMultipleHostSets) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(2); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, 
"tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->prioritySet().getMockHostSet(1)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:81")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:81", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); @@ -994,7 +996,7 @@ TEST_F(HttpHealthCheckerImplTest, ZeroRetryInterval) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -1052,7 +1054,7 @@ TEST_F(HttpHealthCheckerImplTest, TlsOptions) { allocHealthChecker(yaml); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -1070,7 +1072,7 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheck) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -1105,7 +1107,7 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServicePrefixPatternCheck) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -1140,7 +1142,7 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceExactPatternCheck) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -1175,7 +1177,7 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceRegexPatternCheck) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -1208,8 +1210,8 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithCustomHostValueOnTheHos health_check_config.set_hostname(host); auto test_host = std::make_shared( cluster_->info_, "", Network::Utility::resolveUrl("tcp://127.0.0.1:80"), nullptr, 1, - envoy::config::core::v3::Locality(), health_check_config, 0, - envoy::config::core::v3::UNKNOWN); + envoy::config::core::v3::Locality(), health_check_config, 0, envoy::config::core::v3::UNKNOWN, + simTime()); const std::string path = "/healthcheck"; setupServiceValidationHC(); // Requires non-empty `service_name` in config. 
@@ -1251,8 +1253,8 @@ TEST_F(HttpHealthCheckerImplTest, health_check_config.set_hostname(host); auto test_host = std::make_shared( cluster_->info_, "", Network::Utility::resolveUrl("tcp://127.0.0.1:80"), nullptr, 1, - envoy::config::core::v3::Locality(), health_check_config, 0, - envoy::config::core::v3::UNKNOWN); + envoy::config::core::v3::Locality(), health_check_config, 0, envoy::config::core::v3::UNKNOWN, + simTime()); const std::string path = "/healthcheck"; // Setup health check config with a different host, to check that we still get the host configured // on the endpoint. @@ -1298,7 +1300,7 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithCustomHostValue) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -1362,7 +1364,7 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithAdditionalHeaders) { )EOF"); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", metadata)}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", metadata, simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -1426,7 +1428,7 @@ TEST_F(HttpHealthCheckerImplTest, SuccessServiceCheckWithoutUserAgent) { std::string current_start_time; cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", metadata)}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", metadata, simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -1463,7 +1465,7 @@ TEST_F(HttpHealthCheckerImplTest, ServiceDoesNotMatchFail) { EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -1494,7 +1496,7 @@ TEST_F(HttpHealthCheckerImplTest, ServicePatternDoesNotMatchFail) { EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -1525,7 +1527,7 @@ TEST_F(HttpHealthCheckerImplTest, ServiceNotPresentInResponseFail) { EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -1553,7 +1555,7 @@ TEST_F(HttpHealthCheckerImplTest, ServiceCheckRuntimeOff) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -1579,7 
+1581,7 @@ TEST_F(HttpHealthCheckerImplTest, ServiceCheckRuntimeOffWithStringPattern) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -1610,7 +1612,7 @@ TEST_F(HttpHealthCheckerImplTest, SuccessNoTraffic) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); @@ -1627,7 +1629,7 @@ TEST_F(HttpHealthCheckerImplTest, SuccessNoTraffic) { TEST_F(HttpHealthCheckerImplTest, UnhealthyTransitionNoTrafficHealthy) { setupNoTrafficHealthyValidationHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagSet( Host::HealthFlag::FAILED_ACTIVE_HC); expectSessionCreate(); @@ -1648,7 +1650,7 @@ TEST_F(HttpHealthCheckerImplTest, UnhealthyTransitionNoTrafficHealthy) { TEST_F(HttpHealthCheckerImplTest, SuccessStartFailedSuccessFirst) { setupNoServiceValidationHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagSet( Host::HealthFlag::FAILED_ACTIVE_HC); expectSessionCreate(); @@ -1681,7 +1683,7 @@ TEST_F(HttpHealthCheckerImplTest, SuccessStartFailedFailFirstLogError) { TEST_F(HttpHealthCheckerImplTest, HttpFailRemoveHostInCallbackNoClose) { setupNoServiceValidationHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); @@ -1703,7 +1705,7 @@ TEST_F(HttpHealthCheckerImplTest, HttpFailRemoveHostInCallbackNoClose) { TEST_F(HttpHealthCheckerImplTest, HttpFailRemoveHostInCallbackClose) { setupNoServiceValidationHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); @@ -1724,7 +1726,7 @@ TEST_F(HttpHealthCheckerImplTest, HttpFailRemoveHostInCallbackClose) { TEST_F(HttpHealthCheckerImplTest, HttpFail) { setupNoServiceValidationHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); @@ -1771,7 +1773,7 @@ TEST_F(HttpHealthCheckerImplTest, HttpFail) { TEST_F(HttpHealthCheckerImplTest, HttpFailLogError) { setupNoServiceValidationHCAlwaysLogFailure(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - 
makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); @@ -1838,7 +1840,7 @@ TEST_F(HttpHealthCheckerImplTest, Disconnect) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::ChangePending)).Times(1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); @@ -1869,7 +1871,7 @@ TEST_F(HttpHealthCheckerImplTest, Disconnect) { TEST_F(HttpHealthCheckerImplTest, Timeout) { setupNoServiceValidationHCOneUnhealthy(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); @@ -1894,7 +1896,7 @@ TEST_F(HttpHealthCheckerImplTest, TimeoutThenSuccess) { setupNoServiceValidationHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); @@ -1929,7 +1931,7 @@ TEST_F(HttpHealthCheckerImplTest, TimeoutThenRemoteClose) { setupNoServiceValidationHC(); EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); @@ -1964,7 +1966,7 @@ TEST_F(HttpHealthCheckerImplTest, TimeoutThenRemoteClose) { TEST_F(HttpHealthCheckerImplTest, TimeoutAfterDisconnect) { setupNoServiceValidationHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); @@ -1995,7 +1997,7 @@ TEST_F(HttpHealthCheckerImplTest, DynamicAddAndRemove) { expectSessionCreate(); expectStreamCreate(0); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); cluster_->prioritySet().getMockHostSet(0)->runCallbacks( {cluster_->prioritySet().getMockHostSet(0)->hosts_.back()}, {}); @@ -2011,7 +2013,7 @@ TEST_F(HttpHealthCheckerImplTest, ConnectionClose) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); @@ -2033,7 +2035,7 @@ TEST_F(HttpHealthCheckerImplTest, ProxyConnectionClose) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); cluster_->prioritySet().getMockHostSet(0)->hosts_ 
= { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); @@ -2058,7 +2060,7 @@ TEST_F(HttpHealthCheckerImplTest, ConnectionCloseLegacy) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); @@ -2083,7 +2085,7 @@ TEST_F(HttpHealthCheckerImplTest, ProxyConnectionCloseLegacy) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); @@ -2103,7 +2105,7 @@ TEST_F(HttpHealthCheckerImplTest, ProxyConnectionCloseLegacy) { TEST_F(HttpHealthCheckerImplTest, HealthCheckIntervals) { setupHealthCheckIntervalOverridesHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://128.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://128.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); @@ -2326,7 +2328,7 @@ TEST_F(HttpHealthCheckerImplTest, RemoteCloseBetweenChecks) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(2); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); @@ -2356,7 +2358,7 @@ TEST_F(HttpHealthCheckerImplTest, DontReuseConnectionBetweenChecks) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(2); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); @@ -2386,7 +2388,7 @@ TEST_F(HttpHealthCheckerImplTest, StreamReachesWatermarkDuringCheck) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); @@ -2407,7 +2409,7 @@ TEST_F(HttpHealthCheckerImplTest, ConnectionReachesWatermarkDuringCheck) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); @@ -2561,7 +2563,7 @@ TEST_F(HttpHealthCheckerImplTest, TransportSocketMatchCriteria) { allocHealthChecker(yaml); 
cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -2602,7 +2604,7 @@ TEST_F(HttpHealthCheckerImplTest, NoTransportSocketMatchCriteria) { allocHealthChecker(yaml); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -2739,7 +2741,7 @@ TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(ServiceNameMatch)) { EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Unchanged)).Times(1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -2774,7 +2776,7 @@ TEST_F(HttpHealthCheckerImplTest, DEPRECATED_FEATURE_TEST(ServiceNameMismatch)) EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->info_->stats().upstream_cx_total_.inc(); expectSessionCreate(); expectStreamCreate(0); @@ -3045,7 +3047,9 @@ TEST(TcpHealthCheckMatcher, match) { EXPECT_TRUE(TcpHealthCheckMatcher::match(segments, buffer)); } -class TcpHealthCheckerImplTest : public testing::Test, public HealthCheckerTestBase { +class TcpHealthCheckerImplTest : public testing::Test, + public HealthCheckerTestBase, + public Event::TestUsingSimulatedTime { public: void allocHealthChecker(const std::string& yaml, bool avoid_boosting = true) { health_checker_ = std::make_shared<TcpHealthCheckerImpl>( @@ -3123,7 +3127,7 @@ TEST_F(TcpHealthCheckerImplTest, Success) { setupData(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectClientCreate(); EXPECT_CALL(*connection_, write(_, _)); @@ -3147,7 +3151,7 @@ TEST_F(TcpHealthCheckerImplTest, DataWithoutReusingConnection) { setupDataDontReuseConnection(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectClientCreate(); EXPECT_CALL(*connection_, write(_, _)).Times(1); @@ -3176,7 +3180,7 @@ TEST_F(TcpHealthCheckerImplTest, WrongData) { setupDataDontReuseConnection(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectClientCreate(); EXPECT_CALL(*connection_, write(_, _)).Times(1); @@ -3207,7 +3211,7 @@ TEST_F(TcpHealthCheckerImplTest, TimeoutThenRemoteClose) { expectSessionCreate(); expectClientCreate(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; EXPECT_CALL(*connection_, write(_, _)); EXPECT_CALL(*timeout_timer_, enableTimer(_, _)); @@ -3267,7 +3271,7 @@
TEST_F(TcpHealthCheckerImplTest, Timeout) { expectSessionCreate(); expectClientCreate(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; EXPECT_CALL(*connection_, write(_, _)); EXPECT_CALL(*timeout_timer_, enableTimer(_, _)); @@ -3301,7 +3305,7 @@ TEST_F(TcpHealthCheckerImplTest, DoubleTimeout) { expectSessionCreate(); expectClientCreate(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; EXPECT_CALL(*connection_, write(_, _)); EXPECT_CALL(*timeout_timer_, enableTimer(_, _)); @@ -3359,7 +3363,7 @@ TEST_F(TcpHealthCheckerImplTest, TimeoutWithoutReusingConnection) { setupDataDontReuseConnection(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectClientCreate(); EXPECT_CALL(*connection_, write(_, _)).Times(1); @@ -3429,7 +3433,7 @@ TEST_F(TcpHealthCheckerImplTest, NoData) { setupNoData(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectClientCreate(); EXPECT_CALL(*connection_, write(_, _)).Times(0); @@ -3452,7 +3456,7 @@ TEST_F(TcpHealthCheckerImplTest, PassiveFailure) { setupNoData(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectClientCreate(); EXPECT_CALL(*connection_, write(_, _)).Times(0); @@ -3490,7 +3494,7 @@ TEST_F(TcpHealthCheckerImplTest, PassiveFailureCrossThreadRemoveHostRace) { setupNoData(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectClientCreate(); EXPECT_CALL(*connection_, write(_, _)).Times(0); @@ -3519,7 +3523,7 @@ TEST_F(TcpHealthCheckerImplTest, PassiveFailureCrossThreadRemoveClusterRace) { setupNoData(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectClientCreate(); EXPECT_CALL(*connection_, write(_, _)).Times(0); @@ -3547,7 +3551,7 @@ TEST_F(TcpHealthCheckerImplTest, ConnectionLocalFailure) { setupData(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectClientCreate(); EXPECT_CALL(*connection_, write(_, _)); @@ -3582,7 +3586,8 @@ class TestGrpcHealthCheckerImpl : public GrpcHealthCheckerImpl { MOCK_METHOD(Http::CodecClient*, createCodecClient_, (Upstream::Host::CreateConnectionData&)); }; -class GrpcHealthCheckerImplTestBase : public HealthCheckerTestBase { +class GrpcHealthCheckerImplTestBase : public Event::TestUsingSimulatedTime, + public HealthCheckerTestBase { public: struct TestSession { TestSession() = default; @@ -3776,7 +3781,7 @@ class GrpcHealthCheckerImplTestBase : public HealthCheckerTestBase { test_session.codec_client_ = new CodecClientForTest( 
Http::CodecClient::Type::HTTP1, std::move(conn_data.connection_), test_session.codec_, nullptr, - Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000"), dispatcher_); + Upstream::makeTestHost(cluster, "tcp://127.0.0.1:9000", simTime()), dispatcher_); return test_session.codec_client_; })); } @@ -3812,7 +3817,7 @@ class GrpcHealthCheckerImplTestBase : public HealthCheckerTestBase { void expectSingleHealthcheck(HealthTransition host_changed_state) { cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectHealthchecks(host_changed_state, 1); } @@ -3890,7 +3895,7 @@ class GrpcHealthCheckerImplTestBase : public HealthCheckerTestBase { setupServiceNameHC(authority); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; runHealthCheck(expected_host); } @@ -3960,8 +3965,8 @@ TEST_F(GrpcHealthCheckerImplTest, SuccessWithHostname) { health_check_config.set_hostname(expected_host); auto test_host = std::make_shared<HostImpl>( cluster_->info_, "", Network::Utility::resolveUrl("tcp://127.0.0.1:80"), nullptr, 1, - envoy::config::core::v3::Locality(), health_check_config, 0, - envoy::config::core::v3::UNKNOWN); + envoy::config::core::v3::Locality(), health_check_config, 0, envoy::config::core::v3::UNKNOWN, + simTime()); cluster_->prioritySet().getMockHostSet(0)->hosts_ = {test_host}; runHealthCheck(expected_host); } @@ -3975,8 +3980,8 @@ TEST_F(GrpcHealthCheckerImplTest, SuccessWithHostnameOverridesConfig) { health_check_config.set_hostname(expected_host); auto test_host = std::make_shared<HostImpl>( cluster_->info_, "", Network::Utility::resolveUrl("tcp://127.0.0.1:80"), nullptr, 1, - envoy::config::core::v3::Locality(), health_check_config, 0, - envoy::config::core::v3::UNKNOWN); + envoy::config::core::v3::Locality(), health_check_config, 0, envoy::config::core::v3::UNKNOWN, + simTime()); cluster_->prioritySet().getMockHostSet(0)->hosts_ = {test_host}; runHealthCheck(expected_host); } @@ -4023,8 +4028,8 @@ TEST_F(GrpcHealthCheckerImplTest, SuccessWithMultipleHosts) { setupHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80"), - makeTestHost(cluster_->info_, "tcp://127.0.0.1:81")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(cluster_->info_, "tcp://127.0.0.1:81", simTime())}; expectHealthchecks(HealthTransition::Unchanged, 2); @@ -4039,9 +4044,9 @@ TEST_F(GrpcHealthCheckerImplTest, SuccessWithMultipleHostSets) { setupHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->prioritySet().getMockHostSet(1)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:81")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:81", simTime())}; expectHealthchecks(HealthTransition::Unchanged, 2); @@ -4079,7 +4084,7 @@ TEST_F(GrpcHealthCheckerImplTest, ConnectionReachesWatermarkDuringCheck) { TEST_F(GrpcHealthCheckerImplTest, SuccessNoTraffic) { setupHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectHealthcheckStart(0); @@ -4097,7 +4102,7 @@
TEST_F(GrpcHealthCheckerImplTest, SuccessStartFailedSuccessFirst) { setupHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagSet( Host::HealthFlag::FAILED_ACTIVE_HC); cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagSet( @@ -4123,7 +4128,7 @@ TEST_F(GrpcHealthCheckerImplTest, SuccessStartFailedSuccessFirst) { TEST_F(GrpcHealthCheckerImplTest, SuccessStartFailedFailFirst) { setupHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagSet( Host::HealthFlag::FAILED_ACTIVE_HC); cluster_->prioritySet().getMockHostSet(0)->hosts_[0]->healthFlagSet( @@ -4170,7 +4175,7 @@ TEST_F(GrpcHealthCheckerImplTest, SuccessStartFailedFailFirst) { TEST_F(GrpcHealthCheckerImplTest, GrpcHealthFail) { setupHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectHealthcheckStart(0); @@ -4209,7 +4214,7 @@ TEST_F(GrpcHealthCheckerImplTest, GrpcHealthFail) { TEST_F(GrpcHealthCheckerImplTest, Disconnect) { setupHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectHealthcheckStart(0); @@ -4237,7 +4242,7 @@ TEST_F(GrpcHealthCheckerImplTest, Disconnect) { TEST_F(GrpcHealthCheckerImplTest, Timeout) { setupHCWithUnhealthyThreshold(1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectHealthcheckStart(0); @@ -4256,7 +4261,7 @@ TEST_F(GrpcHealthCheckerImplTest, Timeout) { TEST_F(GrpcHealthCheckerImplTest, DoubleTimeout) { setupHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectHealthcheckStart(0); @@ -4288,7 +4293,7 @@ TEST_F(GrpcHealthCheckerImplTest, DynamicAddAndRemove) { expectSessionCreate(); expectStreamCreate(0); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); cluster_->prioritySet().getMockHostSet(0)->runCallbacks( {cluster_->prioritySet().getMockHostSet(0)->hosts_.back()}, {}); @@ -4302,7 +4307,7 @@ TEST_F(GrpcHealthCheckerImplTest, DynamicAddAndRemove) { TEST_F(GrpcHealthCheckerImplTest, HealthCheckIntervals) { setupHealthCheckIntervalOverridesHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://128.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://128.0.0.1:80", simTime())}; expectSessionCreate(); expectStreamCreate(0); EXPECT_CALL(*test_sessions_[0]->timeout_timer_, enableTimer(_, _)); @@ -4516,7 +4521,7 @@ TEST_F(GrpcHealthCheckerImplTest, HealthCheckIntervals) { TEST_F(GrpcHealthCheckerImplTest, RemoteCloseBetweenChecks) { setupHC(); 
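These gRPC health-checker tests drive timeouts through mocked timers (the enableTimer expectations) rather than by moving a clock, so they never advance the simulated time they now carry. A fixture that did want time to move could step it through the mixin; assuming the usual Event::SimulatedTimeSystem advance API, that would look like:

    // Sketch, assuming SimulatedTimeSystem's advanceTimeWait() API; not needed by
    // these tests, which invoke timer callbacks directly through mocks.
    simTime().advanceTimeWait(std::chrono::milliseconds(1000)); // fires timers now due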
cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectHealthcheckStart(0); @@ -4545,7 +4550,7 @@ TEST_F(GrpcHealthCheckerImplTest, RemoteCloseBetweenChecks) { TEST_F(GrpcHealthCheckerImplTest, DontReuseConnectionBetweenChecks) { setupNoReuseConnectionHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectHealthcheckStart(0); @@ -4574,7 +4579,7 @@ TEST_F(GrpcHealthCheckerImplTest, DontReuseConnectionBetweenChecks) { TEST_F(GrpcHealthCheckerImplTest, DontReuseConnectionTimeout) { setupNoReuseConnectionHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectHealthcheckStart(0); @@ -4604,7 +4609,7 @@ TEST_F(GrpcHealthCheckerImplTest, DontReuseConnectionTimeout) { TEST_F(GrpcHealthCheckerImplTest, DontReuseConnectionStreamReset) { setupNoReuseConnectionHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectHealthcheckStart(0); @@ -4726,7 +4731,7 @@ TEST_F(GrpcHealthCheckerImplTest, GoAwayErrorProbeInProgress) { TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgress) { setupHCWithUnhealthyThreshold(/*threshold=*/1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectHealthcheckStart(0); @@ -4756,7 +4761,7 @@ TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgress) { TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgressTimeout) { setupHCWithUnhealthyThreshold(/*threshold=*/1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectHealthcheckStart(0); @@ -4791,7 +4796,7 @@ TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgressTimeout) { TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgressStreamReset) { setupHCWithUnhealthyThreshold(/*threshold=*/1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectHealthcheckStart(0); @@ -4826,7 +4831,7 @@ TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgressStreamReset) { TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgressBadResponse) { setupHCWithUnhealthyThreshold(/*threshold=*/1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectHealthcheckStart(0); @@ -4863,7 +4868,7 @@ TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgressBadResponse) { TEST_F(GrpcHealthCheckerImplTest, GoAwayProbeInProgressConnectionClose) { setupHCWithUnhealthyThreshold(/*threshold=*/1); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", 
simTime())}; expectSessionCreate(); expectHealthcheckStart(0); @@ -4898,7 +4903,7 @@ TEST_F(GrpcHealthCheckerImplTest, GoAwayBetweenChecks) { setupHC(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectHealthcheckStart(0); diff --git a/test/common/upstream/host_utility_test.cc b/test/common/upstream/host_utility_test.cc index 6ee52395864c..ba0436d5eb92 100644 --- a/test/common/upstream/host_utility_test.cc +++ b/test/common/upstream/host_utility_test.cc @@ -3,9 +3,12 @@ #include "common/upstream/upstream_impl.h" #include "test/common/upstream/utility.h" +#include "test/mocks/common.h" #include "test/mocks/upstream/cluster_info.h" -#include "gtest/gtest.h" +#include "gmock/gmock.h" + +using ::testing::Return; namespace Envoy { namespace Upstream { @@ -13,8 +16,13 @@ namespace { TEST(HostUtilityTest, All) { auto cluster = std::make_shared<NiceMock<MockClusterInfo>>(); - HostSharedPtr host = makeTestHost(cluster, "tcp://127.0.0.1:80"); + auto time_source = std::make_unique<NiceMock<Event::MockTimeSystem>>(); + auto time_ms = std::chrono::milliseconds(5); + ON_CALL(*time_source, monotonicTime()).WillByDefault(Return(MonotonicTime(time_ms))); + HostSharedPtr host = makeTestHost(cluster, "tcp://127.0.0.1:80", *time_source); EXPECT_EQ("healthy", HostUtility::healthFlagsToString(*host)); + EXPECT_EQ(time_ms, std::chrono::time_point_cast<std::chrono::milliseconds>(host->creationTime()) .time_since_epoch()); host->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); EXPECT_EQ("/failed_active_hc", HostUtility::healthFlagsToString(*host)); diff --git a/test/common/upstream/load_balancer_benchmark.cc b/test/common/upstream/load_balancer_benchmark.cc index 3d13b15edb0e..7c0a7c4cf8fe 100644 --- a/test/common/upstream/load_balancer_benchmark.cc +++ b/test/common/upstream/load_balancer_benchmark.cc @@ -14,6 +14,7 @@ #include "test/benchmark/main.h" #include "test/common/upstream/utility.h" #include "test/mocks/upstream/cluster_info.h" +#include "test/test_common/simulated_time_system.h" #include "benchmark/benchmark.h" @@ -21,7 +22,7 @@ namespace Envoy { namespace Upstream { namespace { -class BaseTester { +class BaseTester : public Event::TestUsingSimulatedTime { public: static constexpr absl::string_view metadata_key = "key"; // We weight the first weighted_subset_percent of hosts with weight.
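The host_utility_test hunk above pins monotonicTime() to 5ms and then asserts that host->creationTime() returns exactly that, which implies HostImpl now samples the injected TimeSource once at construction. The change to HostImpl itself is outside this excerpt; a minimal sketch of the mechanism the test implies, using a hypothetical stand-in class name:

    // Assumed mechanism, inferred from the test expectation above (not the
    // verbatim HostImpl diff). Envoy's TimeSource exposes monotonicTime();
    // the creation timestamp is captured once, in the constructor.
    class CreationTimeTracker {
    public:
      explicit CreationTimeTracker(TimeSource& time_source)
          : creation_time_(time_source.monotonicTime()) {}
      MonotonicTime creationTime() const { return creation_time_; }
    private:
      const MonotonicTime creation_time_;
    };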
@@ -41,9 +42,9 @@ class BaseTester { (*metadata.mutable_filter_metadata())[Config::MetadataFilters::get().ENVOY_LB]; (*map.mutable_fields())[std::string(metadata_key)] = value; - hosts.push_back(makeTestHost(info_, url, metadata, effective_weight)); + hosts.push_back(makeTestHost(info_, url, metadata, simTime(), effective_weight)); } else { - hosts.push_back(makeTestHost(info_, url, effective_weight)); + hosts.push_back(makeTestHost(info_, url, simTime(), effective_weight)); } } diff --git a/test/common/upstream/load_balancer_fuzz_base.cc b/test/common/upstream/load_balancer_fuzz_base.cc index 8fce55d4f7cc..96f068675044 100644 --- a/test/common/upstream/load_balancer_fuzz_base.cc +++ b/test/common/upstream/load_balancer_fuzz_base.cc @@ -22,8 +22,9 @@ constructByteVectorForRandom(const Protobuf::RepeatedField<uint64_t>& ra HostVector LoadBalancerFuzzBase::initializeHostsForUseInFuzzing(std::shared_ptr<MockClusterInfo> info) { HostVector hosts; + auto time_source = std::make_unique<NiceMock<Event::MockTimeSystem>>(); for (uint32_t i = 1; i <= 60000; ++i) { - hosts.push_back(makeTestHost(info, "tcp://127.0.0.1:" + std::to_string(i))); + hosts.push_back(makeTestHost(info, "tcp://127.0.0.1:" + std::to_string(i), *time_source)); } return hosts; } diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index 81603a608a65..8597b952117a 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -18,6 +18,7 @@ #include "test/mocks/upstream/load_balancer_context.h" #include "test/mocks/upstream/priority_set.h" #include "test/test_common/logging.h" +#include "test/test_common/simulated_time_system.h" #include "test/test_common/test_runtime.h" #include "gmock/gmock.h" @@ -32,7 +33,8 @@ namespace Envoy { namespace Upstream { namespace { -class LoadBalancerTestBase : public testing::TestWithParam<bool> { +class LoadBalancerTestBase : public Event::TestUsingSimulatedTime, + public testing::TestWithParam<bool> { protected: // Run all tests against both priority 0 and priority 1 host sets, to ensure // all the load balancers have equivalent functionality for failover host sets. @@ -84,7 +86,7 @@ class LoadBalancerBaseTest : public LoadBalancerTestBase { host_set.degraded_hosts_.clear(); host_set.excluded_hosts_.clear(); for (uint32_t i = 0; i < num_hosts; ++i) { - host_set.hosts_.push_back(makeTestHost(info_, "tcp://127.0.0.1:80")); + host_set.hosts_.push_back(makeTestHost(info_, "tcp://127.0.0.1:80", simTime())); } uint32_t i = 0; for (; i < num_healthy_hosts; ++i) { @@ -577,8 +579,8 @@ using FailoverTest = RoundRobinLoadBalancerTest; // Ensure if all the hosts with priority 0 unhealthy, the next priority hosts are used. TEST_P(FailoverTest, BasicFailover) { - host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80")}; - failover_host_set_.healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:82")}; + host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}; + failover_host_set_.healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:82", simTime())}; failover_host_set_.hosts_ = failover_host_set_.healthy_hosts_; init(false); EXPECT_EQ(failover_host_set_.healthy_hosts_[0], lb_->peekAnotherHost(nullptr)); @@ -587,7 +589,7 @@ // Ensure if all the hosts with priority 0 degraded, the first priority degraded hosts are used.
TEST_P(FailoverTest, BasicDegradedHosts) { - host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80")}; + host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}; host_set_.degraded_hosts_ = host_set_.hosts_; failover_host_set_.hosts_ = failover_host_set_.healthy_hosts_; init(false); @@ -598,9 +600,9 @@ TEST_P(FailoverTest, BasicDegradedHosts) { // Ensure if all the hosts with priority 0 degraded, but healthy hosts in the failover, the healthy // hosts in the second priority are used. TEST_P(FailoverTest, BasicFailoverDegradedHosts) { - host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80")}; + host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}; host_set_.degraded_hosts_ = host_set_.hosts_; - failover_host_set_.healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:82")}; + failover_host_set_.healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:82", simTime())}; failover_host_set_.hosts_ = failover_host_set_.healthy_hosts_; init(false); EXPECT_EQ(failover_host_set_.healthy_hosts_[0], lb_->chooseHost(nullptr)); @@ -608,8 +610,8 @@ TEST_P(FailoverTest, BasicFailoverDegradedHosts) { // Test that extending the priority set with an existing LB causes the correct updates. TEST_P(FailoverTest, PriorityUpdatesWithLocalHostSet) { - host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80")}; - failover_host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:81")}; + host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}; + failover_host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}; init(false); // With both the primary and failover hosts unhealthy, we should select an // unhealthy primary host. @@ -618,7 +620,7 @@ TEST_P(FailoverTest, PriorityUpdatesWithLocalHostSet) { // Update the priority set with a new priority level P=2 and ensure the host // is chosen MockHostSet& tertiary_host_set_ = *priority_set_.getMockHostSet(2); - HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:82")})); + HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:82", simTime())})); tertiary_host_set_.hosts_ = *hosts; tertiary_host_set_.healthy_hosts_ = tertiary_host_set_.hosts_; HostVector add_hosts; @@ -640,8 +642,8 @@ TEST_P(FailoverTest, PriorityUpdatesWithLocalHostSet) { // Test that extending the priority set with an existing LB causes the correct updates when the // cluster is configured to disable on panic. 
TEST_P(FailoverTest, PriorityUpdatesWithLocalHostSetDisableOnPanic) { - host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80")}; - failover_host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:81")}; + host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}; + failover_host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}; common_config_.mutable_zone_aware_lb_config()->set_fail_traffic_on_panic(true); init(false); @@ -651,7 +653,7 @@ TEST_P(FailoverTest, PriorityUpdatesWithLocalHostSetDisableOnPanic) { // Update the priority set with a new priority level P=2 and ensure the host // is chosen MockHostSet& tertiary_host_set_ = *priority_set_.getMockHostSet(2); - HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:82")})); + HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:82", simTime())})); tertiary_host_set_.hosts_ = *hosts; tertiary_host_set_.healthy_hosts_ = tertiary_host_set_.hosts_; HostVector add_hosts; @@ -672,8 +674,8 @@ TEST_P(FailoverTest, PriorityUpdatesWithLocalHostSetDisableOnPanic) { // Test extending the priority set. TEST_P(FailoverTest, ExtendPrioritiesUpdatingPrioritySet) { - host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80")}; - failover_host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:81")}; + host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}; + failover_host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}; init(true); // With both the primary and failover hosts unhealthy, we should select an // unhealthy primary host. @@ -682,7 +684,7 @@ TEST_P(FailoverTest, ExtendPrioritiesUpdatingPrioritySet) { // Update the priority set with a new priority level P=2 // As it has healthy hosts, it should be selected. MockHostSet& tertiary_host_set_ = *priority_set_.getMockHostSet(2); - HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:82")})); + HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:82", simTime())})); tertiary_host_set_.hosts_ = *hosts; tertiary_host_set_.healthy_hosts_ = tertiary_host_set_.hosts_; HostVector add_hosts; @@ -697,8 +699,8 @@ TEST_P(FailoverTest, ExtendPrioritiesUpdatingPrioritySet) { } TEST_P(FailoverTest, ExtendPrioritiesWithLocalPrioritySet) { - host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80")}; - failover_host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:81")}; + host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}; + failover_host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}; init(true); // With both the primary and failover hosts unhealthy, we should select an // unhealthy primary host. @@ -707,7 +709,8 @@ TEST_P(FailoverTest, ExtendPrioritiesWithLocalPrioritySet) { // Update the host set with a new priority level. We should start selecting // hosts from that level as it has viable hosts. MockHostSet& tertiary_host_set_ = *priority_set_.getMockHostSet(2); - HostVectorSharedPtr hosts2(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:84")})); + HostVectorSharedPtr hosts2( + new HostVector({makeTestHost(info_, "tcp://127.0.0.1:84", simTime())})); tertiary_host_set_.hosts_ = *hosts2; tertiary_host_set_.healthy_hosts_ = tertiary_host_set_.hosts_; HostVector add_hosts; @@ -717,7 +720,7 @@ TEST_P(FailoverTest, ExtendPrioritiesWithLocalPrioritySet) { // Update the local hosts. We're not doing locality based routing in this // test, but it should at least do no harm. 
- HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:82")})); + HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:82", simTime())})); updateHosts(hosts, HostsPerLocalityImpl::empty()); EXPECT_EQ(tertiary_host_set_.hosts_[0], lb_->chooseHost(nullptr)); } @@ -728,10 +731,10 @@ TEST_P(FailoverTest, PrioritiesWithNotAllWarmedHosts) { // P0: 1 healthy, 1 unhealthy, 1 warmed. // P1: 1 healthy. // We then expect no spillover, since P0 is still overprovisioned. - host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80"), - makeTestHost(info_, "tcp://127.0.0.1:81")}; + host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}; host_set_.healthy_hosts_ = {host_set_.hosts_[0]}; - failover_host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:82")}; + failover_host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:82", simTime())}; failover_host_set_.healthy_hosts_ = failover_host_set_.hosts_; init(true); @@ -746,9 +749,9 @@ TEST_P(FailoverTest, PrioritiesWithZeroWarmedHosts) { // P0: 2 unhealthy, 0 warmed. // P1: 1 healthy. // We then expect all the traffic to spill over to P1 since P0 has an effective load of zero. - host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80"), - makeTestHost(info_, "tcp://127.0.0.1:81")}; - failover_host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:82")}; + host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}; + failover_host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:82", simTime())}; failover_host_set_.healthy_hosts_ = failover_host_set_.hosts_; init(true); @@ -766,15 +769,15 @@ TEST_P(RoundRobinLoadBalancerTest, NoHosts) { } TEST_P(RoundRobinLoadBalancerTest, SingleHost) { - hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80")}; + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}; hostSet().hosts_ = hostSet().healthy_hosts_; init(false); EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); } TEST_P(RoundRobinLoadBalancerTest, Normal) { - hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80"), - makeTestHost(info_, "tcp://127.0.0.1:81")}; + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}; hostSet().hosts_ = hostSet().healthy_hosts_; init(false); @@ -793,7 +796,7 @@ TEST_P(RoundRobinLoadBalancerTest, Normal) { EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); // Change host set with no peeks in progress - hostSet().healthy_hosts_.push_back(makeTestHost(info_, "tcp://127.0.0.1:82")); + hostSet().healthy_hosts_.push_back(makeTestHost(info_, "tcp://127.0.0.1:82", simTime())); hostSet().hosts_.push_back(hostSet().healthy_hosts_.back()); hostSet().runCallbacks({hostSet().healthy_hosts_.back()}, {}); peekThenPick({2, 0, 1, 2}); @@ -803,7 +806,7 @@ TEST_P(RoundRobinLoadBalancerTest, Normal) { EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->peekAnotherHost(nullptr)); EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->peekAnotherHost(nullptr)); - hostSet().healthy_hosts_.push_back(makeTestHost(info_, "tcp://127.0.0.1:83")); + hostSet().healthy_hosts_.push_back(makeTestHost(info_, "tcp://127.0.0.1:83", simTime())); hostSet().hosts_.push_back(hostSet().healthy_hosts_.back()); hostSet().runCallbacks({hostSet().healthy_hosts_.back()}, {hostSet().healthy_hosts_.front()}); 
peekThenPick({1, 2, 3}); @@ -812,9 +815,9 @@ TEST_P(RoundRobinLoadBalancerTest, Normal) { // Validate that the RNG seed influences pick order. TEST_P(RoundRobinLoadBalancerTest, Seed) { hostSet().healthy_hosts_ = { - makeTestHost(info_, "tcp://127.0.0.1:80"), - makeTestHost(info_, "tcp://127.0.0.1:81"), - makeTestHost(info_, "tcp://127.0.0.1:82"), + makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:82", simTime()), }; hostSet().hosts_ = hostSet().healthy_hosts_; EXPECT_CALL(random_, random()).WillRepeatedly(Return(1)); @@ -826,9 +829,9 @@ TEST_P(RoundRobinLoadBalancerTest, Seed) { } TEST_P(RoundRobinLoadBalancerTest, Locality) { - HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:80"), - makeTestHost(info_, "tcp://127.0.0.1:81"), - makeTestHost(info_, "tcp://127.0.0.1:82")})); + HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:82", simTime())})); HostsPerLocalitySharedPtr hosts_per_locality = makeHostsPerLocality({{(*hosts)[1]}, {(*hosts)[0]}, {(*hosts)[2]}}); hostSet().hosts_ = *hosts; @@ -856,9 +859,9 @@ TEST_P(RoundRobinLoadBalancerTest, Locality) { } TEST_P(RoundRobinLoadBalancerTest, DegradedLocality) { - HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:80"), - makeTestHost(info_, "tcp://127.0.0.1:81"), - makeTestHost(info_, "tcp://127.0.0.1:84")})); + HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:84", simTime())})); HostVectorSharedPtr healthy_hosts(new HostVector({(*hosts)[0]})); HostVectorSharedPtr degraded_hosts(new HostVector({(*hosts)[1], (*hosts)[2]})); HostsPerLocalitySharedPtr hosts_per_locality = @@ -885,8 +888,8 @@ TEST_P(RoundRobinLoadBalancerTest, DegradedLocality) { } TEST_P(RoundRobinLoadBalancerTest, Weighted) { - hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", 1), - makeTestHost(info_, "tcp://127.0.0.1:81", 2)}; + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime(), 1), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime(), 2)}; hostSet().hosts_ = hostSet().healthy_hosts_; init(false); // Initial weights respected. @@ -908,7 +911,7 @@ TEST_P(RoundRobinLoadBalancerTest, Weighted) { EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); // Add a host, it should participate in next round of scheduling. 
- hostSet().healthy_hosts_.push_back(makeTestHost(info_, "tcp://127.0.0.1:82", 3)); + hostSet().healthy_hosts_.push_back(makeTestHost(info_, "tcp://127.0.0.1:82", simTime(), 3)); hostSet().hosts_.push_back(hostSet().healthy_hosts_.back()); hostSet().runCallbacks({hostSet().healthy_hosts_.back()}, {}); EXPECT_EQ(hostSet().healthy_hosts_[2], lb_->chooseHost(nullptr)); @@ -929,7 +932,7 @@ TEST_P(RoundRobinLoadBalancerTest, Weighted) { hostSet().healthy_hosts_.pop_back(); hostSet().hosts_.pop_back(); hostSet().hosts_.pop_back(); - hostSet().healthy_hosts_.push_back(makeTestHost(info_, "tcp://127.0.0.1:83", 4)); + hostSet().healthy_hosts_.push_back(makeTestHost(info_, "tcp://127.0.0.1:83", simTime(), 4)); hostSet().hosts_.push_back(hostSet().healthy_hosts_.back()); hostSet().healthy_hosts_[0]->weight(1); hostSet().runCallbacks({hostSet().healthy_hosts_.back()}, removed_hosts); @@ -947,8 +950,8 @@ TEST_P(RoundRobinLoadBalancerTest, Weighted) { // Validate that the RNG seed influences pick order when weighted RR. TEST_P(RoundRobinLoadBalancerTest, WeightedSeed) { - hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", 1), - makeTestHost(info_, "tcp://127.0.0.1:81", 2)}; + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime(), 1), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime(), 2)}; hostSet().hosts_ = hostSet().healthy_hosts_; EXPECT_CALL(random_, random()).WillRepeatedly(Return(1)); init(false); @@ -962,12 +965,14 @@ TEST_P(RoundRobinLoadBalancerTest, WeightedSeed) { } TEST_P(RoundRobinLoadBalancerTest, MaxUnhealthyPanic) { - hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80"), - makeTestHost(info_, "tcp://127.0.0.1:81")}; - hostSet().hosts_ = { - makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.1:81"), - makeTestHost(info_, "tcp://127.0.0.1:82"), makeTestHost(info_, "tcp://127.0.0.1:83"), - makeTestHost(info_, "tcp://127.0.0.1:84"), makeTestHost(info_, "tcp://127.0.0.1:85")}; + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}; + hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:82", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:83", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:84", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:85", simTime())}; init(false); EXPECT_EQ(hostSet().hosts_[0], lb_->chooseHost(nullptr)); @@ -975,9 +980,10 @@ TEST_P(RoundRobinLoadBalancerTest, MaxUnhealthyPanic) { EXPECT_EQ(hostSet().hosts_[2], lb_->chooseHost(nullptr)); // Take the threshold back above the panic threshold. - hostSet().healthy_hosts_ = { - makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.1:81"), - makeTestHost(info_, "tcp://127.0.0.1:82"), makeTestHost(info_, "tcp://127.0.0.1:83")}; + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:82", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:83", simTime())}; hostSet().runCallbacks({}, {}); EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); @@ -988,12 +994,14 @@ TEST_P(RoundRobinLoadBalancerTest, MaxUnhealthyPanic) { // Test that no hosts are selected when fail_traffic_on_panic is enabled. 
TEST_P(RoundRobinLoadBalancerTest, MaxUnhealthyPanicDisableOnPanic) { - hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80"), - makeTestHost(info_, "tcp://127.0.0.1:81")}; - hostSet().hosts_ = { - makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.1:81"), - makeTestHost(info_, "tcp://127.0.0.1:82"), makeTestHost(info_, "tcp://127.0.0.1:83"), - makeTestHost(info_, "tcp://127.0.0.1:84"), makeTestHost(info_, "tcp://127.0.0.1:85")}; + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}; + hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:82", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:83", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:84", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:85", simTime())}; common_config_.mutable_zone_aware_lb_config()->set_fail_traffic_on_panic(true); @@ -1001,9 +1009,10 @@ TEST_P(RoundRobinLoadBalancerTest, MaxUnhealthyPanicDisableOnPanic) { EXPECT_EQ(nullptr, lb_->chooseHost(nullptr)); // Take the threshold back above the panic threshold. - hostSet().healthy_hosts_ = { - makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.1:81"), - makeTestHost(info_, "tcp://127.0.0.1:82"), makeTestHost(info_, "tcp://127.0.0.1:83")}; + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:82", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:83", simTime())}; hostSet().runCallbacks({}, {}); EXPECT_EQ(hostSet().healthy_hosts_[0], lb_->chooseHost(nullptr)); @@ -1015,7 +1024,7 @@ TEST_P(RoundRobinLoadBalancerTest, MaxUnhealthyPanicDisableOnPanic) { // Ensure if the panic threshold is 0%, panic mode is disabled. 
TEST_P(RoundRobinLoadBalancerTest, DisablePanicMode) { hostSet().healthy_hosts_ = {}; - hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80")}; + hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}; common_config_.mutable_healthy_panic_threshold()->set_value(0); @@ -1030,10 +1039,11 @@ TEST_P(RoundRobinLoadBalancerTest, HostSelectionWithFilter) { NiceMock<MockLoadBalancerContext> context; - HostVectorSharedPtr hosts(new HostVector( - {makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.1:81")})); - HostsPerLocalitySharedPtr hosts_per_locality = makeHostsPerLocality( - {{makeTestHost(info_, "tcp://127.0.0.1:80")}, {makeTestHost(info_, "tcp://127.0.0.1:81")}}); + HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime())})); + HostsPerLocalitySharedPtr hosts_per_locality = + makeHostsPerLocality({{makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}, + {makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}}); hostSet().hosts_ = *hosts; hostSet().healthy_hosts_ = *hosts; @@ -1072,13 +1082,13 @@ TEST_P(RoundRobinLoadBalancerTest, HostSelectionWithFilter) { } TEST_P(RoundRobinLoadBalancerTest, ZoneAwareSmallCluster) { - HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:80"), - makeTestHost(info_, "tcp://127.0.0.1:81"), - makeTestHost(info_, "tcp://127.0.0.1:82")})); + HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:82", simTime())})); HostsPerLocalitySharedPtr hosts_per_locality = - makeHostsPerLocality({{makeTestHost(info_, "tcp://127.0.0.1:81")}, - {makeTestHost(info_, "tcp://127.0.0.1:80")}, - {makeTestHost(info_, "tcp://127.0.0.1:82")}}); + makeHostsPerLocality({{makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}, + {makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}, + {makeTestHost(info_, "tcp://127.0.0.1:82", simTime())}}); hostSet().hosts_ = *hosts; hostSet().healthy_hosts_ = *hosts; @@ -1118,15 +1128,16 @@ TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareDifferentZoneSize) { if (&hostSet() == &failover_host_set_) { // P = 1 does not support zone-aware routing.
return; } - HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:80"), - makeTestHost(info_, "tcp://127.0.0.1:81"), - makeTestHost(info_, "tcp://127.0.0.1:82")})); + HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:82", simTime())})); HostsPerLocalitySharedPtr upstream_hosts_per_locality = - makeHostsPerLocality({{makeTestHost(info_, "tcp://127.0.0.1:81")}, - {makeTestHost(info_, "tcp://127.0.0.1:80")}, - {makeTestHost(info_, "tcp://127.0.0.1:82")}}); - HostsPerLocalitySharedPtr local_hosts_per_locality = makeHostsPerLocality( - {{makeTestHost(info_, "tcp://127.0.0.1:81")}, {makeTestHost(info_, "tcp://127.0.0.1:80")}}); + makeHostsPerLocality({{makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}, + {makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}, + {makeTestHost(info_, "tcp://127.0.0.1:82", simTime())}}); + HostsPerLocalitySharedPtr local_hosts_per_locality = + makeHostsPerLocality({{makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}, + {makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}}); hostSet().healthy_hosts_ = *hosts; hostSet().hosts_ = *hosts; @@ -1152,13 +1163,13 @@ TEST_P(RoundRobinLoadBalancerTest, ZoneAwareRoutingLargeZoneSwitchOnOff) { if (&hostSet() == &failover_host_set_) { // P = 1 does not support zone-aware routing. return; } - HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:80"), - makeTestHost(info_, "tcp://127.0.0.1:81"), - makeTestHost(info_, "tcp://127.0.0.1:82")})); + HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:82", simTime())})); HostsPerLocalitySharedPtr hosts_per_locality = - makeHostsPerLocality({{makeTestHost(info_, "tcp://127.0.0.1:81")}, - {makeTestHost(info_, "tcp://127.0.0.1:80")}, - {makeTestHost(info_, "tcp://127.0.0.1:82")}}); + makeHostsPerLocality({{makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}, + {makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}, + {makeTestHost(info_, "tcp://127.0.0.1:82", simTime())}}); EXPECT_CALL(runtime_.snapshot_, getInteger("upstream.healthy_panic_threshold", 50)) .WillRepeatedly(Return(50)); @@ -1189,23 +1200,28 @@ TEST_P(RoundRobinLoadBalancerTest, ZoneAwareRoutingSmallZone) { if (&hostSet() == &failover_host_set_) { // P = 1 does not support zone-aware routing. 
return; } - HostVectorSharedPtr upstream_hosts(new HostVector( - {makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.1:81"), - makeTestHost(info_, "tcp://127.0.0.1:82"), makeTestHost(info_, "tcp://127.0.0.1:83"), - makeTestHost(info_, "tcp://127.0.0.1:84")})); - HostVectorSharedPtr local_hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:0"), - makeTestHost(info_, "tcp://127.0.0.1:1"), - makeTestHost(info_, "tcp://127.0.0.1:2")})); - - HostsPerLocalitySharedPtr upstream_hosts_per_locality = makeHostsPerLocality( - {{makeTestHost(info_, "tcp://127.0.0.1:81")}, - {makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.1:82")}, - {makeTestHost(info_, "tcp://127.0.0.1:83"), makeTestHost(info_, "tcp://127.0.0.1:84")}}); + HostVectorSharedPtr upstream_hosts( + new HostVector({makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:82", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:83", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:84", simTime())})); + HostVectorSharedPtr local_hosts( + new HostVector({makeTestHost(info_, "tcp://127.0.0.1:0", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:1", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:2", simTime())})); + + HostsPerLocalitySharedPtr upstream_hosts_per_locality = + makeHostsPerLocality({{makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}, + {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:82", simTime())}, + {makeTestHost(info_, "tcp://127.0.0.1:83", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:84", simTime())}}); HostsPerLocalitySharedPtr local_hosts_per_locality = - makeHostsPerLocality({{makeTestHost(info_, "tcp://127.0.0.1:0")}, - {makeTestHost(info_, "tcp://127.0.0.1:1")}, - {makeTestHost(info_, "tcp://127.0.0.1:2")}}); + makeHostsPerLocality({{makeTestHost(info_, "tcp://127.0.0.1:0", simTime())}, + {makeTestHost(info_, "tcp://127.0.0.1:1", simTime())}, + {makeTestHost(info_, "tcp://127.0.0.1:2", simTime())}}); EXPECT_CALL(runtime_.snapshot_, getInteger("upstream.healthy_panic_threshold", 50)) .WillRepeatedly(Return(50)); @@ -1236,10 +1252,12 @@ TEST_P(RoundRobinLoadBalancerTest, LowPrecisionForDistribution) { return; } // upstream_hosts and local_hosts do not matter, zone aware routing is based on per zone hosts. - HostVectorSharedPtr upstream_hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:80")})); + HostVectorSharedPtr upstream_hosts( + new HostVector({makeTestHost(info_, "tcp://127.0.0.1:80", simTime())})); hostSet().healthy_hosts_ = *upstream_hosts; hostSet().hosts_ = *upstream_hosts; - HostVectorSharedPtr local_hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:0")})); + HostVectorSharedPtr local_hosts( + new HostVector({makeTestHost(info_, "tcp://127.0.0.1:0", simTime())})); std::vector<HostVector> upstream_hosts_per_locality; std::vector<HostVector> local_hosts_per_locality; @@ -1255,7 +1273,7 @@ // situation. // Reuse the same host in all of the structures below to reduce time test takes and this does not // impact load balancing logic.
- HostSharedPtr host = makeTestHost(info_, "tcp://127.0.0.1:80"); + HostSharedPtr host = makeTestHost(info_, "tcp://127.0.0.1:80", simTime()); HostVector current(45000); for (int i = 0; i < 45000; ++i) { @@ -1299,9 +1317,9 @@ TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingOneZone) { if (&hostSet() == &failover_host_set_) { // P = 1 does not support zone-aware routing. return; } - HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:80")})); + HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:80", simTime())})); HostsPerLocalitySharedPtr hosts_per_locality = - makeHostsPerLocality({{makeTestHost(info_, "tcp://127.0.0.1:81")}}); + makeHostsPerLocality({{makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}}); hostSet().healthy_hosts_ = *hosts; hostSet().hosts_ = *hosts; @@ -1312,10 +1330,12 @@ TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingOneZone) { } TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingNotHealthy) { - HostVectorSharedPtr hosts(new HostVector( - {makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.2:80")})); - HostsPerLocalitySharedPtr hosts_per_locality = makeHostsPerLocality( - {{}, {makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.2:80")}}); + HostVectorSharedPtr hosts(new HostVector({makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.2:80", simTime())})); + HostsPerLocalitySharedPtr hosts_per_locality = + makeHostsPerLocality({{}, + {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.2:80", simTime())}}); hostSet().healthy_hosts_ = *hosts; hostSet().hosts_ = *hosts; @@ -1332,12 +1352,14 @@ TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingLocalEmpty) { if (&hostSet() == &failover_host_set_) { // P = 1 does not support zone-aware routing. return; } - HostVectorSharedPtr upstream_hosts(new HostVector( - {makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.1:81")})); + HostVectorSharedPtr upstream_hosts( + new HostVector({makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime())})); HostVectorSharedPtr local_hosts(new HostVector({}, {})); - HostsPerLocalitySharedPtr upstream_hosts_per_locality = makeHostsPerLocality( - {{makeTestHost(info_, "tcp://127.0.0.1:80")}, {makeTestHost(info_, "tcp://127.0.0.1:81")}}); + HostsPerLocalitySharedPtr upstream_hosts_per_locality = + makeHostsPerLocality({{makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}, + {makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}}); HostsPerLocalitySharedPtr local_hosts_per_locality = makeHostsPerLocality({{}, {}}); EXPECT_CALL(runtime_.snapshot_, getInteger("upstream.healthy_panic_threshold", 50)) @@ -1366,12 +1388,14 @@ TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingLocalEmptyFailTrafficOnPani if (&hostSet() == &failover_host_set_) { // P = 1 does not support zone-aware routing. 
return; } - HostVectorSharedPtr upstream_hosts(new HostVector( - {makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.1:81")})); + HostVectorSharedPtr upstream_hosts( + new HostVector({makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime())})); HostVectorSharedPtr local_hosts(new HostVector({}, {})); - HostsPerLocalitySharedPtr upstream_hosts_per_locality = makeHostsPerLocality( - {{makeTestHost(info_, "tcp://127.0.0.1:80")}, {makeTestHost(info_, "tcp://127.0.0.1:81")}}); + HostsPerLocalitySharedPtr upstream_hosts_per_locality = + makeHostsPerLocality({{makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}, + {makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}}); HostsPerLocalitySharedPtr local_hosts_per_locality = makeHostsPerLocality({{}, {}}); EXPECT_CALL(runtime_.snapshot_, getInteger("upstream.healthy_panic_threshold", 50)) @@ -1401,13 +1425,15 @@ TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingNoLocalLocality) { if (&hostSet() == &failover_host_set_) { // P = 1 does not support zone-aware routing. return; } - HostVectorSharedPtr upstream_hosts(new HostVector( - {makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.1:81")})); + HostVectorSharedPtr upstream_hosts( + new HostVector({makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime())})); HostVectorSharedPtr local_hosts(new HostVector({}, {})); - HostsPerLocalitySharedPtr upstream_hosts_per_locality = makeHostsPerLocality( - {{makeTestHost(info_, "tcp://127.0.0.1:80")}, {makeTestHost(info_, "tcp://127.0.0.1:81")}}, - true); + HostsPerLocalitySharedPtr upstream_hosts_per_locality = + makeHostsPerLocality({{makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}, + {makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}}, + true); const HostsPerLocalitySharedPtr& local_hosts_per_locality = upstream_hosts_per_locality; hostSet().healthy_hosts_ = *upstream_hosts; @@ -1434,7 +1460,7 @@ class LeastRequestLoadBalancerTest : public LoadBalancerTestBase { TEST_P(LeastRequestLoadBalancerTest, NoHosts) { EXPECT_EQ(nullptr, lb_.chooseHost(nullptr)); } TEST_P(LeastRequestLoadBalancerTest, SingleHost) { - hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80")}; + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}; hostSet().hosts_ = hostSet().healthy_hosts_; hostSet().runCallbacks({}, {}); // Trigger callbacks. The added/removed lists are not relevant. @@ -1471,8 +1497,8 @@ TEST_P(LeastRequestLoadBalancerTest, SingleHost) { } TEST_P(LeastRequestLoadBalancerTest, Normal) { - hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80"), - makeTestHost(info_, "tcp://127.0.0.1:81")}; + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}; stats_.max_host_weight_.set(1UL); hostSet().hosts_ = hostSet().healthy_hosts_; hostSet().runCallbacks({}, {}); // Trigger callbacks. The added/removed lists are not relevant. 
@@ -1489,9 +1515,10 @@ TEST_P(LeastRequestLoadBalancerTest, Normal) { } TEST_P(LeastRequestLoadBalancerTest, PNC) { - hostSet().healthy_hosts_ = { - makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.1:81"), - makeTestHost(info_, "tcp://127.0.0.1:82"), makeTestHost(info_, "tcp://127.0.0.1:83")}; + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:82", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:83", simTime())}; stats_.max_host_weight_.set(1UL); hostSet().hosts_ = hostSet().healthy_hosts_; hostSet().runCallbacks({}, {}); // Trigger callbacks. The added/removed lists are not relevant. @@ -1537,8 +1564,8 @@ TEST_P(LeastRequestLoadBalancerTest, PNC) { } TEST_P(LeastRequestLoadBalancerTest, WeightImbalance) { - hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", 1), - makeTestHost(info_, "tcp://127.0.0.1:81", 2)}; + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime(), 1), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime(), 2)}; stats_.max_host_weight_.set(2UL); hostSet().hosts_ = hostSet().healthy_hosts_; @@ -1589,8 +1616,8 @@ TEST_P(LeastRequestLoadBalancerTest, WeightImbalanceWithInvalidActiveRequestBias EXPECT_CALL(runtime_.snapshot_, getDouble("ar_bias", 1.0)).WillRepeatedly(Return(-1.0)); - hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", 1), - makeTestHost(info_, "tcp://127.0.0.1:81", 2)}; + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime(), 1), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime(), 2)}; hostSet().hosts_ = hostSet().healthy_hosts_; @@ -1643,8 +1670,8 @@ TEST_P(LeastRequestLoadBalancerTest, WeightImbalanceWithCustomActiveRequestBias) EXPECT_CALL(runtime_.snapshot_, getDouble("ar_bias", 1.0)).WillRepeatedly(Return(0.0)); - hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", 1), - makeTestHost(info_, "tcp://127.0.0.1:81", 2)}; + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime(), 1), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime(), 2)}; hostSet().hosts_ = hostSet().healthy_hosts_; hostSet().runCallbacks({}, {}); // Trigger callbacks. The added/removed lists are not relevant. @@ -1662,8 +1689,8 @@ TEST_P(LeastRequestLoadBalancerTest, WeightImbalanceWithCustomActiveRequestBias) } TEST_P(LeastRequestLoadBalancerTest, WeightImbalanceCallbacks) { - hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", 1), - makeTestHost(info_, "tcp://127.0.0.1:81", 2)}; + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime(), 1), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime(), 2)}; stats_.max_host_weight_.set(2UL); hostSet().hosts_ = hostSet().healthy_hosts_; @@ -1705,8 +1732,8 @@ TEST_P(RandomLoadBalancerTest, NoHosts) { TEST_P(RandomLoadBalancerTest, Normal) { init(); - hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80"), - makeTestHost(info_, "tcp://127.0.0.1:81")}; + hostSet().healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}; hostSet().hosts_ = hostSet().healthy_hosts_; hostSet().runCallbacks({}, {}); // Trigger callbacks. The added/removed lists are not relevant. 
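The weighted variants above (makeTestHost(info_, url, simTime(), weight)) together with the metadata variant in load_balancer_benchmark.cc (makeTestHost(info_, url, metadata, simTime(), effective_weight)) show where the new parameter slots in: the TimeSource& lands after the URL (and after the metadata, when present) but before the optional weight. Condensed, the presumed overload set this patch relies on:

    // Presumed declarations (sketch); the authoritative versions are in
    // test/common/upstream/utility.h, which this excerpt does not show.
    HostSharedPtr makeTestHost(ClusterInfoConstSharedPtr cluster, const std::string& url,
                               TimeSource& time_source, uint32_t weight = 1);
    HostSharedPtr makeTestHost(ClusterInfoConstSharedPtr cluster, const std::string& url,
                               const envoy::config::core::v3::Metadata& metadata,
                               TimeSource& time_source, uint32_t weight = 1);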
@@ -1726,8 +1753,8 @@ TEST_P(RandomLoadBalancerTest, FailClusterOnPanic) { init(); hostSet().healthy_hosts_ = {}; - hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80"), - makeTestHost(info_, "tcp://127.0.0.1:81")}; + hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}; hostSet().runCallbacks({}, {}); // Trigger callbacks. The added/removed lists are not relevant. EXPECT_EQ(nullptr, lb_->chooseHost(nullptr)); } diff --git a/test/common/upstream/load_balancer_simulation_test.cc b/test/common/upstream/load_balancer_simulation_test.cc index 77a8f7d7004c..84988675596c 100644 --- a/test/common/upstream/load_balancer_simulation_test.cc +++ b/test/common/upstream/load_balancer_simulation_test.cc @@ -14,6 +14,7 @@ #include "common/upstream/upstream_impl.h" #include "test/common/upstream/utility.h" +#include "test/mocks/common.h" #include "test/mocks/runtime/mocks.h" #include "test/mocks/upstream/cluster_info.h" #include "test/mocks/upstream/host_set.h" @@ -30,14 +31,14 @@ namespace Upstream { namespace { static HostSharedPtr newTestHost(Upstream::ClusterInfoConstSharedPtr cluster, - const std::string& url, uint32_t weight = 1, - const std::string& zone = "") { + const std::string& url, TimeSource& time_source, + uint32_t weight = 1, const std::string& zone = "") { envoy::config::core::v3::Locality locality; locality.set_zone(zone); return HostSharedPtr{ new HostImpl(cluster, "", Network::Utility::resolveUrl(url), nullptr, weight, locality, envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), 0, - envoy::config::core::v3::UNKNOWN)}; + envoy::config::core::v3::UNKNOWN, time_source)}; } // Simulate weighted LR load balancer. @@ -49,11 +50,12 @@ TEST(DISABLED_LeastRequestLoadBalancerWeightTest, Weight) { PrioritySetImpl priority_set; std::shared_ptr<MockClusterInfo> info_{new NiceMock<MockClusterInfo>()}; + NiceMock<MockTimeSystem> time_source_; HostVector hosts; for (uint64_t i = 0; i < num_hosts; i++) { const bool should_weight = i < num_hosts * (weighted_subset_percent / 100.0); hosts.push_back(makeTestHost(info_, fmt::format("tcp://10.0.{}.{}:6379", i / 256, i % 256), - should_weight ? weight : 1)); + time_source_, should_weight ? 
weight : 1)); if (should_weight) { hosts.back()->stats().rq_active_.set(active_requests); } @@ -201,7 +203,7 @@ class DISABLED_SimulationTest : public testing::Test { const std::string zone = std::to_string(i); for (uint32_t j = 0; j < hosts[i]; ++j) { const std::string url = fmt::format("tcp://host.{}.{}:80", i, j); - ret->push_back(newTestHost(info_, url, 1, zone)); + ret->push_back(newTestHost(info_, url, time_source_, 1, zone)); } } @@ -220,7 +222,7 @@ class DISABLED_SimulationTest : public testing::Test { for (uint32_t j = 0; j < hosts[i]; ++j) { const std::string url = fmt::format("tcp://host.{}.{}:80", i, j); - zone_hosts.push_back(newTestHost(info_, url, 1, zone)); + zone_hosts.push_back(newTestHost(info_, url, time_source_, 1, zone)); } ret.push_back(std::move(zone_hosts)); @@ -237,6 +239,7 @@ class DISABLED_SimulationTest : public testing::Test { MockHostSet& host_set_ = *priority_set_.getMockHostSet(0); std::shared_ptr<MockClusterInfo> info_{new NiceMock<MockClusterInfo>()}; NiceMock<Runtime::MockLoader> runtime_; + NiceMock<MockTimeSystem> time_source_; Random::RandomGeneratorImpl random_; Stats::IsolatedStoreImpl stats_store_; ClusterStats stats_; diff --git a/test/common/upstream/logical_dns_cluster_test.cc b/test/common/upstream/logical_dns_cluster_test.cc index c348e5132148..d3b74b73a7f5 100644 --- a/test/common/upstream/logical_dns_cluster_test.cc +++ b/test/common/upstream/logical_dns_cluster_test.cc @@ -39,7 +39,7 @@ namespace Envoy { namespace Upstream { namespace { -class LogicalDnsClusterTest : public testing::Test { +class LogicalDnsClusterTest : public Event::TestUsingSimulatedTime, public testing::Test { protected: LogicalDnsClusterTest() : api_(Api::createApiForTest(stats_store_, random_)) {} diff --git a/test/common/upstream/maglev_lb_test.cc b/test/common/upstream/maglev_lb_test.cc index 7e60e9b0e89b..da01022d893f 100644 --- a/test/common/upstream/maglev_lb_test.cc +++ b/test/common/upstream/maglev_lb_test.cc @@ -9,6 +9,7 @@ #include "test/mocks/upstream/cluster_info.h" #include "test/mocks/upstream/host_set.h" #include "test/mocks/upstream/priority_set.h" +#include "test/test_common/simulated_time_system.h" namespace Envoy { namespace Upstream { @@ -39,7 +40,7 @@ class TestLoadBalancerContext : public LoadBalancerContextBase { // Note: ThreadAwareLoadBalancer base is heavily tested by RingHashLoadBalancerTest. Only basic // functionality is covered here. -class MaglevLoadBalancerTest : public testing::Test { +class MaglevLoadBalancerTest : public Event::TestUsingSimulatedTime, public testing::Test { public: MaglevLoadBalancerTest() : stats_(ClusterInfoImpl::generateStats(stats_store_)) {} @@ -95,10 +96,12 @@ TEST_F(MaglevLoadBalancerTest, DefaultMaglevTableSize) { // Basic sanity tests. TEST_F(MaglevLoadBalancerTest, Basic) { - host_set_.hosts_ = { - makeTestHost(info_, "tcp://127.0.0.1:90"), makeTestHost(info_, "tcp://127.0.0.1:91"), - makeTestHost(info_, "tcp://127.0.0.1:92"), makeTestHost(info_, "tcp://127.0.0.1:93"), - makeTestHost(info_, "tcp://127.0.0.1:94"), makeTestHost(info_, "tcp://127.0.0.1:95")}; + host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:91", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:92", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:93", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:94", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:95", simTime())}; host_set_.healthy_hosts_ = host_set_.hosts_; host_set_.runCallbacks({}, {}); init(7); @@ -126,12 +129,12 @@ TEST_F(MaglevLoadBalancerTest, Basic) { // Basic with hostname. 
TEST_F(MaglevLoadBalancerTest, BasicWithHostName) { - host_set_.hosts_ = {makeTestHost(info_, "90", "tcp://127.0.0.1:90"), - makeTestHost(info_, "91", "tcp://127.0.0.1:91"), - makeTestHost(info_, "92", "tcp://127.0.0.1:92"), - makeTestHost(info_, "93", "tcp://127.0.0.1:93"), - makeTestHost(info_, "94", "tcp://127.0.0.1:94"), - makeTestHost(info_, "95", "tcp://127.0.0.1:95")}; + host_set_.hosts_ = {makeTestHost(info_, "90", "tcp://127.0.0.1:90", simTime()), + makeTestHost(info_, "91", "tcp://127.0.0.1:91", simTime()), + makeTestHost(info_, "92", "tcp://127.0.0.1:92", simTime()), + makeTestHost(info_, "93", "tcp://127.0.0.1:93", simTime()), + makeTestHost(info_, "94", "tcp://127.0.0.1:94", simTime()), + makeTestHost(info_, "95", "tcp://127.0.0.1:95", simTime())}; host_set_.healthy_hosts_ = host_set_.hosts_; host_set_.runCallbacks({}, {}); common_config_ = envoy::config::cluster::v3::Cluster::CommonLbConfig(); @@ -164,10 +167,12 @@ TEST_F(MaglevLoadBalancerTest, BasicWithHostName) { // Same ring as the Basic test, but exercise retry host predicate behavior. TEST_F(MaglevLoadBalancerTest, BasicWithRetryHostPredicate) { - host_set_.hosts_ = { - makeTestHost(info_, "tcp://127.0.0.1:90"), makeTestHost(info_, "tcp://127.0.0.1:91"), - makeTestHost(info_, "tcp://127.0.0.1:92"), makeTestHost(info_, "tcp://127.0.0.1:93"), - makeTestHost(info_, "tcp://127.0.0.1:94"), makeTestHost(info_, "tcp://127.0.0.1:95")}; + host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:91", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:92", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:93", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:94", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:95", simTime())}; host_set_.healthy_hosts_ = host_set_.hosts_; host_set_.runCallbacks({}, {}); init(7); @@ -210,8 +215,8 @@ TEST_F(MaglevLoadBalancerTest, BasicWithRetryHostPredicate) { // Weighted sanity test. TEST_F(MaglevLoadBalancerTest, Weighted) { - host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", 1), - makeTestHost(info_, "tcp://127.0.0.1:91", 2)}; + host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime(), 1), + makeTestHost(info_, "tcp://127.0.0.1:91", simTime(), 2)}; host_set_.healthy_hosts_ = host_set_.hosts_; host_set_.runCallbacks({}, {}); init(17); @@ -248,8 +253,8 @@ TEST_F(MaglevLoadBalancerTest, Weighted) { // Locality weighted sanity test when localities have the same weights. Host weights for hosts in // different localities shouldn't matter. TEST_F(MaglevLoadBalancerTest, LocalityWeightedSameLocalityWeights) { - host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", 1), - makeTestHost(info_, "tcp://127.0.0.1:91", 2)}; + host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime(), 1), + makeTestHost(info_, "tcp://127.0.0.1:91", simTime(), 2)}; host_set_.healthy_hosts_ = host_set_.hosts_; host_set_.hosts_per_locality_ = makeHostsPerLocality({{host_set_.hosts_[0]}, {host_set_.hosts_[1]}}); @@ -291,9 +296,9 @@ TEST_F(MaglevLoadBalancerTest, LocalityWeightedSameLocalityWeights) { // Locality weighted sanity test when localities have different weights. Host weights for hosts in // different localities shouldn't matter. 
TEST_F(MaglevLoadBalancerTest, LocalityWeightedDifferentLocalityWeights) { - host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", 1), - makeTestHost(info_, "tcp://127.0.0.1:91", 2), - makeTestHost(info_, "tcp://127.0.0.1:92", 3)}; + host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime(), 1), + makeTestHost(info_, "tcp://127.0.0.1:91", simTime(), 2), + makeTestHost(info_, "tcp://127.0.0.1:92", simTime(), 3)}; host_set_.healthy_hosts_ = host_set_.hosts_; host_set_.hosts_per_locality_ = makeHostsPerLocality({{host_set_.hosts_[0]}, {host_set_.hosts_[2]}, {host_set_.hosts_[1]}}); @@ -334,7 +339,7 @@ TEST_F(MaglevLoadBalancerTest, LocalityWeightedDifferentLocalityWeights) { // Locality weighted with all localities zero weighted. TEST_F(MaglevLoadBalancerTest, LocalityWeightedAllZeroLocalityWeights) { - host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", 1)}; + host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime(), 1)}; host_set_.healthy_hosts_ = host_set_.hosts_; host_set_.hosts_per_locality_ = makeHostsPerLocality({{host_set_.hosts_[0]}}); host_set_.healthy_hosts_per_locality_ = host_set_.hosts_per_locality_; @@ -350,8 +355,8 @@ TEST_F(MaglevLoadBalancerTest, LocalityWeightedAllZeroLocalityWeights) { // Validate that when we are in global panic and have localities, we get sane // results (fall back to non-healthy hosts). TEST_F(MaglevLoadBalancerTest, LocalityWeightedGlobalPanic) { - host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", 1), - makeTestHost(info_, "tcp://127.0.0.1:91", 2)}; + host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime(), 1), + makeTestHost(info_, "tcp://127.0.0.1:91", simTime(), 2)}; host_set_.healthy_hosts_ = {}; host_set_.hosts_per_locality_ = makeHostsPerLocality({{host_set_.hosts_[0]}, {host_set_.hosts_[1]}}); @@ -396,7 +401,7 @@ TEST_F(MaglevLoadBalancerTest, LocalityWeightedLopsided) { host_set_.hosts_.clear(); HostVector heavy_but_sparse, light_but_dense; for (uint32_t i = 0; i < 1024; ++i) { - auto host(makeTestHost(info_, fmt::format("tcp://127.0.0.1:{}", i))); + auto host(makeTestHost(info_, fmt::format("tcp://127.0.0.1:{}", i), simTime())); host_set_.hosts_.push_back(host); (i == 0 ? heavy_but_sparse : light_but_dense).push_back(host); } diff --git a/test/common/upstream/original_dst_cluster_test.cc b/test/common/upstream/original_dst_cluster_test.cc index 643de67a77e5..fe9028141e60 100644 --- a/test/common/upstream/original_dst_cluster_test.cc +++ b/test/common/upstream/original_dst_cluster_test.cc @@ -62,7 +62,7 @@ class TestLoadBalancerContext : public LoadBalancerContextBase { Http::RequestHeaderMapPtr downstream_headers_; }; -class OriginalDstClusterTest : public testing::Test { +class OriginalDstClusterTest : public Event::TestUsingSimulatedTime, public testing::Test { public: // cleanup timer must be created before the cluster (in setup()), so that we can set expectations // on it. 
Ownership is transferred to the cluster at the cluster constructor, so the cluster will diff --git a/test/common/upstream/outlier_detection_impl_test.cc b/test/common/upstream/outlier_detection_impl_test.cc index 4170ceedcedb..b71ed9e3481a 100644 --- a/test/common/upstream/outlier_detection_impl_test.cc +++ b/test/common/upstream/outlier_detection_impl_test.cc @@ -64,7 +64,7 @@ class CallbackChecker { MOCK_METHOD(void, check, (HostSharedPtr host)); }; -class OutlierDetectorImplTest : public testing::Test { +class OutlierDetectorImplTest : public Event::TestUsingSimulatedTime, public testing::Test { public: OutlierDetectorImplTest() : outlier_detection_ejections_active_(cluster_.info_->stats_store_.gauge( @@ -88,7 +88,7 @@ class OutlierDetectorImplTest : public testing::Test { void addHosts(std::vector<std::string> urls, bool primary = true) { HostVector& hosts = primary ? hosts_ : failover_hosts_; for (auto& url : urls) { - hosts.emplace_back(makeTestHost(cluster_.info_, url)); + hosts.emplace_back(makeTestHost(cluster_.info_, url, simTime())); } } diff --git a/test/common/upstream/ring_hash_lb_test.cc b/test/common/upstream/ring_hash_lb_test.cc index 4ceb362e240e..2da9634bab71 100644 --- a/test/common/upstream/ring_hash_lb_test.cc +++ b/test/common/upstream/ring_hash_lb_test.cc @@ -16,6 +16,7 @@ #include "test/mocks/upstream/cluster_info.h" #include "test/mocks/upstream/host_set.h" #include "test/mocks/upstream/priority_set.h" +#include "test/test_common/simulated_time_system.h" #include "absl/container/node_hash_map.h" #include "gmock/gmock.h" @@ -51,7 +52,8 @@ class TestLoadBalancerContext : public LoadBalancerContextBase { HostPredicate should_select_another_host_; }; -class RingHashLoadBalancerTest : public testing::TestWithParam<bool> { +class RingHashLoadBalancerTest : public Event::TestUsingSimulatedTime, + public testing::TestWithParam<bool> { public: RingHashLoadBalancerTest() : stats_(ClusterInfoImpl::generateStats(stats_store_)) {} @@ -101,10 +103,12 @@ TEST_P(RingHashLoadBalancerTest, BadRingSizeBounds) { } TEST_P(RingHashLoadBalancerTest, Basic) { - hostSet().hosts_ = { - makeTestHost(info_, "tcp://127.0.0.1:90"), makeTestHost(info_, "tcp://127.0.0.1:91"), - makeTestHost(info_, "tcp://127.0.0.1:92"), makeTestHost(info_, "tcp://127.0.0.1:93"), - makeTestHost(info_, "tcp://127.0.0.1:94"), makeTestHost(info_, "tcp://127.0.0.1:95")}; + hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:91", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:92", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:93", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:94", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:95", simTime())}; hostSet().healthy_hosts_ = hostSet().hosts_; hostSet().runCallbacks({}, {}); @@ -170,8 +174,8 @@ TEST_P(RingHashLoadBalancerTest, Basic) { // Ensure if all the hosts with priority 0 unhealthy, the next priority hosts are used. 
TEST_P(RingHashFailoverTest, BasicFailover) { - host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80")}; - failover_host_set_.healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:82")}; + host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime())}; + failover_host_set_.healthy_hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:82", simTime())}; failover_host_set_.hosts_ = failover_host_set_.healthy_hosts_; config_ = envoy::config::cluster::v3::Cluster::RingHashLbConfig(); @@ -197,8 +201,8 @@ TEST_P(RingHashFailoverTest, BasicFailover) { EXPECT_EQ(failover_host_set_.healthy_hosts_[0], lb->chooseHost(nullptr)); // Set up so P=0 gets 70% of the load, and P=1 gets 30%. - host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80"), - makeTestHost(info_, "tcp://127.0.0.1:81")}; + host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}; host_set_.healthy_hosts_ = {host_set_.hosts_[0]}; host_set_.runCallbacks({}, {}); lb = lb_->factory()->create(); @@ -210,10 +214,12 @@ TEST_P(RingHashFailoverTest, BasicFailover) { // Expect reasonable results with Murmur2 hash. TEST_P(RingHashLoadBalancerTest, BasicWithMurmur2) { - hostSet().hosts_ = { - makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.1:81"), - makeTestHost(info_, "tcp://127.0.0.1:82"), makeTestHost(info_, "tcp://127.0.0.1:83"), - makeTestHost(info_, "tcp://127.0.0.1:84"), makeTestHost(info_, "tcp://127.0.0.1:85")}; + hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:82", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:83", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:84", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:85", simTime())}; hostSet().healthy_hosts_ = hostSet().hosts_; hostSet().runCallbacks({}, {}); @@ -265,12 +271,12 @@ TEST_P(RingHashLoadBalancerTest, BasicWithMurmur2) { // Expect reasonable results with hostname. TEST_P(RingHashLoadBalancerTest, BasicWithHostname) { - hostSet().hosts_ = {makeTestHost(info_, "90", "tcp://127.0.0.1:90"), - makeTestHost(info_, "91", "tcp://127.0.0.1:91"), - makeTestHost(info_, "92", "tcp://127.0.0.1:92"), - makeTestHost(info_, "93", "tcp://127.0.0.1:93"), - makeTestHost(info_, "94", "tcp://127.0.0.1:94"), - makeTestHost(info_, "95", "tcp://127.0.0.1:95")}; + hostSet().hosts_ = {makeTestHost(info_, "90", "tcp://127.0.0.1:90", simTime()), + makeTestHost(info_, "91", "tcp://127.0.0.1:91", simTime()), + makeTestHost(info_, "92", "tcp://127.0.0.1:92", simTime()), + makeTestHost(info_, "93", "tcp://127.0.0.1:93", simTime()), + makeTestHost(info_, "94", "tcp://127.0.0.1:94", simTime()), + makeTestHost(info_, "95", "tcp://127.0.0.1:95", simTime())}; hostSet().healthy_hosts_ = hostSet().hosts_; hostSet().runCallbacks({}, {}); @@ -340,10 +346,12 @@ TEST_P(RingHashLoadBalancerTest, BasicWithHostname) { // Test the same ring as Basic but exercise retry host predicate behavior. 
TEST_P(RingHashLoadBalancerTest, BasicWithRetryHostPredicate) { - hostSet().hosts_ = { - makeTestHost(info_, "tcp://127.0.0.1:90"), makeTestHost(info_, "tcp://127.0.0.1:91"), - makeTestHost(info_, "tcp://127.0.0.1:92"), makeTestHost(info_, "tcp://127.0.0.1:93"), - makeTestHost(info_, "tcp://127.0.0.1:94"), makeTestHost(info_, "tcp://127.0.0.1:95")}; + hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:91", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:92", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:93", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:94", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:95", simTime())}; hostSet().healthy_hosts_ = hostSet().hosts_; hostSet().runCallbacks({}, {}); @@ -405,8 +413,8 @@ TEST_P(RingHashLoadBalancerTest, BasicWithRetryHostPredicate) { // Given 2 hosts and a minimum ring size of 3, expect 2 hashes per host and a ring size of 4. TEST_P(RingHashLoadBalancerTest, UnevenHosts) { - hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80"), - makeTestHost(info_, "tcp://127.0.0.1:81")}; + hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime())}; hostSet().healthy_hosts_ = hostSet().hosts_; hostSet().runCallbacks({}, {}); @@ -431,8 +439,8 @@ TEST_P(RingHashLoadBalancerTest, UnevenHosts) { EXPECT_EQ(hostSet().hosts_[0], lb->chooseHost(&context)); } - hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:81"), - makeTestHost(info_, "tcp://127.0.0.1:82")}; + hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:81", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:82", simTime())}; hostSet().healthy_hosts_ = hostSet().hosts_; hostSet().runCallbacks({}, {}); @@ -454,9 +462,9 @@ TEST_P(RingHashLoadBalancerTest, UnevenHosts) { // Given hosts with weights 1, 2 and 3, and a ring size of exactly 6, expect the correct number of // hashes for each host. TEST_P(RingHashLoadBalancerTest, HostWeightedTinyRing) { - hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", 1), - makeTestHost(info_, "tcp://127.0.0.1:91", 2), - makeTestHost(info_, "tcp://127.0.0.1:92", 3)}; + hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime(), 1), + makeTestHost(info_, "tcp://127.0.0.1:91", simTime(), 2), + makeTestHost(info_, "tcp://127.0.0.1:92", simTime(), 3)}; hostSet().healthy_hosts_ = hostSet().hosts_; hostSet().runCallbacks({}, {}); @@ -483,9 +491,9 @@ TEST_P(RingHashLoadBalancerTest, HostWeightedTinyRing) { // Given hosts with weights 1, 2 and 3, and a sufficiently large ring, expect that requests will // distribute to the hosts with approximately the right proportion. TEST_P(RingHashLoadBalancerTest, HostWeightedLargeRing) { - hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", 1), - makeTestHost(info_, "tcp://127.0.0.1:91", 2), - makeTestHost(info_, "tcp://127.0.0.1:92", 3)}; + hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime(), 1), + makeTestHost(info_, "tcp://127.0.0.1:91", simTime(), 2), + makeTestHost(info_, "tcp://127.0.0.1:92", simTime(), 3)}; hostSet().healthy_hosts_ = hostSet().hosts_; hostSet().runCallbacks({}, {}); @@ -512,8 +520,8 @@ TEST_P(RingHashLoadBalancerTest, HostWeightedLargeRing) { // Given locality weights all 0, expect the same behavior as if no hosts were provided at all. 
TEST_P(RingHashLoadBalancerTest, ZeroLocalityWeights) { - hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90"), - makeTestHost(info_, "tcp://127.0.0.1:91")}; + hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:91", simTime())}; hostSet().healthy_hosts_ = hostSet().hosts_; hostSet().hosts_per_locality_ = makeHostsPerLocality({{hostSet().hosts_[0]}, {hostSet().hosts_[1]}}); @@ -528,9 +536,10 @@ TEST_P(RingHashLoadBalancerTest, ZeroLocalityWeights) { // Given localities with weights 1, 2, 3 and 0, and a ring size of exactly 6, expect the correct // number of hashes for each host. TEST_P(RingHashLoadBalancerTest, LocalityWeightedTinyRing) { - hostSet().hosts_ = { - makeTestHost(info_, "tcp://127.0.0.1:90"), makeTestHost(info_, "tcp://127.0.0.1:91"), - makeTestHost(info_, "tcp://127.0.0.1:92"), makeTestHost(info_, "tcp://127.0.0.1:93")}; + hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:91", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:92", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:93", simTime())}; hostSet().healthy_hosts_ = hostSet().hosts_; hostSet().hosts_per_locality_ = makeHostsPerLocality( {{hostSet().hosts_[0]}, {hostSet().hosts_[1]}, {hostSet().hosts_[2]}, {hostSet().hosts_[3]}}); @@ -562,9 +571,10 @@ TEST_P(RingHashLoadBalancerTest, LocalityWeightedTinyRing) { // Given localities with weights 1, 2, 3 and 0, and a sufficiently large ring, expect that requests // will distribute to the hosts with approximately the right proportion. TEST_P(RingHashLoadBalancerTest, LocalityWeightedLargeRing) { - hostSet().hosts_ = { - makeTestHost(info_, "tcp://127.0.0.1:90"), makeTestHost(info_, "tcp://127.0.0.1:91"), - makeTestHost(info_, "tcp://127.0.0.1:92"), makeTestHost(info_, "tcp://127.0.0.1:93")}; + hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:91", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:92", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:93", simTime())}; hostSet().healthy_hosts_ = hostSet().hosts_; hostSet().hosts_per_locality_ = makeHostsPerLocality( {{hostSet().hosts_[0]}, {hostSet().hosts_[1]}, {hostSet().hosts_[2]}, {hostSet().hosts_[3]}}); @@ -598,9 +608,10 @@ TEST_P(RingHashLoadBalancerTest, LocalityWeightedLargeRing) { TEST_P(RingHashLoadBalancerTest, HostAndLocalityWeightedTinyRing) { // :90 and :91 have a 1:2 ratio within the first locality, :92 and :93 have a 1:2 ratio within the // second locality, and the two localities have a 1:2 ratio overall. 
- hostSet().hosts_ = { - makeTestHost(info_, "tcp://127.0.0.1:90", 1), makeTestHost(info_, "tcp://127.0.0.1:91", 2), - makeTestHost(info_, "tcp://127.0.0.1:92", 1), makeTestHost(info_, "tcp://127.0.0.1:93", 2)}; + hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime(), 1), + makeTestHost(info_, "tcp://127.0.0.1:91", simTime(), 2), + makeTestHost(info_, "tcp://127.0.0.1:92", simTime(), 1), + makeTestHost(info_, "tcp://127.0.0.1:93", simTime(), 2)}; hostSet().healthy_hosts_ = hostSet().hosts_; hostSet().hosts_per_locality_ = makeHostsPerLocality( {{hostSet().hosts_[0], hostSet().hosts_[1]}, {hostSet().hosts_[2], hostSet().hosts_[3]}}); @@ -635,9 +646,10 @@ TEST_P(RingHashLoadBalancerTest, HostAndLocalityWeightedTinyRing) { TEST_P(RingHashLoadBalancerTest, HostAndLocalityWeightedLargeRing) { // :90 and :91 have a 1:2 ratio within the first locality, :92 and :93 have a 1:2 ratio within the // second locality, and the two localities have a 1:2 ratio overall. - hostSet().hosts_ = { - makeTestHost(info_, "tcp://127.0.0.1:90", 1), makeTestHost(info_, "tcp://127.0.0.1:91", 2), - makeTestHost(info_, "tcp://127.0.0.1:92", 1), makeTestHost(info_, "tcp://127.0.0.1:93", 2)}; + hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime(), 1), + makeTestHost(info_, "tcp://127.0.0.1:91", simTime(), 2), + makeTestHost(info_, "tcp://127.0.0.1:92", simTime(), 1), + makeTestHost(info_, "tcp://127.0.0.1:93", simTime(), 2)}; hostSet().healthy_hosts_ = hostSet().hosts_; hostSet().hosts_per_locality_ = makeHostsPerLocality( {{hostSet().hosts_[0], hostSet().hosts_[1]}, {hostSet().hosts_[2], hostSet().hosts_[3]}}); @@ -670,9 +682,10 @@ TEST_P(RingHashLoadBalancerTest, HostAndLocalityWeightedLargeRing) { // Given 4 hosts and a ring size of exactly 2, expect that 2 hosts will be present in the ring and // the other 2 hosts will be absent. TEST_P(RingHashLoadBalancerTest, SmallFractionalScale) { - hostSet().hosts_ = { - makeTestHost(info_, "tcp://127.0.0.1:90"), makeTestHost(info_, "tcp://127.0.0.1:91"), - makeTestHost(info_, "tcp://127.0.0.1:92"), makeTestHost(info_, "tcp://127.0.0.1:93")}; + hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:91", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:92", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:93", simTime())}; hostSet().healthy_hosts_ = hostSet().hosts_; hostSet().runCallbacks({}, {}); @@ -711,8 +724,8 @@ TEST_P(RingHashLoadBalancerTest, SmallFractionalScale) { // Given 2 hosts and a ring size of exactly 1023, expect that one host will have 511 entries and the // other will have 512. TEST_P(RingHashLoadBalancerTest, LargeFractionalScale) { - hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90"), - makeTestHost(info_, "tcp://127.0.0.1:91")}; + hostSet().hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:90", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:91", simTime())}; hostSet().healthy_hosts_ = hostSet().hosts_; hostSet().runCallbacks({}, {}); @@ -743,7 +756,7 @@ TEST_P(RingHashLoadBalancerTest, LopsidedWeightSmallScale) { hostSet().hosts_.clear(); HostVector heavy_but_sparse, light_but_dense; for (uint32_t i = 0; i < 1024; ++i) { - auto host(makeTestHost(info_, fmt::format("tcp://127.0.0.1:{}", i))); + auto host(makeTestHost(info_, fmt::format("tcp://127.0.0.1:{}", i), simTime())); hostSet().hosts_.push_back(host); (i == 0 ? 
heavy_but_sparse : light_but_dense).push_back(host); } diff --git a/test/common/upstream/subset_lb_test.cc b/test/common/upstream/subset_lb_test.cc index 521d95fd2f4c..41345a6d27a0 100644 --- a/test/common/upstream/subset_lb_test.cc +++ b/test/common/upstream/subset_lb_test.cc @@ -23,6 +23,7 @@ #include "test/mocks/upstream/host_set.h" #include "test/mocks/upstream/load_balancer.h" #include "test/mocks/upstream/priority_set.h" +#include "test/test_common/simulated_time_system.h" #include "absl/types/optional.h" #include "gmock/gmock.h" @@ -123,7 +124,8 @@ class TestLoadBalancerContext : public LoadBalancerContextBase { enum class UpdateOrder { RemovesFirst, Simultaneous }; -class SubsetLoadBalancerTest : public testing::TestWithParam<UpdateOrder> { +class SubsetLoadBalancerTest : public Event::TestUsingSimulatedTime, + public testing::TestWithParam<UpdateOrder> { public: SubsetLoadBalancerTest() : scope_(stats_store_.createScope("testprefix")), @@ -256,7 +258,7 @@ class SubsetLoadBalancerTest : public testing::TestWithParam<UpdateOrder> { .set_string_value(m_it.second); } - return makeTestHost(info_, url, m); + return makeTestHost(info_, url, m, simTime()); } HostSharedPtr makeHost(const std::string& url, const HostListMetadata& metadata) { envoy::config::core::v3::Metadata m; @@ -268,7 +270,7 @@ class SubsetLoadBalancerTest : public testing::TestWithParam<UpdateOrder> { } } - return makeTestHost(info_, url, m); + return makeTestHost(info_, url, m, simTime()); } ProtobufWkt::Struct makeDefaultSubset(HostMetadata metadata) { @@ -1421,7 +1423,7 @@ TEST_F(SubsetLoadBalancerTest, IgnoresHostsWithoutMetadata) { EXPECT_CALL(subset_info_, subsetSelectors()).WillRepeatedly(ReturnRef(subset_selectors)); HostVector hosts; - hosts.emplace_back(makeTestHost(info_, "tcp://127.0.0.1:80")); + hosts.emplace_back(makeTestHost(info_, "tcp://127.0.0.1:80", simTime())); hosts.emplace_back(makeHost("tcp://127.0.0.1:81", {{"version", "1.0"}})); host_set_.hosts_ = hosts; diff --git a/test/common/upstream/upstream_impl_test.cc b/test/common/upstream/upstream_impl_test.cc index a796379301d8..11ce22198cf9 100644 --- a/test/common/upstream/upstream_impl_test.cc +++ b/test/common/upstream/upstream_impl_test.cc @@ -1190,25 +1190,28 @@ TEST_F(StrictDnsClusterImplTest, Http2UserDefinedSettingsParametersValidation) { " both"); } -TEST(HostImplTest, HostCluster) { +class HostImplTest : public Event::TestUsingSimulatedTime, public testing::Test {}; + +TEST_F(HostImplTest, HostCluster) { MockClusterMockPrioritySet cluster; - HostSharedPtr host = makeTestHost(cluster.info_, "tcp://10.0.0.1:1234", 1); + HostSharedPtr host = makeTestHost(cluster.info_, "tcp://10.0.0.1:1234", simTime(), 1); EXPECT_EQ(cluster.info_.get(), &host->cluster()); EXPECT_EQ("", host->hostname()); EXPECT_FALSE(host->canary()); EXPECT_EQ("", host->locality().zone()); } -TEST(HostImplTest, Weight) { +TEST_F(HostImplTest, Weight) { MockClusterMockPrioritySet cluster; - EXPECT_EQ(1U, makeTestHost(cluster.info_, "tcp://10.0.0.1:1234", 0)->weight()); - EXPECT_EQ(128U, makeTestHost(cluster.info_, "tcp://10.0.0.1:1234", 128)->weight()); + EXPECT_EQ(1U, makeTestHost(cluster.info_, "tcp://10.0.0.1:1234", simTime(), 0)->weight()); + EXPECT_EQ(128U, makeTestHost(cluster.info_, "tcp://10.0.0.1:1234", simTime(), 128)->weight()); EXPECT_EQ(std::numeric_limits<uint32_t>::max(), - makeTestHost(cluster.info_, "tcp://10.0.0.1:1234", std::numeric_limits<uint32_t>::max()) ->weight()); + makeTestHost(cluster.info_, "tcp://10.0.0.1:1234", simTime(), + std::numeric_limits<uint32_t>::max()) ->weight()); - HostSharedPtr host = makeTestHost(cluster.info_, 
"tcp://10.0.0.1:1234", 50); + HostSharedPtr host = makeTestHost(cluster.info_, "tcp://10.0.0.1:1234", simTime(), 50); EXPECT_EQ(50U, host->weight()); host->weight(51); EXPECT_EQ(51U, host->weight()); @@ -1218,7 +1221,7 @@ TEST(HostImplTest, Weight) { EXPECT_EQ(std::numeric_limits::max(), host->weight()); } -TEST(HostImplTest, HostnameCanaryAndLocality) { +TEST_F(HostImplTest, HostnameCanaryAndLocality) { MockClusterMockPrioritySet cluster; envoy::config::core::v3::Metadata metadata; Config::Metadata::mutableMetadataValue(metadata, Config::MetadataFilters::get().ENVOY_LB, @@ -1231,7 +1234,7 @@ TEST(HostImplTest, HostnameCanaryAndLocality) { HostImpl host(cluster.info_, "lyft.com", Network::Utility::resolveUrl("tcp://10.0.0.1:1234"), std::make_shared(metadata), 1, locality, envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), 1, - envoy::config::core::v3::UNKNOWN); + envoy::config::core::v3::UNKNOWN, simTime()); EXPECT_EQ(cluster.info_.get(), &host.cluster()); EXPECT_EQ("lyft.com", host.hostname()); EXPECT_TRUE(host.canary()); @@ -1241,9 +1244,9 @@ TEST(HostImplTest, HostnameCanaryAndLocality) { EXPECT_EQ(1, host.priority()); } -TEST(HostImplTest, HealthFlags) { +TEST_F(HostImplTest, HealthFlags) { MockClusterMockPrioritySet cluster; - HostSharedPtr host = makeTestHost(cluster.info_, "tcp://10.0.0.1:1234", 1); + HostSharedPtr host = makeTestHost(cluster.info_, "tcp://10.0.0.1:1234", simTime(), 1); // To begin with, no flags are set so we're healthy. EXPECT_EQ(Host::Health::Healthy, host->health()); @@ -1277,26 +1280,27 @@ TEST(HostImplTest, HealthFlags) { // domain socket host and a health check config with non-zero port. // This is a regression test for oss-fuzz issue // https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=11095 -TEST(HostImplTest, HealthPipeAddress) { +TEST_F(HostImplTest, HealthPipeAddress) { EXPECT_THROW_WITH_MESSAGE( { std::shared_ptr info{new NiceMock()}; envoy::config::endpoint::v3::Endpoint::HealthCheckConfig config; config.set_port_value(8000); HostDescriptionImpl descr(info, "", Network::Utility::resolveUrl("unix://foo"), nullptr, - envoy::config::core::v3::Locality().default_instance(), config, - 1); + envoy::config::core::v3::Locality().default_instance(), config, 1, + simTime()); }, EnvoyException, "Invalid host configuration: non-zero port for non-IP address"); } // Test that hostname flag from the health check config propagates. -TEST(HostImplTest, HealthcheckHostname) { +TEST_F(HostImplTest, HealthcheckHostname) { std::shared_ptr info{new NiceMock()}; envoy::config::endpoint::v3::Endpoint::HealthCheckConfig config; config.set_hostname("foo"); HostDescriptionImpl descr(info, "", Network::Utility::resolveUrl("tcp://1.2.3.4:80"), nullptr, - envoy::config::core::v3::Locality().default_instance(), config, 1); + envoy::config::core::v3::Locality().default_instance(), config, 1, + simTime()); EXPECT_EQ("foo", descr.hostnameForHealthChecks()); } @@ -2139,7 +2143,9 @@ TEST(PrioritySet, Extend) { // Now add hosts for priority 1, and ensure they're added and subscribers are notified. 
std::shared_ptr<MockClusterInfo> info{new NiceMock<MockClusterInfo>()}; - HostVectorSharedPtr hosts(new HostVector({makeTestHost(info, "tcp://127.0.0.1:80")})); + auto time_source = std::make_unique<NiceMock<MockTimeSystem>>(); + HostVectorSharedPtr hosts( + new HostVector({makeTestHost(info, "tcp://127.0.0.1:80", *time_source)})); HostsPerLocalitySharedPtr hosts_per_locality = std::make_shared<HostsPerLocalityImpl>(); { HostVector hosts_added{hosts->front()}; @@ -2998,8 +3004,10 @@ TEST(HostsPerLocalityImpl, Empty) { EXPECT_EQ(0, HostsPerLocalityImpl::empty()->get().size()); } +class HostsWithLocalityImpl : public Event::TestUsingSimulatedTime, public testing::Test {}; + // Validate HostsPerLocalityImpl constructors. -TEST(HostsPerLocalityImpl, Cons) { +TEST_F(HostsWithLocalityImpl, Cons) { { const HostsPerLocalityImpl hosts_per_locality; EXPECT_FALSE(hosts_per_locality.hasLocalLocality()); } MockClusterMockPrioritySet cluster; - HostSharedPtr host_0 = makeTestHost(cluster.info_, "tcp://10.0.0.1:1234", 1); - HostSharedPtr host_1 = makeTestHost(cluster.info_, "tcp://10.0.0.1:1234", 1); + HostSharedPtr host_0 = makeTestHost(cluster.info_, "tcp://10.0.0.1:1234", simTime(), 1); + HostSharedPtr host_1 = makeTestHost(cluster.info_, "tcp://10.0.0.1:1234", simTime(), 1); { std::vector<HostVector> locality_hosts = {{host_0}, {host_1}}; @@ -3027,10 +3035,10 @@ } } -TEST(HostsPerLocalityImpl, Filter) { +TEST_F(HostsWithLocalityImpl, Filter) { MockClusterMockPrioritySet cluster; - HostSharedPtr host_0 = makeTestHost(cluster.info_, "tcp://10.0.0.1:1234", 1); - HostSharedPtr host_1 = makeTestHost(cluster.info_, "tcp://10.0.0.1:1234", 1); + HostSharedPtr host_0 = makeTestHost(cluster.info_, "tcp://10.0.0.1:1234", simTime(), 1); + HostSharedPtr host_1 = makeTestHost(cluster.info_, "tcp://10.0.0.1:1234", simTime(), 1); { std::vector<HostVector> locality_hosts = {{host_0}, {host_1}}; @@ -3055,15 +3063,17 @@ } } -class HostSetImplLocalityTest : public testing::Test { +class HostSetImplLocalityTest : public Event::TestUsingSimulatedTime, public testing::Test { public: LocalityWeightsConstSharedPtr locality_weights_; HostSetImpl host_set_{0, kDefaultOverProvisioningFactor}; std::shared_ptr<MockClusterInfo> info_{new NiceMock<MockClusterInfo>()}; - HostVector hosts_{ - makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.1:81"), - makeTestHost(info_, "tcp://127.0.0.1:82"), makeTestHost(info_, "tcp://127.0.0.1:83"), - makeTestHost(info_, "tcp://127.0.0.1:84"), makeTestHost(info_, "tcp://127.0.0.1:85")}; + HostVector hosts_{makeTestHost(info_, "tcp://127.0.0.1:80", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:81", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:82", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:83", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:84", simTime()), + makeTestHost(info_, "tcp://127.0.0.1:85", simTime())}; }; // When no locality weights belong to the host set, there's an empty pick. 
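Free-standing TEST()s such as the OverProvisioningFactorTest and HostPartitionTest hunks below cannot inherit the simulated-time mixin, so they own a mock time source locally instead. A sketch of that fallback (assuming MockTimeSystem from test/mocks/common.h; the test name is hypothetical):

    TEST(StandaloneExample, BuildsHostsWithoutAFixture) {
      std::shared_ptr<MockClusterInfo> info{new NiceMock<MockClusterInfo>()};
      // No fixture, hence no simTime(); the locally owned time source must
      // outlive every host that captured the TimeSource reference.
      auto time_source = std::make_unique<NiceMock<MockTimeSystem>>();
      HostSharedPtr host = makeTestHost(info, "tcp://127.0.0.1:80", *time_source);
      EXPECT_NE(nullptr, host);
    }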
@@ -3245,9 +3255,10 @@ TEST(OverProvisioningFactorTest, LocalityPickChanges) { const uint32_t pick_0, const uint32_t pick_1) { HostSetImpl host_set(0, overprovisioning_factor); std::shared_ptr<MockClusterInfo> cluster_info{new NiceMock<MockClusterInfo>()}; - HostVector hosts{makeTestHost(cluster_info, "tcp://127.0.0.1:80"), - makeTestHost(cluster_info, "tcp://127.0.0.1:81"), - makeTestHost(cluster_info, "tcp://127.0.0.1:82")}; + auto time_source = std::make_unique<NiceMock<MockTimeSystem>>(); + HostVector hosts{makeTestHost(cluster_info, "tcp://127.0.0.1:80", *time_source), + makeTestHost(cluster_info, "tcp://127.0.0.1:81", *time_source), + makeTestHost(cluster_info, "tcp://127.0.0.1:82", *time_source)}; LocalityWeightsConstSharedPtr locality_weights{new LocalityWeights{1, 1}}; HostsPerLocalitySharedPtr hosts_per_locality = makeHostsPerLocality({{hosts[0], hosts[1]}, {hosts[2]}}); @@ -3286,9 +3297,11 @@ TEST(OverProvisioningFactorTest, LocalityPickChanges) { // Verifies that partitionHosts correctly splits hosts based on their health flags. TEST(HostPartitionTest, PartitionHosts) { std::shared_ptr<MockClusterInfo> info{new NiceMock<MockClusterInfo>()}; - HostVector hosts{ - makeTestHost(info, "tcp://127.0.0.1:80"), makeTestHost(info, "tcp://127.0.0.1:81"), - makeTestHost(info, "tcp://127.0.0.1:82"), makeTestHost(info, "tcp://127.0.0.1:83")}; + auto time_source = std::make_unique<NiceMock<MockTimeSystem>>(); + HostVector hosts{makeTestHost(info, "tcp://127.0.0.1:80", *time_source), + makeTestHost(info, "tcp://127.0.0.1:81", *time_source), + makeTestHost(info, "tcp://127.0.0.1:82", *time_source), + makeTestHost(info, "tcp://127.0.0.1:83", *time_source)}; hosts[0]->healthFlagSet(Host::HealthFlag::FAILED_ACTIVE_HC); hosts[1]->healthFlagSet(Host::HealthFlag::DEGRADED_ACTIVE_HC); diff --git a/test/common/upstream/utility.h b/test/common/upstream/utility.h index cd92eb9d37a3..2a1c153d3dde 100644 --- a/test/common/upstream/utility.h +++ b/test/common/upstream/utility.h @@ -78,49 +78,52 @@ inline envoy::config::cluster::v3::Cluster defaultStaticCluster(const std::strin } inline HostSharedPtr makeTestHost(ClusterInfoConstSharedPtr cluster, const std::string& hostname, - const std::string& url, uint32_t weight = 1) { + const std::string& url, TimeSource& time_source, + uint32_t weight = 1) { return std::make_shared<HostImpl>( cluster, hostname, Network::Utility::resolveUrl(url), nullptr, weight, envoy::config::core::v3::Locality(), envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), 0, - envoy::config::core::v3::UNKNOWN); + envoy::config::core::v3::UNKNOWN, time_source); } inline HostSharedPtr makeTestHost(ClusterInfoConstSharedPtr cluster, const std::string& url, - uint32_t weight = 1, uint32_t priority = 0) { + TimeSource& time_source, uint32_t weight = 1, + uint32_t priority = 0) { return std::make_shared<HostImpl>( cluster, "", Network::Utility::resolveUrl(url), nullptr, weight, envoy::config::core::v3::Locality(), envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), priority, - envoy::config::core::v3::UNKNOWN); + envoy::config::core::v3::UNKNOWN, time_source); } inline HostSharedPtr makeTestHost(ClusterInfoConstSharedPtr cluster, const std::string& url, const envoy::config::core::v3::Metadata& metadata, - uint32_t weight = 1) { + TimeSource& time_source, uint32_t weight = 1) { return std::make_shared<HostImpl>( cluster, "", Network::Utility::resolveUrl(url), std::make_shared<envoy::config::core::v3::Metadata>(metadata), weight, envoy::config::core::v3::Locality(), envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), 0, - envoy::config::core::v3::UNKNOWN); + envoy::config::core::v3::UNKNOWN, 
time_source); } inline HostSharedPtr makeTestHost(ClusterInfoConstSharedPtr cluster, const std::string& url, const envoy::config::endpoint::v3::Endpoint::HealthCheckConfig& health_check_config, - uint32_t weight = 1) { + TimeSource& time_source, uint32_t weight = 1) { return std::make_shared<HostImpl>(cluster, "", Network::Utility::resolveUrl(url), nullptr, weight, envoy::config::core::v3::Locality(), health_check_config, 0, - envoy::config::core::v3::UNKNOWN); + envoy::config::core::v3::UNKNOWN, time_source); } inline HostDescriptionConstSharedPtr makeTestHostDescription(ClusterInfoConstSharedPtr cluster, - const std::string& url) { + const std::string& url, + TimeSource& time_source) { return std::make_shared<HostDescriptionImpl>( cluster, "", Network::Utility::resolveUrl(url), nullptr, envoy::config::core::v3::Locality().default_instance(), envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), 0, time_source); } inline HostsPerLocalitySharedPtr makeHostsPerLocality(std::vector<HostVector>&& locality_hosts, diff --git a/test/extensions/clusters/aggregate/BUILD b/test/extensions/clusters/aggregate/BUILD index 84937bba9af6..22c6ab2a47ba 100644 --- a/test/extensions/clusters/aggregate/BUILD +++ b/test/extensions/clusters/aggregate/BUILD @@ -26,6 +26,7 @@ envoy_extension_cc_test( "//test/mocks/upstream:load_balancer_context_mock", "//test/mocks/upstream:load_balancer_mocks", "//test/test_common:environment_lib", + "//test/test_common:simulated_time_system_lib", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/clusters/aggregate/v3:pkg_cc_proto", ], ) diff --git a/test/extensions/clusters/aggregate/cluster_test.cc b/test/extensions/clusters/aggregate/cluster_test.cc index c45de45e3778..1049d5b0cb90 100644 --- a/test/extensions/clusters/aggregate/cluster_test.cc +++ b/test/extensions/clusters/aggregate/cluster_test.cc @@ -14,6 +14,7 @@ #include "test/mocks/upstream/load_balancer.h" #include "test/mocks/upstream/load_balancer_context.h" #include "test/test_common/environment.h" +#include "test/test_common/simulated_time_system.h" using testing::Eq; using testing::Return; @@ -29,7 +30,7 @@ const std::string primary_name("primary"); const std::string secondary_name("secondary"); } // namespace -class AggregateClusterTest : public testing::Test { +class AggregateClusterTest : public Event::TestUsingSimulatedTime, public testing::Test { public: AggregateClusterTest() : stats_(Upstream::ClusterInfoImpl::generateStats(stats_store_)) { ON_CALL(*primary_info_, name()).WillByDefault(ReturnRef(primary_name)); @@ -40,19 +41,20 @@ class AggregateClusterTest : public testing::Test { int degraded_hosts, int unhealthy_hosts, uint32_t priority) { Upstream::HostVector hosts; for (int i = 0; i < healthy_hosts; ++i) { - hosts.emplace_back(Upstream::makeTestHost(cluster, "tcp://127.0.0.1:80", 1, priority)); + hosts.emplace_back( + Upstream::makeTestHost(cluster, "tcp://127.0.0.1:80", simTime(), 1, priority)); } for (int i = 0; i < degraded_hosts; ++i) { Upstream::HostSharedPtr host = - Upstream::makeTestHost(cluster, "tcp://127.0.0.2:80", 1, priority); + Upstream::makeTestHost(cluster, "tcp://127.0.0.2:80", simTime(), 1, priority); host->healthFlagSet(Upstream::HostImpl::HealthFlag::DEGRADED_ACTIVE_HC); hosts.emplace_back(host); } for (int i = 0; i < unhealthy_hosts; ++i) { Upstream::HostSharedPtr host = - Upstream::makeTestHost(cluster, "tcp://127.0.0.3:80", 1, priority); + Upstream::makeTestHost(cluster, 
"tcp://127.0.0.3:80", simTime(), 1, priority); host->healthFlagSet(Upstream::HostImpl::HealthFlag::FAILED_ACTIVE_HC); hosts.emplace_back(host); } @@ -171,7 +173,8 @@ TEST_F(AggregateClusterTest, LoadBalancerTest) { // Cluster 2: // Priority 0: 33.3% // Priority 1: 33.3% - Upstream::HostSharedPtr host = Upstream::makeTestHost(primary_info_, "tcp://127.0.0.1:80"); + Upstream::HostSharedPtr host = + Upstream::makeTestHost(primary_info_, "tcp://127.0.0.1:80", simTime()); EXPECT_CALL(primary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(host)); EXPECT_CALL(secondary_load_balancer_, chooseHost(_)).WillRepeatedly(Return(nullptr)); @@ -220,7 +223,8 @@ TEST_F(AggregateClusterTest, LoadBalancerTest) { TEST_F(AggregateClusterTest, AllHostAreUnhealthyTest) { initialize(default_yaml_config_); - Upstream::HostSharedPtr host = Upstream::makeTestHost(primary_info_, "tcp://127.0.0.1:80"); + Upstream::HostSharedPtr host = + Upstream::makeTestHost(primary_info_, "tcp://127.0.0.1:80", simTime()); // Set up the HostSet with 0 healthy, 0 degraded and 2 unhealthy. setupPrimary(0, 0, 0, 2); setupPrimary(1, 0, 0, 2); @@ -258,7 +262,8 @@ TEST_F(AggregateClusterTest, AllHostAreUnhealthyTest) { TEST_F(AggregateClusterTest, ClusterInPanicTest) { initialize(default_yaml_config_); - Upstream::HostSharedPtr host = Upstream::makeTestHost(primary_info_, "tcp://127.0.0.1:80"); + Upstream::HostSharedPtr host = + Upstream::makeTestHost(primary_info_, "tcp://127.0.0.1:80", simTime()); setupPrimary(0, 1, 0, 4); setupPrimary(1, 1, 0, 4); setupSecondary(0, 1, 0, 4); @@ -341,7 +346,7 @@ TEST_F(AggregateClusterTest, ContextDeterminePriorityLoad) { const uint32_t invalid_priority = 42; Upstream::HostSharedPtr host = - Upstream::makeTestHost(primary_info_, "tcp://127.0.0.1:80", 1, invalid_priority); + Upstream::makeTestHost(primary_info_, "tcp://127.0.0.1:80", simTime(), 1, invalid_priority); // The linearized priorities are [P0, P1, S0, S1]. Upstream::HealthyAndDegradedLoad secondary_priority_1{Upstream::HealthyLoad({0, 0, 0, 100}), diff --git a/test/extensions/clusters/aggregate/cluster_update_test.cc b/test/extensions/clusters/aggregate/cluster_update_test.cc index b706e38dfbfc..9a7f25b8a8d6 100644 --- a/test/extensions/clusters/aggregate/cluster_update_test.cc +++ b/test/extensions/clusters/aggregate/cluster_update_test.cc @@ -30,7 +30,7 @@ envoy::config::bootstrap::v3::Bootstrap parseBootstrapFromV2Yaml(const std::stri return bootstrap; } -class AggregateClusterUpdateTest : public testing::Test { +class AggregateClusterUpdateTest : public Event::TestUsingSimulatedTime, public testing::Test { public: AggregateClusterUpdateTest() : http_context_(stats_store_.symbolTable()), grpc_context_(stats_store_.symbolTable()) {} @@ -51,7 +51,6 @@ class AggregateClusterUpdateTest : public testing::Test { NiceMock factory_; Upstream::ThreadLocalCluster* cluster_; - Event::SimulatedTimeSystem time_system_; NiceMock validation_context_; std::unique_ptr cluster_manager_; AccessLog::MockAccessLogManager log_manager_; @@ -140,11 +139,14 @@ TEST_F(AggregateClusterUpdateTest, LoadBalancingTest) { EXPECT_NE(nullptr, secondary); // Set up the HostSet with 1 healthy, 1 degraded and 1 unhealthy. 
- Upstream::HostSharedPtr host1 = Upstream::makeTestHost(primary->info(), "tcp://127.0.0.1:80"); + Upstream::HostSharedPtr host1 = + Upstream::makeTestHost(primary->info(), "tcp://127.0.0.1:80", simTime()); host1->healthFlagSet(Upstream::HostImpl::HealthFlag::DEGRADED_ACTIVE_HC); - Upstream::HostSharedPtr host2 = Upstream::makeTestHost(primary->info(), "tcp://127.0.0.2:80"); + Upstream::HostSharedPtr host2 = + Upstream::makeTestHost(primary->info(), "tcp://127.0.0.2:80", simTime()); host2->healthFlagSet(Upstream::HostImpl::HealthFlag::FAILED_ACTIVE_HC); - Upstream::HostSharedPtr host3 = Upstream::makeTestHost(primary->info(), "tcp://127.0.0.3:80"); + Upstream::HostSharedPtr host3 = + Upstream::makeTestHost(primary->info(), "tcp://127.0.0.3:80", simTime()); Upstream::Cluster& cluster = cluster_manager_->activeClusters().find("primary")->second; cluster.prioritySet().updateHosts( 0, @@ -154,11 +156,14 @@ TEST_F(AggregateClusterUpdateTest, LoadBalancingTest) { nullptr, {host1, host2, host3}, {}, 100); // Set up the HostSet with 1 healthy, 1 degraded and 1 unhealthy. - Upstream::HostSharedPtr host4 = Upstream::makeTestHost(secondary->info(), "tcp://127.0.0.4:80"); + Upstream::HostSharedPtr host4 = + Upstream::makeTestHost(secondary->info(), "tcp://127.0.0.4:80", simTime()); host4->healthFlagSet(Upstream::HostImpl::HealthFlag::DEGRADED_ACTIVE_HC); - Upstream::HostSharedPtr host5 = Upstream::makeTestHost(secondary->info(), "tcp://127.0.0.5:80"); + Upstream::HostSharedPtr host5 = + Upstream::makeTestHost(secondary->info(), "tcp://127.0.0.5:80", simTime()); host5->healthFlagSet(Upstream::HostImpl::HealthFlag::FAILED_ACTIVE_HC); - Upstream::HostSharedPtr host6 = Upstream::makeTestHost(secondary->info(), "tcp://127.0.0.6:80"); + Upstream::HostSharedPtr host6 = + Upstream::makeTestHost(secondary->info(), "tcp://127.0.0.6:80", simTime()); Upstream::Cluster& cluster1 = cluster_manager_->activeClusters().find("secondary")->second; cluster1.prioritySet().updateHosts( 0, @@ -192,11 +197,14 @@ TEST_F(AggregateClusterUpdateTest, LoadBalancingTest) { EXPECT_EQ(nullptr, cluster_manager_->get("primary")); // Set up the HostSet with 1 healthy, 1 degraded and 1 unhealthy. - Upstream::HostSharedPtr host7 = Upstream::makeTestHost(secondary->info(), "tcp://127.0.0.7:80"); + Upstream::HostSharedPtr host7 = + Upstream::makeTestHost(secondary->info(), "tcp://127.0.0.7:80", simTime()); host7->healthFlagSet(Upstream::HostImpl::HealthFlag::DEGRADED_ACTIVE_HC); - Upstream::HostSharedPtr host8 = Upstream::makeTestHost(secondary->info(), "tcp://127.0.0.8:80"); + Upstream::HostSharedPtr host8 = + Upstream::makeTestHost(secondary->info(), "tcp://127.0.0.8:80", simTime()); host8->healthFlagSet(Upstream::HostImpl::HealthFlag::FAILED_ACTIVE_HC); - Upstream::HostSharedPtr host9 = Upstream::makeTestHost(secondary->info(), "tcp://127.0.0.9:80"); + Upstream::HostSharedPtr host9 = + Upstream::makeTestHost(secondary->info(), "tcp://127.0.0.9:80", simTime()); cluster1.prioritySet().updateHosts( 1, Upstream::HostSetImpl::partitionHosts( @@ -275,11 +283,14 @@ TEST_F(AggregateClusterUpdateTest, InitializeAggregateClusterAfterOtherClusters) EXPECT_EQ("127.0.0.1:80", host->address()->asString()); // Set up the HostSet with 1 healthy, 1 degraded and 1 unhealthy. 
- Upstream::HostSharedPtr host1 = Upstream::makeTestHost(primary->info(), "tcp://127.0.0.1:80"); + Upstream::HostSharedPtr host1 = + Upstream::makeTestHost(primary->info(), "tcp://127.0.0.1:80", simTime()); host1->healthFlagSet(Upstream::HostImpl::HealthFlag::DEGRADED_ACTIVE_HC); - Upstream::HostSharedPtr host2 = Upstream::makeTestHost(primary->info(), "tcp://127.0.0.2:80"); + Upstream::HostSharedPtr host2 = + Upstream::makeTestHost(primary->info(), "tcp://127.0.0.2:80", simTime()); host2->healthFlagSet(Upstream::HostImpl::HealthFlag::FAILED_ACTIVE_HC); - Upstream::HostSharedPtr host3 = Upstream::makeTestHost(primary->info(), "tcp://127.0.0.3:80"); + Upstream::HostSharedPtr host3 = + Upstream::makeTestHost(primary->info(), "tcp://127.0.0.3:80", simTime()); Upstream::Cluster& cluster = cluster_manager_->activeClusters().find("primary")->second; cluster.prioritySet().updateHosts( 0, diff --git a/test/extensions/clusters/redis/BUILD b/test/extensions/clusters/redis/BUILD index ecbb05097215..99c5d54c3ecb 100644 --- a/test/extensions/clusters/redis/BUILD +++ b/test/extensions/clusters/redis/BUILD @@ -83,6 +83,7 @@ envoy_extension_cc_test( "//test/mocks/ssl:ssl_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/mocks/upstream:cluster_info_mocks", + "//test/test_common:simulated_time_system_lib", "//test/test_common:utility_lib", ], ) diff --git a/test/extensions/clusters/redis/redis_cluster_lb_test.cc b/test/extensions/clusters/redis/redis_cluster_lb_test.cc index bf1deed4b92a..a0ed75aedf1f 100644 --- a/test/extensions/clusters/redis/redis_cluster_lb_test.cc +++ b/test/extensions/clusters/redis/redis_cluster_lb_test.cc @@ -7,6 +7,7 @@ #include "test/common/upstream/utility.h" #include "test/mocks/common.h" #include "test/mocks/upstream/cluster_info.h" +#include "test/test_common/simulated_time_system.h" using testing::Return; @@ -37,7 +38,7 @@ class TestLoadBalancerContext : public RedisLoadBalancerContext, NetworkFilters::Common::Redis::Client::ReadPolicy read_policy_; }; -class RedisClusterLoadBalancerTest : public testing::Test { +class RedisClusterLoadBalancerTest : public Event::TestUsingSimulatedTime, public testing::Test { public: RedisClusterLoadBalancerTest() = default; @@ -102,9 +103,9 @@ TEST_F(RedisClusterLoadBalancerTest, NoHost) { // Works correctly with empty context TEST_F(RedisClusterLoadBalancerTest, NoHash) { - Upstream::HostVector hosts{Upstream::makeTestHost(info_, "tcp://127.0.0.1:90"), - Upstream::makeTestHost(info_, "tcp://127.0.0.1:91"), - Upstream::makeTestHost(info_, "tcp://127.0.0.1:92")}; + Upstream::HostVector hosts{Upstream::makeTestHost(info_, "tcp://127.0.0.1:90", simTime()), + Upstream::makeTestHost(info_, "tcp://127.0.0.1:91", simTime()), + Upstream::makeTestHost(info_, "tcp://127.0.0.1:92", simTime())}; ClusterSlotsPtr slots = std::make_unique<std::vector<ClusterSlot>>(std::vector<ClusterSlot>{ ClusterSlot(0, 1000, hosts[0]->address()), @@ -123,9 +124,9 @@ }; TEST_F(RedisClusterLoadBalancerTest, Basic) { - Upstream::HostVector hosts{Upstream::makeTestHost(info_, "tcp://127.0.0.1:90"), - Upstream::makeTestHost(info_, "tcp://127.0.0.1:91"), - Upstream::makeTestHost(info_, "tcp://127.0.0.1:92")}; + Upstream::HostVector hosts{Upstream::makeTestHost(info_, "tcp://127.0.0.1:90", simTime()), + Upstream::makeTestHost(info_, "tcp://127.0.0.1:91", simTime()), + Upstream::makeTestHost(info_, "tcp://127.0.0.1:92", simTime())}; ClusterSlotsPtr slots = std::make_unique<std::vector<ClusterSlot>>(std::vector<ClusterSlot>{ ClusterSlot(0, 1000, hosts[0]->address()), @@ -149,10 +150,10 @@ 
TEST_F(RedisClusterLoadBalancerTest, Basic) { TEST_F(RedisClusterLoadBalancerTest, ReadStrategiesHealthy) { Upstream::HostVector hosts{ - Upstream::makeTestHost(info_, "tcp://127.0.0.1:90"), - Upstream::makeTestHost(info_, "tcp://127.0.0.1:91"), - Upstream::makeTestHost(info_, "tcp://127.0.0.2:90"), - Upstream::makeTestHost(info_, "tcp://127.0.0.2:91"), + Upstream::makeTestHost(info_, "tcp://127.0.0.1:90", simTime()), + Upstream::makeTestHost(info_, "tcp://127.0.0.1:91", simTime()), + Upstream::makeTestHost(info_, "tcp://127.0.0.2:90", simTime()), + Upstream::makeTestHost(info_, "tcp://127.0.0.2:91", simTime()), }; ClusterSlotsPtr slots = std::make_unique<std::vector<ClusterSlot>>(std::vector<ClusterSlot>{ @@ -191,10 +192,10 @@ TEST_F(RedisClusterLoadBalancerTest, ReadStrategiesHealthy) { TEST_F(RedisClusterLoadBalancerTest, ReadStrategiesUnhealthyPrimary) { Upstream::HostVector hosts{ - Upstream::makeTestHost(info_, "tcp://127.0.0.1:90"), - Upstream::makeTestHost(info_, "tcp://127.0.0.1:91"), - Upstream::makeTestHost(info_, "tcp://127.0.0.2:90"), - Upstream::makeTestHost(info_, "tcp://127.0.0.2:91"), + Upstream::makeTestHost(info_, "tcp://127.0.0.1:90", simTime()), + Upstream::makeTestHost(info_, "tcp://127.0.0.1:91", simTime()), + Upstream::makeTestHost(info_, "tcp://127.0.0.2:90", simTime()), + Upstream::makeTestHost(info_, "tcp://127.0.0.2:91", simTime()), }; ClusterSlotsPtr slots = std::make_unique<std::vector<ClusterSlot>>(std::vector<ClusterSlot>{ @@ -238,10 +239,10 @@ TEST_F(RedisClusterLoadBalancerTest, ReadStrategiesUnhealthyPrimary) { TEST_F(RedisClusterLoadBalancerTest, ReadStrategiesUnhealthyReplica) { Upstream::HostVector hosts{ - Upstream::makeTestHost(info_, "tcp://127.0.0.1:90"), - Upstream::makeTestHost(info_, "tcp://127.0.0.1:91"), - Upstream::makeTestHost(info_, "tcp://127.0.0.2:90"), - Upstream::makeTestHost(info_, "tcp://127.0.0.2:91"), + Upstream::makeTestHost(info_, "tcp://127.0.0.1:90", simTime()), + Upstream::makeTestHost(info_, "tcp://127.0.0.1:91", simTime()), + Upstream::makeTestHost(info_, "tcp://127.0.0.2:90", simTime()), + Upstream::makeTestHost(info_, "tcp://127.0.0.2:91", simTime()), }; ClusterSlotsPtr slots = std::make_unique<std::vector<ClusterSlot>>(std::vector<ClusterSlot>{ @@ -284,8 +285,8 @@ TEST_F(RedisClusterLoadBalancerTest, ReadStrategiesUnhealthyReplica) { } TEST_F(RedisClusterLoadBalancerTest, ReadStrategiesNoReplica) { - Upstream::HostVector hosts{Upstream::makeTestHost(info_, "tcp://127.0.0.1:90"), - Upstream::makeTestHost(info_, "tcp://127.0.0.1:91")}; + Upstream::HostVector hosts{Upstream::makeTestHost(info_, "tcp://127.0.0.1:90", simTime()), + Upstream::makeTestHost(info_, "tcp://127.0.0.1:91", simTime())}; ClusterSlotsPtr slots = std::make_unique<std::vector<ClusterSlot>>(std::vector<ClusterSlot>{ ClusterSlot(0, 2000, hosts[0]->address()), @@ -316,8 +317,8 @@ TEST_F(RedisClusterLoadBalancerTest, ReadStrategiesNoReplica) { } TEST_F(RedisClusterLoadBalancerTest, ClusterSlotUpdate) { - Upstream::HostVector hosts{Upstream::makeTestHost(info_, "tcp://127.0.0.1:90"), - Upstream::makeTestHost(info_, "tcp://127.0.0.1:91")}; + Upstream::HostVector hosts{Upstream::makeTestHost(info_, "tcp://127.0.0.1:90", simTime()), + Upstream::makeTestHost(info_, "tcp://127.0.0.1:91", simTime())}; ClusterSlotsPtr slots = std::make_unique<std::vector<ClusterSlot>>(std::vector<ClusterSlot>{ ClusterSlot(0, 1000, hosts[0]->address()), ClusterSlot(1001, 16383, hosts[1]->address())}); Upstream::HostMap all_hosts{{hosts[0]->address()->asString(), hosts[0]}, @@ -347,9 +348,9 @@ } TEST_F(RedisClusterLoadBalancerTest, ClusterSlotNoUpdate) { - Upstream::HostVector hosts{Upstream::makeTestHost(info_, 
"tcp://127.0.0.1:90"), - Upstream::makeTestHost(info_, "tcp://127.0.0.1:91"), - Upstream::makeTestHost(info_, "tcp://127.0.0.1:92")}; + Upstream::HostVector hosts{Upstream::makeTestHost(info_, "tcp://127.0.0.1:90", simTime()), + Upstream::makeTestHost(info_, "tcp://127.0.0.1:91", simTime()), + Upstream::makeTestHost(info_, "tcp://127.0.0.1:92", simTime())}; ClusterSlotsPtr slots = std::make_unique>(std::vector{ ClusterSlot(0, 1000, hosts[0]->address()), diff --git a/test/extensions/filters/http/fault/fault_filter_integration_test.cc b/test/extensions/filters/http/fault/fault_filter_integration_test.cc index 0a1a013b6bfc..836192f63488 100644 --- a/test/extensions/filters/http/fault/fault_filter_integration_test.cc +++ b/test/extensions/filters/http/fault/fault_filter_integration_test.cc @@ -1,4 +1,5 @@ #include "test/integration/http_protocol_integration.h" +#include "test/test_common/simulated_time_system.h" #include "gtest/gtest.h" diff --git a/test/extensions/filters/network/kafka/broker/integration_test/envoy_config_yaml.j2 b/test/extensions/filters/network/kafka/broker/integration_test/envoy_config_yaml.j2 index 90806cc7076c..085413792af7 100644 --- a/test/extensions/filters/network/kafka/broker/integration_test/envoy_config_yaml.j2 +++ b/test/extensions/filters/network/kafka/broker/integration_test/envoy_config_yaml.j2 @@ -8,22 +8,25 @@ static_resources: - filters: - name: kafka typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.kafka_broker.v2alpha1.KafkaBroker + "@type": type.googleapis.com/envoy.extensions.filters.network.kafka_broker.v3.KafkaBroker stat_prefix: testfilter - name: tcp typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy stat_prefix: ingress_tcp cluster: localinstallation clusters: - name: localinstallation connect_timeout: 0.25s - type: strict_dns - lb_policy: round_robin - hosts: - - socket_address: - address: 127.0.0.1 - port_value: {{ data['kafka_real_port'] }} + load_assignment: + cluster_name: localinstallation + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: {{ data['kafka_real_port'] }} admin: access_log_path: /dev/null profile_path: /dev/null diff --git a/test/extensions/filters/network/kafka/serialization_test.cc b/test/extensions/filters/network/kafka/serialization_test.cc index 2cc656926d64..c7e28d14ca87 100644 --- a/test/extensions/filters/network/kafka/serialization_test.cc +++ b/test/extensions/filters/network/kafka/serialization_test.cc @@ -101,10 +101,12 @@ TEST(VarUInt32Deserializer, ShouldDeserializeEdgeValues) { Buffer::OwnedImpl buffer; // when + const uint32_t expected_size = encoder.computeCompactSize(values[i]); const uint32_t written = encoder.encodeCompact(values[i], buffer); // then ASSERT_EQ(written, i + 1); + ASSERT_EQ(written, expected_size); absl::string_view data = {getRawData(buffer), 1024}; // All bits in lower bytes need to be set. 
for (auto j = 0; j + 1 < i; ++j) { @@ -434,6 +436,17 @@ TEST(NullableCompactArrayDeserializer, ShouldConsumeNullArray) { NullableCompactArrayDeserializer<Int32Deserializer>>(value); } +TEST(NullableCompactArrayDeserializer, ShouldConsumeCorrectAmountOfDataForLargeInput) { + std::vector<int32_t> raw; + raw.reserve(4096); + for (int32_t i = 0; i < 4096; ++i) { + raw.push_back(i); + } + const NullableArray<int32_t> value{raw}; + serializeCompactThenDeserializeAndCheckEquality< + NullableCompactArrayDeserializer<Int32Deserializer>>(value); +} + // Tagged fields. TEST(TaggedFieldDeserializer, ShouldConsumeCorrectAmountOfData) { diff --git a/test/extensions/filters/network/kafka/serialization_utilities.h b/test/extensions/filters/network/kafka/serialization_utilities.h index e9fb2a4491b8..5c252ab4a870 100644 --- a/test/extensions/filters/network/kafka/serialization_utilities.h +++ b/test/extensions/filters/network/kafka/serialization_utilities.h @@ -114,12 +114,16 @@ void serializeCompactThenDeserializeAndCheckEqualityInOneGo(AT expected) { Buffer::OwnedImpl buffer; EncodingContext encoder{-1}; + const uint32_t expected_written_size = encoder.computeCompactSize(expected); const uint32_t written = encoder.encodeCompact(expected, buffer); + ASSERT_EQ(written, expected_written_size); // Insert garbage after serialized payload. const uint32_t garbage_size = encoder.encode(Bytes(10000), buffer); + const char* raw_buffer_ptr = + reinterpret_cast<const char*>(buffer.linearize(written + garbage_size)); // Tell parser that there is more data, it should never consume more than written. - const absl::string_view orig_data = {getRawData(buffer), written + garbage_size}; + const absl::string_view orig_data = {raw_buffer_ptr, written + garbage_size}; absl::string_view data = orig_data; // when @@ -147,11 +151,16 @@ void serializeCompactThenDeserializeAndCheckEqualityWithChunks(AT expected) { Buffer::OwnedImpl buffer; EncodingContext encoder{-1}; + const uint32_t expected_written_size = encoder.computeCompactSize(expected); const uint32_t written = encoder.encodeCompact(expected, buffer); + ASSERT_EQ(written, expected_written_size); // Insert garbage after serialized payload. const uint32_t garbage_size = encoder.encode(Bytes(10000), buffer); - const absl::string_view orig_data = {getRawData(buffer), written + garbage_size}; + const char* raw_buffer_ptr = + reinterpret_cast<const char*>(buffer.linearize(written + garbage_size)); + // Tell parser that there is more data, it should never consume more than written.
+ const absl::string_view orig_data = {raw_buffer_ptr, written + garbage_size}; // when absl::string_view data = orig_data; diff --git a/test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc b/test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc index 3470b4c34818..45013b1b2dd5 100644 --- a/test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/rocketmq_proxy/conn_manager_test.cc @@ -39,7 +39,7 @@ class TestConfigImpl : public ConfigImpl { RocketmqFilterStats stats_; }; -class RocketmqConnectionManagerTest : public testing::Test { +class RocketmqConnectionManagerTest : public Event::TestUsingSimulatedTime, public testing::Test { public: RocketmqConnectionManagerTest() : stats_(RocketmqFilterStats::generateStats("test.", store_)) {} @@ -93,7 +93,8 @@ class RocketmqConnectionManagerTest : public testing::Test { std::shared_ptr<Upstream::MockClusterInfo> cluster_info_{ new NiceMock<Upstream::MockClusterInfo>()}; - Upstream::HostSharedPtr host_{Upstream::makeTestHost(cluster_info_, "tcp://127.0.0.1:80")}; + Upstream::HostSharedPtr host_{ + Upstream::makeTestHost(cluster_info_, "tcp://127.0.0.1:80", simTime())}; Upstream::PrioritySetImpl priority_set_; NiceMock<Upstream::MockThreadLocalCluster> thread_local_cluster_; }; diff --git a/test/extensions/filters/network/thrift_proxy/BUILD b/test/extensions/filters/network/thrift_proxy/BUILD index 0d842f34a5ac..24eb2f193703 100644 --- a/test/extensions/filters/network/thrift_proxy/BUILD +++ b/test/extensions/filters/network/thrift_proxy/BUILD @@ -333,7 +333,6 @@ envoy_extension_cc_test( "//test/extensions/filters/network/thrift_proxy/driver:generate_fixture", ], extension_name = "envoy.filters.network.thrift_proxy", - tags = ["fails_on_windows"], deps = [ ":integration_lib", ":utility_lib", @@ -350,7 +349,6 @@ envoy_extension_cc_test( "//test/extensions/filters/network/thrift_proxy/driver:generate_fixture", ], extension_name = "envoy.filters.network.thrift_proxy", - tags = ["fails_on_windows"], deps = [ ":integration_lib", ":utility_lib", diff --git a/test/extensions/filters/network/thrift_proxy/driver/BUILD b/test/extensions/filters/network/thrift_proxy/driver/BUILD index 4e5d0f47d1d4..e27580ed82cd 100644 --- a/test/extensions/filters/network/thrift_proxy/driver/BUILD +++ b/test/extensions/filters/network/thrift_proxy/driver/BUILD @@ -1,5 +1,6 @@ load("@rules_python//python:defs.bzl", "py_binary") load("//bazel:envoy_build_system.bzl", "envoy_package") +load("@thrift_pip3//:requirements.bzl", "requirement") licenses(["notice"]) # Apache 2 @@ -23,6 +24,7 @@ py_binary( "//test/extensions/filters/network/thrift_proxy/driver/finagle:finagle_lib", "//test/extensions/filters/network/thrift_proxy/driver/generated/example:example_lib", "@com_github_twitter_common_rpc//:twitter_common_rpc", + requirement("six"), ], ) @@ -35,5 +37,6 @@ py_binary( "//test/extensions/filters/network/thrift_proxy/driver/finagle:finagle_lib", "//test/extensions/filters/network/thrift_proxy/driver/generated/example:example_lib", "@com_github_twitter_common_rpc//:twitter_common_rpc", + requirement("six"), ], ) diff --git a/test/extensions/filters/network/thrift_proxy/driver/client.py b/test/extensions/filters/network/thrift_proxy/driver/client.py index f323cd7f3a49..544e5acff16a 100755 --- a/test/extensions/filters/network/thrift_proxy/driver/client.py +++ b/test/extensions/filters/network/thrift_proxy/driver/client.py @@ -14,6 +14,11 @@ from fbthrift import THeaderTransport from twitter.common.rpc.finagle.protocol import TFinagleProtocol +# On Windows we run this test on Python3 +if
sys.version_info[0] != 2: + sys.stdin.reconfigure(encoding='utf-8') + sys.stdout.reconfigure(encoding='utf-8') + class TRecordingTransport(TTransport.TTransportBase): diff --git a/test/extensions/filters/network/thrift_proxy/driver/generate_fixture.sh b/test/extensions/filters/network/thrift_proxy/driver/generate_fixture.sh index a4c58cb798d8..6304d28c8314 100755 --- a/test/extensions/filters/network/thrift_proxy/driver/generate_fixture.sh +++ b/test/extensions/filters/network/thrift_proxy/driver/generate_fixture.sh @@ -7,20 +7,11 @@ set -e function usage() { - echo "Usage: $0 -s [multiplex-service] -H [headers] method [param...]" + echo "Usage: $0 -s [multiplex-service] -H [headers] -T [TempPath] method [param...]" echo "where mode is success, exception, or idl-exception" exit 1 } -FIXTURE_DIR="${TEST_TMPDIR}" -mkdir -p "${FIXTURE_DIR}" - -DRIVER_DIR="${TEST_SRCDIR}/envoy/test/extensions/filters/network/thrift_proxy/driver" - -if [[ -z "${TEST_UDSDIR}" ]]; then - TEST_UDSDIR=$(mktemp -d /tmp/envoy_test_thrift.XXXXXX) -fi - MODE="$1" TRANSPORT="$2" PROTOCOL="$3" @@ -35,7 +26,8 @@ fi MULTIPLEX= HEADERS= -while getopts ":s:H:" opt; do +TEST_TMPDIR= +while getopts ":s:H:T:" opt; do case ${opt} in s) MULTIPLEX=$OPTARG @@ -43,7 +35,9 @@ while getopts ":s:H:T:" opt; do H) HEADERS=$OPTARG ;; - + T) + TEST_TMPDIR=$OPTARG + ;; \?) echo "Invalid Option: -$OPTARG" >&2 exit 1 @@ -62,11 +56,33 @@ if [[ "${METHOD}" == "" ]]; then fi shift -SOCKET="${TEST_UDSDIR}/fixture.sock" -rm -f "${SOCKET}" +FIXTURE_DIR="${TEST_TMPDIR}" +mkdir -p "${FIXTURE_DIR}" + +DRIVER_DIR="${TEST_SRCDIR}/envoy/test/extensions/filters/network/thrift_proxy/driver" + +# On UNIX, Python supports AF_UNIX sockets, which are more reliable and efficient for communication +# between the client and the server, so we use them. On Windows, we find a random unused port +# on 127.0.0.1 and let the communication happen via TCP. +SOCKET="" +if [[ "$OSTYPE" == "msys" ]]; then + while + port=$(shuf -n 1 -i 49152-65535) + netstat -atn | grep -q "$port" >> /dev/null + do + continue + done + SOCKET="127.0.0.1:${port}" +else + if [[ -z "${TEST_UDSDIR}" ]]; then + TEST_UDSDIR=$(mktemp -d /tmp/envoy_test_thrift.XXXXXX) + fi + SOCKET="${TEST_UDSDIR}/fixture.sock" + rm -f "${SOCKET}" +fi +echo "Using address ${SOCKET}" SERVICE_FLAGS=("--addr" "${SOCKET}" - "--unix" "--response" "${MODE}" "--transport" "${TRANSPORT}" "--protocol" "${PROTOCOL}") @@ -83,26 +99,41 @@ else fi # start server -"${DRIVER_DIR}/server" "${SERVICE_FLAGS[@]}" & -SERVER_PID="$!" +if [[ "$OSTYPE" == "msys" ]]; then + echo "${SERVICE_FLAGS[@]}" + "${DRIVER_DIR}/server.exe" "${SERVICE_FLAGS[@]}" & + SERVER_PID="$!" +else + SERVICE_FLAGS+=("--unix") + "${DRIVER_DIR}/server" "${SERVICE_FLAGS[@]}" & + SERVER_PID="$!" + while [[ ! -a "${SOCKET}" ]]; do + sleep 0.1 + + if ! kill -0 "${SERVER_PID}"; then + echo "server failed to start" + exit 1 + fi + done +fi trap 'kill ${SERVER_PID}' EXIT; -while [[ ! -a "${SOCKET}" ]]; do - sleep 0.1 - - if !
kill -0 "${SERVER_PID}"; then - echo "server failed to start" - exit 1 - fi -done +CLIENT_COMMAND="" +if [[ "$OSTYPE" == "msys" ]]; then + CLIENT_COMMAND="${DRIVER_DIR}/client.exe" +else + CLIENT_COMMAND="${DRIVER_DIR}/client" +fi if [[ -n "$HEADERS" ]]; then SERVICE_FLAGS+=("--headers") SERVICE_FLAGS+=("$HEADERS") fi -"${DRIVER_DIR}/client" "${SERVICE_FLAGS[@]}" \ - --request "${REQUEST_FILE}" \ - --response "${RESPONSE_FILE}" \ - "${METHOD}" "$@" +echo "${METHOD}" "$@" + +$CLIENT_COMMAND "${SERVICE_FLAGS[@]}" \ + --request "${REQUEST_FILE}" \ + --response "${RESPONSE_FILE}" \ + "${METHOD}" "$@" diff --git a/test/extensions/filters/network/thrift_proxy/driver/server.py b/test/extensions/filters/network/thrift_proxy/driver/server.py index e4d5a5c7cc5b..650280919eaa 100755 --- a/test/extensions/filters/network/thrift_proxy/driver/server.py +++ b/test/extensions/filters/network/thrift_proxy/driver/server.py @@ -15,6 +15,11 @@ from fbthrift import THeaderTransport from finagle import TFinagleServerProcessor, TFinagleServerProtocol +# On Windows we run this test on Python3 +if sys.version_info[0] != 2: + sys.stdin.reconfigure(encoding='utf-8') + sys.stdout.reconfigure(encoding='utf-8') + class SuccessHandler: diff --git a/test/extensions/filters/network/thrift_proxy/integration.cc b/test/extensions/filters/network/thrift_proxy/integration.cc index 62f33950f474..cd907b4e084a 100644 --- a/test/extensions/filters/network/thrift_proxy/integration.cc +++ b/test/extensions/filters/network/thrift_proxy/integration.cc @@ -80,6 +80,10 @@ void BaseThriftIntegrationTest::preparePayloads(const PayloadOptions& options, args.push_back(absl::StrJoin(headers, ",")); } + auto temp_path = TestEnvironment::temporaryDirectory(); + args.push_back("-T"); + args.push_back(temp_path); + args.push_back(options.method_name_); std::copy(options.method_args_.begin(), options.method_args_.end(), std::back_inserter(args)); diff --git a/test/extensions/filters/network/thrift_proxy/integration_test.cc b/test/extensions/filters/network/thrift_proxy/integration_test.cc index 2766bf61d540..c3ea956fb23f 100644 --- a/test/extensions/filters/network/thrift_proxy/integration_test.cc +++ b/test/extensions/filters/network/thrift_proxy/integration_test.cc @@ -401,6 +401,9 @@ INSTANTIATE_TEST_SUITE_P(FramedTwitter, ThriftTwitterConnManagerIntegrationTest, // Because of the protocol upgrade requests and the difficulty of separating them, we test this // protocol independently. TEST_P(ThriftTwitterConnManagerIntegrationTest, Success) { +// This test relies on an old Apache Thrift Python package +// that is only available in Python2. Disabling the test on Windows. 
+#ifndef WIN32 initializeCall(DriverMode::Success); uint32_t upgrade_request_size = request_bytes_.peekBEInt<uint32_t>() + 4; @@ -457,6 +460,7 @@ TEST_P(ThriftTwitterConnManagerIntegrationTest, Success) { EXPECT_EQ(2U, counter->value()); counter = test_server_->counter("thrift.thrift_stats.response_success"); EXPECT_EQ(2U, counter->value()); +#endif } } // namespace ThriftProxy diff --git a/test/extensions/health_checkers/redis/redis_test.cc b/test/extensions/health_checkers/redis/redis_test.cc index 47e37851b7fe..62fbf4395ba5 100644 --- a/test/extensions/health_checkers/redis/redis_test.cc +++ b/test/extensions/health_checkers/redis/redis_test.cc @@ -34,7 +34,8 @@ namespace HealthCheckers { namespace RedisHealthChecker { class RedisHealthCheckerTest - : public testing::Test, + : public Event::TestUsingSimulatedTime, + public testing::Test, public Extensions::NetworkFilters::Common::Redis::Client::ClientFactory { public: RedisHealthCheckerTest() @@ -268,7 +269,8 @@ class RedisHealthCheckerTest } void exerciseStubs() { - Upstream::HostSharedPtr host = Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:100"); + Upstream::HostSharedPtr host = + Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:100", simTime()); RedisHealthChecker::RedisActiveHealthCheckSessionPtr session = std::make_unique<RedisHealthChecker::RedisActiveHealthCheckSession>(*health_checker_, host); @@ -308,7 +310,7 @@ TEST_F(RedisHealthCheckerTest, PingWithAuth) { setupWithAuth(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectClientCreate(); @@ -356,7 +358,7 @@ TEST_F(RedisHealthCheckerTest, ExistsWithAuth) { setupExistsHealthcheckWithAuth(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectClientCreate(); @@ -402,7 +404,7 @@ TEST_F(RedisHealthCheckerTest, PingAndVariousFailures) { exerciseStubs(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectClientCreate(); @@ -470,7 +472,7 @@ TEST_F(RedisHealthCheckerTest, FailuresLogging) { setupAlwaysLogHealthCheckFailures(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectClientCreate(); @@ -528,7 +530,7 @@ TEST_F(RedisHealthCheckerTest, LogInitialFailure) { setup(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectClientCreate(); @@ -579,7 +581,7 @@ TEST_F(RedisHealthCheckerTest, DEPRECATED_FEATURE_TEST(ExistsDeprecated)) { setupExistsHealthcheckDeprecated(false); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectClientCreate(); @@ -631,7 +633,7 @@ TEST_F(RedisHealthCheckerTest, Exists) { setupExistsHealthcheck(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { -
Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectClientCreate(); @@ -683,7 +685,7 @@ TEST_F(RedisHealthCheckerTest, ExistsRedirected) { setupExistsHealthcheck(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectClientCreate(); @@ -727,7 +729,7 @@ TEST_F(RedisHealthCheckerTest, NoConnectionReuse) { setupDontReuseConnection(); cluster_->prioritySet().getMockHostSet(0)->hosts_ = { - Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80")}; + Upstream::makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; expectSessionCreate(); expectClientCreate(); diff --git a/test/extensions/stats_sinks/common/statsd/statsd_test.cc b/test/extensions/stats_sinks/common/statsd/statsd_test.cc index 26cd52fa7348..94842c061d1a 100644 --- a/test/extensions/stats_sinks/common/statsd/statsd_test.cc +++ b/test/extensions/stats_sinks/common/statsd/statsd_test.cc @@ -33,7 +33,7 @@ namespace Common { namespace Statsd { namespace { -class TcpStatsdSinkTest : public testing::Test { +class TcpStatsdSinkTest : public Event::TestUsingSimulatedTime, public testing::Test { public: TcpStatsdSinkTest() { sink_ = std::make_unique<TcpStatsdSink>( @@ -46,7 +46,7 @@ class TcpStatsdSinkTest : public testing::Test { Upstream::MockHost::MockCreateConnectionData conn_info; conn_info.connection_ = connection_; conn_info.host_description_ = Upstream::makeTestHost( - std::make_unique<NiceMock<Upstream::MockClusterInfo>>(), "tcp://127.0.0.1:80"); + std::make_unique<NiceMock<Upstream::MockClusterInfo>>(), "tcp://127.0.0.1:80", simTime()); EXPECT_CALL(cluster_manager_, tcpConnForCluster_("fake_cluster", _)) .WillOnce(Return(conn_info)); diff --git a/test/integration/BUILD b/test/integration/BUILD index ea3b24a6c0a5..2496c954792e 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -609,9 +609,11 @@ envoy_cc_test_library( "//source/common/http:codec_client_lib", "//source/common/stats:isolated_store_lib", "//test/common/upstream:utility_lib", + "//test/mocks/event:event_mocks", "//test/mocks/upstream:cluster_info_mocks", "//test/test_common:network_utility_lib", "//test/test_common:printers_lib", + "//test/test_common:simulated_time_system_lib", "//test/test_common:test_time_lib", ], ) @@ -712,6 +714,7 @@ envoy_cc_test_library( "//test/mocks/server:transport_socket_factory_context_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", + "//test/test_common:simulated_time_system_lib", "//test/test_common:test_time_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", "@envoy_api//envoy/api/v2:pkg_cc_proto", @@ -1552,7 +1555,6 @@ envoy_cc_test( "//source/extensions/filters/network/tcp_proxy:config", "//test/config:utility_lib", "//test/test_common:logging_lib", - "//test/test_common:simulated_time_system_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", ], ) diff --git a/test/integration/clusters/custom_static_cluster.cc b/test/integration/clusters/custom_static_cluster.cc index cb073630cdd6..a3f4397ee982 100644 --- a/test/integration/clusters/custom_static_cluster.cc +++ b/test/integration/clusters/custom_static_cluster.cc @@ -27,7 +27,7 @@ Upstream::HostSharedPtr CustomStaticCluster::makeHost() { std::make_shared<const envoy::config::core::v3::Metadata>(info()->metadata()), 1, envoy::config::core::v3::Locality::default_instance(), envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(),
priority_, - envoy::config::core::v3::UNKNOWN)}; + envoy::config::core::v3::UNKNOWN, time_source_)}; } Upstream::ThreadAwareLoadBalancerPtr CustomStaticCluster::threadAwareLb() { diff --git a/test/integration/clusters/custom_static_cluster.h b/test/integration/clusters/custom_static_cluster.h index 9691f99234ea..95820e4ad70f 100644 --- a/test/integration/clusters/custom_static_cluster.h +++ b/test/integration/clusters/custom_static_cluster.h @@ -27,7 +27,8 @@ class CustomStaticCluster : public Upstream::ClusterImplBase { Server::Configuration::TransportSocketFactoryContextImpl& factory_context, Stats::ScopePtr&& stats_scope, bool added_via_api, uint32_t priority, std::string address, uint32_t port) - : ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), added_via_api), + : ClusterImplBase(cluster, runtime, factory_context, std::move(stats_scope), added_via_api, + factory_context.dispatcher().timeSource()), priority_(priority), address_(std::move(address)), port_(port), host_(makeHost()) {} InitializePhase initializePhase() const override { return InitializePhase::Primary; } diff --git a/test/integration/hotrestart_test.sh b/test/integration/hotrestart_test.sh index 729cc09bdc3c..f53dde86700d 100755 --- a/test/integration/hotrestart_test.sh +++ b/test/integration/hotrestart_test.sh @@ -78,18 +78,18 @@ echo "Hot restart test using dynamic base id" TEST_INDEX=0 function run_testsuite() { - local BASE_ID BASE_ID_PATH HOT_RESTART_JSON="$1" FAKE_SYMBOL_TABLE="$2" + local BASE_ID BASE_ID_PATH HOT_RESTART_JSON="$1" local SOCKET_PATH=@envoy_domain_socket local SOCKET_MODE=0 - if [ -n "$3" ] && [ -n "$4" ] + if [ -n "$2" ] && [ -n "$3" ] then - SOCKET_PATH="$3" - SOCKET_MODE="$4" + SOCKET_PATH="$2" + SOCKET_MODE="$3" fi start_test validation check "${ENVOY_BIN}" -c "${HOT_RESTART_JSON}" --mode validate --service-cluster cluster \ - --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" --service-node node --disable-hot-restart + --service-node node --disable-hot-restart BASE_ID_PATH=$(mktemp 'envoy_test_base_id.XXXXXX') echo "Selected dynamic base id path ${BASE_ID_PATH}" @@ -101,8 +101,7 @@ function run_testsuite() { ADMIN_ADDRESS_PATH_0="${TEST_TMPDIR}"/admin.0."${TEST_INDEX}".address run_in_background_saving_pid "${ENVOY_BIN}" -c "${HOT_RESTART_JSON}" \ --restart-epoch 0 --use-dynamic-base-id --base-id-path "${BASE_ID_PATH}" \ - --service-cluster cluster --service-node node --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" \ - --admin-address-path "${ADMIN_ADDRESS_PATH_0}" \ + --service-cluster cluster --service-node node --admin-address-path "${ADMIN_ADDRESS_PATH_0}" \ --socket-path "${SOCKET_PATH}" --socket-mode "${SOCKET_MODE}" BASE_ID=$(cat "${BASE_ID_PATH}") @@ -140,22 +139,13 @@ function run_testsuite() { echo "The Envoy's hot restart version is ${CLI_HOT_RESTART_VERSION}" echo "Now checking that the above version is what we expected." 
check [ "${CLI_HOT_RESTART_VERSION}" = "${EXPECTED_CLI_HOT_RESTART_VERSION}" ] - - start_test "Checking for consistency of /hot_restart_version with --use-fake-symbol-table ${FAKE_SYMBOL_TABLE}" - CLI_HOT_RESTART_VERSION=$("${ENVOY_BIN}" --hot-restart-version --base-id "${BASE_ID}" \ - --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" 2>&1) - CLI_HOT_RESTART_VERSION=$(strip_fake_symbol_table_warning "$CLI_HOT_RESTART_VERSION" "$FAKE_SYMBOL_TABLE") - EXPECTED_CLI_HOT_RESTART_VERSION="11.${SHARED_MEMORY_SIZE}" - check [ "${CLI_HOT_RESTART_VERSION}" = "${EXPECTED_CLI_HOT_RESTART_VERSION}" ] - + start_test "Checking for match of --hot-restart-version and admin /hot_restart_version" ADMIN_ADDRESS_0=$(cat "${ADMIN_ADDRESS_PATH_0}") echo "fetching hot restart version from http://${ADMIN_ADDRESS_0}/hot_restart_version ..." ADMIN_HOT_RESTART_VERSION=$(curl -sg "http://${ADMIN_ADDRESS_0}/hot_restart_version") echo "Fetched ADMIN_HOT_RESTART_VERSION is ${ADMIN_HOT_RESTART_VERSION}" - CLI_HOT_RESTART_VERSION=$("${ENVOY_BIN}" --hot-restart-version --base-id "${BASE_ID}" \ - --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" 2>&1) - CLI_HOT_RESTART_VERSION=$(strip_fake_symbol_table_warning "$CLI_HOT_RESTART_VERSION" "$FAKE_SYMBOL_TABLE") + CLI_HOT_RESTART_VERSION=$("${ENVOY_BIN}" --hot-restart-version --base-id "${BASE_ID}" 2>&1) check [ "${ADMIN_HOT_RESTART_VERSION}" = "${CLI_HOT_RESTART_VERSION}" ] start_test "Checking server.hot_restart_generation 1" @@ -175,7 +165,7 @@ function run_testsuite() { ADMIN_ADDRESS_PATH_1="${TEST_TMPDIR}"/admin.1."${TEST_INDEX}".address run_in_background_saving_pid "${ENVOY_BIN}" -c "${UPDATED_HOT_RESTART_JSON}" \ --restart-epoch 1 --base-id "${BASE_ID}" --service-cluster cluster --service-node node \ - --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" --admin-address-path "${ADMIN_ADDRESS_PATH_1}" \ + --admin-address-path "${ADMIN_ADDRESS_PATH_1}" \ --socket-path "${SOCKET_PATH}" --socket-mode "${SOCKET_MODE}" SERVER_1_PID=$BACKGROUND_PID @@ -216,7 +206,7 @@ function run_testsuite() { start_test "Starting epoch 2" run_in_background_saving_pid "${ENVOY_BIN}" -c "${UPDATED_HOT_RESTART_JSON}" \ --restart-epoch 2 --base-id "${BASE_ID}" --service-cluster cluster --service-node node \ - --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" --admin-address-path "${ADMIN_ADDRESS_PATH_2}" \ + --admin-address-path "${ADMIN_ADDRESS_PATH_2}" \ --parent-shutdown-time-s 3 \ --socket-path "${SOCKET_PATH}" --socket-mode "${SOCKET_MODE}" @@ -267,37 +257,14 @@ function run_testsuite() { wait "${SERVER_2_PID}" } -# TODO(#13399): remove this helper function and the references to it, as long as -# the references to $FAKE_SYMBOL_TABLE. -function strip_fake_symbol_table_warning() { - local INPUT="$1" - local FAKE_SYMBOL_TABLE="$2" - if [ "$FAKE_SYMBOL_TABLE" = "1" ]; then - echo "$INPUT" | grep -v "Fake symbol tables have been removed" - else - echo "$INPUT" - fi -} - # Hotrestart in abstract namespace for HOT_RESTART_JSON in "${JSON_TEST_ARRAY[@]}" do - # Run one of the tests with real symbol tables. No need to do all of them. - if [ "$TEST_INDEX" = "0" ]; then - run_testsuite "$HOT_RESTART_JSON" "0" || exit 1 - fi - - run_testsuite "$HOT_RESTART_JSON" "1" || exit 1 + run_testsuite "$HOT_RESTART_JSON" || exit 1 done # Hotrestart in specified UDS -# Real symbol tables are the default, so I had run just one with fake symbol tables -# (Switch the "0" and "1" in the second arg in the two run_testsuite calls below). 
-if [ "$TEST_INDEX" = "0" ]; then - run_testsuite "${HOT_RESTART_JSON_V4}" "0" "${SOCKET_DIR}/envoy_domain_socket" "600" || exit 1 -fi - -run_testsuite "${HOT_RESTART_JSON_V4}" "1" "${SOCKET_DIR}/envoy_domain_socket" "600" || exit 1 +run_testsuite "${HOT_RESTART_JSON_V4}" "${SOCKET_DIR}/envoy_domain_socket" "600" || exit 1 start_test "disabling hot_restart by command line." CLI_HOT_RESTART_VERSION=$("${ENVOY_BIN}" --hot-restart-version --disable-hot-restart 2>&1) @@ -308,8 +275,7 @@ start_test socket-mode for socket path run_in_background_saving_pid "${ENVOY_BIN}" -c "${HOT_RESTART_JSON}" \ --restart-epoch 0 --base-id 0 --base-id-path "${BASE_ID_PATH}" \ --socket-path "${SOCKET_DIR}"/envoy_domain_socket --socket-mode 644 \ - --service-cluster cluster --service-node node --use-fake-symbol-table "$FAKE_SYMBOL_TABLE" \ - --admin-address-path "${ADMIN_ADDRESS_PATH_0}" + --service-cluster cluster --service-node node --admin-address-path "${ADMIN_ADDRESS_PATH_0}" sleep 3 EXPECTED_SOCKET_MODE=$(stat -c '%a' "${SOCKET_DIR}"/envoy_domain_socket_parent_0) check [ "644" = "${EXPECTED_SOCKET_MODE}" ] diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index 198ca37278d1..e57b2c11b048 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -227,7 +227,8 @@ IntegrationCodecClientPtr HttpIntegrationTest::makeRawHttpConnection( cluster->http2_options_ = http2_options.value(); cluster->http1_settings_.enable_trailers_ = true; Upstream::HostDescriptionConstSharedPtr host_description{Upstream::makeTestHostDescription( - cluster, fmt::format("tcp://{}:80", Network::Test::getLoopbackAddressUrlString(version_)))}; + cluster, fmt::format("tcp://{}:80", Network::Test::getLoopbackAddressUrlString(version_)), + timeSystem())}; return std::make_unique(*dispatcher_, random_, std::move(conn), host_description, downstream_protocol_); } diff --git a/test/integration/utility.cc b/test/integration/utility.cc index f6d369e6ce6a..2796eb30b92c 100644 --- a/test/integration/utility.cc +++ b/test/integration/utility.cc @@ -90,7 +90,7 @@ IntegrationUtil::makeSingleRequest(const Network::Address::InstanceConstSharedPt Event::DispatcherPtr dispatcher(api.allocateDispatcher("test_thread")); std::shared_ptr cluster{new NiceMock()}; Upstream::HostDescriptionConstSharedPtr host_description{ - Upstream::makeTestHostDescription(cluster, "tcp://127.0.0.1:80")}; + Upstream::makeTestHostDescription(cluster, "tcp://127.0.0.1:80", time_system)}; Http::CodecClientProd client( type, dispatcher->createClientConnection(addr, Network::Address::InstanceConstSharedPtr(), diff --git a/test/mocks/server/options.h b/test/mocks/server/options.h index 51eeadf3d3b2..d28fd47b143a 100644 --- a/test/mocks/server/options.h +++ b/test/mocks/server/options.h @@ -47,7 +47,6 @@ class MockOptions : public Options { MOCK_METHOD(bool, hotRestartDisabled, (), (const)); MOCK_METHOD(bool, signalHandlingEnabled, (), (const)); MOCK_METHOD(bool, mutexTracingEnabled, (), (const)); - MOCK_METHOD(bool, fakeSymbolTableEnabled, (), (const)); MOCK_METHOD(bool, cpusetThreadsEnabled, (), (const)); MOCK_METHOD(const std::vector&, disabledExtensions, (), (const)); MOCK_METHOD(Server::CommandLineOptionsPtr, toCommandLineOptions, (), (const)); diff --git a/test/mocks/upstream/host.h b/test/mocks/upstream/host.h index 95183622dbb7..e4e102359b63 100644 --- a/test/mocks/upstream/host.h +++ b/test/mocks/upstream/host.h @@ -98,6 +98,7 @@ class MockHostDescription : public HostDescription { MOCK_METHOD(const 
envoy::config::core::v3::Locality&, locality, (), (const)); MOCK_METHOD(uint32_t, priority, (), (const)); MOCK_METHOD(void, priority, (uint32_t)); + MOCK_METHOD(MonotonicTime, creationTime, (), (const)); Stats::StatName localityZoneStatName() const override { Stats::SymbolTable& symbol_table = *symbol_table_; locality_zone_stat_name_ = @@ -192,6 +193,7 @@ class MockHost : public Host { MOCK_METHOD(uint32_t, priority, (), (const)); MOCK_METHOD(void, priority, (uint32_t)); MOCK_METHOD(bool, warmed, (), (const)); + MOCK_METHOD(MonotonicTime, creationTime, (), (const)); testing::NiceMock<MockClusterInfo> cluster_; Network::TransportSocketFactoryPtr socket_factory_; diff --git a/test/server/options_impl_test.cc b/test/server/options_impl_test.cc index bf22008af5a3..e618bb634349 100644 --- a/test/server/options_impl_test.cc +++ b/test/server/options_impl_test.cc @@ -94,7 +94,7 @@ TEST_F(OptionsImplTest, All) { "--log-path " "/foo/bar " "--disable-hot-restart --cpuset-threads --allow-unknown-static-fields " - "--reject-unknown-dynamic-fields --use-fake-symbol-table 0 --base-id 5 " + "--reject-unknown-dynamic-fields --base-id 5 " "--use-dynamic-base-id --base-id-path /foo/baz " "--socket-path /foo/envoy_domain_socket --socket-mode 644"); EXPECT_EQ(Server::Mode::Validate, options->mode()); @@ -118,7 +118,6 @@ TEST_F(OptionsImplTest, All) { EXPECT_TRUE(options->cpusetThreadsEnabled()); EXPECT_TRUE(options->allowUnknownStaticFields()); EXPECT_TRUE(options->rejectUnknownDynamicFields()); - EXPECT_FALSE(options->fakeSymbolTableEnabled()); EXPECT_EQ(5U, options->baseId()); EXPECT_TRUE(options->useDynamicBaseId()); EXPECT_EQ("/foo/baz", options->baseIdPath()); @@ -129,13 +128,6 @@ EXPECT_EQ(Server::Mode::InitOnly, options->mode()); } -// TODO(#13399): remove this test once we remove the option. -TEST_F(OptionsImplTest, FakeSymtabWarning) { - EXPECT_LOG_CONTAINS("warning", "Fake symbol tables have been removed", - createOptionsImpl("envoy --use-fake-symbol-table 1")); - EXPECT_NO_LOGS(createOptionsImpl("envoy --use-fake-symbol-table 0")); -} - // Either variants of allow-unknown-[static-]-fields works. TEST_F(OptionsImplTest, AllowUnknownFields) { { @@ -161,7 +153,6 @@ TEST_F(OptionsImplTest, SetAll) { bool hot_restart_disabled = options->hotRestartDisabled(); bool signal_handling_enabled = options->signalHandlingEnabled(); bool cpuset_threads_enabled = options->cpusetThreadsEnabled(); - bool fake_symbol_table_enabled = options->fakeSymbolTableEnabled(); options->setBaseId(109876); options->setConcurrency(42); @@ -189,7 +180,6 @@ options->setCpusetThreads(!options->cpusetThreadsEnabled()); options->setAllowUnkownFields(true); options->setRejectUnknownFieldsDynamic(true); - options->setFakeSymbolTableEnabled(!options->fakeSymbolTableEnabled()); options->setSocketPath("/foo/envoy_domain_socket"); options->setSocketMode(0644); @@ -219,7 +209,6 @@ EXPECT_EQ(!cpuset_threads_enabled, options->cpusetThreadsEnabled()); EXPECT_TRUE(options->allowUnknownStaticFields()); EXPECT_TRUE(options->rejectUnknownDynamicFields()); - EXPECT_EQ(!fake_symbol_table_enabled, options->fakeSymbolTableEnabled()); EXPECT_EQ("/foo/envoy_domain_socket", options->socketPath()); EXPECT_EQ(0644, options->socketMode()); @@ -293,14 +282,13 @@ TEST_F(OptionsImplTest, OptionsAreInSyncWithProto) { // Failure of this condition indicates that the server_info proto is not in sync with the options.
// If an option is added/removed, please update server_info proto as well to keep it in sync. - // Currently the following 7 options are not defined in proto, hence the count differs by 7. + // Currently the following 5 options are not defined in proto, hence the count differs by 5. // 1. version - default TCLAP argument. // 2. help - default TCLAP argument. // 3. ignore_rest - default TCLAP argument. // 4. allow-unknown-fields - deprecated alias of allow-unknown-static-fields. - // 5. use-fake-symbol-table - short-term override for rollout of real symbol-table implementation. - // 6. hot restart version - print the hot restart version and exit. - const uint32_t options_not_in_proto = 6; + // 5. hot restart version - print the hot restart version and exit. + const uint32_t options_not_in_proto = 5; // There are two deprecated options: "max_stats" and "max_obj_name_len". const uint32_t deprecated_options = 2; diff --git a/test/server/server_corpus/grpc_illegal_characters b/test/server/server_corpus/grpc_illegal_characters new file mode 100644 index 000000000000..8309bb56e7d7 --- /dev/null +++ b/test/server/server_corpus/grpc_illegal_characters @@ -0,0 +1,64 @@ +cluster_manager { + load_stats_config { + api_type: GRPC + grpc_services { + google_grpc { + target_uri: "48?" + stat_prefix: "$" + } + initial_metadata { + key: "2" + value: "$$" + } + initial_metadata { + key: "2" + value: "2" + } + initial_metadata { + key: "2" + value: "$$" + } + initial_metadata { + key: "2" + value: "$$" + } + initial_metadata { + key: "2;" + value: "$$" + } + initial_metadata { + key: "1" + value: "$$" + } + initial_metadata { + key: "2" + value: "$$" + } + initial_metadata { + key: "2" + value: "$$" + } + initial_metadata { + key: "2" + value: "2" + } + initial_metadata { + key: "2" + value: "$$" + } + initial_metadata { + key: "4" + value: "$$" + } + initial_metadata { + key: "2" + value: "1" + } + initial_metadata { + key: "10" + value: "$$" + } + } + } +} +enable_dispatcher_stats: true \ No newline at end of file diff --git a/test/server/server_test.cc b/test/server/server_test.cc index aa097fc787cf..1db932a7c8c4 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -1411,6 +1411,16 @@ TEST_P(ServerInstanceImplTest, DisabledExtension) { ASSERT_TRUE(disabled_filter_found); } +TEST_P(ServerInstanceImplTest, NullProcessContextTest) { + // These are already the defaults. Repeated here for clarity. + process_object_ = nullptr; + process_context_ = nullptr; + + initialize("test/server/test_data/server/empty_bootstrap.yaml"); + ProcessContextOptRef context = server_->processContext(); + EXPECT_FALSE(context.has_value()); +} + } // namespace } // namespace Server } // namespace Envoy diff --git a/test/test_runner.cc b/test/test_runner.cc index 727268ffc4f1..e966cfcb15cd 100644 --- a/test/test_runner.cc +++ b/test/test_runner.cc @@ -152,7 +152,17 @@ int TestRunner::RunTests(int argc, char** argv) { file_logger = std::make_unique<Logger::FileSinkDelegate>( TestEnvironment::getOptions().logPath(), access_log_manager, Logger::Registry::getSink()); } + +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + // Fuzz tests may run Envoy tests in fuzzing mode to generate corpora. In this case, we do not + // want to fail building the fuzz test because of a failed test run, which can happen when testing + // functionality in fuzzing test mode. Dependencies (like RE2) change behavior when in fuzzing + // mode, so we do not want to rely on a behavior test when generating a corpus.
+ (void)RUN_ALL_TESTS(); + return 0; +#else return RUN_ALL_TESTS(); +#endif } } // namespace Envoy diff --git a/tools/dependency/ossf_scorecard.py b/tools/dependency/ossf_scorecard.py new file mode 100755 index 000000000000..d15f97f9fde8 --- /dev/null +++ b/tools/dependency/ossf_scorecard.py @@ -0,0 +1,143 @@ +#!/usr/bin/env python3 + +# Run OSSF Scorecard (https://github.com/ossf/scorecard) against Envoy dependencies. +# +# Usage: +# +# tools/dependency/ossf_scorecard.py <path to repository_locations.bzl> \ # <path to scorecard binary> <path to CSV output file> # # You will need to check out and build the OSSF scorecard binary independently and supply it as a CLI # argument. # # You will need to set a GitHub access token in the GITHUB_AUTH_TOKEN environment variable. You can # generate personal access tokens under developer settings on GitHub. You should restrict the scope # of the token to "repo: public_repo". # # The output is CSV suitable for import into Google Sheets. + from collections import namedtuple +import csv +import json +import os +import subprocess as sp +import sys + +import utils + +Scorecard = namedtuple('Scorecard', [ + 'name', + 'contributors', + 'active', + 'ci_tests', + 'pull_requests', + 'code_review', + 'fuzzing', + 'security_policy', + 'releases', +]) + + +# Thrown on errors related to scorecard processing. +class OssfScorecardError(Exception): + pass + + +# We skip build, test, etc. +def IsScoredUseCategory(use_category): + return len( + set(use_category).intersection([ + 'dataplane_core', 'dataplane_ext', 'controlplane', 'observability_core', + 'observability_ext' + ])) > 0 + + +def Score(scorecard_path, repository_locations): + results = {} + for dep, metadata in sorted(repository_locations.items()): + if not IsScoredUseCategory(metadata['use_category']): + continue + results_key = metadata['project_name'] + formatted_name = '=HYPERLINK("%s", "%s")' % (metadata['project_url'], results_key) + github_project_url = utils.GetGitHubProjectUrl(metadata['urls']) + if not github_project_url: + na = 'Not Scorecard compatible' + results[results_key] = Scorecard(name=formatted_name, + contributors=na, + active=na, + ci_tests=na, + pull_requests=na, + code_review=na, + fuzzing=na, + security_policy=na, + releases=na) + continue + raw_scorecard = json.loads( + sp.check_output( + [scorecard_path, f'--repo={github_project_url}', '--show-details', '--format=json'])) + checks = {c['CheckName']: c for c in raw_scorecard['Checks']} + + # Generic check format. + def Format(key): + score = checks[key] + status = score['Pass'] + confidence = score['Confidence'] + return f'{status} ({confidence})' + + # Releases need to be extracted from Signed-Releases.
+ def ReleaseFormat(): + score = checks['Signed-Releases'] + if score['Pass']: + return Format('Signed-Releases') + details = score['Details'] + release_found = details is not None and any('release found:' in d for d in details) + if release_found: + return 'True (10)' + else: + return 'False (10)' + + results[results_key] = Scorecard(name=formatted_name, + contributors=Format('Contributors'), + active=Format('Active'), + ci_tests=Format('CI-Tests'), + pull_requests=Format('Pull-Requests'), + code_review=Format('Code-Review'), + fuzzing=Format('Fuzzing'), + security_policy=Format('Security-Policy'), + releases=ReleaseFormat()) + print(raw_scorecard) + print(results[results_key]) + return results + + +def PrintCsvResults(csv_output_path, results): + headers = Scorecard._fields + with open(csv_output_path, 'w') as f: + writer = csv.writer(f) + writer.writerow(headers) + for name in sorted(results): + writer.writerow(getattr(results[name], h) for h in headers) + + +if __name__ == '__main__': + if len(sys.argv) != 4: + print( + 'Usage: %s <path to repository_locations.bzl> <path to scorecard binary> <path to CSV output file>' + % sys.argv[0]) + sys.exit(1) + access_token = os.getenv('GITHUB_AUTH_TOKEN') + if not access_token: + print('Missing GITHUB_AUTH_TOKEN') + sys.exit(1) + path = sys.argv[1] + scorecard_path = sys.argv[2] + csv_output_path = sys.argv[3] + spec_loader = utils.repository_locations_utils.load_repository_locations_spec + path_module = utils.LoadModule('repository_locations', path) + try: + results = Score(scorecard_path, spec_loader(path_module.REPOSITORY_LOCATIONS_SPEC)) + PrintCsvResults(csv_output_path, results) + except OssfScorecardError as e: + print(f'An error occurred while processing {path}, please verify the correctness of the ' f'metadata: {e}') diff --git a/tools/dependency/utils.py b/tools/dependency/utils.py index 15deb07576a4..7cd74c3f1f0e 100644 --- a/tools/dependency/utils.py +++ b/tools/dependency/utils.py @@ -30,6 +30,16 @@ def RepositoryLocations(): return locations +# Obtain GitHub project URL from a list of URLs. +def GetGitHubProjectUrl(urls): + for url in urls: + if not url.startswith('https://github.com/'): + continue + components = url.split('/') + return f'https://github.com/{components[3]}/{components[4]}' + return None + + # Information related to a GitHub release version. GitHubRelease = namedtuple('GitHubRelease', ['organization', 'project', 'version', 'tagged'])
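Editor's note: for readers reviewing the new dependency tooling above, the following is a minimal, self-contained sketch of how the GetGitHubProjectUrl helper added to tools/dependency/utils.py behaves. The function body is copied from the diff; the example URLs are purely illustrative and are not taken from repository_locations.bzl.

    # Standalone sanity check of the URL normalization in GetGitHubProjectUrl.
    def GetGitHubProjectUrl(urls):
        for url in urls:
            if not url.startswith('https://github.com/'):
                continue
            components = url.split('/')
            return f'https://github.com/{components[3]}/{components[4]}'
        return None

    # A release tarball URL is normalized down to the org/project page, which is
    # the form passed to the scorecard binary's --repo flag in ossf_scorecard.py.
    assert GetGitHubProjectUrl(
        ['https://github.com/ossf/scorecard/archive/v1.0.0.tar.gz'
        ]) == 'https://github.com/ossf/scorecard'
    # Non-GitHub mirrors yield None; ossf_scorecard.py then records the
    # dependency as 'Not Scorecard compatible'.
    assert GetGitHubProjectUrl(['https://files.example.org/foo.tar.gz']) is None

Note that the first GitHub URL wins, so a dependency listing both a GitHub URL and a mirror is scored against the GitHub project.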
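Similarly, the check-formatting logic in ossf_scorecard.py can be exercised against a hand-written stand-in for the scorecard binary's JSON output. The sample dict below is hypothetical (real output carries many more fields), but the keys consumed by the script (Checks, CheckName, Pass, Confidence, Details) match the diff:

    # Illustrative stand-in for `scorecard --format=json` output.
    raw_scorecard = {
        'Checks': [
            {'CheckName': 'Active', 'Pass': True, 'Confidence': 10, 'Details': None},
            {'CheckName': 'Signed-Releases', 'Pass': False, 'Confidence': 10,
             'Details': ['release found: v1.2.3']},
        ]
    }
    checks = {c['CheckName']: c for c in raw_scorecard['Checks']}

    # Mirrors the generic Format() helper defined inside Score().
    def Format(key):
        score = checks[key]
        return f"{score['Pass']} ({score['Confidence']})"

    assert Format('Active') == 'True (10)'
    # Signed-Releases failed outright, but a release was still found in the
    # details, so ReleaseFormat() reports 'True (10)' instead of 'False (10)'.
    details = checks['Signed-Releases']['Details']
    assert details is not None and any('release found:' in d for d in details)

This illustrates why ReleaseFormat() exists: a project can fail the Signed-Releases check while still publishing releases, and the script wants to count "has releases at all" rather than "signs its releases".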