diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index 151736a8f335..4efd687e546c 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -250,6 +250,10 @@ stages: steps: - bash: .azure-pipelines/cleanup.sh displayName: "Removing tools from agent" + - bash: | + echo "disk space at beginning of build:" + df -h + displayName: "Check disk space at beginning" - task: DownloadBuildArtifacts@0 inputs: buildType: current @@ -275,6 +279,11 @@ stages: AZP_SHA1: $(Build.SourceVersion) DOCKERHUB_USERNAME: $(DockerUsername) DOCKERHUB_PASSWORD: $(DockerPassword) + - bash: | + echo "disk space at end of build:" + df -h + displayName: "Check disk space at end" + condition: always() - task: PublishBuildArtifacts@1 inputs: pathtoPublish: "$(Build.StagingDirectory)/build_images" diff --git a/.bazelrc b/.bazelrc index 2208bb55801b..dd242a8aacd1 100644 --- a/.bazelrc +++ b/.bazelrc @@ -193,8 +193,6 @@ build:remote --spawn_strategy=remote,sandboxed,local build:remote --strategy=Javac=remote,sandboxed,local build:remote --strategy=Closure=remote,sandboxed,local build:remote --strategy=Genrule=remote,sandboxed,local -# rules_rust is not remote runnable (yet) -build:remote --strategy=Rustc=sandboxed,local build:remote --remote_timeout=7200 build:remote --auth_enabled=true build:remote --remote_download_toplevel diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1bef2955c288..bafe92bb2d8a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -69,7 +69,7 @@ versioning guidelines: cause a configuration load failure, unless the feature in question is explicitly overridden in [runtime](https://www.envoyproxy.io/docs/envoy/latest/configuration/operations/runtime#using-runtime-overrides-for-deprecated-features) - config ([example](configs/using_deprecated_config.v2.yaml)). Finally, following the deprecation + config ([example](configs/using_deprecated_config.yaml)). 
Finally, following the deprecation of the API major version where the field was first marked deprecated, the entire implementation code will be removed from the Envoy implementation. * This policy means that organizations deploying master should have some time to get ready for diff --git a/OWNERS.md b/OWNERS.md index 995834c515ff..4adc81048c59 100644 --- a/OWNERS.md +++ b/OWNERS.md @@ -68,3 +68,5 @@ matter expert reviews. Feel free to loop them in as needed. * Redis, Python, configuration/operational questions. * Yuchen Dai ([lambdai](https://github.com/lambdai)) (lambdai@google.com) * v2 xDS, listeners, filter chain discovery service. +* Michael Payne ([moderation](https://github.com/moderation)) (m@m17e.org) + * External dependencies, Envoy's supply chain and documentation. diff --git a/api/bazel/external_deps.bzl b/api/bazel/external_deps.bzl index cd9b6759f98a..588879c4bd0a 100644 --- a/api/bazel/external_deps.bzl +++ b/api/bazel/external_deps.bzl @@ -2,12 +2,6 @@ load("@envoy_api//bazel:repository_locations_utils.bzl", "load_repository_locati # Envoy dependencies may be annotated with the following attributes: DEPENDENCY_ANNOTATIONS = [ - # List of the categories describing how the dependency is being used. This attribute is used - # for automatic tracking of security posture of Envoy's dependencies. - # Possible values are documented in the USE_CATEGORIES list below. - # This attribute is mandatory for each dependecy. - "use_category", - # Attribute specifying CPE (Common Platform Enumeration, see https://nvd.nist.gov/products/cpe) ID # of the dependency. The ID may be in v2.3 or v2.2 format, although v2.3 is prefferred. See # https://nvd.nist.gov/products/cpe for CPE format. 
Use single wildcard '*' for version and vector elements @@ -15,6 +9,31 @@ DEPENDENCY_ANNOTATIONS = [ # This attribute is optional for components with use categories listed in the # USE_CATEGORIES_WITH_CPE_OPTIONAL "cpe", + + # A list of extensions when 'use_category' contains 'dataplane_ext' or 'observability_ext'. + "extensions", + + # Additional dependencies loaded transitively via this dependency that are not tracked in + # Envoy (see the external dependency at the given version for information). + "implied_untracked_deps", + + # When the dependency was last updated in Envoy. + "last_updated", + + # Project metadata. + "project_desc", + "project_name", + "project_url", + + # List of the categories describing how the dependency is being used. This attribute is used + # for automatic tracking of security posture of Envoy's dependencies. + # Possible values are documented in the USE_CATEGORIES list below. + # This attribute is mandatory for each dependency. + "use_category", + + # The dependency version. This may be either a tagged release (preferred) + # or git SHA (as an exception when no release tagged version is suitable). + "version", ] # NOTE: If a dependency use case is either dataplane or controlplane, the other uses are not needed @@ -38,6 +57,10 @@ USE_CATEGORIES = [ "other", # This dependency is used only in tests. 
"test_only", + # Documentation generation + "docs", + # Developer tools (not used in build or docs) + "devtools", ] # Components with these use categories are not required to specify the 'cpe' @@ -62,47 +85,43 @@ def load_repository_locations(repository_locations_spec): if "project_name" not in location: _fail_missing_attribute("project_name", key) - mutable_location.pop("project_name") if "project_desc" not in location: _fail_missing_attribute("project_desc", key) - mutable_location.pop("project_desc") if "project_url" not in location: _fail_missing_attribute("project_url", key) - project_url = mutable_location.pop("project_url") + project_url = location["project_url"] if not project_url.startswith("https://") and not project_url.startswith("http://"): fail("project_url must start with https:// or http://: " + project_url) if "version" not in location: _fail_missing_attribute("version", key) - mutable_location.pop("version") if "use_category" not in location: _fail_missing_attribute("use_category", key) - use_category = mutable_location.pop("use_category") + use_category = location["use_category"] if "dataplane_ext" in use_category or "observability_ext" in use_category: if "extensions" not in location: _fail_missing_attribute("extensions", key) - mutable_location.pop("extensions") if "last_updated" not in location: _fail_missing_attribute("last_updated", key) - last_updated = mutable_location.pop("last_updated") + last_updated = location["last_updated"] # Starlark doesn't have regexes. if len(last_updated) != 10 or last_updated[4] != "-" or last_updated[7] != "-": fail("last_updated must match YYYY-DD-MM: " + last_updated) if "cpe" in location: - cpe = mutable_location.pop("cpe") + cpe = location["cpe"] # Starlark doesn't have regexes. cpe_components = len(cpe.split(":")) - # We allow cpe:2.3:a:foo:* and cpe:2.3.:a:foo:bar:* only. - cpe_components_valid = cpe_components in [5, 6] + # We allow cpe:2.3:a:foo:*:* and cpe:2.3.:a:foo:bar:* only. 
+ cpe_components_valid = (cpe_components == 6) cpe_matches = (cpe == "N/A" or (cpe.startswith("cpe:2.3:a:") and cpe.endswith(":*") and cpe_components_valid)) if not cpe_matches: fail("CPE must match cpe:2.3:a:::*: " + cpe) @@ -113,4 +132,9 @@ def load_repository_locations(repository_locations_spec): if category not in USE_CATEGORIES: fail("Unknown use_category value '" + category + "' for dependecy " + key) + # Remove any extra annotations that we add, so that we don't confuse http_archive etc. + for annotation in DEPENDENCY_ANNOTATIONS: + if annotation in mutable_location: + mutable_location.pop(annotation) + return locations diff --git a/api/envoy/config/listener/v3/listener.proto b/api/envoy/config/listener/v3/listener.proto index dab0eb1ce68f..aac1166f49fd 100644 --- a/api/envoy/config/listener/v3/listener.proto +++ b/api/envoy/config/listener/v3/listener.proto @@ -36,7 +36,7 @@ message ListenerCollection { repeated udpa.core.v1.CollectionEntry entries = 1; } -// [#next-free-field: 25] +// [#next-free-field: 26] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener"; @@ -116,6 +116,10 @@ message Listener { // :ref:`FAQ entry `. repeated FilterChain filter_chains = 3; + // The default filter chain if none of the filter chain matches. If no default filter chain is supplied, + // the connection will be closed. The filter chain match is ignored in this field. + FilterChain default_filter_chain = 25; + // Soft limit on size of the listener’s new connection read and write buffers. // If unspecified, an implementation defined default is applied (1MiB). 
google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 diff --git a/api/envoy/config/listener/v3/listener_components.proto b/api/envoy/config/listener/v3/listener_components.proto index 12ba713d6c14..3ecfc7932b56 100644 --- a/api/envoy/config/listener/v3/listener_components.proto +++ b/api/envoy/config/listener/v3/listener_components.proto @@ -65,6 +65,18 @@ message Filter { // ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter // chain without ``server_names`` requirements). // +// A different way to reason about the filter chain matches: +// Suppose there exists N filter chains. Prune the filter chain set using the above 8 steps. +// In each step, filter chains which most specifically matches the attributes continue to the next step. +// The listener guarantees at most 1 filter chain is left after all of the steps. +// +// Example: +// +// For destination port, filter chains specifying the destination port of incoming traffic are the +// most specific match. If none of the filter chains specifies the exact destination port, the filter +// chains which do not specify ports are the most specific match. Filter chains specifying the +// wrong port can never be the most specific match. +// // [#comment: Implemented rules are kept in the preference order, with deprecated fields // listed at the end, because that's how we want to list them in the docs. 
// diff --git a/api/envoy/config/listener/v4alpha/listener.proto b/api/envoy/config/listener/v4alpha/listener.proto index 3c9dced082b7..fbc65d0880f3 100644 --- a/api/envoy/config/listener/v4alpha/listener.proto +++ b/api/envoy/config/listener/v4alpha/listener.proto @@ -39,7 +39,7 @@ message ListenerCollection { repeated udpa.core.v1.CollectionEntry entries = 1; } -// [#next-free-field: 25] +// [#next-free-field: 26] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Listener"; @@ -119,6 +119,10 @@ message Listener { // :ref:`FAQ entry `. repeated FilterChain filter_chains = 3; + // The default filter chain if none of the filter chain matches. If no default filter chain is supplied, + // the connection will be closed. The filter chain match is ignored in this field. + FilterChain default_filter_chain = 25; + // Soft limit on size of the listener’s new connection read and write buffers. // If unspecified, an implementation defined default is applied (1MiB). google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 diff --git a/api/envoy/config/listener/v4alpha/listener_components.proto b/api/envoy/config/listener/v4alpha/listener_components.proto index 4add5ee102ee..0c75f92b4027 100644 --- a/api/envoy/config/listener/v4alpha/listener_components.proto +++ b/api/envoy/config/listener/v4alpha/listener_components.proto @@ -65,6 +65,18 @@ message Filter { // ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter // chain without ``server_names`` requirements). // +// A different way to reason about the filter chain matches: +// Suppose there exists N filter chains. Prune the filter chain set using the above 8 steps. +// In each step, filter chains which most specifically matches the attributes continue to the next step. +// The listener guarantees at most 1 filter chain is left after all of the steps. 
+// +// Example: +// +// For destination port, filter chains specifying the destination port of incoming traffic are the +// most specific match. If none of the filter chains specifies the exact destination port, the filter +// chains which do not specify ports are the most specific match. Filter chains specifying the +// wrong port can never be the most specific match. +// // [#comment: Implemented rules are kept in the preference order, with deprecated fields // listed at the end, because that's how we want to list them in the docs. // diff --git a/api/envoy/config/route/v3/route_components.proto b/api/envoy/config/route/v3/route_components.proto index 407f285310a5..595fde141e6c 100644 --- a/api/envoy/config/route/v3/route_components.proto +++ b/api/envoy/config/route/v3/route_components.proto @@ -1510,7 +1510,7 @@ message VirtualCluster { message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit"; - // [#next-free-field: 8] + // [#next-free-field: 9] message Action { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit.Action"; @@ -1627,11 +1627,15 @@ message RateLimit { repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; } - // The following descriptor entry is appended when the dynamic metadata contains a key value: + // The following descriptor entry is appended when the + // :ref:`dynamic metadata ` contains a key value: // // .. code-block:: cpp // - // ("", "") + // ("", "") + // + // .. attention:: + // This action has been deprecated in favor of the :ref:`metadata ` action message DynamicMetaData { // The key to use in the descriptor entry. string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; @@ -1645,6 +1649,35 @@ message RateLimit { string default_value = 3; } + // The following descriptor entry is appended when the metadata contains a key value: + // + // .. 
code-block:: cpp + // + // ("", "") + message MetaData { + enum Source { + // Query :ref:`dynamic metadata ` + DYNAMIC = 0; + + // Query :ref:`route entry metadata ` + ROUTE_ENTRY = 1; + } + + // The key to use in the descriptor entry. + string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; + + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the metadata is of type string. + type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + + // An optional value to use if *metadata_key* is empty. If not set and + // no value is present under the metadata_key then no descriptor is generated. + string default_value = 3; + + // Source of metadata + Source source = 4 [(validate.rules).enum = {defined_only: true}]; + } + oneof action_specifier { option (validate.required) = true; @@ -1667,7 +1700,14 @@ message RateLimit { HeaderValueMatch header_value_match = 6; // Rate limit on dynamic metadata. - DynamicMetaData dynamic_metadata = 7; + // + // .. attention:: + // This field has been deprecated in favor of the :ref:`metadata ` field + DynamicMetaData dynamic_metadata = 7 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + + // Rate limit on metadata. 
+ MetaData metadata = 8; } } diff --git a/api/envoy/config/route/v4alpha/route_components.proto b/api/envoy/config/route/v4alpha/route_components.proto index 766d64ebedee..0bf0b493e956 100644 --- a/api/envoy/config/route/v4alpha/route_components.proto +++ b/api/envoy/config/route/v4alpha/route_components.proto @@ -1459,7 +1459,7 @@ message VirtualCluster { message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit"; - // [#next-free-field: 8] + // [#next-free-field: 9] message Action { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit.Action"; @@ -1576,11 +1576,15 @@ message RateLimit { repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; } - // The following descriptor entry is appended when the dynamic metadata contains a key value: + // The following descriptor entry is appended when the + // :ref:`dynamic metadata ` contains a key value: // // .. code-block:: cpp // - // ("", "") + // ("", "") + // + // .. attention:: + // This action has been deprecated in favor of the :ref:`metadata ` action message DynamicMetaData { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit.Action.DynamicMetaData"; @@ -1597,6 +1601,42 @@ message RateLimit { string default_value = 3; } + // The following descriptor entry is appended when the metadata contains a key value: + // + // .. code-block:: cpp + // + // ("", "") + message MetaData { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action.MetaData"; + + enum Source { + // Query :ref:`dynamic metadata ` + DYNAMIC = 0; + + // Query :ref:`route entry metadata ` + ROUTE_ENTRY = 1; + } + + // The key to use in the descriptor entry. + string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; + + // Metadata struct that defines the key and path to retrieve the string value. 
A match will + // only happen if the value in the metadata is of type string. + type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + + // An optional value to use if *metadata_key* is empty. If not set and + // no value is present under the metadata_key then no descriptor is generated. + string default_value = 3; + + // Source of metadata + Source source = 4 [(validate.rules).enum = {defined_only: true}]; + } + + reserved 7; + + reserved "dynamic_metadata"; + oneof action_specifier { option (validate.required) = true; @@ -1618,8 +1658,8 @@ message RateLimit { // Rate limit on the existence of request headers. HeaderValueMatch header_value_match = 6; - // Rate limit on dynamic metadata. - DynamicMetaData dynamic_metadata = 7; + // Rate limit on metadata. + MetaData metadata = 8; } } diff --git a/bazel/dependency_imports.bzl b/bazel/dependency_imports.bzl index bb0bcf815f43..dc02f5056cd0 100644 --- a/bazel/dependency_imports.bzl +++ b/bazel/dependency_imports.bzl @@ -37,6 +37,7 @@ def envoy_dependency_imports(go_version = GO_VERSION): }, ) + # These dependencies, like most of the Go in this repository, exist only for the API. 
go_repository( name = "org_golang_google_grpc", build_file_proto_mode = "disable", @@ -44,14 +45,12 @@ def envoy_dependency_imports(go_version = GO_VERSION): sum = "h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=", version = "v1.29.1", ) - go_repository( name = "org_golang_x_net", importpath = "golang.org/x/net", sum = "h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=", version = "v0.0.0-20190813141303-74dc4d7220e7", ) - go_repository( name = "org_golang_x_text", importpath = "golang.org/x/text", diff --git a/bazel/foreign_cc/BUILD b/bazel/foreign_cc/BUILD index 20c91b683b7c..c2a214747107 100644 --- a/bazel/foreign_cc/BUILD +++ b/bazel/foreign_cc/BUILD @@ -67,16 +67,13 @@ configure_make( # TODO(htuch): Remove when #6084 is fixed "//bazel:asan_build": {"ENVOY_CONFIG_ASAN": "1"}, "//bazel:msan_build": {"ENVOY_CONFIG_MSAN": "1"}, - "//bazel:windows_dbg_build": {"WINDOWS_DBG_BUILD": "debug"}, "//conditions:default": {}, }), lib_source = "@com_github_moonjit_moonjit//:all", make_commands = [], out_include_dir = "include/moonjit-2.2", - static_libraries = select({ - "//bazel:windows_x86_64": ["lua51.lib"], - "//conditions:default": ["libluajit-5.1.a"], - }), + static_libraries = ["libluajit-5.1.a"], + tags = ["skip_on_windows"], ) envoy_cmake_external( diff --git a/bazel/foreign_cc/luajit.patch b/bazel/foreign_cc/luajit.patch index b454b7dfd149..c0fb0da819fd 100644 --- a/bazel/foreign_cc/luajit.patch +++ b/bazel/foreign_cc/luajit.patch @@ -1,5 +1,5 @@ diff --git a/src/Makefile b/src/Makefile -index f56465d..5d91fa7 100644 +index e65b55e..f0a61dd 100644 --- a/src/Makefile +++ b/src/Makefile @@ -27,7 +27,7 @@ NODOTABIVER= 51 @@ -33,96 +33,96 @@ index f56465d..5d91fa7 100644 # # Disable the JIT compiler, i.e. turn LuaJIT into a pure interpreter. #XCFLAGS+= -DLUAJIT_DISABLE_JIT -@@ -111,7 +111,7 @@ XCFLAGS= - #XCFLAGS+= -DLUAJIT_NUMMODE=2 - # - # Enable GC64 mode for x64. 
--#XCFLAGS+= -DLUAJIT_ENABLE_GC64 -+XCFLAGS+= -DLUAJIT_ENABLE_GC64 - # - ############################################################################## - -@@ -587,7 +587,7 @@ endif - +@@ -591,7 +591,7 @@ endif + Q= @ E= @echo -#Q= +Q= #E= @: - - ############################################################################## -EOF ---- a/src/msvcbuild.bat 2020-08-13 18:42:05.667354300 +0000 -+++ b/src/msvcbuild.bat 2020-08-13 19:03:25.092297900 +0000 -@@ -14,7 +14,7 @@ - @if not defined INCLUDE goto :FAIL - @setlocal --@set LJCOMPILE=cl /nologo /c /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline -+@set LJCOMPILE=cl /nologo /c /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline /DLUAJIT_ENABLE_LUA52COMPAT - @set LJLINK=link /nologo - @set LJMT=mt /nologo - @set LJLIB=lib /nologo /nodefaultlib -@@ -25,7 +25,7 @@ - @set LJLIBNAME=lua51.lib - @set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c - --%LJCOMPILE% host\minilua.c -+%LJCOMPILE% /O2 host\minilua.c - @if errorlevel 1 goto :BAD - %LJLINK% /out:minilua.exe minilua.obj - @if errorlevel 1 goto :BAD -@@ -48,7 +48,7 @@ - minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h %DASC% - @if errorlevel 1 goto :BAD - --%LJCOMPILE% /I "." /I %DASMDIR% host\buildvm*.c -+%LJCOMPILE% /O2 /I "." 
/I %DASMDIR% host\buildvm*.c - @if errorlevel 1 goto :BAD - %LJLINK% /out:buildvm.exe buildvm*.obj - @if errorlevel 1 goto :BAD -@@ -72,24 +72,35 @@ - - @if "%1" neq "debug" goto :NODEBUG - @shift --@set LJCOMPILE=%LJCOMPILE% /Zi -+@set LJCOMPILE=%LJCOMPILE% /O0 /Z7 - @set LJLINK=%LJLINK% /debug /opt:ref /opt:icf /incremental:no -+@set LJCRTDBG=d -+@goto :ENDDEBUG - :NODEBUG -+@set LJCOMPILE=%LJCOMPILE% /O2 /Z7 -+@set LJLINK=%LJLINK% /release /incremental:no -+@set LJCRTDBG= -+:ENDDEBUG - @if "%1"=="amalg" goto :AMALGDLL - @if "%1"=="static" goto :STATIC --%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL lj_*.c lib_*.c -+@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG% -+%LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c - @if errorlevel 1 goto :BAD - %LJLINK% /DLL /out:%LJDLLNAME% lj_*.obj lib_*.obj - @if errorlevel 1 goto :BAD - @goto :MTDLL - :STATIC -+@shift -+@set LJCOMPILE=%LJCOMPILE% /MT%LJCRTDBG% - %LJCOMPILE% lj_*.c lib_*.c - @if errorlevel 1 goto :BAD - %LJLIB% /OUT:%LJLIBNAME% lj_*.obj lib_*.obj - @if errorlevel 1 goto :BAD - @goto :MTDLL - :AMALGDLL --%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL ljamalg.c -+@shift -+@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG% -+%LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c - @if errorlevel 1 goto :BAD - %LJLINK% /DLL /out:%LJDLLNAME% ljamalg.obj lj_vm.obj - @if errorlevel 1 goto :BAD + ############################################################################## +diff --git a/src/msvcbuild.bat b/src/msvcbuild.bat +index ae035dc..0e7eac9 100644 +--- a/src/msvcbuild.bat ++++ b/src/msvcbuild.bat +@@ -13,9 +13,7 @@ + @if not defined INCLUDE goto :FAIL + + @setlocal +-@rem Add more debug flags here, e.g. 
DEBUGCFLAGS=/DLUA_USE_APICHECK +-@set DEBUGCFLAGS= +-@set LJCOMPILE=cl /nologo /c /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline ++@set LJCOMPILE=cl /nologo /c /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline /DLUAJIT_ENABLE_LUA52COMPAT + @set LJLINK=link /nologo + @set LJMT=mt /nologo + @set LJLIB=lib /nologo /nodefaultlib +@@ -24,10 +22,9 @@ + @set DASC=vm_x64.dasc + @set LJDLLNAME=lua51.dll + @set LJLIBNAME=lua51.lib +-@set BUILDTYPE=release + @set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c + +-%LJCOMPILE% host\minilua.c ++%LJCOMPILE% /O2 host\minilua.c + @if errorlevel 1 goto :BAD + %LJLINK% /out:minilua.exe minilua.obj + @if errorlevel 1 goto :BAD +@@ -51,7 +48,7 @@ if exist minilua.exe.manifest^ + minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h %DASC% + @if errorlevel 1 goto :BAD + +-%LJCOMPILE% /I "." /I %DASMDIR% host\buildvm*.c ++%LJCOMPILE% /O2 /I "." 
/I %DASMDIR% host\buildvm*.c + @if errorlevel 1 goto :BAD + %LJLINK% /out:buildvm.exe buildvm*.obj + @if errorlevel 1 goto :BAD +@@ -75,26 +72,35 @@ buildvm -m folddef -o lj_folddef.h lj_opt_fold.c + + @if "%1" neq "debug" goto :NODEBUG + @shift +-@set BUILDTYPE=debug +-@set LJCOMPILE=%LJCOMPILE% /Zi %DEBUGCFLAGS% +-@set LJLINK=%LJLINK% /opt:ref /opt:icf /incremental:no ++@set LJCOMPILE=%LJCOMPILE% /O0 /Z7 ++@set LJLINK=%LJLINK% /debug /opt:ref /opt:icf /incremental:no ++@set LJCRTDBG=d ++@goto :ENDDEBUG + :NODEBUG +-@set LJLINK=%LJLINK% /%BUILDTYPE% ++@set LJCOMPILE=%LJCOMPILE% /O2 /Z7 ++@set LJLINK=%LJLINK% /release /incremental:no ++@set LJCRTDBG= ++:ENDDEBUG + @if "%1"=="amalg" goto :AMALGDLL + @if "%1"=="static" goto :STATIC +-%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL lj_*.c lib_*.c ++@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG% ++%LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c + @if errorlevel 1 goto :BAD + %LJLINK% /DLL /out:%LJDLLNAME% lj_*.obj lib_*.obj + @if errorlevel 1 goto :BAD + @goto :MTDLL + :STATIC ++@shift ++@set LJCOMPILE=%LJCOMPILE% /MT%LJCRTDBG% + %LJCOMPILE% lj_*.c lib_*.c + @if errorlevel 1 goto :BAD + %LJLIB% /OUT:%LJLIBNAME% lj_*.obj lib_*.obj + @if errorlevel 1 goto :BAD + @goto :MTDLL + :AMALGDLL +-%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL ljamalg.c ++@shift ++@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG% ++%LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c + @if errorlevel 1 goto :BAD + %LJLINK% /DLL /out:%LJDLLNAME% ljamalg.obj lj_vm.obj + @if errorlevel 1 goto :BAD diff --git a/build.py b/build.py new file mode 100755 -index 0000000..9c71271 +index 0000000..3eb74ff --- /dev/null +++ b/build.py @@ -0,0 +1,56 @@ @@ -168,7 +168,7 @@ index 0000000..9c71271 + dst_dir = os.getcwd() + "/luajit" + shutil.copytree(src_dir, os.path.basename(src_dir)) + os.chdir(os.path.basename(src_dir) + "/src") -+ os.system('msvcbuild.bat gc64 ' + os.getenv('WINDOWS_DBG_BUILD', '') + ' static') ++ os.system('msvcbuild.bat ' + os.getenv('WINDOWS_DBG_BUILD', '') + ' static') + os.makedirs(dst_dir 
+ "/lib", exist_ok=True) + shutil.copy("lua51.lib", dst_dir + "/lib") + os.makedirs(dst_dir + "/include/luajit-2.1", exist_ok=True) diff --git a/bazel/foreign_cc/moonjit.patch b/bazel/foreign_cc/moonjit.patch index 99ac22fb04fe..5bb745875132 100644 --- a/bazel/foreign_cc/moonjit.patch +++ b/bazel/foreign_cc/moonjit.patch @@ -3,7 +3,7 @@ new file mode 100644 index 00000000..dab3606c --- /dev/null +++ b/build.py -@@ -0,0 +1,56 @@ +@@ -0,0 +1,39 @@ +#!/usr/bin/env python3 + +import argparse @@ -41,24 +41,7 @@ index 00000000..dab3606c + + os.system('make -j{} V=1 PREFIX="{}" install'.format(os.cpu_count(), args.prefix)) + -+def win_main(): -+ src_dir = os.path.dirname(os.path.realpath(__file__)) -+ dst_dir = os.getcwd() + "/moonjit" -+ shutil.copytree(src_dir, os.path.basename(src_dir)) -+ os.chdir(os.path.basename(src_dir) + "/src") -+ os.system('msvcbuild.bat gc64 ' + os.getenv('WINDOWS_DBG_BUILD', '') + ' static') -+ os.makedirs(dst_dir + "/lib", exist_ok=True) -+ shutil.copy("lua51.lib", dst_dir + "/lib") -+ os.makedirs(dst_dir + "/include/moonjit-2.2", exist_ok=True) -+ for header in ["lauxlib.h", "luaconf.h", "lua.h", "lua.hpp", "luajit.h", "lualib.h"]: -+ shutil.copy(header, dst_dir + "/include/moonjit-2.2") -+ os.makedirs(dst_dir + "/bin", exist_ok=True) -+ shutil.copy("luajit.exe", dst_dir + "/bin") -+ -+if os.name == 'nt': -+ win_main() -+else: -+ main() ++main() + diff --git a/src/Makefile b/src/Makefile index dad9aeec..e10b3118 100644 @@ -104,78 +87,3 @@ index dad9aeec..e10b3118 100644 #E= @: ############################################################################## -diff --git a/src/msvcbuild.bat b/src/msvcbuild.bat -index c2d2c212..71f24422 100644 ---- a/src/msvcbuild.bat -+++ b/src/msvcbuild.bat -@@ -15,7 +15,7 @@ - @setlocal - @rem Add more debug flags here, e.g. 
DEBUGCFLAGS=/DLUA_USE_APICHECK - @set DEBUGCFLAGS= --@set LJCOMPILE=cl /nologo /c /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline -+@set LJCOMPILE=cl /nologo /c /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline /DLUAJIT_ENABLE_LUA52COMPAT - @set LJLINK=link /nologo - @set LJMT=mt /nologo - @set LJLIB=lib /nologo /nodefaultlib -@@ -24,10 +24,9 @@ - @set DASC=vm_x86.dasc - @set LJDLLNAME=lua51.dll - @set LJLIBNAME=lua51.lib --@set BUILDTYPE=release - @set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c lib_utf8.c - --%LJCOMPILE% host\minilua.c -+%LJCOMPILE% /O2 host\minilua.c - @if errorlevel 1 goto :BAD - %LJLINK% /out:minilua.exe minilua.obj - @if errorlevel 1 goto :BAD -@@ -50,7 +49,7 @@ if exist minilua.exe.manifest^ - minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h %DASC% - @if errorlevel 1 goto :BAD - --%LJCOMPILE% /I "." /I %DASMDIR% host\buildvm*.c -+%LJCOMPILE% /O2 /I "." 
/I %DASMDIR% host\buildvm*.c - @if errorlevel 1 goto :BAD - %LJLINK% /out:buildvm.exe buildvm*.obj - @if errorlevel 1 goto :BAD -@@ -74,25 +73,35 @@ buildvm -m folddef -o lj_folddef.h lj_opt_fold.c - - @if "%1" neq "debug" goto :NODEBUG - @shift --@set BUILDTYPE=debug --@set LJCOMPILE=%LJCOMPILE% /Zi %DEBUGCFLAGS% -+@set LJCOMPILE=%LJCOMPILE% /O0 /Z7 -+@set LJLINK=%LJLINK% /debug /opt:ref /opt:icf /incremental:no -+@set LJCRTDBG=d -+@goto :ENDDEBUG - :NODEBUG --@set LJLINK=%LJLINK% /%BUILDTYPE% -+@set LJCOMPILE=%LJCOMPILE% /O2 /Z7 -+@set LJLINK=%LJLINK% /release /incremental:no -+@set LJCRTDBG= -+:ENDDEBUG - @if "%1"=="amalg" goto :AMALGDLL - @if "%1"=="static" goto :STATIC --%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL lj_*.c lib_*.c -+@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG% -+LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c - @if errorlevel 1 goto :BAD - %LJLINK% /DLL /out:%LJDLLNAME% lj_*.obj lib_*.obj - @if errorlevel 1 goto :BAD - @goto :MTDLL - :STATIC -+@shift -+@set LJCOMPILE=%LJCOMPILE% /MT%LJCRTDBG% - %LJCOMPILE% lj_*.c lib_*.c - @if errorlevel 1 goto :BAD - %LJLIB% /OUT:%LJLIBNAME% lj_*.obj lib_*.obj - @if errorlevel 1 goto :BAD - @goto :MTDLL - :AMALGDLL --%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL ljamalg.c -+@shift -+@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG% -+%LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c - @if errorlevel 1 goto :BAD - %LJLINK% /DLL /out:%LJDLLNAME% ljamalg.obj lj_vm.obj - @if errorlevel 1 goto :BAD diff --git a/bazel/repositories_extra.bzl b/bazel/repositories_extra.bzl index 3c76127ea3eb..3aafc9528d80 100644 --- a/bazel/repositories_extra.bzl +++ b/bazel/repositories_extra.bzl @@ -15,7 +15,7 @@ def _python_deps(): # project_url = "https://github.com/yaml/pyyaml", # version = "5.3.1", # last_update = "2020-03-18" - # use_category = ["other"], + # use_category = ["devtools"], # cpe = "cpe:2.3:a:pyyaml:pyyaml:*", ) pip3_import( @@ -63,7 +63,7 @@ def _python_deps(): # project_url = "https://clang.llvm.org/", # version = "10.0.1", # last_update = "2020-07-21" - # 
use_category = ["other"], + # use_category = ["devtools"], # cpe = "cpe:2.3:a:llvm:clang:*", ) pip3_import( @@ -75,7 +75,7 @@ def _python_deps(): # project_url = "https://github.com/yaml/pyyaml", # version = "5.3.1", # last_update = "2020-03-18" - # use_category = ["other"], + # use_category = ["docs"], # cpe = "cpe:2.3:a:pyyaml:pyyaml:*", ) pip3_import( @@ -87,14 +87,14 @@ def _python_deps(): # project_url = "http://thrift.apache.org/", # version = "0.11.0", # last_update = "2017-12-07" - # use_category = ["dataplane"], + # use_category = ["test"], # cpe = "cpe:2.3:a:apache:thrift:*", # project_name = "Six: Python 2 and 3 Compatibility Library", # project_url = "https://six.readthedocs.io/", # version = "1.15.0", # last_update = "2020-05-21" - # use_category = ["dataplane"], + # use_category = ["test"], ) # Envoy deps that rely on a first stage of dependency loading in envoy_dependencies(). diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index da172933473c..6a631c5a3e6b 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -237,13 +237,15 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "LuaJIT", project_desc = "Just-In-Time compiler for Lua", project_url = "https://luajit.org", - version = "2.1.0-beta3", - sha256 = "409f7fe570d3c16558e594421c47bdd130238323c9d6fd6c83dedd2aaeb082a8", + # The last release version, 2.1.0-beta3 has a number of CVEs filed + # against it. These may not impact correct non-malicious Lua code, but for prudence we bump. 
+ version = "1d8b747c161db457e032a023ebbff511f5de5ec2", + sha256 = "20a159c38a98ecdb6368e8d655343b6036622a29a1621da9dc303f7ed9bf37f3", strip_prefix = "LuaJIT-{version}", - urls = ["https://github.com/LuaJIT/LuaJIT/archive/v{version}.tar.gz"], + urls = ["https://github.com/LuaJIT/LuaJIT/archive/{version}.tar.gz"], + last_updated = "2020-10-13", use_category = ["dataplane_ext"], extensions = ["envoy.filters.http.lua"], - last_updated = "2017-11-07", cpe = "cpe:2.3:a:luajit:luajit:*", ), com_github_moonjit_moonjit = dict( @@ -268,7 +270,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( strip_prefix = "nghttp2-{version}", urls = ["https://github.com/nghttp2/nghttp2/releases/download/v{version}/nghttp2-{version}.tar.gz"], use_category = ["controlplane", "dataplane_core"], - last_updated = "2020-06-02", + last_updated = "2020-06-03", cpe = "cpe:2.3:a:nghttp2:nghttp2:*", ), io_opentracing_cpp = dict( @@ -521,8 +523,14 @@ REPOSITORY_LOCATIONS_SPEC = dict( version = "0.23.7", sha256 = "0310e837aed522875791750de44408ec91046c630374990edd51827cb169f616", urls = ["https://github.com/bazelbuild/rules_go/releases/download/v{version}/rules_go-v{version}.tar.gz"], - use_category = ["build"], + use_category = ["build", "api"], last_updated = "2020-08-06", + implied_untracked_deps = [ + "com_github_golang_protobuf", + "io_bazel_rules_nogo", + "org_golang_google_protobuf", + "org_golang_x_tools", + ], ), rules_cc = dict( project_name = "C++ rules for Bazel", @@ -586,7 +594,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", ], - cpe = "cpe:2.3:a:llvm:*", + cpe = "cpe:2.3:a:llvm:*:*", ), com_github_wavm_wavm = dict( project_name = "WAVM", @@ -822,8 +830,8 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "WebAssembly for Proxies (C++ host implementation)", project_desc = "WebAssembly for Proxies (C++ host implementation)", project_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-host", - version = "49ed20e895b728aae6b811950a2939ecbaf76f7c", - sha256 
= "fa03293d01450b9164f8f56ef9227301f7d1af4f373f996400f75c93f6ebc822", + version = "c5658d34979abece30882b1eeaa95b6ee965d825", + sha256 = "dc3a794424b7679c3dbcf23548e202aa01e9f9093791b95446b99e8524e03c4f", strip_prefix = "proxy-wasm-cpp-host-{version}", urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-host/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], @@ -834,7 +842,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", ], - last_updated = "2020-10-09", + last_updated = "2020-10-16", cpe = "N/A", ), # TODO: upgrade to the latest version (1.41 currently fails tests) @@ -853,14 +861,14 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Bazel rust rules", project_desc = "Bazel rust rules (used by Wasm)", project_url = "https://github.com/bazelbuild/rules_rust", - version = "fda9a1ce6482973adfda022cadbfa6b300e269c3", - sha256 = "484a2b2b67cd2d1fa1054876de7f8d291c4b203fd256bc8cbea14d749bb864ce", + version = "fb90a7484800157fbb8a5904fbeb608dc1effc0c", + sha256 = "cbb253b8c5ab1a3c1787790f900e7d6774e95ba038714fc0f710935e62f30f5f", # Last commit where "out_binary = True" works. 
# See: https://github.com/bazelbuild/rules_rust/issues/386 strip_prefix = "rules_rust-{version}", urls = ["https://github.com/bazelbuild/rules_rust/archive/{version}.tar.gz"], use_category = ["test_only"], - last_updated = "2020-10-09", + last_updated = "2020-10-15", ), rules_antlr = dict( project_name = "ANTLR Rules for Bazel", diff --git a/bazel/wasm/wasm.bzl b/bazel/wasm/wasm.bzl index 0e7a84da2e75..5a20b46837a1 100644 --- a/bazel/wasm/wasm.bzl +++ b/bazel/wasm/wasm.bzl @@ -115,7 +115,7 @@ def envoy_wasm_cc_binary(name, tags = [], **kwargs): wasm_cc_binary(name, tags, repository = "@envoy", **kwargs) def wasm_rust_binary(name, tags = [], **kwargs): - wasm_name = "_wasm_" + (name if not ".wasm" in name else name.strip(".wasm")) + wasm_name = "_wasm_" + name.replace(".", "_") kwargs.setdefault("visibility", ["//visibility:public"]) rust_binary( diff --git a/ci/Dockerfile-envoy b/ci/Dockerfile-envoy index eecb68be7d17..435250d08185 100644 --- a/ci/Dockerfile-envoy +++ b/ci/Dockerfile-envoy @@ -28,7 +28,7 @@ RUN mkdir -p /etc/envoy ARG ENVOY_BINARY_SUFFIX=_stripped ADD ${TARGETPLATFORM}/build_release${ENVOY_BINARY_SUFFIX}/* /usr/local/bin/ -ADD configs/google_com_proxy.v2.yaml /etc/envoy/envoy.yaml +ADD configs/google_com_proxy.yaml /etc/envoy/envoy.yaml EXPOSE 10000 diff --git a/ci/Dockerfile-envoy-alpine b/ci/Dockerfile-envoy-alpine index de13be43162d..b7bfba617f80 100644 --- a/ci/Dockerfile-envoy-alpine +++ b/ci/Dockerfile-envoy-alpine @@ -1,7 +1,7 @@ FROM frolvlad/alpine-glibc:alpine-3.12_glibc-2.31 RUN mkdir -p /etc/envoy -ADD configs/google_com_proxy.v2.yaml /etc/envoy/envoy.yaml +ADD configs/google_com_proxy.yaml /etc/envoy/envoy.yaml RUN apk add --no-cache shadow su-exec \ && addgroup -S envoy && adduser --no-create-home -S envoy -G envoy diff --git a/ci/Dockerfile-envoy-windows b/ci/Dockerfile-envoy-windows index b8f8f3c958d9..4b0db0161531 100644 --- a/ci/Dockerfile-envoy-windows +++ b/ci/Dockerfile-envoy-windows @@ -8,7 +8,7 @@ RUN setx path 
"%path%;c:\Program Files\envoy" ADD ["windows/amd64/envoy.exe", "C:/Program Files/envoy/"] RUN mkdir "C:\\ProgramData\\envoy" -ADD ["configs/google_com_proxy.v2.yaml", "C:/ProgramData/envoy/envoy.yaml"] +ADD ["configs/google_com_proxy.yaml", "C:/ProgramData/envoy/envoy.yaml"] # Replace temp path with Windows temp path RUN powershell -Command "(cat C:\ProgramData\envoy\envoy.yaml -raw) -replace '/tmp/','C:\Windows\Temp\' | Set-Content -Encoding Ascii C:\ProgramData\envoy\envoy.yaml" diff --git a/ci/README.md b/ci/README.md index 54dbaef534fe..ccef23fb5bf2 100644 --- a/ci/README.md +++ b/ci/README.md @@ -15,9 +15,9 @@ binary built from the latest tip of master that passed tests. ## Alpine Envoy image -Minimal images based on Alpine Linux allow for quicker deployment of Envoy. Two Alpine based images are built, -one with an Envoy binary with debug (`envoyproxy/envoy-alpine-debug`) symbols and one stripped of them (`envoyproxy/envoy-alpine`). -Both images are pushed with two different tags: `` and `latest`. Parallel to the Ubuntu images above, `` corresponds to the +Minimal images based on Alpine Linux allow for quicker deployment of Envoy. The Alpine base image is only built with symbols stripped. +To get the binary with symbols, use the corresponding Ubuntu based debug image. The image is pushed with two different tags: +`` and `latest`. Parallel to the Ubuntu images above, `` corresponds to the master commit at which the binary was compiled, and `latest` corresponds to a binary built from the latest tip of master that passed tests. 
## Windows 2019 Envoy image diff --git a/ci/do_ci.sh b/ci/do_ci.sh index f3958aeaedf6..967e34558512 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -282,7 +282,7 @@ elif [[ "$CI_TARGET" == "bazel.compile_time_options" ]]; then "--define" "wasm=disabled" "--define" "path_normalization_by_default=true" "--define" "deprecated_features=disabled" - "--define" "use_new_codecs_in_integration_tests=true" + "--define" "use_new_codecs_in_integration_tests=false" "--define" "tcmalloc=gperftools" "--define" "zlib=ng") @@ -414,6 +414,8 @@ elif [[ "$CI_TARGET" == "docs" ]]; then # Validate dependency relationships between core/extensions and external deps. tools/dependency/validate_test.py tools/dependency/validate.py + # Validate the CVE scanner works. TODO(htuch): create a dedicated tools CI target. + python3.8 tools/dependency/cve_scan_test.py # Build docs. BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" docs/build.sh exit 0 diff --git a/ci/docker_ci.sh b/ci/docker_ci.sh index cafe0ee3ca86..3bd584923bdf 100755 --- a/ci/docker_ci.sh +++ b/ci/docker_ci.sh @@ -125,7 +125,7 @@ if is_windows; then BUILD_COMMAND=("build") else # "-google-vrp" must come afer "" to ensure we rebuild the local base image dependency. 
- BUILD_TYPES=("" "-debug" "-alpine" "-alpine-debug" "-google-vrp") + BUILD_TYPES=("" "-debug" "-alpine" "-google-vrp") # Configure docker-buildx tools BUILD_COMMAND=("buildx" "build") diff --git a/configs/Dockerfile b/configs/Dockerfile index 2d7b7a6a5e3b..ac1bc7aeece8 100644 --- a/configs/Dockerfile +++ b/configs/Dockerfile @@ -3,5 +3,5 @@ FROM envoyproxy/envoy-dev:latest RUN apt-get update -COPY google_com_proxy.v2.yaml /etc/envoy.yaml +COPY google_com_proxy.yaml /etc/envoy.yaml CMD /usr/local/bin/envoy -c /etc/envoy.yaml diff --git a/configs/access_log_format_helper.template.yaml b/configs/access_log_format_helper.template.yaml new file mode 100644 index 000000000000..9861a51e9bfb --- /dev/null +++ b/configs/access_log_format_helper.template.yaml @@ -0,0 +1,15 @@ +{% macro ingress_sampled_log() -%} + log_format: {text_format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n"} +{% endmacro %} + +{% macro ingress_full() -%} + log_format: {text_format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n"} +{% endmacro %} + +{% macro egress_error_log() -%} + log_format: {text_format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\"\n"} +{% endmacro %} + +{% macro egress_error_amazon_service() -%} + log_format: {text_format: 
"[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\" \"%RESP(X-AMZN-RequestId)%\"\n"} +{% endmacro %} diff --git a/configs/access_log_format_helper_v2.template.yaml b/configs/access_log_format_helper_v2.template.yaml deleted file mode 100644 index 7a5d711c088b..000000000000 --- a/configs/access_log_format_helper_v2.template.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{% macro ingress_sampled_log() -%} - format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n" -{% endmacro %} - -{% macro ingress_full() -%} - format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n" -{% endmacro %} - -{% macro egress_error_log() -%} - format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\"\n" -{% endmacro %} - -{% macro egress_error_amazon_service() -%} - format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" 
\"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\" \"%RESP(X-AMZN-RequestId)%\"\n" -{% endmacro %} diff --git a/configs/configgen.py b/configs/configgen.py index d5409c481a91..8f5e20cd562d 100755 --- a/configs/configgen.py +++ b/configs/configgen.py @@ -111,16 +111,16 @@ def generate_config(template_path, template, output_file, **context): # Generate a demo config for the main front proxy. This sets up both HTTP and HTTPS listeners, # as well as a listener for the double proxy to connect to via SSL client authentication. generate_config(SCRIPT_DIR, - 'envoy_front_proxy_v2.template.yaml', - '{}/envoy_front_proxy.v2.yaml'.format(OUT_DIR), + 'envoy_front_proxy.template.yaml', + '{}/envoy_front_proxy.yaml'.format(OUT_DIR), clusters=front_envoy_clusters, tracing=tracing_enabled) # Generate a demo config for the double proxy. This sets up both an HTTP and HTTPS listeners, # and backhauls the traffic to the main front proxy. generate_config(SCRIPT_DIR, - 'envoy_double_proxy_v2.template.yaml', - '{}/envoy_double_proxy.v2.yaml'.format(OUT_DIR), + 'envoy_double_proxy.template.yaml', + '{}/envoy_double_proxy.yaml'.format(OUT_DIR), tracing=tracing_enabled) # Generate a demo config for the service to service (local) proxy. This sets up several different @@ -132,14 +132,12 @@ def generate_config(template_path, template, output_file, **context): # that Envoy proxies to listens on its own port. # optional mongo ports: built from mongos_servers above. 
generate_config(SCRIPT_DIR, - 'envoy_service_to_service_v2.template.yaml', + 'envoy_service_to_service.template.yaml', '{}/envoy_service_to_service.yaml'.format(OUT_DIR), internal_virtual_hosts=service_to_service_envoy_clusters, external_virtual_hosts=external_virtual_hosts, mongos_servers=mongos_servers) -for google_ext in ['v2.yaml']: - shutil.copy(os.path.join(SCRIPT_DIR, 'google_com_proxy.%s' % google_ext), OUT_DIR) - -shutil.copy(os.path.join(SCRIPT_DIR, 'encapsulate_in_connect.v3.yaml'), OUT_DIR) -shutil.copy(os.path.join(SCRIPT_DIR, 'terminate_connect.v3.yaml'), OUT_DIR) +shutil.copy(os.path.join(SCRIPT_DIR, 'google_com_proxy.yaml'), OUT_DIR) +shutil.copy(os.path.join(SCRIPT_DIR, 'encapsulate_in_connect.yaml'), OUT_DIR) +shutil.copy(os.path.join(SCRIPT_DIR, 'terminate_connect.yaml'), OUT_DIR) diff --git a/configs/configgen.sh b/configs/configgen.sh index 2ef145c4af75..d68db9d46784 100755 --- a/configs/configgen.sh +++ b/configs/configgen.sh @@ -9,16 +9,20 @@ shift mkdir -p "$OUT_DIR/certs" mkdir -p "$OUT_DIR/lib" +mkdir -p "$OUT_DIR/protos" "$CONFIGGEN" "$OUT_DIR" for FILE in "$@"; do case "$FILE" in - *.pem) + *.pem|*.der) cp "$FILE" "$OUT_DIR/certs" ;; *.lua) cp "$FILE" "$OUT_DIR/lib" ;; + *.pb) + cp "$FILE" "$OUT_DIR/protos" + ;; *) FILENAME="$(echo "$FILE" | sed -e 's/.*examples\///g')" @@ -29,4 +33,4 @@ for FILE in "$@"; do done # tar is having issues with -C for some reason so just cd into OUT_DIR. 
-(cd "$OUT_DIR"; tar -hcvf example_configs.tar -- *.yaml certs/*.pem lib/*.lua) +(cd "$OUT_DIR"; tar -hcvf example_configs.tar -- *.yaml certs/*.pem certs/*.der protos/*.pb lib/*.lua) diff --git a/configs/encapsulate_in_connect.v3.yaml b/configs/encapsulate_in_connect.yaml similarity index 100% rename from configs/encapsulate_in_connect.v3.yaml rename to configs/encapsulate_in_connect.yaml diff --git a/configs/envoy_double_proxy_v2.template.yaml b/configs/envoy_double_proxy.template.yaml similarity index 82% rename from configs/envoy_double_proxy_v2.template.yaml rename to configs/envoy_double_proxy.template.yaml index feb9f3e1f95f..aea9127c74f6 100644 --- a/configs/envoy_double_proxy_v2.template.yaml +++ b/configs/envoy_double_proxy.template.yaml @@ -11,7 +11,7 @@ transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext common_tls_context: tls_certificates: - certificate_chain: @@ -29,7 +29,7 @@ filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: AUTO stat_prefix: router route_config: @@ -47,24 +47,23 @@ http_filters: - name: envoy.filters.http.health_check typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck + "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck pass_through_mode: false headers: - exact_match: /healthcheck name: :path - name: envoy.filters.http.buffer typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer + "@type": type.googleapis.com/envoy.extensions.filters.http.buffer.v3.Buffer max_request_bytes: 5242880 - 
name: envoy.filters.http.router typed_config: {} {% if tracing %} tracing: - operation_name: INGRESS provider: name: envoy.tracers.lightstep typed_config: - "@type": type.googleapis.com/envoy.config.trace.v2.LightstepConfig + "@type": type.googleapis.com/envoy.config.trace.v3.LightstepConfig access_token_file: "/etc/envoy/lightstep_access_token" collector_cluster: lightstep_saas {% endif %} @@ -89,9 +88,10 @@ runtime_key: access_log.access_error.duration - traceable_filter: {} typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: /var/log/envoy/access_error.log - format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%REQ(X-LYFT-USER-ID)%\" \"%RESP(GRPC-STATUS)%\"\n" + log_format: + text_format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%REQ(X-LYFT-USER-ID)%\" \"%RESP(GRPC-STATUS)%\"\n" {% if proxy_proto %} use_remote_address: true {%endif -%} @@ -141,7 +141,7 @@ static_resources: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext common_tls_context: tls_certificates: - certificate_chain: @@ -151,7 +151,7 @@ static_resources: validation_context: trusted_ca: filename: certs/cacert.pem - match_subject_alt_names: + match_subject_alt_names: exact: "front-proxy.yourcompany.net" 
http2_protocol_options: {} - name: lightstep_saas @@ -172,18 +172,18 @@ static_resources: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext common_tls_context: validation_context: trusted_ca: filename: certs/cacert.pem - match_subject_alt_names: + match_subject_alt_names: exact: "collector-grpc.lightstep.com" flags_path: "/etc/envoy/flags" stats_sinks: - name: envoy.stat_sinks.statsd typed_config: - "@type": type.googleapis.com/envoy.config.metrics.v2.StatsdSink + "@type": type.googleapis.com/envoy.config.metrics.v3.StatsdSink tcp_cluster_name: statsd layered_runtime: layers: diff --git a/configs/envoy_front_proxy_v2.template.yaml b/configs/envoy_front_proxy.template.yaml similarity index 83% rename from configs/envoy_front_proxy_v2.template.yaml rename to configs/envoy_front_proxy.template.yaml index a9b9bc97f859..1dcb1e6f919f 100644 --- a/configs/envoy_front_proxy_v2.template.yaml +++ b/configs/envoy_front_proxy.template.yaml @@ -1,4 +1,4 @@ -{% import 'routing_helper_v2.template.yaml' as helper -%} +{% import 'routing_helper.template.yaml' as helper -%} {% macro router_file_content() -%}{% include kwargs['router_file'] -%}{% endmacro -%} {% macro listener(protocol, address, port_value, proxy_proto, tls, tracing) -%} name: not_required_for_static_listeners @@ -12,7 +12,7 @@ - transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext common_tls_context: alpn_protocols: h2,http/1.1 tls_certificates: @@ -35,7 +35,7 @@ filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": 
type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: AUTO stat_prefix: router {% if proxy_proto -%} @@ -43,18 +43,18 @@ {%endif-%} stat_prefix: ingress_http route_config: - {{ router_file_content(router_file='envoy_router_v2.template.yaml')|indent(10) }} + {{ router_file_content(router_file='envoy_router.template.yaml')|indent(10) }} http_filters: - name: envoy.filters.http.health_check typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck + "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck pass_through_mode: false headers: - name: ":path" exact_match: "/healthcheck" - name: envoy.filters.http.buffer typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer + "@type": type.googleapis.com/envoy.extensions.filters.http.buffer.v3.Buffer max_request_bytes: 5242880 - name: envoy.filters.http.ratelimit typed_config: @@ -70,11 +70,10 @@ add_user_agent: true {% if tracing %} tracing: - operation_name: INGRESS provider: name: envoy.tracers.lightstep typed_config: - "@type": type.googleapis.com/envoy.config.trace.v2.LightstepConfig + "@type": type.googleapis.com/envoy.config.trace.v3.LightstepConfig collector_cluster: lightstep_saas access_token_file: "/etc/envoy/lightstep_access_token" {% endif %} @@ -99,9 +98,10 @@ runtime_key: access_log.access_error.duration - traceable_filter: {} typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: "/var/log/envoy/access_error.log" - format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" 
\"%REQ(X-LYFT-USER-ID)%\" \"%RESP(GRPC-STATUS)%\"\n" + log_format: + text_format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%REQ(X-LYFT-USER-ID)%\" \"%RESP(GRPC-STATUS)%\"\n" {% endmacro -%} static_resources: listeners: diff --git a/configs/envoy_router_v2.template.yaml b/configs/envoy_router.template.yaml similarity index 93% rename from configs/envoy_router_v2.template.yaml rename to configs/envoy_router.template.yaml index 0d09269b6cab..338363af6c8c 100644 --- a/configs/envoy_router_v2.template.yaml +++ b/configs/envoy_router.template.yaml @@ -1,4 +1,4 @@ -{% import 'routing_helper_v2.template.yaml' as helper with context -%} +{% import 'routing_helper.template.yaml' as helper with context -%} name: local_route virtual_hosts: - name: www diff --git a/configs/envoy_service_to_service_v2.template.yaml b/configs/envoy_service_to_service.template.yaml similarity index 90% rename from configs/envoy_service_to_service_v2.template.yaml rename to configs/envoy_service_to_service.template.yaml index 4ef5ac9d1a05..9237d117f035 100644 --- a/configs/envoy_service_to_service_v2.template.yaml +++ b/configs/envoy_service_to_service.template.yaml @@ -1,5 +1,5 @@ -{% import 'routing_helper_v2.template.yaml' as helper -%} -{% import 'access_log_format_helper_v2.template.yaml' as access_log_helper -%} +{% import 'routing_helper.template.yaml' as helper -%} +{% import 'access_log_format_helper.template.yaml' as access_log_helper -%} {% macro ingress_listener(protocol, address, port_value) -%} - address: socket_address: @@ -11,7 +11,7 @@ - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": 
type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: AUTO stat_prefix: ingress_http route_config: @@ -35,7 +35,7 @@ http_filters: - name: envoy.filters.http.health_check typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck + "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck pass_through_mode: true headers: - name: ":path" @@ -43,7 +43,7 @@ cache_time: 2.5s - name: envoy.filters.http.buffer typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer + "@type": type.googleapis.com/envoy.extensions.filters.http.buffer.v3.Buffer max_request_bytes: 5242880 - name: envoy.filters.http.router typed_config: {} @@ -52,7 +52,7 @@ filter: not_health_check_filter: {} typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: "/var/log/envoy/ingress_http.log" {{ access_log_helper.ingress_full()|indent(10)}} - name: envoy.access_loggers.file @@ -81,7 +81,7 @@ runtime_key: access_log.access_error.duration - not_health_check_filter: {} typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: "/var/log/envoy/ingress_http_error.log" {{ access_log_helper.ingress_sampled_log()|indent(10)}} - name: envoy.access_loggers.file @@ -92,7 +92,7 @@ - runtime_filter: runtime_key: access_log.ingress_http typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: "/var/log/envoy/ingress_http_sampled.log" {{ access_log_helper.ingress_sampled_log()|indent(10)}} common_http_protocol_options: @@ -111,7 +111,7 @@ static_resources: - filters: - name: 
envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: AUTO stat_prefix: egress_http route_config: @@ -149,7 +149,7 @@ static_resources: runtime_key: access_log.access_error.duration - traceable_filter: {} typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: "/var/log/envoy/egress_http_error.log" {{ access_log_helper.egress_error_log()|indent(10) }} use_remote_address: true @@ -177,7 +177,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: AUTO stat_prefix: egress_http rds: @@ -210,7 +210,7 @@ static_resources: runtime_key: access_log.access_error.duration - traceable_filter: {} typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: "/var/log/envoy/egress_http_error.log" {{ access_log_helper.egress_error_log()|indent(10) }} use_remote_address: true @@ -239,7 +239,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: AUTO common_http_protocol_options: idle_timeout: 840s @@ -259,7 +259,7 @@ static_resources: retry_policy: retry_on: 
connect-failure {% if host.get('host_rewrite', False) %} - host_rewrite: "{{host['host_rewrite']}}" + host_rewrite_literal: "{{host['host_rewrite']}}" {% endif %} {% endfor %} http_filters: @@ -295,7 +295,7 @@ static_resources: runtime_key: access_log.access_error.duration {% endif %} typed_config: - "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog path: "/var/log/envoy/egress_{{ mapping['name'] }}_http_error.log" {% if mapping.get('is_amzn_service', False) -%} {{ access_log_helper.egress_error_amazon_service()|indent(10) }} @@ -315,12 +315,12 @@ static_resources: - filters: - name: envoy.filters.network.tcp_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy stat_prefix: mongo_{{ key }} cluster: mongo_{{ key }} - name: envoy.filters.network.mongo_proxy typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.mongo_proxy.v2.MongoProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.mongo_proxy.v3.MongoProxy stat_prefix: "{{ key }}" access_log: "/var/log/envoy/mongo_{{ key }}.log" {% if value.get('ratelimit', False) %} @@ -346,7 +346,7 @@ static_resources: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext common_tls_context: validation_context: trusted_ca: @@ -413,7 +413,7 @@ static_resources: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext sni: www.main_website.com - name: local_service connect_timeout: 0.25s @@ -505,7 +505,7 @@ static_resources: 
transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext common_tls_context: validation_context: trusted_ca: @@ -552,7 +552,7 @@ flags_path: "/etc/envoy/flags" stats_sinks: - name: envoy.stat_sinks.statsd typed_config: - "@type": type.googleapis.com/envoy.config.metrics.v2.StatsdSink + "@type": type.googleapis.com/envoy.config.metrics.v3.StatsdSink tcp_cluster_name: statsd layered_runtime: layers: diff --git a/configs/freebind/freebind.yaml b/configs/freebind/freebind.yaml index 08214b8b044d..367e5ba3568a 100644 --- a/configs/freebind/freebind.yaml +++ b/configs/freebind/freebind.yaml @@ -17,7 +17,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http route_config: name: local_route diff --git a/configs/google_com_proxy.v2.yaml b/configs/google_com_proxy.yaml similarity index 83% rename from configs/google_com_proxy.v2.yaml rename to configs/google_com_proxy.yaml index 01d0869ea0b5..32e79bb306a9 100644 --- a/configs/google_com_proxy.v2.yaml +++ b/configs/google_com_proxy.yaml @@ -17,7 +17,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http route_config: name: local_route @@ -28,7 +28,7 @@ static_resources: - match: prefix: "/" route: - host_rewrite: www.google.com + host_rewrite_literal: www.google.com cluster: 
service_google http_filters: - name: envoy.filters.http.router @@ -51,5 +51,5 @@ static_resources: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext sni: www.google.com diff --git a/configs/original-dst-cluster/proxy_config.yaml b/configs/original-dst-cluster/proxy_config.yaml index 9797daf46b26..b2e925957cda 100644 --- a/configs/original-dst-cluster/proxy_config.yaml +++ b/configs/original-dst-cluster/proxy_config.yaml @@ -8,7 +8,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http route_config: name: local_service diff --git a/configs/routing_helper_v2.template.yaml b/configs/routing_helper.template.yaml similarity index 100% rename from configs/routing_helper_v2.template.yaml rename to configs/routing_helper.template.yaml diff --git a/configs/terminate_connect.v3.yaml b/configs/terminate_connect.yaml similarity index 100% rename from configs/terminate_connect.v3.yaml rename to configs/terminate_connect.yaml diff --git a/configs/using_deprecated_config.v2.yaml b/configs/using_deprecated_config.yaml similarity index 89% rename from configs/using_deprecated_config.v2.yaml rename to configs/using_deprecated_config.yaml index 55ca2797acb9..a98e64f365b9 100644 --- a/configs/using_deprecated_config.v2.yaml +++ b/configs/using_deprecated_config.yaml @@ -17,7 +17,7 @@ static_resources: - filters: - name: envoy.filters.network.http_connection_manager typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": 
type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http route_config: name: local_route @@ -54,7 +54,7 @@ static_resources: transport_socket: name: envoy.transport_sockets.tls typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext + "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext sni: www.google.com tracing: http: diff --git a/docs/BUILD b/docs/BUILD index 15db81818225..aad5c89f0b65 100644 --- a/docs/BUILD +++ b/docs/BUILD @@ -9,20 +9,24 @@ exports_files(["protodoc_manifest.yaml"]) envoy_package() -# TODO(phlax): fix failing/excluded configs -# the following config only fails on windows: -# dns-cache-circuit-breaker: "Error: unable to read file: /etc/ssl/certs/ca-certificates.crt" - filegroup( name = "configs", srcs = glob( - ["root/**/*.yaml"], + [ + "root/**/*.yaml", + "root/**/*.pb", + ], exclude = [ - "root/intro/_include/life-of-a-request.yaml", + # TODO(phlax/windows-dev): figure out how to get this working on windows + # "Error: unable to read file: /etc/ssl/certs/ca-certificates.crt" + "root/configuration/http/http_filters/_include/dns-cache-circuit-breaker.yaml", "root/intro/arch_overview/security/_include/ssl.yaml", + ], + ) + select({ + "//bazel:windows_x86_64": [], + "//conditions:default": [ "root/configuration/http/http_filters/_include/dns-cache-circuit-breaker.yaml", - "root/configuration/http/http_filters/_include/grpc-reverse-bridge-filter.yaml", - "root/configuration/http/http_filters/_include/grpc-transcoder-filter.yaml", + "root/intro/arch_overview/security/_include/ssl.yaml", ], - ), + }), ) diff --git a/docs/build.sh b/docs/build.sh index ea46915b160e..c3f182a739c3 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -81,7 +81,7 @@ mkdir -p "${GENERATED_RST_DIR}"/intro/arch_overview/security ./docs/generate_extension_rst.py "${EXTENSION_DB_PATH}" "${GENERATED_RST_DIR}"/intro/arch_overview/security # 
Generate RST for external dependency docs in intro/arch_overview/security. -./docs/generate_external_dep_rst.py "${GENERATED_RST_DIR}"/intro/arch_overview/security +PYTHONPATH=. ./docs/generate_external_dep_rst.py "${GENERATED_RST_DIR}"/intro/arch_overview/security function generate_api_rst() { local proto_target diff --git a/docs/generate_external_dep_rst.py b/docs/generate_external_dep_rst.py index 3348df538837..07488d20831e 100755 --- a/docs/generate_external_dep_rst.py +++ b/docs/generate_external_dep_rst.py @@ -7,25 +7,7 @@ import sys import urllib.parse -from importlib.util import spec_from_loader, module_from_spec -from importlib.machinery import SourceFileLoader - - -# Shared Starlark/Python files must have a .bzl suffix for Starlark import, so -# we are forced to do this workaround. -def LoadModule(name, path): - spec = spec_from_loader(name, SourceFileLoader(name, path)) - module = module_from_spec(spec) - spec.loader.exec_module(module) - return module - - -envoy_repository_locations = LoadModule('envoy_repository_locations', - 'bazel/repository_locations.bzl') -api_repository_locations = LoadModule('api_repository_locations', - 'api/bazel/repository_locations.bzl') -repository_locations_utils = LoadModule('repository_locations_utils', - 'api/bazel/repository_locations_utils.bzl') +from tools.dependency import utils as dep_utils # Render a CSV table given a list of table headers, widths and list of rows @@ -110,10 +92,7 @@ def GetVersionUrl(metadata): Dep = namedtuple('Dep', ['name', 'sort_name', 'version', 'cpe', 'last_updated']) use_categories = defaultdict(lambda: defaultdict(list)) # Bin rendered dependencies into per-use category lists. 
- spec_loader = repository_locations_utils.load_repository_locations_spec - spec = spec_loader(envoy_repository_locations.REPOSITORY_LOCATIONS_SPEC) - spec.update(spec_loader(api_repository_locations.REPOSITORY_LOCATIONS_SPEC)) - for k, v in spec.items(): + for k, v in dep_utils.RepositoryLocations().items(): cpe = v.get('cpe', '') if cpe == 'N/A': cpe = '' diff --git a/docs/requirements.txt b/docs/requirements.txt index e9cc323fb37f..c8e98061b50e 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -116,9 +116,9 @@ sphinxcontrib-jsmath==1.0.1 \ sphinxcontrib-qthelp==1.0.3 \ --hash=sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72 \ --hash=sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6 -sphinxext-rediraffe==0.2.3 \ - --hash=sha256:3abc7f8c6c1fecb38e6613ffc1bfb7b0025e9cbb3929ed8aea6b20709571a69d \ - --hash=sha256:38e21589607c3149135fd94b87cdd28924fe3857e2755ff70ded948b4da26711 +sphinxext-rediraffe==0.2.4 \ + --hash=sha256:5428fb614d1fbc16964ba587aaa6b1c8ec92fd0b1d01bb6b369637446f43a27d \ + --hash=sha256:13e6474342df6643723976a3429edfc5e811e9f48b9f832c9fb6bdd9fe53fd83 sphinxcontrib-serializinghtml==1.1.4 \ --hash=sha256:eaa0eccc86e982a9b939b2b82d12cc5d013385ba5eadcc7e4fed23f4405f77bc \ --hash=sha256:f242a81d423f59617a8e5cf16f5d4d74e28ee9a66f9e5b637a18082991db5a9a diff --git a/docs/root/_include/ssl_stats.rst b/docs/root/_include/ssl_stats.rst new file mode 100644 index 000000000000..93f9b247a67e --- /dev/null +++ b/docs/root/_include/ssl_stats.rst @@ -0,0 +1,20 @@ +.. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + connection_error, Counter, Total TLS connection errors not including failed certificate verifications + handshake, Counter, Total successful TLS connection handshakes + session_reused, Counter, Total successful TLS session resumptions + no_certificate, Counter, Total successful TLS connections with no client certificate + fail_verify_no_cert, Counter, Total TLS connections that failed because of missing client certificate + fail_verify_error, Counter, Total TLS connections that failed CA verification + fail_verify_san, Counter, Total TLS connections that failed SAN verification + fail_verify_cert_hash, Counter, Total TLS connections that failed certificate pinning verification + ocsp_staple_failed, Counter, Total TLS connections that failed compliance with the OCSP policy + ocsp_staple_omitted, Counter, Total TLS connections that succeeded without stapling an OCSP response + ocsp_staple_responses, Counter, Total TLS connections where a valid OCSP response was available (irrespective of whether the client requested stapling) + ocsp_staple_requests, Counter, Total TLS connections where the client requested an OCSP staple + ciphers., Counter, Total successful TLS connections that used cipher + curves., Counter, Total successful TLS connections that used ECDHE curve + sigalgs., Counter, Total successful TLS connections that used signature algorithm + versions., Counter, Total successful TLS connections that used protocol version diff --git a/docs/root/configuration/http/http_filters/_include/grpc-reverse-bridge-filter.yaml b/docs/root/configuration/http/http_filters/_include/grpc-reverse-bridge-filter.yaml index 0e7215933c4d..dcbd0d06ff63 100644 --- a/docs/root/configuration/http/http_filters/_include/grpc-reverse-bridge-filter.yaml +++ b/docs/root/configuration/http/http_filters/_include/grpc-reverse-bridge-filter.yaml @@ -31,7 +31,7 @@ static_resources: - match: prefix: 
"/route-with-filter-disabled" route: - host_rewrite: localhost + host_rewrite_literal: localhost cluster: grpc timeout: 5.00s # per_filter_config disables the filter for this route @@ -42,7 +42,7 @@ static_resources: - match: prefix: "/route-with-filter-enabled" route: - host_rewrite: localhost + host_rewrite_literal: localhost cluster: other timeout: 5.00s http_filters: diff --git a/docs/root/configuration/http/http_filters/_include/grpc-transcoder-filter.yaml b/docs/root/configuration/http/http_filters/_include/grpc-transcoder-filter.yaml index b2791037f7ee..f9c20ddcf2e9 100644 --- a/docs/root/configuration/http/http_filters/_include/grpc-transcoder-filter.yaml +++ b/docs/root/configuration/http/http_filters/_include/grpc-transcoder-filter.yaml @@ -29,7 +29,7 @@ static_resources: - name: envoy.filters.http.grpc_json_transcoder typed_config: "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder - proto_descriptor: "/tmp/envoy/proto.pb" + proto_descriptor: "protos/helloworld.pb" services: ["helloworld.Greeter"] print_options: add_whitespace: true diff --git a/docs/root/configuration/http/http_filters/_include/helloworld.pb b/docs/root/configuration/http/http_filters/_include/helloworld.pb new file mode 100644 index 000000000000..88eda67b2cd1 Binary files /dev/null and b/docs/root/configuration/http/http_filters/_include/helloworld.pb differ diff --git a/docs/root/configuration/http/http_filters/_include/helloworld.proto b/docs/root/configuration/http/http_filters/_include/helloworld.proto new file mode 100644 index 000000000000..9b5615252428 --- /dev/null +++ b/docs/root/configuration/http/http_filters/_include/helloworld.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; + +package helloworld; + +import "google/api/annotations.proto"; + +// The greeting service definition. 
+service Greeter { + // Sends a greeting + rpc SayHello(HelloRequest) returns (HelloReply) { + option (google.api.http) = { + get: "/say" + }; + } +} + +// The request message containing the user's name. +message HelloRequest { + string name = 1; +} + +// The response message containing the greetings +message HelloReply { + string message = 1; +} diff --git a/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst b/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst index 7969152ec85d..c89093b84658 100644 --- a/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst +++ b/docs/root/configuration/http/http_filters/grpc_json_transcoder_filter.rst @@ -29,17 +29,18 @@ To generate a protobuf descriptor set for the gRPC service, you'll also need to googleapis repository from GitHub before running protoc, as you'll need annotations.proto in your include path, to define the HTTP mapping. -.. code-block:: bash +.. code-block:: console - git clone https://github.com/googleapis/googleapis - GOOGLEAPIS_DIR= + $ git clone https://github.com/googleapis/googleapis + $ GOOGLEAPIS_DIR= -Then run protoc to generate the descriptor set from bookstore.proto: +Then run protoc to generate the descriptor set. For example using the test +:repo:`bookstore.proto ` provided in the Envoy repository: -.. code-block:: bash +.. code-block:: console - protoc -I$(GOOGLEAPIS_DIR) -I. --include_imports --include_source_info \ - --descriptor_set_out=proto.pb test/proto/bookstore.proto + $ protoc -I$(GOOGLEAPIS_DIR) -I. --include_imports --include_source_info \ + --descriptor_set_out=proto.pb test/proto/bookstore.proto If you have more than one proto source files, you can pass all of them in one command. @@ -56,19 +57,17 @@ For example, with the following proto example, the router will process `/hellowo as the path, so the route config prefix `/say` won't match requests to `SayHello`. 
If you want to match the incoming request path, set `match_incoming_request_route` to true. -.. code-block:: proto +.. literalinclude:: _include/helloworld.proto + :language: proto - package helloworld; +Assuming you have checked out the google APIs as described above, and have saved the proto file as +``protos/helloworld.proto`` you can build it with: + +.. code-block:: console + + $ protoc -I$(GOOGLEAPIS_DIR) -I. --include_imports --include_source_info \ + --descriptor_set_out=protos/helloworld.pb protos/helloworld.proto - // The greeting service definition. - service Greeter { - // Sends a greeting - rpc SayHello (HelloRequest) returns (HelloReply) { - option (google.api.http) = { - get: "/say" - }; - } - } Sending arbitrary content ------------------------- diff --git a/docs/root/configuration/http/http_filters/oauth2_filter.rst b/docs/root/configuration/http/http_filters/oauth2_filter.rst index bf10c1839e93..acbdda6780c7 100644 --- a/docs/root/configuration/http/http_filters/oauth2_filter.rst +++ b/docs/root/configuration/http/http_filters/oauth2_filter.rst @@ -14,36 +14,82 @@ OAuth2 Example configuration --------------------- -.. code-block:: - - http_filters: - - name: oauth2 - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3alpha.OAuth2 - token_endpoint: - cluster: oauth - uri: oauth.com/token - timeout: 3s - authorization_endpoint: https://oauth.com/oauth/authorize/ - redirect_uri: "%REQ(:x-forwarded-proto)%://%REQ(:authority)%/callback" - redirect_path_matcher: - path: - exact: /callback - signout_path: - path: - exact: /signout - credentials: - client_id: foo - token_secret: - name: token - hmac_secret: - name: hmac +The following is an example configuring the filter. + +.. 
validated-code-block:: yaml + :type-name: envoy.extensions.filters.http.oauth2.v3alpha.OAuth2 + + config: + token_endpoint: + cluster: oauth + uri: oauth.com/token timeout: 3s - - name: envoy.router + authorization_endpoint: https://oauth.com/oauth/authorize/ + redirect_uri: "%REQ(:x-forwarded-proto)%://%REQ(:authority)%/callback" + redirect_path_matcher: + path: + exact: /callback + signout_path: + path: + exact: /signout + credentials: + client_id: foo + token_secret: + name: token + sds_config: + path: "/etc/envoy/token-secret.yaml" + hmac_secret: + name: hmac + sds_config: + path: "/etc/envoy/hmac.yaml" + +And the below code block is an example of how we employ it as one of +:ref:`HttpConnectionManager HTTP filters +` + +.. code-block:: yaml + + static_resources: + listeners: + - name: + address: + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + http_filters: + - name: envoy.filters.http.oauth2 + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.oauth2.v3alpha.OAuth2 + config: + token_endpoint: + cluster: oauth + uri: oauth.com/token + timeout: 3s + authorization_endpoint: https://oauth.com/oauth/authorize/ + redirect_uri: "%REQ(:x-forwarded-proto)%://%REQ(:authority)%/callback" + redirect_path_matcher: + path: + exact: /callback + signout_path: + path: + exact: /signout + credentials: + client_id: foo + token_secret: + name: token + sds_config: + path: "/etc/envoy/token-secret.yaml" + hmac_secret: + name: hmac + sds_config: + path: "/etc/envoy/hmac.yaml" + - name: envoy.router clusters: - name: service - ... + # ... 
- name: auth connect_timeout: 5s type: LOGICAL_DNS @@ -53,21 +99,25 @@ Example configuration endpoints: - lb_endpoints: - endpoint: - address: { socket_address: { address: auth.example.com, port_value: 443 }} - tls_context: { sni: auth.example.com } + address: + socket_address: + address: auth.example.com + port_value: 443 + tls_context: + sni: auth.example.com Notes ----- -This module does not currently provide much Cross-Site-Request-Forgery protection for the redirect loop -to the OAuth server and back. +This module does not currently provide much Cross-Site-Request-Forgery protection for the redirect +loop to the OAuth server and back. The service must be served over HTTPS for this filter to work, as the cookies use `;secure`. Statistics ---------- -The OAuth filter outputs statistics in the *.* namespace. +The OAuth2 filter outputs statistics in the *.* namespace. .. csv-table:: :header: Name, Type, Description diff --git a/docs/root/configuration/listeners/lds.rst b/docs/root/configuration/listeners/lds.rst index a54c9ab89b0f..f5b8c778e95e 100644 --- a/docs/root/configuration/listeners/lds.rst +++ b/docs/root/configuration/listeners/lds.rst @@ -18,10 +18,15 @@ The semantics of listener updates are as follows: * Listeners are effectively constant once created. Thus, when a listener is updated, an entirely new listener is created (with the same listen socket). This listener goes through the same warming process described above for a newly added listener. -* When a listener is updated or removed, the old listener will be placed into a "draining" state +* When a listener is removed, the old listener will be placed into a "draining" state much like when the entire server is drained for restart. Connections owned by the listener will be gracefully closed (if possible) for some period of time before the listener is removed and any remaining connections are closed. The drain time is set via the :option:`--drain-time-s` option. 
+* When a tcp listener is updated, if the new listener contains a subset of filter chains in the old listener, + the connections owned by these overlapping filter chains remain open. Only the connections owned by the + removed filter chains will be drained following the above pattern. Note that if any global listener attributes are + changed, the entire listener (and all filter chains) are drained similar to removal above. See + :ref:`filter chain only update ` for detailed rules to reason about the impacted filter chains. .. note:: diff --git a/docs/root/configuration/listeners/stats.rst b/docs/root/configuration/listeners/stats.rst index eb82810f6972..4c1d032c13d9 100644 --- a/docs/root/configuration/listeners/stats.rst +++ b/docs/root/configuration/listeners/stats.rst @@ -22,22 +22,15 @@ Every listener has a statistics tree rooted at *listener.
.* with the fo downstream_pre_cx_active, Gauge, Sockets currently undergoing listener filter processing global_cx_overflow, Counter, Total connections rejected due to enforecement of the global connection limit no_filter_chain_match, Counter, Total connections that didn't match any filter chain - ssl.connection_error, Counter, Total TLS connection errors not including failed certificate verifications - ssl.handshake, Counter, Total successful TLS connection handshakes - ssl.session_reused, Counter, Total successful TLS session resumptions - ssl.no_certificate, Counter, Total successful TLS connections with no client certificate - ssl.fail_verify_no_cert, Counter, Total TLS connections that failed because of missing client certificate - ssl.fail_verify_error, Counter, Total TLS connections that failed CA verification - ssl.fail_verify_san, Counter, Total TLS connections that failed SAN verification - ssl.fail_verify_cert_hash, Counter, Total TLS connections that failed certificate pinning verification - ssl.ocsp_staple_failed, Counter, Total TLS connections that failed compliance with the OCSP policy - ssl.ocsp_staple_omitted, Counter, Total TLS connections that succeeded without stapling an OCSP response - ssl.ocsp_staple_responses, Counter, Total TLS connections where a valid OCSP response was available (irrespective of whether the client requested stapling) - ssl.ocsp_staple_requests, Counter, Total TLS connections where the client requested an OCSP staple - ssl.ciphers., Counter, Total successful TLS connections that used cipher - ssl.curves., Counter, Total successful TLS connections that used ECDHE curve - ssl.sigalgs., Counter, Total successful TLS connections that used signature algorithm - ssl.versions., Counter, Total successful TLS connections that used protocol version + +.. _config_listener_stats_tls: + +TLS statistics +-------------- + +The following TLS statistics are rooted at *listener.
.ssl.*: + +.. include:: ../../_include/ssl_stats.rst .. _config_listener_stats_per_handler: diff --git a/docs/root/configuration/operations/runtime.rst b/docs/root/configuration/operations/runtime.rst index 2e72e52bb953..3ae9b3783f0e 100644 --- a/docs/root/configuration/operations/runtime.rst +++ b/docs/root/configuration/operations/runtime.rst @@ -248,7 +248,7 @@ envoy.deprecated_features:full_fieldname or envoy.deprecated_features:full_enum_ to true. For example, for a deprecated field ``Foo.Bar.Eep`` set ``envoy.deprecated_features:Foo.bar.Eep`` to ``true``. There is a production example using static runtime to allow both fail-by-default fields here: -:repo:`configs/using_deprecated_config.v2.yaml` +:repo:`configs/using_deprecated_config.yaml` Use of these override is **strongly discouraged** so please use with caution and switch to the new fields as soon as possible. Fatal-by-default configuration indicates that the removal of the old code paths is imminent. It is far better for both Envoy users and for Envoy contributors if any bugs or feature gaps diff --git a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst index 74c9b495c588..874b9d9f28fa 100644 --- a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst +++ b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst @@ -219,6 +219,15 @@ are rooted at *cluster..* and contain the following statistics: external.upstream_rq_<\*>, Counter, External origin specific HTTP response codes external.upstream_rq_time, Histogram, External origin request time milliseconds +.. _config_cluster_manager_cluster_stats_tls: + +TLS statistics +-------------- + +If TLS is used by the cluster the following statistics are rooted at *cluster..ssl.*: + +.. include:: ../../../_include/ssl_stats.rst + .. 
_config_cluster_manager_cluster_stats_alt_tree: Alternate tree dynamic HTTP statistics diff --git a/docs/root/intro/_include/life-of-a-request.yaml b/docs/root/intro/_include/life-of-a-request.yaml index b3df4f05da9d..7006dbc24221 100644 --- a/docs/root/intro/_include/life-of-a-request.yaml +++ b/docs/root/intro/_include/life-of-a-request.yaml @@ -52,10 +52,10 @@ static_resources: path: "/foo" route: cluster: some_service - # CustomFilter and the HTTP router filter are the HTTP filter chain. - http_filters: - - name: some.customer.filter - - name: envoy.filters.http.router + # CustomFilter and the HTTP router filter are the HTTP filter chain. + http_filters: + # - name: some.customer.filter + - name: envoy.filters.http.router clusters: - name: some_service connect_timeout: 5s @@ -86,7 +86,7 @@ static_resources: # The rest of the configuration for statsd sink cluster. # statsd sink. stats_sinks: - - name: envoy.stat_sinks.statsd - typed_config: - "@type": type.googleapis.com/envoy.config.metrics.v3.StatsdSink - tcp_cluster_name: some_statsd_cluster + - name: envoy.stat_sinks.statsd + typed_config: + "@type": type.googleapis.com/envoy.config.metrics.v3.StatsdSink + tcp_cluster_name: some_statsd_sink diff --git a/docs/root/intro/arch_overview/http/upgrades.rst b/docs/root/intro/arch_overview/http/upgrades.rst index bcf9f0a21fd9..4f9b62f7ded5 100644 --- a/docs/root/intro/arch_overview/http/upgrades.rst +++ b/docs/root/intro/arch_overview/http/upgrades.rst @@ -97,8 +97,8 @@ An example set up proxying SMTP would look something like this [SMTP Upstream] --- raw SMTP --- [L2 Envoy] --- SMTP tunneled over HTTP/2 --- [L1 Envoy] --- raw SMTP --- [Client] Examples of such a set up can be found in the Envoy example config :repo:`directory ` -If you run `bazel-bin/source/exe/envoy-static --config-path configs/encapsulate_in_connect.v3.yaml --base-id 1` -and `bazel-bin/source/exe/envoy-static --config-path configs/terminate_connect.v3.yaml` +If you run 
`bazel-bin/source/exe/envoy-static --config-path configs/encapsulate_in_connect.yaml --base-id 1` +and `bazel-bin/source/exe/envoy-static --config-path configs/terminate_connect.yaml` you will be running two Envoys, the first listening for TCP traffic on port 10000 and encapsulating it in an HTTP/2 CONNECT request, and the second listening for HTTP/2 on 10001, stripping the CONNECT headers, and forwarding the original TCP upstream, in this case to google.com. diff --git a/docs/root/intro/arch_overview/listeners/listeners_toc.rst b/docs/root/intro/arch_overview/listeners/listeners_toc.rst index 77c377c8cbe8..121304996fa0 100644 --- a/docs/root/intro/arch_overview/listeners/listeners_toc.rst +++ b/docs/root/intro/arch_overview/listeners/listeners_toc.rst @@ -6,6 +6,7 @@ Listeners listeners listener_filters + network_filter_chain network_filters tcp_proxy udp_proxy diff --git a/docs/root/intro/arch_overview/listeners/network_filter_chain.rst b/docs/root/intro/arch_overview/listeners/network_filter_chain.rst new file mode 100644 index 000000000000..d920deaf303d --- /dev/null +++ b/docs/root/intro/arch_overview/listeners/network_filter_chain.rst @@ -0,0 +1,30 @@ +.. _arch_overview_network_filter_chain: + +Network Filter Chain +==================== + +As discussed in the :ref:`listener ` section, network level (L3/L4) filters +form the core of Envoy connection handling. + +The network filters are chained in a ordered list known as :ref:`filter chain `. +Each listener has multiple filter chains and an optional :ref:`default filter chain `. +associated with each filter chain. If the best match filter chain cannot be found, the default filter chain will be +chosen to serve the request. If the default filter chain is not supplied, the connection will be closed. + +.. _filter_chain_only_update: + +Filter chain only update +------------------------ + +:ref:`Filter chains ` can be updated indepedently. 
Upon listener config +update, if the listener manager determines that the listener update is a filter chain only update, the listener update +will be executed by adding, updating and removing filter chains. The connections owned by these destroying filter chains will +be drained as described in listener drain. + +If the new :ref:`filter chain ` and the old :ref:`filter chain ` +is protobuf message equivalent, the corresponding filter chain runtime info survives. The connections owned by the +survived filter chains remain open. + +Not all the listener config updates can be executed by filter chain update. For example, if the listener metadata is +updated within the new listener config, the new metadata must be picked up by the new filter chains. In this case, the +entire listener is drained and updated. diff --git a/docs/root/intro/arch_overview/security/_include/ssl.yaml b/docs/root/intro/arch_overview/security/_include/ssl.yaml index 5d9e8ae82b63..8c74e56c8d93 100644 --- a/docs/root/intro/arch_overview/security/_include/ssl.yaml +++ b/docs/root/intro/arch_overview/security/_include/ssl.yaml @@ -5,15 +5,28 @@ static_resources: filter_chains: - filters: - name: envoy.filters.network.http_connection_manager - # ... 
+ typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + stat_prefix: ingress_http + route_config: + virtual_hosts: + - name: default + domains: "*" + routes: + - match: { prefix: "/" } + route: + cluster: some_service transport_socket: name: envoy.transport_sockets.tls typed_config: "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext common_tls_context: + tls_certificates: + - certificate_chain: { filename: "certs/servercert.pem" } + private_key: { filename: "certs/serverkey.pem" } validation_context: trusted_ca: - filename: /usr/local/my-client-ca.crt + filename: certs/cacert.pem clusters: - name: some_service connect_timeout: 0.25s @@ -34,9 +47,9 @@ static_resources: "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext common_tls_context: tls_certificates: - certificate_chain: { "filename": "/cert.crt" } - private_key: { "filename": "/cert.key" } - ocsp_response: { "filename": "/ocsp_response.der" } + certificate_chain: { "filename": "certs/servercert.pem" } + private_key: { "filename": "certs/serverkey.pem" } + ocsp_staple: { "filename": "certs/server_ocsp_resp.der" } validation_context: match_subject_alt_names: exact: "foo" diff --git a/docs/root/start/building.rst b/docs/root/start/building.rst index 39f5e23273d3..102ff52903e5 100644 --- a/docs/root/start/building.rst +++ b/docs/root/start/building.rst @@ -42,9 +42,6 @@ be found in the following repositories: binary with debug symbols on top of an Ubuntu Bionic base. * `envoyproxy/envoy-alpine `_: Release binary with symbols stripped on top of a **glibc** alpine base. -* `envoyproxy/envoy-alpine-debug `_: - *Deprecated in favor of envoyproxy/envoy-debug.* Release binary with debug symbols on top of a - Release binary with debug symbols on top of a **glibc** alpine base. .. 
note:: @@ -59,9 +56,6 @@ be found in the following repositories: binary with debug symbols on top of an Ubuntu Bionic base. * `envoyproxy/envoy-alpine-dev `_: Release binary with symbols stripped on top of a **glibc** alpine base. -* `envoyproxy/envoy-alpine-debug-dev `_: - *Deprecated in favor of envoyproxy/envoy-debug-dev.* Release binary with debug symbols on top of a - **glibc** alpine base. In the above *dev* repositories, the *latest* tag points to the last Envoy SHA in master that passed tests. diff --git a/docs/root/start/install/ref_configs.rst b/docs/root/start/install/ref_configs.rst index 7aebc814da96..b7bb405175b2 100644 --- a/docs/root/start/install/ref_configs.rst +++ b/docs/root/start/install/ref_configs.rst @@ -23,9 +23,9 @@ source distribution includes a version of the configuration generator that uses have also included three example configuration templates for each of the above three scenarios. * Generator script: :repo:`configs/configgen.py` -* Service to service template: :repo:`configs/envoy_service_to_service_v2.template.yaml` -* Front proxy template: :repo:`configs/envoy_front_proxy_v2.template.yaml` -* Double proxy template: :repo:`configs/envoy_double_proxy_v2.template.yaml` +* Service to service template: :repo:`configs/envoy_service_to_service.template.yaml` +* Front proxy template: :repo:`configs/envoy_front_proxy.template.yaml` +* Double proxy template: :repo:`configs/envoy_double_proxy.template.yaml` To generate the example configurations run the following from the root of the repo: diff --git a/docs/root/start/start.rst b/docs/root/start/start.rst index 4d4529e2f260..79ddfc5acd0c 100644 --- a/docs/root/start/start.rst +++ b/docs/root/start/start.rst @@ -32,7 +32,7 @@ more detailed explanation of the configuration file and execution steps for the same configuration. A very minimal Envoy configuration that can be used to validate basic plain HTTP -proxying is available in :repo:`configs/google_com_proxy.v2.yaml`. 
This is not +proxying is available in :repo:`configs/google_com_proxy.yaml`. This is not intended to represent a realistic Envoy deployment: .. substitution-code-block:: none diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 0969e772395f..7a6f434f83b2 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -9,6 +9,7 @@ Minor Behavior Changes ---------------------- *Changes that may cause incompatibilities for some users, but should not for most* +* build: the Alpine based debug images are no longer built in CI, use Ubuntu based images instead. * ext_authz filter: the deprecated field :ref:`use_alpha ` is no longer supported and cannot be set anymore. Bug Fixes @@ -16,6 +17,8 @@ Bug Fixes *Changes expected to improve the state of the world and are unlikely to have negative effects* * active http health checks: properly handles HTTP/2 GOAWAY frames from the upstream. Previously a GOAWAY frame due to a graceful listener drain could cause improper failed health checks due to streams being refused by the upstream on a connection that is going away. +* active health checks: health checks using a TLS transport socket and secrets delivered via :ref:`SDS ` will now wait until secrets are loaded before the first health check attempt. This should improve startup times by not having to wait for the :ref:`no_traffic_interval ` until the next attempt. + * http: sending CONNECT_ERROR for HTTP/2 where appropriate during CONNECT requests. Removed Config or Runtime @@ -23,6 +26,7 @@ Removed Config or Runtime *Normally occurs at the end of the* :ref:`deprecation period ` * ext_authz: removed auto ignore case in HTTP-based `ext_authz` header matching and the runtime guard `envoy.reloadable_features.ext_authz_http_service_enable_case_sensitive_string_matcher`. To ignore case, set the :ref:`ignore_case ` field to true. 
+* http: flip default HTTP/1 and HTTP/2 server codec implementations to new codecs that remove the use of exceptions for control flow. To revert to old codec behavior, set the runtime feature `envoy.reloadable_features.new_codec_behavior` to false. * http: removed `envoy.reloadable_features.http1_flood_protection` and legacy code path for turning flood protection off. New Features @@ -30,9 +34,12 @@ New Features * grpc: implemented header value syntax support when defining :ref:`initial metadata ` for gRPC-based `ext_authz` :ref:`HTTP ` and :ref:`network ` filters, and :ref:`ratelimit ` filters. * hds: added support for delta updates in the :ref:`HealthCheckSpecifier `, making only the Endpoints and Health Checkers that changed be reconstructed on receiving a new message, rather than the entire HDS. * health_check: added option to use :ref:`no_traffic_healthy_interval ` which allows a different no traffic interval when the host is healthy. +* listener: added an optional :ref:`default filter chain `. If this field is supplied, and none of the :ref:`filter_chains ` matches, this default filter chain is used to serve the connection. * mongo_proxy: the list of commands to produce metrics for is now :ref:`configurable `. +* ratelimit: added support for use of various :ref:`metadata ` as a ratelimit action. * ratelimit: added :ref:`disable_x_envoy_ratelimited_header ` option to disable `X-Envoy-RateLimited` header. * tcp: added a new :ref:`envoy.overload_actions.reject_incoming_connections ` action to reject incoming TCP connections. Deprecated ---------- +* ratelimit: the :ref:`dynamic metadata ` action is deprecated in favor of the more generic :ref:`metadata ` action. 
diff --git a/docs/root/version_history/v1.16.0.rst b/docs/root/version_history/v1.16.0.rst index 9e6dff0c83e4..c4dc4ee126c4 100644 --- a/docs/root/version_history/v1.16.0.rst +++ b/docs/root/version_history/v1.16.0.rst @@ -67,7 +67,7 @@ Bug Fixes * http: fixed bugs in datadog and squash filter's handling of responses with no bodies. * http: made the HeaderValues::prefix() method const. * jwt_authn: supports jwt payload without "iss" field. -* listener: fixed crash at listener inplace update when connetion load balancer is set. +* listener: fixed crash at listener inplace update when connection load balancer is set. * rocketmq_proxy: fixed an issue involving incorrect header lengths. In debug mode it causes crash and in release mode it causes underflow. * thrift_proxy: fixed crashing bug on request overflow. * udp_proxy: fixed a crash due to UDP packets being processed after listener removal. diff --git a/generated_api_shadow/bazel/external_deps.bzl b/generated_api_shadow/bazel/external_deps.bzl index cd9b6759f98a..588879c4bd0a 100644 --- a/generated_api_shadow/bazel/external_deps.bzl +++ b/generated_api_shadow/bazel/external_deps.bzl @@ -2,12 +2,6 @@ load("@envoy_api//bazel:repository_locations_utils.bzl", "load_repository_locati # Envoy dependencies may be annotated with the following attributes: DEPENDENCY_ANNOTATIONS = [ - # List of the categories describing how the dependency is being used. This attribute is used - # for automatic tracking of security posture of Envoy's dependencies. - # Possible values are documented in the USE_CATEGORIES list below. - # This attribute is mandatory for each dependecy. - "use_category", - # Attribute specifying CPE (Common Platform Enumeration, see https://nvd.nist.gov/products/cpe) ID # of the dependency. The ID may be in v2.3 or v2.2 format, although v2.3 is prefferred. See # https://nvd.nist.gov/products/cpe for CPE format. 
Use single wildcard '*' for version and vector elements @@ -15,6 +9,31 @@ DEPENDENCY_ANNOTATIONS = [ # This attribute is optional for components with use categories listed in the # USE_CATEGORIES_WITH_CPE_OPTIONAL "cpe", + + # A list of extensions when 'use_category' contains 'dataplane_ext' or 'observability_ext'. + "extensions", + + # Additional dependencies loaded transitively via this dependency that are not tracked in + # Envoy (see the external dependency at the given version for information). + "implied_untracked_deps", + + # When the dependency was last updated in Envoy. + "last_updated", + + # Project metadata. + "project_desc", + "project_name", + "project_url", + + # List of the categories describing how the dependency is being used. This attribute is used + # for automatic tracking of security posture of Envoy's dependencies. + # Possible values are documented in the USE_CATEGORIES list below. + # This attribute is mandatory for each dependecy. + "use_category", + + # The dependency version. This may be either a tagged release (preferred) + # or git SHA (as an exception when no release tagged version is suitable). + "version", ] # NOTE: If a dependency use case is either dataplane or controlplane, the other uses are not needed @@ -38,6 +57,10 @@ USE_CATEGORIES = [ "other", # This dependency is used only in tests. 
"test_only", + # Documentation generation + "docs", + # Developer tools (not used in build or docs) + "devtools", ] # Components with these use categories are not required to specify the 'cpe' @@ -62,47 +85,43 @@ def load_repository_locations(repository_locations_spec): if "project_name" not in location: _fail_missing_attribute("project_name", key) - mutable_location.pop("project_name") if "project_desc" not in location: _fail_missing_attribute("project_desc", key) - mutable_location.pop("project_desc") if "project_url" not in location: _fail_missing_attribute("project_url", key) - project_url = mutable_location.pop("project_url") + project_url = location["project_url"] if not project_url.startswith("https://") and not project_url.startswith("http://"): fail("project_url must start with https:// or http://: " + project_url) if "version" not in location: _fail_missing_attribute("version", key) - mutable_location.pop("version") if "use_category" not in location: _fail_missing_attribute("use_category", key) - use_category = mutable_location.pop("use_category") + use_category = location["use_category"] if "dataplane_ext" in use_category or "observability_ext" in use_category: if "extensions" not in location: _fail_missing_attribute("extensions", key) - mutable_location.pop("extensions") if "last_updated" not in location: _fail_missing_attribute("last_updated", key) - last_updated = mutable_location.pop("last_updated") + last_updated = location["last_updated"] # Starlark doesn't have regexes. if len(last_updated) != 10 or last_updated[4] != "-" or last_updated[7] != "-": fail("last_updated must match YYYY-DD-MM: " + last_updated) if "cpe" in location: - cpe = mutable_location.pop("cpe") + cpe = location["cpe"] # Starlark doesn't have regexes. cpe_components = len(cpe.split(":")) - # We allow cpe:2.3:a:foo:* and cpe:2.3.:a:foo:bar:* only. - cpe_components_valid = cpe_components in [5, 6] + # We allow cpe:2.3:a:foo:*:* and cpe:2.3.:a:foo:bar:* only. 
+ cpe_components_valid = (cpe_components == 6) cpe_matches = (cpe == "N/A" or (cpe.startswith("cpe:2.3:a:") and cpe.endswith(":*") and cpe_components_valid)) if not cpe_matches: fail("CPE must match cpe:2.3:a:::*: " + cpe) @@ -113,4 +132,9 @@ def load_repository_locations(repository_locations_spec): if category not in USE_CATEGORIES: fail("Unknown use_category value '" + category + "' for dependecy " + key) + # Remove any extra annotations that we add, so that we don't confuse http_archive etc. + for annotation in DEPENDENCY_ANNOTATIONS: + if annotation in mutable_location: + mutable_location.pop(annotation) + return locations diff --git a/generated_api_shadow/envoy/config/listener/v3/listener.proto b/generated_api_shadow/envoy/config/listener/v3/listener.proto index 9d7bc38269e8..7376e2e2b716 100644 --- a/generated_api_shadow/envoy/config/listener/v3/listener.proto +++ b/generated_api_shadow/envoy/config/listener/v3/listener.proto @@ -36,7 +36,7 @@ message ListenerCollection { repeated udpa.core.v1.CollectionEntry entries = 1; } -// [#next-free-field: 25] +// [#next-free-field: 26] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener"; @@ -114,6 +114,10 @@ message Listener { // :ref:`FAQ entry `. repeated FilterChain filter_chains = 3; + // The default filter chain if none of the filter chain matches. If no default filter chain is supplied, + // the connection will be closed. The filter chain match is ignored in this field. + FilterChain default_filter_chain = 25; + // Soft limit on size of the listener’s new connection read and write buffers. // If unspecified, an implementation defined default is applied (1MiB). 
google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 diff --git a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto b/generated_api_shadow/envoy/config/listener/v3/listener_components.proto index ff974114491e..0d073197cabd 100644 --- a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto +++ b/generated_api_shadow/envoy/config/listener/v3/listener_components.proto @@ -66,6 +66,18 @@ message Filter { // ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter // chain without ``server_names`` requirements). // +// A different way to reason about the filter chain matches: +// Suppose there exists N filter chains. Prune the filter chain set using the above 8 steps. +// In each step, filter chains which most specifically matches the attributes continue to the next step. +// The listener guarantees at most 1 filter chain is left after all of the steps. +// +// Example: +// +// For destination port, filter chains specifying the destination port of incoming traffic are the +// most specific match. If none of the filter chains specifies the exact destination port, the filter +// chains which do not specify ports are the most specific match. Filter chains specifying the +// wrong port can never be the most specific match. +// // [#comment: Implemented rules are kept in the preference order, with deprecated fields // listed at the end, because that's how we want to list them in the docs. 
// diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto b/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto index 3c9dced082b7..fbc65d0880f3 100644 --- a/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto +++ b/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto @@ -39,7 +39,7 @@ message ListenerCollection { repeated udpa.core.v1.CollectionEntry entries = 1; } -// [#next-free-field: 25] +// [#next-free-field: 26] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Listener"; @@ -119,6 +119,10 @@ message Listener { // :ref:`FAQ entry `. repeated FilterChain filter_chains = 3; + // The default filter chain if none of the filter chain matches. If no default filter chain is supplied, + // the connection will be closed. The filter chain match is ignored in this field. + FilterChain default_filter_chain = 25; + // Soft limit on size of the listener’s new connection read and write buffers. // If unspecified, an implementation defined default is applied (1MiB). google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto b/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto index 4add5ee102ee..0c75f92b4027 100644 --- a/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto +++ b/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto @@ -65,6 +65,18 @@ message Filter { // ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter // chain without ``server_names`` requirements). // +// A different way to reason about the filter chain matches: +// Suppose there exists N filter chains. Prune the filter chain set using the above 8 steps. +// In each step, filter chains which most specifically matches the attributes continue to the next step. 
+// The listener guarantees at most 1 filter chain is left after all of the steps. +// +// Example: +// +// For destination port, filter chains specifying the destination port of incoming traffic are the +// most specific match. If none of the filter chains specifies the exact destination port, the filter +// chains which do not specify ports are the most specific match. Filter chains specifying the +// wrong port can never be the most specific match. +// // [#comment: Implemented rules are kept in the preference order, with deprecated fields // listed at the end, because that's how we want to list them in the docs. // diff --git a/generated_api_shadow/envoy/config/route/v3/route_components.proto b/generated_api_shadow/envoy/config/route/v3/route_components.proto index a3a823cafe44..e203cdcf4e84 100644 --- a/generated_api_shadow/envoy/config/route/v3/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v3/route_components.proto @@ -1522,7 +1522,7 @@ message VirtualCluster { message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit"; - // [#next-free-field: 8] + // [#next-free-field: 9] message Action { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit.Action"; @@ -1639,11 +1639,15 @@ message RateLimit { repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; } - // The following descriptor entry is appended when the dynamic metadata contains a key value: + // The following descriptor entry is appended when the + // :ref:`dynamic metadata ` contains a key value: // // .. code-block:: cpp // - // ("", "") + // ("", "") + // + // .. attention:: + // This action has been deprecated in favor of the :ref:`metadata ` action message DynamicMetaData { // The key to use in the descriptor entry. 
string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; @@ -1657,6 +1661,35 @@ message RateLimit { string default_value = 3; } + // The following descriptor entry is appended when the metadata contains a key value: + // + // .. code-block:: cpp + // + // ("", "") + message MetaData { + enum Source { + // Query :ref:`dynamic metadata ` + DYNAMIC = 0; + + // Query :ref:`route entry metadata ` + ROUTE_ENTRY = 1; + } + + // The key to use in the descriptor entry. + string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; + + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the metadata is of type string. + type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + + // An optional value to use if *metadata_key* is empty. If not set and + // no value is present under the metadata_key then no descriptor is generated. + string default_value = 3; + + // Source of metadata + Source source = 4 [(validate.rules).enum = {defined_only: true}]; + } + oneof action_specifier { option (validate.required) = true; @@ -1679,7 +1712,14 @@ message RateLimit { HeaderValueMatch header_value_match = 6; // Rate limit on dynamic metadata. - DynamicMetaData dynamic_metadata = 7; + // + // .. attention:: + // This field has been deprecated in favor of the :ref:`metadata ` field + DynamicMetaData dynamic_metadata = 7 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + + // Rate limit on metadata. 
+ MetaData metadata = 8; } } diff --git a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto index 4d4a93eebe60..12c56dd834a4 100644 --- a/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto +++ b/generated_api_shadow/envoy/config/route/v4alpha/route_components.proto @@ -1526,7 +1526,7 @@ message VirtualCluster { message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit"; - // [#next-free-field: 8] + // [#next-free-field: 9] message Action { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit.Action"; @@ -1643,11 +1643,15 @@ message RateLimit { repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; } - // The following descriptor entry is appended when the dynamic metadata contains a key value: + // The following descriptor entry is appended when the + // :ref:`dynamic metadata ` contains a key value: // // .. code-block:: cpp // - // ("", "") + // ("", "") + // + // .. attention:: + // This action has been deprecated in favor of the :ref:`metadata ` action message DynamicMetaData { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit.Action.DynamicMetaData"; @@ -1664,6 +1668,38 @@ message RateLimit { string default_value = 3; } + // The following descriptor entry is appended when the metadata contains a key value: + // + // .. code-block:: cpp + // + // ("", "") + message MetaData { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.route.v3.RateLimit.Action.MetaData"; + + enum Source { + // Query :ref:`dynamic metadata ` + DYNAMIC = 0; + + // Query :ref:`route entry metadata ` + ROUTE_ENTRY = 1; + } + + // The key to use in the descriptor entry. 
+ string descriptor_key = 1 [(validate.rules).string = {min_len: 1}]; + + // Metadata struct that defines the key and path to retrieve the string value. A match will + // only happen if the value in the metadata is of type string. + type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}]; + + // An optional value to use if *metadata_key* is empty. If not set and + // no value is present under the metadata_key then no descriptor is generated. + string default_value = 3; + + // Source of metadata + Source source = 4 [(validate.rules).enum = {defined_only: true}]; + } + oneof action_specifier { option (validate.required) = true; @@ -1686,7 +1722,14 @@ message RateLimit { HeaderValueMatch header_value_match = 6; // Rate limit on dynamic metadata. - DynamicMetaData dynamic_metadata = 7; + // + // .. attention:: + // This field has been deprecated in favor of the :ref:`metadata ` field + DynamicMetaData hidden_envoy_deprecated_dynamic_metadata = 7 + [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; + + // Rate limit on metadata. + MetaData metadata = 8; } } diff --git a/include/envoy/network/transport_socket.h b/include/envoy/network/transport_socket.h index fe054ce2f16d..db500f86a8a6 100644 --- a/include/envoy/network/transport_socket.h +++ b/include/envoy/network/transport_socket.h @@ -226,6 +226,13 @@ class TransportSocketFactory { */ virtual TransportSocketPtr createTransportSocket(TransportSocketOptionsSharedPtr options) const PURE; + + /** + * @param a callback to be invoked when the secrets required by the created transport + * sockets are ready. Will be invoked immediately if no secrets are required or if they + * are already loaded. 
+ */ + virtual void addReadyCb(std::function callback) PURE; }; using TransportSocketFactoryPtr = std::unique_ptr; diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index 127df14c923a..2c51a7c261c4 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -112,6 +112,15 @@ class Host : virtual public HostDescription { Network::TransportSocketOptionsSharedPtr transport_socket_options, const envoy::config::core::v3::Metadata* metadata) const PURE; + /** + * Register a callback to be invoked when secrets are ready for the transport socket that + * corresponds to the provided metadata. + * @param callback supplies the callback to be invoked. + * @param metadata supplies the metadata to be used for resolving transport socket matches. + */ + virtual void addReadyCb(std::function callback, + const envoy::config::core::v3::Metadata* metadata) const PURE; + /** * @return host specific gauges. */ diff --git a/source/common/http/codec_client.cc b/source/common/http/codec_client.cc index 7c17ce9e5b31..95ff7664bf2d 100644 --- a/source/common/http/codec_client.cc +++ b/source/common/http/codec_client.cc @@ -167,17 +167,10 @@ CodecClientProd::CodecClientProd(Type type, Network::ClientConnectionPtr&& conne break; } case Type::HTTP2: { - if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { - codec_ = std::make_unique( - *connection_, *this, host->cluster().http2CodecStats(), random_generator, - host->cluster().http2Options(), Http::DEFAULT_MAX_REQUEST_HEADERS_KB, - host->cluster().maxResponseHeadersCount(), Http2::ProdNghttp2SessionFactory::get()); - } else { - codec_ = std::make_unique( - *connection_, *this, host->cluster().http2CodecStats(), random_generator, - host->cluster().http2Options(), Http::DEFAULT_MAX_REQUEST_HEADERS_KB, - host->cluster().maxResponseHeadersCount(), Http2::ProdNghttp2SessionFactory::get()); - } + codec_ = std::make_unique( + *connection_, *this, 
host->cluster().http2CodecStats(), random_generator, + host->cluster().http2Options(), Http::DEFAULT_MAX_REQUEST_HEADERS_KB, + host->cluster().maxResponseHeadersCount(), Http2::ProdNghttp2SessionFactory::get()); break; } case Type::HTTP3: { diff --git a/source/common/http/http2/codec_impl.cc b/source/common/http/http2/codec_impl.cc index d7f507f3c6fe..4af93895de4a 100644 --- a/source/common/http/http2/codec_impl.cc +++ b/source/common/http/http2/codec_impl.cc @@ -177,17 +177,10 @@ void ConnectionImpl::StreamImpl::encodeHeadersBase(const std::vector local_end_stream_ = end_stream; submitHeaders(final_headers, end_stream ? nullptr : &provider); - auto status = parent_.sendPendingFrames(); - // The RELEASE_ASSERT below does not change the existing behavior of `sendPendingFrames()`. - // The `sendPendingFrames()` used to throw on errors and the only method that was catching - // these exceptions was the `dispatch()`. The `dispatch()` method still checks and handles - // errors returned by the `sendPendingFrames()`. - // Other callers of `sendPendingFrames()` do not catch exceptions from this method and - // would cause abnormal process termination in error cases. This change replaces abnormal - // process termination from unhandled exception with the RELEASE_ASSERT. - // Further work will replace this RELEASE_ASSERT with proper error handling. - RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); - parent_.checkProtocolConstraintViolation(); + if (parent_.sendPendingFramesAndHandleError()) { + // Intended to check through coverage that this error case is tested + return; + } } void ConnectionImpl::ClientStreamImpl::encodeHeaders(const RequestHeaderMap& headers, @@ -255,10 +248,10 @@ void ConnectionImpl::StreamImpl::encodeTrailersBase(const HeaderMap& trailers) { } } else { submitTrailers(trailers); - auto status = parent_.sendPendingFrames(); - // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. 
- RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); - parent_.checkProtocolConstraintViolation(); + if (parent_.sendPendingFramesAndHandleError()) { + // Intended to check through coverage that this error case is tested + return; + } } } @@ -271,10 +264,10 @@ void ConnectionImpl::StreamImpl::encodeMetadata(const MetadataMapVector& metadat for (uint8_t flags : metadata_encoder.payloadFrameFlagBytes()) { submitMetadata(flags); } - auto status = parent_.sendPendingFrames(); - // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. - RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); - parent_.checkProtocolConstraintViolation(); + if (parent_.sendPendingFramesAndHandleError()) { + // Intended to check through coverage that this error case is tested + return; + } } void ConnectionImpl::StreamImpl::readDisable(bool disable) { @@ -289,10 +282,10 @@ void ConnectionImpl::StreamImpl::readDisable(bool disable) { if (!buffersOverrun()) { nghttp2_session_consume(parent_.session_, stream_id_, unconsumed_bytes_); unconsumed_bytes_ = 0; - auto status = parent_.sendPendingFrames(); - // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. - RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); - parent_.checkProtocolConstraintViolation(); + if (parent_.sendPendingFramesAndHandleError()) { + // Intended to check through coverage that this error case is tested + return; + } } } } @@ -418,7 +411,7 @@ ssize_t ConnectionImpl::StreamImpl::onDataSourceRead(uint64_t length, uint32_t* } } -Status ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t length) { +void ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size_t length) { // In this callback we are writing out a raw DATA frame without copying. nghttp2 assumes that we // "just know" that the frame header is 9 bytes. 
// https://nghttp2.org/documentation/types.html#c.nghttp2_send_data_callback @@ -427,18 +420,16 @@ Status ConnectionImpl::StreamImpl::onDataSourceSend(const uint8_t* framehd, size parent_.protocol_constraints_.incrementOutboundDataFrameCount(); Buffer::OwnedImpl output; - auto status = parent_.addOutboundFrameFragment(output, framehd, FRAME_HEADER_SIZE); - if (!status.ok()) { + parent_.addOutboundFrameFragment(output, framehd, FRAME_HEADER_SIZE); + if (!parent_.protocol_constraints_.checkOutboundFrameLimits().ok()) { ENVOY_CONN_LOG(debug, "error sending data frame: Too many frames in the outbound queue", parent_.connection_); setDetails(Http2ResponseCodeDetails::get().outbound_frame_flood); - return status; } parent_.stats_.pending_send_bytes_.sub(length); output.move(pending_send_data_, length); parent_.connection_.write(output, false); - return status; } void ConnectionImpl::ClientStreamImpl::submitHeaders(const std::vector& final_headers, @@ -474,10 +465,10 @@ void ConnectionImpl::StreamImpl::onPendingFlushTimer() { // This will emit a reset frame for this stream and close the stream locally. No reset callbacks // will be run because higher layers think the stream is already finished. resetStreamWorker(StreamResetReason::LocalReset); - auto status = parent_.sendPendingFrames(); - // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. - RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); - parent_.checkProtocolConstraintViolation(); + if (parent_.sendPendingFramesAndHandleError()) { + // Intended to check through coverage that this error case is tested + return; + } } void ConnectionImpl::StreamImpl::encodeData(Buffer::Instance& data, bool end_stream) { @@ -501,11 +492,10 @@ void ConnectionImpl::StreamImpl::encodeDataHelper(Buffer::Instance& data, bool e data_deferred_ = false; } - auto status = parent_.sendPendingFrames(); - // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. 
- RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); - parent_.checkProtocolConstraintViolation(); - + if (parent_.sendPendingFramesAndHandleError()) { + // Intended to check through coverage that this error case is tested + return; + } if (local_end_stream_ && pending_send_data_.length() > 0) { createPendingFlushTimer(); } @@ -529,10 +519,10 @@ void ConnectionImpl::StreamImpl::resetStream(StreamResetReason reason) { // We must still call sendPendingFrames() in both the deferred and not deferred path. This forces // the cleanup logic to run which will reset the stream in all cases if all data frames could not // be sent. - auto status = parent_.sendPendingFrames(); - // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. - RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); - parent_.checkProtocolConstraintViolation(); + if (parent_.sendPendingFramesAndHandleError()) { + // Intended to check through coverage that this error case is tested + return; + } } void ConnectionImpl::StreamImpl::resetStreamWorker(StreamResetReason reason) { @@ -611,11 +601,10 @@ void ConnectionImpl::sendKeepalive() { int rc = nghttp2_submit_ping(session_, 0 /*flags*/, reinterpret_cast(&ms_since_epoch)); ASSERT(rc == 0); - auto status = sendPendingFrames(); - // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. 
- RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); - checkProtocolConstraintViolation(); - + if (sendPendingFramesAndHandleError()) { + // Intended to check through coverage that this error case is tested + return; + } keepalive_timeout_timer_->enableTimer(keepalive_timeout_); } void ConnectionImpl::onKeepaliveResponse() { @@ -707,20 +696,20 @@ void ConnectionImpl::goAway() { NGHTTP2_NO_ERROR, nullptr, 0); ASSERT(rc == 0); - auto status = sendPendingFrames(); - // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. - RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); - checkProtocolConstraintViolation(); + if (sendPendingFramesAndHandleError()) { + // Intended to check through coverage that this error case is tested + return; + } } void ConnectionImpl::shutdownNotice() { int rc = nghttp2_submit_shutdown_notice(session_); ASSERT(rc == 0); - auto status = sendPendingFrames(); - // See comment in the `encodeHeadersBase()` method about this RELEASE_ASSERT. - RELEASE_ASSERT(status.ok(), "sendPendingFrames() failure in non dispatching context"); - checkProtocolConstraintViolation(); + if (sendPendingFramesAndHandleError()) { + // Intended to check through coverage that this error case is tested + return; + } } Status ConnectionImpl::onBeforeFrameReceived(const nghttp2_frame_hd* hd) { @@ -938,31 +927,21 @@ int ConnectionImpl::onBeforeFrameSend(const nghttp2_frame* frame) { return 0; } -Status ConnectionImpl::addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data, - size_t length) { +void ConnectionImpl::addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data, + size_t length) { // Reset the outbound frame type (set in the onBeforeFrameSend callback) since the // onBeforeFrameSend callback is not called for DATA frames. 
bool is_outbound_flood_monitored_control_frame = false; std::swap(is_outbound_flood_monitored_control_frame, is_outbound_flood_monitored_control_frame_); - auto status_or_releasor = trackOutboundFrames(is_outbound_flood_monitored_control_frame); - if (!status_or_releasor.ok()) { - return status_or_releasor.status(); - } - + auto releasor = trackOutboundFrames(is_outbound_flood_monitored_control_frame); output.add(data, length); - output.addDrainTracker(status_or_releasor.value()); - return okStatus(); + output.addDrainTracker(releasor); } -StatusOr ConnectionImpl::onSend(const uint8_t* data, size_t length) { +ssize_t ConnectionImpl::onSend(const uint8_t* data, size_t length) { ENVOY_CONN_LOG(trace, "send data: bytes={}", connection_, length); Buffer::OwnedImpl buffer; - auto status = addOutboundFrameFragment(buffer, data, length); - if (!status.ok()) { - ENVOY_CONN_LOG(debug, "error sending frame: Too many frames in the outbound queue.", - connection_); - return status; - } + addOutboundFrameFragment(buffer, data, length); // While the buffer is transient the fragment it contains will be moved into the // write_buffer_ of the underlying connection_ by the write method below. @@ -1100,15 +1079,6 @@ Status ConnectionImpl::sendPendingFrames() { const int rc = nghttp2_session_send(session_); if (rc != 0) { ASSERT(rc == NGHTTP2_ERR_CALLBACK_FAILURE); - - if (!nghttp2_callback_status_.ok()) { - return nghttp2_callback_status_; - } - - // Protocol constrain violations should set the nghttp2_callback_status_ error, and return at - // the statement above. - ASSERT(protocol_constraints_.status().ok()); - return codecProtocolError(nghttp2_strerror(rc)); } @@ -1133,7 +1103,23 @@ Status ConnectionImpl::sendPendingFrames() { } RETURN_IF_ERROR(sendPendingFrames()); } - return okStatus(); + + // After all pending frames have been written into the outbound buffer check if any of + // protocol constraints had been violated. 
+ Status status = protocol_constraints_.checkOutboundFrameLimits(); + if (!status.ok()) { + ENVOY_CONN_LOG(debug, "error sending frames: Too many frames in the outbound queue.", + connection_); + } + return status; +} + +bool ConnectionImpl::sendPendingFramesAndHandleError() { + if (!sendPendingFrames().ok()) { + scheduleProtocolConstraintViolationCallback(); + return true; + } + return false; } void ConnectionImpl::sendSettings( @@ -1225,13 +1211,7 @@ ConnectionImpl::Http2Callbacks::Http2Callbacks() { nghttp2_session_callbacks_set_send_callback( callbacks_, [](nghttp2_session*, const uint8_t* data, size_t length, int, void* user_data) -> ssize_t { - auto status_or_len = static_cast(user_data)->onSend(data, length); - if (status_or_len.ok()) { - return status_or_len.value(); - } - auto status = status_or_len.status(); - return static_cast(user_data)->setAndCheckNghttp2CallbackStatus( - std::move(status)); + return static_cast(user_data)->onSend(data, length); }); nghttp2_session_callbacks_set_send_data_callback( @@ -1239,9 +1219,8 @@ ConnectionImpl::Http2Callbacks::Http2Callbacks() { [](nghttp2_session*, nghttp2_frame* frame, const uint8_t* framehd, size_t length, nghttp2_data_source* source, void*) -> int { ASSERT(frame->data.padlen == 0); - auto status = static_cast(source->ptr)->onDataSourceSend(framehd, length); - return static_cast(source->ptr) - ->parent_.setAndCheckNghttp2CallbackStatus(std::move(status)); + static_cast(source->ptr)->onDataSourceSend(framehd, length); + return 0; }); nghttp2_session_callbacks_set_on_begin_headers_callback( @@ -1518,20 +1497,10 @@ Status ServerConnectionImpl::trackInboundFrames(const nghttp2_frame_hd* hd, return result; } -StatusOr +ProtocolConstraints::ReleasorProc ServerConnectionImpl::trackOutboundFrames(bool is_outbound_flood_monitored_control_frame) { - auto releasor = - protocol_constraints_.incrementOutboundFrameCount(is_outbound_flood_monitored_control_frame); - if (dispatching_downstream_data_ && 
!protocol_constraints_.checkOutboundFrameLimits().ok()) { - return protocol_constraints_.status(); - } - return releasor; -} - -void ServerConnectionImpl::checkProtocolConstraintViolation() { - if (!protocol_constraints_.checkOutboundFrameLimits().ok()) { - scheduleProtocolConstraintViolationCallback(); - } + return protocol_constraints_.incrementOutboundFrameCount( + is_outbound_flood_monitored_control_frame); } Http::Status ServerConnectionImpl::dispatch(Buffer::Instance& data) { @@ -1543,12 +1512,6 @@ Http::Status ServerConnectionImpl::dispatch(Buffer::Instance& data) { } Http::Status ServerConnectionImpl::innerDispatch(Buffer::Instance& data) { - ASSERT(!dispatching_downstream_data_); - dispatching_downstream_data_ = true; - - // Make sure the dispatching_downstream_data_ is set to false when innerDispatch ends. - Cleanup cleanup([this]() { dispatching_downstream_data_ = false; }); - // Make sure downstream outbound queue was not flooded by the upstream frames. RETURN_IF_ERROR(protocol_constraints_.checkOutboundFrameLimits()); return ConnectionImpl::innerDispatch(data); diff --git a/source/common/http/http2/codec_impl.h b/source/common/http/http2/codec_impl.h index b4d15024bafc..c3c883f7dfd6 100644 --- a/source/common/http/http2/codec_impl.h +++ b/source/common/http/http2/codec_impl.h @@ -189,7 +189,7 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable& final_headers, const HeaderMap& headers); void saveHeader(HeaderString&& name, HeaderString&& value); @@ -417,21 +417,25 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable onSend(const uint8_t* data, size_t length); + ssize_t onSend(const uint8_t* data, size_t length); // Some browsers (e.g. 
WebKit-based browsers: https://bugs.webkit.org/show_bug.cgi?id=210108) have // a problem with processing empty trailers (END_STREAM | END_HEADERS with zero length HEADERS) of @@ -529,10 +519,10 @@ class ConnectionImpl : public virtual Connection, protected Logger::Loggable + void addOutboundFrameFragment(Buffer::OwnedImpl& output, const uint8_t* data, size_t length); + virtual ProtocolConstraints::ReleasorProc trackOutboundFrames(bool is_outbound_flood_monitored_control_frame) PURE; virtual Status trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) PURE; void sendKeepalive(); @@ -580,11 +570,10 @@ class ClientConnectionImpl : public ClientConnection, public ConnectionImpl { // mitigation on the downstream connections, however there is currently no mechanism for // handling these types of errors. // TODO(yanavlasov): add flood mitigation for upstream connections as well. - StatusOr trackOutboundFrames(bool) override { + ProtocolConstraints::ReleasorProc trackOutboundFrames(bool) override { return ProtocolConstraints::ReleasorProc([]() {}); } Status trackInboundFrames(const nghttp2_frame_hd*, uint32_t) override { return okStatus(); } - void checkProtocolConstraintViolation() override {} Http::ConnectionCallbacks& callbacks_; }; @@ -607,17 +596,11 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { ConnectionCallbacks& callbacks() override { return callbacks_; } Status onBeginHeaders(const nghttp2_frame* frame) override; int onHeader(const nghttp2_frame* frame, HeaderString&& name, HeaderString&& value) override; - StatusOr + ProtocolConstraints::ReleasorProc trackOutboundFrames(bool is_outbound_flood_monitored_control_frame) override; Status trackInboundFrames(const nghttp2_frame_hd* hd, uint32_t padding_length) override; absl::optional checkHeaderNameForUnderscores(absl::string_view header_name) override; - /** - * Check protocol constraint violations outside of the dispatching context. 
- * This method ASSERTs if it is called in the dispatching context. - */ - void checkProtocolConstraintViolation() override; - // Http::Connection // The reason for overriding the dispatch method is to do flood mitigation only when // processing data from downstream client. Doing flood mitigation when processing upstream @@ -630,10 +613,6 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { ServerConnectionCallbacks& callbacks_; - // This flag indicates that downstream data is being dispatched and turns on flood mitigation - // in the checkMaxOutbound*Framed methods. - bool dispatching_downstream_data_{false}; - // The action to take when a request header name contains underscore characters. envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction headers_with_underscores_action_; diff --git a/source/common/http/path_utility.h b/source/common/http/path_utility.h index 62be43e2e03f..a6a99aaef78d 100644 --- a/source/common/http/path_utility.h +++ b/source/common/http/path_utility.h @@ -12,15 +12,15 @@ namespace Http { */ class PathUtil { public: - // Returns if the normalization succeeds. - // If it is successful, the path header in header path will be updated with the normalized path. + // Returns true if the normalization succeeds. + // If it is successful, the path header will be updated with the normalized path. // Requires the Path header be present. static bool canonicalPath(RequestHeaderMap& headers); // Merges two or more adjacent slashes in path part of URI into one. // Requires the Path header be present. static void mergeSlashes(RequestHeaderMap& headers); // Removes the query and/or fragment string (if present) from the input path. - // For example, this function returns "/data" for the input path "/data#fragment?param=value". + // For example, this function returns "/data" for the input path "/data?param=value#fragment". 
static absl::string_view removeQueryAndFragment(const absl::string_view path); }; diff --git a/source/common/network/raw_buffer_socket.h b/source/common/network/raw_buffer_socket.h index fe87bbeda605..8f17279890aa 100644 --- a/source/common/network/raw_buffer_socket.h +++ b/source/common/network/raw_buffer_socket.h @@ -32,6 +32,7 @@ class RawBufferSocketFactory : public TransportSocketFactory { // Network::TransportSocketFactory TransportSocketPtr createTransportSocket(TransportSocketOptionsSharedPtr options) const override; bool implementsSecureTransport() const override; + void addReadyCb(std::function callback) override { callback(); } }; } // namespace Network diff --git a/source/common/router/router_ratelimit.cc b/source/common/router/router_ratelimit.cc index c75d58b50d9b..0774f8340be5 100644 --- a/source/common/router/router_ratelimit.cc +++ b/source/common/router/router_ratelimit.cc @@ -102,22 +102,40 @@ bool GenericKeyAction::populateDescriptor(const Router::RouteEntry&, return true; } -DynamicMetaDataAction::DynamicMetaDataAction( +MetaDataAction::MetaDataAction(const envoy::config::route::v3::RateLimit::Action::MetaData& action) + : metadata_key_(action.metadata_key()), descriptor_key_(action.descriptor_key()), + default_value_(action.default_value()), source_(action.source()) {} + +MetaDataAction::MetaDataAction( const envoy::config::route::v3::RateLimit::Action::DynamicMetaData& action) : metadata_key_(action.metadata_key()), descriptor_key_(action.descriptor_key()), - default_value_(action.default_value()) {} + default_value_(action.default_value()), + source_(envoy::config::route::v3::RateLimit::Action::MetaData::DYNAMIC) {} -bool DynamicMetaDataAction::populateDescriptor( - const Router::RouteEntry&, RateLimit::Descriptor& descriptor, const std::string&, +bool MetaDataAction::populateDescriptor( + const Router::RouteEntry& route, RateLimit::Descriptor& descriptor, const std::string&, const Http::HeaderMap&, const Network::Address::Instance&, const 
envoy::config::core::v3::Metadata* dynamic_metadata) const { - const ProtobufWkt::Value& metadata_value = - Envoy::Config::Metadata::metadataValue(dynamic_metadata, metadata_key_); + const envoy::config::core::v3::Metadata* metadata_source; + + switch (source_) { + case envoy::config::route::v3::RateLimit::Action::MetaData::DYNAMIC: + metadata_source = dynamic_metadata; + break; + case envoy::config::route::v3::RateLimit::Action::MetaData::ROUTE_ENTRY: + metadata_source = &route.metadata(); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + + const std::string metadata_string_value = + Envoy::Config::Metadata::metadataValue(metadata_source, metadata_key_).string_value(); - if (!metadata_value.string_value().empty()) { - descriptor.entries_.push_back({descriptor_key_, metadata_value.string_value()}); + if (!metadata_string_value.empty()) { + descriptor.entries_.push_back({descriptor_key_, metadata_string_value}); return true; - } else if (metadata_value.string_value().empty() && !default_value_.empty()) { + } else if (metadata_string_value.empty() && !default_value_.empty()) { descriptor.entries_.push_back({descriptor_key_, default_value_}); return true; } @@ -166,7 +184,10 @@ RateLimitPolicyEntryImpl::RateLimitPolicyEntryImpl( actions_.emplace_back(new GenericKeyAction(action.generic_key())); break; case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kDynamicMetadata: - actions_.emplace_back(new DynamicMetaDataAction(action.dynamic_metadata())); + actions_.emplace_back(new MetaDataAction(action.dynamic_metadata())); + break; + case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kMetadata: + actions_.emplace_back(new MetaDataAction(action.metadata())); break; case envoy::config::route::v3::RateLimit::Action::ActionSpecifierCase::kHeaderValueMatch: actions_.emplace_back(new HeaderValueMatchAction(action.header_value_match())); diff --git a/source/common/router/router_ratelimit.h b/source/common/router/router_ratelimit.h 
index 9ea90a5d46b0..912606fc0da8 100644 --- a/source/common/router/router_ratelimit.h +++ b/source/common/router/router_ratelimit.h @@ -114,11 +114,13 @@ class GenericKeyAction : public RateLimitAction { }; /** - * Action for dynamic metadata rate limiting. + * Action for metadata rate limiting. */ -class DynamicMetaDataAction : public RateLimitAction { +class MetaDataAction : public RateLimitAction { public: - DynamicMetaDataAction(const envoy::config::route::v3::RateLimit::Action::DynamicMetaData& action); + MetaDataAction(const envoy::config::route::v3::RateLimit::Action::MetaData& action); + // for maintaining backward compatibility with the deprecated DynamicMetaData action + MetaDataAction(const envoy::config::route::v3::RateLimit::Action::DynamicMetaData& action); // Router::RateLimitAction bool populateDescriptor(const Router::RouteEntry& route, RateLimit::Descriptor& descriptor, const std::string& local_service_cluster, const Http::HeaderMap& headers, @@ -129,6 +131,7 @@ class DynamicMetaDataAction : public RateLimitAction { const Envoy::Config::MetadataKey metadata_key_; const std::string descriptor_key_; const std::string default_value_; + const envoy::config::route::v3::RateLimit::Action::MetaData::Source source_; }; /** diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 28fc36b5eb31..27ae0679af0b 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -79,6 +79,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.http_transport_failure_reason_in_body", "envoy.reloadable_features.http2_skip_encoding_empty_trailers", "envoy.reloadable_features.listener_in_place_filterchain_update", + "envoy.reloadable_features.new_codec_behavior", "envoy.reloadable_features.overload_manager_disable_keepalive_drain_http2", "envoy.reloadable_features.prefer_quic_kernel_bpf_packet_routing", 
"envoy.reloadable_features.preserve_query_string_in_path_redirects", @@ -103,8 +104,6 @@ constexpr const char* disabled_runtime_features[] = { // Allow Envoy to upgrade or downgrade version of type url, should be removed when support for // v2 url is removed from codebase. "envoy.reloadable_features.enable_type_url_downgrade_and_upgrade", - // TODO(asraa) flip this feature after codec errors are handled - "envoy.reloadable_features.new_codec_behavior", // TODO(alyssawilk) flip true after the release. "envoy.reloadable_features.new_tcp_connection_pool", // Sentinel and test flag. diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 15df928be990..80a9ef9ac070 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -417,7 +417,16 @@ void ClusterManagerImpl::onClusterInit(Cluster& cluster) { // been setup for cross-thread updates to avoid needless updates during initialization. The order // of operations here is important. We start by initializing the thread aware load balancer if // needed. This must happen first so cluster updates are heard first by the load balancer. - auto cluster_data = active_clusters_.find(cluster.info()->name()); + // Also, this assures that every cluster for which this function is called is always active. + auto cluster_data = warming_clusters_.find(cluster.info()->name()); + // Some clusters become active immediately, such as static and primary + clusters. So we must have this prevention logic here. 
+ if (cluster_data != warming_clusters_.end()) { + clusterWarmingToActive(cluster.info()->name()); + updateClusterCounts(); + } + cluster_data = active_clusters_.find(cluster.info()->name()); + if (cluster_data->second->thread_aware_lb_ != nullptr) { cluster_data->second->thread_aware_lb_->initialize(); } @@ -587,17 +596,6 @@ bool ClusterManagerImpl::addOrUpdateCluster(const envoy::config::cluster::v3::Cl // The following init manager remove call is a NOP in the case we are already initialized. // It's just kept here to avoid additional logic. init_helper_.removeCluster(*existing_active_cluster->second->cluster_); - } else { - // Validate that warming clusters are not added to the init_helper_. - // NOTE: This loop is compiled out in optimized builds. - for (const std::list& cluster_list : - {std::cref(init_helper_.primary_init_clusters_), - std::cref(init_helper_.secondary_init_clusters_)}) { - ASSERT(!std::any_of(cluster_list.begin(), cluster_list.end(), - [&existing_warming_cluster](Cluster* cluster) { - return existing_warming_cluster->second->cluster_.get() == cluster; - })); - } } cm_stats_.cluster_modified_.inc(); } else { @@ -614,40 +612,39 @@ bool ClusterManagerImpl::addOrUpdateCluster(const envoy::config::cluster::v3::Cl // the future we may decide to undergo a refactor to unify the logic but the effort/risk to // do that right now does not seem worth it given that the logic is generally pretty clean // and easy to understand. - const bool use_active_map = - init_helper_.state() != ClusterManagerInitHelper::State::AllClustersInitialized; - loadCluster(cluster, version_info, true, use_active_map ? 
active_clusters_ : warming_clusters_); - - if (use_active_map) { + const bool all_clusters_initialized = + init_helper_.state() == ClusterManagerInitHelper::State::AllClustersInitialized; + loadCluster(cluster, version_info, true, warming_clusters_); + auto& cluster_entry = warming_clusters_.at(cluster_name); + if (!all_clusters_initialized) { ENVOY_LOG(debug, "add/update cluster {} during init", cluster_name); - auto& cluster_entry = active_clusters_.at(cluster_name); createOrUpdateThreadLocalCluster(*cluster_entry); init_helper_.addCluster(*cluster_entry->cluster_); } else { - auto& cluster_entry = warming_clusters_.at(cluster_name); ENVOY_LOG(debug, "add/update cluster {} starting warming", cluster_name); cluster_entry->cluster_->initialize([this, cluster_name] { - auto warming_it = warming_clusters_.find(cluster_name); - auto& cluster_entry = *warming_it->second; - - // If the cluster is being updated, we need to cancel any pending merged updates. - // Otherwise, applyUpdates() will fire with a dangling cluster reference. - updates_map_.erase(cluster_name); - - active_clusters_[cluster_name] = std::move(warming_it->second); - warming_clusters_.erase(warming_it); - ENVOY_LOG(debug, "warming cluster {} complete", cluster_name); - createOrUpdateThreadLocalCluster(cluster_entry); - onClusterInit(*cluster_entry.cluster_); - updateClusterCounts(); + auto state_changed_cluster_entry = warming_clusters_.find(cluster_name); + createOrUpdateThreadLocalCluster(*state_changed_cluster_entry->second); + onClusterInit(*state_changed_cluster_entry->second->cluster_); }); } - updateClusterCounts(); return true; } +void ClusterManagerImpl::clusterWarmingToActive(const std::string& cluster_name) { + auto warming_it = warming_clusters_.find(cluster_name); + ASSERT(warming_it != warming_clusters_.end()); + + // If the cluster is being updated, we need to cancel any pending merged updates. + // Otherwise, applyUpdates() will fire with a dangling cluster reference. 
+ updates_map_.erase(cluster_name); + + active_clusters_[cluster_name] = std::move(warming_it->second); + warming_clusters_.erase(warming_it); +} + void ClusterManagerImpl::createOrUpdateThreadLocalCluster(ClusterData& cluster) { tls_->runOnAllThreads([new_cluster = cluster.cluster_->info(), thread_aware_lb_factory = cluster.loadBalancerFactory()]( @@ -702,6 +699,7 @@ bool ClusterManagerImpl::removeCluster(const std::string& cluster_name) { if (existing_warming_cluster != warming_clusters_.end() && existing_warming_cluster->second->added_via_api_) { removed = true; + init_helper_.removeCluster(*existing_warming_cluster->second->cluster_); warming_clusters_.erase(existing_warming_cluster); ENVOY_LOG(info, "removing warming cluster {}", cluster_name); } @@ -804,7 +802,9 @@ void ClusterManagerImpl::updateClusterCounts() { // Once cluster is warmed up, CDS is resumed, and ACK is sent to ADS, providing a // signal to ADS to proceed with RDS updates. // If we're in the middle of shutting down (ads_mux_ already gone) then this is irrelevant. 
- if (ads_mux_) { + const bool all_clusters_initialized = + init_helper_.state() == ClusterManagerInitHelper::State::AllClustersInitialized; + if (all_clusters_initialized && ads_mux_) { const auto type_urls = Config::getAllVersionTypeUrls(); const uint64_t previous_warming = cm_stats_.warming_clusters_.value(); if (previous_warming == 0 && !warming_clusters_.empty()) { diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index 920681bff0ef..1aa14c4be78c 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -482,6 +482,7 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable prefetch_pool); diff --git a/source/common/upstream/health_checker_base_impl.cc b/source/common/upstream/health_checker_base_impl.cc index f6357559eec8..29e2aa6493c4 100644 --- a/source/common/upstream/health_checker_base_impl.cc +++ b/source/common/upstream/health_checker_base_impl.cc @@ -384,6 +384,14 @@ void HealthCheckerImplBase::ActiveHealthCheckSession::onTimeoutBase() { handleFailure(envoy::data::core::v3::NETWORK); } +void HealthCheckerImplBase::ActiveHealthCheckSession::start() { + // Start health checks only after secrets are ready for the transport socket + // that health checks will be performed on. If health checks start + // immediately, they may fail with "network" errors due to TLS credentials + // not yet being loaded, which can result in long startup times. 
+ host_->addReadyCb([this] { onInitialInterval(); }, parent_.transportSocketMatchMetadata().get()); +} + void HealthCheckerImplBase::ActiveHealthCheckSession::onInitialInterval() { if (parent_.initial_jitter_.count() == 0) { onIntervalBase(); diff --git a/source/common/upstream/health_checker_base_impl.h b/source/common/upstream/health_checker_base_impl.h index c1e4bb7affff..9620713cf399 100644 --- a/source/common/upstream/health_checker_base_impl.h +++ b/source/common/upstream/health_checker_base_impl.h @@ -77,7 +77,7 @@ class HealthCheckerImplBase : public HealthChecker, ~ActiveHealthCheckSession() override; HealthTransition setUnhealthy(envoy::data::core::v3::HealthCheckFailureType type); void onDeferredDeleteBase(); - void start() { onInitialInterval(); } + void start(); protected: ActiveHealthCheckSession(HealthCheckerImplBase& parent, HostSharedPtr host); diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 8e6a6db3c507..48d426f65250 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -356,6 +356,14 @@ HostImpl::createConnection(Event::Dispatcher& dispatcher, const ClusterInfo& clu return connection; } +void HostImpl::addReadyCb(std::function callback, + const envoy::config::core::v3::Metadata* metadata) const { + Network::TransportSocketFactory& factory = + (metadata != nullptr) ? 
cluster_->transportSocketMatcher().resolve(metadata).factory_ + : socket_factory_; + factory.addReadyCb(callback); +} + void HostImpl::weight(uint32_t new_weight) { weight_ = std::max(1U, new_weight); } std::vector HostsPerLocalityImpl::filter( diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index c74e489384f0..ed8ef917b047 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -189,6 +189,8 @@ class HostImpl : public HostDescriptionImpl, createHealthCheckConnection(Event::Dispatcher& dispatcher, Network::TransportSocketOptionsSharedPtr transport_socket_options, const envoy::config::core::v3::Metadata* metadata) const override; + void addReadyCb(std::function callback, + const envoy::config::core::v3::Metadata* metadata) const override; std::vector> gauges() const override { diff --git a/source/extensions/filters/http/ext_authz/ext_authz.cc b/source/extensions/filters/http/ext_authz/ext_authz.cc index 38d86bc85709..ba050edeaa24 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.cc +++ b/source/extensions/filters/http/ext_authz/ext_authz.cc @@ -323,6 +323,9 @@ bool Filter::isBufferFull() const { } void Filter::continueDecoding() { + // After sending the check request, we don't need to buffer the data anymore. 
+ buffer_data_ = false; + filter_return_ = FilterReturn::ContinueDecoding; if (!initiating_call_) { callbacks_->continueDecoding(); diff --git a/source/extensions/filters/http/oauth2/config.cc b/source/extensions/filters/http/oauth2/config.cc index e910db23477d..d51d798874e5 100644 --- a/source/extensions/filters/http/oauth2/config.cc +++ b/source/extensions/filters/http/oauth2/config.cc @@ -51,24 +51,32 @@ Http::FilterFactoryCb OAuth2Config::createFilterFactoryFromProtoTyped( const auto& token_secret = credentials.token_secret(); const auto& hmac_secret = credentials.hmac_secret(); - auto& secret_manager = context.clusterManager().clusterManagerFactory().secretManager(); + auto& cluster_manager = context.clusterManager(); + auto& secret_manager = cluster_manager.clusterManagerFactory().secretManager(); auto& transport_socket_factory = context.getTransportSocketFactoryContext(); auto secret_provider_token_secret = secretsProvider(token_secret, secret_manager, transport_socket_factory); + if (secret_provider_token_secret == nullptr) { + throw EnvoyException("invalid token secret configuration"); + } auto secret_provider_hmac_secret = secretsProvider(hmac_secret, secret_manager, transport_socket_factory); + if (secret_provider_hmac_secret == nullptr) { + throw EnvoyException("invalid HMAC secret configuration"); + } auto secret_reader = std::make_shared( secret_provider_token_secret, secret_provider_hmac_secret, context.api()); - auto config = std::make_shared(proto_config, context.clusterManager(), - secret_reader, context.scope(), stats_prefix); + auto config = std::make_shared(proto_config, cluster_manager, secret_reader, + context.scope(), stats_prefix); - return [&context, config](Http::FilterChainFactoryCallbacks& callbacks) -> void { - std::unique_ptr oauth_client = - std::make_unique(context.clusterManager(), config->oauthTokenEndpoint()); - callbacks.addStreamDecoderFilter( - std::make_shared(config, std::move(oauth_client), context.timeSource())); - }; + 
return + [&context, config, &cluster_manager](Http::FilterChainFactoryCallbacks& callbacks) -> void { + std::unique_ptr oauth_client = + std::make_unique(cluster_manager, config->oauthTokenEndpoint()); + callbacks.addStreamDecoderFilter( + std::make_shared(config, std::move(oauth_client), context.timeSource())); + }; } /* diff --git a/source/extensions/filters/http/well_known_names.h b/source/extensions/filters/http/well_known_names.h index dc331ef8e3df..e869e3fc9bbd 100644 --- a/source/extensions/filters/http/well_known_names.h +++ b/source/extensions/filters/http/well_known_names.h @@ -81,7 +81,7 @@ class HttpFilterNameValues { // AWS Lambda filter const std::string AwsLambda = "envoy.filters.http.aws_lambda"; // OAuth filter - const std::string OAuth = "envoy.filters.http.oauth"; + const std::string OAuth = "envoy.filters.http.oauth2"; }; using HttpFilterNames = ConstSingleton; diff --git a/source/extensions/filters/listener/tls_inspector/tls_inspector.cc b/source/extensions/filters/listener/tls_inspector/tls_inspector.cc index cf10888a6b89..d4dcd0fcefca 100644 --- a/source/extensions/filters/listener/tls_inspector/tls_inspector.cc +++ b/source/extensions/filters/listener/tls_inspector/tls_inspector.cc @@ -15,6 +15,7 @@ #include "extensions/transport_sockets/well_known_names.h" +#include "absl/strings/str_join.h" #include "openssl/ssl.h" namespace Envoy { @@ -138,6 +139,7 @@ void Filter::onALPN(const unsigned char* data, unsigned int len) { } protocols.emplace_back(reinterpret_cast(CBS_data(&name)), CBS_len(&name)); } + ENVOY_LOG(trace, "tls:onALPN(), ALPN: {}", absl::StrJoin(protocols, ",")); cb_->socket().setRequestedApplicationProtocols(protocols); alpn_found_ = true; } diff --git a/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc b/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc index 0993f9f4c272..18a55b088e40 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc +++ 
b/source/extensions/filters/network/postgres_proxy/postgres_decoder.cc @@ -176,7 +176,7 @@ void DecoderImpl::initialize() { }; } -bool DecoderImpl::parseMessage(Buffer::Instance& data) { +bool DecoderImpl::parseHeader(Buffer::Instance& data) { ENVOY_LOG(trace, "postgres_proxy: parsing message, len {}", data.length()); // The minimum size of the message sufficient for parsing is 5 bytes. @@ -220,10 +220,6 @@ bool DecoderImpl::parseMessage(Buffer::Instance& data) { data.drain(startup_ ? 4 : 5); // Length plus optional 1st byte. - uint32_t bytes_to_read = message_len_ - 4; - message.assign(std::string(static_cast(data.linearize(bytes_to_read)), bytes_to_read)); - setMessage(message); - ENVOY_LOG(trace, "postgres_proxy: msg parsed"); return true; } @@ -238,7 +234,7 @@ bool DecoderImpl::onData(Buffer::Instance& data, bool frontend) { ENVOY_LOG(trace, "postgres_proxy: decoding {} bytes", data.length()); - if (!parseMessage(data)) { + if (!parseHeader(data)) { return false; } @@ -259,16 +255,25 @@ bool DecoderImpl::onData(Buffer::Instance& data, bool frontend) { } } - std::vector& actions = std::get<2>(msg.get()); - for (const auto& action : actions) { - action(this); - } - // message_len_ specifies total message length including 4 bytes long // "length" field. The length of message body is total length minus size // of "length" field (4 bytes). uint32_t bytes_to_read = message_len_ - 4; + std::vector& actions = std::get<2>(msg.get()); + if (!actions.empty()) { + // Linearize the message for processing. + message_.assign(std::string(static_cast(data.linearize(bytes_to_read)), bytes_to_read)); + + // Invoke actions associated with the type of received message. + for (const auto& action : actions) { + action(this); + } + + // Drop the linearized message. 
+ message_.erase(); + } + ENVOY_LOG(debug, "({}) command = {} ({})", msg_processor.direction_, command_, std::get<0>(msg.get())); ENVOY_LOG(debug, "({}) length = {}", msg_processor.direction_, message_len_); diff --git a/source/extensions/filters/network/postgres_proxy/postgres_decoder.h b/source/extensions/filters/network/postgres_proxy/postgres_decoder.h index dc4638b1c436..409cdbba659c 100644 --- a/source/extensions/filters/network/postgres_proxy/postgres_decoder.h +++ b/source/extensions/filters/network/postgres_proxy/postgres_decoder.h @@ -72,7 +72,6 @@ class DecoderImpl : public Decoder, Logger::Loggable { bool onData(Buffer::Instance& data, bool frontend) override; PostgresSession& getSession() override { return session_; } - void setMessage(std::string message) { message_ = message; } std::string getMessage() { return message_; } void setStartup(bool startup) { startup_ = startup; } @@ -122,7 +121,7 @@ class DecoderImpl : public Decoder, Logger::Loggable { MsgAction unknown_; }; - bool parseMessage(Buffer::Instance& data); + bool parseHeader(Buffer::Instance& data); void decode(Buffer::Instance& data); void decodeAuthentication(); void decodeBackendStatements(); diff --git a/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.h b/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.h index 2ada9e2de17b..4fbe83286b38 100644 --- a/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.h +++ b/source/extensions/quic_listeners/quiche/quic_transport_socket_factory.h @@ -24,6 +24,9 @@ class QuicTransportSocketFactoryBase : public Network::TransportSocketFactory { NOT_REACHED_GCOVR_EXCL_LINE; } bool implementsSecureTransport() const override { return true; } + + // TODO(mpuncel) only invoke callback() once secrets are ready. 
+ void addReadyCb(std::function callback) override { callback(); }; }; // TODO(danzh): when implement ProofSource, examine of it's necessary to diff --git a/source/extensions/transport_sockets/alts/tsi_socket.h b/source/extensions/transport_sockets/alts/tsi_socket.h index 0acba405022d..7bf5877870ab 100644 --- a/source/extensions/transport_sockets/alts/tsi_socket.h +++ b/source/extensions/transport_sockets/alts/tsi_socket.h @@ -101,6 +101,9 @@ class TsiSocketFactory : public Network::TransportSocketFactory { Network::TransportSocketPtr createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const override; + // TODO(mpuncel) only invoke callback() once secrets are ready. + void addReadyCb(std::function callback) override { callback(); }; + private: HandshakerFactory handshaker_factory_; HandshakeValidator handshake_validator_; diff --git a/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h index 4a191ebf539d..bcddef7bf547 100644 --- a/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h +++ b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.h @@ -49,6 +49,7 @@ class UpstreamProxyProtocolSocketFactory : public Network::TransportSocketFactor Network::TransportSocketPtr createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const override; bool implementsSecureTransport() const override; + void addReadyCb(std::function callback) override { callback(); }; private: Network::TransportSocketFactoryPtr transport_socket_factory_; diff --git a/source/extensions/transport_sockets/tap/tap.h b/source/extensions/transport_sockets/tap/tap.h index 33156b705153..d04712b2a50a 100644 --- a/source/extensions/transport_sockets/tap/tap.h +++ b/source/extensions/transport_sockets/tap/tap.h @@ -41,6 +41,8 @@ class TapSocketFactory : public Network::TransportSocketFactory, Network::TransportSocketPtr 
createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const override; bool implementsSecureTransport() const override; + // TODO(mpuncel) only invoke callback() once secrets are ready. + void addReadyCb(std::function callback) override { callback(); }; private: Network::TransportSocketFactoryPtr transport_socket_factory_; diff --git a/source/extensions/transport_sockets/tls/ssl_socket.cc b/source/extensions/transport_sockets/tls/ssl_socket.cc index 485468443096..523242f8fada 100644 --- a/source/extensions/transport_sockets/tls/ssl_socket.cc +++ b/source/extensions/transport_sockets/tls/ssl_socket.cc @@ -355,13 +355,39 @@ bool ClientSslSocketFactory::implementsSecureTransport() const { return true; } void ClientSslSocketFactory::onAddOrUpdateSecret() { ENVOY_LOG(debug, "Secret is updated."); + bool should_run_callbacks = false; { absl::WriterMutexLock l(&ssl_ctx_mu_); ssl_ctx_ = manager_.createSslClientContext(stats_scope_, *config_); + if (ssl_ctx_) { + should_run_callbacks = true; + } + } + if (should_run_callbacks) { + for (const auto& cb : secrets_ready_callbacks_) { + cb(); + } + secrets_ready_callbacks_.clear(); } stats_.ssl_context_update_by_sds_.inc(); } +void ClientSslSocketFactory::addReadyCb(std::function callback) { + bool immediately_run_callback = false; + { + absl::ReaderMutexLock l(&ssl_ctx_mu_); + if (ssl_ctx_) { + immediately_run_callback = true; + } + } + + if (immediately_run_callback) { + callback(); + } else { + secrets_ready_callbacks_.push_back(callback); + } +} + ServerSslSocketFactory::ServerSslSocketFactory(Envoy::Ssl::ServerContextConfigPtr config, Envoy::Ssl::ContextManager& manager, Stats::Scope& stats_scope, @@ -396,13 +422,39 @@ bool ServerSslSocketFactory::implementsSecureTransport() const { return true; } void ServerSslSocketFactory::onAddOrUpdateSecret() { ENVOY_LOG(debug, "Secret is updated."); + bool should_run_callbacks = false; { absl::WriterMutexLock l(&ssl_ctx_mu_); ssl_ctx_ = 
manager_.createSslServerContext(stats_scope_, *config_, server_names_); + + if (ssl_ctx_) { + should_run_callbacks = true; + } + } + if (should_run_callbacks) { + for (const auto& cb : secrets_ready_callbacks_) { + cb(); + } + secrets_ready_callbacks_.clear(); } stats_.ssl_context_update_by_sds_.inc(); } +void ServerSslSocketFactory::addReadyCb(std::function callback) { + bool immediately_run_callback = false; + { + absl::ReaderMutexLock l(&ssl_ctx_mu_); + if (ssl_ctx_) { + immediately_run_callback = true; + } + } + if (immediately_run_callback) { + callback(); + } else { + secrets_ready_callbacks_.push_back(callback); + } +} + } // namespace Tls } // namespace TransportSockets } // namespace Extensions diff --git a/source/extensions/transport_sockets/tls/ssl_socket.h b/source/extensions/transport_sockets/tls/ssl_socket.h index c14cb502bed1..b0dcb139a319 100644 --- a/source/extensions/transport_sockets/tls/ssl_socket.h +++ b/source/extensions/transport_sockets/tls/ssl_socket.h @@ -109,6 +109,8 @@ class ClientSslSocketFactory : public Network::TransportSocketFactory, createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const override; bool implementsSecureTransport() const override; + void addReadyCb(std::function callback) override; + // Secret::SecretCallbacks void onAddOrUpdateSecret() override; @@ -119,6 +121,7 @@ class ClientSslSocketFactory : public Network::TransportSocketFactory, Envoy::Ssl::ClientContextConfigPtr config_; mutable absl::Mutex ssl_ctx_mu_; Envoy::Ssl::ClientContextSharedPtr ssl_ctx_ ABSL_GUARDED_BY(ssl_ctx_mu_); + std::list> secrets_ready_callbacks_; }; class ServerSslSocketFactory : public Network::TransportSocketFactory, @@ -133,6 +136,8 @@ class ServerSslSocketFactory : public Network::TransportSocketFactory, createTransportSocket(Network::TransportSocketOptionsSharedPtr options) const override; bool implementsSecureTransport() const override; + void addReadyCb(std::function callback) override; + // 
Secret::SecretCallbacks void onAddOrUpdateSecret() override; @@ -144,6 +149,7 @@ class ServerSslSocketFactory : public Network::TransportSocketFactory, const std::vector server_names_; mutable absl::Mutex ssl_ctx_mu_; Envoy::Ssl::ServerContextSharedPtr ssl_ctx_ ABSL_GUARDED_BY(ssl_ctx_mu_); + std::list> secrets_ready_callbacks_; }; } // namespace Tls diff --git a/source/server/filter_chain_manager_impl.cc b/source/server/filter_chain_manager_impl.cc index 0de4282f3314..0e006561ce09 100644 --- a/source/server/filter_chain_manager_impl.cc +++ b/source/server/filter_chain_manager_impl.cc @@ -141,8 +141,9 @@ bool FilterChainManagerImpl::isWildcardServerName(const std::string& name) { return absl::StartsWith(name, "*."); } -void FilterChainManagerImpl::addFilterChain( +void FilterChainManagerImpl::addFilterChains( absl::Span filter_chain_span, + const envoy::config::listener::v3::FilterChain* default_filter_chain, FilterChainFactoryBuilder& filter_chain_factory_builder, FilterChainFactoryContextCreator& context_creator) { Cleanup cleanup([this]() { origin_ = absl::nullopt; }); @@ -183,8 +184,7 @@ void FilterChainManagerImpl::addFilterChain( // Reject partial wildcards, we don't match on them. 
for (const auto& server_name : filter_chain_match.server_names()) { - if (server_name.find('*') != std::string::npos && - !FilterChainManagerImpl::isWildcardServerName(server_name)) { + if (server_name.find('*') != std::string::npos && !isWildcardServerName(server_name)) { throw EnvoyException( fmt::format("error adding listener '{}': partial wildcards are not supported in " "\"server_names\"", @@ -208,13 +208,49 @@ void FilterChainManagerImpl::addFilterChain( filter_chain_match.server_names(), filter_chain_match.transport_protocol(), filter_chain_match.application_protocols(), filter_chain_match.source_type(), source_ips, filter_chain_match.source_ports(), filter_chain_impl); + fc_contexts_[*filter_chain] = filter_chain_impl; } convertIPsToTries(); + copyOrRebuildDefaultFilterChain(default_filter_chain, filter_chain_factory_builder, + context_creator); ENVOY_LOG(debug, "new fc_contexts has {} filter chains, including {} newly built", fc_contexts_.size(), new_filter_chain_size); } +void FilterChainManagerImpl::copyOrRebuildDefaultFilterChain( + const envoy::config::listener::v3::FilterChain* default_filter_chain, + FilterChainFactoryBuilder& filter_chain_factory_builder, + FilterChainFactoryContextCreator& context_creator) { + // Default filter chain is built exactly once. + ASSERT(!default_filter_chain_message_.has_value()); + + // Save the default filter chain message. This message could be used in next listener update. + if (default_filter_chain == nullptr) { + return; + } + default_filter_chain_message_ = absl::make_optional(*default_filter_chain); + + // Origin filter chain manager could be empty if the current is the ancestor. 
+ const auto* origin = getOriginFilterChainManager(); + if (origin == nullptr) { + default_filter_chain_ = + filter_chain_factory_builder.buildFilterChain(*default_filter_chain, context_creator); + return; + } + + // Copy from original filter chain manager, or build new filter chain if the default filter chain + // is not equivalent to the one in the original filter chain manager. + MessageUtil eq; + if (origin->default_filter_chain_message_.has_value() && + eq(origin->default_filter_chain_message_.value(), *default_filter_chain)) { + default_filter_chain_ = origin->default_filter_chain_; + } else { + default_filter_chain_ = + filter_chain_factory_builder.buildFilterChain(*default_filter_chain, context_creator); + } +} + void FilterChainManagerImpl::addFilterChainForDestinationPorts( DestinationPortsMap& destination_ports_map, uint16_t destination_port, const std::vector& destination_ips, @@ -381,21 +417,30 @@ const Network::FilterChain* FilterChainManagerImpl::findFilterChain(const Network::ConnectionSocket& socket) const { const auto& address = socket.localAddress(); + const Network::FilterChain* best_match_filter_chain = nullptr; // Match on destination port (only for IP addresses). if (address->type() == Network::Address::Type::Ip) { const auto port_match = destination_ports_map_.find(address->ip()->port()); if (port_match != destination_ports_map_.end()) { - return findFilterChainForDestinationIP(*port_match->second.second, socket); + best_match_filter_chain = findFilterChainForDestinationIP(*port_match->second.second, socket); + if (best_match_filter_chain != nullptr) { + return best_match_filter_chain; + } else { + // There is entry for specific port but none of the filter chain matches. Instead of + // matching catch-all port 0, the fallback filter chain is returned. + return default_filter_chain_.get(); + } } } - - // Match on catch-all port 0. + // Match on catch-all port 0 if there is no specific port sub tree. 
const auto port_match = destination_ports_map_.find(0); if (port_match != destination_ports_map_.end()) { - return findFilterChainForDestinationIP(*port_match->second.second, socket); + best_match_filter_chain = findFilterChainForDestinationIP(*port_match->second.second, socket); } - - return nullptr; + return best_match_filter_chain != nullptr + ? best_match_filter_chain + // Neither exact port nor catch-all port matches. Use fallback filter chain. + : default_filter_chain_.get(); } const Network::FilterChain* FilterChainManagerImpl::findFilterChainForDestinationIP( diff --git a/source/server/filter_chain_manager_impl.h b/source/server/filter_chain_manager_impl.h index 7c5f830666b3..3bcf01d2ec2e 100644 --- a/source/server/filter_chain_manager_impl.h +++ b/source/server/filter_chain_manager_impl.h @@ -186,17 +186,36 @@ class FilterChainManagerImpl : public Network::FilterChainManager, // Add all filter chains into this manager. During the lifetime of FilterChainManagerImpl this // should be called at most once. - void addFilterChain( + void addFilterChains( absl::Span filter_chain_span, - FilterChainFactoryBuilder& b, FilterChainFactoryContextCreator& context_creator); + const envoy::config::listener::v3::FilterChain* default_filter_chain, + FilterChainFactoryBuilder& filter_chain_factory_builder, + FilterChainFactoryContextCreator& context_creator); + static bool isWildcardServerName(const std::string& name); // Return the current view of filter chains, keyed by filter chain message. Used by the owning // listener to calculate the intersection of filter chains with another listener. 
const FcContextMap& filterChainsByMessage() const { return fc_contexts_; } + const absl::optional& + defaultFilterChainMessage() const { + return default_filter_chain_message_; + } + const Network::DrainableFilterChainSharedPtr& defaultFilterChain() const { + return default_filter_chain_; + } private: void convertIPsToTries(); + + // Build default filter chain from filter chain message. Skip the build but copy from original + // filter chain manager if the default filter chain message duplicates the message in origin + // filter chain manager. Called by addFilterChains(). + void copyOrRebuildDefaultFilterChain( + const envoy::config::listener::v3::FilterChain* default_filter_chain, + FilterChainFactoryBuilder& filter_chain_factory_builder, + FilterChainFactoryContextCreator& context_creator); + using SourcePortsMap = absl::flat_hash_map; using SourcePortsMapSharedPtr = std::shared_ptr; using SourceIPsMap = absl::flat_hash_map; @@ -293,9 +312,15 @@ class FilterChainManagerImpl : public Network::FilterChainManager, // detect the filter chains in the intersection of existing listener and new listener. FcContextMap fc_contexts_; + absl::optional default_filter_chain_message_; + // The optional fallback filter chain if destination_ports_map_ does not find a matched filter + // chain. + Network::DrainableFilterChainSharedPtr default_filter_chain_; + // Mapping of FilterChain's configured destination ports, IPs, server names, transport protocols // and application protocols, using structures defined above. DestinationPortsMap destination_ports_map_; + const Network::Address::InstanceConstSharedPtr address_; // This is the reference to a factory context which all the generations of listener share. 
Configuration::FactoryContext& parent_context_; diff --git a/source/server/listener_impl.cc b/source/server/listener_impl.cc index 4f3d0b3bc523..f2dcd9d1c30f 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -481,11 +481,11 @@ void ListenerImpl::buildFilterChains() { parent_.server_.stats(), parent_.server_.singletonManager(), parent_.server_.threadLocal(), validation_visitor_, parent_.server_.api()); transport_factory_context.setInitManager(*dynamic_init_manager_); - // The init manager is a little messy. Will refactor when filter chain manager could accept - // network filter chain update. - // TODO(lambdai): create builder from filter_chain_manager to obtain the init manager ListenerFilterChainFactoryBuilder builder(*this, transport_factory_context); - filter_chain_manager_.addFilterChain(config_.filter_chains(), builder, filter_chain_manager_); + filter_chain_manager_.addFilterChains( + config_.filter_chains(), + config_.has_default_filter_chain() ? &config_.default_filter_chain() : nullptr, builder, + filter_chain_manager_); } void ListenerImpl::buildSocketOptions() { @@ -743,6 +743,15 @@ void ListenerImpl::diffFilterChain(const ListenerImpl& another_listener, callback(*message_and_filter_chain.second); } } + // Filter chain manager maintains an optional default filter chain besides the filter chains + // indexed by message. 
+ if (auto eq = MessageUtil(); + filter_chain_manager_.defaultFilterChainMessage().has_value() && + (!another_listener.filter_chain_manager_.defaultFilterChainMessage().has_value() || + !eq(*another_listener.filter_chain_manager_.defaultFilterChainMessage(), + *filter_chain_manager_.defaultFilterChainMessage()))) { + callback(*filter_chain_manager_.defaultFilterChain()); + } } bool ListenerMessageUtil::filterChainOnlyChange(const envoy::config::listener::v3::Listener& lhs, @@ -752,6 +761,8 @@ bool ListenerMessageUtil::filterChainOnlyChange(const envoy::config::listener::v differencer.set_repeated_field_comparison(Protobuf::util::MessageDifferencer::AS_SET); differencer.IgnoreField( envoy::config::listener::v3::Listener::GetDescriptor()->FindFieldByName("filter_chains")); + differencer.IgnoreField(envoy::config::listener::v3::Listener::GetDescriptor()->FindFieldByName( + "default_filter_chain")); return differencer.Compare(lhs, rhs); } diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index 76e509f57b16..8b95de6b6cf4 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -40,10 +40,6 @@ CODEC_TEST_DEPS = [ envoy_cc_test( name = "codec_impl_test", srcs = ["codec_impl_test.cc"], - # The default codec is the legacy codec. Override runtime flag for testing new codec. - args = [ - "--runtime-feature-override-for-tests=envoy.reloadable_features.new_codec_behavior", - ], shard_count = 5, deps = CODEC_TEST_DEPS, ) @@ -51,7 +47,7 @@ envoy_cc_test( envoy_cc_test( name = "codec_impl_legacy_test", srcs = ["codec_impl_test.cc"], - # The default codec is the legacy codec. Verify the runtime flag for the new codec is disabled. + # The default codec is the new codec. Disable runtime flag for testing old codec. 
args = [ "--runtime-feature-disable-for-tests=envoy.reloadable_features.new_codec_behavior", ], diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index bc8cd64953df..d8ec1e0adb55 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -2066,7 +2066,6 @@ TEST_P(Http2CodecImplTest, PingFlood) { Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior") ? "Too many control frames in the outbound queue." : "Too many frames in the outbound queue."); - EXPECT_EQ(ack_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_CONTROL_FRAMES); EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_control_flood").value()); } @@ -2313,7 +2312,6 @@ TEST_P(Http2CodecImplTest, PingStacksWithDataFlood) { EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, "Too many frames in the outbound queue."); - EXPECT_EQ(frame_count, CommonUtility::OptionsLimits::DEFAULT_MAX_OUTBOUND_FRAMES); EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); } @@ -2592,7 +2590,10 @@ TEST_P(Http2CodecImplTest, KeepAliveCausesOutboundFlood) { // Trigger sending a PING, which should overflow the outbound frame queue and cause // client to be disconnected - EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(timeout_ms), _)); + if (!Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_codec_behavior")) { + // new codec does not schedule timeout callback if the PING had triggered flood protection + EXPECT_CALL(*timeout_timer, enableTimer(std::chrono::milliseconds(timeout_ms), _)); + } send_timer->callback_(); EXPECT_TRUE(violation_callback->enabled_); @@ -2756,14 +2757,9 @@ TestNghttp2SessionFactory ssize_t { // Cast down to MetadataTestClientConnectionImpl to leverage friendship. 
- auto status_or_len = - static_cast*>( - static_cast(user_data)) - ->onSend(data, length); - if (status_or_len.ok()) { - return status_or_len.value(); - } - return NGHTTP2_ERR_CALLBACK_FAILURE; + return static_cast*>( + static_cast(user_data)) + ->onSend(data, length); }); nghttp2_option_new(&options_); nghttp2_option_set_user_recv_extension_type(options_, METADATA_FRAME_TYPE); diff --git a/test/common/router/router_ratelimit_test.cc b/test/common/router/router_ratelimit_test.cc index 7d5fba68835f..bad535cbf100 100644 --- a/test/common/router/router_ratelimit_test.cc +++ b/test/common/router/router_ratelimit_test.cc @@ -481,7 +481,7 @@ TEST_F(RateLimitPolicyEntryTest, GenericKeyWithEmptyDescriptorKey) { testing::ContainerEq(descriptors_)); } -TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataMatch) { +TEST_F(RateLimitPolicyEntryTest, DEPRECATED_FEATURE_TEST(DynamicMetaDataMatch)) { const std::string yaml = R"EOF( actions: - dynamic_metadata: @@ -513,11 +513,108 @@ TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataMatch) { testing::ContainerEq(descriptors_)); } +TEST_F(RateLimitPolicyEntryTest, MetaDataMatchDynamicSourceByDefault) { + const std::string yaml = R"EOF( +actions: +- metadata: + descriptor_key: fake_key + default_value: fake_value + metadata_key: + key: 'envoy.xxx' + path: + - key: test + - key: prop + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + envoy.xxx: + test: + prop: foo + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + + EXPECT_THAT(std::vector({{{{"fake_key", "foo"}}}}), + testing::ContainerEq(descriptors_)); +} + +TEST_F(RateLimitPolicyEntryTest, MetaDataMatchDynamicSource) { + const std::string yaml = R"EOF( +actions: +- metadata: + descriptor_key: fake_key + default_value: fake_value + metadata_key: + key: 'envoy.xxx' + path: + 
- key: test + - key: prop + source: DYNAMIC + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + envoy.xxx: + test: + prop: foo + )EOF"; + + envoy::config::core::v3::Metadata metadata; + TestUtility::loadFromYaml(metadata_yaml, metadata); + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + &metadata); + + EXPECT_THAT(std::vector({{{{"fake_key", "foo"}}}}), + testing::ContainerEq(descriptors_)); +} + +TEST_F(RateLimitPolicyEntryTest, MetaDataMatchRouteEntrySource) { + const std::string yaml = R"EOF( +actions: +- metadata: + descriptor_key: fake_key + default_value: fake_value + metadata_key: + key: 'envoy.xxx' + path: + - key: test + - key: prop + source: ROUTE_ENTRY + )EOF"; + + setupTest(yaml); + + std::string metadata_yaml = R"EOF( +filter_metadata: + envoy.xxx: + test: + prop: foo + )EOF"; + + TestUtility::loadFromYaml(metadata_yaml, route_.metadata_); + + rate_limit_entry_->populateDescriptors(route_, descriptors_, "", header_, default_remote_address_, + dynamic_metadata_); + + EXPECT_THAT(std::vector({{{{"fake_key", "foo"}}}}), + testing::ContainerEq(descriptors_)); +} + // Tests that the default_value is used in the descriptor when the metadata_key is empty. 
-TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataNoMatchWithDefaultValue) { +TEST_F(RateLimitPolicyEntryTest, MetaDataNoMatchWithDefaultValue) { const std::string yaml = R"EOF( actions: -- dynamic_metadata: +- metadata: descriptor_key: fake_key default_value: fake_value metadata_key: @@ -546,10 +643,10 @@ TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataNoMatchWithDefaultValue) { testing::ContainerEq(descriptors_)); } -TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataNoMatch) { +TEST_F(RateLimitPolicyEntryTest, MetaDataNoMatch) { const std::string yaml = R"EOF( actions: -- dynamic_metadata: +- metadata: descriptor_key: fake_key metadata_key: key: 'envoy.xxx' @@ -576,10 +673,10 @@ TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataNoMatch) { EXPECT_TRUE(descriptors_.empty()); } -TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataEmptyValue) { +TEST_F(RateLimitPolicyEntryTest, MetaDataEmptyValue) { const std::string yaml = R"EOF( actions: -- dynamic_metadata: +- metadata: descriptor_key: fake_key metadata_key: key: 'envoy.xxx' @@ -606,10 +703,10 @@ TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataEmptyValue) { EXPECT_TRUE(descriptors_.empty()); } // Tests that no descriptor is generated when both the metadata_key and default_value are empty. 
-TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataAndDefaultValueEmpty) { +TEST_F(RateLimitPolicyEntryTest, MetaDataAndDefaultValueEmpty) { const std::string yaml = R"EOF( actions: -- dynamic_metadata: +- metadata: descriptor_key: fake_key default_value: "" metadata_key: @@ -637,10 +734,10 @@ TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataAndDefaultValueEmpty) { EXPECT_TRUE(descriptors_.empty()); } -TEST_F(RateLimitPolicyEntryTest, DynamicMetaDataNonStringMatch) { +TEST_F(RateLimitPolicyEntryTest, MetaDataNonStringNoMatch) { const std::string yaml = R"EOF( actions: -- dynamic_metadata: +- metadata: descriptor_key: fake_key metadata_key: key: 'envoy.xxx' diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index b24c45330de5..34c31a7f4f03 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -1055,7 +1055,7 @@ TEST_F(ClusterManagerImplTest, InitializeOrder) { last_updated: seconds: 1234567891 nanos: 234000000 - dynamic_active_clusters: + dynamic_warming_clusters: - version_info: "version1" cluster: "@type": type.googleapis.com/envoy.config.cluster.v3.Cluster @@ -1107,7 +1107,7 @@ TEST_F(ClusterManagerImplTest, InitializeOrder) { last_updated: seconds: 1234567891 nanos: 234000000 - dynamic_warming_clusters: + dynamic_active_clusters: )EOF"); EXPECT_CALL(*cluster3, initialize(_)); diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc index cd03e0130459..c73876ebefc1 100644 --- a/test/common/upstream/health_checker_impl_test.cc +++ b/test/common/upstream/health_checker_impl_test.cc @@ -961,6 +961,8 @@ TEST_F(HttpHealthCheckerImplTest, TlsOptions) { Network::TransportSocketFactoryPtr(socket_factory)); cluster_->info_->transport_socket_matcher_.reset(transport_socket_match); + EXPECT_CALL(*socket_factory, addReadyCb(_)) + .WillOnce(Invoke([&](std::function callback) -> void { 
callback(); })); EXPECT_CALL(*socket_factory, createTransportSocket(ApplicationProtocolListEq("http1"))); allocHealthChecker(yaml); @@ -2448,13 +2450,19 @@ TEST_F(HttpHealthCheckerImplTest, TransportSocketMatchCriteria) { ALL_TRANSPORT_SOCKET_MATCH_STATS(POOL_COUNTER_PREFIX(stats_store, "test"))}; auto health_check_only_socket_factory = std::make_unique(); - // We expect resolve() to be called twice, once for endpoint socket matching (with no metadata in - // this test) and once for health check socket matching. In the latter we expect metadata that - // matches the above object. + // We expect resolve() to be called 3 times, once for endpoint socket matching (with no metadata + // in this test) and twice for health check socket matching (once for checking if secrets are + // ready on the transport socket, and again for actually getting the health check transport socket + // to create a connection). In the latter 2 calls, we expect metadata that matches the above + // object. EXPECT_CALL(*transport_socket_match, resolve(nullptr)); EXPECT_CALL(*transport_socket_match, resolve(MetadataEq(metadata))) - .WillOnce(Return(TransportSocketMatcher::MatchData( - *health_check_only_socket_factory, health_transport_socket_stats, "health_check_only"))); + .Times(2) + .WillRepeatedly(Return(TransportSocketMatcher::MatchData( + *health_check_only_socket_factory, health_transport_socket_stats, "health_check_only"))) + .RetiresOnSaturation(); + EXPECT_CALL(*health_check_only_socket_factory, addReadyCb(_)) + .WillOnce(Invoke([&](std::function callback) -> void { callback(); })); // The health_check_only_socket_factory should be used to create a transport socket for the health // check connection. 
EXPECT_CALL(*health_check_only_socket_factory, createTransportSocket(_)); @@ -2490,6 +2498,9 @@ TEST_F(HttpHealthCheckerImplTest, NoTransportSocketMatchCriteria) { )EOF"; auto default_socket_factory = std::make_unique(); + + EXPECT_CALL(*default_socket_factory, addReadyCb(_)) + .WillOnce(Invoke([&](std::function callback) -> void { callback(); })); // The default_socket_factory should be used to create a transport socket for the health check // connection. EXPECT_CALL(*default_socket_factory, createTransportSocket(_)); diff --git a/test/common/upstream/transport_socket_matcher_test.cc b/test/common/upstream/transport_socket_matcher_test.cc index cfde130d1d1f..f770d1f4fdd8 100644 --- a/test/common/upstream/transport_socket_matcher_test.cc +++ b/test/common/upstream/transport_socket_matcher_test.cc @@ -33,6 +33,7 @@ class FakeTransportSocketFactory : public Network::TransportSocketFactory { MOCK_METHOD(bool, implementsSecureTransport, (), (const)); MOCK_METHOD(Network::TransportSocketPtr, createTransportSocket, (Network::TransportSocketOptionsSharedPtr), (const)); + MOCK_METHOD(void, addReadyCb, (std::function)); FakeTransportSocketFactory(std::string id) : id_(std::move(id)) {} std::string id() const { return id_; } @@ -48,6 +49,7 @@ class FooTransportSocketFactory MOCK_METHOD(bool, implementsSecureTransport, (), (const)); MOCK_METHOD(Network::TransportSocketPtr, createTransportSocket, (Network::TransportSocketOptionsSharedPtr), (const)); + MOCK_METHOD(void, addReadyCb, (std::function)); Network::TransportSocketFactoryPtr createTransportSocketFactory(const Protobuf::Message& proto, diff --git a/test/config/integration/BUILD b/test/config/integration/BUILD index 3baa54708100..1fbb9f235da1 100644 --- a/test/config/integration/BUILD +++ b/test/config/integration/BUILD @@ -42,5 +42,5 @@ filegroup( filegroup( name = "google_com_proxy_port_0", - srcs = ["google_com_proxy_port_0.v2.yaml"], + srcs = ["google_com_proxy_port_0.yaml"], ) diff --git 
a/test/config/integration/google_com_proxy_port_0.v2.yaml b/test/config/integration/google_com_proxy_port_0.yaml similarity index 83% rename from test/config/integration/google_com_proxy_port_0.v2.yaml rename to test/config/integration/google_com_proxy_port_0.yaml index c67b6845960d..236942d3379d 100644 --- a/test/config/integration/google_com_proxy_port_0.v2.yaml +++ b/test/config/integration/google_com_proxy_port_0.yaml @@ -16,7 +16,7 @@ static_resources: - filters: - name: http typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http codec_type: AUTO route_config: @@ -26,7 +26,7 @@ static_resources: domains: ["*"] routes: - match: { prefix: "/" } - route: { host_rewrite: www.google.com, cluster: service_google } + route: { host_rewrite_literal: www.google.com, cluster: service_google } clusters: - name: service_google connect_timeout: 0.25s diff --git a/test/config_test/config_test.cc b/test/config_test/config_test.cc index 5d153d124ddc..162c09c2f0ac 100644 --- a/test/config_test/config_test.cc +++ b/test/config_test/config_test.cc @@ -160,7 +160,7 @@ void testMerge() { Api::ApiPtr api = Api::createApiForTest(); const std::string overlay = "static_resources: { clusters: [{name: 'foo'}]}"; - OptionsImpl options(Server::createTestOptionsImpl("google_com_proxy.v2.yaml", overlay, + OptionsImpl options(Server::createTestOptionsImpl("google_com_proxy.yaml", overlay, Network::Address::IpVersion::v6)); envoy::config::bootstrap::v3::Bootstrap bootstrap; Server::InstanceUtil::loadBootstrapConfig(bootstrap, options, diff --git a/test/exe/main_common_test.cc b/test/exe/main_common_test.cc index 000142b7ff2e..8428cf6b43fe 100644 --- a/test/exe/main_common_test.cc +++ b/test/exe/main_common_test.cc @@ -57,7 +57,7 @@ class MainCommonTest : public 
testing::TestWithParamdecodeTrailers(request_trailers_)); } +// Checks that the filter initiates an authorization request when the buffer reaches maximum +// request bytes and allow_partial_message is set to true. In addition to that, after the filter +// sends the check request, data decoding continues. +TEST_F(HttpFilterTest, RequestDataWithPartialMessageThenContinueDecoding) { + InSequence s; + + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + failure_mode_allow: false + with_request_body: + max_request_bytes: 10 + allow_partial_message: true + )EOF"); + + ON_CALL(filter_callbacks_, connection()).WillByDefault(Return(&connection_)); + ON_CALL(filter_callbacks_, decodingBuffer()).WillByDefault(Return(&data_)); + EXPECT_CALL(filter_callbacks_, setDecoderBufferLimit(_)).Times(0); + EXPECT_CALL(connection_, remoteAddress()).WillOnce(ReturnRef(addr_)); + EXPECT_CALL(connection_, localAddress()).WillOnce(ReturnRef(addr_)); + + // The check call should only be called once. 
+ EXPECT_CALL(*client_, check(_, _, _, testing::A(), _)) + .WillOnce( + WithArgs<0>(Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks) -> void { + request_callbacks_ = &callbacks; + }))); + + EXPECT_CALL(filter_callbacks_, continueDecoding()); + + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_, false)); + + data_.add("foo"); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data_, false)); + + data_.add("bar"); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndBuffer, filter_->decodeData(data_, false)); + + data_.add("barfoo"); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, false)); + + data_.add("more data after watermark is set is possible"); + EXPECT_EQ(Http::FilterDataStatus::StopIterationAndWatermark, filter_->decodeData(data_, false)); + + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::OK; + request_callbacks_->onComplete(std::make_unique(response)); + + data_.add("more data after calling check request"); + EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->decodeData(data_, true)); + + EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter_->decodeTrailers(request_trailers_)); +} + // Checks that the filter initiates the authorization process only when the filter decode trailers // is called. 
TEST_F(HttpFilterTest, RequestDataWithSmallBuffer) { diff --git a/test/extensions/filters/http/lua/lua_filter_test.cc b/test/extensions/filters/http/lua/lua_filter_test.cc index b5b20846a119..310bafc36b39 100644 --- a/test/extensions/filters/http/lua/lua_filter_test.cc +++ b/test/extensions/filters/http/lua/lua_filter_test.cc @@ -809,13 +809,14 @@ TEST_F(LuaHttpFilterTest, HttpCall) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestRequestHeaderMapImpl{{":path", "/"}, - {":method", "POST"}, - {":authority", "foo"}, - {"set-cookie", "flavor=chocolate; Path=/"}, - {"set-cookie", "variant=chewy; Path=/"}, - {"content-length", "11"}}), - message->headers()); + const Http::TestRequestHeaderMapImpl expected_headers{ + {":method", "POST"}, + {":path", "/"}, + {":authority", "foo"}, + {"set-cookie", "flavor=chocolate; Path=/"}, + {"set-cookie", "variant=chewy; Path=/"}, + {"content-length", "11"}}; + EXPECT_THAT(&message->headers(), HeaderMapEqualIgnoreOrder(&expected_headers)); callbacks = &cb; return &request; })); @@ -877,13 +878,14 @@ TEST_F(LuaHttpFilterTest, HttpCallAsyncFalse) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestRequestHeaderMapImpl{{":path", "/"}, - {":method", "POST"}, - {":authority", "foo"}, - {"set-cookie", "flavor=chocolate; Path=/"}, - {"set-cookie", "variant=chewy; Path=/"}, - {"content-length", "11"}}), - message->headers()); + const Http::TestRequestHeaderMapImpl expected_headers{ + {":path", "/"}, + {":method", "POST"}, + {":authority", "foo"}, + {"set-cookie", "flavor=chocolate; Path=/"}, + {"set-cookie", "variant=chewy; Path=/"}, + {"content-length", "11"}}; + EXPECT_THAT(&message->headers(), HeaderMapEqualIgnoreOrder(&expected_headers)); callbacks = &cb; return 
&request; })); @@ -936,13 +938,14 @@ TEST_F(LuaHttpFilterTest, HttpCallAsynchronous) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestRequestHeaderMapImpl{{":path", "/"}, - {":method", "POST"}, - {":authority", "foo"}, - {"set-cookie", "flavor=chocolate; Path=/"}, - {"set-cookie", "variant=chewy; Path=/"}, - {"content-length", "11"}}), - message->headers()); + const Http::TestRequestHeaderMapImpl expected_headers{ + {":path", "/"}, + {":method", "POST"}, + {":authority", "foo"}, + {"set-cookie", "flavor=chocolate; Path=/"}, + {"set-cookie", "variant=chewy; Path=/"}, + {"content-length", "11"}}; + EXPECT_THAT(&message->headers(), HeaderMapEqualIgnoreOrder(&expected_headers)); callbacks = &cb; return &request; })); @@ -1004,11 +1007,11 @@ TEST_F(LuaHttpFilterTest, DoubleHttpCall) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestRequestHeaderMapImpl{{":path", "/"}, - {":method", "POST"}, - {":authority", "foo"}, - {"content-length", "11"}}), - message->headers()); + const Http::TestRequestHeaderMapImpl expected_headers{{":path", "/"}, + {":method", "POST"}, + {":authority", "foo"}, + {"content-length", "11"}}; + EXPECT_THAT(&message->headers(), HeaderMapEqualIgnoreOrder(&expected_headers)); callbacks = &cb; return &request; })); @@ -1027,9 +1030,9 @@ TEST_F(LuaHttpFilterTest, DoubleHttpCall) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestRequestHeaderMapImpl{ - {":path", "/bar"}, {":method", "GET"}, {":authority", "foo"}}), - message->headers()); + const Http::TestRequestHeaderMapImpl expected_headers{ + {":path", "/bar"}, {":method", "GET"}, 
{":authority", "foo"}}; + EXPECT_THAT(&message->headers(), HeaderMapEqualIgnoreOrder(&expected_headers)); callbacks = &cb; return &request; })); @@ -1084,9 +1087,9 @@ TEST_F(LuaHttpFilterTest, HttpCallNoBody) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestRequestHeaderMapImpl{ - {":path", "/"}, {":method", "GET"}, {":authority", "foo"}}), - message->headers()); + const Http::TestRequestHeaderMapImpl expected_headers{ + {":path", "/"}, {":method", "GET"}, {":authority", "foo"}}; + EXPECT_THAT(&message->headers(), HeaderMapEqualIgnoreOrder(&expected_headers)); callbacks = &cb; return &request; })); @@ -1142,9 +1145,9 @@ TEST_F(LuaHttpFilterTest, HttpCallImmediateResponse) { .WillOnce( Invoke([&](Http::RequestMessagePtr& message, Http::AsyncClient::Callbacks& cb, const Http::AsyncClient::RequestOptions&) -> Http::AsyncClient::Request* { - EXPECT_EQ((Http::TestRequestHeaderMapImpl{ - {":path", "/"}, {":method", "GET"}, {":authority", "foo"}}), - message->headers()); + const Http::TestRequestHeaderMapImpl expected_headers{ + {":path", "/"}, {":method", "GET"}, {":authority", "foo"}}; + EXPECT_THAT(&message->headers(), HeaderMapEqualIgnoreOrder(&expected_headers)); callbacks = &cb; return &request; })); diff --git a/test/extensions/filters/http/lua/wrappers_test.cc b/test/extensions/filters/http/lua/wrappers_test.cc index e3b03f538eaf..990016db3f15 100644 --- a/test/extensions/filters/http/lua/wrappers_test.cc +++ b/test/extensions/filters/http/lua/wrappers_test.cc @@ -293,7 +293,6 @@ TEST_F(LuaStreamInfoWrapperTest, SetGetAndIterateDynamicMetadata) { end )EOF"}; - InSequence s; setup(SCRIPT); StreamInfo::StreamInfoImpl stream_info(Http::Protocol::Http2, test_time_.timeSystem()); diff --git a/test/extensions/filters/http/oauth2/config_test.cc b/test/extensions/filters/http/oauth2/config_test.cc index 4ac59da9fe96..4be857a21d0a 
100644 --- a/test/extensions/filters/http/oauth2/config_test.cc +++ b/test/extensions/filters/http/oauth2/config_test.cc @@ -22,33 +22,102 @@ namespace Oauth2 { using testing::NiceMock; using testing::Return; +namespace { + +// This loads one of the secrets in credentials, and fails the other one. +void expectInvalidSecretConfig(const std::string& failed_secret_name, + const std::string& exception_message) { + const std::string yaml = R"EOF( +config: + token_endpoint: + cluster: foo + uri: oauth.com/token + timeout: 3s + credentials: + client_id: "secret" + token_secret: + name: token + hmac_secret: + name: hmac + authorization_endpoint: https://oauth.com/oauth/authorize/ + redirect_uri: "%REQ(:x-forwarded-proto)%://%REQ(:authority)%/callback" + redirect_path_matcher: + path: + exact: /callback + signout_path: + path: + exact: /signout + )EOF"; + + OAuth2Config factory; + ProtobufTypes::MessagePtr proto_config = factory.createEmptyConfigProto(); + TestUtility::loadFromYaml(yaml, *proto_config); + NiceMock context; + + auto& secret_manager = context.cluster_manager_.cluster_manager_factory_.secretManager(); + ON_CALL(secret_manager, + findStaticGenericSecretProvider(failed_secret_name == "token" ? 
"hmac" : "token")) + .WillByDefault(Return(std::make_shared( + envoy::extensions::transport_sockets::tls::v3::GenericSecret()))); + + EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(*proto_config, "stats", context), + EnvoyException, exception_message); +} + +} // namespace + TEST(ConfigTest, CreateFilter) { const std::string yaml = R"EOF( config: - token_endpoint: - cluster: foo - uri: oauth.com/token - timeout: 3s - authorization_endpoint: https://oauth.com/oauth/authorize/ - redirect_uri: "%REQ(:x-forwarded-proto)%://%REQ(:authority)%/callback" - signout_path: - path: - exact: /signout + token_endpoint: + cluster: foo + uri: oauth.com/token + timeout: 3s + credentials: + client_id: "secret" + token_secret: + name: token + hmac_secret: + name: hmac + authorization_endpoint: https://oauth.com/oauth/authorize/ + redirect_uri: "%REQ(:x-forwarded-proto)%://%REQ(:authority)%/callback" + redirect_path_matcher: + path: + exact: /callback + signout_path: + path: + exact: /signout )EOF"; - envoy::extensions::filters::http::oauth2::v3alpha::OAuth2 proto_config; - MessageUtil::loadFromYaml(yaml, proto_config, ProtobufMessage::getStrictValidationVisitor()); - NiceMock factory_context; - auto& secret_manager = factory_context.cluster_manager_.cluster_manager_factory_.secretManager(); + OAuth2Config factory; + ProtobufTypes::MessagePtr proto_config = factory.createEmptyConfigProto(); + TestUtility::loadFromYaml(yaml, *proto_config); + Server::Configuration::MockFactoryContext context; + + // This returns non-nullptr for token_secret and hmac_secret. 
+ auto& secret_manager = context.cluster_manager_.cluster_manager_factory_.secretManager(); ON_CALL(secret_manager, findStaticGenericSecretProvider(_)) .WillByDefault(Return(std::make_shared( envoy::extensions::transport_sockets::tls::v3::GenericSecret()))); - OAuth2Config config; - auto cb = config.createFilterFactoryFromProtoTyped(proto_config, "whatever", factory_context); + EXPECT_CALL(context, messageValidationVisitor()); + EXPECT_CALL(context, clusterManager()); + EXPECT_CALL(context, scope()); + EXPECT_CALL(context, timeSource()); + EXPECT_CALL(context, api()); + EXPECT_CALL(context, getTransportSocketFactoryContext()); + Http::FilterFactoryCb cb = factory.createFilterFactoryFromProto(*proto_config, "stats", context); + Http::MockFilterChainFactoryCallbacks filter_callback; + EXPECT_CALL(filter_callback, addStreamDecoderFilter(_)); + cb(filter_callback); +} + +TEST(ConfigTest, InvalidTokenSecret) { + expectInvalidSecretConfig("token", "invalid token secret configuration"); +} - NiceMock filter_callbacks; - cb(filter_callbacks); +TEST(ConfigTest, InvalidHmacSecret) { + expectInvalidSecretConfig("hmac", "invalid HMAC secret configuration"); } TEST(ConfigTest, CreateFilterMissingConfig) { @@ -65,4 +134,4 @@ TEST(ConfigTest, CreateFilterMissingConfig) { } // namespace Oauth2 } // namespace HttpFilters } // namespace Extensions -} // namespace Envoy \ No newline at end of file +} // namespace Envoy diff --git a/test/extensions/filters/network/mysql_proxy/mysql_test_config.yaml b/test/extensions/filters/network/mysql_proxy/mysql_test_config.yaml index d65f3e3aea42..237076f98045 100644 --- a/test/extensions/filters/network/mysql_proxy/mysql_test_config.yaml +++ b/test/extensions/filters/network/mysql_proxy/mysql_test_config.yaml @@ -27,10 +27,10 @@ static_resources: - filters: - name: mysql typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.mysql_proxy.v1alpha1.MySQLProxy + "@type": 
type.googleapis.com/envoy.extensions.filters.network.mysql_proxy.v3.MySQLProxy stat_prefix: mysql_stats - name: tcp typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy + "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy stat_prefix: tcp_stats cluster: cluster_0 diff --git a/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc b/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc index aa2d9ff2c7b7..e787a18f2d5b 100644 --- a/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc +++ b/test/extensions/filters/network/postgres_proxy/postgres_decoder_test.cc @@ -242,6 +242,9 @@ TEST_P(PostgresProxyFrontendDecoderTest, FrontendInc) { EXPECT_CALL(callbacks_, incMessagesFrontend()).Times(1); createPostgresMsg(data_, GetParam(), "SELECT 1;"); decoder_->onData(data_, true); + + // Make sure that decoder releases memory used during message processing. + ASSERT_TRUE(decoder_->getMessage().empty()); } // Run the above test for each frontend message. 
@@ -507,6 +510,89 @@ TEST_P(PostgresProxyFrontendEncrDecoderTest, EncyptedTraffic) { INSTANTIATE_TEST_SUITE_P(FrontendEncryptedMessagesTests, PostgresProxyFrontendEncrDecoderTest, ::testing::Values(80877103, 80877104)); +class FakeBuffer : public Buffer::Instance { +public: + MOCK_METHOD(void, addDrainTracker, (std::function), (override)); + MOCK_METHOD(void, add, (const void*, uint64_t), (override)); + MOCK_METHOD(void, addBufferFragment, (Buffer::BufferFragment&), (override)); + MOCK_METHOD(void, add, (absl::string_view), (override)); + MOCK_METHOD(void, add, (const Instance&), (override)); + MOCK_METHOD(void, prepend, (absl::string_view), (override)); + MOCK_METHOD(void, prepend, (Instance&), (override)); + MOCK_METHOD(void, commit, (Buffer::RawSlice*, uint64_t), (override)); + MOCK_METHOD(void, copyOut, (size_t, uint64_t, void*), (const, override)); + MOCK_METHOD(void, drain, (uint64_t), (override)); + MOCK_METHOD(Buffer::RawSliceVector, getRawSlices, (absl::optional), (const, override)); + MOCK_METHOD(Buffer::SliceDataPtr, extractMutableFrontSlice, (), (override)); + MOCK_METHOD(uint64_t, length, (), (const, override)); + MOCK_METHOD(void*, linearize, (uint32_t), (override)); + MOCK_METHOD(void, move, (Instance&), (override)); + MOCK_METHOD(void, move, (Instance&, uint64_t), (override)); + MOCK_METHOD(uint64_t, reserve, (uint64_t, Buffer::RawSlice*, uint64_t), (override)); + MOCK_METHOD(ssize_t, search, (const void*, uint64_t, size_t, size_t), (const, override)); + MOCK_METHOD(bool, startsWith, (absl::string_view), (const, override)); + MOCK_METHOD(std::string, toString, (), (const, override)); +}; + +// Test verifies that decoder calls Buffer::linearize method +// for messages which have associated 'action'. +TEST_F(PostgresProxyDecoderTest, Linearize) { + testing::NiceMock fake_buf; + uint8_t body[] = "test\0"; + + decoder_->setStartup(false); + + // Simulate that decoder reads message which needs processing. + // Query 'Q' message's body is just string. 
+ // Message header is 5 bytes and body will contain string "test\0". + EXPECT_CALL(fake_buf, length).WillRepeatedly(testing::Return(10)); + // The decoder will first ask for 1-byte message type + // Then for length and finally for message body. + EXPECT_CALL(fake_buf, copyOut) + .WillOnce([](size_t start, uint64_t size, void* data) { + ASSERT_THAT(start, 0); + ASSERT_THAT(size, 1); + *(static_cast(data)) = 'Q'; + }) + .WillOnce([](size_t start, uint64_t size, void* data) { + ASSERT_THAT(start, 1); + ASSERT_THAT(size, 4); + *(static_cast(data)) = htonl(9); + }) + .WillRepeatedly([=](size_t start, uint64_t size, void* data) { + ASSERT_THAT(start, 0); + ASSERT_THAT(size, 5); + memcpy(data, body, 5); + }); + + // It should call "Buffer::linearize". + EXPECT_CALL(fake_buf, linearize).WillOnce([&](uint32_t) -> void* { return body; }); + + decoder_->onData(fake_buf, false); + + // Simulate that decoder reads message which does not need processing. + // BindComplete message has type '2' and empty body. + // Total message length is equal to length of header (5 bytes). + EXPECT_CALL(fake_buf, length).WillRepeatedly(testing::Return(5)); + // The decoder will first ask for 1-byte message type and next for length. + EXPECT_CALL(fake_buf, copyOut) + .WillOnce([](size_t start, uint64_t size, void* data) { + ASSERT_THAT(start, 0); + ASSERT_THAT(size, 1); + *(static_cast(data)) = '2'; + }) + .WillOnce([](size_t start, uint64_t size, void* data) { + ASSERT_THAT(start, 1); + ASSERT_THAT(size, 4); + *(static_cast(data)) = htonl(4); + }); + + // Make sure that decoder does not call linearize. 
+ EXPECT_CALL(fake_buf, linearize).Times(0); + + decoder_->onData(fake_buf, false); +} + } // namespace PostgresProxy } // namespace NetworkFilters } // namespace Extensions diff --git a/test/extensions/transport_sockets/tls/ssl_socket_test.cc b/test/extensions/transport_sockets/tls/ssl_socket_test.cc index b4bdb84e5737..b06a9ac66203 100644 --- a/test/extensions/transport_sockets/tls/ssl_socket_test.cc +++ b/test/extensions/transport_sockets/tls/ssl_socket_test.cc @@ -59,6 +59,7 @@ using testing::ContainsRegex; using testing::DoAll; using testing::InSequence; using testing::Invoke; +using testing::MockFunction; using testing::NiceMock; using testing::Return; using testing::ReturnRef; @@ -4490,6 +4491,12 @@ TEST_P(SslSocketTest, DownstreamNotReadySslSocket) { ContextManagerImpl manager(time_system_); ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager, stats_store, std::vector{}); + + // Add a secrets ready callback that should not be invoked. + MockFunction mock_callback_; + EXPECT_CALL(mock_callback_, Call()).Times(0); + server_ssl_socket_factory.addReadyCb(mock_callback_.AsStdFunction()); + auto transport_socket = server_ssl_socket_factory.createTransportSocket(nullptr); EXPECT_EQ(EMPTY_STRING, transport_socket->protocol()); EXPECT_EQ(nullptr, transport_socket->ssl()); @@ -4525,6 +4532,12 @@ TEST_P(SslSocketTest, UpstreamNotReadySslSocket) { ContextManagerImpl manager(time_system_); ClientSslSocketFactory client_ssl_socket_factory(std::move(client_cfg), manager, stats_store); + + // Add a secrets ready callback that should not be invoked. 
+ MockFunction mock_callback_; + EXPECT_CALL(mock_callback_, Call()).Times(0); + client_ssl_socket_factory.addReadyCb(mock_callback_.AsStdFunction()); + auto transport_socket = client_ssl_socket_factory.createTransportSocket(nullptr); EXPECT_EQ(EMPTY_STRING, transport_socket->protocol()); EXPECT_EQ(nullptr, transport_socket->ssl()); @@ -4536,6 +4549,97 @@ TEST_P(SslSocketTest, UpstreamNotReadySslSocket) { EXPECT_EQ("TLS error: Secret is not supplied by SDS", transport_socket->failureReason()); } +// Validate that secrets callbacks are invoked when secrets become ready. +TEST_P(SslSocketTest, ClientAddSecretsReadyCallback) { + Stats::TestUtil::TestStore stats_store; + NiceMock local_info; + testing::NiceMock factory_context; + NiceMock init_manager; + NiceMock dispatcher; + EXPECT_CALL(factory_context, localInfo()).WillOnce(ReturnRef(local_info)); + EXPECT_CALL(factory_context, stats()).WillOnce(ReturnRef(stats_store)); + EXPECT_CALL(factory_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); + EXPECT_CALL(factory_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + + envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext tls_context; + auto sds_secret_configs = + tls_context.mutable_common_tls_context()->mutable_tls_certificate_sds_secret_configs()->Add(); + sds_secret_configs->set_name("abc.com"); + sds_secret_configs->mutable_sds_config(); + auto client_cfg = std::make_unique(tls_context, factory_context); + EXPECT_TRUE(client_cfg->tlsCertificates().empty()); + EXPECT_FALSE(client_cfg->isReady()); + + NiceMock context_manager; + ClientSslSocketFactory client_ssl_socket_factory(std::move(client_cfg), context_manager, + stats_store); + + // Add a secrets ready callback. It should not be invoked until onAddOrUpdateSecret() is called. 
+ MockFunction mock_callback_; + EXPECT_CALL(mock_callback_, Call()).Times(0); + client_ssl_socket_factory.addReadyCb(mock_callback_.AsStdFunction()); + + // Call onAddOrUpdateSecret, but return a null ssl_ctx. This should not invoke the callback. + EXPECT_CALL(context_manager, createSslClientContext(_, _)).WillOnce(Return(nullptr)); + client_ssl_socket_factory.onAddOrUpdateSecret(); + + EXPECT_CALL(mock_callback_, Call()); + Ssl::ClientContextSharedPtr mock_context = std::make_shared(); + EXPECT_CALL(context_manager, createSslClientContext(_, _)).WillOnce(Return(mock_context)); + client_ssl_socket_factory.onAddOrUpdateSecret(); + + // Add another callback, it should be invoked immediately. + MockFunction second_callback_; + EXPECT_CALL(second_callback_, Call()); + client_ssl_socket_factory.addReadyCb(second_callback_.AsStdFunction()); +} + +// Validate that secrets callbacks are invoked when secrets become ready. +TEST_P(SslSocketTest, ServerAddSecretsReadyCallback) { + Stats::TestUtil::TestStore stats_store; + NiceMock local_info; + testing::NiceMock factory_context; + NiceMock init_manager; + NiceMock dispatcher; + EXPECT_CALL(factory_context, localInfo()).WillOnce(ReturnRef(local_info)); + EXPECT_CALL(factory_context, stats()).WillOnce(ReturnRef(stats_store)); + EXPECT_CALL(factory_context, initManager()).WillRepeatedly(ReturnRef(init_manager)); + EXPECT_CALL(factory_context, dispatcher()).WillRepeatedly(ReturnRef(dispatcher)); + + envoy::extensions::transport_sockets::tls::v3::DownstreamTlsContext tls_context; + auto sds_secret_configs = + tls_context.mutable_common_tls_context()->mutable_tls_certificate_sds_secret_configs()->Add(); + sds_secret_configs->set_name("abc.com"); + sds_secret_configs->mutable_sds_config(); + auto server_cfg = std::make_unique(tls_context, factory_context); + EXPECT_TRUE(server_cfg->tlsCertificates().empty()); + EXPECT_FALSE(server_cfg->isReady()); + + NiceMock context_manager; + ServerSslSocketFactory 
server_ssl_socket_factory(std::move(server_cfg), context_manager, + stats_store, std::vector{}); + + // Add a secrets ready callback. It should not be invoked until onAddOrUpdateSecret() is called. + MockFunction mock_callback_; + EXPECT_CALL(mock_callback_, Call()).Times(0); + server_ssl_socket_factory.addReadyCb(mock_callback_.AsStdFunction()); + + // Call onAddOrUpdateSecret, but return a null ssl_ctx. This should not invoke the callback. + EXPECT_CALL(context_manager, createSslServerContext(_, _, _)).WillOnce(Return(nullptr)); + server_ssl_socket_factory.onAddOrUpdateSecret(); + + // Now return a ssl context which should result in the callback being invoked. + EXPECT_CALL(mock_callback_, Call()); + Ssl::ServerContextSharedPtr mock_context = std::make_shared(); + EXPECT_CALL(context_manager, createSslServerContext(_, _, _)).WillOnce(Return(mock_context)); + server_ssl_socket_factory.onAddOrUpdateSecret(); + + // Add another callback, it should be invoked immediately. + MockFunction second_callback_; + EXPECT_CALL(second_callback_, Call()); + server_ssl_socket_factory.addReadyCb(second_callback_.AsStdFunction()); +} + TEST_P(SslSocketTest, TestTransportSocketCallback) { // Make MockTransportSocketCallbacks. 
Network::MockIoHandle io_handle; diff --git a/test/integration/ads_integration_test.cc b/test/integration/ads_integration_test.cc index b49b217464bf..01aae9dc9f73 100644 --- a/test/integration/ads_integration_test.cc +++ b/test/integration/ads_integration_test.cc @@ -1132,6 +1132,26 @@ class AdsClusterV3Test : public AdsIntegrationTest { INSTANTIATE_TEST_SUITE_P(IpVersionsClientTypeDelta, AdsClusterV3Test, DELTA_SOTW_GRPC_CLIENT_INTEGRATION_PARAMS); +TEST_P(AdsClusterV3Test, BasicClusterInitialWarming) { + initialize(); + const auto cds_type_url = Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3); + const auto eds_type_url = Config::getTypeUrl( + envoy::config::core::v3::ApiVersion::V3); + + EXPECT_TRUE(compareDiscoveryRequest(cds_type_url, "", {}, {}, {}, true)); + sendDiscoveryResponse( + cds_type_url, {buildCluster("cluster_0")}, {buildCluster("cluster_0")}, {}, "1", false); + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 1); + EXPECT_TRUE(compareDiscoveryRequest(eds_type_url, "", {"cluster_0"}, {"cluster_0"}, {})); + sendDiscoveryResponse( + eds_type_url, {buildClusterLoadAssignment("cluster_0")}, + {buildClusterLoadAssignment("cluster_0")}, {}, "1", false); + + test_server_->waitForGaugeEq("cluster_manager.warming_clusters", 0); + test_server_->waitForGaugeGe("cluster_manager.active_clusters", 2); +} + // Verify CDS is paused during cluster warming. 
TEST_P(AdsClusterV3Test, CdsPausedDuringWarming) { initialize(); diff --git a/test/integration/xds_integration_test.cc b/test/integration/xds_integration_test.cc index 08182b8ce63d..4e0dcaf73c07 100644 --- a/test/integration/xds_integration_test.cc +++ b/test/integration/xds_integration_test.cc @@ -282,7 +282,7 @@ class LdsInplaceUpdateHttpIntegrationTest LdsInplaceUpdateHttpIntegrationTest() : HttpIntegrationTest(Http::CodecClient::Type::HTTP1, GetParam()) {} - void initialize() override { + void inplaceInitialize(bool add_default_filter_chain = false) { autonomous_upstream_ = true; setUpstreamCount(2); @@ -290,7 +290,8 @@ class LdsInplaceUpdateHttpIntegrationTest std::string tls_inspector_config = ConfigHelper::tlsInspectorFilter(); config_helper_.addListenerFilter(tls_inspector_config); config_helper_.addSslConfig(); - config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + config_helper_.addConfigModifier([this, add_default_filter_chain]( + envoy::config::bootstrap::v3::Bootstrap& bootstrap) { if (!use_default_balancer_) { bootstrap.mutable_static_resources() ->mutable_listeners(0) @@ -327,6 +328,13 @@ class LdsInplaceUpdateHttpIntegrationTest bootstrap.mutable_static_resources()->mutable_clusters()->Add()->MergeFrom( *bootstrap.mutable_static_resources()->mutable_clusters(0)); bootstrap.mutable_static_resources()->mutable_clusters(1)->set_name("cluster_1"); + + if (add_default_filter_chain) { + auto default_filter_chain = bootstrap.mutable_static_resources() + ->mutable_listeners(0) + ->mutable_default_filter_chain(); + default_filter_chain->MergeFrom(*filter_chain_0); + } }); BaseIntegrationTest::initialize(); @@ -375,16 +383,20 @@ class LdsInplaceUpdateHttpIntegrationTest bool use_default_balancer_{false}; }; -// Verify that http response on filter chain 0 has "Connection: close" header when filter chain 0 -// is deleted during the listener update. 
+// Verify that http response on filter chain 1 and default filter chain have "Connection: close" +// header when these 2 filter chains are deleted during the listener update. TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigDeletingFilterChain) { - initialize(); + inplaceInitialize(/*add_default_filter_chain=*/true); auto codec_client_1 = createHttpCodec("alpn1"); auto codec_client_0 = createHttpCodec("alpn0"); - Cleanup cleanup([c1 = codec_client_1.get(), c0 = codec_client_0.get()]() { + auto codec_client_default = createHttpCodec("alpndefault"); + + Cleanup cleanup([c1 = codec_client_1.get(), c0 = codec_client_0.get(), + c_default = codec_client_default.get()]() { c1->close(); c0->close(); + c_default->close(); }); ConfigHelper new_config_helper(version_, *api_, MessageUtil::getJsonStringFromMessage(config_helper_.bootstrap())); @@ -392,6 +404,7 @@ TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigDeletingFilterChain) { [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); listener->mutable_filter_chains()->RemoveLast(); + listener->clear_default_filter_chain(); }); new_config_helper.setLds("1"); @@ -399,45 +412,84 @@ TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigDeletingFilterChain) { test_server_->waitForGaugeGe("listener_manager.total_filter_chains_draining", 1); expectResponseHeaderConnectionClose(*codec_client_1, true); + expectResponseHeaderConnectionClose(*codec_client_default, true); + test_server_->waitForGaugeGe("listener_manager.total_filter_chains_draining", 0); expectResponseHeaderConnectionClose(*codec_client_0, false); expectConnenctionServed(); } // Verify that http clients of filter chain 0 survives if new listener config adds new filter -// chain 2. +// chain 2 and default filter chain. 
TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigAddingFilterChain) { - initialize(); + inplaceInitialize(); test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); auto codec_client_0 = createHttpCodec("alpn0"); Cleanup cleanup0([c0 = codec_client_0.get()]() { c0->close(); }); ConfigHelper new_config_helper(version_, *api_, MessageUtil::getJsonStringFromMessage(config_helper_.bootstrap())); - new_config_helper.addConfigModifier( - [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { - auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); - listener->mutable_filter_chains()->Add()->MergeFrom(*listener->mutable_filter_chains(1)); - *listener->mutable_filter_chains(2) - ->mutable_filter_chain_match() - ->mutable_application_protocols(0) = "alpn2"; - }); + new_config_helper.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) + -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->mutable_filter_chains()->Add()->MergeFrom(*listener->mutable_filter_chains(1)); + *listener->mutable_filter_chains(2) + ->mutable_filter_chain_match() + ->mutable_application_protocols(0) = "alpn2"; + auto default_filter_chain = + bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_default_filter_chain(); + default_filter_chain->MergeFrom(*listener->mutable_filter_chains(1)); + }); new_config_helper.setLds("1"); test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); test_server_->waitForCounterGe("listener_manager.listener_create_success", 2); auto codec_client_2 = createHttpCodec("alpn2"); - Cleanup cleanup2([c2 = codec_client_2.get()]() { c2->close(); }); + auto codec_client_default = createHttpCodec("alpndefault"); + + Cleanup cleanup2([c2 = codec_client_2.get(), c_default = codec_client_default.get()]() { + c2->close(); + c_default->close(); + }); expectResponseHeaderConnectionClose(*codec_client_2, false); + 
expectResponseHeaderConnectionClose(*codec_client_default, false); expectResponseHeaderConnectionClose(*codec_client_0, false); expectConnenctionServed(); } +// Verify that http clients of default filter chain is drained and recreated if the default filter +// chain updates. +TEST_P(LdsInplaceUpdateHttpIntegrationTest, ReloadConfigUpdatingDefaultFilterChain) { + inplaceInitialize(true); + test_server_->waitForCounterGe("listener_manager.listener_create_success", 1); + + auto codec_client_default = createHttpCodec("alpndefault"); + Cleanup cleanup0([c_default = codec_client_default.get()]() { c_default->close(); }); + ConfigHelper new_config_helper(version_, *api_, + MessageUtil::getJsonStringFromMessage(config_helper_.bootstrap())); + new_config_helper.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) + -> void { + auto default_filter_chain = + bootstrap.mutable_static_resources()->mutable_listeners(0)->mutable_default_filter_chain(); + default_filter_chain->set_name("default_filter_chain_v2"); + }); + new_config_helper.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_in_place_updated", 1); + test_server_->waitForCounterGe("listener_manager.listener_create_success", 2); + + auto codec_client_default_v2 = createHttpCodec("alpndefaultv2"); + + Cleanup cleanup2([c_default_v2 = codec_client_default_v2.get()]() { c_default_v2->close(); }); + expectResponseHeaderConnectionClose(*codec_client_default, true); + expectResponseHeaderConnectionClose(*codec_client_default_v2, false); + expectConnenctionServed(); +} + // Verify that balancer is inherited. Test only default balancer because ExactConnectionBalancer // is verified in filter chain add and delete test case. 
TEST_P(LdsInplaceUpdateHttpIntegrationTest, OverlappingFilterChainServesNewConnection) { use_default_balancer_ = true; - initialize(); + inplaceInitialize(); auto codec_client_0 = createHttpCodec("alpn0"); Cleanup cleanup([c0 = codec_client_0.get()]() { c0->close(); }); @@ -455,6 +507,8 @@ TEST_P(LdsInplaceUpdateHttpIntegrationTest, OverlappingFilterChainServesNewConne expectConnenctionServed(); } +// Verify default filter chain update is filter chain only update. +TEST_P(LdsInplaceUpdateHttpIntegrationTest, DefaultFilterChainUpdate) {} INSTANTIATE_TEST_SUITE_P(IpVersions, LdsInplaceUpdateHttpIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); diff --git a/test/mocks/network/transport_socket.h b/test/mocks/network/transport_socket.h index ee53570c20ac..af8949aea99b 100644 --- a/test/mocks/network/transport_socket.h +++ b/test/mocks/network/transport_socket.h @@ -38,6 +38,7 @@ class MockTransportSocketFactory : public TransportSocketFactory { MOCK_METHOD(bool, implementsSecureTransport, (), (const)); MOCK_METHOD(TransportSocketPtr, createTransportSocket, (TransportSocketOptionsSharedPtr), (const)); + MOCK_METHOD(void, addReadyCb, (std::function)); }; } // namespace Network diff --git a/test/mocks/ssl/mocks.cc b/test/mocks/ssl/mocks.cc index 50ed3f3ae6c0..14ee85239d87 100644 --- a/test/mocks/ssl/mocks.cc +++ b/test/mocks/ssl/mocks.cc @@ -15,6 +15,9 @@ MockClientContext::~MockClientContext() = default; MockClientContextConfig::MockClientContextConfig() = default; MockClientContextConfig::~MockClientContextConfig() = default; +MockServerContext::MockServerContext() = default; +MockServerContext::~MockServerContext() = default; + MockServerContextConfig::MockServerContextConfig() = default; MockServerContextConfig::~MockServerContextConfig() = default; diff --git a/test/mocks/ssl/mocks.h b/test/mocks/ssl/mocks.h index 6a5cbe8df649..beafd9de8720 100644 --- a/test/mocks/ssl/mocks.h +++ b/test/mocks/ssl/mocks.h 
@@ -97,6 +97,17 @@ class MockClientContextConfig : public ClientContextConfig { MOCK_METHOD(const std::string&, signingAlgorithmsForTest, (), (const)); }; +class MockServerContext : public ServerContext { +public: + MockServerContext(); + ~MockServerContext() override; + + MOCK_METHOD(size_t, daysUntilFirstCertExpires, (), (const)); + MOCK_METHOD(absl::optional, secondsUntilFirstOcspResponseExpires, (), (const)); + MOCK_METHOD(CertificateDetailsPtr, getCaCertInformation, (), (const)); + MOCK_METHOD(std::vector, getCertChainInformation, (), (const)); +}; + class MockServerContextConfig : public ServerContextConfig { public: MockServerContextConfig(); diff --git a/test/mocks/upstream/host.h b/test/mocks/upstream/host.h index 95183622dbb7..b5d857a5c184 100644 --- a/test/mocks/upstream/host.h +++ b/test/mocks/upstream/host.h @@ -192,6 +192,8 @@ class MockHost : public Host { MOCK_METHOD(uint32_t, priority, (), (const)); MOCK_METHOD(void, priority, (uint32_t)); MOCK_METHOD(bool, warmed, (), (const)); + MOCK_METHOD(void, addReadyCb, (std::function, const envoy::config::core::v3::Metadata*), + (const)); testing::NiceMock cluster_; Network::TransportSocketFactoryPtr socket_factory_; diff --git a/test/server/config_validation/server_test.cc b/test/server/config_validation/server_test.cc index c1e6de23ec48..c833d71f61a6 100644 --- a/test/server/config_validation/server_test.cc +++ b/test/server/config_validation/server_test.cc @@ -134,7 +134,7 @@ TEST_P(ValidationServerTest, NoopLifecycleNotifier) { // as-is. (Note, /dev/stdout as an access log file is invalid on Windows, no equivalent /dev/ // exists.) 
-auto testing_values = ::testing::Values("front-proxy_front-envoy.yaml", "google_com_proxy.v2.yaml", +auto testing_values = ::testing::Values("front-proxy_front-envoy.yaml", "google_com_proxy.yaml", #ifndef WIN32 "grpc-bridge_server_envoy-proxy.yaml", #endif diff --git a/test/server/filter_chain_benchmark_test.cc b/test/server/filter_chain_benchmark_test.cc index 5900d88ea974..8fc4e49bc620 100644 --- a/test/server/filter_chain_benchmark_test.cc +++ b/test/server/filter_chain_benchmark_test.cc @@ -132,14 +132,14 @@ const char YamlHeader[] = R"EOF( socket_address: { address: 127.0.0.1, port_value: 1234 } listener_filters: - name: "envoy.filters.listener.tls_inspector" - config: {} + typed_config: {} filter_chains: - filter_chain_match: # empty transport_socket: - name: tls + name: "envoy.transport_sockets.tls" typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext" common_tls_context: tls_certificates: - certificate_chain: { filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem" } @@ -152,9 +152,9 @@ const char YamlSingleServer[] = R"EOF( server_names: "server1.example.com" transport_protocol: "tls" transport_socket: - name: tls + name: "envoy.transport_sockets.tls" typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext" common_tls_context: tls_certificates: - certificate_chain: { filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_dns_cert.pem" } @@ -167,9 +167,9 @@ const char YamlSingleDstPortTop[] = R"EOF( destination_port: )EOF"; const char YamlSingleDstPortBottom[] = R"EOF( transport_socket: - name: tls + name: "envoy.transport_sockets.tls" typed_config: - "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext + "@type": 
"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext" common_tls_context: tls_certificates: - certificate_chain: { filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem" } @@ -219,7 +219,8 @@ BENCHMARK_DEFINE_F(FilterChainBenchmarkFixture, FilterChainManagerBuildTest) FilterChainManagerImpl filter_chain_manager{ std::make_shared("127.0.0.1", 1234), factory_context, init_manager_}; - filter_chain_manager.addFilterChain(filter_chains_, dummy_builder_, filter_chain_manager); + filter_chain_manager.addFilterChains(filter_chains_, nullptr, dummy_builder_, + filter_chain_manager); } } @@ -242,8 +243,10 @@ BENCHMARK_DEFINE_F(FilterChainBenchmarkFixture, FilterChainFindTest) std::make_shared("127.0.0.1", 1234), factory_context, init_manager_}; - filter_chain_manager.addFilterChain(filter_chains_, dummy_builder_, filter_chain_manager); + filter_chain_manager.addFilterChains(filter_chains_, nullptr, dummy_builder_, + filter_chain_manager); for (auto _ : state) { + UNREFERENCED_PARAMETER(_); for (int i = 0; i < state.range(0); i++) { filter_chain_manager.findFilterChain(sockets[i]); } diff --git a/test/server/filter_chain_manager_impl_test.cc b/test/server/filter_chain_manager_impl_test.cc index 4b78f2a70d88..92fdec6d8997 100644 --- a/test/server/filter_chain_manager_impl_test.cc +++ b/test/server/filter_chain_manager_impl_test.cc @@ -99,10 +99,12 @@ class FilterChainManagerImplTest : public testing::Test { return filter_chain_manager_.findFilterChain(*mock_socket); } - void addSingleFilterChainHelper(const envoy::config::listener::v3::FilterChain& filter_chain) { - filter_chain_manager_.addFilterChain( + void addSingleFilterChainHelper( + const envoy::config::listener::v3::FilterChain& filter_chain, + const envoy::config::listener::v3::FilterChain* fallback_filter_chain = nullptr) { + filter_chain_manager_.addFilterChains( std::vector{&filter_chain}, - filter_chain_factory_builder_, 
filter_chain_manager_); + fallback_filter_chain, filter_chain_factory_builder_, filter_chain_manager_); } // Intermediate states. @@ -128,6 +130,12 @@ class FilterChainManagerImplTest : public testing::Test { )EOF"; Init::ManagerImpl init_manager_{"for_filter_chain_manager_test"}; envoy::config::listener::v3::FilterChain filter_chain_template_; + std::shared_ptr build_out_filter_chain_{ + std::make_shared()}; + envoy::config::listener::v3::FilterChain fallback_filter_chain_; + std::shared_ptr build_out_fallback_filter_chain_{ + std::make_shared()}; + NiceMock filter_chain_factory_builder_; NiceMock parent_context_; // Test target. @@ -147,21 +155,37 @@ TEST_F(FilterChainManagerImplTest, AddSingleFilterChain) { EXPECT_NE(filter_chain, nullptr); } +TEST_F(FilterChainManagerImplTest, FilterChainUseFallbackIfNoFilterChainMatches) { + // The build helper will build matchable filter chain and then build the default filter chain. + EXPECT_CALL(filter_chain_factory_builder_, buildFilterChain(_, _)) + .WillOnce(Return(build_out_fallback_filter_chain_)); + EXPECT_CALL(filter_chain_factory_builder_, buildFilterChain(_, _)) + .WillOnce(Return(std::make_shared())) + .RetiresOnSaturation(); + addSingleFilterChainHelper(filter_chain_template_, &fallback_filter_chain_); + + auto filter_chain = findFilterChainHelper(10000, "127.0.0.1", "", "tls", {}, "8.8.8.8", 111); + EXPECT_NE(filter_chain, nullptr); + auto fallback_filter_chain = + findFilterChainHelper(9999, "127.0.0.1", "", "tls", {}, "8.8.8.8", 111); + EXPECT_EQ(fallback_filter_chain, build_out_fallback_filter_chain_.get()); +} + TEST_F(FilterChainManagerImplTest, LookupFilterChainContextByFilterChainMessage) { std::vector filter_chain_messages; for (int i = 0; i < 2; i++) { envoy::config::listener::v3::FilterChain new_filter_chain = filter_chain_template_; new_filter_chain.set_name(absl::StrCat("filter_chain_", i)); - // For sanity check + // For sanity check. 
new_filter_chain.mutable_filter_chain_match()->mutable_destination_port()->set_value(10000 + i); filter_chain_messages.push_back(std::move(new_filter_chain)); } EXPECT_CALL(filter_chain_factory_builder_, buildFilterChain(_, _)).Times(2); - filter_chain_manager_.addFilterChain( + filter_chain_manager_.addFilterChains( std::vector{&filter_chain_messages[0], &filter_chain_messages[1]}, - filter_chain_factory_builder_, filter_chain_manager_); + nullptr, filter_chain_factory_builder_, filter_chain_manager_); } TEST_F(FilterChainManagerImplTest, DuplicateContextsAreNotBuilt) { @@ -176,9 +200,9 @@ TEST_F(FilterChainManagerImplTest, DuplicateContextsAreNotBuilt) { } EXPECT_CALL(filter_chain_factory_builder_, buildFilterChain(_, _)).Times(1); - filter_chain_manager_.addFilterChain( + filter_chain_manager_.addFilterChains( std::vector{&filter_chain_messages[0]}, - filter_chain_factory_builder_, filter_chain_manager_); + nullptr, filter_chain_factory_builder_, filter_chain_manager_); FilterChainManagerImpl new_filter_chain_manager{ std::make_shared("127.0.0.1", 1234), parent_context_, @@ -186,10 +210,10 @@ TEST_F(FilterChainManagerImplTest, DuplicateContextsAreNotBuilt) { // The new filter chain manager maintains 3 filter chains, but only 2 filter chain context is // built because it reuse the filter chain context in the previous filter chain manager EXPECT_CALL(filter_chain_factory_builder_, buildFilterChain(_, _)).Times(2); - new_filter_chain_manager.addFilterChain( + new_filter_chain_manager.addFilterChains( std::vector{ &filter_chain_messages[0], &filter_chain_messages[1], &filter_chain_messages[2]}, - filter_chain_factory_builder_, new_filter_chain_manager); + nullptr, filter_chain_factory_builder_, new_filter_chain_manager); } TEST_F(FilterChainManagerImplTest, CreatedFilterChainFactoryContextHasIndependentDrainClose) { diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 22d682666a0b..bbf8d6b03a1f 100644 --- 
a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -4531,6 +4531,38 @@ TEST(ListenerMessageUtilTest, ListenerMessageHaveDifferentNameNotEquivalent) { EXPECT_FALSE(Server::ListenerMessageUtil::filterChainOnlyChange(listener1, listener2)); } +TEST(ListenerMessageUtilTest, ListenerDefaultFilterChainChangeIsAlwaysFilterChainOnlyChange) { + envoy::config::listener::v3::Listener listener1; + listener1.set_name("common"); + envoy::config::listener::v3::FilterChain default_filter_chain_1; + default_filter_chain_1.set_name("127.0.0.1"); + envoy::config::listener::v3::Listener listener2; + listener2.set_name("common"); + envoy::config::listener::v3::FilterChain default_filter_chain_2; + default_filter_chain_2.set_name("127.0.0.2"); + + { + listener1.clear_default_filter_chain(); + listener2.clear_default_filter_chain(); + EXPECT_TRUE(Server::ListenerMessageUtil::filterChainOnlyChange(listener1, listener2)); + } + { + *listener1.mutable_default_filter_chain() = default_filter_chain_1; + listener2.clear_default_filter_chain(); + EXPECT_TRUE(Server::ListenerMessageUtil::filterChainOnlyChange(listener1, listener2)); + } + { + listener1.clear_default_filter_chain(); + *listener2.mutable_default_filter_chain() = default_filter_chain_2; + EXPECT_TRUE(Server::ListenerMessageUtil::filterChainOnlyChange(listener1, listener2)); + } + { + *listener1.mutable_default_filter_chain() = default_filter_chain_1; + *listener2.mutable_default_filter_chain() = default_filter_chain_2; + EXPECT_TRUE(Server::ListenerMessageUtil::filterChainOnlyChange(listener1, listener2)); + } +} + TEST(ListenerMessageUtilTest, ListenerMessageHaveDifferentFilterChainsAreEquivalent) { envoy::config::listener::v3::Listener listener1; listener1.set_name("common"); diff --git a/test/server/test_data/server/zipkin_tracing_deprecated_config.yaml b/test/server/test_data/server/zipkin_tracing_deprecated_config.yaml index 07b6220e447f..e79bd34b52e7 100644 --- 
a/test/server/test_data/server/zipkin_tracing_deprecated_config.yaml +++ b/test/server/test_data/server/zipkin_tracing_deprecated_config.yaml @@ -6,7 +6,7 @@ tracing: http: name: zipkin typed_config: - "@type": type.googleapis.com/envoy.config.trace.v2.ZipkinConfig + "@type": type.googleapis.com/envoy.config.trace.v3.ZipkinConfig collector_cluster: zipkin collector_endpoint: "/api/v1/spans" collector_endpoint_version: HTTP_JSON diff --git a/test/server/test_data/static_validation/network_filter_unknown_field.yaml b/test/server/test_data/static_validation/network_filter_unknown_field.yaml index 7c17f16cee52..6535893cce4b 100644 --- a/test/server/test_data/static_validation/network_filter_unknown_field.yaml +++ b/test/server/test_data/static_validation/network_filter_unknown_field.yaml @@ -9,7 +9,7 @@ static_resources: - filters: - name: http typed_config: - "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager codec_type: HTTP2 stat_prefix: blah route_config: {} diff --git a/test/test_common/utility.cc b/test/test_common/utility.cc index 81f2f79611dd..928cb440c5f2 100644 --- a/test/test_common/utility.cc +++ b/test/test_common/utility.cc @@ -28,6 +28,7 @@ #include "common/config/resource_name.h" #include "common/filesystem/directory.h" #include "common/filesystem/filesystem_impl.h" +#include "common/http/header_utility.h" #include "common/json/json_loader.h" #include "common/network/address_impl.h" #include "common/network/utility.h" @@ -65,23 +66,29 @@ uint64_t TestRandomGenerator::random() { return generator_(); } bool TestUtility::headerMapEqualIgnoreOrder(const Http::HeaderMap& lhs, const Http::HeaderMap& rhs) { - if (lhs.size() != rhs.size()) { - return false; - } - - bool equal = true; - rhs.iterate([&lhs, &equal](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { - // 
TODO(mattklein123): Handle multiple headers. - auto entry = lhs.get(Http::LowerCaseString(std::string(header.key().getStringView()))); - if (entry.empty() || entry.size() > 1 || - (entry[0]->value() != header.value().getStringView())) { - equal = false; + absl::flat_hash_set lhs_keys; + absl::flat_hash_set rhs_keys; + lhs.iterate([&lhs_keys](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + const std::string key{header.key().getStringView()}; + lhs_keys.insert(key); + return Http::HeaderMap::Iterate::Continue; + }); + rhs.iterate([&lhs, &rhs, &rhs_keys](const Http::HeaderEntry& header) -> Http::HeaderMap::Iterate { + const std::string key{header.key().getStringView()}; + // Compare with canonicalized multi-value headers. This ensures we respect order within + // a header. + const auto lhs_entry = + Http::HeaderUtility::getAllOfHeaderAsString(lhs, Http::LowerCaseString(key)); + const auto rhs_entry = + Http::HeaderUtility::getAllOfHeaderAsString(rhs, Http::LowerCaseString(key)); + ASSERT(rhs_entry.result()); + if (lhs_entry.result() != rhs_entry.result()) { return Http::HeaderMap::Iterate::Break; } + rhs_keys.insert(key); return Http::HeaderMap::Iterate::Continue; }); - - return equal; + return lhs_keys.size() == rhs_keys.size(); } bool TestUtility::buffersEqual(const Buffer::Instance& lhs, const Buffer::Instance& rhs) { diff --git a/test/test_common/utility_test.cc b/test/test_common/utility_test.cc index 648d65cda365..71190a50317e 100644 --- a/test/test_common/utility_test.cc +++ b/test/test_common/utility_test.cc @@ -28,6 +28,24 @@ TEST(HeaderMapEqualIgnoreOrder, NotEqual) { EXPECT_FALSE(TestUtility::headerMapEqualIgnoreOrder(lhs, rhs)); } +TEST(HeaderMapEqualIgnoreOrder, MultiValue) { + { + Http::TestRequestHeaderMapImpl lhs{{"bar", "a"}, {"foo", "1"}, {"foo", "2"}}; + Http::TestRequestHeaderMapImpl rhs{{"foo", "1"}, {"bar", "a"}, {"foo", "2"}}; + EXPECT_TRUE(TestUtility::headerMapEqualIgnoreOrder(lhs, rhs)); + } + { + 
Http::TestRequestHeaderMapImpl lhs{{"bar", "a"}, {"foo", "1"}, {"foo", "2"}}; + Http::TestRequestHeaderMapImpl rhs{{"foo", "2"}, {"bar", "a"}, {"foo", "1"}}; + EXPECT_FALSE(TestUtility::headerMapEqualIgnoreOrder(lhs, rhs)); + } + { + Http::TestRequestHeaderMapImpl lhs{{"bar", "a"}, {"foo", "1"}, {"foo", "2"}}; + Http::TestRequestHeaderMapImpl rhs{{"foo", "1,2"}, {"bar", "a"}}; + EXPECT_TRUE(TestUtility::headerMapEqualIgnoreOrder(lhs, rhs)); + } +} + TEST(ProtoEqIgnoreField, ActuallyEqual) { // Ignored field equal { diff --git a/tools/config_validation/validate_fragment.py b/tools/config_validation/validate_fragment.py index d272f37fb006..a14464ba7d46 100644 --- a/tools/config_validation/validate_fragment.py +++ b/tools/config_validation/validate_fragment.py @@ -3,7 +3,7 @@ # Example usage: # # bazel run //tools/config_validation:validate_fragment -- \ -# envoy.config.bootstrap.v3.Bootstrap $PWD/configs/google_com_proxy.v2.yaml +# envoy.config.bootstrap.v3.Bootstrap $PWD/configs/google_com_proxy.yaml import json import pathlib diff --git a/tools/dependency/cve_scan.py b/tools/dependency/cve_scan.py new file mode 100755 index 000000000000..8dea63c8cc47 --- /dev/null +++ b/tools/dependency/cve_scan.py @@ -0,0 +1,289 @@ +#!/usr/bin/env python3 + +# Scan for any external dependencies that were last updated before known CVEs +# (and near relatives). We also try a fuzzy match on version information. + +from collections import defaultdict, namedtuple +import datetime as dt +import gzip +import json +import re +import sys +import textwrap +import urllib.request + +import utils as dep_utils + +# These CVEs are false positives for the match heuristics. An explanation is +# required when adding a new entry to this list as a comment. +IGNORES_CVES = set([ + # Node.js issue unrelated to http-parser (napi_ API implementation). + 'CVE-2020-8174', + # Node.js HTTP desync attack. 
Request smuggling due to CR and hyphen + # conflation in llhttp + # (https://github.com/nodejs/llhttp/commit/9d9da1d0f18599ceddd8f484df5a5ad694d23361). + # This was a result of using llparse's toLowerUnsafe() for header keys. + # http-parser uses a TOKEN method that doesn't have the same issue for + # header fields. + 'CVE-2020-8201', + # Node.js issue unrelated to http-parser. This is a DoS due to a lack of + # request/connection timeouts, see + # https://github.com/nodejs/node/commit/753f3b247a. + 'CVE-2020-8251', + # Node.js issue unrelated to http-parser (libuv). + 'CVE-2020-8252', + # Fixed via the nghttp2 1.41.0 bump in Envoy 8b6ea4. + 'CVE-2020-11080', +]) + +# Subset of CVE fields that are useful below. +Cve = namedtuple( + 'Cve', + ['id', 'description', 'cpes', 'score', 'severity', 'published_date', 'last_modified_date']) + + +class Cpe(namedtuple('CPE', ['part', 'vendor', 'product', 'version'])): + '''Model a subset of CPE fields that are used in CPE matching.''' + + @classmethod + def FromString(cls, cpe_str): + assert (cpe_str.startswith('cpe:2.3:')) + components = cpe_str.split(':') + assert (len(components) >= 6) + return cls(*components[2:6]) + + def __str__(self): + return f'cpe:2.3:{self.part}:{self.vendor}:{self.product}:{self.version}' + + def VendorNormalized(self): + '''Return a normalized CPE where only part and vendor are significant.''' + return Cpe(self.part, self.vendor, '*', '*') + + +def ParseCveJson(cve_json, cves, cpe_revmap): + '''Parse CVE JSON dictionary. + + Args: + cve_json: a NIST CVE JSON dictionary. + cves: dictionary mapping CVE ID string to Cve object (output). + cpe_revmap: a reverse map from vendor normalized CPE to CVE ID string. + ''' + + # This provides an over-approximation of possible CPEs affected by CVE nodes + # metadata; it traverses the entire AND-OR tree and just gathers every CPE + # observed. 
Generally we expect most of Envoy's CVE-CPE matches to be + simple, plus it's interesting to consumers of this data to understand when a + CPE pops up, even in a conditional setting. + def GatherCpes(nodes, cpe_set): + for node in nodes: + for cpe_match in node.get('cpe_match', []): + cpe_set.add(Cpe.FromString(cpe_match['cpe23Uri'])) + GatherCpes(node.get('children', []), cpe_set) + + for cve in cve_json['CVE_Items']: + cve_id = cve['cve']['CVE_data_meta']['ID'] + description = cve['cve']['description']['description_data'][0]['value'] + cpe_set = set() + GatherCpes(cve['configurations']['nodes'], cpe_set) + if len(cpe_set) == 0: + continue + cvss_v3_score = cve['impact']['baseMetricV3']['cvssV3']['baseScore'] + cvss_v3_severity = cve['impact']['baseMetricV3']['cvssV3']['baseSeverity'] + + def ParseCveDate(date_str): + assert (date_str.endswith('Z')) + return dt.date.fromisoformat(date_str.split('T')[0]) + + published_date = ParseCveDate(cve['publishedDate']) + last_modified_date = ParseCveDate(cve['lastModifiedDate']) + cves[cve_id] = Cve(cve_id, description, cpe_set, cvss_v3_score, cvss_v3_severity, + published_date, last_modified_date) + for cpe in cpe_set: + cpe_revmap[str(cpe.VendorNormalized())].add(cve_id) + return cves, cpe_revmap + + +def DownloadCveData(urls): + '''Download NIST CVE JSON databases from given URLs and parse. + + Args: + urls: a list of URLs. + Returns: + cves: dictionary mapping CVE ID string to Cve object (output). + cpe_revmap: a reverse map from vendor normalized CPE to CVE ID string.
+ ''' + cves = {} + cpe_revmap = defaultdict(set) + for url in urls: + print(f'Loading NIST CVE database from {url}...') + with urllib.request.urlopen(url) as request: + with gzip.GzipFile(fileobj=request) as json_data: + ParseCveJson(json.loads(json_data.read()), cves, cpe_revmap) + return cves, cpe_revmap + + +def FormatCveDetails(cve, deps): + formatted_deps = ', '.join(sorted(deps)) + wrapped_description = '\n '.join(textwrap.wrap(cve.description)) + return f''' + CVE ID: {cve.id} + CVSS v3 score: {cve.score} + Severity: {cve.severity} + Published date: {cve.published_date} + Last modified date: {cve.last_modified_date} + Dependencies: {formatted_deps} + Description: {wrapped_description} + Affected CPEs: + ''' + '\n '.join(f'- {cpe}' for cpe in cve.cpes) + + +FUZZY_DATE_RE = re.compile('(\d{4}).?(\d{2}).?(\d{2})') +FUZZY_SEMVER_RE = re.compile('(\d+)[:\.\-_](\d+)[:\.\-_](\d+)') + + +def RegexGroupsMatch(regex, lhs, rhs): + '''Do two strings match modulo a regular expression? + + Args: + regex: regular expression + lhs: LHS string + rhs: RHS string + Returns: + A boolean indicating match. + ''' + lhs_match = regex.search(lhs) + if lhs_match: + rhs_match = regex.search(rhs) + if rhs_match and lhs_match.groups() == rhs_match.groups(): + return True + return False + + +def CpeMatch(cpe, dep_metadata): + '''Heuristically match dependency metadata against CPE. + + We have a number of rules below that are easy to compute without having + to look at the dependency metadata. In the future, with additional access to + repository information we could do the following: + - For dependencies at a non-release version, walk back through git history to + the last known release version and attempt a match with this. + - For dependencies at a non-release version, use the commit date to look for a + version match where version is YYYY-MM-DD. + + Args: + cpe: Cpe object to match against. + dep_metadata: dependency metadata dictionary.
+ Returns: + A boolean indicating a match. + ''' + dep_cpe = Cpe.FromString(dep_metadata['cpe']) + dep_version = dep_metadata['version'] + # The 'part' and 'vendor' must be an exact match. + if cpe.part != dep_cpe.part: + return False + if cpe.vendor != dep_cpe.vendor: + return False + # We allow Envoy dependency CPEs to wildcard the 'product'; this is useful for + # LLVM where multiple products need to be covered. + if dep_cpe.product != '*' and cpe.product != dep_cpe.product: + return False + # Wildcard versions always match. + if cpe.version == '*': + return True + # An exact version match is a hit. + if cpe.version == dep_version: + return True + # Allow the 'last_updated' dependency metadata to substitute for date. + # TODO(htuch): Make a finer grained distinction between Envoy update date and dependency + # release date in 'last_updated'. + # TODO(htuch): Consider fuzzier date ranges. + if cpe.version == dep_metadata['last_updated']: + return True + # Try a fuzzy date match to deal with versions like fips-20190304 in dependency version. + if RegexGroupsMatch(FUZZY_DATE_RE, dep_version, cpe.version): + return True + # Try a fuzzy semver match to deal with things like 2.1.0-beta3. + if RegexGroupsMatch(FUZZY_SEMVER_RE, dep_version, cpe.version): + return True + # Fall-thru. + return False + + +def CveMatch(cve, dep_metadata): + '''Heuristically match dependency metadata against CVE. + + In general, we allow false positives but want to keep the noise low, to avoid + the toil around having to populate IGNORES_CVES. + + Args: + cve: Cve object to match against. + dep_metadata: dependency metadata dictionary. + Returns: + A boolean indicating a match. + ''' + wildcard_version_match = False + # Consider each CPE attached to the CVE for a match against the dependency CPE. + for cpe in cve.cpes: + if CpeMatch(cpe, dep_metadata): + # Wildcard version matches need additional heuristics unrelated to CPE to + # qualify, e.g. last updated date.
+ if cpe.version == '*': + wildcard_version_match = True + else: + return True + if wildcard_version_match: + # If the CVE was published after the dependency was last updated, it's a + # potential match. + last_dep_update = dt.date.fromisoformat(dep_metadata['last_updated']) + if last_dep_update <= cve.published_date: + return True + return False + + +def CveScan(cves, cpe_revmap, cve_allowlist, repository_locations): + '''Scan for CVEs in a parsed NIST CVE database. + + Args: + cves: CVE dictionary as provided by DownloadCveData(). + cpe_revmap: CPE-CVE reverse map as provided by DownloadCveData(). + cve_allowlist: an allowlist of CVE IDs to ignore. + repository_locations: a dictionary of dependency metadata in the format + described in api/bazel/external_deps.bzl. + Returns: + possible_cves: a dictionary mapping CVE IDs to Cve objects. + cve_deps: a dictionary mapping CVE IDs to dependency names. + ''' + possible_cves = {} + cve_deps = defaultdict(list) + for dep, metadata in repository_locations.items(): + cpe = metadata.get('cpe', 'N/A') + if cpe == 'N/A': + continue + candidate_cve_ids = cpe_revmap.get(str(Cpe.FromString(cpe).VendorNormalized()), []) + for cve_id in candidate_cve_ids: + cve = cves[cve_id] + if cve.id in cve_allowlist: + continue + if CveMatch(cve, metadata): + possible_cves[cve_id] = cve + cve_deps[cve_id].append(dep) + return possible_cves, cve_deps + + +if __name__ == '__main__': + # Allow local overrides for NIST CVE database URLs via args. + urls = sys.argv[1:] + if not urls: + # We only look back a few years, since we shouldn't have any ancient deps.
+ current_year = dt.datetime.now().year + scan_years = range(2018, current_year + 1) + urls = [ + f'https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-{year}.json.gz' for year in scan_years + ] + cves, cpe_revmap = DownloadCveData(urls) + possible_cves, cve_deps = CveScan(cves, cpe_revmap, IGNORES_CVES, dep_utils.RepositoryLocations()) + if possible_cves: + print('\nBased on heuristic matching with the NIST CVE database, Envoy may be vulnerable to:') + for cve_id in sorted(possible_cves): + print(f'{FormatCveDetails(possible_cves[cve_id], cve_deps[cve_id])}') + sys.exit(1) diff --git a/tools/dependency/cve_scan_test.py b/tools/dependency/cve_scan_test.py new file mode 100755 index 000000000000..afb89f83b829 --- /dev/null +++ b/tools/dependency/cve_scan_test.py @@ -0,0 +1,283 @@ +#!/usr/bin/env python3 +"""Tests for cve_scan.""" + +from collections import defaultdict +import datetime as dt +import unittest + +import cve_scan + + +class CveScanTest(unittest.TestCase): + + def test_parse_cve_json(self): + cve_json = { + 'CVE_Items': [ + { + 'cve': { + 'CVE_data_meta': { + 'ID': 'CVE-2020-1234' + }, + 'description': { + 'description_data': [{ + 'value': 'foo' + }] + } + }, + 'configurations': { + 'nodes': [{ + 'cpe_match': [{ + 'cpe23Uri': 'cpe:2.3:a:foo:bar:1.2.3' + }], + }], + }, + 'impact': { + 'baseMetricV3': { + 'cvssV3': { + 'baseScore': 3.4, + 'baseSeverity': 'LOW' + } + } + }, + 'publishedDate': '2020-03-17T00:59Z', + 'lastModifiedDate': '2020-04-17T00:59Z' + }, + { + 'cve': { + 'CVE_data_meta': { + 'ID': 'CVE-2020-1235' + }, + 'description': { + 'description_data': [{ + 'value': 'bar' + }] + } + }, + 'configurations': { + 'nodes': [{ + 'cpe_match': [{ + 'cpe23Uri': 'cpe:2.3:a:foo:bar:1.2.3' + }], + 'children': [ + { + 'cpe_match': [{ + 'cpe23Uri': 'cpe:2.3:a:foo:baz:3.2.3' + }] + }, + { + 'cpe_match': [{ + 'cpe23Uri': 'cpe:2.3:a:foo:*:*' + }, { + 'cpe23Uri': 'cpe:2.3:a:wat:bar:1.2.3' + }] + }, + ], + }], + }, + 'impact': { + 'baseMetricV3': { + 'cvssV3': { + 
'baseScore': 9.9, + 'baseSeverity': 'HIGH' + } + } + }, + 'publishedDate': '2020-03-18T00:59Z', + 'lastModifiedDate': '2020-04-18T00:59Z' + }, + ] + } + cves = {} + cpe_revmap = defaultdict(set) + cve_scan.ParseCveJson(cve_json, cves, cpe_revmap) + self.maxDiff = None + self.assertDictEqual( + cves, { + 'CVE-2020-1234': + cve_scan.Cve(id='CVE-2020-1234', + description='foo', + cpes=set([self.BuildCpe('cpe:2.3:a:foo:bar:1.2.3')]), + score=3.4, + severity='LOW', + published_date=dt.date(2020, 3, 17), + last_modified_date=dt.date(2020, 4, 17)), + 'CVE-2020-1235': + cve_scan.Cve(id='CVE-2020-1235', + description='bar', + cpes=set( + map(self.BuildCpe, [ + 'cpe:2.3:a:foo:bar:1.2.3', 'cpe:2.3:a:foo:baz:3.2.3', + 'cpe:2.3:a:foo:*:*', 'cpe:2.3:a:wat:bar:1.2.3' + ])), + score=9.9, + severity='HIGH', + published_date=dt.date(2020, 3, 18), + last_modified_date=dt.date(2020, 4, 18)) + }) + self.assertDictEqual(cpe_revmap, { + 'cpe:2.3:a:foo:*:*': {'CVE-2020-1234', 'CVE-2020-1235'}, + 'cpe:2.3:a:wat:*:*': {'CVE-2020-1235'} + }) + + def BuildCpe(self, cpe_str): + return cve_scan.Cpe.FromString(cpe_str) + + def BuildDep(self, cpe_str, version=None, last_updated=None): + return {'cpe': cpe_str, 'version': version, 'last_updated': last_updated} + + def CpeMatch(self, cpe_str, dep_cpe_str, version=None, last_updated=None): + return cve_scan.CpeMatch(self.BuildCpe(cpe_str), + self.BuildDep(dep_cpe_str, version=version, last_updated=last_updated)) + + def test_cpe_match(self): + # Mismatched part + self.assertFalse(self.CpeMatch('cpe:2.3:o:foo:bar:*', 'cpe:2.3:a:foo:bar:*')) + # Mismatched vendor + self.assertFalse(self.CpeMatch('cpe:2.3:a:foo:bar:*', 'cpe:2.3:a:foz:bar:*')) + # Mismatched product + self.assertFalse(self.CpeMatch('cpe:2.3:a:foo:bar:*', 'cpe:2.3:a:foo:baz:*')) + # Wildcard product + self.assertTrue(self.CpeMatch('cpe:2.3:a:foo:bar:*', 'cpe:2.3:a:foo:*:*')) + # Wildcard version match + self.assertTrue(self.CpeMatch('cpe:2.3:a:foo:bar:*', 'cpe:2.3:a:foo:bar:*')) + # 
Exact version match + self.assertTrue(self.CpeMatch('cpe:2.3:a:foo:bar:1.2.3', 'cpe:2.3:a:foo:bar:*', + version='1.2.3')) + # Date version match + self.assertTrue( + self.CpeMatch('cpe:2.3:a:foo:bar:2020-03-05', + 'cpe:2.3:a:foo:bar:*', + last_updated='2020-03-05')) + fuzzy_version_matches = [ + ('2020-03-05', '2020-03-05'), + ('2020-03-05', '20200305'), + ('2020-03-05', 'foo-20200305-bar'), + ('2020-03-05', 'foo-2020_03_05-bar'), + ('2020-03-05', 'foo-2020-03-05-bar'), + ('1.2.3', '1.2.3'), + ('1.2.3', '1-2-3'), + ('1.2.3', '1_2_3'), + ('1.2.3', '1:2:3'), + ('1.2.3', 'foo-1-2-3-bar'), + ] + for cpe_version, dep_version in fuzzy_version_matches: + self.assertTrue( + self.CpeMatch(f'cpe:2.3:a:foo:bar:{cpe_version}', + 'cpe:2.3:a:foo:bar:*', + version=dep_version)) + fuzzy_version_no_matches = [ + ('2020-03-05', '2020-3.5'), + ('2020-03-05', '2020--03-05'), + ('1.2.3', '1@2@3'), + ('1.2.3', '1..2.3'), + ] + for cpe_version, dep_version in fuzzy_version_no_matches: + self.assertFalse( + self.CpeMatch(f'cpe:2.3:a:foo:bar:{cpe_version}', + 'cpe:2.3:a:foo:bar:*', + version=dep_version)) + + def BuildCve(self, cve_id, cpes, published_date): + return cve_scan.Cve(cve_id, + description=None, + cpes=cpes, + score=None, + severity=None, + published_date=dt.date.fromisoformat(published_date), + last_modified_date=None) + + def CveMatch(self, cve_id, cpes, published_date, dep_cpe_str, version=None, last_updated=None): + return cve_scan.CveMatch(self.BuildCve(cve_id, cpes=cpes, published_date=published_date), + self.BuildDep(dep_cpe_str, version=version, last_updated=last_updated)) + + def test_cve_match(self): + # Empty CPEs, no match + self.assertFalse(self.CveMatch('CVE-2020-123', set(), '2020-05-03', 'cpe:2.3:a:foo:bar:*')) + # Wildcard version, stale dependency match + self.assertTrue( + self.CveMatch('CVE-2020-123', + set([self.BuildCpe('cpe:2.3:a:foo:bar:*')]), + '2020-05-03', + 'cpe:2.3:a:foo:bar:*', + last_updated='2020-05-02')) + self.assertTrue( + 
self.CveMatch('CVE-2020-123', + set([self.BuildCpe('cpe:2.3:a:foo:bar:*')]), + '2020-05-03', + 'cpe:2.3:a:foo:bar:*', + last_updated='2020-05-03')) + # Wildcard version, recently updated + self.assertFalse( + self.CveMatch('CVE-2020-123', + set([self.BuildCpe('cpe:2.3:a:foo:bar:*')]), + '2020-05-03', + 'cpe:2.3:a:foo:bar:*', + last_updated='2020-05-04')) + # Version match + self.assertTrue( + self.CveMatch('CVE-2020-123', + set([self.BuildCpe('cpe:2.3:a:foo:bar:1.2.3')]), + '2020-05-03', + 'cpe:2.3:a:foo:bar:*', + version='1.2.3')) + # Version mismatch + self.assertFalse( + self.CveMatch('CVE-2020-123', + set([self.BuildCpe('cpe:2.3:a:foo:bar:1.2.3')]), + '2020-05-03', + 'cpe:2.3:a:foo:bar:*', + version='1.2.4', + last_updated='2020-05-02')) + # Multiple CPEs, match first, don't match later. + self.assertTrue( + self.CveMatch('CVE-2020-123', + set([ + self.BuildCpe('cpe:2.3:a:foo:bar:1.2.3'), + self.BuildCpe('cpe:2.3:a:foo:baz:3.2.1') + ]), + '2020-05-03', + 'cpe:2.3:a:foo:bar:*', + version='1.2.3')) + + def test_cve_scan(self): + cves = { + 'CVE-2020-1234': + self.BuildCve( + 'CVE-2020-1234', + set([ + self.BuildCpe('cpe:2.3:a:foo:bar:1.2.3'), + self.BuildCpe('cpe:2.3:a:foo:baz:3.2.1') + ]), '2020-05-03'), + 'CVE-2020-1235': + self.BuildCve( + 'CVE-2020-1235', + set([ + self.BuildCpe('cpe:2.3:a:foo:bar:1.2.3'), + self.BuildCpe('cpe:2.3:a:foo:baz:3.2.1') + ]), '2020-05-03'), + 'CVE-2020-1236': + self.BuildCve('CVE-2020-1236', set([ + self.BuildCpe('cpe:2.3:a:foo:wat:1.2.3'), + ]), '2020-05-03'), + } + cpe_revmap = { + 'cpe:2.3:a:foo:*:*': ['CVE-2020-1234', 'CVE-2020-1235', 'CVE-2020-1236'], + } + cve_allowlist = ['CVE-2020-1235'] + repository_locations = { + 'bar': self.BuildDep('cpe:2.3:a:foo:bar:*', version='1.2.3'), + 'baz': self.BuildDep('cpe:2.3:a:foo:baz:*', version='3.2.1'), + 'foo': self.BuildDep('cpe:2.3:a:foo:*:*', version='1.2.3'), + 'blah': self.BuildDep('N/A'), + } + possible_cves, cve_deps = cve_scan.CveScan(cves, cpe_revmap, cve_allowlist, + 
repository_locations) + self.assertListEqual(sorted(possible_cves.keys()), ['CVE-2020-1234', 'CVE-2020-1236']) + self.assertDictEqual(cve_deps, { + 'CVE-2020-1234': ['bar', 'baz', 'foo'], + 'CVE-2020-1236': ['foo'] + }) + + +if __name__ == '__main__': + unittest.main() diff --git a/tools/dependency/utils.py b/tools/dependency/utils.py new file mode 100644 index 000000000000..df1930d5cd32 --- /dev/null +++ b/tools/dependency/utils.py @@ -0,0 +1,28 @@ +# Utilities for reasoning about dependencies. + +from importlib.util import spec_from_loader, module_from_spec +from importlib.machinery import SourceFileLoader + + +# Shared Starlark/Python files must have a .bzl suffix for Starlark import, so +# we are forced to do this workaround. +def LoadModule(name, path): + spec = spec_from_loader(name, SourceFileLoader(name, path)) + module = module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +envoy_repository_locations = LoadModule('envoy_repository_locations', + 'bazel/repository_locations.bzl') +api_repository_locations = LoadModule('api_repository_locations', + 'api/bazel/repository_locations.bzl') +repository_locations_utils = LoadModule('repository_locations_utils', + 'api/bazel/repository_locations_utils.bzl') + + +def RepositoryLocations(): + spec_loader = repository_locations_utils.load_repository_locations_spec + locations = spec_loader(envoy_repository_locations.REPOSITORY_LOCATIONS_SPEC) + locations.update(spec_loader(api_repository_locations.REPOSITORY_LOCATIONS_SPEC)) + return locations diff --git a/tools/dependency/validate.py b/tools/dependency/validate.py index 1e6dc88fd343..92178b450074 100755 --- a/tools/dependency/validate.py +++ b/tools/dependency/validate.py @@ -12,36 +12,40 @@ from importlib.machinery import SourceFileLoader from importlib.util import spec_from_loader, module_from_spec -# bazel/repository_locations.bzl must have a .bzl suffix for Starlark import, so + +# Shared Starlark/Python files must have a .bzl suffix for 
Starlark import, so # we are forced to do this workaround. -_repository_locations_spec = spec_from_loader( - 'repository_locations', - SourceFileLoader('repository_locations', 'bazel/repository_locations.bzl')) -repository_locations = module_from_spec(_repository_locations_spec) -_repository_locations_spec.loader.exec_module(repository_locations) - -# source/extensions/extensions_build_config.bzl must have a .bzl suffix for Starlark -# import, so we are forced to do this workaround. -_extensions_build_config_spec = spec_from_loader( - 'extensions_build_config', - SourceFileLoader('extensions_build_config', 'source/extensions/extensions_build_config.bzl')) -extensions_build_config = module_from_spec(_extensions_build_config_spec) -_extensions_build_config_spec.loader.exec_module(extensions_build_config) +def LoadModule(name, path): + spec = spec_from_loader(name, SourceFileLoader(name, path)) + module = module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +envoy_repository_locations = LoadModule('envoy_repository_locations', + 'bazel/repository_locations.bzl') +api_repository_locations = LoadModule('api_repository_locations', + 'api/bazel/repository_locations.bzl') +extensions_build_config = LoadModule('extensions_build_config', + 'source/extensions/extensions_build_config.bzl') + +REPOSITORY_LOCATIONS_SPEC = dict(envoy_repository_locations.REPOSITORY_LOCATIONS_SPEC) +REPOSITORY_LOCATIONS_SPEC.update(api_repository_locations.REPOSITORY_LOCATIONS_SPEC) BAZEL_QUERY_EXTERNAL_DEP_RE = re.compile('@(\w+)//') EXTENSION_LABEL_RE = re.compile('(//source/extensions/.*):') -# TODO(htuch): Add API dependencies to metadata, shrink this set. 
-UNKNOWN_DEPS = [ - 'org_golang_x_tools', 'com_github_cncf_udpa', 'org_golang_google_protobuf', - 'io_bazel_rules_nogo', 'com_envoyproxy_protoc_gen_validate', 'opencensus_proto', - 'io_bazel_rules_go', 'foreign_cc_platform_utils', 'com_github_golang_protobuf', - 'com_google_googleapis' -] +# We can safely ignore these as they are from Bazel or internal repository structure. IGNORE_DEPS = set([ - 'envoy', 'envoy_api', 'envoy_api_canonical', 'platforms', 'bazel_tools', 'local_config_cc', - 'remote_coverage_tools' -] + UNKNOWN_DEPS) + 'envoy', + 'envoy_api', + 'envoy_api_canonical', + 'platforms', + 'bazel_tools', + 'local_config_cc', + 'remote_coverage_tools', + 'foreign_cc_platform_utils', +]) # Should a dependency be ignored if it's only used in test? Any changes to this @@ -78,7 +82,7 @@ def DepsByUseCategory(self, use_category): Returns: Set of dependency identifiers that match use_category. """ - return set(name for name, metadata in repository_locations.REPOSITORY_LOCATIONS_SPEC.items() + return set(name for name, metadata in REPOSITORY_LOCATIONS_SPEC.items() if use_category in metadata['use_category']) def GetMetadata(self, dependency): @@ -91,12 +95,22 @@ def GetMetadata(self, dependency): A dictionary with the repository metadata as defined in bazel/repository_locations.bzl. """ - return repository_locations.REPOSITORY_LOCATIONS_SPEC.get(dependency) + return REPOSITORY_LOCATIONS_SPEC.get(dependency) class BuildGraph(object): """Models the Bazel build graph.""" + def __init__(self, ignore_deps=IGNORE_DEPS, repository_locations_spec=REPOSITORY_LOCATIONS_SPEC): + self._ignore_deps = ignore_deps + # Reverse map from untracked dependencies implied by other deps back to the dep. 
+ self._implied_untracked_deps_revmap = {} + for dep, metadata in repository_locations_spec.items(): + implied_untracked_deps = metadata.get('implied_untracked_deps', []) + for untracked_dep in implied_untracked_deps: + assert (untracked_dep not in self._implied_untracked_deps_revmap) + self._implied_untracked_deps_revmap[untracked_dep] = dep + def QueryExternalDeps(self, *targets): """Query the build graph for transitive external dependencies. @@ -110,12 +124,18 @@ def QueryExternalDeps(self, *targets): deps = subprocess.check_output(['bazel', 'query', deps_query], stderr=subprocess.PIPE).decode().splitlines() ext_deps = set() + implied_untracked_deps = set() for d in deps: match = BAZEL_QUERY_EXTERNAL_DEP_RE.match(d) if match: ext_dep = match.group(1) - if ext_dep not in IGNORE_DEPS: - ext_deps.add(ext_dep) + if ext_dep in self._ignore_deps: + continue + # If the dependency is untracked, add the source dependency that loaded + # it transitively. + if ext_dep in self._implied_untracked_deps_revmap: + ext_dep = self._implied_untracked_deps_revmap[ext_dep] + ext_deps.add(ext_dep) return set(ext_deps) def ListExtensions(self): @@ -192,13 +212,15 @@ def ValidateDataPlaneCoreDeps(self): '//source/common/crypto/...', '//source/common/conn_pool/...', '//source/common/formatter/...', '//source/common/http/...', '//source/common/ssl/...', '//source/common/tcp/...', '//source/common/tcp_proxy/...', '//source/common/network/...') - expected_dataplane_core_deps = self._dep_info.DepsByUseCategory('dataplane_core') + # It's hard to disentangle API and dataplane today. 
+ expected_dataplane_core_deps = self._dep_info.DepsByUseCategory('dataplane_core').union( + self._dep_info.DepsByUseCategory('api')) bad_dataplane_core_deps = queried_dataplane_core_min_deps.difference( expected_dataplane_core_deps) if len(bad_dataplane_core_deps) > 0: raise DependencyError( f'Observed dataplane core deps {queried_dataplane_core_min_deps} is not covered by ' - '"use_category" implied core deps {expected_dataplane_core_deps}: {bad_dataplane_core_deps} ' + f'"use_category" implied core deps {expected_dataplane_core_deps}: {bad_dataplane_core_deps} ' 'are missing') def ValidateControlPlaneDeps(self): @@ -217,7 +239,9 @@ def ValidateControlPlaneDeps(self): # these paths. queried_controlplane_core_min_deps = self._build_graph.QueryExternalDeps( '//source/common/config/...') - expected_controlplane_core_deps = self._dep_info.DepsByUseCategory('controlplane') + # Controlplane will always depend on API. + expected_controlplane_core_deps = self._dep_info.DepsByUseCategory('controlplane').union( + self._dep_info.DepsByUseCategory('api')) bad_controlplane_core_deps = queried_controlplane_core_min_deps.difference( expected_controlplane_core_deps) if len(bad_controlplane_core_deps) > 0: @@ -241,19 +265,15 @@ def ValidateExtensionDeps(self, name, target): marginal_deps = queried_deps.difference(self._queried_core_deps) expected_deps = [] for d in marginal_deps: - # TODO(htuch): Ensure that queried deps are fully contained in - # repository_locations, i.e. that we're tracking with metadata all actual - # dependencies. Today, we are missing API and pip3 deps based on manual - # inspection. 
metadata = self._dep_info.GetMetadata(d) if metadata: use_category = metadata['use_category'] valid_use_category = any( - c in use_category for c in ['dataplane_ext', 'observability_ext', 'other']) + c in use_category for c in ['dataplane_ext', 'observability_ext', 'other', 'api']) if not valid_use_category: raise DependencyError( f'Extensions {name} depends on {d} with "use_category" not including ' - '["dataplane_ext", "observability_ext", "other"]') + '["dataplane_ext", "observability_ext", "api", "other"]') if 'extensions' in metadata: allowed_extensions = metadata['extensions'] if name not in allowed_extensions: diff --git a/tools/envoy_collect/envoy_collect.py b/tools/envoy_collect/envoy_collect.py index 60aa85e01525..ef69a6756e92 100755 --- a/tools/envoy_collect/envoy_collect.py +++ b/tools/envoy_collect/envoy_collect.py @@ -4,7 +4,7 @@ Example use: ./tools/envoy_collect.py --output-path=./envoy.tar -c - ./configs/google_com_proxy.v2.yaml --service-node foo + ./configs/google_com_proxy.yaml --service-node foo tar -tvf ./envoy.tar -rw------- htuch/eng 0 2017-08-13 21:13 access_0.log