diff --git a/CHANGELOG.md b/CHANGELOG.md
index 69098b0df3fc..2b5095f4d21a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -153,6 +153,7 @@
 - Fix an issue where balancer passive healthcheck would use wrong status code
   when kong changes status code from upstream in `header_filter` phase.
   [#10325](https://github.com/Kong/kong/pull/10325)
+  [#10592](https://github.com/Kong/kong/pull/10592)
 - Fix an issue where schema validations failing in a nested record did not propagate the error correctly.
   [#10449](https://github.com/Kong/kong/pull/10449)
 - Fixed an issue where dangling Unix sockets would prevent Kong from restarting in
diff --git a/kong/runloop/handler.lua b/kong/runloop/handler.lua
index f7a8585d9e75..4bf5f426452d 100644
--- a/kong/runloop/handler.lua
+++ b/kong/runloop/handler.lua
@@ -1512,7 +1512,7 @@ return {
       -- https://nginx.org/en/docs/http/ngx_http_upstream_module.html#variables
       -- because of the way of Nginx do the upstream_status variable, it may be
       -- a string or a number, so we need to parse it to get the status
-      local status = tonumber(sub(var.upstream_status or "", -3)) or ngx.status
+      local status = tonumber(ctx.buffered_status) or tonumber(sub(var.upstream_status or "", -3)) or ngx.status
       if status == 504 then
         balancer_data.balancer.report_timeout(balancer_data.balancer_handle)
       else
diff --git a/spec/02-integration/05-proxy/10-balancer/01-healthchecks_spec.lua b/spec/02-integration/05-proxy/10-balancer/01-healthchecks_spec.lua
index 45c1075ed145..bfbf6f4776e7 100644
--- a/spec/02-integration/05-proxy/10-balancer/01-healthchecks_spec.lua
+++ b/spec/02-integration/05-proxy/10-balancer/01-healthchecks_spec.lua
@@ -1256,6 +1256,77 @@ for _, strategy in helpers.each_strategy() do
         end
       end)

+      it("performs passive health checks when the downstream status code was changed by a subrequest", function()
+
+        for nfails = 1, 3 do
+
+          bu.begin_testcase_setup(strategy, bp)
+          -- configure healthchecks
+          local upstream_name, upstream_id = bu.add_upstream(bp, {
+            healthchecks = bu.healthchecks_config {
+              passive = {
+                unhealthy = {
+                  http_failures = nfails,
+                }
+              }
+            }
+          })
+          local port1 = bu.add_target(bp, upstream_id, localhost)
+          local port2 = bu.add_target(bp, upstream_id, localhost)
+          local api_host, service_id = bu.add_api(bp, upstream_name)
+          bp.plugins:insert({
+            name = "pre-function",
+            service = { id = service_id },
+            config = {
+              access = {
+                [[
+                  kong.service.request.enable_buffering()
+                ]],
+              },
+              header_filter = {
+                [[
+                  ngx.exit(200)
+                ]],
+              },
+            }
+          })
+
+          bu.end_testcase_setup(strategy, bp)
+
+          local requests = bu.SLOTS * 2 -- go round the balancer twice
+
+          -- setup target servers:
+          -- server2 will only respond for part of the test,
+          -- then server1 will take over.
+          local server2_oks = math.floor(requests / 4)
+          local server1 = https_server.new(port1, localhost)
+          local server2 = https_server.new(port2, localhost)
+          server1:start()
+          server2:start()
+
+          -- Go hit them with our test requests
+          local client_oks1, client_fails1 = bu.client_requests(bu.SLOTS, api_host)
+          assert(bu.direct_request(localhost, port2, "/unhealthy"))
+          local client_oks2, client_fails2 = bu.client_requests(bu.SLOTS, api_host)
+
+          local client_oks = client_oks1 + client_oks2
+          local client_fails = client_fails1 + client_fails2
+
+          -- collect server results; hitcount
+          local count1 = server1:shutdown()
+          local count2 = server2:shutdown()
+
+          -- verify
+          assert.are.equal(requests - server2_oks - nfails, count1.ok)
+          assert.are.equal(server2_oks, count2.ok)
+          assert.are.equal(0, count1.fail)
+          assert.are.equal(nfails, count2.fail)
+
+          assert.are.equal(client_oks, requests)
+          assert.are.equal(0, client_fails)
+        end
+      end)
+
       it("threshold for health checks", function()
         local fixtures = {
           dns_mock = helpers.dns_mock.new()
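
Context for the handler.lua hunk: when a plugin enables response buffering (kong.service.request.enable_buffering()) and then rewrites the status in header_filter, as the new test does with ngx.exit(200), the status derived from $upstream_status or ngx.status can be the rewritten one rather than what the upstream actually returned, so the passive healthchecker was fed the wrong code. The change prefers ctx.buffered_status, which the diff suggests carries the upstream status recorded for the buffered response. Below is a minimal standalone Lua sketch of just that selection order; resolve_reported_status and the plain-table ctx are illustrative names, not Kong's API.

-- Sketch of the status-selection order introduced by the handler.lua change:
-- buffered upstream status first, then $upstream_status, then ngx.status.
local function resolve_reported_status(ctx, upstream_status_var, ngx_status)
  -- $upstream_status may list several codes when retries happen, so the
  -- handler keeps only the last three characters before converting.
  local from_var = tonumber(string.sub(upstream_status_var or "", -3))
  return tonumber(ctx.buffered_status) or from_var or ngx_status
end

-- Buffered proxying where header_filter rewrote the status to 200:
-- the real upstream 500 is still what gets reported.
assert(resolve_reported_status({ buffered_status = 500 }, nil, 200) == 500)
-- Regular proxying: fall back to the last code in $upstream_status.
assert(resolve_reported_status({}, "502, 504", 200) == 504)
-- Nothing else available: use the current ngx.status.
assert(resolve_reported_status({}, nil, 200) == 200)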