This repository has been archived by the owner on Apr 27, 2021. It is now read-only.

enable setting upstreams dynamically via HTTP endpoint #16

Closed · wants to merge 34 commits into from

Changes shown from 18 of 34 commits.

Commits
ecbc5b6
enable setting upstreams dynamically via HTTP endpoint
ElvinEfendi Mar 2, 2018
d3f4c08
added lua-resty-core as vendor
ElvinEfendi Mar 2, 2018
95891af
include lrucache
ElvinEfendi Mar 2, 2018
5f6106e
use luajit instead of standard lua
ElvinEfendi Mar 2, 2018
b1b7257
implemented a dumb balancer
ElvinEfendi Mar 2, 2018
66927ee
first working version!
ElvinEfendi Mar 2, 2018
0875dc8
move upstream_balancer out of loop
ElvinEfendi Mar 2, 2018
1962c22
skip reload and post to nginx on endpoints changes
ElvinEfendi Mar 3, 2018
81afc46
make the endpoint processing in lua side
ElvinEfendi Mar 3, 2018
760ddfb
sync with upstream
ElvinEfendi Mar 3, 2018
c8c6a89
no that resty core is part of nginx build we dont need to explicitly …
ElvinEfendi Mar 3, 2018
50b58eb
sync upstream
ElvinEfendi Mar 3, 2018
502e4a4
fix lua stuff in nginx image
ElvinEfendi Mar 3, 2018
ded0aea
adjust lua package paths
ElvinEfendi Mar 3, 2018
7bdc6ae
implemented general Nginx configuration handler
ElvinEfendi Mar 4, 2018
fac1d9a
adjust placeholder balancer
ElvinEfendi Mar 4, 2018
8ae8877
install lua-resty-balancer
ElvinEfendi Mar 4, 2018
e31b12e
proof of concept for per backend custom load balancing
ElvinEfendi Mar 4, 2018
5fe302e
dont use third party
ElvinEfendi Mar 4, 2018
1e86ac2
refactor controller changes and make it clearer
ElvinEfendi Mar 5, 2018
250680f
simplify
ElvinEfendi Mar 5, 2018
922de0e
make sure first config generation dynamically applied as well on top …
ElvinEfendi Mar 5, 2018
90cde46
add enable-dynamic-configuration cmd flag
ElvinEfendi Mar 5, 2018
93861d8
dont spam with periodic info messages
ElvinEfendi Mar 5, 2018
cf13265
use lrucache
ElvinEfendi Mar 5, 2018
ad17d78
use resty lock to avoid potential race condition
ElvinEfendi Mar 5, 2018
9206972
include lua-resty-lock in nginx image
ElvinEfendi Mar 5, 2018
6e9cfd0
do not assume same size of running and new backends
ElvinEfendi Mar 5, 2018
e347013
remove redundant method and add attribution
ElvinEfendi Mar 7, 2018
cd4ad96
add test case for new buildProxyPass logic
ElvinEfendi Mar 7, 2018
577e6b4
test IsDynamicallyConfigurable
ElvinEfendi Mar 7, 2018
aacd870
ran make code-generator
ElvinEfendi Mar 7, 2018
cba2f46
fix linting issues
ElvinEfendi Mar 7, 2018
8b550d0
update the nginx image
ElvinEfendi Mar 7, 2018
13 changes: 10 additions & 3 deletions images/nginx/build.sh
@@ -32,7 +32,7 @@ export OPENTRACING_CPP_VERSION=1.2.0
export ZIPKIN_CPP_VERSION=0.2.0
export JAEGER_VERSION=0.2.0
export MODSECURITY_VERSION=1.0.0
-export LUA_VERSION=0.10.12rc2
+export LUA_NGX_VERSION=0.10.12rc2
export LUA_UPSTREAM_VERSION=0.07
export COOKIE_FLAG_VERSION=1.1.0

@@ -155,7 +155,7 @@ get_src 9915ad1cf0734cc5b357b0d9ea92fec94764b4bf22f4dce185cbd65feda30ec1 \
"https://github.com/AirisX/nginx_cookie_flag_module/archive/v$COOKIE_FLAG_VERSION.tar.gz"

get_src 18edf2d18fa331265c36516a4a19ba75d26f46eafcc5e0c2d9aa6c237e8bc110 \
"https://github.com/openresty/lua-nginx-module/archive/v$LUA_VERSION.tar.gz"
"https://github.com/openresty/lua-nginx-module/archive/v$LUA_NGX_VERSION.tar.gz"

get_src 2a69815e4ae01aa8b170941a8e1a10b6f6a9aab699dee485d58f021dd933829a \
"https://github.com/openresty/lua-upstream-nginx-module/archive/v$LUA_UPSTREAM_VERSION.tar.gz"
@@ -166,6 +166,9 @@ get_src d4a9ed0d2405f41eb0178462b398afde8599c5115dcc1ff8f60e2f34a41a4c21 \
get_src 92fd006d5ca3b3266847d33410eb280122a7f6c06334715f87acce064188a02e \
"https://github.com/openresty/lua-resty-core/archive/v0.1.14rc1.tar.gz"

+get_src a77bf0d7cf6a9ba017d0dc973b1a58f13e48242dd3849c5e99c07d250667c44c \
+  "https://github.com/openresty/lua-resty-balancer/archive/v0.02rc4.tar.gz"

get_src 1ad2e34b111c802f9d0cdf019e986909123237a28c746b21295b63c9e785d9c3 \
"http://luajit.org/download/LuaJIT-2.1.0-beta3.tar.gz"

@@ -197,6 +200,10 @@ make install
cd "$BUILD_PATH/lua-resty-lrucache-0.07"
make install

cd "$BUILD_PATH/lua-resty-balancer-0.02rc4"
make
make install

# build opentracing lib
cd "$BUILD_PATH/opentracing-cpp-$OPENTRACING_CPP_VERSION"
mkdir .build
@@ -335,7 +342,7 @@ WITH_MODULES="--add-module=$BUILD_PATH/ngx_devel_kit-$NDK_VERSION \
--add-module=$BUILD_PATH/nginx-goodies-nginx-sticky-module-ng-$STICKY_SESSIONS_VERSION \
--add-module=$BUILD_PATH/nginx-http-auth-digest-$NGINX_DIGEST_AUTH \
--add-module=$BUILD_PATH/ngx_http_substitutions_filter_module-$NGINX_SUBSTITUTIONS \
---add-module=$BUILD_PATH/lua-nginx-module-$LUA_VERSION \
+--add-module=$BUILD_PATH/lua-nginx-module-$LUA_NGX_VERSION \
--add-module=$BUILD_PATH/lua-upstream-nginx-module-$LUA_UPSTREAM_VERSION \
--add-module=$BUILD_PATH/nginx_cookie_flag_module-$COOKIE_FLAG_VERSION \
--add-dynamic-module=$BUILD_PATH/nginx-opentracing-$NGINX_OPENTRACING_VERSION/opentracing \
73 changes: 72 additions & 1 deletion internal/ingress/controller/controller.go
@@ -17,9 +17,12 @@ limitations under the License.
package controller

import (
"bytes"
"encoding/json"
"fmt"
"math/rand"
"net"
"net/http"
"reflect"
"sort"
"strconv"
@@ -167,11 +170,79 @@ func (n *NGINXController) syncIngress(item interface{}) error {
if !n.isForceReload() && n.runningConfig.Equal(&pcfg) {
glog.V(3).Infof("skipping backend reload (no changes detected)")
return nil
} else if !n.isForceReload() && len(n.runningConfig.Backends) == len(pcfg.Backends) {
Review comment from ElvinEfendi (author):

@zrdaley this is how I ended up implementing it. The idea is to detect changes that affect only Endpoints; in case of any other change we fall back to reloading.

// check whether the configuration change is only about endpoints

newBackends := make([]*ingress.Backend, 0, len(pcfg.Backends))
runningBackends := make([]*ingress.Backend, 0, len(n.runningConfig.Backends))
for i := 0; i < len(n.runningConfig.Backends); i++ {
newBackends = append(newBackends, pcfg.Backends[i].DeepCopy())
runningBackends = append(runningBackends, n.runningConfig.Backends[i].DeepCopy())
}

for i := 0; i < len(n.runningConfig.Backends); i++ {
pcfg.Backends[i].Endpoints = []ingress.Endpoint{}
n.runningConfig.Backends[i].Endpoints = []ingress.Endpoint{}
}
if n.runningConfig.Equal(&pcfg) {
// so configuration change is only about endpoints and force reload is not set
// we can skip reload and dynamically update endpoints
glog.Infof("only endpoints have changed, skipping reload and posting them to Nginx via HTTP request")

// reset backends
pcfg.Backends = newBackends
n.runningConfig.Backends = runningBackends

for _, backend := range pcfg.Backends {
found := false
for _, runningBackend := range n.runningConfig.Backends {
if backend.Equal(runningBackend) {
found = true
break
}
}
if !found {
// endpoints of this backend have been updated, needs to be posted to Nginx
buf, err := json.Marshal(backend)
if err != nil {
glog.Errorf("unexpected error when json encoding endpoints: \n%v", err)
// TODO(elvinefendi) implement retry logic
continue
}

// TODO(elvinefendi) set port dynamically
resp, err := http.Post("http://localhost:18080/configuration/backends/"+backend.Name, "application/json", bytes.NewReader(buf))
if err != nil {
glog.Errorf("unexpected error when POSTing HTTP request: \n%v", err)
// TODO(elvinefendi) implement retry logic
continue
}

defer func() {
if err := resp.Body.Close(); err != nil {
glog.Warningf("error while closing response body: \n%v", err)
}
}()

if resp.StatusCode != http.StatusCreated {
glog.Errorf("Unexpected error code: %v", resp.StatusCode)
} else {
glog.Infof("endpoints for " + backend.Name + " have been updated")
}
} else {
glog.Infof("%v is the same as in the running config; number of endpoints: %v", backend.Name, len(backend.Endpoints))
}
}

n.OnUpdate(pcfg, true)
n.runningConfig = &pcfg
return nil
}
}

glog.Infof("backend reload required")

-err := n.OnUpdate(pcfg)
+err := n.OnUpdate(pcfg, false)
if err != nil {
incReloadErrorCount()
glog.Errorf("unexpected failure restarting the backend: \n%v", err)
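The two "TODO(elvinefendi) implement retry logic" comments above are left open in this commit. As a minimal sketch only — the helper name, attempt count, and back-off policy below are assumptions, not part of this PR — a bounded retry around the POST could look like:

package controller

import (
	"bytes"
	"fmt"
	"net/http"
	"time"
)

// postBackendWithRetry is a hypothetical helper (not in this PR): it retries the
// POST to the Lua configuration endpoint a bounded number of times, sleeping a
// little longer after each failed attempt.
func postBackendWithRetry(url string, payload []byte, attempts int) error {
	var lastErr error
	for i := 0; i < attempts; i++ {
		resp, err := http.Post(url, "application/json", bytes.NewReader(payload))
		if err != nil {
			lastErr = err
		} else {
			resp.Body.Close()
			if resp.StatusCode == http.StatusCreated {
				return nil
			}
			lastErr = fmt.Errorf("unexpected status code: %v", resp.StatusCode)
		}
		time.Sleep(time.Duration(i+1) * time.Second) // simple linear back-off between attempts
	}
	return lastErr
}
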
10 changes: 6 additions & 4 deletions internal/ingress/controller/nginx.go
@@ -448,7 +448,7 @@ Error: %v
//
// returning nil implies the backend will be reloaded.
// if an error is returned means requeue the update
-func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
+func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration, skipReload bool) error {
cfg := n.store.GetBackendConfiguration()
cfg.Resolver = n.resolver

@@ -664,9 +664,11 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
return err
}

-o, err := exec.Command(n.binary, "-s", "reload", "-c", cfgPath).CombinedOutput()
-if err != nil {
-	return fmt.Errorf("%v\n%v", err, string(o))
+if !skipReload {
+	o, err := exec.Command(n.binary, "-s", "reload", "-c", cfgPath).CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("%v\n%v", err, string(o))
+	}
}

return nil
3 changes: 2 additions & 1 deletion internal/ingress/controller/template/template.go
@@ -313,7 +313,8 @@ func buildProxyPass(host string, b interface{}, loc interface{}) string {
}

// defProxyPass returns the default proxy_pass, just the name of the upstream
-defProxyPass := fmt.Sprintf("proxy_pass %s://%s;", proto, upstreamName)
+//defProxyPass := fmt.Sprintf("proxy_pass %s://%s;", proto, upstreamName)
+defProxyPass := fmt.Sprintf("proxy_pass %s://upstream_balancer;", proto)
Review comment from ElvinEfendi (author):

This should be done only if dynamic reconfiguration is enabled for the vhost/ingress.

// if the path in the ingress rule is equals to the target: no special rewrite
if path == location.Rewrite.Target {
return defProxyPass
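Following up on the review comment above, a minimal sketch of how that gating could look — the helper name buildDefaultProxyPass and the dynamicEnabled parameter are assumptions for illustration, not part of this PR:

package template

import "fmt"

// buildDefaultProxyPass is a hypothetical helper illustrating the review note:
// only point traffic at the Lua-managed upstream_balancer when dynamic
// configuration is enabled for the location, otherwise keep the static upstream name.
func buildDefaultProxyPass(proto, upstreamName string, dynamicEnabled bool) string {
	if dynamicEnabled {
		return fmt.Sprintf("proxy_pass %s://upstream_balancer;", proto)
	}
	return fmt.Sprintf("proxy_pass %s://%s;", proto, upstreamName)
}
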
48 changes: 48 additions & 0 deletions rootfs/etc/nginx/lua/balancer.lua
@@ -0,0 +1,48 @@
local ngx_balancer = require("ngx.balancer")
local ngx_upstream = require("ngx.upstream")
local math = require("math")
local json = require("cjson")
local resty_chash = require("resty.chash")
local resty_roundrobin = require("resty.roundrobin")
local configuration = require("configuration")
local util = require("util")

local DEFAULT_ALGORITHM = "round_robin"

local _M = {}

local function balance_least_conn(endpoints)
local servers, nodes = {}, {}
local str_null = string.char(0)

for _, endpoint in ipairs(endpoints) do
local id = endpoint.address .. str_null .. endpoint.port
servers[id] = endpoint
nodes[id] = 1
end

-- TODO(elvinefendi) move this out of hot path and do it in process_backends_data function instead
local chash = resty_chash:new(nodes)

local id = chash:find()
local endpoint = servers[id]
return endpoint.address, endpoint.port
end

function _M.call()
ngx_balancer.set_more_tries(1)

local lb = configuration.get_lb(ngx.var.proxy_upstream_name)
local host_port_string = lb:find()
ngx.log(ngx.INFO, "selected host_port_string: " .. host_port_string)
local host, port = util.split_pair(host_port_string, ":")

local ok, err = ngx_balancer.set_current_peer(host, port)
if ok then
ngx.log(ngx.INFO, "current peer is set to " .. host_port_string)
else
ngx.log(ngx.ERR, "error while setting current upstream peer to: " .. tostring(err))
end
end

return _M
126 changes: 126 additions & 0 deletions rootfs/etc/nginx/lua/configuration.lua
@@ -0,0 +1,126 @@
local router = require("router")
local json = require("cjson")
local util = require("util")

-- keys are backend names
-- values are the names of the load-balancing algorithms to use for those backends
local backend_lb_algorithms = ngx.shared.backend_lb_algorithms

-- keys are always going to be ngx.var.proxy_upstream_name, a unique identifier of an app's Backend object
-- currently it is built out of the namespace, service name and service port
-- values are JSON-encoded ingress.Backend objects; for more info refer to internal/ingress/types.go
local backends_data = ngx.shared.backends_data

-- TODO(elvinefendi) this is for future iteration when/if we decide for example to dynamically configure certificates
-- similar to backends_data
-- local servers_data = ngx.shared.servers_data

-- measured in seconds
-- for an Nginx worker to pick up the new list of upstream peers
-- it will take <the delay until the controller POSTs the backend object to the Nginx endpoint> + BACKEND_PROCESSING_DELAY
local BACKEND_PROCESSING_DELAY = 1

local _M = {}

local lbs = {}
local backends = {}

function _M.get_lb(backend_name)
return lbs[backend_name]
end

local resty_roundrobin = require("resty.roundrobin")

-- TODO(elvinefendi) make this consider lb_alg instead of always using round robin
local function update_backend(backend)
ngx.log(ngx.INFO, "updating backend: " .. backend.name)

local servers, nodes = {}, {}

for _, endpoint in ipairs(backend.endpoints) do
local id = endpoint.address .. ":" .. endpoint.port
servers[id] = endpoint
nodes[id] = 1
end

local rr = lbs[backend.name]
if rr then
rr:reinit(nodes)
else
rr = resty_roundrobin:new(nodes)
end

lbs[backend.name] = rr
backends[backend.name] = backend
end

-- this function is called periodically in every worker to decode backends and store them in the local backends variable
local function process_backends_data()
-- 0 here means get all the keys, which can be slow if there are many keys
-- TODO(elvinefendi) think about storing comma-separated backend names in another dictionary and using that to
-- fetch the list of them here instead of blocking access to the shared dictionary
local backend_names = backends_data:get_keys(0)

for _, backend_name in pairs(backend_names) do
local backend_data = backends_data:get(backend_name)

local ok, backend = pcall(json.decode, backend_data)

if ok then
if not util.deep_compare(backends[backend_name], backend, true) then
update_backend(backend)
end
else
ngx.log(ngx.ERROR, "could not parse backend_json: " .. tostring(backend))
end
end
end

function _M.init_worker()
local _, err = ngx.timer.every(BACKEND_PROCESSING_DELAY, process_backends_data)
if err then
ngx.log(ngx.ERR, "error when setting up timer.every for process_backends_data: " .. tostring(err))
end
end

function _M.call()
local r = router.new()

r:match({
POST = {
["/configuration/backends/:name"] = function(params)
ngx.req.read_body() -- explicitly read the req body

local success, err = backends_data:set(params.name, ngx.req.get_body_data())
if not success then
return err
end

-- TODO(elvinefendi) also check if it is a supported algorithm
if params.lb_alg ~= nil and params.lb_alg ~= "" then
success, err = backend_lb_algorithms:set(params.name, params.lb_alg)
if not success then
return err
end
end

ngx.status = 201
ngx.log(ngx.INFO, "backend data was updated for " .. params.name .. ": " .. tostring(ngx.req.get_body_data()))
end
}
})

local ok, errmsg = r:execute(ngx.var.request_method, ngx.var.request_uri, ngx.req.get_uri_args())
if ok then
if errmsg then
ngx.status = 400
ngx.print(tostring(errmsg))
end
else
ngx.log(ngx.ERR, tostring(errmsg))
ngx.status = 404
ngx.print("Not found!")
end
end

return _M
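
For manual testing, the new Lua endpoint above can be exercised directly from a small client. A minimal sketch, assuming the port 18080 hard-coded in controller.go above; the backend name and JSON payload are illustrative only, not taken from this PR:

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Example ingress.Backend payload; field values are made up for illustration.
	body := []byte(`{"name":"default-demo-service-80","endpoints":[{"address":"10.2.0.3","port":"8080"}]}`)

	resp, err := http.Post(
		"http://localhost:18080/configuration/backends/default-demo-service-80",
		"application/json",
		bytes.NewReader(body),
	)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	// The Lua handler sets ngx.status = 201 once the shared dictionary has been updated.
	fmt.Println("status:", resp.StatusCode)
}
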
35 changes: 35 additions & 0 deletions rootfs/etc/nginx/lua/util.lua
@@ -0,0 +1,35 @@
local _M = {}

function _M.split_pair(pair, separator)
local i = pair:find(separator)
if i == nil then
return pair, nil
else
local name = pair:sub(1, i - 1)
local value = pair:sub(i + 1, -1)
return name, value
end
end

local function deep_compare(t1, t2, ignore_mt)
local ty1 = type(t1)
local ty2 = type(t2)
if ty1 ~= ty2 then return false end
-- non-table types can be directly compared
if ty1 ~= 'table' and ty2 ~= 'table' then return t1 == t2 end
-- as well as tables which have the metamethod __eq
local mt = getmetatable(t1)
if not ignore_mt and mt and mt.__eq then return t1 == t2 end
for k1,v1 in pairs(t1) do
local v2 = t2[k1]
if v2 == nil or not deep_compare(v1,v2) then return false end
end
for k2,v2 in pairs(t2) do
local v1 = t1[k2]
if v1 == nil or not deep_compare(v1,v2) then return false end
end
return true
end
_M.deep_compare = deep_compare

return _M