Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[THREESCALE-9664] Add support for Redis Sentinel in edge limiting policy #1414

Draft
wants to merge 5 commits into
base: master
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 34 additions & 0 deletions docker-compose-devel.yml
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,11 @@ services:
environment:
EDITOR: vi
TEST_NGINX_REDIS_HOST: redis
TEST_NGINX_REDIS_MASTER: redismaster
TEST_NGINX_REDIS_SENTINEL_1_HOST: sentinel-1
TEST_NGINX_REDIS_SENTINEL_2_HOST: sentinel-2
TEST_NGINX_REDIS_SENTINEL_3_HOST: sentinel-3
TEST_NGINX_REDIS_SENTINEL_PORT: 5000
TEST_NGINX_BINARY: openresty
PROJECT_PATH: /opt/app-root/src
TEST_NGINX_APICAST_PATH: /opt/app-root/src/gateway
Expand All @@ -23,3 +28,32 @@ services:
GIT_COMMITTER_EMAIL: ${GIT_COMMITTER_EMAIL:-""}
redis:
image: redis
redis-master:
image: redis
redis-slave-1:
image: redis
command: redis-server --slaveof redis-master 6379
redis-slave-2:
image: redis
command: redis-server --slaveof redis-master 6379
sentinel-1:
build:
context: ./redis-sentinel
depends_on:
- redis-master
- redis-slave-1
- redis-slave-2
sentinel-2:
build:
context: ./redis-sentinel
depends_on:
- redis-master
- redis-slave-1
- redis-slave-2
sentinel-3:
build:
context: ./redis-sentinel
depends_on:
- redis-master
- redis-slave-1
- redis-slave-2
39 changes: 20 additions & 19 deletions gateway/Roverfile.lock
Original file line number Diff line number Diff line change
@@ -1,31 +1,32 @@
argparse 0.7.1-1||production
busted 2.1.1-1||testing
date 2.2-2||production
dkjson 2.6-1||testing
argparse 0.6.0-1|412e6aca393e365f92c0315dfe50181b193f1ace|production
busted 2.1.1-1|e3ed48759b625f2e37bf02ccc057b2b98108f108|testing
date 2.2-2|8d74567cf979c1eab2c6b6ca2e3b978fa40569a2|production
dkjson 2.5-2||testing
fifo 0.2-0||development
inspect 3.1.3-0||production
inspect 3.1.1-0||production
jsonschema 0.8-0|c1d72d86bb3dc5b33da57d47febc47657d29ea74|testing
ldoc 1.4.6-2||development
liquid 0.2.0-2||production
lua-resty-env 0.4.0-1||production
lua-resty-execvp 0.1.1-1||production
lua-resty-http 0.15-0||production
lua-resty-iputils 0.3.0-2||production
lua-resty-jit-uuid 0.0.7-2||production
lua-resty-jwt 0.2.0-0||production
lua-resty-http 0.15-0|41b2e822ce5c19f64e293b7dc2d5d244e511615d|production
lua-resty-iputils 0.3.0-1|6110b41eaa52efd25e56f89e34412ab95f700d57|production
lua-resty-jit-uuid 0.0.7-2|64ae38de75c9d58f330d89e140ac872771c19223|production
lua-resty-jwt 0.2.0-0|2a62ff95eae91df6bd8655080a4b9b04c61bec6b|production
lua-resty-redis-connector 0.11.0-0||production
lua-resty-repl 0.0.6-0|3878f41b7e8f97b1c96919db19dbee9496569dda|development
lua-resty-url 0.3.5-1||production
lua-term 0.7-1||testing
lua_cliargs 3.0-2||testing
luacov 0.15.0-1||testing
luafilesystem 1.8.0-1||production,development,testing
luassert 1.9.0-1||testing
lua_cliargs 3.0-1||testing
luacov 0.13.0-1|637c48d59f722050d718d0c398f655bc7fe1707a|testing
luafilesystem 1.7.0-2|de87218e9798c4dd1a40d65403d99e9e82e1cfa0|testing,production,development
luassert 1.9.0-1|8d8dc8a54cc468048a128a867f6449a6c3fdd11a|testing
luasystem 0.2.1-0||testing
lyaml 6.2.8-1||production
markdown 0.33-1||development
lyaml 6.2.4-1||production
markdown 0.33-1|8c09109924b218aaecbfd4d4b1de538269c4d765|development
mediator_lua 1.1.2-0||testing
net-url 1.1-1||testing
nginx-lua-prometheus 0.20181120-3||production
penlight 1.13.1-1||production,development,testing
net-url 1.1-1|32acd84d06e16ddffc975adafce9cea26f3b2dd1|testing
nginx-lua-prometheus 0.20181120-3|379c0a4d4d6f3c5b0eb93691fc7e14fff498e1ca|production
penlight 1.7.0-1|1cce50d2cd3b3dd209d2aa1f5ad72398ab1e8d48|testing,production,development
router 2.1-0||production
say 1.4.1-3||testing
say 1.4.1-3|45a3057e68c52b34ab59ef167efeb2340e356661|testing
1 change: 1 addition & 0 deletions gateway/apicast-scm-1.rockspec
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ dependencies = {
'penlight',
'nginx-lua-prometheus == 0.20181120',
'lua-resty-jit-uuid',
'lua-resty-redis-connector',
}
build = {
type = "make",
Expand Down
37 changes: 36 additions & 1 deletion gateway/src/apicast/policy/rate_limit/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -139,8 +139,43 @@ regardless of the number of APIcasts deployed, the policy provides the option
of using a shared storage. For now, it only supports Redis.

To use Redis, we just need to provide the `redis_url` attribute in the config
of the policy: `"redis_url": "redis://a_host:6379"`
of the policy:

The format for connecting directly to Redis is:

```
redis://USERNAME:PASSWORD@HOST:PORT/DB
```

The USERNAME, PASSWORD and DB fields are optional; all other components are required.

To connect using Sentinel, use:

```
sentinel://USERNAME:PASSWORD@MASTER_NAME:ROLE/DB
```

* USERNAME, PASSWORD and DB are optional.
* MASTER_NAME identifies a group of Redis instances composed of a master and one or more slaves.
* ROLE must be either `m` or `s` for master / slave respectively; if ROLE is not specified, the
  client will connect to the master.

A table of sentinels must also be supplied:

```
"redis_url": "sentinel://mymaster:m/1"
"redis_sentinels": [
{
"url": "redis://10.7.0.1:16379"
},
{
"url": "redis://10.7.0.2:16379"
},
{
"url": "redis://10.7.0.3:16379"
},
]
```

## Limits with conditions

Expand Down
13 changes: 13 additions & 0 deletions gateway/src/apicast/policy/rate_limit/apicast-policy.json
Original file line number Diff line number Diff line change
Expand Up @@ -207,6 +207,19 @@
"description": "URL of Redis",
"type": "string"
},
"redis_sentiels": {
"type": "array",
"description": "Specify a list of sentinels to connect to",
"items": {
"type": "object",
"properties": {
"url": {
"type": "string",
"description": "The URL of sentinel "
}
}
}
},
"limits_exceeded_error": {
"type": "object",
"properties": {
Expand Down
9 changes: 5 additions & 4 deletions gateway/src/apicast/policy/rate_limit/rate_limit.lua
Original file line number Diff line number Diff line change
Expand Up @@ -179,6 +179,7 @@ function _M.new(config)
self.leaky_bucket_limiters = config.leaky_bucket_limiters or {}
self.fixed_window_limiters = config.fixed_window_limiters or {}
self.redis_url = config.redis_url
self.redis_sentinels = config.redis_sentinels or {}
self.error_settings = init_error_settings(
config.limits_exceeded_error, config.configuration_error)

Expand All @@ -193,7 +194,7 @@ function _M:access(context)
local red
if self.redis_url and self.redis_url ~= '' then
local rederr
red, rederr = redis_shdict.new{ url = self.redis_url }
red, rederr = redis_shdict.new{ url = self.redis_url , sentinels = self.redis_sentinels }
if not red then
ngx.log(ngx.ERR, "failed to connect Redis: ", rederr)
error(self.error_settings, "configuration_issue")
Expand Down Expand Up @@ -252,14 +253,14 @@ function _M:access(context)
return true, delay
end

local function checkin(_, ctx, time, semaphore, redis_url, error_settings)
local function checkin(_, ctx, time, semaphore, redis_url, redis_sentinels, error_settings)
local limiters = ctx.limiters
local keys = ctx.keys
local latency = tonumber(time)
local red
if redis_url and redis_url ~= '' then
local rederr
red, rederr = redis_shdict.new{ url = redis_url }
red, rederr = redis_shdict.new{ url = redis_url , sentinels = redis_sentinels }
if not red then
ngx.log(ngx.ERR, "failed to connect Redis: ", rederr)
error(error_settings, "configuration_issue")
Expand Down Expand Up @@ -297,7 +298,7 @@ function _M:log()
-- lua_max_running_timers
-- Also, we need to check that the each timer is a new fake-request, and it
-- also consumes memory
local ok, err = ngx.timer.at(0, checkin, ngx.ctx, ngx.var.request_time, semaphore, self.redis_url, self.error_settings)
local ok, err = ngx.timer.at(0, checkin, ngx.ctx, ngx.var.request_time, semaphore, self.redis_url, self.redis_sentinels, self.error_settings)
if not ok then
ngx.log(ngx.ERR, "Failed to create timer for checkin limits, err='", err, "'")
end
Expand Down
60 changes: 28 additions & 32 deletions gateway/src/apicast/threescale_utils.lua
Original file line number Diff line number Diff line change
@@ -1,14 +1,9 @@
local sub = string.sub
local tonumber = tonumber

local redis = require 'resty.redis'
local redis = require 'resty.redis.connector'
local env = require 'resty.env'

local resty_resolver = require 'resty.resolver'
local resty_balancer = require 'resty.balancer'

local resty_url = require 'resty.url'

local _M = {} -- public interface

local redis_conf = {
Expand Down Expand Up @@ -120,63 +115,64 @@ function _M.resolve(host, port)
return ip, port
end


function _M.connect_redis(options)
local opts = {}

local url = options and options.url or env.get('REDIS_URL')


if url then
url = resty_url.split(url, 'redis')
if url then
opts.host = url[4]
opts.port = url[5]
opts.db = url[6] and tonumber(sub(url[6], 2))
opts.password = url[3] or url[2]
local params, err = redis.parse_dsn({url=url})
if err then
return nil, _M.error("invalid redis url ", err)
end
opts = params or {}
elseif options then
opts.host = options.host
opts.port = options.port
opts.db = options.db
opts.password = options.password
opts.master_name= options.master_name
opts.role = options.role
end

opts.timeout = options and options.timeout or redis_conf.timeout
opts.connect_timeout = options and options.timeout or redis_conf.timeout
opts.keepalive_timeout = options and options.keepalive_timeout or redis_conf.keepalive
opts.keepalive_poolsize = options and options.keepalive_poolsize or redis_conf.keepalive_poolsize

local host = opts.host or env.get('REDIS_HOST') or "127.0.0.1"
local port = opts.port or env.get('REDIS_PORT') or 6379
opts.host, opts.port = _M.resolve(host, port)

local red = redis:new()

red:set_timeout(opts.timeout)

local ok, err = red:connect(_M.resolve(host, port))
if not ok then
return nil, _M.error("failed to connect to redis on ", host, ":", port, ": ", err)
end
if options.sentinels and #options.sentinels > 0 then
local sentinels = {}

if opts.password then
ok = red:auth(opts.password)
for i, sentinel in ipairs(options.sentinels) do
local params, err = redis.parse_dsn({url=sentinel.url})
if err then
return nil, _M.error("invalid redis url ", err)
end

if not ok then
return nil, _M.error("failed to auth on redis ", host, ":", port)
params.host, params.port = _M.resolve(params.host, params.port)
sentinels[i] = params
end

opts.sentinels = sentinels
end

if opts.db then
ok = red:select(opts.db)
local rc = redis.new(opts)

if not ok then
return nil, _M.error("failed to select db ", opts.db, " on redis ", host, ":", port)
end
local red, err = rc:connect()
if not red then
return nil, _M.error("failed to connect to redis on ", host, ":", port, ": ", err)
end

return red
end

-- return ownership of this connection to the pool
function _M.release_redis(red)
red:set_keepalive(redis_conf.keepalive, redis_conf.poolsize)
redis:set_keepalive(red)
end

local xml_header_len = string.len('<?xml version="1.0" encoding="UTF-8"?>')
Expand Down
15 changes: 15 additions & 0 deletions redis-sentinel/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# Builds a Redis Sentinel image for the docker-compose test environment.
# The sentinel configuration is baked into the image and the server is
# started in sentinel mode by the entrypoint script.
FROM redis

RUN mkdir -p /redis

WORKDIR /redis

# sentinel.conf must be writable by the server process: Sentinel rewrites
# its own config file at runtime (e.g. after discovering replicas or a
# failover) — hence the chown below.
COPY sentinel.conf .
COPY entrypoint.sh /usr/local/bin/

RUN chown redis:redis /redis/* && \
chmod +x /usr/local/bin/entrypoint.sh

# Matches the "port 5000" directive in sentinel.conf and the
# TEST_NGINX_REDIS_SENTINEL_PORT test environment variable.
EXPOSE 5000

ENTRYPOINT ["entrypoint.sh"]
2 changes: 2 additions & 0 deletions redis-sentinel/entrypoint.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
#!/bin/sh
# Start Redis in sentinel mode using the baked-in configuration.
# `exec` replaces the shell so redis-server becomes PID 1 and receives
# signals (e.g. SIGTERM from `docker stop`) directly, allowing a clean
# shutdown instead of waiting for Docker's kill timeout.
exec redis-server /redis/sentinel.conf --sentinel
9 changes: 9 additions & 0 deletions redis-sentinel/sentinel.conf
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# Redis Sentinel configuration for the docker-compose test environment.

# Port the sentinel listens on; matches EXPOSE 5000 in the Dockerfile.
port 5000

# Working directory; Sentinel rewrites this config file at runtime, so it
# must live somewhere writable.
dir /tmp

# Allow the docker-compose service names below to be used instead of IPs
# (NOTE(review): hostname resolution requires Redis 6.2+ — confirm the
# base image version).
sentinel resolve-hostnames yes

# Monitor the master group "redismaster" at redis-master:6379; a quorum
# of 2 sentinels must agree before the master is considered down.
sentinel monitor redismaster redis-master 6379 2
# Mark the master as down after 5 seconds without a valid reply.
sentinel down-after-milliseconds redismaster 5000
# Re-sync replicas to a new master one at a time during failover.
sentinel parallel-syncs redismaster 1
# Abort/retry a failover that has not completed within 60 seconds.
sentinel failover-timeout redismaster 60000
Loading
Loading