[2nd attempt, the first one ate some of my formatting]
Hrm, I can't be positive the stock nginx cache modules won't work for you, but my suspicion is that they would not. My reasoning is that a typical reverse cache will check its cache and try to serve anything possible out of it first, rather than first checking whether or not the backend is OK. That didn't sound like what you were needing to fulfill your business requirements. Please let me know if I've misunderstood.
As to payload sizes, memcached has a default item-size limit of 1 MiB (raisable with the -I flag), so if your payloads are only up to 500 KiB you shouldn't have a problem there.
You might want to look into the openresty srcache module. It lets you define the GET / PUT routines to populate the cache, and you could implement the GET in such a way that the first thing it did was check the backend.
So for example if you had nginx set up to use srcache:
# Internal-only endpoint that srcache uses as its cache channel.
# The actual GET/PUT cache logic lives in memcache.lua below.
location /memc {
internal;
lua_code_cache on;
# needed so the PUT body (the response to store) is read for the Lua handler
lua_need_request_body on;
content_by_lua_file /usr/local/openresty/nginx/conf/memcache.lua;
}
# Front-door location: srcache issues a GET subrequest to /memc before
# serving, and stores eligible responses back via a PUT subrequest.
location / {
# cache key is the request URI plus its query string
set $key $uri$args;
srcache_fetch GET /memc $key;
srcache_store PUT /memc $key;
# only cache these upstream status codes
srcache_store_statuses 200 301 302;
}
and your memcache.lua held the logic for GET / PUT operations. The following is just sketched out — I haven't even tried to run it — but I think it's enough to give the idea.
--
-- memcache.lua - GET/PUT subrequest handler to get or set memcache data
--
local libstr = require "resty.string"
local libmd5 = require "resty.md5"
local memcached = require "resty.memcached"
local http = require "resty.http"
--
-- read the subrequest sent by srcache
--
-- With `srcache_fetch GET /memc $key`, the key arrives as the
-- subrequest's query string, so `path` here holds the original
-- $uri$args of the client request.
local method = ngx.req.get_method()
local path = ngx.var.query_string
-- body is only populated on the PUT (srcache_store) subrequest;
-- requires `lua_need_request_body on` in the /memc location
local body = ngx.req.get_body_data()
local md5 = libmd5:new()
if not md5 then
ngx.log(ngx.ERR, "unable to load md5")
ngx.exit(500)
end
--
-- build memcache key based on the path
--
-- hashing the path keeps the key well under memcached's 250-byte key
-- limit and avoids characters memcached forbids in keys
local ok = md5:update(path)
if not ok then
ngx.log(ngx.ERR, "unable to add data to md5")
ngx.exit(500)
end
local memc_key = libstr.to_hex(md5:final())
--
-- set up the memcached connection
--
local memc = memcached:new()
-- NB: probably should do something ketama-like here
local ok, err = memc:connect("127.0.0.1", 11211)
if not ok then
ngx.log(ngx.ERR, err)
-- NOTE(review): plain 500 here vs ngx.HTTP_INTERNAL_SERVER_ERROR
-- further down -- same value, but worth making consistent
ngx.exit(500)
end
-- memcache get or set: srcache sends GET to probe the cache channel and
-- PUT to store the upstream response body it captured.
if method == "GET" then
    -- Business requirement: a healthy backend always wins over the
    -- cache, so probe the backend before consulting memcached.
    local con = http.new()
    -- TODO: handle errors
    con:set_timeout(500) -- ms; keep the probe cheap
    -- NOTE(review): lua-resty-http requires an explicit connect()
    -- before request() -- point this at the real backend host/port.
    local ok, err = con:connect("127.0.0.1", 80)
    -- TODO: handle errors
    -- BUG FIX: the original called the undefined global `httpc` here;
    -- the connection handle created above is `con`.
    local res, err = con:request({
        path = path,
        headers = {
        },
    })
    -- TODO: handle errors
    -- check res.headers content-length or res.body length
    local ok, err = con:set_keepalive()
    -- TODO: handle errors
    -- if length is ok, send the response from the backend
    -- using ngx.header and ngx.print, and returning,
    -- or fall through to check the cache for a valid response
    -- (this following logic could of course be moved into
    -- its own function to make this code cleaner).

    -- check our cache next
    local res, flags, err = memc:get(memc_key)
    if err then
        ngx.log(ngx.ERR, err)
        ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR)
    end
    -- send response status and body
    if not res then
        -- cache miss: release the memcached connection to the pool and
        -- return 404 so srcache falls through to the upstream
        local ok, err = memc:set_keepalive()
        if not ok then
            ngx.log(ngx.WARN, "unable to memc:set_keepalive: " .. err)
        end
        ngx.exit(ngx.HTTP_NOT_FOUND)
    else
        -- cache hit: emit the cached body for srcache to serve
        local ok, err = memc:set_keepalive()
        if not ok then
            ngx.log(ngx.WARN, "unable to memc:set_keepalive: " .. err)
        end
        ngx.print(res)
    end
elseif method == "PUT" then
    -- store the captured response; exptime 0 = never expire --
    -- tune this if stale entries are a concern
    local ok, err = memc:set(memc_key, body, 0)
    if not ok then
        ngx.log(ngx.ERR, err)
        ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR)
    end
    local ok, err = memc:set_keepalive()
    if not ok then
        ngx.log(ngx.WARN, "unable to memc:set_keepalive: " .. err)
    end
    ngx.exit(ngx.HTTP_CREATED)
end