Otherwise the distribution of queries to the backends is very hard
to predict since every thread uses its own counter. We used to start
only one worker thread at startup, and did not send enough queries
to get additional workers fired up.
_testServer2Port = 5351
_config_params = ['_testServerPort', '_testServer2Port']
_config_template = """
-- otherwise we start too many TCP workers, and as each thread
-- uses its own counter this makes the TCP queries distribution hard to predict
setMaxTCPClientThreads(1)
setServerPolicyLuaFFIPerThread("luaffiroundrobin", [[
local ffi = require("ffi")
local C = ffi.C