}
fillMSGHdr(&u.msgh, &u.iov, nullptr, 0, (char*)&(*p)[0], p->size(), &dest);
if((ret=sendmsg(sockets[count % sockets.size()]->getHandle(),
&u.msgh, 0)))
if(ret < 0)
unixDie("sendmsg");


if(!(count%burst)) {
nBursts++;
// Calculate the time in nsec we need to sleep to the next burst.
We then move the 1000 unique queries to the 'known' pool.
For the next second, say 20000 qps, we know we are going to need 2000 new queries,
so we take 2000 from the unknown pool. Then we need 18000 cache hits. We can get 1000 from
the known pool, leaving us down 17000. Or, we have 3000 in total now and we need 2000. We simply
repeat the 3000 mix we have ~7 times. The 2000 can now go to the known pool too.
For the next second, say 30000 qps, we'll need 3000 cache misses, which we get from
the unknown pool. To this we add 3000 queries from the known pool. Next up we repeat this batch 5
times.
if (!g_quiet) {
cout<<"Generated "<<unknown.size()<<" ready to use queries"<<endl;
}
auto sockets = std::make_shared<std::vector<std::unique_ptr<Socket>>>();
ComboAddress dest;
try {
dt.set();
sendPackets(*sockets, toSend, qps, dest, ecsRange);
const auto udiff = dt.udiffNoReset();
const auto realqps=toSend.size()/(udiff/1000000.0);
if (!g_quiet) {
cout<<"Achieved "<<realqps<<" qps over "<< udiff/1000000.0<<" seconds"<<endl;
}
usleep(50000);
const auto received = g_recvcounter.load();
const auto udiffReceived = dt.udiff();