*/
unsigned int read_freq_ctr(struct freq_ctr *ctr);
+/* returns the number of remaining events that can occur on this freq counter
+ * while respecting <freq> and taking into account that <pend> events are
+ * already known to be pending. Returns 0 if limit was reached.
+ */
+unsigned int freq_ctr_remain(struct freq_ctr *ctr, unsigned int freq, unsigned int pend);
+
+/* return the expected wait time in ms before the next event may occur,
+ * respecting frequency <freq>, and assuming there may already be some pending
+ * events. It returns zero if we can proceed immediately, otherwise the wait
+ * time, which will be rounded down by 1 ms for better accuracy, with a
+ * minimum of one ms.
+ */
+unsigned int next_event_delay(struct freq_ctr *ctr, unsigned int freq, unsigned int pend);
+
#endif /* _PROTO_FREQ_CTR_H */
/*
int cfd;
int max_accept = global.tune.maxaccept;
- while (p->feconn < p->maxconn &&
- (!p->fe_maxsps || read_freq_ctr(&p->fe_sess_per_sec) < p->fe_maxsps) &&
- max_accept--) {
+ if (p->fe_maxsps) {
+ int max = freq_ctr_remain(&p->fe_sess_per_sec, p->fe_maxsps, 0);
+ if (max_accept > max)
+ max_accept = max;
+ }
+
+ while (p->feconn < p->maxconn && max_accept--) {
struct sockaddr_storage addr;
socklen_t laddr = sizeof(addr);
#include <common/config.h>
#include <common/standard.h>
#include <common/time.h>
+#include <common/tools.h>
#include <proto/freq_ctr.h>
/* Read a frequency counter taking history into account for missing time in
* will be inaccurate but still appropriate for max checking. One trick we use for
* low values is to specially handle the case where the rate is between 0 and 1
* in order to avoid flapping while waiting for the next event.
+ *
+ * For immediate limit checking, it's recommended to use freq_ctr_remain() and
+ * next_event_delay() instead which do not have the flapping correction, so
+ * that even frequencies as low as one event/period are properly handled.
*/
unsigned int read_freq_ctr(struct freq_ctr *ctr)
{
return cur + mul32hi(ctr->prev_ctr, ~curr_sec_ms_scaled);
}
+/* returns the number of remaining events that can occur on this freq counter
+ * while respecting <freq> and taking into account that <pend> events are
+ * already known to be pending. Returns 0 if the limit was reached.
+ */
+unsigned int freq_ctr_remain(struct freq_ctr *ctr, unsigned int freq, unsigned int pend)
+{
+ unsigned int cur;
+ /* roll the counter over to a fresh period if the current one is stale */
+ if (unlikely(ctr->curr_sec != now.tv_sec))
+ rotate_freq_ctr(ctr);
+
+ /* estimate of events over the sliding last second: the previous
+ * period's count is downscaled for the time already elapsed in the
+ * current period, then the current count and pending events are added.
+ */
+ cur = mul32hi(ctr->prev_ctr, ~curr_sec_ms_scaled);
+ cur += ctr->curr_ctr + pend;
+
+ /* budget exhausted when the estimate meets or exceeds the limit */
+ if (cur >= freq)
+ return 0;
+ return freq - cur;
+}
+
+/* return the expected wait time in ms before the next event may occur,
+ * respecting frequency <freq>, and assuming there may already be some pending
+ * events. It returns zero if we can proceed immediately, otherwise the wait
+ * time, which will be rounded down by 1 ms for better accuracy, with a
+ * minimum of one ms.
+ */
+unsigned int next_event_delay(struct freq_ctr *ctr, unsigned int freq, unsigned int pend)
+{
+ unsigned int cur, wait;
+
+ /* roll the counter over to a fresh period if the current one is stale */
+ if (unlikely(ctr->curr_sec != now.tv_sec))
+ rotate_freq_ctr(ctr);
+
+ /* estimate of events over the sliding last second, pending included */
+ cur = mul32hi(ctr->prev_ctr, ~curr_sec_ms_scaled);
+ cur += ctr->curr_ctr + pend;
+
+ /* under the limit: no need to wait */
+ if (cur < freq)
+ return 0;
+
+ /* spread events out at roughly one per 1000/cur ms; 999 rounds the
+ * delay down by 1 ms as announced above.
+ * NOTE(review): if freq == 0 and no event occurred yet, cur can be 0
+ * here and 999/cur divides by zero. Visible callers guard with a
+ * non-zero fe_maxsps before calling — confirm all callers do.
+ */
+ wait = 999 / cur;
+ return MAX(wait, 1);
+}
+
/*
* Local variables:
{
struct proxy *p;
struct listener *l;
+ unsigned int wait;
p = proxy;
if (p->feconn >= p->maxconn)
goto do_block;
- if (p->fe_maxsps && read_freq_ctr(&p->fe_sess_per_sec) >= p->fe_maxsps) {
+ if (p->fe_maxsps &&
+ (wait = next_event_delay(&p->fe_sess_per_sec, p->fe_maxsps, 0))) {
/* we're blocking because a limit was reached on the number of
* requests/s on the frontend. We want to re-check ASAP, which
* means in 1 ms before estimated expiration date, because the
* timer will have settled down. Note that we may already be in
* IDLE state here.
*/
- int wait = 1000 / p->fe_maxsps - 1;
- wait = MAX(wait, 1);
*next = tick_first(*next, tick_add(now_ms, wait));
goto do_block;
}