#include <types/fd.h>
/* public variables */
-extern int fd_nbspec; // number of speculative events in the list
-extern int fd_nbupdt; // number of updates in the list
-extern unsigned int *fd_spec; // speculative I/O list
-extern unsigned int *fd_updt; // FD updates list
+extern unsigned int *fd_cache; // FD events cache
+extern unsigned int *fd_updt; // FD updates list
+extern int fd_cache_num; // number of events in the cache
+extern int fd_nbupdt; // number of updates in the list
/* Deletes an FD from the fdsets, and recomputes the maxfd limit.
* The file descriptor is also closed.
if (fdtab[fd].cache)
		/* FD already in the fd cache */
return;
- fd_nbspec++;
- fdtab[fd].cache = fd_nbspec;
- fd_spec[fd_nbspec-1] = fd;
+ fd_cache_num++;
+ fdtab[fd].cache = fd_cache_num;
+ fd_cache[fd_cache_num-1] = fd;
}
/* Removes entry used by fd <fd> from the fd cache and replaces it with the
if (!pos)
return;
fdtab[fd].cache = 0;
- fd_nbspec--;
- if (likely(pos <= fd_nbspec)) {
+ fd_cache_num--;
+ if (likely(pos <= fd_cache_num)) {
/* was not the last entry */
- fd = fd_spec[fd_nbspec];
- fd_spec[pos - 1] = fd;
+ fd = fd_cache[fd_cache_num];
+ fd_cache[pos - 1] = fd;
fdtab[fd].cache = pos;
}
}
obj_base_ptr(conn->target));
chunk_appendf(&trash,
- " flags=0x%08x fd=%d fd_spec_e=%02x fd_spec_p=%d updt=%d\n",
+ " flags=0x%08x fd=%d fd.state=%02x fd.cache=%d updt=%d\n",
conn->flags,
conn->t.sock.fd,
conn->t.sock.fd >= 0 ? fdtab[conn->t.sock.fd].state : 0,
/* compute the epoll_wait() timeout */
- if (fd_nbspec || run_queue || signal_queue_len) {
+ if (fd_cache_num || run_queue || signal_queue_len) {
		/* Maybe we still have events in the fd cache, or there are
* some tasks left pending in the run_queue, so we must not
* wait in epoll() otherwise we would delay their delivery by
timeout.tv_sec = 0;
timeout.tv_nsec = 0;
- if (!fd_nbspec && !run_queue && !signal_queue_len) {
+ if (!fd_cache_num && !run_queue && !signal_queue_len) {
if (!exp) {
delta_ms = MAX_DELAY_MS;
timeout.tv_sec = (MAX_DELAY_MS / 1000);
}
/* now let's wait for events */
- if (fd_nbspec || run_queue || signal_queue_len)
+ if (fd_cache_num || run_queue || signal_queue_len)
wait_time = 0;
else if (!exp)
wait_time = MAX_DELAY_MS;
delta.tv_sec = 0;
delta.tv_usec = 0;
- if (!fd_nbspec && !run_queue && !signal_queue_len) {
+ if (!fd_cache_num && !run_queue && !signal_queue_len) {
if (!exp) {
delta_ms = MAX_DELAY_MS;
delta.tv_sec = (MAX_DELAY_MS / 1000);
struct poller cur_poller;
int nbpollers = 0;
-/* FD status is defined by the poller's status and by the speculative I/O list */
-int fd_nbspec = 0; // number of speculative events in the list
-int fd_nbupdt = 0; // number of updates in the list
-unsigned int *fd_spec = NULL; // speculative I/O list
+unsigned int *fd_cache = NULL; // FD events cache
unsigned int *fd_updt = NULL; // FD updates list
+int fd_cache_num = 0; // number of events in the cache
+int fd_nbupdt = 0; // number of updates in the list
/* Deletes an FD from the fdsets, and recomputes the maxfd limit.
* The file descriptor is also closed.
	/* now process cached events if any */
- for (spec_idx = 0; spec_idx < fd_nbspec; ) {
- fd = fd_spec[spec_idx];
+ for (spec_idx = 0; spec_idx < fd_cache_num; ) {
+ fd = fd_cache[spec_idx];
e = fdtab[fd].state;
/*
		/* if the fd was removed from the fd cache, it has been
* replaced by the next one that we don't want to skip !
*/
- if (spec_idx < fd_nbspec && fd_spec[spec_idx] != fd)
+ if (spec_idx < fd_cache_num && fd_cache[spec_idx] != fd)
continue;
spec_idx++;
int p;
struct poller *bp;
- if ((fd_spec = (uint32_t *)calloc(1, sizeof(uint32_t) * global.maxsock)) == NULL)
- goto fail_spec;
+ if ((fd_cache = (uint32_t *)calloc(1, sizeof(uint32_t) * global.maxsock)) == NULL)
+ goto fail_cache;
if ((fd_updt = (uint32_t *)calloc(1, sizeof(uint32_t) * global.maxsock)) == NULL)
goto fail_updt;
return 0;
fail_updt:
- free(fd_spec);
- fail_spec:
+ free(fd_cache);
+ fail_cache:
return 0;
}
}
free(fd_updt);
- free(fd_spec);
+ free(fd_cache);
fd_updt = NULL;
- fd_spec = NULL;
+ fd_cache = NULL;
}
/*