fd_updt[fd_nbupdt++] = fd;
}
-
-#define _GET_NEXT(fd) fdtab[fd].fdcache_entry.next
-#define _GET_PREV(fd) fdtab[fd].fdcache_entry.prev
-
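/* adds fd <fd> to fd list <list> if it was not yet in it */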
static inline void fd_add_to_fd_list(volatile struct fdlist *list, int fd)
{
	int next;
	int new;
	int old;
	int last;
redo_next:
- next = _GET_NEXT(fd);
- /*
- * Check that we're not already in the cache, and if not, lock us.
- * <= -3 means not in the cache, -2 means locked, -1 means we're
- * in the cache, and the last element, >= 0 gives the FD of the next
- * in the cache.
- */
+ next = fdtab[fd].cache.next;
+ /* Check that we're not already in the cache, and if not, lock us. */
if (next >= -2)
goto done;
- if (!HA_ATOMIC_CAS(&_GET_NEXT(fd), &next, -2))
+ if (!HA_ATOMIC_CAS(&fdtab[fd].cache.next, &next, -2))
goto redo_next;
__ha_barrier_store();
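	/* our entry is now locked: its next reads -2 and no other thread
	 * will touch it until we publish a final value below.
	 */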
redo_last:
	/* First, insert in the linked list */
	last = list->last;
	old = -1;
	new = fd;
	if (unlikely(last == -1)) {
/* list is empty, try to add ourselves alone so that list->last=fd */
- _GET_PREV(fd) = last;
+ fdtab[fd].cache.prev = last;
/* Make sure the "prev" store is visible before we update the last entry */
		__ha_barrier_store();
		if (unlikely(!HA_ATOMIC_CAS(&list->last, &old, new)))
			goto redo_last;
		/* list->first was necessarily -1, we're alone at the end of
		 * the list, mark this.
		 */
		list->first = fd;
		__ha_barrier_store();
		/* since we're alone at the end of the list and still locked(-2),
		 * we know no one tried to add past us. Mark the end of list.
		 */
-		_GET_NEXT(fd) = -1;
+		fdtab[fd].cache.next = -1;
		__ha_barrier_store();
		goto done; /* We're done! */
} else {
/* non-empty list, add past the tail */
do {
new = fd;
old = -1;
- _GET_PREV(fd) = last;
+ fdtab[fd].cache.prev = last;
__ha_barrier_store();
			/* adding ourselves past the last element
			 * The CAS will only succeed if its next is -1,
			 * which means it's in the cache, and the last element.
			 */
- if (likely(HA_ATOMIC_CAS(&_GET_NEXT(last), &old, new)))
+ if (likely(HA_ATOMIC_CAS(&fdtab[last].cache.next, &old, new)))
break;
goto redo_last;
		} while (1);
	}
	/* Then, update the last entry */
	if (unlikely(!HA_ATOMIC_CAS(&list->last, &last, fd)))
		goto redo_last;
__ha_barrier_store();
- _GET_NEXT(fd) = -1;
+ fdtab[fd].cache.next = -1;
__ha_barrier_store();
done:
	return;
}

/* removes fd <fd> from fd list <list> */
static inline void fd_rm_from_fd_list(volatile struct fdlist *list, int fd)
{
#if (defined(HA_CAS_IS_8B) || defined(HA_HAVE_CAS_DW))
	volatile struct fdlist_entry cur_list, next_list;
#endif
int old;
int new = -2;
- volatile int prev;
- volatile int next;
+ int prev;
+ int next;
int last;
lock_self:
#if (defined(HA_CAS_IS_8B) || defined(HA_HAVE_CAS_DW))
next_list.next = next_list.prev = -2;
- cur_list.prev = _GET_PREV(fd);
- cur_list.next = _GET_NEXT(fd);
+ cur_list = fdtab[fd].cache;
/* First, attempt to lock our own entries */
do {
		/* The FD is not in the FD cache, give up */
		if (unlikely(cur_list.next <= -3))
			goto done;
		/* one of our entries is locked, let the owner finish and retry */
		if (unlikely(cur_list.prev == -2 || cur_list.next == -2))
			goto lock_self;
} while (
#ifdef HA_CAS_IS_8B
- unlikely(!HA_ATOMIC_CAS(((void **)(void *)&_GET_NEXT(fd)), ((void **)(void *)&cur_list), (*(void **)(void *)&next_list))))
+ unlikely(!HA_ATOMIC_CAS(((void **)(void *)&fdtab[fd].cache.next), ((void **)(void *)&cur_list), (*(void **)(void *)&next_list))))
#else
- unlikely(!__ha_cas_dw((void *)&_GET_NEXT(fd), (void *)&cur_list, (void *)&next_list)))
+ unlikely(!__ha_cas_dw((void *)&fdtab[fd].cache.next, (void *)&cur_list, (void *)&next_list)))
#endif
;
	next = cur_list.next;
	prev = cur_list.prev;
#else
lock_self_next:
- next = _GET_NEXT(fd);
+ next = fdtab[fd].cache.next;
if (next == -2)
goto lock_self_next;
if (next <= -3)
goto done;
- if (unlikely(!HA_ATOMIC_CAS(&_GET_NEXT(fd), &next, -2)))
+ if (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].cache.next, &next, -2)))
goto lock_self_next;
lock_self_prev:
- prev = _GET_PREV(fd);
+ prev = fdtab[fd].cache.prev;
if (prev == -2)
goto lock_self_prev;
- if (unlikely(!HA_ATOMIC_CAS(&_GET_PREV(fd), &prev, -2)))
+ if (unlikely(!HA_ATOMIC_CAS(&fdtab[fd].cache.prev, &prev, -2)))
goto lock_self_prev;
#endif
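	/* Whichever variant ran above, our own entry is now locked: its next
	 * and prev read -2 until we publish final values.
	 */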
	__ha_barrier_store();
	/* Now, lock the entries of our neighbours */
	if (likely(prev != -1)) {
redo_prev:
		old = fd;
- if (unlikely(!HA_ATOMIC_CAS(&_GET_NEXT(prev), &old, new))) {
+ if (unlikely(!HA_ATOMIC_CAS(&fdtab[prev].cache.next, &old, new))) {
if (unlikely(old == -2)) {
/* Neighbour already locked, give up and
* retry again once he's done
*/
- _GET_PREV(fd) = prev;
+ fdtab[fd].cache.prev = prev;
__ha_barrier_store();
- _GET_NEXT(fd) = next;
+ fdtab[fd].cache.next = next;
__ha_barrier_store();
goto lock_self;
			}
			goto redo_prev;
		}
	}
if (likely(next != -1)) {
redo_next:
old = fd;
- if (unlikely(!HA_ATOMIC_CAS(&_GET_PREV(next), &old, new))) {
+ if (unlikely(!HA_ATOMIC_CAS(&fdtab[next].cache.prev, &old, new))) {
if (unlikely(old == -2)) {
/* Neighbour already locked, give up and
* retry again once he's done
*/
if (prev != -1) {
- _GET_NEXT(prev) = fd;
+ fdtab[prev].cache.next = fd;
__ha_barrier_store();
}
- _GET_PREV(fd) = prev;
+ fdtab[fd].cache.prev = prev;
__ha_barrier_store();
- _GET_NEXT(fd) = next;
+ fdtab[fd].cache.next = next;
__ha_barrier_store();
goto lock_self;
			}
			goto redo_next;
		}
	}
	/* make sure neither the list head nor its tail still point to us */
	if (list->first == fd)
		list->first = next;
	__ha_barrier_store();
	last = list->last;
	while (unlikely(last == fd && (!HA_ATOMIC_CAS(&list->last, &last, prev))))
		__ha_barrier_load();
	/* Make sure we let other threads know we're no longer in cache,
	 * before releasing our neighbours.
	 */
__ha_barrier_store();
if (likely(prev != -1))
- _GET_NEXT(prev) = next;
+ fdtab[prev].cache.next = next;
__ha_barrier_store();
if (likely(next != -1))
- _GET_PREV(next) = prev;
+ fdtab[next].cache.prev = prev;
__ha_barrier_store();
/* Ok, now we're out of the fd cache */
- _GET_NEXT(fd) = -(next + 4);
+ fdtab[fd].cache.next = -(next + 4);
__ha_barrier_store();
done:
return;
}
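/* Illustrative usage sketch, not part of the patch: callers pair these
 * helpers around a shared list head. "fd_cache" below is a hypothetical
 * head; an empty list has first = last = -1.
 */
static volatile struct fdlist fd_cache = { .first = -1, .last = -1 };

static inline void fd_cache_example(int fd)
{
	fd_add_to_fd_list(&fd_cache, fd);  /* lock-free append at the tail */
	fd_rm_from_fd_list(&fd_cache, fd); /* lock-free unlink */
}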
-#undef _GET_NEXT
-#undef _GET_PREV
-
-
/* Removes the entry used by fd <fd> from the FD cache and replaces it with
 * the last one.
 * If the fd has no entry assigned, return immediately.
 */
#define DEAD_FD_MAGIC 0xFDDEADFD
+/* fdlist_entry: entry used by the fd cache.
+ * >= 0 means we're in the cache and gives the FD of the next in the cache,
+ * -1 means we're in the cache and the last element,
+ * -2 means the entry is locked,
+ * <= -3 means not in the cache, and next element is -4-fd
+ *
+ * It must remain 8-aligned so that aligned CAS operations may be done on both
+ * entries at once.
+ */
struct fdlist_entry {
- volatile int next;
- volatile int prev;
+ int next;
+ int prev;
} __attribute__ ((aligned(8)));
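/* Minimal sketch, not part of the patch, of how the "next" encoding above
 * round-trips: on removal the code stores -(next + 4), i.e. -4 - next, which
 * is <= -3 for any valid successor (>= -1), and the successor is recovered
 * as -4 - value. Helper names are illustrative only.
 */
static inline int fd_cache_encode_detached(int next)
{
	return -4 - next;   /* next >= -1 always yields a value <= -3 */
}

static inline int fd_cache_decode_detached(int value)
{
	return -4 - value;  /* recover the successor saved at detach time */
}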
+/* head of the fd cache */
struct fdlist {
- volatile int first;
- volatile int last;
+ int first;
+ int last;
} __attribute__ ((aligned(8)));
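/* Sketch, not part of the patch: a head is empty when first = last = -1,
 * matching the "-1 terminates the list" convention documented above.
 * fdlist_init is an illustrative name.
 */
static inline void fdlist_init(volatile struct fdlist *list)
{
	list->first = list->last = -1;
}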
/* info about one given fd */
struct fdtab {
	unsigned long thread_mask;           /* mask of thread IDs authorized to process the task */
unsigned long polled_mask; /* mask of thread IDs currently polling this fd */
unsigned long update_mask; /* mask of thread IDs having an update for fd */
- struct fdlist_entry fdcache_entry; /* Entry in the fdcache */
+ struct fdlist_entry cache; /* Entry in the fdcache */
void (*iocb)(int fd); /* I/O handler */
void *owner; /* the connection or listener associated with this fd, NULL if closed */
unsigned char state; /* FD state for read and write directions (2*3 bits) */