git.ipfire.org Git - thirdparty/haproxy.git/commitdiff
WIP/MINOR: stick-tables: Rename stksess shards to use buckets
author Christopher Faulet <cfaulet@haproxy.com>
Wed, 15 Oct 2025 12:34:02 +0000 (14:34 +0200)
committer Christopher Faulet <cfaulet@haproxy.com>
Tue, 21 Oct 2025 13:17:55 +0000 (15:17 +0200)
include/haproxy/stick_table-t.h
include/haproxy/stick_table.h
src/hlua_fcn.c
src/stick_table.c

include/haproxy/stick_table-t.h
index b6167e18d65a3ed7ddece56140e9f54f6763bb78..bab43ced2af8db78e7a15f0d6eea6b2dc0e439e0 100644 (file)
@@ -227,7 +227,7 @@ struct stktable {
 
                __decl_thread(HA_RWLOCK_T sh_lock); /* for the trees above */
                int next_exp;    /* Next expiration for this table */
-       } shards[CONFIG_HAP_TBL_BUCKETS];
+       } buckets[CONFIG_HAP_TBL_BUCKETS];
 
        unsigned int refcnt;     /* number of local peers over all peers sections
                                    attached to this table */
include/haproxy/stick_table.h
index c72b4a0d8736dc31ce2768aeb6e2ab5f73caa917..37d85faf3194212c35b9aa53e46e6bfd894bb074 100644 (file)
@@ -193,11 +193,11 @@ static inline void *stktable_data_ptr_idx(struct stktable *t, struct stksess *ts
        return __stktable_data_ptr(t, ts, type) + idx*stktable_type_size(stktable_data_types[type].std_type);
 }
 
-/* return a shard number for key <key> of len <len> present in table <t>, for
+/* return a bucket number for key <key> of len <len> present in table <t>, for
  * use with the tree indexing. The value will be from 0 to
  * CONFIG_HAP_TBL_BUCKETS-1.
  */
-static inline uint stktable_calc_shard_num(const struct stktable *t, const void *key, size_t len)
+static inline uint stktable_calc_bucket_num(const struct stktable *t, const void *key, size_t len)
 {
 #if CONFIG_HAP_TBL_BUCKETS > 1
        return XXH32(key, len, t->hash_seed) % CONFIG_HAP_TBL_BUCKETS;
@@ -219,13 +219,13 @@ static inline int __stksess_kill_if_expired(struct stktable *t, struct stksess *
  * Decrease the refcount of a stksess and release it if the refcount falls to 0
  * _AND_ if the session expired. Note, the refcount is always decremented.
  *
- * This function locks the corresponding table shard to proceed. When this
+ * This function locks the corresponding table bucket to proceed. When this
  * function is called, the caller must be sure it owns a reference on the
  * stksess (refcount >= 1).
  */
 static inline void stksess_kill_if_expired(struct stktable *t, struct stksess *ts)
 {
-       uint shard;
+       uint bucket;
        size_t len;
 
        if (t->expire != TICK_ETERNITY && tick_is_expired(ts->expire, now_ms)) {
@@ -234,15 +234,15 @@ static inline void stksess_kill_if_expired(struct stktable *t, struct stksess *t
                else
                        len = t->key_size;
 
-               shard = stktable_calc_shard_num(t, ts->key.key, len);
+               bucket = stktable_calc_bucket_num(t, ts->key.key, len);
 
-               /* make the compiler happy when shard is not used without threads */
-               ALREADY_CHECKED(shard);
+               /* make the compiler happy when bucket is not used without threads */
+               ALREADY_CHECKED(bucket);
 
-               HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
+               HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
                if (!HA_ATOMIC_SUB_FETCH(&ts->ref_cnt, 1))
                        __stksess_kill_if_expired(t, ts);
-               HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
+               HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
        }
        else
                HA_ATOMIC_SUB_FETCH(&ts->ref_cnt, 1);
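
The comment above spells out the contract: the caller already holds a reference, the refcount is always dropped, and the entry is only freed when that drop releases the last reference while the bucket's write lock is held. A condensed sketch of this release pattern, using pthread and hypothetical helpers rather than HAProxy's API:

#include <pthread.h>

struct entry { int ref_cnt; };

struct bucket { pthread_rwlock_t lock; };

/* drop one reference on <e>; if it was the last one and the entry is
 * expired, free it under the bucket's write lock so no concurrent lookup
 * can pick it up again -- this mirrors stksess_kill_if_expired()
 */
static void release_sketch(struct bucket *b, unsigned int bucket, struct entry *e,
                           int expired, void (*kill)(struct entry *))
{
        if (expired) {
                pthread_rwlock_wrlock(&b[bucket].lock);
                if (!__atomic_sub_fetch(&e->ref_cnt, 1, __ATOMIC_SEQ_CST))
                        kill(e);       /* last reference gone, remove and free */
                pthread_rwlock_unlock(&b[bucket].lock);
        }
        else {
                __atomic_sub_fetch(&e->ref_cnt, 1, __ATOMIC_SEQ_CST);
        }
}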
src/hlua_fcn.c
index 6d371640447344551fd466eecdb4d9eb051b86b0..6be8723b0cb8538a50899d093f36257fc107761d 100644 (file)
@@ -969,7 +969,7 @@ int hlua_stktable_dump(lua_State *L)
        int i;
        int skip_entry;
        void *ptr;
-       int shard = 0; // FIXME: this should be stored in the context and iterate to scan the table
+       int bucket = 0; // FIXME: this should be stored in the context and iterate to scan the table
 
        t = hlua_check_stktable(L, 1);
        type = lua_type(L, 2);
@@ -1030,17 +1030,17 @@ int hlua_stktable_dump(lua_State *L)
 
        lua_newtable(L);
 
- next_shard:
-       HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
-       eb = ebmb_first(&t->shards[shard].keys);
+ next_bucket:
+       HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
+       eb = ebmb_first(&t->buckets[bucket].keys);
        for (n = eb; n; n = ebmb_next(n)) {
                ts = ebmb_entry(n, struct stksess, key);
                if (!ts) {
-                       HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
+                       HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
                        goto done;
                }
                HA_ATOMIC_INC(&ts->ref_cnt);
-               HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
+               HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
 
                /* multi condition/value filter */
                skip_entry = 0;
@@ -1079,7 +1079,7 @@ int hlua_stktable_dump(lua_State *L)
                }
 
                if (skip_entry) {
-                       HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
+                       HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
                        HA_ATOMIC_DEC(&ts->ref_cnt);
                        continue;
                }
@@ -1103,14 +1103,14 @@ int hlua_stktable_dump(lua_State *L)
                lua_newtable(L);
                hlua_stktable_entry(L, t, ts);
                lua_settable(L, -3);
-               HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
+               HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
                HA_ATOMIC_DEC(&ts->ref_cnt);
        }
-       HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
+       HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
  done:
-       shard++;
-       if (shard < CONFIG_HAP_TBL_BUCKETS)
-               goto next_shard;
+       bucket++;
+       if (bucket < CONFIG_HAP_TBL_BUCKETS)
+               goto next_bucket;
 
        return 1;
 }
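
The dump loop above follows a pattern used throughout the file: write-lock one bucket, pin the current entry through ref_cnt, drop the lock while doing the slow per-entry work (here, building Lua tables), then re-lock to release the pin and advance. A simplified, self-contained sketch of that pattern, with pthread rwlocks standing in for sh_lock and hypothetical structures in place of HAProxy's:

#include <pthread.h>
#include <stddef.h>

struct entry {
        struct entry *next;
        int ref_cnt;            /* pinned while the dumper works on it */
};

struct bucket {
        pthread_rwlock_t lock;  /* plays the role of sh_lock */
        struct entry *head;
};

#define N_BUCKETS 16

/* visit every entry of every bucket, never holding the bucket lock while
 * running the (possibly slow) callback, mirroring hlua_stktable_dump()
 */
static void dump_all(struct bucket *b, void (*cb)(struct entry *))
{
        for (int i = 0; i < N_BUCKETS; i++) {
                pthread_rwlock_wrlock(&b[i].lock);
                struct entry *e = b[i].head;

                while (e) {
                        __atomic_add_fetch(&e->ref_cnt, 1, __ATOMIC_SEQ_CST);
                        pthread_rwlock_unlock(&b[i].lock);

                        cb(e);  /* slow work done without the bucket lock */

                        pthread_rwlock_wrlock(&b[i].lock);
                        __atomic_sub_fetch(&e->ref_cnt, 1, __ATOMIC_SEQ_CST);
                        e = e->next;
                }
                pthread_rwlock_unlock(&b[i].lock);
        }
}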
src/stick_table.c
index cfd50193f7b28b3603933991e6548588478c63d8..7559eba8420ba0d56516bc315414b750390a8a90 100644 (file)
@@ -101,7 +101,7 @@ void __stksess_free(struct stktable *t, struct stksess *ts)
  */
 void stksess_free(struct stktable *t, struct stksess *ts)
 {
-       uint shard;
+       uint bucket;
        size_t len;
        void *data;
 
@@ -116,10 +116,10 @@ void stksess_free(struct stktable *t, struct stksess *ts)
        else
                len = t->key_size;
 
-       shard = stktable_calc_shard_num(t, ts->key.key, len);
+       bucket = stktable_calc_bucket_num(t, ts->key.key, len);
 
-       /* make the compiler happy when shard is not used without threads */
-       ALREADY_CHECKED(shard);
+       /* make the compiler happy when bucket is not used without threads */
+       ALREADY_CHECKED(bucket);
 
        __stksess_free(t, ts);
 }
@@ -157,13 +157,13 @@ int __stksess_kill(struct stktable *t, struct stksess *ts)
  * Decrease the refcount of a stksess and release it if the refcount falls to 0.
  * Returns non-zero if deleted, zero otherwise.
  *
- * This function locks the corresponding table shard to proceed. When this
+ * This function locks the corresponding table bucket to proceed. When this
  * function is called, the caller must be sure it owns a reference on the
  * stksess (refcount >= 1).
  */
 int stksess_kill(struct stktable *t, struct stksess *ts)
 {
-       uint shard;
+       uint bucket;
        size_t len;
        int ret = 0;
 
@@ -172,15 +172,15 @@ int stksess_kill(struct stktable *t, struct stksess *ts)
        else
                len = t->key_size;
 
-       shard = stktable_calc_shard_num(t, ts->key.key, len);
+       bucket = stktable_calc_bucket_num(t, ts->key.key, len);
 
-       /* make the compiler happy when shard is not used without threads */
-       ALREADY_CHECKED(shard);
+       /* make the compiler happy when bucket is not used without threads */
+       ALREADY_CHECKED(bucket);
 
-       HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
+       HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
        if (!HA_ATOMIC_SUB_FETCH(&ts->ref_cnt, 1))
                ret = __stksess_kill(t, ts);
-       HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
+       HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
 
        return ret;
 }
@@ -282,38 +282,38 @@ int stktable_trash_oldest(struct stktable *t)
        struct eb32_node *eb;
        struct mt_list link;
        int max_search; // no more than 50% misses
-       int max_per_shard;
-       int done_per_shard;
+       int max_per_bucket;
+       int done_per_bucket;
        int batched = 0;
        int to_batch;
        int failed_once = 0;
        int looped;
-       int shard;
-       int init_shard;
+       int bucket;
+       int init_bucket;
 
-       /* start from a random shard number to avoid starvation in the last ones */
-       shard = init_shard = statistical_prng_range(CONFIG_HAP_TBL_BUCKETS - 1);
+       /* start from a random bucket number to avoid starvation in the last ones */
+       bucket = init_bucket = statistical_prng_range(CONFIG_HAP_TBL_BUCKETS - 1);
 
        to_batch = STKTABLE_MAX_UPDATES_AT_ONCE;
 
        max_search = to_batch * 2; // no more than 50% misses
-       max_per_shard = (to_batch + CONFIG_HAP_TBL_BUCKETS - 1) / CONFIG_HAP_TBL_BUCKETS;
+       max_per_bucket = (to_batch + CONFIG_HAP_TBL_BUCKETS - 1) / CONFIG_HAP_TBL_BUCKETS;
 
        do {
-               done_per_shard = 0;
+               done_per_bucket = 0;
                looped = 0;
 
-               if (HA_RWLOCK_TRYWRLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock) != 0) {
+               if (HA_RWLOCK_TRYWRLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock) != 0) {
                        if (batched)
                                break; // no point insisting, we already have or have made some room
                        if (failed_once)
                                break; // already waited once, that's enough
                        failed_once = 1;
-                       HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
+                       HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
                }
 
-               eb = eb32_lookup_ge(&t->shards[shard].exps, now_ms - TIMER_LOOK_BACK);
-               while (batched < to_batch && done_per_shard < max_per_shard) {
+               eb = eb32_lookup_ge(&t->buckets[bucket].exps, now_ms - TIMER_LOOK_BACK);
+               while (batched < to_batch && done_per_bucket < max_per_bucket) {
                        if (unlikely(!eb)) {
                                /* we might have reached the end of the tree, typically because
                                 * <now_ms> is in the first half and we're first scanning the last
@@ -323,7 +323,7 @@ int stktable_trash_oldest(struct stktable *t)
                                if (looped)
                                        break;
                                looped = 1;
-                               eb = eb32_first(&t->shards[shard].exps);
+                               eb = eb32_first(&t->buckets[bucket].exps);
                                if (likely(!eb))
                                        break;
                        }
@@ -351,7 +351,7 @@ int stktable_trash_oldest(struct stktable *t)
                                }
 
                                ts->exp.key = ts->expire;
-                               eb32_insert(&t->shards[shard].exps, &ts->exp);
+                               eb32_insert(&t->buckets[bucket].exps, &ts->exp);
 
                                /* the update might have jumped beyond the next element,
                                 * possibly causing a wrapping. We need to check whether
@@ -362,7 +362,7 @@ int stktable_trash_oldest(struct stktable *t)
                                 * use the current one.
                                 */
                                if (!eb)
-                                       eb = eb32_first(&t->shards[shard].exps);
+                                       eb = eb32_first(&t->buckets[bucket].exps);
 
                                if (!eb || tick_is_lt(ts->exp.key, eb->key))
                                        eb = &ts->exp;
@@ -392,14 +392,14 @@ int stktable_trash_oldest(struct stktable *t)
                        ebmb_delete(&ts->key);
                        __stksess_free(t, ts);
                        batched++;
-                       done_per_shard++;
+                       done_per_bucket++;
 
                        /* don't waste more time here if we're not alone */
                        if (failed_once)
                                break;
                }
 
-               HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
+               HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
 
                /*
                 * There is not much point in trying to purge more entries,
@@ -409,10 +409,10 @@ int stktable_trash_oldest(struct stktable *t)
                 */
                if (batched > 0)
                        break;
-               shard++;
-               if (shard >= CONFIG_HAP_TBL_BUCKETS)
-                       shard = 0;
-       } while (max_search > 0 && shard != init_shard);
+               bucket++;
+               if (bucket >= CONFIG_HAP_TBL_BUCKETS)
+                       bucket = 0;
+       } while (max_search > 0 && bucket != init_bucket);
 
        return batched;
 }
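
stktable_trash_oldest() walks the renamed buckets starting from a random one and wraps around, so the purge pressure does not always land on the same trailing buckets. Here is a hedged sketch of that scan order, with the locked eviction work abstracted into a callback and all names illustrative:

#include <stdlib.h>

#define N_BUCKETS 16   /* stands in for CONFIG_HAP_TBL_BUCKETS */

/* scan buckets starting from a random one and wrap around, stopping as soon
 * as some room was made; <purge_one> stands in for the locked eviction loop
 * in stktable_trash_oldest() and returns the number of entries it freed
 */
static int trash_oldest_sketch(int budget, int (*purge_one)(int bucket, int budget))
{
        int start = rand() % N_BUCKETS;   /* plays the role of statistical_prng_range() */
        int bucket = start;
        int batched = 0;

        do {
                batched += purge_one(bucket, budget - batched);
                if (batched > 0)
                        break;            /* we made some room, don't insist */
                if (++bucket >= N_BUCKETS)
                        bucket = 0;
        } while (bucket != start);

        return batched;
}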
@@ -462,17 +462,17 @@ struct stksess *stksess_new(struct stktable *t, struct stktable_key *key)
 }
 
 /*
- * Looks in table <t> for a sticky session matching key <key> in shard <shard>.
+ * Looks in table <t> for a sticky session matching key <key> in bucket <bucket>.
  * Returns a pointer to the requested sticky session or NULL if none was found.
  */
-struct stksess *__stktable_lookup_key(struct stktable *t, struct stktable_key *key, uint shard)
+struct stksess *__stktable_lookup_key(struct stktable *t, struct stktable_key *key, uint bucket)
 {
        struct ebmb_node *eb;
 
        if (t->type == SMP_T_STR)
-               eb = ebst_lookup_len(&t->shards[shard].keys, key->key, key->key_len + 1 < t->key_size ? key->key_len : t->key_size - 1);
+               eb = ebst_lookup_len(&t->buckets[bucket].keys, key->key, key->key_len + 1 < t->key_size ? key->key_len : t->key_size - 1);
        else
-               eb = ebmb_lookup(&t->shards[shard].keys, key->key, t->key_size);
+               eb = ebmb_lookup(&t->buckets[bucket].keys, key->key, t->key_size);
 
        if (unlikely(!eb)) {
                /* no session found */
@@ -491,7 +491,7 @@ struct stksess *__stktable_lookup_key(struct stktable *t, struct stktable_key *k
 struct stksess *stktable_lookup_key(struct stktable *t, struct stktable_key *key)
 {
        struct stksess *ts;
-       uint shard;
+       uint bucket;
        size_t len;
 
        if (t->type == SMP_T_STR)
@@ -499,13 +499,13 @@ struct stksess *stktable_lookup_key(struct stktable *t, struct stktable_key *key
        else
                len = t->key_size;
 
-       shard = stktable_calc_shard_num(t, key->key, len);
+       bucket = stktable_calc_bucket_num(t, key->key, len);
 
-       HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
-       ts = __stktable_lookup_key(t, key, shard);
+       HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
+       ts = __stktable_lookup_key(t, key, bucket);
        if (ts)
                HA_ATOMIC_INC(&ts->ref_cnt);
-       HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
+       HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
 
        return ts;
 }
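
stktable_lookup_key() shows the read path: hash the key to a bucket, take that bucket's read lock, look the key up in the bucket's tree, and take a reference before releasing the lock so the entry cannot be freed under the caller. A simplified sketch of that pin-before-unlock pattern, using pthread rwlocks and hypothetical types rather than HAProxy's:

#include <pthread.h>
#include <stddef.h>

struct entry {
        int ref_cnt;           /* pinned entries cannot be freed */
        /* key and data would live here */
};

struct bucket {
        pthread_rwlock_t lock; /* plays the role of sh_lock */
        /* the real table keeps a per-bucket ebtree of keys here */
};

/* look a key up in its bucket and return it pinned; <find_in_bucket>
 * stands in for __stktable_lookup_key()
 */
static struct entry *lookup_pinned(struct bucket *buckets, unsigned int bucket,
                                   struct entry *(*find_in_bucket)(unsigned int))
{
        struct entry *e;

        pthread_rwlock_rdlock(&buckets[bucket].lock);
        e = find_in_bucket(bucket);
        if (e)
                __atomic_add_fetch(&e->ref_cnt, 1, __ATOMIC_SEQ_CST); /* pin before unlocking */
        pthread_rwlock_unlock(&buckets[bucket].lock);

        return e; /* caller now owns one reference and must release it later */
}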
@@ -520,15 +520,15 @@ struct stksess *stktable_lookup_ptr(struct stktable *t, void *ptr)
 {
        struct stksess *ts = NULL;
        struct ebmb_node *eb;
-       int shard;
+       int bucket;
 
-       for (shard = 0; shard < CONFIG_HAP_TBL_BUCKETS; shard++) {
-               HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
+       for (bucket = 0; bucket < CONFIG_HAP_TBL_BUCKETS; bucket++) {
+               HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
                /* a linear search is performed; this could be optimized by adding
                 * an eb node dedicated to ptr lookups into stksess struct to
                 * leverage eb_lookup function instead.
                 */
-               eb = ebmb_first(&t->shards[shard].keys);
+               eb = ebmb_first(&t->buckets[bucket].keys);
                while (eb) {
                        struct stksess *cur;
 
@@ -541,7 +541,7 @@ struct stksess *stktable_lookup_ptr(struct stktable *t, void *ptr)
                }
                if (ts)
                        HA_ATOMIC_INC(&ts->ref_cnt);
-               HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
+               HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
                if (ts)
                        return ts;
        }
@@ -556,14 +556,14 @@ struct stksess *stktable_lookup_ptr(struct stktable *t, void *ptr)
  * <ts> must originate from a table with the same key type and length as <t>,
  * else it is undefined behavior.
  */
-struct stksess *__stktable_lookup(struct stktable *t, struct stksess *ts, uint shard)
+struct stksess *__stktable_lookup(struct stktable *t, struct stksess *ts, uint bucket)
 {
        struct ebmb_node *eb;
 
        if (t->type == SMP_T_STR)
-               eb = ebst_lookup(&t->shards[shard].keys, (char *)ts->key.key);
+               eb = ebst_lookup(&t->buckets[bucket].keys, (char *)ts->key.key);
        else
-               eb = ebmb_lookup(&t->shards[shard].keys, ts->key.key, t->key_size);
+               eb = ebmb_lookup(&t->buckets[bucket].keys, ts->key.key, t->key_size);
 
        if (unlikely(!eb))
                return NULL;
@@ -583,7 +583,7 @@ struct stksess *__stktable_lookup(struct stktable *t, struct stksess *ts, uint s
 struct stksess *stktable_lookup(struct stktable *t, struct stksess *ts)
 {
        struct stksess *lts;
-       uint shard;
+       uint bucket;
        size_t len;
 
        if (t->type == SMP_T_STR)
@@ -591,13 +591,13 @@ struct stksess *stktable_lookup(struct stktable *t, struct stksess *ts)
        else
                len = t->key_size;
 
-       shard = stktable_calc_shard_num(t, ts->key.key, len);
+       bucket = stktable_calc_bucket_num(t, ts->key.key, len);
 
-       HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
-       lts = __stktable_lookup(t, ts, shard);
+       HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
+       lts = __stktable_lookup(t, ts, bucket);
        if (lts)
                HA_ATOMIC_INC(&lts->ref_cnt);
-       HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
+       HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
 
        return lts;
 }
@@ -684,14 +684,14 @@ static void stktable_release(struct stktable *t, struct stksess *ts)
  * is set. <ts> is returned if properly inserted, otherwise the one already
  * present if any.
  */
-struct stksess *__stktable_store(struct stktable *t, struct stksess *ts, uint shard)
+struct stksess *__stktable_store(struct stktable *t, struct stksess *ts, uint bucket)
 {
        struct ebmb_node *eb;
 
-       eb = ebmb_insert(&t->shards[shard].keys, &ts->key, t->key_size);
+       eb = ebmb_insert(&t->buckets[bucket].keys, &ts->key, t->key_size);
        if (likely(eb == &ts->key)) {
                ts->exp.key = ts->expire;
-               eb32_insert(&t->shards[shard].exps, &ts->exp);
+               eb32_insert(&t->buckets[bucket].exps, &ts->exp);
        }
        return ebmb_entry(eb, struct stksess, key); // most commonly this is <ts>
 }
@@ -713,21 +713,21 @@ void stktable_requeue_exp(struct stktable *t, const struct stksess *ts)
        else
                len = t->key_size;
 
-       bucket = stktable_calc_shard_num(t, ts->key.key, len);
+       bucket = stktable_calc_bucket_num(t, ts->key.key, len);
 
        /* set the task's expire to the newest expiration date. */
-       old_exp = HA_ATOMIC_LOAD(&t->shards[bucket].next_exp);
+       old_exp = HA_ATOMIC_LOAD(&t->buckets[bucket].next_exp);
        new_exp = tick_first(expire, old_exp);
 
        /* let's not go further if we're already up to date. We have
         * to make sure the compared date doesn't change under us.
         */
        if (new_exp == old_exp &&
-           HA_ATOMIC_CAS(&t->shards[bucket].next_exp, &old_exp, new_exp)) {
+           HA_ATOMIC_CAS(&t->buckets[bucket].next_exp, &old_exp, new_exp)) {
                return;
        }
 
-       while (!HA_ATOMIC_CAS(&t->shards[bucket].next_exp, &old_exp, new_exp)) {
+       while (!HA_ATOMIC_CAS(&t->buckets[bucket].next_exp, &old_exp, new_exp)) {
                if (new_exp == old_exp)
                        break;
                __ha_cpu_relax();
@@ -744,7 +744,7 @@ void stktable_requeue_exp(struct stktable *t, const struct stksess *ts)
        if (!tick_isset(old_exp) || tick_is_lt(new_exp, old_exp)) {
                int ret;
 
-               ret = MT_LIST_TRY_APPEND(&per_bucket[bucket].toadd_tables, &t->shards[bucket].in_bucket_toadd);
+               ret = MT_LIST_TRY_APPEND(&per_bucket[bucket].toadd_tables, &t->buckets[bucket].in_bucket_toadd);
                if (ret)
                        task_wakeup(per_bucket[bucket].exp_task, TASK_WOKEN_OTHER);
        }
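
stktable_requeue_exp() updates the bucket's next_exp without taking the bucket lock, through an atomic CAS loop: propose the earlier of the stored date and the new one, and retry if another thread moved the date in the meantime. A simplified C11 sketch of that loop follows; HAProxy's wrapping tick arithmetic is replaced here by a plain integer comparison, so this approximates, rather than reproduces, tick_first():

#include <stdatomic.h>

/* move *next_exp to <expire> if <expire> is earlier; plain '<' stands in
 * for tick_is_lt()/tick_first(), which handle wrapping ticks in HAProxy
 */
static void requeue_exp_sketch(_Atomic int *next_exp, int expire)
{
        int old_exp = atomic_load(next_exp);
        int new_exp = (expire < old_exp) ? expire : old_exp;

        /* fast path: nothing to move, just confirm the date did not change under us */
        if (new_exp == old_exp &&
            atomic_compare_exchange_strong(next_exp, &old_exp, new_exp))
                return;

        /* slow path: keep proposing the earlier date until the CAS sticks,
         * or until another thread has stored exactly what we wanted
         */
        while (!atomic_compare_exchange_weak(next_exp, &old_exp, new_exp)) {
                new_exp = (expire < old_exp) ? expire : old_exp;
                if (new_exp == old_exp)
                        break;
        }
}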
@@ -758,7 +758,7 @@ void stktable_requeue_exp(struct stktable *t, const struct stksess *ts)
 struct stksess *stktable_get_entry(struct stktable *table, struct stktable_key *key)
 {
        struct stksess *ts, *ts2;
-       uint shard;
+       uint bucket;
        size_t len;
 
        if (!key)
@@ -769,13 +769,13 @@ struct stksess *stktable_get_entry(struct stktable *table, struct stktable_key *
        else
                len = table->key_size;
 
-       shard = stktable_calc_shard_num(table, key->key, len);
+       bucket = stktable_calc_bucket_num(table, key->key, len);
 
-       HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &table->shards[shard].sh_lock);
-       ts = __stktable_lookup_key(table, key, shard);
+       HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock);
+       ts = __stktable_lookup_key(table, key, bucket);
        if (ts)
                HA_ATOMIC_INC(&ts->ref_cnt);
-       HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &table->shards[shard].sh_lock);
+       HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock);
        if (ts)
                return ts;
 
@@ -795,12 +795,12 @@ struct stksess *stktable_get_entry(struct stktable *table, struct stktable_key *
         * one we find.
         */
 
-       HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &table->shards[shard].sh_lock);
+       HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock);
 
-       ts2 = __stktable_store(table, ts, shard);
+       ts2 = __stktable_store(table, ts, bucket);
 
        HA_ATOMIC_INC(&ts2->ref_cnt);
-       HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &table->shards[shard].sh_lock);
+       HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock);
 
        if (unlikely(ts2 != ts)) {
                /* another entry was added in the meantime, let's
@@ -868,7 +868,7 @@ struct task *stktable_add_pend_updates(struct task *t, void *ctx, unsigned int s
 struct stksess *stktable_set_entry(struct stktable *table, struct stksess *nts)
 {
        struct stksess *ts;
-       uint shard;
+       uint bucket;
        size_t len;
 
        if (table->type == SMP_T_STR)
@@ -876,13 +876,13 @@ struct stksess *stktable_set_entry(struct stktable *table, struct stksess *nts)
        else
                len = table->key_size;
 
-       shard = stktable_calc_shard_num(table, nts->key.key, len);
+       bucket = stktable_calc_bucket_num(table, nts->key.key, len);
 
-       HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &table->shards[shard].sh_lock);
-       ts = __stktable_lookup(table, nts, shard);
+       HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock);
+       ts = __stktable_lookup(table, nts, bucket);
        if (ts) {
                HA_ATOMIC_INC(&ts->ref_cnt);
-               HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &table->shards[shard].sh_lock);
+               HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock);
                return ts;
        }
        ts = nts;
@@ -890,18 +890,18 @@ struct stksess *stktable_set_entry(struct stktable *table, struct stksess *nts)
        /* let's increment it before switching to exclusive */
        HA_ATOMIC_INC(&ts->ref_cnt);
 
-       if (HA_RWLOCK_TRYRDTOSK(STK_TABLE_LOCK, &table->shards[shard].sh_lock) != 0) {
+       if (HA_RWLOCK_TRYRDTOSK(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock) != 0) {
                /* upgrade to seek lock failed, let's drop and take */
-               HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &table->shards[shard].sh_lock);
-               HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &table->shards[shard].sh_lock);
+               HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock);
+               HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock);
        }
        else
-               HA_RWLOCK_SKTOWR(STK_TABLE_LOCK, &table->shards[shard].sh_lock);
+               HA_RWLOCK_SKTOWR(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock);
 
        /* now we're write-locked */
 
-       __stktable_store(table, ts, shard);
-       HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &table->shards[shard].sh_lock);
+       __stktable_store(table, ts, bucket);
+       HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &table->buckets[bucket].sh_lock);
 
        stktable_requeue_exp(table, ts);
        return ts;
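
stktable_set_entry() shows the write path: look the entry up under the read lock, and only if it is absent promote to the write lock to insert it. HAProxy performs the promotion through an intermediate seek lock (HA_RWLOCK_TRYRDTOSK then HA_RWLOCK_SKTOWR), falling back to dropping and retaking the lock when the upgrade fails. Plain pthread rwlocks cannot express the seek state, so the sketch below always drops and retakes the lock and therefore re-checks for a concurrent insert; every name in it is illustrative:

#include <pthread.h>
#include <stddef.h>

struct entry { int ref_cnt; };

struct bucket { pthread_rwlock_t lock; };

/* insert <nts> into its bucket unless an equivalent entry already exists;
 * <find> and <insert> stand in for __stktable_lookup() and __stktable_store()
 */
static struct entry *set_entry_sketch(struct bucket *b, unsigned int bucket,
                                      struct entry *nts,
                                      struct entry *(*find)(unsigned int),
                                      void (*insert)(unsigned int, struct entry *))
{
        struct entry *ts;

        pthread_rwlock_rdlock(&b[bucket].lock);
        ts = find(bucket);
        if (ts) {
                __atomic_add_fetch(&ts->ref_cnt, 1, __ATOMIC_SEQ_CST);
                pthread_rwlock_unlock(&b[bucket].lock);
                return ts;             /* someone already created it */
        }

        /* no upgradable lock here: drop the read lock, take the write lock,
         * and re-check since another thread may have inserted meanwhile
         */
        pthread_rwlock_unlock(&b[bucket].lock);
        pthread_rwlock_wrlock(&b[bucket].lock);
        ts = find(bucket);
        if (!ts) {
                insert(bucket, nts);
                ts = nts;
        }
        __atomic_add_fetch(&ts->ref_cnt, 1, __ATOMIC_SEQ_CST);
        pthread_rwlock_unlock(&b[bucket].lock);

        return ts;
}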
@@ -920,29 +920,29 @@ struct task *process_tables_expire(struct task *task, void *context, unsigned in
        struct mt_list link;
        int to_visit;
        int task_exp;
-       int shard;
+       int bucket;
 
        task_exp = TICK_ETERNITY;
 
-       shard = (ps - &per_bucket[0]);
+       bucket = (ps - &per_bucket[0]);
 
        to_visit = STKTABLE_MAX_UPDATES_AT_ONCE;
 
        /*
         * First put all the tables to be added from the list to the tree
         */
-       while ((t = MT_LIST_POP(&ps->toadd_tables, struct stktable *, shards[shard].in_bucket_toadd)) != NULL) {
-               int next_exp = HA_ATOMIC_LOAD(&t->shards[shard].next_exp);
+       while ((t = MT_LIST_POP(&ps->toadd_tables, struct stktable *, buckets[bucket].in_bucket_toadd)) != NULL) {
+               int next_exp = HA_ATOMIC_LOAD(&t->buckets[bucket].next_exp);
                /*
                 * We're already in the tree
                 */
-               if (tick_isset(t->shards[shard].in_bucket.key) &&
-                   tick_is_lt(t->shards[shard].in_bucket.key, next_exp))
+               if (tick_isset(t->buckets[bucket].in_bucket.key) &&
+                   tick_is_lt(t->buckets[bucket].in_bucket.key, next_exp))
                        continue;
 
-               eb32_delete(&t->shards[shard].in_bucket);
-               t->shards[shard].in_bucket.key = next_exp;
-               eb32_insert(&ps->tables, &t->shards[shard].in_bucket);
+               eb32_delete(&t->buckets[bucket].in_bucket);
+               t->buckets[bucket].in_bucket.key = next_exp;
+               eb32_insert(&ps->tables, &t->buckets[bucket].in_bucket);
        }
        table_eb = eb32_first(&ps->tables);
 
@@ -950,7 +950,7 @@ struct task *process_tables_expire(struct task *task, void *context, unsigned in
                struct eb32_node *tmpnode;
                unsigned int next_exp_table = TICK_ETERNITY;
 
-               t = eb32_entry(table_eb, struct stktable, shards[shard].in_bucket);
+               t = eb32_entry(table_eb, struct stktable, buckets[bucket].in_bucket);
 
                if (tick_is_lt(now_ms, table_eb->key)) {
                        /*
@@ -960,9 +960,9 @@ struct task *process_tables_expire(struct task *task, void *context, unsigned in
                                task_exp = table_eb->key;
                        break;
                }
-               HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
+               HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
 
-               eb = eb32_lookup_ge(&t->shards[shard].exps, now_ms - TIMER_LOOK_BACK);
+               eb = eb32_lookup_ge(&t->buckets[bucket].exps, now_ms - TIMER_LOOK_BACK);
 
                while (to_visit >= 0) {
                        if (unlikely(!eb)) {
@@ -971,7 +971,7 @@ struct task *process_tables_expire(struct task *task, void *context, unsigned in
                                 * half. Let's loop back to the beginning of the tree now if we
                                 * have not yet visited it.
                                 */
-                               eb = eb32_first(&t->shards[shard].exps);
+                               eb = eb32_first(&t->buckets[bucket].exps);
                                if (likely(!eb))
                                        break;
                        }
@@ -1010,7 +1010,7 @@ struct task *process_tables_expire(struct task *task, void *context, unsigned in
                                        continue;
 
                                ts->exp.key = ts->expire;
-                               eb32_insert(&t->shards[shard].exps, &ts->exp);
+                               eb32_insert(&t->buckets[bucket].exps, &ts->exp);
 
                                /* the update might have jumped beyond the next element,
                                 * possibly causing a wrapping. We need to check whether
@@ -1021,7 +1021,7 @@ struct task *process_tables_expire(struct task *task, void *context, unsigned in
                                 * use the current one.
                                 */
                                if (!eb)
-                                       eb = eb32_first(&t->shards[shard].exps);
+                                       eb = eb32_first(&t->buckets[bucket].exps);
 
                                if (!eb || tick_is_lt(ts->exp.key, eb->key))
                                        eb = &ts->exp;
@@ -1049,11 +1049,11 @@ struct task *process_tables_expire(struct task *task, void *context, unsigned in
 
                /*
                 * Now find the first element, so that we can reposition
-                * the table in the shard tree.
+                * the table in the bucket tree.
                 */
-               eb = eb32_lookup_ge(&t->shards[shard].exps, now_ms - TIMER_LOOK_BACK);
+               eb = eb32_lookup_ge(&t->buckets[bucket].exps, now_ms - TIMER_LOOK_BACK);
                if (!eb)
-                       eb = eb32_first(&t->shards[shard].exps);
+                       eb = eb32_first(&t->buckets[bucket].exps);
 
                if (eb)
                        next_exp_table = eb->key;
@@ -1063,7 +1063,7 @@ struct task *process_tables_expire(struct task *task, void *context, unsigned in
                if (!tick_isset(task_exp) || (tick_isset(next_exp_table) && tick_is_lt(next_exp_table, task_exp)))
                        task_exp = next_exp_table;
 
-               HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->shards[shard].sh_lock);
+               HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->buckets[bucket].sh_lock);
                tmpnode = eb32_next(table_eb);
 
                if (table_eb->key != next_exp_table) {
@@ -1071,9 +1071,9 @@ struct task *process_tables_expire(struct task *task, void *context, unsigned in
                        /*
                         * We have to move the entry in the tree
                         */
-                       old_exp = HA_ATOMIC_LOAD(&t->shards[shard].next_exp);
+                       old_exp = HA_ATOMIC_LOAD(&t->buckets[bucket].next_exp);
                        if (old_exp >= table_eb->key) {
-                               HA_ATOMIC_CAS(&t->shards[shard].next_exp, &old_exp, next_exp_table);
+                               HA_ATOMIC_CAS(&t->buckets[bucket].next_exp, &old_exp, next_exp_table);
                        }
 
                        eb32_delete(table_eb);
@@ -1083,7 +1083,7 @@ struct task *process_tables_expire(struct task *task, void *context, unsigned in
                         * it'll go back into the tree the next time the task runs.
                         */
                        if (next_exp_table != TICK_ETERNITY)
-                               MT_LIST_TRY_APPEND(&per_bucket[shard].toadd_tables, &t->shards[shard].in_bucket_toadd);
+                               MT_LIST_TRY_APPEND(&per_bucket[bucket].toadd_tables, &t->buckets[bucket].in_bucket_toadd);
                }
                table_eb = tmpnode;
        }
@@ -1111,17 +1111,17 @@ struct task *process_tables_expire(struct task *task, void *context, unsigned in
 int stktable_init(struct stktable *t, char **err_msg)
 {
        int peers_retval = 0;
-       int shard;
+       int bucket;
        int i;
 
        t->hash_seed = XXH64(t->id, t->idlen, 0);
 
        if (t->size) {
-               for (shard = 0; shard < CONFIG_HAP_TBL_BUCKETS; shard++) {
-                       t->shards[shard].keys = EB_ROOT_UNIQUE;
-                       memset(&t->shards[shard].exps, 0, sizeof(t->shards[shard].exps));
-                       HA_RWLOCK_INIT(&t->shards[shard].sh_lock);
-                       MT_LIST_INIT(&t->shards[shard].in_bucket_toadd);
+               for (bucket = 0; bucket < CONFIG_HAP_TBL_BUCKETS; bucket++) {
+                       t->buckets[bucket].keys = EB_ROOT_UNIQUE;
+                       memset(&t->buckets[bucket].exps, 0, sizeof(t->buckets[bucket].exps));
+                       HA_RWLOCK_INIT(&t->buckets[bucket].sh_lock);
+                       MT_LIST_INIT(&t->buckets[bucket].in_bucket_toadd);
                }
 
                MT_LIST_INIT(&t->updates);
@@ -1197,8 +1197,8 @@ void stktable_deinit(struct stktable *t)
                return;
        for (i = 0; i < CONFIG_HAP_TBL_BUCKETS; i++) {
                HA_SPIN_LOCK(OTHER_LOCK, &per_bucket[i].lock);
-               eb32_delete(&t->shards[i].in_bucket);
-               MT_LIST_DELETE(&t->shards[i].in_bucket_toadd);
+               eb32_delete(&t->buckets[i].in_bucket);
+               MT_LIST_DELETE(&t->buckets[i].in_bucket_toadd);
                HA_SPIN_UNLOCK(OTHER_LOCK, &per_bucket[i].lock);
        }
        tasklet_free(t->updt_task);
@@ -5694,7 +5694,7 @@ static int cli_io_handler_table(struct appctx *appctx)
        struct ebmb_node *eb;
        int skip_entry;
        int show = ctx->action == STK_CLI_ACT_SHOW;
-       int shard = ctx->tree_head;
+       int bucket = ctx->tree_head;
 
        /*
         * We have 3 possible states in ctx->state :
@@ -5720,30 +5720,30 @@ static int cli_io_handler_table(struct appctx *appctx)
                        }
 
                        if (ctx->t->size) {
-                               if (show && !shard && !table_dump_head_to_buffer(&trash, appctx, ctx->t, ctx->target))
+                               if (show && !bucket && !table_dump_head_to_buffer(&trash, appctx, ctx->t, ctx->target))
                                        return 0;
 
                                if (ctx->target &&
                                    (strm_li(s)->bind_conf->level & ACCESS_LVL_MASK) >= ACCESS_LVL_OPER) {
                                        /* dump entries only if table explicitly requested */
-                                       HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &ctx->t->shards[shard].sh_lock);
-                                       eb = ebmb_first(&ctx->t->shards[shard].keys);
+                                       HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &ctx->t->buckets[bucket].sh_lock);
+                                       eb = ebmb_first(&ctx->t->buckets[bucket].keys);
                                        if (eb) {
                                                ctx->entry = ebmb_entry(eb, struct stksess, key);
                                                HA_ATOMIC_INC(&ctx->entry->ref_cnt);
                                                ctx->state = STATE_DUMP;
-                                               HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->shards[shard].sh_lock);
+                                               HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->buckets[bucket].sh_lock);
                                                break;
                                        }
-                                       HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->shards[shard].sh_lock);
+                                       HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->buckets[bucket].sh_lock);
 
-                                       /* we come here if we didn't find any entry in this shard */
-                                       shard = ++ctx->tree_head;
-                                       if (shard < CONFIG_HAP_TBL_BUCKETS)
-                                               break; // try again on new shard
+                                       /* we come here if we didn't find any entry in this bucket */
+                                       bucket = ++ctx->tree_head;
+                                       if (bucket < CONFIG_HAP_TBL_BUCKETS)
+                                               break; // try again on new bucket
 
                                        /* fall through next table */
-                                       shard = ctx->tree_head = 0;
+                                       bucket = ctx->tree_head = 0;
                                }
                        }
                        ctx->t = ctx->t->next;
@@ -5829,7 +5829,7 @@ static int cli_io_handler_table(struct appctx *appctx)
 
                        HA_RWLOCK_RDUNLOCK(STK_SESS_LOCK, &ctx->entry->lock);
 
-                       HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &ctx->t->shards[shard].sh_lock);
+                       HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &ctx->t->buckets[bucket].sh_lock);
                        HA_ATOMIC_DEC(&ctx->entry->ref_cnt);
 
                        eb = ebmb_next(&ctx->entry->key);
@@ -5841,7 +5841,7 @@ static int cli_io_handler_table(struct appctx *appctx)
                                else if (!skip_entry && !ctx->entry->ref_cnt)
                                        __stksess_kill(ctx->t, old);
                                HA_ATOMIC_INC(&ctx->entry->ref_cnt);
-                               HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->shards[shard].sh_lock);
+                               HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->buckets[bucket].sh_lock);
                                break;
                        }
 
@@ -5851,11 +5851,11 @@ static int cli_io_handler_table(struct appctx *appctx)
                        else if (!skip_entry && !HA_ATOMIC_LOAD(&ctx->entry->ref_cnt))
                                __stksess_kill(ctx->t, ctx->entry);
 
-                       HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->shards[shard].sh_lock);
+                       HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &ctx->t->buckets[bucket].sh_lock);
 
-                       shard = ++ctx->tree_head;
-                       if (shard >= CONFIG_HAP_TBL_BUCKETS) {
-                               shard = ctx->tree_head = 0;
+                       bucket = ++ctx->tree_head;
+                       if (bucket >= CONFIG_HAP_TBL_BUCKETS) {
+                               bucket = ctx->tree_head = 0;
                                ctx->t = ctx->t->next;
                        }
                        ctx->state = STATE_NEXT;
@@ -5931,7 +5931,7 @@ static void stkt_late_init(void)
                per_bucket[i].tables = EB_ROOT;
                per_bucket[i].exp_task = task_new_on(i % global.nbthread);
                if (per_bucket[i].exp_task == NULL) {
-                       ha_alert("Failed to allocate per-shard task!\n");
+                       ha_alert("Failed to allocate per-bucket task!\n");
                        exit(1);
                }
                per_bucket[i].exp_task->process = process_tables_expire;