]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
rhashtable: Restore insecure_elasticity toggle
author: Herbert Xu <herbert@gondor.apana.org.au>
Sat, 18 Apr 2026 01:41:21 +0000 (09:41 +0800)
committer: Tejun Heo <tj@kernel.org>
Sun, 19 Apr 2026 15:47:21 +0000 (05:47 -1000)
Some users of rhashtable cannot handle insertion failures, and
are happy to accept the consequences of a hash table having
very long chains.

Restore the insecure_elasticity toggle for these users.  In
addition to disabling the chain length checks, this also removes
the emergency resize that would otherwise occur when the hash
table occupancy hits 100% (an async resize is still scheduled
at 75%).

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Tejun Heo <tj@kernel.org>
include/linux/rhashtable-types.h
include/linux/rhashtable.h
lib/rhashtable.c

index 015c8298bebc45c20ccc120e061978f2c5f9e25a..72082428d6c6e64c203cc2277890f82a9d982518 100644 (file)
@@ -49,6 +49,7 @@ typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
  * @head_offset: Offset of rhash_head in struct to be hashed
  * @max_size: Maximum size while expanding
  * @min_size: Minimum size while shrinking
+ * @insecure_elasticity: Set to true to disable chain length checks
  * @automatic_shrinking: Enable automatic shrinking of tables
  * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
  * @obj_hashfn: Function to hash object
@@ -61,6 +62,7 @@ struct rhashtable_params {
        u16                     head_offset;
        unsigned int            max_size;
        u16                     min_size;
+       bool                    insecure_elasticity;
        bool                    automatic_shrinking;
        rht_hashfn_t            hashfn;
        rht_obj_hashfn_t        obj_hashfn;
index 0480509a6339332491ca1adf6a17a1fe35c6928f..7def3f0f556b53a9a7fccebc8f0573f8df0b99c5 100644 (file)
@@ -821,14 +821,15 @@ slow_path:
                goto out;
        }
 
-       if (elasticity <= 0)
+       if (elasticity <= 0 && !params.insecure_elasticity)
                goto slow_path;
 
        data = ERR_PTR(-E2BIG);
        if (unlikely(rht_grow_above_max(ht, tbl)))
                goto out_unlock;
 
-       if (unlikely(rht_grow_above_100(ht, tbl)))
+       if (unlikely(rht_grow_above_100(ht, tbl)) &&
+           !params.insecure_elasticity)
                goto slow_path;
 
        /* Inserting at head of list makes unlocking free. */
index 6074ed5f66f3fd7c14ec0da6215b89daa41eca4a..fb2b7bc137bae1692cb5e8df3d3fff6e4905dfc2 100644 (file)
@@ -538,7 +538,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
                return NULL;
        }
 
-       if (elasticity <= 0)
+       if (elasticity <= 0 && !ht->p.insecure_elasticity)
                return ERR_PTR(-EAGAIN);
 
        return ERR_PTR(-ENOENT);
@@ -568,7 +568,8 @@ static struct bucket_table *rhashtable_insert_one(
        if (unlikely(rht_grow_above_max(ht, tbl)))
                return ERR_PTR(-E2BIG);
 
-       if (unlikely(rht_grow_above_100(ht, tbl)))
+       if (unlikely(rht_grow_above_100(ht, tbl)) &&
+           !ht->p.insecure_elasticity)
                return ERR_PTR(-EAGAIN);
 
        head = rht_ptr(bkt, tbl, hash);