void rhashtable_walk_enter(struct rhashtable *ht,
struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
-int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);
+int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires_shared(RCU);
static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
+ __acquires_shared(RCU)
{
(void)rhashtable_walk_start_check(iter);
}
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void *rhashtable_walk_peek(struct rhashtable_iter *iter);
-void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
+void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases_shared(RCU);
void rhashtable_free_and_destroy(struct rhashtable *ht,
void (*free_fn)(void *ptr, void *arg),
static inline unsigned long rht_lock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt)
+ __acquires(__bitlock(0, bkt))
{
unsigned long flags;
static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bucket,
unsigned int subclass)
+ __acquires(__bitlock(0, bucket))
{
unsigned long flags;
static inline void rht_unlock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt,
unsigned long flags)
+ __releases(__bitlock(0, bkt))
{
lock_map_release(&tbl->dep_map);
bit_spin_unlock(0, (unsigned long *)bkt);
static inline void rht_assign_unlock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt,
struct rhash_head *obj,
unsigned long flags)
+ __releases(__bitlock(0, bkt))
{
if (rht_is_a_nulls(obj))
obj = NULL;
lock_map_release(&tbl->dep_map);
rcu_assign_pointer(*bkt, (void *)obj);
preempt_enable();
- __release(bitlock);
+ __release(__bitlock(0, bkt));
local_irq_restore(flags);
}
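For reference, the bucket bit-lock annotations pair up at the call sites that take and drop the lock. A minimal sketch, assuming a hypothetical helper update_bucket_sketch() (the bucket pointer lookup and the actual chain update are elided):

#include <linux/rhashtable.h>

/* Hypothetical helper: lock one bucket, modify its chain, unlock.
 * rht_lock() acquires __bitlock(0, bkt) and rht_unlock() releases it,
 * which is what the added annotations express. */
static void update_bucket_sketch(struct bucket_table *tbl,
                                 struct rhash_lock_head __rcu **bkt)
{
        unsigned long flags;

        flags = rht_lock(tbl, bkt);
        /* ... modify the bucket chain while holding the bit lock ... */
        rht_unlock(tbl, bkt, flags);
}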
static __always_inline struct rhash_head *__rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params,
const enum rht_lookup_freq freq)
+ __must_hold_shared(RCU)
{
struct rhashtable_compare_arg arg = {
.ht = ht,
static __always_inline void *rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(ht, key, params,
RHT_LOOKUP_NORMAL);
static __always_inline void *rhashtable_lookup_likely(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(ht, key, params,
RHT_LOOKUP_LIKELY);
static __always_inline struct rhlist_head *rhltable_lookup(
struct rhltable *hlt, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
RHT_LOOKUP_NORMAL);
static __always_inline struct rhlist_head *rhltable_lookup_likely(
struct rhltable *hlt, const void *key,
const struct rhashtable_params params)
+ __must_hold_shared(RCU)
{
struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params,
RHT_LOOKUP_LIKELY);
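The __must_hold_shared(RCU) annotations make explicit that these lookup helpers expect the caller to already be inside an RCU read-side critical section. A minimal caller sketch, assuming a placeholder object type struct foo with parameters foo_params:

#include <linux/rhashtable.h>

/* Placeholder object and table parameters for the sketch. */
struct foo {
        u32 key;
        struct rhash_head node;
};

static const struct rhashtable_params foo_params = {
        .key_len     = sizeof(u32),
        .key_offset  = offsetof(struct foo, key),
        .head_offset = offsetof(struct foo, node),
};

/* rhashtable_lookup() is called, and its result inspected, only while
 * the RCU read lock is held, satisfying __must_hold_shared(RCU). */
static bool foo_exists(struct rhashtable *ht, u32 key)
{
        struct foo *obj;

        rcu_read_lock();
        obj = rhashtable_lookup(ht, &key, foo_params);
        rcu_read_unlock();

        return obj != NULL;
}

Callers that do not want to manage the RCU section themselves can use rhashtable_lookup_fast(), which takes the read lock internally.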
static int rhashtable_rehash_alloc(struct rhashtable *ht,
struct bucket_table *old_tbl,
unsigned int size)
+ __must_hold(&ht->mutex)
{
struct bucket_table *new_tbl;
int err;
* bucket locks or concurrent RCU protected lookups and traversals.
*/
static int rhashtable_shrink(struct rhashtable *ht)
+ __must_hold(&ht->mutex)
{
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
unsigned int nelems = atomic_read(&ht->nelems);
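The __must_hold(&ht->mutex) annotations on rhashtable_rehash_alloc() and rhashtable_shrink() match how they are reached: the deferred resize worker in lib/rhashtable.c takes ht->mutex before calling them. A condensed, simplified sketch of that path (error handling and the subsequent rehash step are elided; resize_sketch is a stand-in name):

/* Condensed sketch: ht->mutex is held around the helpers annotated
 * with __must_hold(&ht->mutex). */
static void resize_sketch(struct rhashtable *ht)
{
        struct bucket_table *tbl;

        mutex_lock(&ht->mutex);
        tbl = rht_dereference(ht->tbl, ht);

        if (rht_grow_above_75(ht, tbl))
                rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
        else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
                rhashtable_shrink(ht);

        mutex_unlock(&ht->mutex);
}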
* resize events and always continue.
*/
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
- __acquires(RCU)
+ __acquires_shared(RCU)
{
struct rhashtable *ht = iter->ht;
bool rhlist = ht->rhlist;
* hash table.
*/
void rhashtable_walk_stop(struct rhashtable_iter *iter)
- __releases(RCU)
+ __releases_shared(RCU)
{
struct rhashtable *ht;
struct bucket_table *tbl = iter->walker.tbl;
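Finally, the shared-RCU annotations on the walk API follow its documented usage: rhashtable_walk_start() enters the RCU read-side section, rhashtable_walk_stop() leaves it, and -EAGAIN from rhashtable_walk_next() only means a resize occurred and the walk should continue. A minimal caller sketch (walk_all is a placeholder name):

#include <linux/rhashtable.h>

/* Walk every object in @ht. The walk_start()/walk_stop() pair brackets
 * the RCU read-side section, as the __acquires_shared(RCU) and
 * __releases_shared(RCU) annotations now document. */
static void walk_all(struct rhashtable *ht)
{
        struct rhashtable_iter iter;
        void *obj;

        rhashtable_walk_enter(ht, &iter);
        rhashtable_walk_start(&iter);

        while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(obj)) {
                        if (PTR_ERR(obj) == -EAGAIN)
                                continue;       /* resize happened; keep walking */
                        break;
                }
                /* ... inspect obj while in the RCU read-side section ... */
        }

        rhashtable_walk_stop(&iter);
        rhashtable_walk_exit(&iter);
}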