static struct deferred_split *folio_split_queue_lock(struct folio *folio)
{
- return split_queue_lock(folio_nid(folio), folio_memcg(folio));
+ struct deferred_split *queue;
+
+ rcu_read_lock();
+ queue = split_queue_lock(folio_nid(folio), folio_memcg(folio));
+ /*
+ * The memcg destruction path acquires this split queue lock when
+ * reparenting, so holding the lock pins the queue against going away;
+ * once the lock is held it is safe to drop the RCU read lock.
+ */
+ rcu_read_unlock();
+
+ return queue;
}
static struct deferred_split *
folio_split_queue_lock_irqsave(struct folio *folio, unsigned long *flags)
{
- return split_queue_lock_irqsave(folio_nid(folio), folio_memcg(folio), flags);
+ struct deferred_split *queue;
+
+ rcu_read_lock();
+ queue = split_queue_lock_irqsave(folio_nid(folio), folio_memcg(folio), flags);
+ rcu_read_unlock();
+
+ return queue;
}
static inline void split_queue_unlock(struct deferred_split *queue)