FRRS_AVAILABLE);
}
+/*
+ * Tear down the ring entries of every allocated queue of @ring.
+ * Common helper shared by the synchronous and the async (deferred-work)
+ * stop paths, replacing two previously duplicated loops.
+ */
+static void fuse_uring_teardown_all_queues(struct fuse_ring *ring)
+{
+ int qid;
+
+ for (qid = 0; qid < ring->nr_queues; qid++) {
+ /* READ_ONCE: queues[] is presumably populated lockless — TODO confirm */
+ struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]);
+
+ /* slot not (yet) allocated — nothing to tear down */
+ if (!queue)
+ continue;
+
+ fuse_uring_teardown_entries(queue);
+ }
+}
+
/*
* Log state debug info
*/
static void fuse_uring_async_stop_queues(struct work_struct *work)
{
- int qid;
struct fuse_ring *ring =
container_of(work, struct fuse_ring, async_teardown_work.work);
- /* XXX code dup */
- for (qid = 0; qid < ring->nr_queues; qid++) {
- struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]);
-
- if (!queue)
- continue;
-
- fuse_uring_teardown_entries(queue);
- }
+ fuse_uring_teardown_all_queues(ring);
/*
* Some ring entries might be in the middle of IO operations,
*/
void fuse_uring_stop_queues(struct fuse_ring *ring)
{
- int qid;
-
- for (qid = 0; qid < ring->nr_queues; qid++) {
- struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]);
-
- if (!queue)
- continue;
-
- fuse_uring_teardown_entries(queue);
- }
+ fuse_uring_teardown_all_queues(ring);
if (atomic_read(&ring->queue_refs) > 0) {
ring->teardown_time = jiffies;