io_uring/rsrc: clean up io_coalesce_buffer()
author Pavel Begunkov <asml.silence@gmail.com>
Sat, 19 Apr 2025 17:47:05 +0000 (18:47 +0100)
committer Jens Axboe <axboe@kernel.dk>
Mon, 21 Apr 2025 11:10:04 +0000 (05:10 -0600)
We don't need special handling for the first page in
io_coalesce_buffer(); move that handling inside the loop.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: Anuj Gupta <anuj20.g@samsung.com>
Link: https://lore.kernel.org/r/ad698cddc1eadb3d92a7515e95bb13f79420323d.1745083025.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
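
For context, the refactoring pattern used here is a common one: instead of handling the first (head) folio before the loop and starting the loop at index 1, a single loop covers every folio and a ternary on the index picks the head page count for iteration 0. Below is a minimal userspace C sketch of that pattern; the struct and function names are hypothetical stand-ins, not kernel code, and plain integers replace the folio bookkeeping.

#include <stdio.h>

struct layout {
	unsigned nr_folios;      /* number of folios backing the buffer */
	unsigned nr_pages_head;  /* pages contributed by the first folio */
	unsigned nr_pages_mid;   /* pages contributed by each later folio */
};

/* Before: the head folio is special-cased ahead of the loop. */
static unsigned total_before(const struct layout *d)
{
	unsigned sum = d->nr_pages_head;

	for (unsigned i = 1; i < d->nr_folios; i++)
		sum += d->nr_pages_mid;
	return sum;
}

/* After: one loop, with index 0 selecting the head count. */
static unsigned total_after(const struct layout *d)
{
	unsigned sum = 0;

	for (unsigned i = 0; i < d->nr_folios; i++)
		sum += i ? d->nr_pages_mid : d->nr_pages_head;
	return sum;
}

int main(void)
{
	struct layout d = { .nr_folios = 3, .nr_pages_head = 2, .nr_pages_mid = 4 };

	/* Both forms count the same pages; the second has no special case. */
	printf("%u %u\n", total_before(&d), total_after(&d));
	return 0;
}
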
io_uring/rsrc.c

index 6bf8dff4adf3b857e8abc5cce21a4d5df715b7c8..5d25f3391650cab621f268f1a79a844028626014 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -685,37 +685,34 @@ static bool io_coalesce_buffer(struct page ***pages, int *nr_pages,
                                struct io_imu_folio_data *data)
 {
        struct page **page_array = *pages, **new_array = NULL;
-       int nr_pages_left = *nr_pages, i, j;
-       int nr_folios = data->nr_folios;
+       unsigned nr_pages_left = *nr_pages;
+       unsigned nr_folios = data->nr_folios;
+       unsigned i, j;
 
        /* Store head pages only*/
-       new_array = kvmalloc_array(nr_folios, sizeof(struct page *),
-                                       GFP_KERNEL);
+       new_array = kvmalloc_array(nr_folios, sizeof(struct page *), GFP_KERNEL);
        if (!new_array)
                return false;
 
-       new_array[0] = compound_head(page_array[0]);
-       /*
-        * The pages are bound to the folio, it doesn't
-        * actually unpin them but drops all but one reference,
-        * which is usually put down by io_buffer_unmap().
-        */
-       if (data->nr_pages_head > 1)
-               unpin_user_folio(page_folio(new_array[0]), data->nr_pages_head - 1);
-
-       j = data->nr_pages_head;
-       nr_pages_left -= data->nr_pages_head;
-       for (i = 1; i < nr_folios; i++) {
-               unsigned int nr_unpin;
-
-               new_array[i] = page_array[j];
-               nr_unpin = min_t(unsigned int, nr_pages_left - 1,
-                                       data->nr_pages_mid - 1);
-               if (nr_unpin)
-                       unpin_user_folio(page_folio(new_array[i]), nr_unpin);
-               j += data->nr_pages_mid;
-               nr_pages_left -= data->nr_pages_mid;
+       for (i = 0, j = 0; i < nr_folios; i++) {
+               struct page *p = compound_head(page_array[j]);
+               struct folio *folio = page_folio(p);
+               unsigned int nr;
+
+               WARN_ON_ONCE(i > 0 && p != page_array[j]);
+
+               nr = i ? data->nr_pages_mid : data->nr_pages_head;
+               nr = min(nr, nr_pages_left);
+               /* Drop all but one ref, the entire folio will remain pinned. */
+               if (nr > 1)
+                       unpin_user_folio(folio, nr - 1);
+               j += nr;
+               nr_pages_left -= nr;
+               new_array[i] = p;
        }
+
+       WARN_ON_ONCE(j != *nr_pages);
+
        kvfree(page_array);
        *pages = new_array;
        *nr_pages = nr_folios;
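
As a sanity check on the bookkeeping in the new loop: the per-folio counts have to add up to the original page count, which is what the final WARN_ON_ONCE(j != *nr_pages) asserts, and the min() clamp allows the last folio to be only partially covered. A standalone illustration with made-up numbers (hypothetical values, not kernel code):

#include <assert.h>

int main(void)
{
	/* Hypothetical layout: 10 pinned pages spread over 3 folios. */
	unsigned nr_pages = 10, nr_folios = 3;
	unsigned nr_pages_head = 3;  /* head folio only partially covered */
	unsigned nr_pages_mid = 4;   /* following folios fully covered */
	unsigned nr_pages_left = nr_pages, j = 0;

	for (unsigned i = 0; i < nr_folios; i++) {
		unsigned nr = i ? nr_pages_mid : nr_pages_head;

		/* The last folio may be partial, hence the clamp. */
		if (nr > nr_pages_left)
			nr = nr_pages_left;
		j += nr;
		nr_pages_left -= nr;
	}

	/* Mirrors WARN_ON_ONCE(j != *nr_pages): every page is accounted for. */
	assert(j == nr_pages);
	return 0;
}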