From 2db297185cbc567bdbf1f0cc4f75d6cb46eebcf8 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 20 Mar 2025 06:23:38 -0700 Subject: [PATCH] fix up 6.1 io_uring change --- ...p-larger-than-what-the-user-asks-for.patch | 4 +-- ...fix-corner-case-forgetting-to-vunmap.patch | 4 +-- ...map_pfn_range-for-mapping-rings-sqes.patch | 32 ++++++++++++------- 3 files changed, 24 insertions(+), 16 deletions(-) diff --git a/queue-6.1/io_uring-don-t-attempt-to-mmap-larger-than-what-the-user-asks-for.patch b/queue-6.1/io_uring-don-t-attempt-to-mmap-larger-than-what-the-user-asks-for.patch index b7b698e479..69a3a65661 100644 --- a/queue-6.1/io_uring-don-t-attempt-to-mmap-larger-than-what-the-user-asks-for.patch +++ b/queue-6.1/io_uring-don-t-attempt-to-mmap-larger-than-what-the-user-asks-for.patch @@ -30,7 +30,7 @@ Signed-off-by: Greg Kroah-Hartman --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c -@@ -3236,6 +3236,7 @@ static __cold int io_uring_mmap(struct f +@@ -3237,6 +3237,7 @@ static __cold int io_uring_mmap(struct f struct io_ring_ctx *ctx = file->private_data; size_t sz = vma->vm_end - vma->vm_start; long offset = vma->vm_pgoff << PAGE_SHIFT; @@ -38,7 +38,7 @@ Signed-off-by: Greg Kroah-Hartman unsigned long pfn; void *ptr; -@@ -3246,8 +3247,8 @@ static __cold int io_uring_mmap(struct f +@@ -3247,8 +3248,8 @@ static __cold int io_uring_mmap(struct f switch (offset & IORING_OFF_MMAP_MASK) { case IORING_OFF_SQ_RING: case IORING_OFF_CQ_RING: diff --git a/queue-6.1/io_uring-fix-corner-case-forgetting-to-vunmap.patch b/queue-6.1/io_uring-fix-corner-case-forgetting-to-vunmap.patch index 64f58e0235..8b3f070960 100644 --- a/queue-6.1/io_uring-fix-corner-case-forgetting-to-vunmap.patch +++ b/queue-6.1/io_uring-fix-corner-case-forgetting-to-vunmap.patch @@ -29,7 +29,7 @@ Signed-off-by: Greg Kroah-Hartman --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c -@@ -2610,6 +2610,8 @@ static void *io_pages_map(struct page ** +@@ -2611,6 +2611,8 @@ static void *io_pages_map(struct page ** ret = io_mem_alloc_compound(pages, nr_pages, size, gfp); if (!IS_ERR(ret)) goto done; @@ -38,7 +38,7 @@ Signed-off-by: Greg Kroah-Hartman ret = io_mem_alloc_single(pages, nr_pages, size, gfp); if (!IS_ERR(ret)) { -@@ -2618,7 +2620,7 @@ done: +@@ -2619,7 +2621,7 @@ done: *npages = nr_pages; return ret; } diff --git a/queue-6.1/io_uring-get-rid-of-remap_pfn_range-for-mapping-rings-sqes.patch b/queue-6.1/io_uring-get-rid-of-remap_pfn_range-for-mapping-rings-sqes.patch index b0f846239f..83b08cb50f 100644 --- a/queue-6.1/io_uring-get-rid-of-remap_pfn_range-for-mapping-rings-sqes.patch +++ b/queue-6.1/io_uring-get-rid-of-remap_pfn_range-for-mapping-rings-sqes.patch @@ -1,4 +1,4 @@ -From 6c7c93b9211b6a6c625d4e8102659470742cb27f Mon Sep 17 00:00:00 2001 +From 8c3571a1ebeca6037fa49f6192fd8fe45c61245c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 13 Mar 2024 09:56:14 -0600 Subject: io_uring: get rid of remap_pfn_range() for mapping rings/sqes @@ -24,9 +24,9 @@ Signed-off-by: Greg Kroah-Hartman --- include/linux/io_uring_types.h | 5 + include/uapi/linux/io_uring.h | 1 - io_uring/io_uring.c | 132 ++++++++++++++++++++++++++++++++++++----- + io_uring/io_uring.c | 133 ++++++++++++++++++++++++++++++++++++----- io_uring/io_uring.h | 2 - 4 files changed, 124 insertions(+), 16 deletions(-) + 4 files changed, 125 insertions(+), 16 deletions(-) --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -54,7 +54,15 @@ Signed-off-by: Greg Kroah-Hartman * Filled with the offset for mmap(2) --- a/io_uring/io_uring.c 
+++ b/io_uring/io_uring.c -@@ -2513,37 +2513,118 @@ static int io_cqring_wait(struct io_ring +@@ -71,6 +71,7 @@ + #include + #include + #include ++#include + #include + + #define CREATE_TRACE_POINTS +@@ -2513,37 +2514,118 @@ static int io_cqring_wait(struct io_ring return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0; } @@ -132,14 +140,14 @@ Signed-off-by: Greg Kroah-Hartman - gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP; void *ret; + int i; -+ + +- ret = (void *) __get_free_pages(gfp, get_order(size)); + for (i = 0; i < nr_pages; i++) { + pages[i] = alloc_page(gfp); + if (!pages[i]) + goto err; + } - -- ret = (void *) __get_free_pages(gfp, get_order(size)); ++ + ret = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL); if (ret) return ret; @@ -183,7 +191,7 @@ Signed-off-by: Greg Kroah-Hartman static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries, unsigned int cq_entries, size_t *sq_offset) { -@@ -3125,11 +3206,9 @@ static void *io_uring_validate_mmap_requ +@@ -3125,11 +3207,9 @@ static void *io_uring_validate_mmap_requ switch (offset) { case IORING_OFF_SQ_RING: case IORING_OFF_CQ_RING: @@ -197,7 +205,7 @@ Signed-off-by: Greg Kroah-Hartman default: return ERR_PTR(-EINVAL); } -@@ -3141,11 +3220,22 @@ static void *io_uring_validate_mmap_requ +@@ -3141,11 +3221,22 @@ static void *io_uring_validate_mmap_requ return ptr; } @@ -220,7 +228,7 @@ Signed-off-by: Greg Kroah-Hartman unsigned long pfn; void *ptr; -@@ -3153,6 +3243,16 @@ static __cold int io_uring_mmap(struct f +@@ -3153,6 +3244,16 @@ static __cold int io_uring_mmap(struct f if (IS_ERR(ptr)) return PTR_ERR(ptr); @@ -237,7 +245,7 @@ Signed-off-by: Greg Kroah-Hartman pfn = virt_to_phys(ptr) >> PAGE_SHIFT; return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot); } -@@ -3443,7 +3543,7 @@ static __cold int io_allocate_scq_urings +@@ -3443,7 +3544,7 @@ static __cold int io_allocate_scq_urings if (size == SIZE_MAX) return -EOVERFLOW; @@ -246,7 +254,7 @@ Signed-off-by: Greg Kroah-Hartman if (IS_ERR(rings)) return PTR_ERR(rings); -@@ -3463,7 +3563,7 @@ static __cold int io_allocate_scq_urings +@@ -3463,7 +3564,7 @@ static __cold int io_allocate_scq_urings return -EOVERFLOW; } -- 2.47.2
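[Illustrative sketch, not part of the patch above: the hunks move the ring and SQE allocations away from __get_free_pages() + remap_pfn_range() and over to a page array that is tried as a compound allocation first (io_mem_alloc_compound()) and otherwise filled page by page and vmap()'d (io_mem_alloc_single(), wired up through io_pages_map()). A minimal, self-contained sketch of that fallback pattern follows; the example_* names and the exact error handling are hypothetical and are not taken from the patch.]

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Sketch of the alloc_page() + vmap() scheme the backport moves to. */
static void *example_pages_map(struct page **pages, int nr_pages, gfp_t gfp)
{
	void *ret;
	int i;

	/* Allocate the backing pages one at a time (compound fast path omitted). */
	for (i = 0; i < nr_pages; i++) {
		pages[i] = alloc_page(gfp);
		if (!pages[i])
			goto err;
	}

	/* Map the page array into one contiguous kernel virtual range. */
	ret = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (ret)
		return ret;
err:
	/* Unwind whatever was allocated before the failure. */
	while (i--)
		__free_page(pages[i]);
	return ERR_PTR(-ENOMEM);
}

/* Matching teardown: drop the kernel mapping first, then the pages. */
static void example_pages_unmap(void *ptr, struct page **pages, int nr_pages)
{
	vunmap(ptr);
	while (nr_pages--)
		__free_page(pages[nr_pages]);
}

Because the rings and SQEs are then backed by an ordinary page array with a vmap()'d kernel view, the mmap() path can insert those same pages into the user mapping directly (vm_insert_page()-style) for the SQ/CQ ring and SQE offsets, rather than remapping a physically contiguous allocation with remap_pfn_range().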