]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
RDMA/rxe: Remove redundant page presence check
authorDaisuke Matsuda <dskmtsd@gmail.com>
Wed, 11 Jun 2025 16:27:58 +0000 (16:27 +0000)
committerLeon Romanovsky <leon@kernel.org>
Thu, 12 Jun 2025 11:07:40 +0000 (07:07 -0400)
hmm_pfn_to_page() does not return NULL, and ib_umem_odp_map_dma_and_lock()
already returns an error if the target pages cannot be mapped before the
timeout expires, so these NULL-page checks can safely be removed.

Reviewed-by: Zhu Yanjun <yanjun.zhu@linux.dev>
Signed-off-by: Daisuke Matsuda <dskmtsd@gmail.com>
Link: https://patch.msgid.link/20250611162758.10000-1-dskmtsd@gmail.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/sw/rxe/rxe_odp.c

index 6313680e9d40d47f566b8bf021b5f967769b5988..01a59d3f8ed477bcd58fd846be7a35678363269f 100644 (file)
@@ -203,8 +203,6 @@ static int __rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
 
                page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
                user_va = kmap_local_page(page);
-               if (!user_va)
-                       return -EFAULT;
 
                src = (dir == RXE_TO_MR_OBJ) ? addr : user_va;
                dest = (dir == RXE_TO_MR_OBJ) ? user_va : addr;
@@ -283,17 +281,15 @@ static enum resp_states rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova,
                return RESPST_ERR_RKEY_VIOLATION;
        }
 
-       idx = rxe_odp_iova_to_index(umem_odp, iova);
        page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
-       page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
-       if (!page)
-               return RESPST_ERR_RKEY_VIOLATION;
-
        if (unlikely(page_offset & 0x7)) {
                rxe_dbg_mr(mr, "iova not aligned\n");
                return RESPST_ERR_MISALIGNED_ATOMIC;
        }
 
+       idx = rxe_odp_iova_to_index(umem_odp, iova);
+       page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
+
        va = kmap_local_page(page);
 
        spin_lock_bh(&atomic_ops_lock);
@@ -352,10 +348,6 @@ int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
                page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
 
                page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
-               if (!page) {
-                       mutex_unlock(&umem_odp->umem_mutex);
-                       return -EFAULT;
-               }
 
                bytes = min_t(unsigned int, length,
                              mr_page_size(mr) - page_offset);
@@ -396,12 +388,6 @@ enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
                return RESPST_ERR_RKEY_VIOLATION;
 
        page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
-       index = rxe_odp_iova_to_index(umem_odp, iova);
-       page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
-       if (!page) {
-               mutex_unlock(&umem_odp->umem_mutex);
-               return RESPST_ERR_RKEY_VIOLATION;
-       }
        /* See IBA A19.4.2 */
        if (unlikely(page_offset & 0x7)) {
                mutex_unlock(&umem_odp->umem_mutex);
@@ -409,6 +395,9 @@ enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
                return RESPST_ERR_MISALIGNED_ATOMIC;
        }
 
+       index = rxe_odp_iova_to_index(umem_odp, iova);
+       page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
+
        va = kmap_local_page(page);
        /* Do atomic write after all prior operations have completed */
        smp_store_release(&va[page_offset >> 3], value);