From: Daisuke Matsuda
Date: Wed, 11 Jun 2025 16:27:58 +0000 (+0000)
Subject: RDMA/rxe: Remove redundant page presence check
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=c81fef22020c7467c08929330009c7c613e5ff5c;p=thirdparty%2Flinux.git

RDMA/rxe: Remove redundant page presence check

hmm_pfn_to_page() never returns NULL, and ib_umem_odp_map_dma_and_lock()
returns an error if the target pages cannot be mapped before the timeout
expires, so these checks can safely be removed.

Reviewed-by: Zhu Yanjun
Signed-off-by: Daisuke Matsuda
Link: https://patch.msgid.link/20250611162758.10000-1-dskmtsd@gmail.com
Signed-off-by: Leon Romanovsky
---

diff --git a/drivers/infiniband/sw/rxe/rxe_odp.c b/drivers/infiniband/sw/rxe/rxe_odp.c
index 6313680e9d40d..01a59d3f8ed47 100644
--- a/drivers/infiniband/sw/rxe/rxe_odp.c
+++ b/drivers/infiniband/sw/rxe/rxe_odp.c
@@ -203,8 +203,6 @@ static int __rxe_odp_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
 
 		page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
 		user_va = kmap_local_page(page);
-		if (!user_va)
-			return -EFAULT;
 
 		src = (dir == RXE_TO_MR_OBJ) ? addr : user_va;
 		dest = (dir == RXE_TO_MR_OBJ) ? user_va : addr;
@@ -283,17 +281,15 @@ static enum resp_states rxe_odp_do_atomic_op(struct rxe_mr *mr, u64 iova,
 		return RESPST_ERR_RKEY_VIOLATION;
 	}
 
-	idx = rxe_odp_iova_to_index(umem_odp, iova);
 	page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
-	page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
-	if (!page)
-		return RESPST_ERR_RKEY_VIOLATION;
-
 	if (unlikely(page_offset & 0x7)) {
 		rxe_dbg_mr(mr, "iova not aligned\n");
 		return RESPST_ERR_MISALIGNED_ATOMIC;
 	}
 
+	idx = rxe_odp_iova_to_index(umem_odp, iova);
+	page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
+
 	va = kmap_local_page(page);
 
 	spin_lock_bh(&atomic_ops_lock);
@@ -352,10 +348,6 @@ int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
 
 		page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
 		page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
-		if (!page) {
-			mutex_unlock(&umem_odp->umem_mutex);
-			return -EFAULT;
-		}
 
 		bytes = min_t(unsigned int, length,
 			      mr_page_size(mr) - page_offset);
@@ -396,12 +388,6 @@ enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
 		return RESPST_ERR_RKEY_VIOLATION;
 
 	page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
-	index = rxe_odp_iova_to_index(umem_odp, iova);
-	page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
-	if (!page) {
-		mutex_unlock(&umem_odp->umem_mutex);
-		return RESPST_ERR_RKEY_VIOLATION;
-	}
 	/* See IBA A19.4.2 */
 	if (unlikely(page_offset & 0x7)) {
 		mutex_unlock(&umem_odp->umem_mutex);
@@ -409,6 +395,9 @@ enum resp_states rxe_odp_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
 		return RESPST_ERR_MISALIGNED_ATOMIC;
 	}
 
+	index = rxe_odp_iova_to_index(umem_odp, iova);
+	page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
+
 	va = kmap_local_page(page);
 	/* Do atomic write after all prior operations have completed */
 	smp_store_release(&va[page_offset >> 3], value);
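
Context for why the removed NULL checks were dead code (a minimal sketch, not part of
this patch): hmm_pfn_to_page() is a small inline helper in include/linux/hmm.h that
masks off the HMM flag/order bits and hands the bare pfn to pfn_to_page(), which
computes the struct page address arithmetically and therefore never yields NULL.
Paraphrased from memory, so the exact mask name may differ between kernel versions:

	static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn)
	{
		/* strip the HMM flag/order bits, then translate pfn -> struct page */
		return pfn_to_page(hmm_pfn & ~HMM_PFN_FLAGS);
	}

In addition, the fault path taken before each of these lookups only reports success
once ib_umem_odp_map_dma_and_lock() has populated the corresponding map.pfn_list
entries, so a present page is guaranteed by the time kmap_local_page() runs.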