 		page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
 		user_va = kmap_local_page(page);
-		if (!user_va)
-			return -EFAULT;
 		src = (dir == RXE_TO_MR_OBJ) ? addr : user_va;
 		dest = (dir == RXE_TO_MR_OBJ) ? user_va : addr;
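
kmap_local_page() always returns a usable mapping and cannot return NULL, so the user_va check removed above was dead code. For reference, here is a minimal userspace sketch (not part of the patch; all names are illustrative) of the direction-select copy that the surviving lines perform:

/* Userspace sketch only; copy_step(), TO_MR/FROM_MR are stand-ins. */
#include <stdio.h>
#include <string.h>

enum copy_dir { TO_MR, FROM_MR };	/* stands in for the MR copy direction */

static void copy_step(char *mr_va, char *pkt_buf, size_t len, enum copy_dir dir)
{
	/* one memcpy whose endpoints depend on the transfer direction */
	char *src  = (dir == TO_MR) ? pkt_buf : mr_va;
	char *dest = (dir == TO_MR) ? mr_va  : pkt_buf;

	memcpy(dest, src, len);
}

int main(void)
{
	char mr[16]  = "old contents";
	char buf[16] = "new contents";

	copy_step(mr, buf, sizeof(mr), TO_MR);
	printf("%s\n", mr);	/* prints "new contents" */
	return 0;
}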
 		return RESPST_ERR_RKEY_VIOLATION;
 	}
-	idx = rxe_odp_iova_to_index(umem_odp, iova);
 	page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
-	page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
-	if (!page)
-		return RESPST_ERR_RKEY_VIOLATION;
-
 	if (unlikely(page_offset & 0x7)) {
 		rxe_dbg_mr(mr, "iova not aligned\n");
 		return RESPST_ERR_MISALIGNED_ATOMIC;
 	}
+	idx = rxe_odp_iova_to_index(umem_odp, iova);
+	page = hmm_pfn_to_page(umem_odp->map.pfn_list[idx]);
+
 	va = kmap_local_page(page);
 	spin_lock_bh(&atomic_ops_lock);
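
With this reordering, the atomic-op path rejects a misaligned iova before it touches the pfn list, maps the page, or takes atomic_ops_lock. A self-contained userspace sketch of that flow, with a pthread mutex standing in for atomic_ops_lock and the compare-swap/fetch-add handling assumed (it sits below this hunk and is not shown):

/* Userspace sketch only; do_atomic_op() and ops_lock are stand-ins. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t ops_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 0 on success, -1 for a misaligned offset. */
static int do_atomic_op(uint8_t *page, unsigned int page_offset, int cmp_swp,
			uint64_t compare, uint64_t swap_add, uint64_t *orig_val)
{
	uint64_t *va;

	if (page_offset & 0x7)			/* reject before any page work */
		return -1;

	va = (uint64_t *)(page + page_offset);	/* page lookup happens only now */

	pthread_mutex_lock(&ops_lock);
	*orig_val = *va;
	if (cmp_swp) {
		if (*va == compare)
			*va = swap_add;
	} else {
		*va += swap_add;
	}
	pthread_mutex_unlock(&ops_lock);
	return 0;
}

int main(void)
{
	_Alignas(8) uint8_t page[64] = { 0 };
	uint64_t orig;

	do_atomic_op(page, 8, 0, 0, 5, &orig);	/* fetch-and-add 5 */
	do_atomic_op(page, 8, 1, 5, 9, &orig);	/* compare 5, swap in 9 */
	printf("orig=%llu\n", (unsigned long long)orig);
	return 0;
}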
 		page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
 		page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
-		if (!page) {
-			mutex_unlock(&umem_odp->umem_mutex);
-			return -EFAULT;
-		}
 		bytes = min_t(unsigned int, length,
 			      mr_page_size(mr) - page_offset);
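
Removing the page check leaves the per-page chunking of this loop unchanged: each iteration covers at most the remainder of the current MR page, which is exactly what the min_t() above computes. A userspace sketch of that address math, assuming a 4 KiB MR page size and reducing the iova-to-index/offset helpers to a plain divide and mask:

/* Userspace sketch only; MR_PAGE_SIZE and the helpers are simplified. */
#include <stdint.h>
#include <stdio.h>

#define MR_PAGE_SIZE	4096u
#define MR_PAGE_MASK	(MR_PAGE_SIZE - 1)

int main(void)
{
	uint64_t iova = 0x10000ff0;	/* starts 16 bytes before a page boundary */
	unsigned int length = 40;

	while (length > 0) {
		uint64_t index = iova / MR_PAGE_SIZE;		/* ~ rxe_odp_iova_to_index() */
		unsigned int page_offset = iova & MR_PAGE_MASK;	/* ~ rxe_odp_iova_to_page_offset() */
		unsigned int bytes = length;

		if (bytes > MR_PAGE_SIZE - page_offset)
			bytes = MR_PAGE_SIZE - page_offset;	/* the min_t() in the hunk */

		printf("index=%llu offset=%u bytes=%u\n",
		       (unsigned long long)index, page_offset, bytes);

		length -= bytes;
		iova += bytes;
	}
	return 0;
}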
 		return RESPST_ERR_RKEY_VIOLATION;
 	page_offset = rxe_odp_iova_to_page_offset(umem_odp, iova);
-	index = rxe_odp_iova_to_index(umem_odp, iova);
-	page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
-	if (!page) {
-		mutex_unlock(&umem_odp->umem_mutex);
-		return RESPST_ERR_RKEY_VIOLATION;
-	}
 	/* See IBA A19.4.2 */
 	if (unlikely(page_offset & 0x7)) {
 		mutex_unlock(&umem_odp->umem_mutex);
 		return RESPST_ERR_MISALIGNED_ATOMIC;
 	}
+	index = rxe_odp_iova_to_index(umem_odp, iova);
+	page = hmm_pfn_to_page(umem_odp->map.pfn_list[index]);
+
 	va = kmap_local_page(page);
 	/* Do atomic write after all prior operations have completed */
 	smp_store_release(&va[page_offset >> 3], value);
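
Because the pfn lookup now happens only after the 8-byte alignment of page_offset has been verified, page_offset >> 3 safely indexes the mapped page as an array of u64, and the release store orders the atomic write after all prior stores, as the comment says. A userspace analogue using C11 atomics in place of smp_store_release (names illustrative, not the driver code):

/* Userspace sketch only; C11 release store standing in for smp_store_release(). */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	_Alignas(8) uint8_t page[4096] = { 0 };
	unsigned int page_offset = 24;		/* already validated: multiple of 8 */
	uint64_t value = 0x1122334455667788ULL;
	_Atomic uint64_t *va = (_Atomic uint64_t *)page;

	/* counterpart of: smp_store_release(&va[page_offset >> 3], value) */
	atomic_store_explicit(&va[page_offset >> 3], value, memory_order_release);

	printf("u64 slot %u = 0x%llx\n", page_offset >> 3,
	       (unsigned long long)atomic_load_explicit(&va[page_offset >> 3],
							 memory_order_acquire));
	return 0;
}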