git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.10
author    Sasha Levin <sashal@kernel.org>
          Thu, 15 Aug 2024 12:20:09 +0000 (08:20 -0400)
committer Sasha Levin <sashal@kernel.org>
          Thu, 15 Aug 2024 12:20:09 +0000 (08:20 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.10/series
queue-5.10/vdpa-make-use-of-pfn_phys-pfn_up-pfn_down-helper-mac.patch [new file with mode: 0644]
queue-5.10/vhost-vdpa-switch-to-use-vmf_insert_pfn-in-the-fault.patch [new file with mode: 0644]

diff --git a/queue-5.10/series b/queue-5.10/series
index 08a303d7d2f73d46f2d9029f91c5e33e0a87a771..644021554dd4bccb5832deba0db5a96743387e5e 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -346,3 +346,5 @@ arm64-cpufeature-fix-the-visibility-of-compat-hwcaps.patch
 media-uvcvideo-use-entity-get_cur-in-uvc_ctrl_set.patch
 exec-fix-toctou-between-perm-check-and-set-uid-gid-usage.patch
 nvme-pci-add-apst-quirk-for-lenovo-n60z-laptop.patch
+vdpa-make-use-of-pfn_phys-pfn_up-pfn_down-helper-mac.patch
+vhost-vdpa-switch-to-use-vmf_insert_pfn-in-the-fault.patch
diff --git a/queue-5.10/vdpa-make-use-of-pfn_phys-pfn_up-pfn_down-helper-mac.patch b/queue-5.10/vdpa-make-use-of-pfn_phys-pfn_up-pfn_down-helper-mac.patch
new file mode 100644
index 0000000..c822d7e
--- /dev/null
+++ b/queue-5.10/vdpa-make-use-of-pfn_phys-pfn_up-pfn_down-helper-mac.patch
@@ -0,0 +1,113 @@
+From e5e838cc00a8facc70cd18f2ba84495c3ebf81e2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Aug 2021 09:37:17 +0800
+Subject: vdpa: Make use of PFN_PHYS/PFN_UP/PFN_DOWN helper macro
+
+From: Cai Huoqing <caihuoqing@baidu.com>
+
+[ Upstream commit 729ce5a5bd6fda5eb2322a39db2287f1f26f92f3 ]
+
+It's a nice refactor to make use of the
+PFN_PHYS/PFN_UP/PFN_DOWN helper macros.
+
+Signed-off-by: Cai Huoqing <caihuoqing@baidu.com>
+Link: https://lore.kernel.org/r/20210802013717.851-1-caihuoqing@baidu.com
+Acked-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Stable-dep-of: 0823dc64586b ("vhost-vdpa: switch to use vmf_insert_pfn() in the fault handler")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vhost/vdpa.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index 04578aa87e4da..f48047a1027a2 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -519,15 +519,15 @@ static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+       unsigned long pfn, pinned;
+       while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
+-              pinned = map->size >> PAGE_SHIFT;
+-              for (pfn = map->addr >> PAGE_SHIFT;
++              pinned = PFN_DOWN(map->size);
++              for (pfn = PFN_DOWN(map->addr);
+                    pinned > 0; pfn++, pinned--) {
+                       page = pfn_to_page(pfn);
+                       if (map->perm & VHOST_ACCESS_WO)
+                               set_page_dirty_lock(page);
+                       unpin_user_page(page);
+               }
+-              atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
++              atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
+               vhost_iotlb_map_free(iotlb, map);
+       }
+ }
+@@ -589,7 +589,7 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
+       if (r)
+               vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
+       else
+-              atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
++              atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);
+       return r;
+ }
+@@ -643,7 +643,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+       if (msg->perm & VHOST_ACCESS_WO)
+               gup_flags |= FOLL_WRITE;
+-      npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
++      npages = PFN_UP(msg->size + (iova & ~PAGE_MASK));
+       if (!npages) {
+               ret = -EINVAL;
+               goto free;
+@@ -651,7 +651,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+       mmap_read_lock(dev->mm);
+-      lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
++      lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
+       if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
+               ret = -ENOMEM;
+               goto unlock;
+@@ -685,9 +685,9 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+                       if (last_pfn && (this_pfn != last_pfn + 1)) {
+                               /* Pin a contiguous chunk of memory */
+-                              csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
++                              csize = PFN_PHYS(last_pfn - map_pfn + 1);
+                               ret = vhost_vdpa_map(v, iova, csize,
+-                                                   map_pfn << PAGE_SHIFT,
++                                                   PFN_PHYS(map_pfn),
+                                                    msg->perm);
+                               if (ret) {
+                                       /*
+@@ -711,13 +711,13 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+                       last_pfn = this_pfn;
+               }
+-              cur_base += pinned << PAGE_SHIFT;
++              cur_base += PFN_PHYS(pinned);
+               npages -= pinned;
+       }
+       /* Pin the rest chunk */
+-      ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
+-                           map_pfn << PAGE_SHIFT, msg->perm);
++      ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
++                           PFN_PHYS(map_pfn), msg->perm);
+ out:
+       if (ret) {
+               if (nchunks) {
+@@ -961,7 +961,7 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+       if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
+-                          notify.addr >> PAGE_SHIFT, PAGE_SIZE,
++                          PFN_DOWN(notify.addr), PAGE_SIZE,
+                           vma->vm_page_prot))
+               return VM_FAULT_SIGBUS;
+-- 
+2.43.0
+
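For reference, the three helpers the patch above switches to live in
include/linux/pfn.h. The following standalone C sketch (a userspace
approximation, not kernel code; PAGE_SHIFT is assumed to be 12, i.e.
4 KiB pages, and uint64_t stands in for phys_addr_t) shows that each
substitution in the diff is a one-to-one, behavior-preserving rewrite:

#include <assert.h>
#include <stdint.h>

/* Illustrative copies of the include/linux/pfn.h helpers. */
#define PAGE_SHIFT  12
#define PAGE_SIZE   (1UL << PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT) /* bytes -> frames, rounding up */
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)                   /* bytes -> frames, truncating */
#define PFN_PHYS(x) ((uint64_t)(x) << PAGE_SHIFT)         /* frame number -> byte address */

int main(void)
{
	uint64_t size = 3 * PAGE_SIZE + 1;
	uint64_t pfn = 5;

	/* Each conversion in the patch maps to one of these identities: */
	assert(PFN_DOWN(size) == size >> PAGE_SHIFT);
	assert(PFN_UP(size) == (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	assert(PFN_PHYS(pfn) == pfn << PAGE_SHIFT);
	return 0;
}

Beyond readability, PFN_PHYS() casts to phys_addr_t before shifting, so
expressions such as map_pfn << PAGE_SHIFT (an unsigned long shift that
could truncate on 32-bit builds with 64-bit physical addresses) are
also safer after the rewrite.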
diff --git a/queue-5.10/vhost-vdpa-switch-to-use-vmf_insert_pfn-in-the-fault.patch b/queue-5.10/vhost-vdpa-switch-to-use-vmf_insert_pfn-in-the-fault.patch
new file mode 100644
index 0000000..1839892
--- /dev/null
+++ b/queue-5.10/vhost-vdpa-switch-to-use-vmf_insert_pfn-in-the-fault.patch
@@ -0,0 +1,50 @@
+From 1a1fcb806dd315f1c931b7d0c9d907b6bbda30f7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Jul 2024 11:31:59 +0800
+Subject: vhost-vdpa: switch to use vmf_insert_pfn() in the fault handler
+
+From: Jason Wang <jasowang@redhat.com>
+
+[ Upstream commit 0823dc64586ba5ea13a7d200a5d33e4c5fa45950 ]
+
+remap_pfn_range() should not be called in the fault handler, as it may
+change vma->vm_flags, which can trigger a lockdep warning since the
+vma write lock is not held. There is actually no need to modify
+vma->vm_flags, as the flags were already set in mmap(). So this patch
+switches to vmf_insert_pfn() instead.
+
+Reported-by: Dragos Tatulea <dtatulea@nvidia.com>
+Tested-by: Dragos Tatulea <dtatulea@nvidia.com>
+Fixes: ddd89d0a059d ("vhost_vdpa: support doorbell mapping via mmap")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Message-Id: <20240701033159.18133-1-jasowang@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Reviewed-by: Michal Kubiak <michal.kubiak@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vhost/vdpa.c | 8 +-------
+ 1 file changed, 1 insertion(+), 7 deletions(-)
+
+diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
+index f48047a1027a2..c9f585db1553c 100644
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -959,13 +959,7 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
+       notify = ops->get_vq_notification(vdpa, index);
+-      vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+-      if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
+-                          PFN_DOWN(notify.addr), PAGE_SIZE,
+-                          vma->vm_page_prot))
+-              return VM_FAULT_SIGBUS;
+-
+-      return VM_FAULT_NOPAGE;
++      return vmf_insert_pfn(vma, vmf->address & PAGE_MASK, PFN_DOWN(notify.addr));
+ }
+ static const struct vm_operations_struct vhost_vdpa_vm_ops = {
+-- 
+2.43.0
+
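To make the fix concrete: vmf_insert_pfn() installs a single PTE into a
VM_PFNMAP VMA and returns a vm_fault_t directly (VM_FAULT_NOPAGE on
success), so the fault path never needs to write vma->vm_flags. The
kernel-style sketch below shows the resulting pattern for a
hypothetical driver; the demo_* names and the reg_phys field are
illustrative, not part of vhost-vdpa:

#include <linux/fs.h>
#include <linux/mm.h>

struct demo_dev {
	phys_addr_t reg_phys;	/* hypothetical doorbell register address */
};

static vm_fault_t demo_fault(struct vm_fault *vmf)
{
	struct demo_dev *d = vmf->vma->vm_private_data;

	/* One page, one PFN; no vma->vm_flags writes in the fault path. */
	return vmf_insert_pfn(vmf->vma, vmf->address & PAGE_MASK,
			      PFN_DOWN(d->reg_phys));
}

static const struct vm_operations_struct demo_vm_ops = {
	.fault = demo_fault,
};

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_private_data = file->private_data;
	/* VM_PFNMAP (required by vmf_insert_pfn) and friends are set
	 * here, where modifying the VMA is safe, not in the fault path
	 * (direct vm_flags assignment is the 5.10-era idiom). */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &demo_vm_ops;
	return 0;
}

vhost_vdpa_mmap() already sets these flags, which is why the fault
handler's remap_pfn_range() call, with its implicit flag updates, was
redundant as well as unsafe.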