git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
delete queue-4.12/drm-i915-gvt-fix-possible-recursive-locking-issue.patch
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 25 Jul 2017 14:07:37 +0000 (07:07 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 25 Jul 2017 14:07:37 +0000 (07:07 -0700)
queue-4.12/drm-i915-gvt-fix-inconsistent-locks-holding-sequence.patch
queue-4.12/drm-i915-gvt-fix-possible-recursive-locking-issue.patch [deleted file]
queue-4.12/series

diff --git a/queue-4.12/drm-i915-gvt-fix-inconsistent-locks-holding-sequence.patch b/queue-4.12/drm-i915-gvt-fix-inconsistent-locks-holding-sequence.patch
index 23f037a5766435226af809afc8728394e2069478..3f28d96713754a04b9026d28f7f6db09deec82a9 100644
--- a/queue-4.12/drm-i915-gvt-fix-inconsistent-locks-holding-sequence.patch
+++ b/queue-4.12/drm-i915-gvt-fix-inconsistent-locks-holding-sequence.patch
@@ -36,7 +36,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 
 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c
 +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
-@@ -260,16 +260,20 @@ static void gvt_cache_destroy(struct int
+@@ -232,16 +232,20 @@ static void gvt_cache_destroy(struct int
        struct device *dev = mdev_dev(vgpu->vdev.mdev);
        unsigned long gfn;
  
diff --git a/queue-4.12/drm-i915-gvt-fix-possible-recursive-locking-issue.patch b/queue-4.12/drm-i915-gvt-fix-possible-recursive-locking-issue.patch
deleted file mode 100644
index 53b5a79..0000000
--- a/queue-4.12/drm-i915-gvt-fix-possible-recursive-locking-issue.patch
+++ /dev/null
@@ -1,157 +0,0 @@
-From 62d02fd1f807bf5a259a242c483c9fb98a242630 Mon Sep 17 00:00:00 2001
-From: Chuanxiao Dong <chuanxiao.dong@intel.com>
-Date: Mon, 26 Jun 2017 15:20:49 +0800
-Subject: drm/i915/gvt: Fix possible recursive locking issue
-
-From: Chuanxiao Dong <chuanxiao.dong@intel.com>
-
-commit 62d02fd1f807bf5a259a242c483c9fb98a242630 upstream.
-
-vfio_unpin_pages will hold a read semaphore; however, it is already held
-in the same thread by the vfio ioctl. This causes the warning below:
-
-[ 5102.127454] ============================================
-[ 5102.133379] WARNING: possible recursive locking detected
-[ 5102.139304] 4.12.0-rc4+ #3 Not tainted
-[ 5102.143483] --------------------------------------------
-[ 5102.149407] qemu-system-x86/1620 is trying to acquire lock:
-[ 5102.155624]  (&container->group_lock){++++++}, at: [<ffffffff817768c6>] vfio_unpin_pages+0x96/0xf0
-[ 5102.165626]
-but task is already holding lock:
-[ 5102.172134]  (&container->group_lock){++++++}, at: [<ffffffff8177728f>] vfio_fops_unl_ioctl+0x5f/0x280
-[ 5102.182522]
-other info that might help us debug this:
-[ 5102.189806]  Possible unsafe locking scenario:
-
-[ 5102.196411]        CPU0
-[ 5102.199136]        ----
-[ 5102.201861]   lock(&container->group_lock);
-[ 5102.206527]   lock(&container->group_lock);
-[ 5102.211191]
----
- drivers/gpu/drm/i915/gvt/gvt.h   |    3 ++
- drivers/gpu/drm/i915/gvt/kvmgt.c |   55 +++++++++++++++++++++++++++++++--------
- 2 files changed, 48 insertions(+), 10 deletions(-)
-
---- a/drivers/gpu/drm/i915/gvt/gvt.h
-+++ b/drivers/gpu/drm/i915/gvt/gvt.h
-@@ -183,6 +183,9 @@ struct intel_vgpu {
-               struct kvm *kvm;
-               struct work_struct release_work;
-               atomic_t released;
-+              struct work_struct unpin_work;
-+              spinlock_t unpin_lock; /* To protect unpin_list */
-+              struct list_head unpin_list;
-       } vdev;
- #endif
- };
---- a/drivers/gpu/drm/i915/gvt/kvmgt.c
-+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
-@@ -78,6 +78,7 @@ struct gvt_dma {
-       struct rb_node node;
-       gfn_t gfn;
-       unsigned long iova;
-+      struct list_head list;
- };
- static inline bool handle_valid(unsigned long handle)
-@@ -166,6 +167,7 @@ static void gvt_cache_add(struct intel_v
-       new->gfn = gfn;
-       new->iova = iova;
-+      INIT_LIST_HEAD(&new->list);
-       mutex_lock(&vgpu->vdev.cache_lock);
-       while (*link) {
-@@ -197,26 +199,52 @@ static void __gvt_cache_remove_entry(str
-       kfree(entry);
- }
--static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
-+static void intel_vgpu_unpin_work(struct work_struct *work)
- {
-+      struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
-+                                             vdev.unpin_work);
-       struct device *dev = mdev_dev(vgpu->vdev.mdev);
-       struct gvt_dma *this;
--      unsigned long g1;
--      int rc;
-+      unsigned long gfn;
-+
-+      for (;;) {
-+              spin_lock(&vgpu->vdev.unpin_lock);
-+              if (list_empty(&vgpu->vdev.unpin_list)) {
-+                      spin_unlock(&vgpu->vdev.unpin_lock);
-+                      break;
-+              }
-+              this = list_first_entry(&vgpu->vdev.unpin_list,
-+                                      struct gvt_dma, list);
-+              list_del(&this->list);
-+              spin_unlock(&vgpu->vdev.unpin_lock);
-+
-+              gfn = this->gfn;
-+              vfio_unpin_pages(dev, &gfn, 1);
-+              kfree(this);
-+      }
-+}
-+
-+static bool gvt_cache_mark_remove(struct intel_vgpu *vgpu, gfn_t gfn)
-+{
-+      struct gvt_dma *this;
-       mutex_lock(&vgpu->vdev.cache_lock);
-       this  = __gvt_cache_find(vgpu, gfn);
-       if (!this) {
-               mutex_unlock(&vgpu->vdev.cache_lock);
--              return;
-+              return false;
-       }
--
--      g1 = gfn;
-       gvt_dma_unmap_iova(vgpu, this->iova);
--      rc = vfio_unpin_pages(dev, &g1, 1);
--      WARN_ON(rc != 1);
--      __gvt_cache_remove_entry(vgpu, this);
-+      /* remove this from rb tree */
-+      rb_erase(&this->node, &vgpu->vdev.cache);
-       mutex_unlock(&vgpu->vdev.cache_lock);
-+
-+      /* put this to the unpin_list */
-+      spin_lock(&vgpu->vdev.unpin_lock);
-+      list_move_tail(&this->list, &vgpu->vdev.unpin_list);
-+      spin_unlock(&vgpu->vdev.unpin_lock);
-+
-+      return true;
- }
- static void gvt_cache_init(struct intel_vgpu *vgpu)
-@@ -453,6 +481,9 @@ static int intel_vgpu_create(struct kobj
-       }
-       INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
-+      INIT_WORK(&vgpu->vdev.unpin_work, intel_vgpu_unpin_work);
-+      spin_lock_init(&vgpu->vdev.unpin_lock);
-+      INIT_LIST_HEAD(&vgpu->vdev.unpin_list);
-       vgpu->vdev.mdev = mdev;
-       mdev_set_drvdata(mdev, vgpu);
-@@ -482,6 +513,7 @@ static int intel_vgpu_iommu_notifier(str
-       struct intel_vgpu *vgpu = container_of(nb,
-                                       struct intel_vgpu,
-                                       vdev.iommu_notifier);
-+      bool sched_unmap = false;
-       if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
-               struct vfio_iommu_type1_dma_unmap *unmap = data;
-@@ -491,7 +523,10 @@ static int intel_vgpu_iommu_notifier(str
-               end_gfn = gfn + unmap->size / PAGE_SIZE;
-               while (gfn < end_gfn)
--                      gvt_cache_remove(vgpu, gfn++);
-+                      sched_unmap |= gvt_cache_mark_remove(vgpu, gfn++);
-+
-+              if (sched_unmap)
-+                      schedule_work(&vgpu->vdev.unpin_work);
-       }
-       return NOTIFY_OK;
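For context, the deleted patch avoided the recursive down_read() on container->group_lock by not calling vfio_unpin_pages() directly from the IOMMU notifier path (which runs while the vfio ioctl already holds that lock) and instead deferring the unpin to a workqueue. The following is a minimal, self-contained sketch of that deferred-unpin pattern. It is illustrative only: struct vgpu_unpin_ctx, struct unpin_entry, unpin_work_fn(), mark_gfn_for_unpin() and unpin_ctx_init() are made-up names that stand in for the patch's intel_vgpu fields and helpers, and the sketch allocates a fresh list entry rather than moving an existing cache entry the way the real code does.

/*
 * Sketch of the deferred-unpin pattern used by the deleted patch.
 * The context struct is a simplified stand-in for intel_vgpu->vdev;
 * only the fields the pattern needs are shown.
 */
#include <linux/device.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vfio.h>
#include <linux/workqueue.h>

struct unpin_entry {
	unsigned long gfn;
	struct list_head list;
};

struct vgpu_unpin_ctx {
	struct device *dev;             /* mdev device passed to vfio_unpin_pages() */
	struct work_struct unpin_work;  /* drains unpin_list outside the ioctl path */
	spinlock_t unpin_lock;          /* protects unpin_list */
	struct list_head unpin_list;
};

/*
 * Runs in workqueue context, outside the vfio ioctl that holds
 * container->group_lock, so vfio_unpin_pages() cannot recurse on it.
 */
static void unpin_work_fn(struct work_struct *work)
{
	struct vgpu_unpin_ctx *ctx =
		container_of(work, struct vgpu_unpin_ctx, unpin_work);
	struct unpin_entry *e;

	for (;;) {
		spin_lock(&ctx->unpin_lock);
		if (list_empty(&ctx->unpin_list)) {
			spin_unlock(&ctx->unpin_lock);
			break;
		}
		e = list_first_entry(&ctx->unpin_list, struct unpin_entry, list);
		list_del(&e->list);
		spin_unlock(&ctx->unpin_lock);

		vfio_unpin_pages(ctx->dev, &e->gfn, 1);
		kfree(e);
	}
}

/*
 * Called from the notifier path (group_lock already held by the caller):
 * only queue the gfn and kick the worker; no unpin happens here.
 */
static void mark_gfn_for_unpin(struct vgpu_unpin_ctx *ctx, unsigned long gfn)
{
	struct unpin_entry *e = kzalloc(sizeof(*e), GFP_ATOMIC);

	if (!e)
		return;
	e->gfn = gfn;

	spin_lock(&ctx->unpin_lock);
	list_add_tail(&e->list, &ctx->unpin_list);
	spin_unlock(&ctx->unpin_lock);

	schedule_work(&ctx->unpin_work);
}

static void unpin_ctx_init(struct vgpu_unpin_ctx *ctx, struct device *dev)
{
	ctx->dev = dev;
	INIT_WORK(&ctx->unpin_work, unpin_work_fn);
	spin_lock_init(&ctx->unpin_lock);
	INIT_LIST_HEAD(&ctx->unpin_list);
}

In the actual patch, gvt_cache_mark_remove() additionally unmaps the IOVA and removes the entry from the rb-tree cache under cache_lock before moving it onto unpin_list, and the notifier schedules the work only when at least one entry was queued.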
diff --git a/queue-4.12/series b/queue-4.12/series
index 50164269bde4f51d9d1a40146da8abdb877ff6e1..cfd8b10beddbd4e46dc59d85c2c9a51da55df051 100644
--- a/queue-4.12/series
+++ b/queue-4.12/series
@@ -119,7 +119,6 @@ vfio-remove-unnecessary-uses-of-vfio_container.group_lock.patch
 nvme-rdma-remove-race-conditions-from-ib-signalling.patch
 ftrace-fix-uninitialized-variable-in-match_records.patch
 iommu-arm-smmu-plumb-in-new-acpi-identifiers.patch
-drm-i915-gvt-fix-possible-recursive-locking-issue.patch
 drm-i915-gvt-fix-inconsistent-locks-holding-sequence.patch
 drm-atomic-add-missing-drm_atomic_state_clear-to-atomic_remove_fb.patch
 mips-fix-mips_atomic_set-retry-condition.patch