git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.14-stable patches
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 26 Sep 2015 17:30:42 +0000 (10:30 -0700)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 26 Sep 2015 17:30:42 +0000 (10:30 -0700)
added patches:
xen-gntdev-convert-priv-lock-to-a-mutex.patch

queue-3.14/series
queue-3.14/xen-gntdev-convert-priv-lock-to-a-mutex.patch [new file with mode: 0644]

index 5ec0d8806a0a01a5d5292389e8e06977d18ba455..4c21b681aa613665e5596c16c068e2042cdd562d 100644 (file)
@@ -31,3 +31,4 @@ vmscan-fix-increasing-nr_isolated-incurred-by-putback-unevictable-pages.patch
 fs-if-a-coredump-already-exists-unlink-and-recreate-with-o_excl.patch
 mmc-core-fix-race-condition-in-mmc_wait_data_done.patch
 md-raid10-always-set-reshape_safe-when-initializing-reshape_position.patch
+xen-gntdev-convert-priv-lock-to-a-mutex.patch
diff --git a/queue-3.14/xen-gntdev-convert-priv-lock-to-a-mutex.patch b/queue-3.14/xen-gntdev-convert-priv-lock-to-a-mutex.patch
new file mode 100644 (file)
index 0000000..97ee788
--- /dev/null
@@ -0,0 +1,181 @@
+From 1401c00e59ea021c575f74612fe2dbba36d6a4ee Mon Sep 17 00:00:00 2001
+From: David Vrabel <david.vrabel@citrix.com>
+Date: Fri, 9 Jan 2015 18:06:12 +0000
+Subject: xen/gntdev: convert priv->lock to a mutex
+
+From: David Vrabel <david.vrabel@citrix.com>
+
+commit 1401c00e59ea021c575f74612fe2dbba36d6a4ee upstream.
+
+Unmapping may require sleeping and we unmap while holding priv->lock, so
+convert it to a mutex.
+
+Signed-off-by: David Vrabel <david.vrabel@citrix.com>
+Reviewed-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
+Cc: Ian Campbell <ian.campbell@citrix.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/xen/gntdev.c |   40 ++++++++++++++++++++--------------------
+ 1 file changed, 20 insertions(+), 20 deletions(-)
+
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -67,7 +67,7 @@ struct gntdev_priv {
+        * Only populated if populate_freeable_maps == 1 */
+       struct list_head freeable_maps;
+       /* lock protects maps and freeable_maps */
+-      spinlock_t lock;
++      struct mutex lock;
+       struct mm_struct *mm;
+       struct mmu_notifier mn;
+ };
+@@ -216,9 +216,9 @@ static void gntdev_put_map(struct gntdev
+       }
+       if (populate_freeable_maps && priv) {
+-              spin_lock(&priv->lock);
++              mutex_lock(&priv->lock);
+               list_del(&map->next);
+-              spin_unlock(&priv->lock);
++              mutex_unlock(&priv->lock);
+       }
+       if (map->pages && !use_ptemod)
+@@ -387,9 +387,9 @@ static void gntdev_vma_close(struct vm_a
+                * not do any unmapping, since that has been done prior to
+                * closing the vma, but it may still iterate the unmap_ops list.
+                */
+-              spin_lock(&priv->lock);
++              mutex_lock(&priv->lock);
+               map->vma = NULL;
+-              spin_unlock(&priv->lock);
++              mutex_unlock(&priv->lock);
+       }
+       vma->vm_private_data = NULL;
+       gntdev_put_map(priv, map);
+@@ -433,14 +433,14 @@ static void mn_invl_range_start(struct m
+       struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
+       struct grant_map *map;
+-      spin_lock(&priv->lock);
++      mutex_lock(&priv->lock);
+       list_for_each_entry(map, &priv->maps, next) {
+               unmap_if_in_range(map, start, end);
+       }
+       list_for_each_entry(map, &priv->freeable_maps, next) {
+               unmap_if_in_range(map, start, end);
+       }
+-      spin_unlock(&priv->lock);
++      mutex_unlock(&priv->lock);
+ }
+ static void mn_invl_page(struct mmu_notifier *mn,
+@@ -457,7 +457,7 @@ static void mn_release(struct mmu_notifi
+       struct grant_map *map;
+       int err;
+-      spin_lock(&priv->lock);
++      mutex_lock(&priv->lock);
+       list_for_each_entry(map, &priv->maps, next) {
+               if (!map->vma)
+                       continue;
+@@ -476,7 +476,7 @@ static void mn_release(struct mmu_notifi
+               err = unmap_grant_pages(map, /* offset */ 0, map->count);
+               WARN_ON(err);
+       }
+-      spin_unlock(&priv->lock);
++      mutex_unlock(&priv->lock);
+ }
+ static struct mmu_notifier_ops gntdev_mmu_ops = {
+@@ -498,7 +498,7 @@ static int gntdev_open(struct inode *ino
+       INIT_LIST_HEAD(&priv->maps);
+       INIT_LIST_HEAD(&priv->freeable_maps);
+-      spin_lock_init(&priv->lock);
++      mutex_init(&priv->lock);
+       if (use_ptemod) {
+               priv->mm = get_task_mm(current);
+@@ -574,10 +574,10 @@ static long gntdev_ioctl_map_grant_ref(s
+               return -EFAULT;
+       }
+-      spin_lock(&priv->lock);
++      mutex_lock(&priv->lock);
+       gntdev_add_map(priv, map);
+       op.index = map->index << PAGE_SHIFT;
+-      spin_unlock(&priv->lock);
++      mutex_unlock(&priv->lock);
+       if (copy_to_user(u, &op, sizeof(op)) != 0)
+               return -EFAULT;
+@@ -596,7 +596,7 @@ static long gntdev_ioctl_unmap_grant_ref
+               return -EFAULT;
+       pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
+-      spin_lock(&priv->lock);
++      mutex_lock(&priv->lock);
+       map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
+       if (map) {
+               list_del(&map->next);
+@@ -604,7 +604,7 @@ static long gntdev_ioctl_unmap_grant_ref
+                       list_add_tail(&map->next, &priv->freeable_maps);
+               err = 0;
+       }
+-      spin_unlock(&priv->lock);
++      mutex_unlock(&priv->lock);
+       if (map)
+               gntdev_put_map(priv, map);
+       return err;
+@@ -672,7 +672,7 @@ static long gntdev_ioctl_notify(struct g
+       out_flags = op.action;
+       out_event = op.event_channel_port;
+-      spin_lock(&priv->lock);
++      mutex_lock(&priv->lock);
+       list_for_each_entry(map, &priv->maps, next) {
+               uint64_t begin = map->index << PAGE_SHIFT;
+@@ -700,7 +700,7 @@ static long gntdev_ioctl_notify(struct g
+       rc = 0;
+  unlock_out:
+-      spin_unlock(&priv->lock);
++      mutex_unlock(&priv->lock);
+       /* Drop the reference to the event channel we did not save in the map */
+       if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
+@@ -750,7 +750,7 @@ static int gntdev_mmap(struct file *flip
+       pr_debug("map %d+%d at %lx (pgoff %lx)\n",
+                       index, count, vma->vm_start, vma->vm_pgoff);
+-      spin_lock(&priv->lock);
++      mutex_lock(&priv->lock);
+       map = gntdev_find_map_index(priv, index, count);
+       if (!map)
+               goto unlock_out;
+@@ -785,7 +785,7 @@ static int gntdev_mmap(struct file *flip
+                       map->flags |= GNTMAP_readonly;
+       }
+-      spin_unlock(&priv->lock);
++      mutex_unlock(&priv->lock);
+       if (use_ptemod) {
+               err = apply_to_page_range(vma->vm_mm, vma->vm_start,
+@@ -813,11 +813,11 @@ static int gntdev_mmap(struct file *flip
+       return 0;
+ unlock_out:
+-      spin_unlock(&priv->lock);
++      mutex_unlock(&priv->lock);
+       return err;
+ out_unlock_put:
+-      spin_unlock(&priv->lock);
++      mutex_unlock(&priv->lock);
+ out_put_map:
+       if (use_ptemod)
+               map->vma = NULL;