4.13-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 4 Sep 2017 10:53:02 +0000 (12:53 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 4 Sep 2017 10:53:02 +0000 (12:53 +0200)
added patches:
drm-dp-mst-handle-errors-from-drm_atomic_get_private_obj_state-correctly.patch
drm-ttm-fix-accounting-error-when-fail-to-get-pages-for-pool.patch
drm-vgem-pin-our-pages-for-dmabuf-exports.patch

queue-4.13/drm-dp-mst-handle-errors-from-drm_atomic_get_private_obj_state-correctly.patch [new file with mode: 0644]
queue-4.13/drm-ttm-fix-accounting-error-when-fail-to-get-pages-for-pool.patch [new file with mode: 0644]
queue-4.13/drm-vgem-pin-our-pages-for-dmabuf-exports.patch [new file with mode: 0644]

diff --git a/queue-4.13/drm-dp-mst-handle-errors-from-drm_atomic_get_private_obj_state-correctly.patch b/queue-4.13/drm-dp-mst-handle-errors-from-drm_atomic_get_private_obj_state-correctly.patch
new file mode 100644 (file)
index 0000000..e59bd98
--- /dev/null
@@ -0,0 +1,52 @@
+From 56a91c4932bd038f3d1f6555ddc349ca4e6933b0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Wed, 12 Jul 2017 18:51:00 +0300
+Subject: drm/dp/mst: Handle errors from drm_atomic_get_private_obj_state() correctly
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit 56a91c4932bd038f3d1f6555ddc349ca4e6933b0 upstream.
+
+On failure drm_atomic_get_private_obj_state() returns an error
+pointer instead of NULL. Adjust the checks in the callers to match.
+
+Cc: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
+Cc: Harry Wentland <harry.wentland@amd.com>
+Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Fixes: edb1ed1ab7d3 ("drm/dp: Add DP MST helpers to atomically find and release vcpi slots")
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/20170712155102.26276-1-ville.syrjala@linux.intel.com
+Reviewed-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_dp_mst_topology.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -2540,8 +2540,8 @@ int drm_dp_atomic_find_vcpi_slots(struct
+       int req_slots;
+       topology_state = drm_atomic_get_mst_topology_state(state, mgr);
+-      if (topology_state == NULL)
+-              return -ENOMEM;
++      if (IS_ERR(topology_state))
++              return PTR_ERR(topology_state);
+       port = drm_dp_get_validated_port_ref(mgr, port);
+       if (port == NULL)
+@@ -2580,8 +2580,8 @@ int drm_dp_atomic_release_vcpi_slots(str
+       struct drm_dp_mst_topology_state *topology_state;
+       topology_state = drm_atomic_get_mst_topology_state(state, mgr);
+-      if (topology_state == NULL)
+-              return -ENOMEM;
++      if (IS_ERR(topology_state))
++              return PTR_ERR(topology_state);
+       /* We cannot rely on port->vcpi.num_slots to update
+        * topology_state->avail_slots as the port may not exist if the parent
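
For context: the fix follows the kernel's error-pointer convention from
<linux/err.h>. A pointer-returning function encodes an errno via ERR_PTR()
instead of returning NULL, and callers must test with IS_ERR() and propagate
with PTR_ERR(); a plain NULL check silently misses the encoded error. Below
is a minimal sketch of the idiom (hypothetical names, not taken from the
patch itself):

    #include <linux/err.h>
    #include <linux/slab.h>

    struct foo_state { int val; };

    /* Hypothetical getter: returns a valid pointer or an ERR_PTR-encoded
     * errno -- never NULL -- like drm_atomic_get_private_obj_state(). */
    static struct foo_state *foo_get_state(void)
    {
            struct foo_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

            if (!state)
                    return ERR_PTR(-ENOMEM);
            return state;
    }

    static int foo_use_state(void)
    {
            struct foo_state *state = foo_get_state();

            /* A "state == NULL" check would never fire here. */
            if (IS_ERR(state))
                    return PTR_ERR(state);
            /* ... use state ... */
            kfree(state);
            return 0;
    }
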
diff --git a/queue-4.13/drm-ttm-fix-accounting-error-when-fail-to-get-pages-for-pool.patch b/queue-4.13/drm-ttm-fix-accounting-error-when-fail-to-get-pages-for-pool.patch
new file mode 100644 (file)
index 0000000..742e09e
--- /dev/null
@@ -0,0 +1,37 @@
+From 9afae2719273fa1d406829bf3498f82dbdba71c7 Mon Sep 17 00:00:00 2001
+From: "Xiangliang.Yu" <Xiangliang.Yu@amd.com>
+Date: Wed, 16 Aug 2017 14:25:51 +0800
+Subject: drm/ttm: Fix accounting error when fail to get pages for pool
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Xiangliang.Yu <Xiangliang.Yu@amd.com>
+
+commit 9afae2719273fa1d406829bf3498f82dbdba71c7 upstream.
+
+When we fail to get a needed page for the pool, we need to put the
+already-allocated pages back into the pool. But the current code
+miscounts those allocated pages; correct it.
+
+Signed-off-by: Xiangliang.Yu <Xiangliang.Yu@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Monk Liu <monk.liu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/ttm/ttm_page_alloc.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+@@ -615,7 +615,7 @@ static void ttm_page_pool_fill_locked(st
+               } else {
+                       pr_err("Failed to fill pool (%p)\n", pool);
+                       /* If we have any pages left put them to the pool. */
+-                      list_for_each_entry(p, &pool->list, lru) {
++                      list_for_each_entry(p, &new_pages, lru) {
+                               ++cpages;
+                       }
+                       list_splice(&new_pages, &pool->list);
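
For context: on this failure path the freshly allocated pages still sit on
the local new_pages list; they are only moved onto pool->list by the
list_splice() that follows. Counting them therefore has to walk new_pages --
walking pool->list, as the old code did, counts the wrong list. A minimal
sketch of the count-then-splice pattern (hypothetical types, not the TTM
code itself):

    #include <linux/list.h>

    struct page_entry {                 /* stand-in for struct page */
            struct list_head lru;
    };

    /* Move everything from the local list onto the pool list and report
     * how many entries were handed over. The count must be taken from the
     * local list: before list_splice() runs, the pool list does not
     * contain these entries yet. */
    static unsigned int give_back_pages(struct list_head *local,
                                        struct list_head *pool)
    {
            struct page_entry *p;
            unsigned int cpages = 0;

            list_for_each_entry(p, local, lru)
                    ++cpages;
            list_splice(local, pool);
            return cpages;
    }
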
diff --git a/queue-4.13/drm-vgem-pin-our-pages-for-dmabuf-exports.patch b/queue-4.13/drm-vgem-pin-our-pages-for-dmabuf-exports.patch
new file mode 100644 (file)
index 0000000..7ad3292
--- /dev/null
@@ -0,0 +1,218 @@
+From 71bb23c707c141b176bc084179ca5ee58d5fd26a Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Thu, 22 Jun 2017 14:46:17 +0100
+Subject: drm/vgem: Pin our pages for dmabuf exports
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 71bb23c707c141b176bc084179ca5ee58d5fd26a upstream.
+
+When the caller maps their dmabuf and we return an sg_table, the caller
+doesn't expect the pages beneath that sg_table to vanish on a whim (i.e.
+under mempressure). The contract is that the pages are pinned for the
+duration of the mapping (from dma_buf_map_attachment() to
+dma_buf_unmap_attachment()). To comply, we need to introduce our own
+vgem_object.pages_pin_count and elevate it across the mapping. However,
+the drm_prime interface we use calls drv->prime_pin on dma_buf_attach
+and drv->prime_unpin on dma_buf_detach, which, while it does cover the
+mapping, is much broader than desired -- but it will do for now.
+
+v2: also hold the pin across prime_vmap/vunmap
+
+Reported-by: Tomi Sarvela <tomi.p.sarvela@intel.com>
+Testcase: igt/gem_concurrent_blit/*swap*vgem*
+Fixes: 5ba6c9ff961a ("drm/vgem: Fix mmaping")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Tomi Sarvela <tomi.p.sarvela@intel.com>
+Cc: Laura Abbott <labbott@redhat.com>
+Cc: Sean Paul <seanpaul@chromium.org>
+Cc: Matthew Auld <matthew.auld@intel.com>
+Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
+Cc: <stable@vger.kernel.org> # needs a backport
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Link: http://patchwork.freedesktop.org/patch/msgid/20170622134617.17912-1-chris@chris-wilson.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/vgem/vgem_drv.c |   81 +++++++++++++++++++++++++++++-----------
+ drivers/gpu/drm/vgem/vgem_drv.h |    4 +
+ 2 files changed, 64 insertions(+), 21 deletions(-)
+
+--- a/drivers/gpu/drm/vgem/vgem_drv.c
++++ b/drivers/gpu/drm/vgem/vgem_drv.c
+@@ -52,6 +52,7 @@ static void vgem_gem_free_object(struct
+       struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
+       kvfree(vgem_obj->pages);
++      mutex_destroy(&vgem_obj->pages_lock);
+       if (obj->import_attach)
+               drm_prime_gem_destroy(obj, vgem_obj->table);
+@@ -76,11 +77,15 @@ static int vgem_gem_fault(struct vm_faul
+       if (page_offset > num_pages)
+               return VM_FAULT_SIGBUS;
++      ret = -ENOENT;
++      mutex_lock(&obj->pages_lock);
+       if (obj->pages) {
+               get_page(obj->pages[page_offset]);
+               vmf->page = obj->pages[page_offset];
+               ret = 0;
+-      } else {
++      }
++      mutex_unlock(&obj->pages_lock);
++      if (ret) {
+               struct page *page;
+               page = shmem_read_mapping_page(
+@@ -161,6 +166,8 @@ static struct drm_vgem_gem_object *__vge
+               return ERR_PTR(ret);
+       }
++      mutex_init(&obj->pages_lock);
++
+       return obj;
+ }
+@@ -274,37 +281,66 @@ static const struct file_operations vgem
+       .release        = drm_release,
+ };
++static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
++{
++      mutex_lock(&bo->pages_lock);
++      if (bo->pages_pin_count++ == 0) {
++              struct page **pages;
++
++              pages = drm_gem_get_pages(&bo->base);
++              if (IS_ERR(pages)) {
++                      bo->pages_pin_count--;
++                      mutex_unlock(&bo->pages_lock);
++                      return pages;
++              }
++
++              bo->pages = pages;
++      }
++      mutex_unlock(&bo->pages_lock);
++
++      return bo->pages;
++}
++
++static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
++{
++      mutex_lock(&bo->pages_lock);
++      if (--bo->pages_pin_count == 0) {
++              drm_gem_put_pages(&bo->base, bo->pages, true, true);
++              bo->pages = NULL;
++      }
++      mutex_unlock(&bo->pages_lock);
++}
++
+ static int vgem_prime_pin(struct drm_gem_object *obj)
+ {
++      struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
+       long n_pages = obj->size >> PAGE_SHIFT;
+       struct page **pages;
+-      /* Flush the object from the CPU cache so that importers can rely
+-       * on coherent indirect access via the exported dma-address.
+-       */
+-      pages = drm_gem_get_pages(obj);
++      pages = vgem_pin_pages(bo);
+       if (IS_ERR(pages))
+               return PTR_ERR(pages);
++      /* Flush the object from the CPU cache so that importers can rely
++       * on coherent indirect access via the exported dma-address.
++       */
+       drm_clflush_pages(pages, n_pages);
+-      drm_gem_put_pages(obj, pages, true, false);
+       return 0;
+ }
+-static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
++static void vgem_prime_unpin(struct drm_gem_object *obj)
+ {
+-      struct sg_table *st;
+-      struct page **pages;
++      struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
+-      pages = drm_gem_get_pages(obj);
+-      if (IS_ERR(pages))
+-              return ERR_CAST(pages);
++      vgem_unpin_pages(bo);
++}
+-      st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT);
+-      drm_gem_put_pages(obj, pages, false, false);
++static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
++{
++      struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
+-      return st;
++      return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT);
+ }
+ static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
+@@ -333,6 +369,8 @@ static struct drm_gem_object *vgem_prime
+               __vgem_gem_destroy(obj);
+               return ERR_PTR(-ENOMEM);
+       }
++
++      obj->pages_pin_count++; /* perma-pinned */
+       drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
+                                       npages);
+       return &obj->base;
+@@ -340,23 +378,23 @@ static struct drm_gem_object *vgem_prime
+ static void *vgem_prime_vmap(struct drm_gem_object *obj)
+ {
++      struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
+       long n_pages = obj->size >> PAGE_SHIFT;
+       struct page **pages;
+-      void *addr;
+-      pages = drm_gem_get_pages(obj);
++      pages = vgem_pin_pages(bo);
+       if (IS_ERR(pages))
+               return NULL;
+-      addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
+-      drm_gem_put_pages(obj, pages, false, false);
+-
+-      return addr;
++      return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
+ }
+ static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+ {
++      struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
++
+       vunmap(vaddr);
++      vgem_unpin_pages(bo);
+ }
+ static int vgem_prime_mmap(struct drm_gem_object *obj,
+@@ -409,6 +447,7 @@ static struct drm_driver vgem_driver = {
+       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+       .gem_prime_pin = vgem_prime_pin,
++      .gem_prime_unpin = vgem_prime_unpin,
+       .gem_prime_import = vgem_prime_import,
+       .gem_prime_export = drm_gem_prime_export,
+       .gem_prime_import_sg_table = vgem_prime_import_sg_table,
+--- a/drivers/gpu/drm/vgem/vgem_drv.h
++++ b/drivers/gpu/drm/vgem/vgem_drv.h
+@@ -43,7 +43,11 @@ struct vgem_file {
+ #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
+ struct drm_vgem_gem_object {
+       struct drm_gem_object base;
++
+       struct page **pages;
++      unsigned int pages_pin_count;
++      struct mutex pages_lock;
++
+       struct sg_table *table;
+ };
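
For context: the vgem change is an instance of a mutex-protected pin count.
The first pin materializes the backing pages (drm_gem_get_pages() in vgem),
nested pins only bump the counter, and the last unpin releases the pages
(drm_gem_put_pages()); imported dma-bufs start with an elevated count so
their pages never vanish. A minimal sketch of the pattern on a hypothetical
object (not the vgem code itself):

    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct pinned_obj {
            struct mutex lock;          /* protects pin_count and backing */
            unsigned int pin_count;
            void *backing;
    };

    static int obj_pin(struct pinned_obj *obj)
    {
            int ret = 0;

            mutex_lock(&obj->lock);
            if (obj->pin_count++ == 0) {
                    /* First pin: materialize the backing store. */
                    obj->backing = kzalloc(4096, GFP_KERNEL);
                    if (!obj->backing) {
                            obj->pin_count--;   /* roll back on failure */
                            ret = -ENOMEM;
                    }
            }
            mutex_unlock(&obj->lock);
            return ret;
    }

    static void obj_unpin(struct pinned_obj *obj)
    {
            mutex_lock(&obj->lock);
            if (--obj->pin_count == 0) {
                    /* Last unpin: release the backing store. */
                    kfree(obj->backing);
                    obj->backing = NULL;
            }
            mutex_unlock(&obj->lock);
    }
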