3.8-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 9 May 2013 16:38:59 +0000 (09:38 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 9 May 2013 16:38:59 +0000 (09:38 -0700)
added patches:
drm-ast-deal-with-bo-reserve-fail-in-dirty-update-path.patch
drm-gma500-fix-backlight-hotkeys-behaviour-on-netbooks.patch
drm-mgag200-deal-with-bo-reserve-fail-in-dirty-update-path.patch
drm-prime-keep-a-reference-from-the-handle-to-exported-dma-buf-v6.patch

queue-3.8/drm-ast-deal-with-bo-reserve-fail-in-dirty-update-path.patch [new file with mode: 0644]
queue-3.8/drm-gma500-fix-backlight-hotkeys-behaviour-on-netbooks.patch [new file with mode: 0644]
queue-3.8/drm-mgag200-deal-with-bo-reserve-fail-in-dirty-update-path.patch [new file with mode: 0644]
queue-3.8/drm-prime-keep-a-reference-from-the-handle-to-exported-dma-buf-v6.patch [new file with mode: 0644]
queue-3.8/series

diff --git a/queue-3.8/drm-ast-deal-with-bo-reserve-fail-in-dirty-update-path.patch b/queue-3.8/drm-ast-deal-with-bo-reserve-fail-in-dirty-update-path.patch
new file mode 100644
index 0000000..55ba5b7
--- /dev/null
@@ -0,0 +1,129 @@
+From 306373b645d80625335b8e684fa09b14ba460cec Mon Sep 17 00:00:00 2001
+From: Dave Airlie <airlied@redhat.com>
+Date: Thu, 2 May 2013 02:40:25 -0400
+Subject: drm/ast: deal with bo reserve fail in dirty update path
+
+From: Dave Airlie <airlied@redhat.com>
+
+commit 306373b645d80625335b8e684fa09b14ba460cec upstream.
+
+Port over the mgag200 fix to ast as it suffers the same issue.
+
+    On F19 testing, it was noticed that we get a lot of errors in dmesg
+    about being unable to reserve the buffer when plymouth starts.
+    This is due to the buffer being in the process of migrating,
+    so it makes sense that we can't reserve it.
+
+    In order to deal with it, this adds delayed updates for the dirty
+    updates when the bo is unreservable. In the normal console case
+    this shouldn't ever happen; it's just when plymouth or X is
+    pushing the console bo to system memory.
+
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/ast/ast_drv.h |    2 +
+ drivers/gpu/drm/ast/ast_fb.c  |   43 +++++++++++++++++++++++++++++++++++++++---
+ drivers/gpu/drm/ast/ast_ttm.c |    2 -
+ 3 files changed, 43 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/ast/ast_drv.h
++++ b/drivers/gpu/drm/ast/ast_drv.h
+@@ -239,6 +239,8 @@ struct ast_fbdev {
+       void *sysram;
+       int size;
+       struct ttm_bo_kmap_obj mapping;
++      int x1, y1, x2, y2; /* dirty rect */
++      spinlock_t dirty_lock;
+ };
+ #define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
+--- a/drivers/gpu/drm/ast/ast_fb.c
++++ b/drivers/gpu/drm/ast/ast_fb.c
+@@ -52,16 +52,52 @@ static void ast_dirty_update(struct ast_
+       int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
+       int ret;
+       bool unmap = false;
++      bool store_for_later = false;
++      int x2, y2;
++      unsigned long flags;
+       obj = afbdev->afb.obj;
+       bo = gem_to_ast_bo(obj);
++      /*
++       * try and reserve the BO, if we fail with busy
++       * then the BO is being moved and we should
++       * store up the damage until later.
++       */
+       ret = ast_bo_reserve(bo, true);
+       if (ret) {
+-              DRM_ERROR("failed to reserve fb bo\n");
++              if (ret != -EBUSY)
++                      return;
++
++              store_for_later = true;
++      }
++
++      x2 = x + width - 1;
++      y2 = y + height - 1;
++      spin_lock_irqsave(&afbdev->dirty_lock, flags);
++
++      if (afbdev->y1 < y)
++              y = afbdev->y1;
++      if (afbdev->y2 > y2)
++              y2 = afbdev->y2;
++      if (afbdev->x1 < x)
++              x = afbdev->x1;
++      if (afbdev->x2 > x2)
++              x2 = afbdev->x2;
++
++      if (store_for_later) {
++              afbdev->x1 = x;
++              afbdev->x2 = x2;
++              afbdev->y1 = y;
++              afbdev->y2 = y2;
++              spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+               return;
+       }
++      afbdev->x1 = afbdev->y1 = INT_MAX;
++      afbdev->x2 = afbdev->y2 = 0;
++      spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
++
+       if (!bo->kmap.virtual) {
+               ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+               if (ret) {
+@@ -71,10 +107,10 @@ static void ast_dirty_update(struct ast_
+               }
+               unmap = true;
+       }
+-      for (i = y; i < y + height; i++) {
++      for (i = y; i <= y2; i++) {
+               /* assume equal stride for now */
+               src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp);
+-              memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
++              memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, (x2 - x + 1) * bpp);
+       }
+       if (unmap)
+@@ -305,6 +341,7 @@ int ast_fbdev_init(struct drm_device *de
+       ast->fbdev = afbdev;
+       afbdev->helper.funcs = &ast_fb_helper_funcs;
++      spin_lock_init(&afbdev->dirty_lock);
+       ret = drm_fb_helper_init(dev, &afbdev->helper,
+                                1, 1);
+       if (ret) {
+--- a/drivers/gpu/drm/ast/ast_ttm.c
++++ b/drivers/gpu/drm/ast/ast_ttm.c
+@@ -316,7 +316,7 @@ int ast_bo_reserve(struct ast_bo *bo, bo
+       ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+       if (ret) {
+-              if (ret != -ERESTARTSYS)
++              if (ret != -ERESTARTSYS && ret != -EBUSY)
+                       DRM_ERROR("reserve failed %p\n", bo);
+               return ret;
+       }
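The two "deal with bo reserve fail" patches in this series (ast above, mgag200 below) share one deferred-damage pattern: merge the incoming dirty rectangle with anything stored from earlier calls, and flush only when the BO can actually be reserved. The following is a minimal, self-contained userspace sketch of that logic only; the fbdev_state struct, the bo_busy flag and the printf flush are hypothetical stand-ins, not the drivers' TTM reserve, spinlock and memcpy_toio() code.

/*
 * Illustrative model of the deferred dirty-rect logic above.  bo_busy
 * stands in for ast_bo_reserve()/mgag200_bo_reserve() returning -EBUSY.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct fbdev_state {
	int x1, y1, x2, y2;	/* accumulated dirty rectangle */
};

static void dirty_update(struct fbdev_state *s, bool bo_busy,
			 int x, int y, int width, int height)
{
	int x2 = x + width - 1;
	int y2 = y + height - 1;

	/* grow the new rect to cover damage stored by earlier calls */
	if (s->y1 < y)
		y = s->y1;
	if (s->y2 > y2)
		y2 = s->y2;
	if (s->x1 < x)
		x = s->x1;
	if (s->x2 > x2)
		x2 = s->x2;

	if (bo_busy) {
		/* BO is migrating: remember the merged damage for later */
		s->x1 = x; s->y1 = y; s->x2 = x2; s->y2 = y2;
		return;
	}

	/* reset the stored rect, then copy the merged region to the BO */
	s->x1 = s->y1 = INT_MAX;
	s->x2 = s->y2 = 0;
	printf("flush rows %d..%d, cols %d..%d\n", y, y2, x, x2);
}

int main(void)
{
	struct fbdev_state s = { INT_MAX, INT_MAX, 0, 0 };

	dirty_update(&s, true, 10, 10, 4, 4);	/* busy: damage is stored        */
	dirty_update(&s, false, 50, 0, 2, 2);	/* idle: flushes 0..13 / 10..51  */
	return 0;
}

With the stored rect folded in, one later flush covers everything that was skipped while the BO was migrating, which is why the loop in the real patches runs to y2 and copies (x2 - x + 1) * bpp bytes instead of the caller's width/height.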
diff --git a/queue-3.8/drm-gma500-fix-backlight-hotkeys-behaviour-on-netbooks.patch b/queue-3.8/drm-gma500-fix-backlight-hotkeys-behaviour-on-netbooks.patch
new file mode 100644
index 0000000..b7a7b7d
--- /dev/null
@@ -0,0 +1,42 @@
+From e127dc28cc3057575da0216cde85687153ca180f Mon Sep 17 00:00:00 2001
+From: Anisse Astier <anisse@astier.eu>
+Date: Wed, 24 Apr 2013 17:36:01 +0200
+Subject: drm/gma500: fix backlight hotkeys behaviour on netbooks
+
+From: Anisse Astier <anisse@astier.eu>
+
+commit e127dc28cc3057575da0216cde85687153ca180f upstream.
+
+Backlight hotkeys weren't working before on certain cedartrail laptops.
+
+The source of this problem is that the hotkeys' ASLE opregion interrupts
+were simply ignored. Driver seemed to expect the interrupt to be
+associated with a pipe, but it wasn't.
+
+Accepting the ASLE interrupt without an associated pipe event flag fixes
+the issue, the backlight code is called when needed, making the
+brightness keys work properly.
+
+[patrik: This patch affects irq handling on any netbook with opregion support]
+
+Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=833597
+Reference: http://lists.freedesktop.org/archives/dri-devel/2012-July/025279.html
+Signed-off-by: Anisse Astier <anisse@astier.eu>
+Signed-off-by: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/gma500/psb_irq.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/gma500/psb_irq.c
++++ b/drivers/gpu/drm/gma500/psb_irq.c
+@@ -211,7 +211,7 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS
+       vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
+-      if (vdc_stat & _PSB_PIPE_EVENT_FLAG)
++      if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
+               dsp_int = 1;
+       /* FIXME: Handle Medfield
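The gma500 change above is a one-bit fix: the display-event dispatch only fired for pipe events, so an interrupt carrying just the ASLE (backlight hotkey) bit fell through. A tiny sketch of the mask test, with placeholder flag values rather than the real PSB register bits:

#include <stdint.h>
#include <stdio.h>

#define PIPE_EVENT_FLAG	0x000000f0u	/* placeholder, not the real bits */
#define IRQ_ASLE	0x00000001u	/* placeholder, not the real bit  */

/* before the fix only (vdc_stat & PIPE_EVENT_FLAG) was tested, so an
 * interrupt with just the ASLE bit set never reached the backlight code */
static int display_event_pending(uint32_t vdc_stat)
{
	return (vdc_stat & (PIPE_EVENT_FLAG | IRQ_ASLE)) != 0;
}

int main(void)
{
	printf("ASLE-only interrupt handled: %d\n",
	       display_event_pending(IRQ_ASLE));
	return 0;
}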
diff --git a/queue-3.8/drm-mgag200-deal-with-bo-reserve-fail-in-dirty-update-path.patch b/queue-3.8/drm-mgag200-deal-with-bo-reserve-fail-in-dirty-update-path.patch
new file mode 100644
index 0000000..f38f304
--- /dev/null
@@ -0,0 +1,129 @@
+From 641719599528d806e00de8ae8c8453361266a312 Mon Sep 17 00:00:00 2001
+From: Dave Airlie <airlied@redhat.com>
+Date: Thu, 2 May 2013 00:52:01 -0400
+Subject: drm/mgag200: deal with bo reserve fail in dirty update path
+
+From: Dave Airlie <airlied@redhat.com>
+
+commit 641719599528d806e00de8ae8c8453361266a312 upstream.
+
+On F19 testing, it was noticed that we get a lot of errors in dmesg
+about being unable to reserve the buffer when plymouth starts.
+This is due to the buffer being in the process of migrating,
+so it makes sense that we can't reserve it.
+
+In order to deal with it, this adds delayed updates for the dirty
+updates when the bo is unreservable. In the normal console case
+this shouldn't ever happen; it's just when plymouth or X is
+pushing the console bo to system memory.
+
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/mgag200/mgag200_drv.h |    2 +
+ drivers/gpu/drm/mgag200/mgag200_fb.c  |   43 +++++++++++++++++++++++++++++++---
+ drivers/gpu/drm/mgag200/mgag200_ttm.c |    4 +--
+ 3 files changed, 44 insertions(+), 5 deletions(-)
+
+--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
++++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
+@@ -116,6 +116,8 @@ struct mga_fbdev {
+       void *sysram;
+       int size;
+       struct ttm_bo_kmap_obj mapping;
++      int x1, y1, x2, y2; /* dirty rect */
++      spinlock_t dirty_lock;
+ };
+ struct mga_crtc {
+--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
++++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
+@@ -28,16 +28,52 @@ static void mga_dirty_update(struct mga_
+       int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
+       int ret;
+       bool unmap = false;
++      bool store_for_later = false;
++      int x2, y2;
++      unsigned long flags;
+       obj = mfbdev->mfb.obj;
+       bo = gem_to_mga_bo(obj);
++      /*
++       * try and reserve the BO, if we fail with busy
++       * then the BO is being moved and we should
++       * store up the damage until later.
++       */
+       ret = mgag200_bo_reserve(bo, true);
+       if (ret) {
+-              DRM_ERROR("failed to reserve fb bo\n");
++              if (ret != -EBUSY)
++                      return;
++
++              store_for_later = true;
++      }
++
++      x2 = x + width - 1;
++      y2 = y + height - 1;
++      spin_lock_irqsave(&mfbdev->dirty_lock, flags);
++
++      if (mfbdev->y1 < y)
++              y = mfbdev->y1;
++      if (mfbdev->y2 > y2)
++              y2 = mfbdev->y2;
++      if (mfbdev->x1 < x)
++              x = mfbdev->x1;
++      if (mfbdev->x2 > x2)
++              x2 = mfbdev->x2;
++
++      if (store_for_later) {
++              mfbdev->x1 = x;
++              mfbdev->x2 = x2;
++              mfbdev->y1 = y;
++              mfbdev->y2 = y2;
++              spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
+               return;
+       }
++      mfbdev->x1 = mfbdev->y1 = INT_MAX;
++      mfbdev->x2 = mfbdev->y2 = 0;
++      spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
++
+       if (!bo->kmap.virtual) {
+               ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+               if (ret) {
+@@ -47,10 +83,10 @@ static void mga_dirty_update(struct mga_
+               }
+               unmap = true;
+       }
+-      for (i = y; i < y + height; i++) {
++      for (i = y; i <= y2; i++) {
+               /* assume equal stride for now */
+               src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
+-              memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, width * bpp);
++              memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, (x2 - x + 1) * bpp);
+       }
+       if (unmap)
+@@ -269,6 +305,7 @@ int mgag200_fbdev_init(struct mga_device
+       mdev->mfbdev = mfbdev;
+       mfbdev->helper.funcs = &mga_fb_helper_funcs;
++      spin_lock_init(&mfbdev->dirty_lock);
+       ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
+                                mdev->num_crtc, MGAG200FB_CONN_LIMIT);
+--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
++++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
+@@ -315,8 +315,8 @@ int mgag200_bo_reserve(struct mgag200_bo
+       ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+       if (ret) {
+-              if (ret != -ERESTARTSYS)
+-                      DRM_ERROR("reserve failed %p\n", bo);
++              if (ret != -ERESTARTSYS && ret != -EBUSY)
++                      DRM_ERROR("reserve failed %p %d\n", bo, ret);
+               return ret;
+       }
+       return 0;
diff --git a/queue-3.8/drm-prime-keep-a-reference-from-the-handle-to-exported-dma-buf-v6.patch b/queue-3.8/drm-prime-keep-a-reference-from-the-handle-to-exported-dma-buf-v6.patch
new file mode 100644
index 0000000..18caf11
--- /dev/null
@@ -0,0 +1,253 @@
+From 219b47339ced80ca580bb6ce7d1636166984afa7 Mon Sep 17 00:00:00 2001
+From: Dave Airlie <airlied@gmail.com>
+Date: Mon, 22 Apr 2013 09:54:36 +1000
+Subject: drm/prime: keep a reference from the handle to exported dma-buf (v6)
+
+From: Dave Airlie <airlied@gmail.com>
+
+commit 219b47339ced80ca580bb6ce7d1636166984afa7 upstream.
+
+Currently we have a problem with this:
+1. i915: create gem object
+2. i915: export gem object to prime
+3. radeon: import gem object
+4. close prime fd
+5. radeon: unref object
+6. i915: unref object
+
+i915 has an imported object reference in its file priv, that isn't
+cleaned up properly until fd close. The reference gets added at step 2,
+but at step 6 we don't have enough info to clean it up.
+
+The solution is to take a reference on the dma-buf when we export it,
+and drop the reference when the gem handle goes away.
+
+So when we export a dma_buf from a gem object, we keep track of it
+with the handle and take a reference to the dma_buf. When we close
+the handle (i.e. userspace is finished with the buffer), we drop
+the reference to the dma_buf, and it gets collected.
+
+This patch isn't meant to fix any other problem or bikesheds, and it doesn't
+fix any races with other scenarios.
+
+v1.1: move export symbol line back up.
+
+v2: okay I had to do a bit more, as the first patch showed a leak
+on one of my tests, that I found using the dma-buf debugfs support,
+the problem case is exporting a buffer twice with the same handle,
+we'd add another export handle for it unnecessarily, however
+we now fail if we try to export the same object with a different gem handle,
+however I'm not sure if that is a case I want to support, and I've
+gotten the code to WARN_ON if we hit something like that.
+
+v2.1: rebase this patch, write better commit msg.
+v3: cleanup error handling, track import vs export in linked list,
+these two patches were separate previously, but seem to work better
+like this.
+v4: danvet is correct, this code is no longer useful, since the buffer
+better exist, so remove it.
+v5: always take a reference to the dma buf object, import or export.
+(Imre Deak contributed this originally)
+v6: square the circle, remove import vs export tracking now
+that there is no difference
+
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_gem.c   |    4 +-
+ drivers/gpu/drm/drm_prime.c |   76 +++++++++++++++++++++++---------------------
+ include/drm/drmP.h          |    5 +-
+ 3 files changed, 44 insertions(+), 41 deletions(-)
+
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -205,11 +205,11 @@ static void
+ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
+ {
+       if (obj->import_attach) {
+-              drm_prime_remove_imported_buf_handle(&filp->prime,
++              drm_prime_remove_buf_handle(&filp->prime,
+                               obj->import_attach->dmabuf);
+       }
+       if (obj->export_dma_buf) {
+-              drm_prime_remove_imported_buf_handle(&filp->prime,
++              drm_prime_remove_buf_handle(&filp->prime,
+                               obj->export_dma_buf);
+       }
+ }
+--- a/drivers/gpu/drm/drm_prime.c
++++ b/drivers/gpu/drm/drm_prime.c
+@@ -61,6 +61,7 @@ struct drm_prime_member {
+       struct dma_buf *dma_buf;
+       uint32_t handle;
+ };
++static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
+ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+               struct drm_file *file_priv, uint32_t handle, uint32_t flags,
+@@ -68,7 +69,8 @@ int drm_gem_prime_handle_to_fd(struct dr
+ {
+       struct drm_gem_object *obj;
+       void *buf;
+-      int ret;
++      int ret = 0;
++      struct dma_buf *dmabuf;
+       obj = drm_gem_object_lookup(dev, file_priv, handle);
+       if (!obj)
+@@ -77,43 +79,44 @@ int drm_gem_prime_handle_to_fd(struct dr
+       mutex_lock(&file_priv->prime.lock);
+       /* re-export the original imported object */
+       if (obj->import_attach) {
+-              get_dma_buf(obj->import_attach->dmabuf);
+-              *prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags);
+-              drm_gem_object_unreference_unlocked(obj);
+-              mutex_unlock(&file_priv->prime.lock);
+-              return 0;
++              dmabuf = obj->import_attach->dmabuf;
++              goto out_have_obj;
+       }
+       if (obj->export_dma_buf) {
+-              get_dma_buf(obj->export_dma_buf);
+-              *prime_fd = dma_buf_fd(obj->export_dma_buf, flags);
+-              drm_gem_object_unreference_unlocked(obj);
+-      } else {
+-              buf = dev->driver->gem_prime_export(dev, obj, flags);
+-              if (IS_ERR(buf)) {
+-                      /* normally the created dma-buf takes ownership of the ref,
+-                       * but if that fails then drop the ref
+-                       */
+-                      drm_gem_object_unreference_unlocked(obj);
+-                      mutex_unlock(&file_priv->prime.lock);
+-                      return PTR_ERR(buf);
+-              }
+-              obj->export_dma_buf = buf;
+-              *prime_fd = dma_buf_fd(buf, flags);
++              dmabuf = obj->export_dma_buf;
++              goto out_have_obj;
+       }
++
++      buf = dev->driver->gem_prime_export(dev, obj, flags);
++      if (IS_ERR(buf)) {
++              /* normally the created dma-buf takes ownership of the ref,
++               * but if that fails then drop the ref
++               */
++              ret = PTR_ERR(buf);
++              goto out;
++      }
++      obj->export_dma_buf = buf;
++
+       /* if we've exported this buffer the cheat and add it to the import list
+        * so we get the correct handle back
+        */
+-      ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
+-                      obj->export_dma_buf, handle);
+-      if (ret) {
+-              drm_gem_object_unreference_unlocked(obj);
+-              mutex_unlock(&file_priv->prime.lock);
+-              return ret;
+-      }
++      ret = drm_prime_add_buf_handle(&file_priv->prime,
++                                     obj->export_dma_buf, handle);
++      if (ret)
++              goto out;
++      *prime_fd = dma_buf_fd(buf, flags);
+       mutex_unlock(&file_priv->prime.lock);
+       return 0;
++
++out_have_obj:
++      get_dma_buf(dmabuf);
++      *prime_fd = dma_buf_fd(dmabuf, flags);
++out:
++      drm_gem_object_unreference_unlocked(obj);
++      mutex_unlock(&file_priv->prime.lock);
++      return ret;
+ }
+ EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
+@@ -130,7 +133,7 @@ int drm_gem_prime_fd_to_handle(struct dr
+       mutex_lock(&file_priv->prime.lock);
+-      ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime,
++      ret = drm_prime_lookup_buf_handle(&file_priv->prime,
+                       dma_buf, handle);
+       if (!ret) {
+               ret = 0;
+@@ -149,7 +152,7 @@ int drm_gem_prime_fd_to_handle(struct dr
+       if (ret)
+               goto out_put;
+-      ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
++      ret = drm_prime_add_buf_handle(&file_priv->prime,
+                       dma_buf, *handle);
+       if (ret)
+               goto fail;
+@@ -307,7 +310,7 @@ void drm_prime_destroy_file_private(stru
+ }
+ EXPORT_SYMBOL(drm_prime_destroy_file_private);
+-int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
++static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
+ {
+       struct drm_prime_member *member;
+@@ -315,14 +318,14 @@ int drm_prime_add_imported_buf_handle(st
+       if (!member)
+               return -ENOMEM;
++      get_dma_buf(dma_buf);
+       member->dma_buf = dma_buf;
+       member->handle = handle;
+       list_add(&member->entry, &prime_fpriv->head);
+       return 0;
+ }
+-EXPORT_SYMBOL(drm_prime_add_imported_buf_handle);
+-int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
++int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
+ {
+       struct drm_prime_member *member;
+@@ -334,19 +337,20 @@ int drm_prime_lookup_imported_buf_handle
+       }
+       return -ENOENT;
+ }
+-EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle);
++EXPORT_SYMBOL(drm_prime_lookup_buf_handle);
+-void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
++void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
+ {
+       struct drm_prime_member *member, *safe;
+       mutex_lock(&prime_fpriv->lock);
+       list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
+               if (member->dma_buf == dma_buf) {
++                      dma_buf_put(dma_buf);
+                       list_del(&member->entry);
+                       kfree(member);
+               }
+       }
+       mutex_unlock(&prime_fpriv->lock);
+ }
+-EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle);
++EXPORT_SYMBOL(drm_prime_remove_buf_handle);
+--- a/include/drm/drmP.h
++++ b/include/drm/drmP.h
+@@ -1559,9 +1559,8 @@ extern void drm_prime_gem_destroy(struct
+ void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
+ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
+-int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
+-int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle);
+-void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
++int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle);
++void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
+ int drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj);
+ int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf,
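The prime patch above boils down to one piece of bookkeeping: every GEM handle that exported or imported a dma-buf pins it with one extra reference, taken when the (dma-buf, handle) pair is recorded and dropped when the handle is destroyed. A minimal userspace model of that lifetime follows; the buf and prime_member types are hypothetical stand-ins for struct dma_buf and struct drm_prime_member.

#include <stdio.h>
#include <stdlib.h>

struct buf {			/* stand-in for struct dma_buf */
	int refcount;
};

struct prime_member {		/* stand-in for struct drm_prime_member */
	struct buf *buf;
	unsigned int handle;
	struct prime_member *next;
};

static void buf_get(struct buf *b) { b->refcount++; }

static void buf_put(struct buf *b)
{
	if (--b->refcount == 0) {
		printf("buffer released\n");
		free(b);
	}
}

/* record the handle -> buffer link and pin the buffer; this models the
 * patch's drm_prime_add_buf_handle() plus its new get_dma_buf() call */
static void add_buf_handle(struct prime_member **head, struct buf *b,
			   unsigned int handle)
{
	struct prime_member *m = malloc(sizeof(*m));

	buf_get(b);
	m->buf = b;
	m->handle = handle;
	m->next = *head;
	*head = m;
}

/* called when the GEM handle goes away; this models
 * drm_prime_remove_buf_handle() plus its new dma_buf_put() call */
static void remove_buf_handle(struct prime_member **head, struct buf *b)
{
	struct prime_member **p = head;

	while (*p) {
		struct prime_member *m = *p;

		if (m->buf == b) {
			*p = m->next;
			buf_put(m->buf);
			free(m);
		} else {
			p = &m->next;
		}
	}
}

int main(void)
{
	struct prime_member *head = NULL;
	struct buf *b = calloc(1, sizeof(*b));

	b->refcount = 1;		/* creator's reference                 */
	add_buf_handle(&head, b, 1);	/* export: the handle pins the buffer  */
	buf_put(b);			/* creator drops its reference         */
	remove_buf_handle(&head, b);	/* handle close: buffer is released    */
	return 0;
}

Without the extra reference taken in add_buf_handle(), the final remove would touch a buffer that was already freed at the creator's buf_put(), which is the step-5/step-6 problem described in the commit message.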
diff --git a/queue-3.8/series b/queue-3.8/series
index 671eca88dbcb59f1cdbca08aee724f9bd135534c..cf9e82075a7be3b561f8975f3f56d11821ab849a 100644
@@ -29,3 +29,7 @@ rdma-cxgb4-fix-sq-allocation-when-on-chip-sq-is-disabled.patch
 arm64-ignore-the-write-esr-flag-on-cache-maintenance-faults.patch
 block-fix-max-discard-sectors-limit.patch
 drm-cirrus-deal-with-bo-reserve-fail-in-dirty-update-path.patch
+drm-mgag200-deal-with-bo-reserve-fail-in-dirty-update-path.patch
+drm-gma500-fix-backlight-hotkeys-behaviour-on-netbooks.patch
+drm-prime-keep-a-reference-from-the-handle-to-exported-dma-buf-v6.patch
+drm-ast-deal-with-bo-reserve-fail-in-dirty-update-path.patch