From: Greg Kroah-Hartman
Date: Thu, 9 May 2013 16:39:05 +0000 (-0700)
Subject: 3.9-stable patches
X-Git-Tag: v3.9.2~51
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=dc3399a9dc2b0bce82ff268399636202ea0972f4;p=thirdparty%2Fkernel%2Fstable-queue.git

3.9-stable patches

added patches:
	drm-ast-deal-with-bo-reserve-fail-in-dirty-update-path.patch
	drm-gma500-fix-backlight-hotkeys-behaviour-on-netbooks.patch
	drm-mgag200-deal-with-bo-reserve-fail-in-dirty-update-path.patch
	drm-prime-fix-refcounting-on-the-dmabuf-import-error-path.patch
	drm-prime-keep-a-reference-from-the-handle-to-exported-dma-buf-v6.patch
---

diff --git a/queue-3.9/drm-ast-deal-with-bo-reserve-fail-in-dirty-update-path.patch b/queue-3.9/drm-ast-deal-with-bo-reserve-fail-in-dirty-update-path.patch
new file mode 100644
index 00000000000..4638ec137de
--- /dev/null
+++ b/queue-3.9/drm-ast-deal-with-bo-reserve-fail-in-dirty-update-path.patch
@@ -0,0 +1,129 @@
+From 306373b645d80625335b8e684fa09b14ba460cec Mon Sep 17 00:00:00 2001
+From: Dave Airlie
+Date: Thu, 2 May 2013 02:40:25 -0400
+Subject: drm/ast: deal with bo reserve fail in dirty update path
+
+From: Dave Airlie
+
+commit 306373b645d80625335b8e684fa09b14ba460cec upstream.
+
+Port over the mgag200 fix to ast as it suffers the same issue.
+
+    On F19 testing, it was noticed we get a lot of errors in dmesg
+    about being unable to reserve the buffer when plymouth starts;
+    this is due to the buffer being in the process of migrating,
+    so it makes sense we can't reserve it.
+
+    In order to deal with it, this adds delayed updates for the dirty
+    updates when the bo is unreservable. In the normal console case
+    this shouldn't ever happen; it's just when plymouth or X is
+    pushing the console bo to system memory.
+
+Signed-off-by: Dave Airlie
+Signed-off-by: Greg Kroah-Hartman
+
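What the port brings over is a deferral scheme for dirty updates: each update first merges its rectangle with any damage left behind by earlier updates that could not reserve the BO, and only stores the union for later when the reserve fails with -EBUSY. A condensed, driver-neutral sketch of that pattern, assuming kernel context (the struct and function names here are illustrative; the INT_MAX/0 "empty" sentinel and the locking mirror the hunks below):

#include <linux/kernel.h>	/* min(), max(), INT_MAX */
#include <linux/spinlock.h>

struct dirty_rect {
	spinlock_t lock;
	int x1, y1, x2, y2;	/* x1/y1 == INT_MAX, x2/y2 == 0 when empty */
};

/* Merge this update's rect with any stored damage; if the BO is busy,
 * keep the union for a later update instead of copying now. */
static bool dirty_merge(struct dirty_rect *d, bool bo_busy,
			int *x, int *y, int *x2, int *y2)
{
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	*x  = min(*x,  d->x1);
	*y  = min(*y,  d->y1);
	*x2 = max(*x2, d->x2);
	*y2 = max(*y2, d->y2);
	if (bo_busy) {
		d->x1 = *x;  d->y1 = *y;	/* defer: store the union */
		d->x2 = *x2; d->y2 = *y2;
	} else {
		d->x1 = d->y1 = INT_MAX;	/* flushing now: reset to empty */
		d->x2 = d->y2 = 0;
	}
	spin_unlock_irqrestore(&d->lock, flags);
	return !bo_busy;			/* true = caller copies now */
}

When the merge says to copy, the caller walks scanlines y through y2 and copies only (x2 - x + 1) * bpp bytes per line, which is why the copy loop in the ast_fb.c hunk below stops using the raw width argument.
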
+---
+ drivers/gpu/drm/ast/ast_drv.h |    2 +
+ drivers/gpu/drm/ast/ast_fb.c  |   43 +++++++++++++++++++++++++++++++++++++++---
+ drivers/gpu/drm/ast/ast_ttm.c |    2 -
+ 3 files changed, 43 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/ast/ast_drv.h
++++ b/drivers/gpu/drm/ast/ast_drv.h
+@@ -241,6 +241,8 @@ struct ast_fbdev {
+ 	void *sysram;
+ 	int size;
+ 	struct ttm_bo_kmap_obj mapping;
++	int x1, y1, x2, y2; /* dirty rect */
++	spinlock_t dirty_lock;
+ };
+ 
+ #define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
+--- a/drivers/gpu/drm/ast/ast_fb.c
++++ b/drivers/gpu/drm/ast/ast_fb.c
+@@ -53,16 +53,52 @@ static void ast_dirty_update(struct ast_
+ 	int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
+ 	int ret;
+ 	bool unmap = false;
++	bool store_for_later = false;
++	int x2, y2;
++	unsigned long flags;
+ 
+ 	obj = afbdev->afb.obj;
+ 	bo = gem_to_ast_bo(obj);
+ 
++	/*
++	 * try and reserve the BO, if we fail with busy
++	 * then the BO is being moved and we should
++	 * store up the damage until later.
++	 */
+ 	ret = ast_bo_reserve(bo, true);
+ 	if (ret) {
+-		DRM_ERROR("failed to reserve fb bo\n");
++		if (ret != -EBUSY)
++			return;
++
++		store_for_later = true;
++	}
++
++	x2 = x + width - 1;
++	y2 = y + height - 1;
++	spin_lock_irqsave(&afbdev->dirty_lock, flags);
++
++	if (afbdev->y1 < y)
++		y = afbdev->y1;
++	if (afbdev->y2 > y2)
++		y2 = afbdev->y2;
++	if (afbdev->x1 < x)
++		x = afbdev->x1;
++	if (afbdev->x2 > x2)
++		x2 = afbdev->x2;
++
++	if (store_for_later) {
++		afbdev->x1 = x;
++		afbdev->x2 = x2;
++		afbdev->y1 = y;
++		afbdev->y2 = y2;
++		spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+ 		return;
+ 	}
+ 
++	afbdev->x1 = afbdev->y1 = INT_MAX;
++	afbdev->x2 = afbdev->y2 = 0;
++	spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
++
+ 	if (!bo->kmap.virtual) {
+ 		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ 		if (ret) {
+@@ -72,10 +108,10 @@ static void ast_dirty_update(struct ast_
+ 		}
+ 		unmap = true;
+ 	}
+-	for (i = y; i < y + height; i++) {
++	for (i = y; i <= y2; i++) {
+ 		/* assume equal stride for now */
+ 		src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp);
+-		memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
++		memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, (x2 - x + 1) * bpp);
+ 
+ 	}
+ 	if (unmap)
+@@ -292,6 +328,7 @@ int ast_fbdev_init(struct drm_device *de
+ 
+ 	ast->fbdev = afbdev;
+ 	afbdev->helper.funcs = &ast_fb_helper_funcs;
++	spin_lock_init(&afbdev->dirty_lock);
+ 	ret = drm_fb_helper_init(dev, &afbdev->helper,
+ 				 1, 1);
+ 	if (ret) {
+--- a/drivers/gpu/drm/ast/ast_ttm.c
++++ b/drivers/gpu/drm/ast/ast_ttm.c
+@@ -316,7 +316,7 @@ int ast_bo_reserve(struct ast_bo *bo, bo
+ 
+ 	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ 	if (ret) {
+-		if (ret != -ERESTARTSYS)
++		if (ret != -ERESTARTSYS && ret != -EBUSY)
+ 			DRM_ERROR("reserve failed %p\n", bo);
+ 		return ret;
+ 	}
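The small ast_ttm.c hunk above is what makes the deferral quiet: with no_wait set, ttm_bo_reserve() fails fast with -EBUSY while the buffer is migrating, and that outcome must pass through unlogged, like -ERESTARTSYS, so the dirty path can defer instead of spamming dmesg. A sketch of the contract, assuming the 3.9-era ttm_bo_reserve() signature used in the diff (the wrapper name is illustrative):

/* Contract relied on by the dirty-update path: -EBUSY from a no-wait
 * reserve is an expected, silent outcome, not an error worth logging. */
static int sketch_bo_reserve(struct ttm_buffer_object *bo, bool no_wait)
{
	int ret;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (ret) {
		if (ret != -ERESTARTSYS && ret != -EBUSY)
			DRM_ERROR("reserve failed %p\n", bo);
		return ret;	/* caller stores the damage on -EBUSY */
	}
	return 0;
}
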
diff --git a/queue-3.9/drm-gma500-fix-backlight-hotkeys-behaviour-on-netbooks.patch b/queue-3.9/drm-gma500-fix-backlight-hotkeys-behaviour-on-netbooks.patch
new file mode 100644
index 00000000000..b7a7b7d103d
--- /dev/null
+++ b/queue-3.9/drm-gma500-fix-backlight-hotkeys-behaviour-on-netbooks.patch
@@ -0,0 +1,42 @@
+From e127dc28cc3057575da0216cde85687153ca180f Mon Sep 17 00:00:00 2001
+From: Anisse Astier
+Date: Wed, 24 Apr 2013 17:36:01 +0200
+Subject: drm/gma500: fix backlight hotkeys behaviour on netbooks
+
+From: Anisse Astier
+
+commit e127dc28cc3057575da0216cde85687153ca180f upstream.
+
+Backlight hotkeys weren't working before on certain cedartrail laptops.
+
+The source of this problem is that the hotkeys' ASLE opregion interrupts
+were simply ignored. The driver seemed to expect the interrupt to be
+associated with a pipe, but it wasn't.
+
+Accepting the ASLE interrupt without an associated pipe event flag fixes
+the issue: the backlight code is called when needed, making the
+brightness keys work properly.
+
+[patrik: This patch affects irq handling on any netbook with opregion support]
+
+Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=833597
+Reference: http://lists.freedesktop.org/archives/dri-devel/2012-July/025279.html
+Signed-off-by: Anisse Astier
+Signed-off-by: Patrik Jakobsson
+Signed-off-by: Greg Kroah-Hartman
+
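The fix itself is a single widened condition in the IRQ handler: an ASLE (opregion/backlight hotkey) event must route into the display-event path even when no pipe event bit accompanies it. A minimal sketch of that decision, assuming the status-bit macros from the hunk below (the helper function is illustrative):

/* Decide whether an incoming VDC interrupt needs the display path.
 * Before the fix only _PSB_PIPE_EVENT_FLAG counted, so a hotkey's
 * pure-ASLE interrupt was acknowledged but never acted upon. */
static bool sketch_wants_display_path(u32 vdc_stat)
{
	return (vdc_stat & (_PSB_PIPE_EVENT_FLAG | _PSB_IRQ_ASLE)) != 0;
}
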
+---
+ drivers/gpu/drm/gma500/psb_irq.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/gma500/psb_irq.c
++++ b/drivers/gpu/drm/gma500/psb_irq.c
+@@ -211,7 +211,7 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS
+ 
+ 	vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
+ 
+-	if (vdc_stat & _PSB_PIPE_EVENT_FLAG)
++	if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
+ 		dsp_int = 1;
+ 
+ 	/* FIXME: Handle Medfield
diff --git a/queue-3.9/drm-mgag200-deal-with-bo-reserve-fail-in-dirty-update-path.patch b/queue-3.9/drm-mgag200-deal-with-bo-reserve-fail-in-dirty-update-path.patch
new file mode 100644
index 00000000000..007b97a3430
--- /dev/null
+++ b/queue-3.9/drm-mgag200-deal-with-bo-reserve-fail-in-dirty-update-path.patch
@@ -0,0 +1,129 @@
+From 641719599528d806e00de8ae8c8453361266a312 Mon Sep 17 00:00:00 2001
+From: Dave Airlie
+Date: Thu, 2 May 2013 00:52:01 -0400
+Subject: drm/mgag200: deal with bo reserve fail in dirty update path
+
+From: Dave Airlie
+
+commit 641719599528d806e00de8ae8c8453361266a312 upstream.
+
+On F19 testing, it was noticed we get a lot of errors in dmesg
+about being unable to reserve the buffer when plymouth starts;
+this is due to the buffer being in the process of migrating,
+so it makes sense we can't reserve it.
+
+In order to deal with it, this adds delayed updates for the dirty
+updates when the bo is unreservable. In the normal console case
+this shouldn't ever happen; it's just when plymouth or X is
+pushing the console bo to system memory.
+
+Signed-off-by: Dave Airlie
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/gpu/drm/mgag200/mgag200_drv.h |    2 +
+ drivers/gpu/drm/mgag200/mgag200_fb.c  |   43 +++++++++++++++++++++++++++++++---
+ drivers/gpu/drm/mgag200/mgag200_ttm.c |    4 +--
+ 3 files changed, 44 insertions(+), 5 deletions(-)
+
+--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
++++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
+@@ -115,6 +115,8 @@ struct mga_fbdev {
+ 	void *sysram;
+ 	int size;
+ 	struct ttm_bo_kmap_obj mapping;
++	int x1, y1, x2, y2; /* dirty rect */
++	spinlock_t dirty_lock;
+ };
+ 
+ struct mga_crtc {
+--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
++++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
+@@ -29,16 +29,52 @@ static void mga_dirty_update(struct mga_
+ 	int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
+ 	int ret;
+ 	bool unmap = false;
++	bool store_for_later = false;
++	int x2, y2;
++	unsigned long flags;
+ 
+ 	obj = mfbdev->mfb.obj;
+ 	bo = gem_to_mga_bo(obj);
+ 
++	/*
++	 * try and reserve the BO, if we fail with busy
++	 * then the BO is being moved and we should
++	 * store up the damage until later.
++	 */
+ 	ret = mgag200_bo_reserve(bo, true);
+ 	if (ret) {
+-		DRM_ERROR("failed to reserve fb bo\n");
++		if (ret != -EBUSY)
++			return;
++
++		store_for_later = true;
++	}
++
++	x2 = x + width - 1;
++	y2 = y + height - 1;
++	spin_lock_irqsave(&mfbdev->dirty_lock, flags);
++
++	if (mfbdev->y1 < y)
++		y = mfbdev->y1;
++	if (mfbdev->y2 > y2)
++		y2 = mfbdev->y2;
++	if (mfbdev->x1 < x)
++		x = mfbdev->x1;
++	if (mfbdev->x2 > x2)
++		x2 = mfbdev->x2;
++
++	if (store_for_later) {
++		mfbdev->x1 = x;
++		mfbdev->x2 = x2;
++		mfbdev->y1 = y;
++		mfbdev->y2 = y2;
++		spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
+ 		return;
+ 	}
+ 
++	mfbdev->x1 = mfbdev->y1 = INT_MAX;
++	mfbdev->x2 = mfbdev->y2 = 0;
++	spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
++
+ 	if (!bo->kmap.virtual) {
+ 		ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ 		if (ret) {
+@@ -48,10 +84,10 @@ static void mga_dirty_update(struct mga_
+ 		}
+ 		unmap = true;
+ 	}
+-	for (i = y; i < y + height; i++) {
++	for (i = y; i <= y2; i++) {
+ 		/* assume equal stride for now */
+ 		src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
+-		memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, width * bpp);
++		memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, (x2 - x + 1) * bpp);
+ 
+ 	}
+ 	if (unmap)
+@@ -255,6 +291,7 @@ int mgag200_fbdev_init(struct mga_device
+ 
+ 	mdev->mfbdev = mfbdev;
+ 	mfbdev->helper.funcs = &mga_fb_helper_funcs;
++	spin_lock_init(&mfbdev->dirty_lock);
+ 
+ 	ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
+ 				 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
+--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
++++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
+@@ -315,8 +315,8 @@ int mgag200_bo_reserve(struct mgag200_bo
+ 
+ 	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ 	if (ret) {
+-		if (ret != -ERESTARTSYS)
+-			DRM_ERROR("reserve failed %p\n", bo);
++		if (ret != -ERESTARTSYS && ret != -EBUSY)
++			DRM_ERROR("reserve failed %p %d\n", bo, ret);
+ 		return ret;
+ 	}
+ 	return 0;
diff --git a/queue-3.9/drm-prime-fix-refcounting-on-the-dmabuf-import-error-path.patch b/queue-3.9/drm-prime-fix-refcounting-on-the-dmabuf-import-error-path.patch
new file mode 100644
index 00000000000..41bb4762797
--- /dev/null
+++ b/queue-3.9/drm-prime-fix-refcounting-on-the-dmabuf-import-error-path.patch
@@ -0,0 +1,151 @@
+From 011c2282c74db120f01a8414edc66c3f217f5511 Mon Sep 17 00:00:00 2001
+From: Imre Deak
+Date: Fri, 19 Apr 2013 11:11:56 +1000
+Subject: drm: prime: fix refcounting on the dmabuf import error path
+
+From: Imre Deak
+
+commit 011c2282c74db120f01a8414edc66c3f217f5511 upstream.
+
+In commit be8a42ae60 we introduced a refcount problem, where on the
+drm_gem_prime_fd_to_handle() error path we'll call dma_buf_put() for
+self imported dma buffers.
+
+Fix this by taking a reference on the dma buffer in the .gem_import
+hook instead of assuming the caller had taken one. Besides fixing the
+bug this is also more logical.
+
+Signed-off-by: Imre Deak
+Reviewed-by: Daniel Vetter
+Signed-off-by: Dave Airlie
+Signed-off-by: Greg Kroah-Hartman
+
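The ownership rule this patch establishes: an import hook that really attaches to the dma-buf takes its own reference with get_dma_buf() and releases it on every error path (and later when the GEM object is destroyed), while the self-import fast path takes no dma-buf reference at all, so the caller can unconditionally drop its own. A sketch of an importer following that rule, assuming kernel context (my_wrap_sgt_in_gem is a hypothetical helper; the dma-buf calls are the ones used in the diffs below):

static struct drm_gem_object *
sketch_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct sg_table *sgt;
	int ret;

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);	/* the import now owns its own reference */

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
		goto fail_detach;
	}

	obj = my_wrap_sgt_in_gem(dev, sgt);	/* hypothetical helper */
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}
	/* the reference travels with the GEM object from here on and is
	 * dropped when the object is destroyed */
	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);	/* every error path drops the ref it took */
	return ERR_PTR(ret);
}
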
+---
+ drivers/gpu/drm/drm_prime.c                |    8 +++++++-
+ drivers/gpu/drm/exynos/exynos_drm_dmabuf.c |    4 +++-
+ drivers/gpu/drm/i915/i915_gem_dmabuf.c     |    5 ++++-
+ drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c  |    1 -
+ drivers/gpu/drm/udl/udl_gem.c              |    4 ++++
+ 5 files changed, 18 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/drm_prime.c
++++ b/drivers/gpu/drm/drm_prime.c
+@@ -268,7 +268,6 @@ struct drm_gem_object *drm_gem_prime_imp
+ 			 * refcount on gem itself instead of f_count of dmabuf.
+ 			 */
+ 			drm_gem_object_reference(obj);
+-			dma_buf_put(dma_buf);
+ 			return obj;
+ 		}
+ 	}
+@@ -277,6 +276,8 @@ struct drm_gem_object *drm_gem_prime_imp
+ 	if (IS_ERR(attach))
+ 		return ERR_PTR(PTR_ERR(attach));
+ 
++	get_dma_buf(dma_buf);
++
+ 	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ 	if (IS_ERR_OR_NULL(sgt)) {
+ 		ret = PTR_ERR(sgt);
+@@ -297,6 +298,8 @@ fail_unmap:
+ 	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+ fail_detach:
+ 	dma_buf_detach(dma_buf, attach);
++	dma_buf_put(dma_buf);
++
+ 	return ERR_PTR(ret);
+ }
+ EXPORT_SYMBOL(drm_gem_prime_import);
+@@ -339,6 +342,9 @@ int drm_gem_prime_fd_to_handle(struct dr
+ 		goto fail;
+ 
+ 	mutex_unlock(&file_priv->prime.lock);
++
++	dma_buf_put(dma_buf);
++
+ 	return 0;
+ 
+ fail:
+--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+@@ -235,7 +235,6 @@ struct drm_gem_object *exynos_dmabuf_pri
+ 			 * refcount on gem itself instead of f_count of dmabuf.
+ 			 */
+ 			drm_gem_object_reference(obj);
+-			dma_buf_put(dma_buf);
+ 			return obj;
+ 		}
+ 	}
+@@ -244,6 +243,7 @@ struct drm_gem_object *exynos_dmabuf_pri
+ 	if (IS_ERR(attach))
+ 		return ERR_PTR(-EINVAL);
+ 
++	get_dma_buf(dma_buf);
+ 
+ 	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ 	if (IS_ERR_OR_NULL(sgt)) {
+@@ -298,6 +298,8 @@ err_unmap_attach:
+ 	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+ err_buf_detach:
+ 	dma_buf_detach(dma_buf, attach);
++	dma_buf_put(dma_buf);
++
+ 	return ERR_PTR(ret);
+ }
+--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
++++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+@@ -271,7 +271,6 @@ struct drm_gem_object *i915_gem_prime_im
+ 			 * refcount on gem itself instead of f_count of dmabuf.
+ 			 */
+ 			drm_gem_object_reference(&obj->base);
+-			dma_buf_put(dma_buf);
+ 			return &obj->base;
+ 		}
+ 	}
+@@ -281,6 +280,8 @@ struct drm_gem_object *i915_gem_prime_im
+ 	if (IS_ERR(attach))
+ 		return ERR_CAST(attach);
+ 
++	get_dma_buf(dma_buf);
++
+ 	obj = i915_gem_object_alloc(dev);
+ 	if (obj == NULL) {
+ 		ret = -ENOMEM;
+@@ -300,5 +301,7 @@ struct drm_gem_object *i915_gem_prime_im
+ 
+ fail_detach:
+ 	dma_buf_detach(dma_buf, attach);
++	dma_buf_put(dma_buf);
++
+ 	return ERR_PTR(ret);
+ }
+--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
++++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+@@ -212,7 +212,6 @@ struct drm_gem_object *omap_gem_prime_im
+ 			 * refcount on gem itself instead of f_count of dmabuf.
+ 			 */
+ 			drm_gem_object_reference(obj);
+-			dma_buf_put(buffer);
+ 			return obj;
+ 		}
+ 	}
+--- a/drivers/gpu/drm/udl/udl_gem.c
++++ b/drivers/gpu/drm/udl/udl_gem.c
+@@ -303,6 +303,8 @@ struct drm_gem_object *udl_gem_prime_imp
+ 	if (IS_ERR(attach))
+ 		return ERR_CAST(attach);
+ 
++	get_dma_buf(dma_buf);
++
+ 	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ 	if (IS_ERR(sg)) {
+ 		ret = PTR_ERR(sg);
+@@ -322,5 +324,7 @@ fail_unmap:
+ 	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+ fail_detach:
+ 	dma_buf_detach(dma_buf, attach);
++	dma_buf_put(dma_buf);
++
+ 	return ERR_PTR(ret);
+ }
diff --git a/queue-3.9/drm-prime-keep-a-reference-from-the-handle-to-exported-dma-buf-v6.patch b/queue-3.9/drm-prime-keep-a-reference-from-the-handle-to-exported-dma-buf-v6.patch
new file mode 100644
index 00000000000..86a4382d878
--- /dev/null
+++ b/queue-3.9/drm-prime-keep-a-reference-from-the-handle-to-exported-dma-buf-v6.patch
@@ -0,0 +1,253 @@
+From 219b47339ced80ca580bb6ce7d1636166984afa7 Mon Sep 17 00:00:00 2001
+From: Dave Airlie
+Date: Mon, 22 Apr 2013 09:54:36 +1000
+Subject: drm/prime: keep a reference from the handle to exported dma-buf (v6)
+
+From: Dave Airlie
+
+commit 219b47339ced80ca580bb6ce7d1636166984afa7 upstream.
+
+Currently we have a problem with this:
+1. i915: create gem object
+2. i915: export gem object to prime
+3. radeon: import gem object
+4. close prime fd
+5. radeon: unref object
+6. i915: unref object
+
+i915 has an imported object reference in its file priv that isn't
+cleaned up properly until fd close. The reference gets added at step 2,
+but at step 6 we don't have enough info to clean it up.
+
+The solution is to take a reference on the dma-buf when we export it,
+and drop the reference when the gem handle goes away.
+
+So when we export a dma_buf from a gem object, we keep track of it
+with the handle and take a reference to the dma_buf. When we close
+the handle (i.e. userspace is finished with the buffer), we drop
+the reference to the dma_buf, and it gets collected.
+
+This patch isn't meant to fix any other problem or bikesheds, and it
+doesn't fix any races with other scenarios.
+
+v1.1: move export symbol line back up.
+
+v2: okay I had to do a bit more, as the first patch showed a leak
+on one of my tests, that I found using the dma-buf debugfs support;
+the problem case is exporting a buffer twice with the same handle,
+where we'd add another export handle for it unnecessarily. However,
+we now fail if we try to export the same object with a different gem
+handle; I'm not sure if that is a case I want to support, and I've
+gotten the code to WARN_ON if we hit something like that.
+
+v2.1: rebase this patch, write better commit msg.
+v3: cleanup error handling, track import vs export in linked list,
+these two patches were separate previously, but seem to work better
+like this.
+v4: danvet is correct, this code is no longer useful, since the buffer
+better exist, so remove it.
+v5: always take a reference to the dma buf object, import or export.
+(Imre Deak contributed this originally)
+v6: square the circle, remove import vs export tracking now
+that there is no difference
+
+Reviewed-by: Daniel Vetter
+Signed-off-by: Dave Airlie
+Signed-off-by: Greg Kroah-Hartman
+
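The heart of the fix is that the per-file handle tracking now owns a dma-buf reference: adding a (handle, dma_buf) pair pins the buffer, and removing the handle on object/handle teardown unpins it, so step 6 in the scenario above releases everything without waiting for the file to close. The condensed shape of the two helpers, assuming the drm_prime_member struct and locking from the drm_prime.c hunks below (names prefixed sketch_ are illustrative):

/* Callers hold prime_fpriv->lock, as in drm_gem_prime_handle_to_fd(). */
static int sketch_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				 struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);	/* the handle now pins the dma-buf */
	member->dma_buf = dma_buf;
	member->handle = handle;
	list_add(&member->entry, &prime_fpriv->head);
	return 0;
}

static void sketch_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
				     struct dma_buf *dma_buf)
{
	struct drm_prime_member *member, *safe;

	mutex_lock(&prime_fpriv->lock);
	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			dma_buf_put(dma_buf);	/* drop the handle's pin */
			list_del(&member->entry);
			kfree(member);
		}
	}
	mutex_unlock(&prime_fpriv->lock);
}

With the tracking unified, there is no longer an import-vs-export distinction, which is why the older *_imported_* helpers disappear from drmP.h in the final hunk.
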
+---
+ drivers/gpu/drm/drm_gem.c   |    4 +-
+ drivers/gpu/drm/drm_prime.c |   76 +++++++++++++++++++++++---------------------
+ include/drm/drmP.h          |    5 +-
+ 3 files changed, 44 insertions(+), 41 deletions(-)
+
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -205,11 +205,11 @@ static void
+ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
+ {
+ 	if (obj->import_attach) {
+-		drm_prime_remove_imported_buf_handle(&filp->prime,
++		drm_prime_remove_buf_handle(&filp->prime,
+ 				obj->import_attach->dmabuf);
+ 	}
+ 	if (obj->export_dma_buf) {
+-		drm_prime_remove_imported_buf_handle(&filp->prime,
++		drm_prime_remove_buf_handle(&filp->prime,
+ 				obj->export_dma_buf);
+ 	}
+ }
+--- a/drivers/gpu/drm/drm_prime.c
++++ b/drivers/gpu/drm/drm_prime.c
+@@ -62,6 +62,7 @@ struct drm_prime_member {
+ 	struct dma_buf *dma_buf;
+ 	uint32_t handle;
+ };
++static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
+ 
+ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
+ 		enum dma_data_direction dir)
+@@ -200,7 +201,8 @@ int drm_gem_prime_handle_to_fd(struct dr
+ {
+ 	struct drm_gem_object *obj;
+ 	void *buf;
+-	int ret;
++	int ret = 0;
++	struct dma_buf *dmabuf;
+ 
+ 	obj = drm_gem_object_lookup(dev, file_priv, handle);
+ 	if (!obj)
+@@ -209,43 +211,44 @@ int drm_gem_prime_handle_to_fd(struct dr
+ 
+ 	mutex_lock(&file_priv->prime.lock);
+ 	/* re-export the original imported object */
+ 	if (obj->import_attach) {
+-		get_dma_buf(obj->import_attach->dmabuf);
+-		*prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags);
+-		drm_gem_object_unreference_unlocked(obj);
+-		mutex_unlock(&file_priv->prime.lock);
+-		return 0;
++		dmabuf = obj->import_attach->dmabuf;
++		goto out_have_obj;
+ 	}
+ 
+ 	if (obj->export_dma_buf) {
+-		get_dma_buf(obj->export_dma_buf);
+-		*prime_fd = dma_buf_fd(obj->export_dma_buf, flags);
+-		drm_gem_object_unreference_unlocked(obj);
+-	} else {
+-		buf = dev->driver->gem_prime_export(dev, obj, flags);
+-		if (IS_ERR(buf)) {
+-			/* normally the created dma-buf takes ownership of the ref,
+-			 * but if that fails then drop the ref
+-			 */
+-			drm_gem_object_unreference_unlocked(obj);
+-			mutex_unlock(&file_priv->prime.lock);
+-			return PTR_ERR(buf);
+-		}
+-		obj->export_dma_buf = buf;
+-		*prime_fd = dma_buf_fd(buf, flags);
++		dmabuf = obj->export_dma_buf;
++		goto out_have_obj;
+ 	}
++
++	buf = dev->driver->gem_prime_export(dev, obj, flags);
++	if (IS_ERR(buf)) {
++		/* normally the created dma-buf takes ownership of the ref,
++		 * but if that fails then drop the ref
++		 */
++		ret = PTR_ERR(buf);
++		goto out;
++	}
++	obj->export_dma_buf = buf;
++
+ 	/* if we've exported this buffer the cheat and add it to the import list
+ 	 * so we get the correct handle back
+ 	 */
+-	ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
+-			obj->export_dma_buf, handle);
+-	if (ret) {
+-		drm_gem_object_unreference_unlocked(obj);
+-		mutex_unlock(&file_priv->prime.lock);
+-		return ret;
+-	}
++	ret = drm_prime_add_buf_handle(&file_priv->prime,
++			obj->export_dma_buf, handle);
++	if (ret)
++		goto out;
+ 
++	*prime_fd = dma_buf_fd(buf, flags);
+ 	mutex_unlock(&file_priv->prime.lock);
+ 	return 0;
++
++out_have_obj:
++	get_dma_buf(dmabuf);
++	*prime_fd = dma_buf_fd(dmabuf, flags);
++out:
++	drm_gem_object_unreference_unlocked(obj);
++	mutex_unlock(&file_priv->prime.lock);
++	return ret;
+ }
+ EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
+ 
+@@ -317,7 +320,7 @@ int drm_gem_prime_fd_to_handle(struct dr
+ 
+ 	mutex_lock(&file_priv->prime.lock);
+ 
+-	ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime,
++	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
+ 			dma_buf, handle);
+ 	if (!ret) {
+ 		ret = 0;
+@@ -336,7 +339,7 @@ int drm_gem_prime_fd_to_handle(struct dr
+ 	if (ret)
+ 		goto out_put;
+ 
+-	ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
++	ret = drm_prime_add_buf_handle(&file_priv->prime,
+ 			dma_buf, *handle);
+ 	if (ret)
+ 		goto fail;
+@@ -497,7 +500,7 @@ void drm_prime_destroy_file_private(stru
+ }
+ EXPORT_SYMBOL(drm_prime_destroy_file_private);
+ 
+-int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
++static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
+ {
+ 	struct drm_prime_member *member;
+ 
+@@ -505,14 +508,14 @@ int drm_prime_add_imported_buf_handle(st
+ 	if (!member)
+ 		return -ENOMEM;
+ 
++	get_dma_buf(dma_buf);
+ 	member->dma_buf = dma_buf;
+ 	member->handle = handle;
+ 	list_add(&member->entry, &prime_fpriv->head);
+ 	return 0;
+ }
+-EXPORT_SYMBOL(drm_prime_add_imported_buf_handle);
+ 
+-int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
++int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
+ {
+ 	struct drm_prime_member *member;
+ 
+@@ -524,19 +527,20 @@ int drm_prime_lookup_imported_buf_handle
+ 	}
+ 	return -ENOENT;
+ }
+-EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle);
++EXPORT_SYMBOL(drm_prime_lookup_buf_handle);
+ 
+-void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
++void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
+ {
+ 	struct drm_prime_member *member, *safe;
+ 
+ 	mutex_lock(&prime_fpriv->lock);
+ 	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
+ 		if (member->dma_buf == dma_buf) {
++			dma_buf_put(dma_buf);
+ 			list_del(&member->entry);
+ 			kfree(member);
+ 		}
+ 	}
+ 	mutex_unlock(&prime_fpriv->lock);
+ }
+-EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle);
++EXPORT_SYMBOL(drm_prime_remove_buf_handle);
+--- a/include/drm/drmP.h
++++ b/include/drm/drmP.h
+@@ -1593,9 +1593,8 @@ extern void drm_prime_gem_destroy(struct
+ 
+ void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
+ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
+-int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
+-int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle);
+-void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
++int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle);
++void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
+ 
+ int drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj);
+ int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf,
diff --git a/queue-3.9/series b/queue-3.9/series
index 72af3bc9793..833b904231a 100644
--- a/queue-3.9/series
+++ b/queue-3.9/series
@@ -24,3 +24,8 @@ arm64-ignore-the-write-esr-flag-on-cache-maintenance-faults.patch
 blkcg-fix-scheduling-while-atomic-in-blk_queue_bypass_start.patch
 block-fix-max-discard-sectors-limit.patch
 drm-cirrus-deal-with-bo-reserve-fail-in-dirty-update-path.patch
+drm-mgag200-deal-with-bo-reserve-fail-in-dirty-update-path.patch
+drm-gma500-fix-backlight-hotkeys-behaviour-on-netbooks.patch
+drm-prime-fix-refcounting-on-the-dmabuf-import-error-path.patch
+drm-prime-keep-a-reference-from-the-handle-to-exported-dma-buf-v6.patch
+drm-ast-deal-with-bo-reserve-fail-in-dirty-update-path.patch