From: Greg Kroah-Hartman
Date: Fri, 28 Jan 2022 07:24:02 +0000 (+0100)
Subject: 4.14-stable patches
X-Git-Tag: v4.4.301~6
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=963e6820f0d97e2ed7f6f1cf175c38b2fa3dd0d2;p=thirdparty%2Fkernel%2Fstable-queue.git

4.14-stable patches

added patches:
	drm-vmwgfx-fix-stale-file-descriptors-on-failed-usercopy.patch
---

diff --git a/queue-4.14/drm-vmwgfx-fix-stale-file-descriptors-on-failed-usercopy.patch b/queue-4.14/drm-vmwgfx-fix-stale-file-descriptors-on-failed-usercopy.patch
new file mode 100644
index 00000000000..ca5ca48df17
--- /dev/null
+++ b/queue-4.14/drm-vmwgfx-fix-stale-file-descriptors-on-failed-usercopy.patch
@@ -0,0 +1,160 @@
+From a0f90c8815706981c483a652a6aefca51a5e191c Mon Sep 17 00:00:00 2001
+From: Mathias Krause
+Date: Thu, 27 Jan 2022 18:34:19 +1000
+Subject: drm/vmwgfx: Fix stale file descriptors on failed usercopy
+
+From: Mathias Krause
+
+commit a0f90c8815706981c483a652a6aefca51a5e191c upstream.
+
+A failing usercopy of the fence_rep object will lead to a stale entry in
+the file descriptor table as put_unused_fd() won't release it. This
+enables userland to refer to a dangling 'file' object through that still
+valid file descriptor, leading to all kinds of use-after-free
+exploitation scenarios.
+
+Fix this by deferring the call to fd_install() until after the usercopy
+has succeeded.
+
+Fixes: c906965dee22 ("drm/vmwgfx: Add export fence to file descriptor support")
+Signed-off-by: Mathias Krause
+Signed-off-by: Zack Rusin
+Signed-off-by: Dave Airlie
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_drv.h     |    5 +---
+ drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |   34 ++++++++++++++++----------------
+ drivers/gpu/drm/vmwgfx/vmwgfx_fence.c   |    2 -
+ drivers/gpu/drm/vmwgfx/vmwgfx_kms.c     |    2 -
+ 4 files changed, 21 insertions(+), 22 deletions(-)
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -837,15 +837,14 @@ extern int vmw_execbuf_fence_commands(st
+ 				      struct vmw_private *dev_priv,
+ 				      struct vmw_fence_obj **p_fence,
+ 				      uint32_t *p_handle);
+-extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
++extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+ 					struct vmw_fpriv *vmw_fp,
+ 					int ret,
+ 					struct drm_vmw_fence_rep __user
+ 					*user_fence_rep,
+ 					struct vmw_fence_obj *fence,
+ 					uint32_t fence_handle,
+-					int32_t out_fence_fd,
+-					struct sync_file *sync_file);
++					int32_t out_fence_fd);
+ extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+ 				      struct ttm_buffer_object *bo,
+ 				      bool interruptible,
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -3848,20 +3848,19 @@ int vmw_execbuf_fence_commands(struct dr
+  * object so we wait for it immediately, and then unreference the
+  * user-space reference.
+  */
+-void
++int
+ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+ 			    struct vmw_fpriv *vmw_fp,
+ 			    int ret,
+ 			    struct drm_vmw_fence_rep __user *user_fence_rep,
+ 			    struct vmw_fence_obj *fence,
+ 			    uint32_t fence_handle,
+-			    int32_t out_fence_fd,
+-			    struct sync_file *sync_file)
++			    int32_t out_fence_fd)
+ {
+ 	struct drm_vmw_fence_rep fence_rep;
+
+ 	if (user_fence_rep == NULL)
+-		return;
++		return 0;
+
+ 	memset(&fence_rep, 0, sizeof(fence_rep));
+
+@@ -3889,20 +3888,14 @@ vmw_execbuf_copy_fence_user(struct vmw_p
+ 	 * and unreference the handle.
+ 	 */
+ 	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
+-		if (sync_file)
+-			fput(sync_file->file);
+-
+-		if (fence_rep.fd != -1) {
+-			put_unused_fd(fence_rep.fd);
+-			fence_rep.fd = -1;
+-		}
+-
+ 		ttm_ref_object_base_unref(vmw_fp->tfile,
+ 					  fence_handle, TTM_REF_USAGE);
+ 		DRM_ERROR("Fence copy error. Syncing.\n");
+ 		(void) vmw_fence_obj_wait(fence, false, false,
+ 					  VMW_FENCE_WAIT_TIMEOUT);
+ 	}
++
++	return ret ? -EFAULT : 0;
+ }
+
+ /**
+@@ -4262,16 +4255,23 @@ int vmw_execbuf_process(struct drm_file
+
+ 			(void) vmw_fence_obj_wait(fence, false, false,
+ 						  VMW_FENCE_WAIT_TIMEOUT);
++		}
++	}
++
++	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
++					  user_fence_rep, fence, handle, out_fence_fd);
++
++	if (sync_file) {
++		if (ret) {
++			/* usercopy of fence failed, put the file object */
++			fput(sync_file->file);
++			put_unused_fd(out_fence_fd);
+ 		} else {
+ 			/* Link the fence with the FD created earlier */
+ 			fd_install(out_fence_fd, sync_file->file);
+ 		}
+ 	}
+
+-	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
+-				    user_fence_rep, fence, handle,
+-				    out_fence_fd, sync_file);
+-
+ 	/* Don't unreference when handing fence out */
+ 	if (unlikely(out_fence != NULL)) {
+ 		*out_fence = fence;
+@@ -4290,7 +4290,7 @@ int vmw_execbuf_process(struct drm_file
+ 	 */
+ 	vmw_resource_list_unreference(sw_context, &resource_list);
+
+-	return 0;
++	return ret;
+
+ out_unlock_binding:
+ 	mutex_unlock(&dev_priv->binding_mutex);
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+@@ -1150,7 +1150,7 @@ int vmw_fence_event_ioctl(struct drm_dev
+ 	}
+
+ 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
+-				    handle, -1, NULL);
++				    handle, -1);
+ 	vmw_fence_obj_unreference(&fence);
+ 	return 0;
+ out_no_create:
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -2511,7 +2511,7 @@ void vmw_kms_helper_buffer_finish(struct
+ 	if (file_priv)
+ 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
+ 					    ret, user_fence_rep, fence,
+-					    handle, -1, NULL);
++					    handle, -1);
+ 	if (out_fence)
+ 		*out_fence = fence;
+ 	else
diff --git a/queue-4.14/series b/queue-4.14/series
index feedf79021d..ceea616a835 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -1,2 +1,3 @@
 drm-i915-flush-tlbs-before-releasing-backing-store.patch
 can-bcm-fix-uaf-of-bcm-op.patch
+drm-vmwgfx-fix-stale-file-descriptors-on-failed-usercopy.patch
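A note on the ordering the fix enforces: a file descriptor reserved with
get_unused_fd_flags() stays invisible to userspace until fd_install()
publishes it, and only an unpublished descriptor can be rolled back with
put_unused_fd(). The sketch below is a minimal illustration of that
pattern, assuming a sync_file-style export path; the function name
install_fence_fd() and struct fence_rep_sketch are invented for the
example and are not the actual vmwgfx code.

	/*
	 * Minimal sketch, assuming the caller already reserved 'fd' with
	 * get_unused_fd_flags() and holds a reference on 'sync_filp'.
	 * Names here are illustrative, not the vmwgfx implementation.
	 */
	#include <linux/file.h>		/* fd_install(), fput(), put_unused_fd() */
	#include <linux/uaccess.h>	/* copy_to_user() */

	struct fence_rep_sketch {
		int32_t fd;		/* descriptor handed back to userspace */
	};

	static int install_fence_fd(struct file *sync_filp, int fd,
				    struct fence_rep_sketch __user *urep)
	{
		struct fence_rep_sketch rep = { .fd = fd };

		if (copy_to_user(urep, &rep, sizeof(rep))) {
			/*
			 * The copy faulted before the fd was published, so
			 * the reserved slot can still be released; userspace
			 * never saw the descriptor.
			 */
			put_unused_fd(fd);
			fput(sync_filp);	/* drop our file reference */
			return -EFAULT;
		}

		/*
		 * Publish the fd only after the usercopy succeeded. Calling
		 * fd_install() first would leave a live, unrevokable table
		 * entry behind on a failed copy -- the stale-descriptor bug
		 * this patch removes.
		 */
		fd_install(fd, sync_filp);
		return 0;
	}

This mirrors the backport above: vmw_execbuf_copy_fence_user() now
reports -EFAULT when the usercopy fails, and vmw_execbuf_process()
either installs the descriptor or releases both the unused fd and the
sync_file reference.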