From 6fd775fb8738505fd0a11e205ec2c47df0f0cebb Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Date: Fri, 28 Jan 2022 08:24:23 +0100
Subject: [PATCH] 4.19-stable patches

added patches:
      drm-vmwgfx-fix-stale-file-descriptors-on-failed-usercopy.patch

---
 ...-file-descriptors-on-failed-usercopy.patch | 160 ++++++++++++++++++
 queue-4.19/series                             |   1 +
 2 files changed, 161 insertions(+)
 create mode 100644 queue-4.19/drm-vmwgfx-fix-stale-file-descriptors-on-failed-usercopy.patch

diff --git a/queue-4.19/drm-vmwgfx-fix-stale-file-descriptors-on-failed-usercopy.patch b/queue-4.19/drm-vmwgfx-fix-stale-file-descriptors-on-failed-usercopy.patch
new file mode 100644
index 00000000000..68b399b4d92
--- /dev/null
+++ b/queue-4.19/drm-vmwgfx-fix-stale-file-descriptors-on-failed-usercopy.patch
@@ -0,0 +1,160 @@
+From a0f90c8815706981c483a652a6aefca51a5e191c Mon Sep 17 00:00:00 2001
+From: Mathias Krause <minipli@grsecurity.net>
+Date: Thu, 27 Jan 2022 18:34:19 +1000
+Subject: drm/vmwgfx: Fix stale file descriptors on failed usercopy
+
+From: Mathias Krause <minipli@grsecurity.net>
+
+commit a0f90c8815706981c483a652a6aefca51a5e191c upstream.
+
+A failing usercopy of the fence_rep object will lead to a stale entry in
+the file descriptor table as put_unused_fd() won't release it. This
+enables userland to refer to a dangling 'file' object through that still
+valid file descriptor, leading to all kinds of use-after-free
+exploitation scenarios.
+
+Fix this by deferring the call to fd_install() until after the usercopy
+has succeeded.
+
+Fixes: c906965dee22 ("drm/vmwgfx: Add export fence to file descriptor support")
+Signed-off-by: Mathias Krause <minipli@grsecurity.net>
+Signed-off-by: Zack Rusin <zackr@vmware.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_drv.h     |    5 +---
+ drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |   34 ++++++++++++++++----------------
+ drivers/gpu/drm/vmwgfx/vmwgfx_fence.c   |    2 -
+ drivers/gpu/drm/vmwgfx/vmwgfx_kms.c     |    2 -
+ 4 files changed, 21 insertions(+), 22 deletions(-)
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -855,15 +855,14 @@ extern int vmw_execbuf_fence_commands(st
+ 				      struct vmw_private *dev_priv,
+ 				      struct vmw_fence_obj **p_fence,
+ 				      uint32_t *p_handle);
+-extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
++extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+ 					struct vmw_fpriv *vmw_fp,
+ 					int ret,
+ 					struct drm_vmw_fence_rep __user
+ 					*user_fence_rep,
+ 					struct vmw_fence_obj *fence,
+ 					uint32_t fence_handle,
+-					int32_t out_fence_fd,
+-					struct sync_file *sync_file);
++					int32_t out_fence_fd);
+ extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+ 				      struct ttm_buffer_object *bo,
+ 				      bool interruptible,
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -3873,20 +3873,19 @@ int vmw_execbuf_fence_commands(struct dr
+  * object so we wait for it immediately, and then unreference the
+  * user-space reference.
+  */
+-void
++int
+ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+ 			    struct vmw_fpriv *vmw_fp,
+ 			    int ret,
+ 			    struct drm_vmw_fence_rep __user *user_fence_rep,
+ 			    struct vmw_fence_obj *fence,
+ 			    uint32_t fence_handle,
+-			    int32_t out_fence_fd,
+-			    struct sync_file *sync_file)
++			    int32_t out_fence_fd)
+ {
+ 	struct drm_vmw_fence_rep fence_rep;
+
+ 	if (user_fence_rep == NULL)
+-		return;
++		return 0;
+
+ 	memset(&fence_rep, 0, sizeof(fence_rep));
+
+@@ -3914,20 +3913,14 @@ vmw_execbuf_copy_fence_user(struct vmw_p
+ 	 * and unreference the handle.
+ 	 */
+ 	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
+-		if (sync_file)
+-			fput(sync_file->file);
+-
+-		if (fence_rep.fd != -1) {
+-			put_unused_fd(fence_rep.fd);
+-			fence_rep.fd = -1;
+-		}
+-
+ 		ttm_ref_object_base_unref(vmw_fp->tfile,
+ 					  fence_handle, TTM_REF_USAGE);
+ 		DRM_ERROR("Fence copy error. Syncing.\n");
+ 		(void) vmw_fence_obj_wait(fence, false, false,
+ 					  VMW_FENCE_WAIT_TIMEOUT);
+ 	}
++
++	return ret ? -EFAULT : 0;
+ }
+
+ /**
+@@ -4287,16 +4280,23 @@ int vmw_execbuf_process(struct drm_file
+
+ 			(void) vmw_fence_obj_wait(fence, false, false,
+ 						  VMW_FENCE_WAIT_TIMEOUT);
++		}
++	}
++
++	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
++					  user_fence_rep, fence, handle, out_fence_fd);
++
++	if (sync_file) {
++		if (ret) {
++			/* usercopy of fence failed, put the file object */
++			fput(sync_file->file);
++			put_unused_fd(out_fence_fd);
+ 		} else {
+ 			/* Link the fence with the FD created earlier */
+ 			fd_install(out_fence_fd, sync_file->file);
+ 		}
+ 	}
+
+-	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
+-				    user_fence_rep, fence, handle,
+-				    out_fence_fd, sync_file);
+-
+ 	/* Don't unreference when handing fence out */
+ 	if (unlikely(out_fence != NULL)) {
+ 		*out_fence = fence;
+@@ -4315,7 +4315,7 @@ int vmw_execbuf_process(struct drm_file
+ 	 */
+ 	vmw_resource_list_unreference(sw_context, &resource_list);
+
+-	return 0;
++	return ret;
+
+ out_unlock_binding:
+ 	mutex_unlock(&dev_priv->binding_mutex);
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+@@ -1169,7 +1169,7 @@ int vmw_fence_event_ioctl(struct drm_dev
+ 	}
+
+ 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
+-				    handle, -1, NULL);
++				    handle, -1);
+ 	vmw_fence_obj_unreference(&fence);
+ 	return 0;
+ out_no_create:
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -2662,7 +2662,7 @@ void vmw_kms_helper_buffer_finish(struct
+ 	if (file_priv)
+ 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
+ 					    ret, user_fence_rep, fence,
+-					    handle, -1, NULL);
++					    handle, -1);
+ 	if (out_fence)
+ 		*out_fence = fence;
+ 	else
diff --git a/queue-4.19/series b/queue-4.19/series
index 8ee73c156b7..e667a29134a 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -1,3 +1,4 @@
 drm-i915-flush-tlbs-before-releasing-backing-store.patch
 net-bridge-clear-bridge-s-private-skb-space-on-xmit.patch
 select-fix-indefinitely-sleeping-task-in-poll_schedule_timeout.patch
+drm-vmwgfx-fix-stale-file-descriptors-on-failed-usercopy.patch
-- 
2.47.2
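
The fix relies on an ordering that can be shown in isolation: reserve a
descriptor number, copy the result to user space, and only then publish the
descriptor. The sketch below is not the vmwgfx code; example_export_fence_fd()
and its user_fd_ptr argument are hypothetical, and only the kernel helpers it
calls (get_unused_fd_flags(), sync_file_create(), copy_to_user(), fput(),
put_unused_fd(), fd_install()) are real. The key property is that fd_install()
makes the file visible in the process's descriptor table and cannot be undone,
so it has to come after the last operation that can fail:

/*
 * Minimal sketch of the "reserve fd, copy to user, then publish"
 * ordering.  Hypothetical helper for illustration, not vmwgfx code.
 */
#include <linux/dma-fence.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

static int example_export_fence_fd(struct dma_fence *fence,
				   int __user *user_fd_ptr)
{
	struct sync_file *sync_file;
	int fd;

	/* Reserve a descriptor number; userland cannot see it yet. */
	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	sync_file = sync_file_create(fence);
	if (!sync_file) {
		put_unused_fd(fd);
		return -ENOMEM;
	}

	/*
	 * Report the number to userland while we can still back out:
	 * until fd_install() runs, dropping the file and the reserved
	 * number is a complete rollback.
	 */
	if (copy_to_user(user_fd_ptr, &fd, sizeof(fd))) {
		fput(sync_file->file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	/* Point of no return: the fd becomes live in the fd table. */
	fd_install(fd, sync_file->file);
	return 0;
}

This is also why the original ordering was exploitable: put_unused_fd() cannot
revoke a descriptor that fd_install() has already published, so a failing
usercopy left userland holding a still-valid fd for the 'file' object the
error path went on to release.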