From: Greg Kroah-Hartman
Date: Fri, 28 Jan 2022 07:25:21 +0000 (+0100)
Subject: 5.16-stable patches
X-Git-Tag: v4.4.301~1
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=4a412849aac0527a58eb4780f0f0c351566e7a75;p=thirdparty%2Fkernel%2Fstable-queue.git

5.16-stable patches

added patches:
	drm-vmwgfx-fix-stale-file-descriptors-on-failed-usercopy.patch
---

diff --git a/queue-5.16/drm-vmwgfx-fix-stale-file-descriptors-on-failed-usercopy.patch b/queue-5.16/drm-vmwgfx-fix-stale-file-descriptors-on-failed-usercopy.patch
new file mode 100644
index 00000000000..b47fc4dd10c
--- /dev/null
+++ b/queue-5.16/drm-vmwgfx-fix-stale-file-descriptors-on-failed-usercopy.patch
@@ -0,0 +1,156 @@
+From a0f90c8815706981c483a652a6aefca51a5e191c Mon Sep 17 00:00:00 2001
+From: Mathias Krause
+Date: Thu, 27 Jan 2022 18:34:19 +1000
+Subject: drm/vmwgfx: Fix stale file descriptors on failed usercopy
+
+From: Mathias Krause
+
+commit a0f90c8815706981c483a652a6aefca51a5e191c upstream.
+
+A failing usercopy of the fence_rep object will lead to a stale entry in
+the file descriptor table as put_unused_fd() won't release it. This
+enables userland to refer to a dangling 'file' object through that still
+valid file descriptor, leading to all kinds of use-after-free
+exploitation scenarios.
+
+Fix this by deferring the call to fd_install() until after the usercopy
+has succeeded.
+
+Fixes: c906965dee22 ("drm/vmwgfx: Add export fence to file descriptor support")
+Signed-off-by: Mathias Krause
+Signed-off-by: Zack Rusin
+Signed-off-by: Dave Airlie
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_drv.h     |    5 +---
+ drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |   33 ++++++++++++++++----------------
+ drivers/gpu/drm/vmwgfx/vmwgfx_fence.c   |    2 -
+ drivers/gpu/drm/vmwgfx/vmwgfx_kms.c     |    2 -
+ 4 files changed, 21 insertions(+), 21 deletions(-)
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -1112,15 +1112,14 @@ extern int vmw_execbuf_fence_commands(st
+ 				      struct vmw_private *dev_priv,
+ 				      struct vmw_fence_obj **p_fence,
+ 				      uint32_t *p_handle);
+-extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
++extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+ 					struct vmw_fpriv *vmw_fp,
+ 					int ret,
+ 					struct drm_vmw_fence_rep __user
+ 					*user_fence_rep,
+ 					struct vmw_fence_obj *fence,
+ 					uint32_t fence_handle,
+-					int32_t out_fence_fd,
+-					struct sync_file *sync_file);
++					int32_t out_fence_fd);
+ bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
+ 
+ /**
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -3823,17 +3823,17 @@ int vmw_execbuf_fence_commands(struct dr
+  * Also if copying fails, user-space will be unable to signal the fence object
+  * so we wait for it immediately, and then unreference the user-space reference.
+  */
+-void
++int
+ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+ 			    struct vmw_fpriv *vmw_fp, int ret,
+ 			    struct drm_vmw_fence_rep __user *user_fence_rep,
+ 			    struct vmw_fence_obj *fence, uint32_t fence_handle,
+-			    int32_t out_fence_fd, struct sync_file *sync_file)
++			    int32_t out_fence_fd)
+ {
+ 	struct drm_vmw_fence_rep fence_rep;
+ 
+ 	if (user_fence_rep == NULL)
+-		return;
++		return 0;
+ 
+ 	memset(&fence_rep, 0, sizeof(fence_rep));
+ 
+@@ -3861,20 +3861,14 @@ vmw_execbuf_copy_fence_user(struct vmw_p
+ 	 * handle.
+ 	 */
+ 	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
+-		if (sync_file)
+-			fput(sync_file->file);
+-
+-		if (fence_rep.fd != -1) {
+-			put_unused_fd(fence_rep.fd);
+-			fence_rep.fd = -1;
+-		}
+-
+ 		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
+ 					  TTM_REF_USAGE);
+ 		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
+ 		(void) vmw_fence_obj_wait(fence, false, false,
+ 					  VMW_FENCE_WAIT_TIMEOUT);
+ 	}
++
++	return ret ? -EFAULT : 0;
+ }
+ 
+ /**
+@@ -4212,16 +4206,23 @@ int vmw_execbuf_process
+ 
+ 			(void) vmw_fence_obj_wait(fence, false, false,
+ 						  VMW_FENCE_WAIT_TIMEOUT);
++		}
++	}
++
++	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
++					  user_fence_rep, fence, handle, out_fence_fd);
++
++	if (sync_file) {
++		if (ret) {
++			/* usercopy of fence failed, put the file object */
++			fput(sync_file->file);
++			put_unused_fd(out_fence_fd);
+ 		} else {
+ 			/* Link the fence with the FD created earlier */
+ 			fd_install(out_fence_fd, sync_file->file);
+ 		}
+ 	}
+ 
+-	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
+-				    user_fence_rep, fence, handle, out_fence_fd,
+-				    sync_file);
+-
+ 	/* Don't unreference when handing fence out */
+ 	if (unlikely(out_fence != NULL)) {
+ 		*out_fence = fence;
+@@ -4239,7 +4240,7 @@ int vmw_execbuf_process
+ 	 */
+ 	vmw_validation_unref_lists(&val_ctx);
+ 
+-	return 0;
++	return ret;
+ 
+ out_unlock_binding:
+ 	mutex_unlock(&dev_priv->binding_mutex);
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+@@ -1159,7 +1159,7 @@ int vmw_fence_event_ioctl(struct drm_dev
+ 	}
+ 
+ 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
+-				    handle, -1, NULL);
++				    handle, -1);
+ 	vmw_fence_obj_unreference(&fence);
+ 	return 0;
+ out_no_create:
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -2516,7 +2516,7 @@ void vmw_kms_helper_validation_finish(st
+ 	if (file_priv)
+ 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
+ 					    ret, user_fence_rep, fence,
+-					    handle, -1, NULL);
++					    handle, -1);
+ 	if (out_fence)
+ 		*out_fence = fence;
+ 	else
diff --git a/queue-5.16/series b/queue-5.16/series
index c3426947c32..c2af3b6512c 100644
--- a/queue-5.16/series
+++ b/queue-5.16/series
@@ -7,3 +7,4 @@ memcg-better-bounds-on-the-memcg-stats-updates.patch
 rcu-tighten-rcu_advance_cbs_nowake-checks.patch
 select-fix-indefinitely-sleeping-task-in-poll_schedule_timeout.patch
 arm64-bpf-remove-128mb-limit-for-bpf-jit-programs.patch
+drm-vmwgfx-fix-stale-file-descriptors-on-failed-usercopy.patch
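
The ordering the fix enforces is the general kernel rule for exporting a new
file descriptor: an fd returned by get_unused_fd_flags() is only a reserved
slot in the fd table and can still be taken back with put_unused_fd(), but
once fd_install() publishes a struct file in that slot, userspace may already
hold a reference and the error path can no longer revoke it. A minimal
illustrative sketch of that pattern follows; it is not part of the patch
above, and the helper name install_file_fd is hypothetical:

/*
 * Sketch only: reserve the fd, perform every step that can still fail
 * (here the usercopy), and only then publish the file with fd_install().
 */
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

static int install_file_fd(struct file *filp, int __user *ufd)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);	/* reserves a slot only */

	if (fd < 0)
		return fd;

	if (copy_to_user(ufd, &fd, sizeof(fd))) {
		/*
		 * Nothing references the slot yet, so it can still be
		 * released safely. Had fd_install() already run, userspace
		 * could race us and keep the file alive through the fd,
		 * which is the stale-descriptor problem the patch fixes.
		 */
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, filp);	/* consumes the caller's file reference */
	return 0;
}

The patched vmw_execbuf_process() follows the same order: the fence_rep
usercopy happens first, and fd_install() runs only on success, with fput()
and put_unused_fd() cleaning up the sync_file and the reserved fd on failure.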