git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.19-stable patches
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 28 Jan 2022 07:24:23 +0000 (08:24 +0100)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 28 Jan 2022 07:24:23 +0000 (08:24 +0100)
added patches:
drm-vmwgfx-fix-stale-file-descriptors-on-failed-usercopy.patch

queue-4.19/drm-vmwgfx-fix-stale-file-descriptors-on-failed-usercopy.patch [new file with mode: 0644]
queue-4.19/series

diff --git a/queue-4.19/drm-vmwgfx-fix-stale-file-descriptors-on-failed-usercopy.patch b/queue-4.19/drm-vmwgfx-fix-stale-file-descriptors-on-failed-usercopy.patch
new file mode 100644 (file)
index 0000000..68b399b
--- /dev/null
@@ -0,0 +1,160 @@
+From a0f90c8815706981c483a652a6aefca51a5e191c Mon Sep 17 00:00:00 2001
+From: Mathias Krause <minipli@grsecurity.net>
+Date: Thu, 27 Jan 2022 18:34:19 +1000
+Subject: drm/vmwgfx: Fix stale file descriptors on failed usercopy
+
+From: Mathias Krause <minipli@grsecurity.net>
+
+commit a0f90c8815706981c483a652a6aefca51a5e191c upstream.
+
+A failing usercopy of the fence_rep object will lead to a stale entry in
+the file descriptor table as put_unused_fd() won't release it. This
+enables userland to refer to a dangling 'file' object through that still
+valid file descriptor, leading to all kinds of use-after-free
+exploitation scenarios.
+
+Fix this by deferring the call to fd_install() until after the usercopy
+has succeeded.
+
+Fixes: c906965dee22 ("drm/vmwgfx: Add export fence to file descriptor support")
+Signed-off-by: Mathias Krause <minipli@grsecurity.net>
+Signed-off-by: Zack Rusin <zackr@vmware.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_drv.h     |    5 +---
+ drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |   34 ++++++++++++++++----------------
+ drivers/gpu/drm/vmwgfx/vmwgfx_fence.c   |    2 -
+ drivers/gpu/drm/vmwgfx/vmwgfx_kms.c     |    2 -
+ 4 files changed, 21 insertions(+), 22 deletions(-)
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -855,15 +855,14 @@ extern int vmw_execbuf_fence_commands(st
+                                     struct vmw_private *dev_priv,
+                                     struct vmw_fence_obj **p_fence,
+                                     uint32_t *p_handle);
+-extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
++extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+                                       struct vmw_fpriv *vmw_fp,
+                                       int ret,
+                                       struct drm_vmw_fence_rep __user
+                                       *user_fence_rep,
+                                       struct vmw_fence_obj *fence,
+                                       uint32_t fence_handle,
+-                                      int32_t out_fence_fd,
+-                                      struct sync_file *sync_file);
++                                      int32_t out_fence_fd);
+ extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
+                                     struct ttm_buffer_object *bo,
+                                     bool interruptible,
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -3873,20 +3873,19 @@ int vmw_execbuf_fence_commands(struct dr
+  * object so we wait for it immediately, and then unreference the
+  * user-space reference.
+  */
+-void
++int
+ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+                           struct vmw_fpriv *vmw_fp,
+                           int ret,
+                           struct drm_vmw_fence_rep __user *user_fence_rep,
+                           struct vmw_fence_obj *fence,
+                           uint32_t fence_handle,
+-                          int32_t out_fence_fd,
+-                          struct sync_file *sync_file)
++                          int32_t out_fence_fd)
+ {
+       struct drm_vmw_fence_rep fence_rep;
+       if (user_fence_rep == NULL)
+-              return;
++              return 0;
+       memset(&fence_rep, 0, sizeof(fence_rep));
+@@ -3914,20 +3913,14 @@ vmw_execbuf_copy_fence_user(struct vmw_p
+        * and unreference the handle.
+        */
+       if (unlikely(ret != 0) && (fence_rep.error == 0)) {
+-              if (sync_file)
+-                      fput(sync_file->file);
+-
+-              if (fence_rep.fd != -1) {
+-                      put_unused_fd(fence_rep.fd);
+-                      fence_rep.fd = -1;
+-              }
+-
+               ttm_ref_object_base_unref(vmw_fp->tfile,
+                                         fence_handle, TTM_REF_USAGE);
+               DRM_ERROR("Fence copy error. Syncing.\n");
+               (void) vmw_fence_obj_wait(fence, false, false,
+                                         VMW_FENCE_WAIT_TIMEOUT);
+       }
++
++      return ret ? -EFAULT : 0;
+ }
+ /**
+@@ -4287,16 +4280,23 @@ int vmw_execbuf_process(struct drm_file
+                       (void) vmw_fence_obj_wait(fence, false, false,
+                                                 VMW_FENCE_WAIT_TIMEOUT);
++              }
++      }
++
++      ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
++                                  user_fence_rep, fence, handle, out_fence_fd);
++
++      if (sync_file) {
++              if (ret) {
++                      /* usercopy of fence failed, put the file object */
++                      fput(sync_file->file);
++                      put_unused_fd(out_fence_fd);
+               } else {
+                       /* Link the fence with the FD created earlier */
+                       fd_install(out_fence_fd, sync_file->file);
+               }
+       }
+-      vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
+-                                  user_fence_rep, fence, handle,
+-                                  out_fence_fd, sync_file);
+-
+       /* Don't unreference when handing fence out */
+       if (unlikely(out_fence != NULL)) {
+               *out_fence = fence;
+@@ -4315,7 +4315,7 @@ int vmw_execbuf_process(struct drm_file
+        */
+       vmw_resource_list_unreference(sw_context, &resource_list);
+-      return 0;
++      return ret;
+ out_unlock_binding:
+       mutex_unlock(&dev_priv->binding_mutex);
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+@@ -1169,7 +1169,7 @@ int vmw_fence_event_ioctl(struct drm_dev
+       }
+       vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
+-                                  handle, -1, NULL);
++                                  handle, -1);
+       vmw_fence_obj_unreference(&fence);
+       return 0;
+ out_no_create:
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -2662,7 +2662,7 @@ void vmw_kms_helper_buffer_finish(struct
+       if (file_priv)
+               vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
+                                           ret, user_fence_rep, fence,
+-                                          handle, -1, NULL);
++                                          handle, -1);
+       if (out_fence)
+               *out_fence = fence;
+       else
index 8ee73c156b7f20f6a868a82b8c811c117fe8beea..e667a29134af1f485fb516814101ac2bfed1247c 100644 (file)
@@ -1,3 +1,4 @@
 drm-i915-flush-tlbs-before-releasing-backing-store.patch
 net-bridge-clear-bridge-s-private-skb-space-on-xmit.patch
 select-fix-indefinitely-sleeping-task-in-poll_schedule_timeout.patch
+drm-vmwgfx-fix-stale-file-descriptors-on-failed-usercopy.patch