git.ipfire.org Git - people/ms/linux.git/blobdiff - drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drm/vmwgfx: Implement the cursor_set2 callback v2
[people/ms/linux.git] / drivers / gpu / drm / vmwgfx / vmwgfx_drv.c
index 2c7a25c71af2979c784b6cb4a87ee2088d4f8fbb..c49812b80dd0dae82c77096f75fbd4cced19ffb1 100644 (file)
 
 static const struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
-                     DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                      vmw_kms_cursor_bypass_ioctl,
-                     DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+                     DRM_MASTER | DRM_CONTROL_ALLOW),
 
        VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
-                     DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+                     DRM_MASTER | DRM_CONTROL_ALLOW),
        VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
-                     DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+                     DRM_MASTER | DRM_CONTROL_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
-                     DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
+                     DRM_MASTER | DRM_CONTROL_ALLOW),
 
        VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
-                     DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
-                     DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
-       VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | DRM_UNLOCKED |
+                     DRM_AUTH | DRM_RENDER_ALLOW),
+       VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
-                     DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
                      vmw_fence_obj_signaled_ioctl,
-                     DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
-                     DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_AUTH | DRM_RENDER_ALLOW),
 
        /* these allow direct access to the framebuffers mark as master only */
        VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
-                     DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
+                     DRM_MASTER | DRM_AUTH),
        VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
                      vmw_present_readback_ioctl,
-                     DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
+                     DRM_MASTER | DRM_AUTH),
        VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
                      vmw_kms_update_layout_ioctl,
-                     DRM_MASTER | DRM_UNLOCKED),
+                     DRM_MASTER),
        VMW_IOCTL_DEF(VMW_CREATE_SHADER,
                      vmw_shader_define_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_SHADER,
                      vmw_shader_destroy_ioctl,
-                     DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
                      vmw_gb_surface_define_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
                      vmw_gb_surface_reference_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_SYNCCPU,
                      vmw_user_dmabuf_synccpu_ioctl,
-                     DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
                      vmw_extended_context_define_ioctl,
-                     DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+                     DRM_AUTH | DRM_RENDER_ALLOW),
 };
 
 static struct pci_device_id vmw_pci_id_list[] = {
@@ -643,7 +643,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        init_waitqueue_head(&dev_priv->fence_queue);
        init_waitqueue_head(&dev_priv->fifo_queue);
        dev_priv->fence_queue_waiters = 0;
-       atomic_set(&dev_priv->fifo_queue_waiters, 0);
+       dev_priv->fifo_queue_waiters = 0;
 
        dev_priv->used_memory_size = 0;
 
@@ -752,8 +752,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        dev_priv->active_master = &dev_priv->fbdev_master;
 
-       dev_priv->mmio_virt = ioremap_cache(dev_priv->mmio_start,
-                                           dev_priv->mmio_size);
+       dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
+                                      dev_priv->mmio_size, MEMREMAP_WB);
 
        if (unlikely(dev_priv->mmio_virt == NULL)) {
                ret = -ENOMEM;
@@ -907,7 +907,7 @@ out_no_irq:
 out_no_device:
        ttm_object_device_release(&dev_priv->tdev);
 out_err4:
-       iounmap(dev_priv->mmio_virt);
+       memunmap(dev_priv->mmio_virt);
 out_err3:
        vmw_ttm_global_release(dev_priv);
 out_err0:
@@ -958,7 +958,7 @@ static int vmw_driver_unload(struct drm_device *dev)
                pci_release_regions(dev->pdev);
 
        ttm_object_device_release(&dev_priv->tdev);
-       iounmap(dev_priv->mmio_virt);
+       memunmap(dev_priv->mmio_virt);
        if (dev_priv->ctx.staged_bindings)
                vmw_binding_state_free(dev_priv->ctx.staged_bindings);
        vmw_ttm_global_release(dev_priv);
@@ -1061,14 +1061,6 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev,
        }
        mutex_unlock(&dev->master_mutex);
 
-       /*
-        * Taking the drm_global_mutex after the TTM lock might deadlock
-        */
-       if (!(flags & DRM_UNLOCKED)) {
-               DRM_ERROR("Refusing locked ioctl access.\n");
-               return ERR_PTR(-EDEADLK);
-       }
-
        /*
         * Take the TTM lock. Possibly sleep waiting for the authenticating
         * master to become master again, or for a SIGTERM if the
@@ -1241,6 +1233,7 @@ static void vmw_master_drop(struct drm_device *dev,
 
        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
+       vmw_kms_legacy_hotspot_clear(dev_priv);
        if (unlikely((ret != 0))) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
                drm_master_put(&vmw_fp->locked_master);