drm/i915: Flatten obj->mm.lock
author Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Tue, 23 Mar 2021 15:50:06 +0000 (16:50 +0100)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Wed, 24 Mar 2021 16:27:19 +0000 (17:27 +0100)
With userptr fixed, there is no need for the separate lockdep classes
anymore, and we can remove all the lockdep tricks that were used. A
trylock in the shrinker is all we need now to flatten the locking
hierarchy.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
[danvet: Resolve conflict because we don't have the patch from Chris
to rebrand i915_gem_shrinker_taints_mutex to fs_reclaim_taints_mutex.
It's not a bad idea, but if we do it, it should be moved to the right
header. See
https://lore.kernel.org/intel-gfx/20210202154318.19246-1-chris@chris-wilson.co.uk/]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-18-maarten.lankhorst@linux.intel.com
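
For context, the core of the change: the shrinker runs under fs_reclaim, and
any thread holding obj->mm.lock may allocate memory and thus recurse into the
shrinker. Taking obj->mm.lock unconditionally in the shrinker is what required
the lockdep class tricks removed below; a trylock sidesteps the inversion
entirely. A minimal sketch of the pattern, with hypothetical my_obj and
release_pages_locked() names rather than the actual driver code:

	static unsigned long shrink_one(struct my_obj *obj)
	{
		unsigned long freed;

		/*
		 * Never block on the lock inside reclaim: if another
		 * thread holds it (possibly allocating, i.e. sitting
		 * inside fs_reclaim itself), just skip this object.
		 */
		if (!mutex_trylock(&obj->mm_lock))
			return 0;

		freed = release_pages_locked(obj);
		mutex_unlock(&obj->mm_lock);

		return freed;
	}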
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gem/i915_gem_object.h
drivers/gpu/drm/i915/gem/i915_gem_pages.c
drivers/gpu/drm/i915/gem/i915_gem_phys.c
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
drivers/gpu/drm/i915/gem/i915_gem_userptr.c

index b7fe7e4f69e4170d133af88f1819d402b9e6f762..821cb40f8d73c239b0761522ac36d2e61e9e2dd5 100644
@@ -62,7 +62,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops,
                          struct lock_class_key *key, unsigned flags)
 {
-       __mutex_init(&obj->mm.lock, ops->name ?: "obj->mm.lock", key);
+       mutex_init(&obj->mm.lock);
 
        spin_lock_init(&obj->vma.lock);
        INIT_LIST_HEAD(&obj->vma.list);
@@ -86,10 +86,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
        mutex_init(&obj->mm.get_page.lock);
        INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
        mutex_init(&obj->mm.get_dma_page.lock);
-
-       if (IS_ENABLED(CONFIG_LOCKDEP) && i915_gem_object_is_shrinkable(obj))
-               i915_gem_shrinker_taints_mutex(to_i915(obj->base.dev),
-                                              &obj->mm.lock);
 }
 
 /**
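
The i915_gem_shrinker_taints_mutex() call deleted above existed only to prime
lockdep: it records up front that the mutex may be taken in the reclaim path,
so misuse is reported on the first run rather than only when reclaim actually
hits the lock. Roughly, such tainting amounts to (a simplified sketch from
memory, not part of this diff):

	fs_reclaim_acquire(GFP_KERNEL);
	mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
	mutex_release(&mutex->dep_map, _RET_IP_);
	fs_reclaim_release(GFP_KERNEL);

With the shrinker reduced to a trylock, obj->mm.lock is never waited on under
fs_reclaim, so the annotation (and the per-ops lock_class_key) can go.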
index 3dd470c612949c1fc94895849eb37e520cb549f6..7f4b3062d0dbc61af6e483cc75ad6dde1d2e5136 100644
@@ -346,27 +346,10 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
-       I915_MM_NORMAL = 0,
-       /*
-        * Only used by struct_mutex, when called "recursively" from
-        * direct-reclaim-esque. Safe because there is only every one
-        * struct_mutex in the entire system.
-        */
-       I915_MM_SHRINKER = 1,
-       /*
-        * Used for obj->mm.lock when allocating pages. Safe because the object
-        * isn't yet on any LRU, and therefore the shrinker can't deadlock on
-        * it. As soon as the object has pages, obj->mm.lock nests within
-        * fs_reclaim.
-        */
-       I915_MM_GET_PAGES = 1,
-};
-
 static inline int __must_check
 i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
-       might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
+       might_lock(&obj->mm.lock);
 
        if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
                return 0;
@@ -410,6 +393,7 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 }
 
 int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+int __i915_gem_object_put_pages_locked(struct drm_i915_gem_object *obj);
 void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
 
index e7d7650072c51e57946ccb0e51eb1900f47c69ba..e947d4c0da1fc018ac14f99fc3b6712b2f561560 100644
@@ -114,7 +114,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
        int err;
 
-       err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
+       err = mutex_lock_interruptible(&obj->mm.lock);
        if (err)
                return err;
 
@@ -196,21 +196,13 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
        return pages;
 }
 
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+int __i915_gem_object_put_pages_locked(struct drm_i915_gem_object *obj)
 {
        struct sg_table *pages;
-       int err;
 
        if (i915_gem_object_has_pinned_pages(obj))
                return -EBUSY;
 
-       /* May be called by shrinker from within get_pages() (on another bo) */
-       mutex_lock(&obj->mm.lock);
-       if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
-               err = -EBUSY;
-               goto unlock;
-       }
-
        i915_gem_object_release_mmap_offset(obj);
 
        /*
@@ -226,14 +218,22 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
         * get_pages backends we should be better able to handle the
         * cancellation of the async task in a more uniform manner.
         */
-       if (!pages)
-               pages = ERR_PTR(-EINVAL);
-
-       if (!IS_ERR(pages))
+       if (!IS_ERR_OR_NULL(pages))
                obj->ops->put_pages(obj, pages);
 
-       err = 0;
-unlock:
+       return 0;
+}
+
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+{
+       int err;
+
+       if (i915_gem_object_has_pinned_pages(obj))
+               return -EBUSY;
+
+       /* May be called by shrinker from within get_pages() (on another bo) */
+       mutex_lock(&obj->mm.lock);
+       err = __i915_gem_object_put_pages_locked(obj);
        mutex_unlock(&obj->mm.lock);
 
        return err;
@@ -341,7 +341,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
            !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
                return ERR_PTR(-ENXIO);
 
-       err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
+       err = mutex_lock_interruptible(&obj->mm.lock);
        if (err)
                return ERR_PTR(err);
 
index 06c481ff79d866e6f0719b53636d03d3959afa20..44329c435cf18001f1a9c6fa277b1246a73768cb 100644
@@ -236,7 +236,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
        if (err)
                return err;
 
-       err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
+       err = mutex_lock_interruptible(&obj->mm.lock);
        if (err)
                goto err_unlock;
 
index c2dba1cd9532becb1a4ae4210442a9c250d32bec..11b8d4e69fc71e6e8882a6270376444099968487 100644
@@ -49,9 +49,9 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
                flags = I915_GEM_OBJECT_UNBIND_TEST;
 
        if (i915_gem_object_unbind(obj, flags) == 0)
-               __i915_gem_object_put_pages(obj);
+               return true;
 
-       return !i915_gem_object_has_pages(obj);
+       return false;
 }
 
 static void try_to_writeback(struct drm_i915_gem_object *obj,
@@ -200,10 +200,10 @@ i915_gem_shrink(struct drm_i915_private *i915,
 
                        spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 
-                       if (unsafe_drop_pages(obj, shrink)) {
+                       if (unsafe_drop_pages(obj, shrink) &&
+                           mutex_trylock(&obj->mm.lock)) {
                                /* May arrive from get_pages on another bo */
-                               mutex_lock(&obj->mm.lock);
-                               if (!i915_gem_object_has_pages(obj)) {
+                               if (!__i915_gem_object_put_pages_locked(obj)) {
                                        try_to_writeback(obj, shrink);
                                        count += obj->base.size >> PAGE_SHIFT;
                                }
index ec8f525a6d8ad74bc46e17656dcac997ec5dfd83..f9209ebe27e4ce34bd0a3c1c806449038f31765d 100644
@@ -253,7 +253,7 @@ static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj, bool
        if (GEM_WARN_ON(i915_gem_object_has_pinned_pages(obj)))
                return -EBUSY;
 
-       mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
+       mutex_lock(&obj->mm.lock);
 
        pages = __i915_gem_object_unset_pages(obj);
        if (!IS_ERR_OR_NULL(pages))