Merge drm/drm-next into drm-intel-gt-next
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b37daf9d4bd025a113bfd9016ddb693b2b207c79..55d605c0c55de50476a2a75ecf169ba811755041 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1039,7 +1039,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 
        if (i915_gem_object_has_pages(obj) &&
            i915_gem_object_is_tiled(obj) &&
-           i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+           i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
                if (obj->mm.madv == I915_MADV_WILLNEED) {
                        GEM_BUG_ON(!i915_gem_object_has_tiling_quirk(obj));
                        i915_gem_object_clear_tiling_quirk(obj);
@@ -1089,6 +1089,43 @@ out:
        return err;
 }
 
+/*
+ * A single pass should suffice to release all the freed objects (along most
+ * call paths), but be a little more paranoid: freeing the objects does take
+ * a small amount of time, during which the RCU callbacks could have added
+ * new objects onto the freed list and armed the work again.
+ */
+void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
+{
+       while (atomic_read(&i915->mm.free_count)) {
+               flush_work(&i915->mm.free_work);
+               flush_delayed_work(&i915->bdev.wq);
+               rcu_barrier();
+       }
+}
+
+/*
+ * Similar to the objects above (see i915_gem_drain_freed_objects()), in
+ * general we have workers that are armed by RCU and then rearm themselves in
+ * their callbacks. To be paranoid, we need to drain the workqueue a second
+ * time after waiting for the RCU grace period so that we catch work queued
+ * via RCU from the first pass. As neither drain_workqueue() nor
+ * flush_workqueue() report a result, we assume that no more than 3 passes
+ * are required to catch all _recursive_ RCU delayed work.
+ */
+void i915_gem_drain_workqueue(struct drm_i915_private *i915)
+{
+       int i;
+
+       for (i = 0; i < 3; i++) {
+               flush_workqueue(i915->wq);
+               rcu_barrier();
+               i915_gem_drain_freed_objects(i915);
+       }
+
+       drain_workqueue(i915->wq);
+}
+
 int i915_gem_init(struct drm_i915_private *dev_priv)
 {
        struct intel_gt *gt;
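For context, a minimal sketch (not part of the patch) of how a teardown path might use the drain helper added above. The caller name is hypothetical; note that i915_gem_drain_workqueue() already folds i915_gem_drain_freed_objects() into each of its passes:

/* Hypothetical caller, for illustration only -- not taken from the patch. */
static void example_gem_teardown(struct drm_i915_private *i915)
{
	/*
	 * One call is enough: each of the three passes flushes i915->wq,
	 * waits for an RCU grace period and reaps freed objects, so work
	 * re-armed from an RCU callback in pass N is caught by pass N+1.
	 */
	i915_gem_drain_workqueue(i915);
}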
@@ -1097,8 +1134,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 
        /* We need to fallback to 4K pages if host doesn't support huge gtt. */
        if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
-               mkwrite_device_info(dev_priv)->page_sizes =
-                       I915_GTT_PAGE_SIZE_4K;
+               RUNTIME_INFO(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K;
 
        ret = i915_gem_init_userptr(dev_priv);
        if (ret)
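As an aside, a minimal sketch (not from the patch) of how the clamped runtime page-size mask would gate a later huge-page decision; the helper name is hypothetical:

/* Hypothetical helper, for illustration only. */
static bool example_can_use_64k_gtt_pages(struct drm_i915_private *i915)
{
	/*
	 * With the vGPU fallback above, RUNTIME_INFO(i915)->page_sizes
	 * contains only I915_GTT_PAGE_SIZE_4K, so this test fails and
	 * callers stay on 4K GTT pages.
	 */
	return RUNTIME_INFO(i915)->page_sizes & I915_GTT_PAGE_SIZE_64K;
}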
@@ -1235,7 +1271,7 @@ void i915_gem_init_early(struct drm_i915_private *dev_priv)
        i915_gem_init__mm(dev_priv);
        i915_gem_init__contexts(dev_priv);
 
-       spin_lock_init(&dev_priv->fb_tracking.lock);
+       spin_lock_init(&dev_priv->display.fb_tracking.lock);
 }
 
 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)