]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/blob - releases/2.6.32.9/drm-i915-update-write_domains-on-active-list-after-flush.patch
4.9-stable patches
[thirdparty/kernel/stable-queue.git] / releases / 2.6.32.9 / drm-i915-update-write_domains-on-active-list-after-flush.patch
1 From 99fcb766a3a50466fe31d743260a3400c1aee855 Mon Sep 17 00:00:00 2001
2 From: Daniel Vetter <daniel.vetter@ffwll.ch>
3 Date: Sun, 7 Feb 2010 16:20:18 +0100
4 Subject: drm/i915: Update write_domains on active list after flush.
5
6 From: Daniel Vetter <daniel.vetter@ffwll.ch>
7
8 commit 99fcb766a3a50466fe31d743260a3400c1aee855 upstream.
9
10 Before changing the status of a buffer with a pending write we will await
11 upon a new flush for that buffer. So we can take advantage of any flushes
12 posted whilst the buffer is active and pending processing by the GPU, by
13 clearing its write_domain and updating its last_rendering_seqno -- thus
14 saving a potential flush in deep queues and improving flushing behaviour
15 upon eviction for both GTT space and fences.
16
17 In order to reduce the time spent searching the active list for matching
18 write_domains, we move those to a separate list whose elements are
19 the buffers belonging to the active/flushing list with pending writes.
20
21 Original patch by Chris Wilson <chris@chris-wilson.co.uk>, forward-ported
22 by me.
23
24 In addition to better performance, this also fixes a real bug. Before
25 this change, i915_gem_evict_everything didn't work as advertised. When
26 the gpu was actually busy and processing requests, the flush and subsequent
27 wait would not move active and dirty buffers to the inactive list, but
28 just to the flushing list. Which triggered the BUG_ON at the end of this
29 function. With the more tight dirty buffer tracking, all currently busy and
30 dirty buffers get moved to the inactive list by one i915_gem_flush operation.
31
32 I've left the BUG_ON I've used to prove this in there.
33
34 References:
35 Bug 25911 - 2.10.0 causes kernel oops and system hangs
36 http://bugs.freedesktop.org/show_bug.cgi?id=25911
37
38 Bug 26101 - [i915] xf86-video-intel 2.10.0 (and git) triggers kernel oops
39 within seconds after login
40 http://bugs.freedesktop.org/show_bug.cgi?id=26101
41
42 Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
43 Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
44 Tested-by: Adam Lantos <hege@playma.org>
45 Signed-off-by: Eric Anholt <eric@anholt.net>
46 Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
47
48 ---
49 drivers/gpu/drm/i915/i915_drv.h | 11 +++++++++++
50 drivers/gpu/drm/i915/i915_gem.c | 23 +++++++++++++++++++----
51 2 files changed, 30 insertions(+), 4 deletions(-)
52
53 --- a/drivers/gpu/drm/i915/i915_drv.h
54 +++ b/drivers/gpu/drm/i915/i915_drv.h
55 @@ -467,6 +467,15 @@ typedef struct drm_i915_private {
56 struct list_head flushing_list;
57
58 /**
59 + * List of objects currently pending a GPU write flush.
60 + *
61 + * All elements on this list will belong to either the
62 + * active_list or flushing_list, last_rendering_seqno can
63 + * be used to differentiate between the two elements.
64 + */
65 + struct list_head gpu_write_list;
66 +
67 + /**
68 * LRU list of objects which are not in the ringbuffer and
69 * are ready to unbind, but are still in the GTT.
70 *
71 @@ -558,6 +567,8 @@ struct drm_i915_gem_object {
72
73 /** This object's place on the active/flushing/inactive lists */
74 struct list_head list;
75 + /** This object's place on GPU write list */
76 + struct list_head gpu_write_list;
77
78 /** This object's place on the fenced object LRU */
79 struct list_head fence_list;
80 --- a/drivers/gpu/drm/i915/i915_gem.c
81 +++ b/drivers/gpu/drm/i915/i915_gem.c
82 @@ -1552,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct
83 else
84 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
85
86 + BUG_ON(!list_empty(&obj_priv->gpu_write_list));
87 +
88 obj_priv->last_rendering_seqno = 0;
89 if (obj_priv->active) {
90 obj_priv->active = 0;
91 @@ -1622,7 +1624,8 @@ i915_add_request(struct drm_device *dev,
92 struct drm_i915_gem_object *obj_priv, *next;
93
94 list_for_each_entry_safe(obj_priv, next,
95 - &dev_priv->mm.flushing_list, list) {
96 + &dev_priv->mm.gpu_write_list,
97 + gpu_write_list) {
98 struct drm_gem_object *obj = obj_priv->obj;
99
100 if ((obj->write_domain & flush_domains) ==
101 @@ -1630,6 +1633,7 @@ i915_add_request(struct drm_device *dev,
102 uint32_t old_write_domain = obj->write_domain;
103
104 obj->write_domain = 0;
105 + list_del_init(&obj_priv->gpu_write_list);
106 i915_gem_object_move_to_active(obj, seqno);
107
108 trace_i915_gem_object_change_domain(obj,
109 @@ -2073,8 +2077,8 @@ static int
110 i915_gem_evict_everything(struct drm_device *dev)
111 {
112 drm_i915_private_t *dev_priv = dev->dev_private;
113 - uint32_t seqno;
114 int ret;
115 + uint32_t seqno;
116 bool lists_empty;
117
118 spin_lock(&dev_priv->mm.active_list_lock);
119 @@ -2096,6 +2100,8 @@ i915_gem_evict_everything(struct drm_dev
120 if (ret)
121 return ret;
122
123 + BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
124 +
125 ret = i915_gem_evict_from_inactive_list(dev);
126 if (ret)
127 return ret;
128 @@ -2690,7 +2696,7 @@ i915_gem_object_flush_gpu_write_domain(s
129 old_write_domain = obj->write_domain;
130 i915_gem_flush(dev, 0, obj->write_domain);
131 seqno = i915_add_request(dev, NULL, obj->write_domain);
132 - obj->write_domain = 0;
133 + BUG_ON(obj->write_domain);
134 i915_gem_object_move_to_active(obj, seqno);
135
136 trace_i915_gem_object_change_domain(obj,
137 @@ -3710,16 +3716,23 @@ i915_gem_execbuffer(struct drm_device *d
138 i915_gem_flush(dev,
139 dev->invalidate_domains,
140 dev->flush_domains);
141 - if (dev->flush_domains)
142 + if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
143 (void)i915_add_request(dev, file_priv,
144 dev->flush_domains);
145 }
146
147 for (i = 0; i < args->buffer_count; i++) {
148 struct drm_gem_object *obj = object_list[i];
149 + struct drm_i915_gem_object *obj_priv = obj->driver_private;
150 uint32_t old_write_domain = obj->write_domain;
151
152 obj->write_domain = obj->pending_write_domain;
153 + if (obj->write_domain)
154 + list_move_tail(&obj_priv->gpu_write_list,
155 + &dev_priv->mm.gpu_write_list);
156 + else
157 + list_del_init(&obj_priv->gpu_write_list);
158 +
159 trace_i915_gem_object_change_domain(obj,
160 obj->read_domains,
161 old_write_domain);
162 @@ -4112,6 +4125,7 @@ int i915_gem_init_object(struct drm_gem_
163 obj_priv->obj = obj;
164 obj_priv->fence_reg = I915_FENCE_REG_NONE;
165 INIT_LIST_HEAD(&obj_priv->list);
166 + INIT_LIST_HEAD(&obj_priv->gpu_write_list);
167 INIT_LIST_HEAD(&obj_priv->fence_list);
168 obj_priv->madv = I915_MADV_WILLNEED;
169
170 @@ -4563,6 +4577,7 @@ i915_gem_load(struct drm_device *dev)
171 spin_lock_init(&dev_priv->mm.active_list_lock);
172 INIT_LIST_HEAD(&dev_priv->mm.active_list);
173 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
174 + INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
175 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
176 INIT_LIST_HEAD(&dev_priv->mm.request_list);
177 INIT_LIST_HEAD(&dev_priv->mm.fence_list);