commit 4f99970852559935b27bc634318f34c18c5fd143
Author: Eric Anholt <eric@anholt.net>
Date:   Tue Jul 29 12:10:39 2008 -0700

    i915: Add support for MSI and interrupt mitigation.

    Previous attempts at interrupt mitigation had been foiled by i915_wait_irq's
    failure to update the sarea seqno value when the status page indicated that
    the seqno had already been passed. MSI support has been seen to cut CPU
    costs by up to 40% in some workloads by avoiding other expensive interrupt
    handlers for frequent graphics interrupts.

    Signed-off-by: Eric Anholt <eric@anholt.net>
    Signed-off-by: Dave Airlie <airlied@redhat.com>

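The heart of the change is the reworked i915 interrupt handler further down in
i915_irq.c: with MSI enabled, the driver masks everything in IMR before reading
IIR, acks what it saw, and only then restores the mask, so a condition that
arrives while the handler runs still raises a fresh edge-signalled MSI message.
A condensed sketch of that flow follows (illustrative only, not part of the
commit; the function name is made up, while I915_READ/I915_WRITE, IMR/IIR and
irq_mask_reg are the helpers, registers and field used in the patch below):

	/* Sketch of the MSI-safe acknowledge sequence used by the new handler. */
	irqreturn_t i915_irq_handler_sketch(struct drm_device *dev)
	{
		drm_i915_private_t *dev_priv = dev->dev_private;
		u32 iir;

		if (dev->pdev->msi_enabled)
			I915_WRITE(IMR, ~0);		/* mask all sources while acking */
		iir = I915_READ(IIR);			/* which interrupts fired? */

		if (iir == 0) {				/* not ours */
			if (dev->pdev->msi_enabled) {
				I915_WRITE(IMR, dev_priv->irq_mask_reg);
				(void) I915_READ(IMR);	/* flush posted write */
			}
			return IRQ_NONE;
		}

		I915_WRITE(IIR, iir);			/* ack the sources we observed */
		if (dev->pdev->msi_enabled)
			I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IIR);			/* flush posted writes */

		/* ... wake i915_wait_irq() waiters, count vblanks ... */
		return IRQ_HANDLED;
	}
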
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 53f0e5a..61ed515 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -63,7 +63,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
 	    p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
 		return -EINVAL;
 
-	p->irq = dev->irq;
+	p->irq = dev->pdev->irq;
 
 	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
 		  p->irq);
@@ -89,7 +89,7 @@ static int drm_irq_install(struct drm_device * dev)
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 		return -EINVAL;
 
-	if (dev->irq == 0)
+	if (dev->pdev->irq == 0)
 		return -EINVAL;
 
 	mutex_lock(&dev->struct_mutex);
@@ -107,7 +107,7 @@ static int drm_irq_install(struct drm_device * dev)
 	dev->irq_enabled = 1;
 	mutex_unlock(&dev->struct_mutex);
 
-	DRM_DEBUG("irq=%d\n", dev->irq);
+	DRM_DEBUG("irq=%d\n", dev->pdev->irq);
 
 	if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
 		init_waitqueue_head(&dev->vbl_queue);
@@ -127,8 +127,12 @@ static int drm_irq_install(struct drm_device * dev)
 	if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
 		sh_flags = IRQF_SHARED;
 
-	ret = request_irq(dev->irq, dev->driver->irq_handler,
+	ret = request_irq(dev->pdev->irq, dev->driver->irq_handler,
 			  sh_flags, dev->devname, dev);
+	/* Expose the device irq number to drivers that want to export it for
+	 * whatever reason.
+	 */
+	dev->irq = dev->pdev->irq;
 	if (ret < 0) {
 		mutex_lock(&dev->struct_mutex);
 		dev->irq_enabled = 0;
@@ -164,11 +168,11 @@ int drm_irq_uninstall(struct drm_device * dev)
 	if (!irq_enabled)
 		return -EINVAL;
 
-	DRM_DEBUG("irq=%d\n", dev->irq);
+	DRM_DEBUG("irq=%d\n", dev->pdev->irq);
 
 	dev->driver->irq_uninstall(dev);
 
-	free_irq(dev->irq, dev);
+	free_irq(dev->pdev->irq, dev);
 
 	dev->locked_tasklet_func = NULL;
 
@@ -201,7 +205,7 @@ int drm_control(struct drm_device *dev, void *data,
 		if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 			return 0;
 		if (dev->if_version < DRM_IF_VERSION(1, 2) &&
-		    ctl->irq != dev->irq)
+		    ctl->irq != dev->pdev->irq)
 			return -EINVAL;
 		return drm_irq_install(dev);
 	case DRM_UNINST_HANDLER:
@@ -239,7 +243,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
 	int ret = 0;
 	unsigned int flags, seq;
 
-	if ((!dev->irq) || (!dev->irq_enabled))
+	if ((!dev->pdev->irq) || (!dev->irq_enabled))
 		return -EINVAL;
 
 	if (vblwait->request.type &
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 7be580b..10bfb0c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -84,7 +84,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
 	 * may not have been called from userspace and after dev_private
 	 * is freed, it's too late.
 	 */
-	if (dev->irq)
+	if (dev->irq_enabled)
 		drm_irq_uninstall(dev);
 
 	if (dev_priv->ring.virtual_start) {
@@ -644,7 +644,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 
 	switch (param->param) {
 	case I915_PARAM_IRQ_ACTIVE:
-		value = dev->irq ? 1 : 0;
+		value = dev->irq_enabled;
 		break;
 	case I915_PARAM_ALLOW_BATCHBUFFER:
 		value = dev_priv->allow_batchbuffer ? 1 : 0;
@@ -763,6 +763,20 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
 			 _DRM_KERNEL | _DRM_DRIVER,
 			 &dev_priv->mmio_map);
+
+
+	/* On the 945G/GM, the chipset reports the MSI capability on the
+	 * integrated graphics even though the support isn't actually there
+	 * according to the published specs. It doesn't appear to function
+	 * correctly in testing on 945G.
+	 * This may be a side effect of MSI having been made available for PEG
+	 * and the registers being closely associated.
+	 */
+	if (!IS_I945G(dev) && !IS_I945GM(dev))
+		pci_enable_msi(dev->pdev);
+
+	spin_lock_init(&dev_priv->user_irq_lock);
+
 	return ret;
 }
 
@@ -770,6 +784,9 @@ int i915_driver_unload(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	if (dev->pdev->msi_enabled)
+		pci_disable_msi(dev->pdev);
+
 	if (dev_priv->mmio_map)
 		drm_rmmap(dev, dev_priv->mmio_map);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index afb51a3..8daf0d8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -105,6 +105,12 @@ typedef struct drm_i915_private {
 	wait_queue_head_t irq_queue;
 	atomic_t irq_received;
 	atomic_t irq_emitted;
+	/** Protects user_irq_refcount and irq_mask_reg */
+	spinlock_t user_irq_lock;
+	/** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
+	int user_irq_refcount;
+	/** Cached value of IMR to avoid reads in updating the bitfield */
+	u32 irq_mask_reg;
 
 	int tex_lru_log_granularity;
 	int allow_batchbuffer;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4a2de78..24d11ed 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -33,6 +33,31 @@
 
 #define MAX_NOPID ((u32)~0)
 
+/** These are the interrupts used by the driver */
+#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT |		\
+				    I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | \
+				    I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
+
+static inline void
+i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	if ((dev_priv->irq_mask_reg & mask) != 0) {
+		dev_priv->irq_mask_reg &= ~mask;
+		I915_WRITE(IMR, dev_priv->irq_mask_reg);
+		(void) I915_READ(IMR);
+	}
+}
+
+static inline void
+i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	if ((dev_priv->irq_mask_reg & mask) != mask) {
+		dev_priv->irq_mask_reg |= mask;
+		I915_WRITE(IMR, dev_priv->irq_mask_reg);
+		(void) I915_READ(IMR);
+	}
+}
+
 /**
  * Emit blits for scheduled buffer swaps.
  *
@@ -229,46 +254,50 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u16 temp;
 	u32 pipea_stats, pipeb_stats;
+	u32 iir;
 
 	pipea_stats = I915_READ(PIPEASTAT);
 	pipeb_stats = I915_READ(PIPEBSTAT);
 
-	temp = I915_READ16(IIR);
-
-	temp &= (I915_USER_INTERRUPT |
-		 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
-		 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT);
+	if (dev->pdev->msi_enabled)
+		I915_WRITE(IMR, ~0);
+	iir = I915_READ(IIR);
 
-	DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
+	DRM_DEBUG("iir=%08x\n", iir);
 
-	if (temp == 0)
+	if (iir == 0) {
+		if (dev->pdev->msi_enabled) {
+			I915_WRITE(IMR, dev_priv->irq_mask_reg);
+			(void) I915_READ(IMR);
+		}
 		return IRQ_NONE;
+	}
 
-	I915_WRITE16(IIR, temp);
-	(void) I915_READ16(IIR);
-	DRM_READMEMORYBARRIER();
+	I915_WRITE(IIR, iir);
+	if (dev->pdev->msi_enabled)
+		I915_WRITE(IMR, dev_priv->irq_mask_reg);
+	(void) I915_READ(IIR); /* Flush posted writes */
 
 	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 
-	if (temp & I915_USER_INTERRUPT)
+	if (iir & I915_USER_INTERRUPT)
 		DRM_WAKEUP(&dev_priv->irq_queue);
 
-	if (temp & (I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
-		    I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)) {
+	if (iir & (I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
+		   I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)) {
 		int vblank_pipe = dev_priv->vblank_pipe;
 
 		if ((vblank_pipe &
 		     (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
 		    == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
-			if (temp & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
+			if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
 				atomic_inc(&dev->vbl_received);
-			if (temp & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
+			if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
 				atomic_inc(&dev->vbl_received2);
-		} else if (((temp & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) &&
+		} else if (((iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) &&
 			    (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
-			   ((temp & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) &&
+			   ((iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) &&
 			    (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
 			atomic_inc(&dev->vbl_received);
 
@@ -314,6 +343,27 @@ static int i915_emit_irq(struct drm_device * dev)
 	return dev_priv->counter;
 }
 
+static void i915_user_irq_get(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	spin_lock(&dev_priv->user_irq_lock);
+	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
+		i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+	spin_unlock(&dev_priv->user_irq_lock);
+}
+
+static void i915_user_irq_put(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	spin_lock(&dev_priv->user_irq_lock);
+	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
+	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
+		i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+	spin_unlock(&dev_priv->user_irq_lock);
+}
+
 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -322,13 +372,17 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
 		  READ_BREADCRUMB(dev_priv));
 
-	if (READ_BREADCRUMB(dev_priv) >= irq_nr)
+	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+		dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 		return 0;
+	}
 
 	dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
+	i915_user_irq_get(dev);
 	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
 		    READ_BREADCRUMB(dev_priv) >= irq_nr);
+	i915_user_irq_put(dev);
 
 	if (ret == -EBUSY) {
 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -413,20 +467,6 @@ int i915_irq_wait(struct drm_device *dev, void *data,
 	return i915_wait_irq(dev, irqwait->irq_seq);
 }
 
-static void i915_enable_interrupt (struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u16 flag;
-
-	flag = 0;
-	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
-		flag |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
-	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
-		flag |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
-
-	I915_WRITE16(IER, I915_USER_INTERRUPT | flag);
-}
-
 /* Set the vblank monitor pipe
  */
 int i915_vblank_pipe_set(struct drm_device *dev, void *data,
@@ -434,6 +474,7 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_vblank_pipe_t *pipe = data;
+	u32 enable_mask = 0, disable_mask = 0;
 
 	if (!dev_priv) {
 		DRM_ERROR("called with no initialization\n");
@@ -445,9 +486,20 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	dev_priv->vblank_pipe = pipe->pipe;
+	if (pipe->pipe & DRM_I915_VBLANK_PIPE_A)
+		enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+	else
+		disable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+
+	if (pipe->pipe & DRM_I915_VBLANK_PIPE_B)
+		enable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+	else
+		disable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
 
-	i915_enable_interrupt (dev);
+	i915_enable_irq(dev_priv, enable_mask);
+	i915_disable_irq(dev_priv, disable_mask);
+
+	dev_priv->vblank_pipe = pipe->pipe;
 
 	return 0;
 }
@@ -464,7 +516,7 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	flag = I915_READ(IER);
+	flag = I915_READ(IMR);
 	pipe->pipe = 0;
 	if (flag & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
 		pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
@@ -586,9 +638,9 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
-	I915_WRITE16(HWSTAM, 0xfffe);
-	I915_WRITE16(IMR, 0x0);
-	I915_WRITE16(IER, 0x0);
+	I915_WRITE(HWSTAM, 0xfffe);
+	I915_WRITE(IMR, 0x0);
+	I915_WRITE(IER, 0x0);
 }
 
 void i915_driver_irq_postinstall(struct drm_device * dev)
@@ -601,7 +653,18 @@ void i915_driver_irq_postinstall(struct drm_device * dev)
 
 	if (!dev_priv->vblank_pipe)
 		dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
-	i915_enable_interrupt(dev);
+
+	/* Set initial unmasked IRQs to just the selected vblank pipes. */
+	dev_priv->irq_mask_reg = ~0;
+	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
+		dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
+		dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+	I915_WRITE(IMR, dev_priv->irq_mask_reg);
+	I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
+	(void) I915_READ(IER);
+
 	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
}
 
@@ -613,10 +676,10 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
 	if (!dev_priv)
 		return;
 
-	I915_WRITE16(HWSTAM, 0xffff);
-	I915_WRITE16(IMR, 0xffff);
-	I915_WRITE16(IER, 0x0);
+	I915_WRITE(HWSTAM, 0xffff);
+	I915_WRITE(IMR, 0xffff);
+	I915_WRITE(IER, 0x0);
 
-	temp = I915_READ16(IIR);
-	I915_WRITE16(IIR, temp);
+	temp = I915_READ(IIR);
+	I915_WRITE(IIR, temp);
 }