]>
Commit | Line | Data |
---|---|---|
9c065a7d DV |
1 | /* |
2 | * Copyright © 2012-2014 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |
21 | * IN THE SOFTWARE. | |
22 | * | |
23 | * Authors: | |
24 | * Eugeni Dodonov <eugeni.dodonov@intel.com> | |
25 | * Daniel Vetter <daniel.vetter@ffwll.ch> | |
26 | * | |
27 | */ | |
28 | ||
29 | #include <linux/pm_runtime.h> | |
30 | #include <linux/vgaarb.h> | |
31 | ||
bd780f37 CW |
32 | #include <drm/drm_print.h> |
33 | ||
9c065a7d | 34 | #include "i915_drv.h" |
9c065a7d | 35 | |
e4e7684f DV |
36 | /** |
37 | * DOC: runtime pm | |
38 | * | |
39 | * The i915 driver supports dynamic enabling and disabling of entire hardware | |
40 | * blocks at runtime. This is especially important on the display side where | |
41 | * software is supposed to control many power gates manually on recent hardware, | |
42 | * since on the GT side a lot of the power management is done by the hardware. | |
43 | * But even there some manual control at the device level is required. | |
44 | * | |
45 | * Since i915 supports a diverse set of platforms with a unified codebase and | |
46 | * hardware engineers just love to shuffle functionality around between power | |
47 | * domains there's a sizeable amount of indirection required. This file provides | |
48 | * generic functions to the driver for grabbing and releasing references for | |
49 | * abstract power domains. It then maps those to the actual power wells | |
50 | * present for a given platform. | |
51 | */ | |
52 | ||
bd780f37 CW |
53 | #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) |
54 | ||
55 | #include <linux/sort.h> | |
56 | ||
57 | #define STACKDEPTH 8 | |
58 | ||
59 | static noinline depot_stack_handle_t __save_depot_stack(void) | |
60 | { | |
61 | unsigned long entries[STACKDEPTH]; | |
487f3c7f | 62 | unsigned int n; |
bd780f37 | 63 | |
487f3c7f TG |
64 | n = stack_trace_save(entries, ARRAY_SIZE(entries), 1); |
65 | return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN); | |
bd780f37 CW |
66 | } |
67 | ||
68 | static void __print_depot_stack(depot_stack_handle_t stack, | |
69 | char *buf, int sz, int indent) | |
70 | { | |
487f3c7f TG |
71 | unsigned long *entries; |
72 | unsigned int nr_entries; | |
bd780f37 | 73 | |
487f3c7f TG |
74 | nr_entries = stack_depot_fetch(stack, &entries); |
75 | stack_trace_snprint(buf, sz, entries, nr_entries, indent); | |
bd780f37 CW |
76 | } |
77 | ||
78 | static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915) | |
79 | { | |
80 | struct i915_runtime_pm *rpm = &i915->runtime_pm; | |
81 | ||
82 | spin_lock_init(&rpm->debug.lock); | |
83 | } | |
84 | ||
/*
 * Record the caller's stack as the owner of a newly acquired wakeref.
 *
 * Returns a cookie identifying this acquisition (the depot stack handle),
 * or -1 when nothing was tracked; the untrack side treats -1 as "ignore".
 */
static noinline depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	depot_stack_handle_t stack, *stacks;
	unsigned long flags;

	/* Nothing to track on platforms without runtime PM support. */
	if (!HAS_RUNTIME_PM(i915))
		return -1;

	stack = __save_depot_stack();
	if (!stack)
		return -1;

	spin_lock_irqsave(&rpm->debug.lock, flags);

	/* First wakeref after idle: remember where it was taken. */
	if (!rpm->debug.count)
		rpm->debug.last_acquire = stack;

	/* GFP_NOWAIT | __GFP_NOWARN: we hold a spinlock and may be atomic. */
	stacks = krealloc(rpm->debug.owners,
			  (rpm->debug.count + 1) * sizeof(*stacks),
			  GFP_NOWAIT | __GFP_NOWARN);
	if (stacks) {
		stacks[rpm->debug.count++] = stack;
		rpm->debug.owners = stacks;
	} else {
		/* Allocation failed: report the wakeref as untracked. */
		stack = -1;
	}

	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	return stack;
}
118 | ||
/*
 * Remove @stack (a cookie from track_intel_runtime_pm_wakeref()) from the
 * owners list. Warns and dumps the offending stacks if the cookie is not
 * found, i.e. on an unbalanced put.
 */
static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
					     depot_stack_handle_t stack)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	unsigned long flags, n;
	bool found = false;

	/* -1 marks an untracked acquisition (no-RPM platform or OOM). */
	if (unlikely(stack == -1))
		return;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	/* Search newest-first; remove the first match, keeping order. */
	for (n = rpm->debug.count; n--; ) {
		if (rpm->debug.owners[n] == stack) {
			memmove(rpm->debug.owners + n,
				rpm->debug.owners + n + 1,
				(--rpm->debug.count - n) * sizeof(stack));
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	if (WARN(!found,
		 "Unmatched wakeref (tracking %lu), count %u\n",
		 rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
		char *buf;

		/* Best-effort diagnostics only; give up silently on OOM. */
		buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
		if (!buf)
			return;

		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
		DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);

		/* Also show where all wakerefs were last flushed, if known. */
		stack = READ_ONCE(rpm->debug.last_release);
		if (stack) {
			__print_depot_stack(stack, buf, PAGE_SIZE, 2);
			DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
		}

		kfree(buf);
	}
}
162 | ||
163 | static int cmphandle(const void *_a, const void *_b) | |
164 | { | |
165 | const depot_stack_handle_t * const a = _a, * const b = _b; | |
166 | ||
167 | if (*a < *b) | |
168 | return -1; | |
169 | else if (*a > *b) | |
170 | return 1; | |
171 | else | |
172 | return 0; | |
173 | } | |
174 | ||
/*
 * Dump a wakeref-tracking snapshot to @p: last acquire/release stacks, the
 * outstanding count, and each distinct owner stack with a multiplicity.
 *
 * NOTE: sorts @dbg->owners in place — callers pass a private snapshot.
 */
static void
__print_intel_runtime_pm_wakeref(struct drm_printer *p,
				 const struct intel_runtime_pm_debug *dbg)
{
	unsigned long i;
	char *buf;

	/* Scratch buffer for rendering stack traces; skip dump on OOM. */
	buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
	if (!buf)
		return;

	if (dbg->last_acquire) {
		__print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last acquired:\n%s", buf);
	}

	if (dbg->last_release) {
		__print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last released:\n%s", buf);
	}

	drm_printf(p, "Wakeref count: %lu\n", dbg->count);

	/* Group duplicates: sort, then print each run once with its count. */
	sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);

	for (i = 0; i < dbg->count; i++) {
		depot_stack_handle_t stack = dbg->owners[i];
		unsigned long rep;

		rep = 1;
		while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
			rep++, i++;
		__print_depot_stack(stack, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
	}

	kfree(buf);
}
213 | ||
214 | static noinline void | |
dbf99c1f ID |
215 | __untrack_all_wakerefs(struct intel_runtime_pm_debug *debug, |
216 | struct intel_runtime_pm_debug *saved) | |
217 | { | |
218 | *saved = *debug; | |
219 | ||
220 | debug->owners = NULL; | |
221 | debug->count = 0; | |
222 | debug->last_release = __save_depot_stack(); | |
223 | } | |
224 | ||
225 | static void | |
226 | dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug) | |
227 | { | |
228 | struct drm_printer p; | |
229 | ||
230 | if (!debug->count) | |
231 | return; | |
232 | ||
233 | p = drm_debug_printer("i915"); | |
234 | __print_intel_runtime_pm_wakeref(&p, debug); | |
235 | ||
236 | kfree(debug->owners); | |
237 | } | |
238 | ||
/*
 * Drop one reference from the wakeref count; when that was the last one,
 * flush all tracked owner stacks and dump them (outside the lock).
 */
static noinline void
__intel_wakeref_dec_and_check_tracking(struct drm_i915_private *i915)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;

	/*
	 * Only take the debug lock when the count actually reaches zero;
	 * the common non-final put is just an atomic decrement.
	 */
	if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
					 &rpm->debug.lock,
					 flags))
		return;

	__untrack_all_wakerefs(&rpm->debug, &dbg);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	/* Printing may allocate/sleep-ish work; do it after unlocking. */
	dump_and_free_wakeref_tracking(&dbg);
}
bd780f37 | 256 | |
dbf99c1f ID |
257 | static noinline void |
258 | untrack_all_intel_runtime_pm_wakerefs(struct drm_i915_private *i915) | |
259 | { | |
260 | struct i915_runtime_pm *rpm = &i915->runtime_pm; | |
261 | struct intel_runtime_pm_debug dbg = {}; | |
262 | unsigned long flags; | |
bd780f37 | 263 | |
dbf99c1f ID |
264 | spin_lock_irqsave(&rpm->debug.lock, flags); |
265 | __untrack_all_wakerefs(&rpm->debug, &dbg); | |
266 | spin_unlock_irqrestore(&rpm->debug.lock, flags); | |
267 | ||
268 | dump_and_free_wakeref_tracking(&dbg); | |
bd780f37 CW |
269 | } |
270 | ||
/*
 * Copy the live wakeref-tracking state into a private snapshot and print it
 * to @p. The owners array is grown outside the lock and the copy retried
 * until it is large enough, since tracking can grow concurrently.
 */
void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
				    struct drm_printer *p)
{
	struct intel_runtime_pm_debug dbg = {};

	do {
		struct i915_runtime_pm *rpm = &i915->runtime_pm;
		unsigned long alloc = dbg.count;
		depot_stack_handle_t *s;

		spin_lock_irq(&rpm->debug.lock);
		dbg.count = rpm->debug.count;
		/* Copy only if our buffer (sized on a prior pass) fits. */
		if (dbg.count <= alloc) {
			memcpy(dbg.owners,
			       rpm->debug.owners,
			       dbg.count * sizeof(*s));
		}
		dbg.last_acquire = rpm->debug.last_acquire;
		dbg.last_release = rpm->debug.last_release;
		spin_unlock_irq(&rpm->debug.lock);
		if (dbg.count <= alloc)
			break;

		/* Too small: grow outside the lock and try again. */
		s = krealloc(dbg.owners,
			     dbg.count * sizeof(*s),
			     GFP_NOWAIT | __GFP_NOWARN);
		if (!s)
			goto out;

		dbg.owners = s;
	} while (1);

	__print_intel_runtime_pm_wakeref(p, &dbg);

out:
	kfree(dbg.owners);
}
308 | ||
309 | #else | |
310 | ||
/* Stubs used when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled: keep the
 * wakeref_count bookkeeping but skip all per-wakeref stack tracking.
 */

static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
	/* No tracking state to initialise without debug tracking. */
}

static depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
{
	/* -1 is the "untracked" cookie; the untrack side ignores it. */
	return -1;
}

static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
					     intel_wakeref_t wref)
{
}

static void
__intel_wakeref_dec_and_check_tracking(struct drm_i915_private *i915)
{
	/* Only the reference count is maintained in non-debug builds. */
	atomic_dec(&i915->runtime_pm.wakeref_count);
}

static void
untrack_all_intel_runtime_pm_wakerefs(struct drm_i915_private *i915)
{
}
336 | ||
bd780f37 CW |
337 | #endif |
338 | ||
4547c255 ID |
/*
 * Bump the combined wakeref count for a new reference. A wakelock reference
 * additionally adds INTEL_RPM_WAKELOCK_BIAS so that raw references and
 * wakelocks can later be counted separately (see intel_runtime_pm_cleanup);
 * a raw reference only increments the low part.
 */
static void
intel_runtime_pm_acquire(struct drm_i915_private *i915, bool wakelock)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;

	if (wakelock) {
		atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
		assert_rpm_wakelock_held(i915);
	} else {
		atomic_inc(&rpm->wakeref_count);
		assert_rpm_raw_wakeref_held(i915);
	}
}
352 | ||
/*
 * Counterpart of intel_runtime_pm_acquire(): remove the wakelock bias (if
 * any) and then drop the single reference, checking the tracking state when
 * the count reaches zero.
 */
static void
intel_runtime_pm_release(struct drm_i915_private *i915, int wakelock)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;

	if (wakelock) {
		assert_rpm_wakelock_held(i915);
		atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
	} else {
		assert_rpm_raw_wakeref_held(i915);
	}

	/* Drops the final +1 and flushes tracking if this was the last ref. */
	__intel_wakeref_dec_and_check_tracking(i915);
}
367 | ||
/*
 * Common implementation for the runtime-pm get variants: synchronously
 * resume the PCI device, account the reference (@wakelock selects wakelock
 * vs. raw accounting) and return a tracking cookie for the put side.
 */
static intel_wakeref_t __intel_runtime_pm_get(struct drm_i915_private *i915,
					      bool wakelock)
{
	struct pci_dev *pdev = i915->drm.pdev;
	struct device *kdev = &pdev->dev;
	int ret;

	ret = pm_runtime_get_sync(kdev);
	/* The reference is held even on failure; warn once and carry on. */
	WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);

	intel_runtime_pm_acquire(i915, wakelock);

	return track_intel_runtime_pm_wakeref(i915);
}
382 | ||
/**
 * intel_runtime_pm_get_raw - grab a raw runtime pm reference
 * @i915: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for
 * asynchronous PM management from display code) and ensures that it is powered
 * up. Raw references are not considered during wakelock assert checks.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put_raw() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915)
{
	return __intel_runtime_pm_get(i915, false);
}
405 | ||
/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @i915: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
{
	return __intel_runtime_pm_get(i915, true);
}
422 | ||
7645b19d DCS |
423 | /** |
424 | * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use | |
425 | * @i915: i915 device instance | |
426 | * | |
427 | * This function grabs a device-level runtime pm reference if the device is | |
428 | * already in use and ensures that it is powered up. It is illegal to try | |
429 | * and access the HW should intel_runtime_pm_get_if_in_use() report failure. | |
430 | * | |
431 | * Any runtime pm reference obtained by this function must have a symmetric | |
432 | * call to intel_runtime_pm_put() to release the reference again. | |
433 | * | |
434 | * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates | |
435 | * as True if the wakeref was acquired, or False otherwise. | |
9c065a7d | 436 | */ |
7645b19d | 437 | intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915) |
9c065a7d | 438 | { |
7645b19d DCS |
439 | if (IS_ENABLED(CONFIG_PM)) { |
440 | struct pci_dev *pdev = i915->drm.pdev; | |
441 | struct device *kdev = &pdev->dev; | |
1af474fe | 442 | |
b2891eb2 | 443 | /* |
7645b19d DCS |
444 | * In cases runtime PM is disabled by the RPM core and we get |
445 | * an -EINVAL return value we are not supposed to call this | |
446 | * function, since the power state is undefined. This applies | |
447 | * atm to the late/early system suspend/resume handlers. | |
b2891eb2 | 448 | */ |
7645b19d DCS |
449 | if (pm_runtime_get_if_in_use(kdev) <= 0) |
450 | return 0; | |
ffd7e32d | 451 | } |
664326f8 | 452 | |
7645b19d | 453 | intel_runtime_pm_acquire(i915, true); |
da2f41d1 | 454 | |
7645b19d | 455 | return track_intel_runtime_pm_wakeref(i915); |
da2f41d1 ID |
456 | } |
457 | ||
/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @i915: i915 device instance
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on.  Therefore it is only valid to call this functions from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;
	struct device *kdev = &pdev->dev;

	/* The caller must already hold a wakeref for this to be legal. */
	assert_rpm_wakelock_held(i915);
	pm_runtime_get_noresume(kdev);

	intel_runtime_pm_acquire(i915, true);

	return track_intel_runtime_pm_wakeref(i915);
}
489 | ||
/*
 * Common implementation for the runtime-pm put variants: drop the tracking
 * cookie, undo the accounting done by intel_runtime_pm_acquire() and hand
 * the reference back to the RPM core with autosuspend.
 */
static void __intel_runtime_pm_put(struct drm_i915_private *i915,
				   intel_wakeref_t wref,
				   bool wakelock)
{
	struct pci_dev *pdev = i915->drm.pdev;
	struct device *kdev = &pdev->dev;

	untrack_intel_runtime_pm_wakeref(i915, wref);

	intel_runtime_pm_release(i915, wakelock);

	/* Restart the autosuspend timer rather than suspending immediately. */
	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}
504 | ||
/**
 * intel_runtime_pm_put_raw - release a raw runtime pm reference
 * @i915: i915 device instance
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get_raw() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void
intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(i915, wref, false);
}
519 | ||
/**
 * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
 * @i915: i915 device instance
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 *
 * This function exists only for historical reasons and should be avoided in
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_runtime_pm_put() instead.
 */
void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
{
	/* -1 == "untracked": skips the wakeref-cookie bookkeeping. */
	__intel_runtime_pm_put(i915, -1, true);
}
536 | ||
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @i915: i915 device instance
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
/* Only defined out-of-line when debug tracking needs the @wref cookie;
 * presumably a header provides the non-debug variant — confirm there.
 */
void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(i915, wref, true);
}
#endif
552 | ||
/**
 * intel_runtime_pm_enable - enable runtime pm
 * @i915: i915 device instance
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does currently not enable runtime pm for the
 * subordinate display power domains. That is done by
 * intel_power_domains_enable().
 */
void intel_runtime_pm_enable(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;
	struct device *kdev = &pdev->dev;

	/*
	 * Disable the system suspend direct complete optimization, which can
	 * leave the device suspended skipping the driver's suspend handlers
	 * if the device was already runtime suspended. This is needed due to
	 * the difference in our runtime and system suspend sequence and
	 * because the HDA driver may require us to enable the audio power
	 * domain during system suspend.
	 */
	dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);

	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
	pm_runtime_mark_last_busy(kdev);

	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
	if (!HAS_RUNTIME_PM(i915)) {
		int ret;

		pm_runtime_dont_use_autosuspend(kdev);
		ret = pm_runtime_get_sync(kdev);
		WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
	} else {
		pm_runtime_use_autosuspend(kdev);
	}

	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_power_domains_fini().
	 */
	pm_runtime_put_autosuspend(kdev);
}
07d80572 | 604 | |
/*
 * Disable runtime pm during driver unload: reacquire the reference the core
 * gave us at load time (dropped in intel_runtime_pm_enable()) and undo the
 * autosuspend setup.
 */
void intel_runtime_pm_disable(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;
	struct device *kdev = &pdev->dev;

	/* Transfer rpm ownership back to core */
	WARN(pm_runtime_get_sync(kdev) < 0,
	     "Failed to pass rpm ownership back to core\n");

	pm_runtime_dont_use_autosuspend(kdev);

	/* Drop the permanent reference taken on no-RPM platforms at enable. */
	if (!HAS_RUNTIME_PM(i915))
		pm_runtime_put(kdev);
}
bd780f37 CW |
619 | |
/*
 * Final sanity check on driver teardown: warn about any wakerefs still held
 * (split into raw vs. wakelock counts via the bias encoding) and flush all
 * remaining tracked owner stacks.
 */
void intel_runtime_pm_cleanup(struct drm_i915_private *i915)
{
	struct i915_runtime_pm *rpm = &i915->runtime_pm;
	int count = atomic_read(&rpm->wakeref_count);

	WARN(count,
	     "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
	     intel_rpm_raw_wakeref_count(count),
	     intel_rpm_wakelock_count(count));

	untrack_all_intel_runtime_pm_wakerefs(i915);
}
632 | ||
/* Early (pre-hardware) init: set up the wakeref debug-tracking state. */
void intel_runtime_pm_init_early(struct drm_i915_private *i915)
{
	init_intel_runtime_pm_wakeref(i915);
}