]> git.ipfire.org Git - people/arne_f/kernel.git/blame - drivers/gpu/drm/i915/i915_gem_gtt.c
drm/i915: Deconstruct struct sgt_dma initialiser
[people/arne_f/kernel.git] / drivers / gpu / drm / i915 / i915_gem_gtt.c
CommitLineData
76aaf220
DV
1/*
2 * Copyright © 2010 Daniel Vetter
c4ac524c 3 * Copyright © 2011-2014 Intel Corporation
76aaf220
DV
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 *
24 */
25
aae4a3d8
CW
26#include <linux/slab.h> /* fault-inject.h is not standalone! */
27
28#include <linux/fault-inject.h>
e007b19d 29#include <linux/log2.h>
606fec95 30#include <linux/random.h>
0e46ce2e 31#include <linux/seq_file.h>
5bab6f60 32#include <linux/stop_machine.h>
e007b19d 33
ed3ba079
LA
34#include <asm/set_memory.h>
35
760285e7
DH
36#include <drm/drmP.h>
37#include <drm/i915_drm.h>
e007b19d 38
76aaf220 39#include "i915_drv.h"
5dda8fa3 40#include "i915_vgpu.h"
76aaf220
DV
41#include "i915_trace.h"
42#include "intel_drv.h"
d07f0e59 43#include "intel_frontbuffer.h"
76aaf220 44
bb8f9cff
CW
45#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
46
45f8f69a
TU
47/**
48 * DOC: Global GTT views
49 *
50 * Background and previous state
51 *
52 * Historically objects could exists (be bound) in global GTT space only as
53 * singular instances with a view representing all of the object's backing pages
54 * in a linear fashion. This view will be called a normal view.
55 *
56 * To support multiple views of the same object, where the number of mapped
57 * pages is not equal to the backing store, or where the layout of the pages
58 * is not linear, concept of a GGTT view was added.
59 *
60 * One example of an alternative view is a stereo display driven by a single
61 * image. In this case we would have a framebuffer looking like this
62 * (2x2 pages):
63 *
64 * 12
65 * 34
66 *
67 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
68 * rendering. In contrast, fed to the display engine would be an alternative
69 * view which could look something like this:
70 *
71 * 1212
72 * 3434
73 *
74 * In this example both the size and layout of pages in the alternative view is
75 * different from the normal view.
76 *
77 * Implementation and usage
78 *
79 * GGTT views are implemented using VMAs and are distinguished via enum
80 * i915_ggtt_view_type and struct i915_ggtt_view.
81 *
82 * A new flavour of core GEM functions which work with GGTT bound objects were
ec7adb6e
JL
83 * added with the _ggtt_ infix, and sometimes with _view postfix to avoid
84 * renaming in large amounts of code. They take the struct i915_ggtt_view
85 * parameter encapsulating all metadata required to implement a view.
45f8f69a
TU
86 *
87 * As a helper for callers which are only interested in the normal view,
88 * globally const i915_ggtt_view_normal singleton instance exists. All old core
89 * GEM API functions, the ones not taking the view parameter, are operating on,
90 * or with the normal GGTT view.
91 *
92 * Code wanting to add or use a new GGTT view needs to:
93 *
94 * 1. Add a new enum with a suitable name.
95 * 2. Extend the metadata in the i915_ggtt_view structure if required.
96 * 3. Add support to i915_get_vma_pages().
97 *
98 * New views are required to build a scatter-gather table from within the
99 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
100 * exists for the lifetime of an VMA.
101 *
102 * Core API is designed to have copy semantics which means that passed in
103 * struct i915_ggtt_view does not need to be persistent (left around after
104 * calling the core API functions).
105 *
106 */
107
70b9f6f8
DV
108static int
109i915_get_ggtt_vma_pages(struct i915_vma *vma);
110
7c3f86b6
CW
111static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
112{
113 /* Note that as an uncached mmio write, this should flush the
114 * WCB of the writes into the GGTT before it triggers the invalidate.
115 */
116 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
117}
118
119static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
120{
121 gen6_ggtt_invalidate(dev_priv);
122 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
123}
124
125static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
126{
127 intel_gtt_chipset_flush();
128}
129
130static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
131{
132 i915->ggtt.invalidate(i915);
133}
134
c033666a
CW
135int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
136 int enable_ppgtt)
cfa7c862 137{
1893a71b
CW
138 bool has_aliasing_ppgtt;
139 bool has_full_ppgtt;
1f9a99e0 140 bool has_full_48bit_ppgtt;
1893a71b 141
9e1d0e60
MT
142 has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
143 has_full_ppgtt = dev_priv->info.has_full_ppgtt;
144 has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
1893a71b 145
e320d400 146 if (intel_vgpu_active(dev_priv)) {
8a4ab66f 147 /* GVT-g has no support for 32bit ppgtt */
e320d400 148 has_full_ppgtt = false;
8a4ab66f 149 has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
e320d400 150 }
71ba2d64 151
0e4ca100
CW
152 if (!has_aliasing_ppgtt)
153 return 0;
154
70ee45e1
DL
155 /*
156 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
157 * execlists, the sole mechanism available to submit work.
158 */
c033666a 159 if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
cfa7c862
DV
160 return 0;
161
162 if (enable_ppgtt == 1)
163 return 1;
164
1893a71b 165 if (enable_ppgtt == 2 && has_full_ppgtt)
cfa7c862
DV
166 return 2;
167
1f9a99e0
MT
168 if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
169 return 3;
170
93a25a9e 171 /* Disable ppgtt on SNB if VT-d is on. */
80debff8 172 if (IS_GEN6(dev_priv) && intel_vtd_active()) {
93a25a9e 173 DRM_INFO("Disabling PPGTT because VT-d is on\n");
cfa7c862 174 return 0;
93a25a9e 175 }
93a25a9e 176
62942ed7 177 /* Early VLV doesn't have this */
91c8a326 178 if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
62942ed7
JB
179 DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
180 return 0;
181 }
182
4fc05063
JL
183 if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists) {
184 if (has_full_48bit_ppgtt)
185 return 3;
186
187 if (has_full_ppgtt)
188 return 2;
189 }
190
191 return has_aliasing_ppgtt ? 1 : 0;
93a25a9e
DV
192}
193
70b9f6f8
DV
194static int ppgtt_bind_vma(struct i915_vma *vma,
195 enum i915_cache_level cache_level,
196 u32 unused)
47552659 197{
ff685975
CW
198 u32 pte_flags;
199 int ret;
200
1f23475c
MA
201 if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
202 ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
203 vma->size);
204 if (ret)
205 return ret;
206 }
47552659 207
a4f5ea64 208 vma->pages = vma->obj->mm.pages;
247177dd 209
47552659 210 /* Currently applicable only to VLV */
ff685975 211 pte_flags = 0;
47552659
DV
212 if (vma->obj->gt_ro)
213 pte_flags |= PTE_READ_ONLY;
214
4a234c5f 215 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
70b9f6f8
DV
216
217 return 0;
47552659
DV
218}
219
220static void ppgtt_unbind_vma(struct i915_vma *vma)
221{
ff685975 222 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
47552659 223}
6f65e29a 224
2c642b07 225static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
4fb84d99 226 enum i915_cache_level level)
94ec8f61 227{
4fb84d99 228 gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
94ec8f61 229 pte |= addr;
63c42e56
BW
230
231 switch (level) {
232 case I915_CACHE_NONE:
fbe5d36e 233 pte |= PPAT_UNCACHED_INDEX;
63c42e56
BW
234 break;
235 case I915_CACHE_WT:
236 pte |= PPAT_DISPLAY_ELLC_INDEX;
237 break;
238 default:
239 pte |= PPAT_CACHED_INDEX;
240 break;
241 }
242
94ec8f61
BW
243 return pte;
244}
245
fe36f55d
MK
246static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
247 const enum i915_cache_level level)
b1fe6673 248{
07749ef3 249 gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
b1fe6673
BW
250 pde |= addr;
251 if (level != I915_CACHE_NONE)
252 pde |= PPAT_CACHED_PDE_INDEX;
253 else
254 pde |= PPAT_UNCACHED_INDEX;
255 return pde;
256}
257
762d9936
MT
258#define gen8_pdpe_encode gen8_pde_encode
259#define gen8_pml4e_encode gen8_pde_encode
260
07749ef3
MT
261static gen6_pte_t snb_pte_encode(dma_addr_t addr,
262 enum i915_cache_level level,
4fb84d99 263 u32 unused)
54d12527 264{
4fb84d99 265 gen6_pte_t pte = GEN6_PTE_VALID;
54d12527 266 pte |= GEN6_PTE_ADDR_ENCODE(addr);
e7210c3c
BW
267
268 switch (level) {
350ec881
CW
269 case I915_CACHE_L3_LLC:
270 case I915_CACHE_LLC:
271 pte |= GEN6_PTE_CACHE_LLC;
272 break;
273 case I915_CACHE_NONE:
274 pte |= GEN6_PTE_UNCACHED;
275 break;
276 default:
5f77eeb0 277 MISSING_CASE(level);
350ec881
CW
278 }
279
280 return pte;
281}
282
07749ef3
MT
283static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
284 enum i915_cache_level level,
4fb84d99 285 u32 unused)
350ec881 286{
4fb84d99 287 gen6_pte_t pte = GEN6_PTE_VALID;
350ec881
CW
288 pte |= GEN6_PTE_ADDR_ENCODE(addr);
289
290 switch (level) {
291 case I915_CACHE_L3_LLC:
292 pte |= GEN7_PTE_CACHE_L3_LLC;
e7210c3c
BW
293 break;
294 case I915_CACHE_LLC:
295 pte |= GEN6_PTE_CACHE_LLC;
296 break;
297 case I915_CACHE_NONE:
9119708c 298 pte |= GEN6_PTE_UNCACHED;
e7210c3c
BW
299 break;
300 default:
5f77eeb0 301 MISSING_CASE(level);
e7210c3c
BW
302 }
303
54d12527
BW
304 return pte;
305}
306
07749ef3
MT
307static gen6_pte_t byt_pte_encode(dma_addr_t addr,
308 enum i915_cache_level level,
4fb84d99 309 u32 flags)
93c34e70 310{
4fb84d99 311 gen6_pte_t pte = GEN6_PTE_VALID;
93c34e70
KG
312 pte |= GEN6_PTE_ADDR_ENCODE(addr);
313
24f3a8cf
AG
314 if (!(flags & PTE_READ_ONLY))
315 pte |= BYT_PTE_WRITEABLE;
93c34e70
KG
316
317 if (level != I915_CACHE_NONE)
318 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
319
320 return pte;
321}
322
07749ef3
MT
323static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
324 enum i915_cache_level level,
4fb84d99 325 u32 unused)
9119708c 326{
4fb84d99 327 gen6_pte_t pte = GEN6_PTE_VALID;
0d8ff15e 328 pte |= HSW_PTE_ADDR_ENCODE(addr);
9119708c
KG
329
330 if (level != I915_CACHE_NONE)
87a6b688 331 pte |= HSW_WB_LLC_AGE3;
9119708c
KG
332
333 return pte;
334}
335
07749ef3
MT
336static gen6_pte_t iris_pte_encode(dma_addr_t addr,
337 enum i915_cache_level level,
4fb84d99 338 u32 unused)
4d15c145 339{
4fb84d99 340 gen6_pte_t pte = GEN6_PTE_VALID;
4d15c145
BW
341 pte |= HSW_PTE_ADDR_ENCODE(addr);
342
651d794f
CW
343 switch (level) {
344 case I915_CACHE_NONE:
345 break;
346 case I915_CACHE_WT:
c51e9701 347 pte |= HSW_WT_ELLC_LLC_AGE3;
651d794f
CW
348 break;
349 default:
c51e9701 350 pte |= HSW_WB_ELLC_LLC_AGE3;
651d794f
CW
351 break;
352 }
4d15c145
BW
353
354 return pte;
355}
356
8448661d 357static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
678d96fb 358{
8448661d 359 struct page *page;
678d96fb 360
8448661d
CW
361 if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
362 i915_gem_shrink_all(vm->i915);
aae4a3d8 363
8448661d
CW
364 if (vm->free_pages.nr)
365 return vm->free_pages.pages[--vm->free_pages.nr];
366
367 page = alloc_page(gfp);
368 if (!page)
369 return NULL;
370
371 if (vm->pt_kmap_wc)
372 set_pages_array_wc(&page, 1);
373
374 return page;
375}
376
377static void vm_free_pages_release(struct i915_address_space *vm)
378{
379 GEM_BUG_ON(!pagevec_count(&vm->free_pages));
380
381 if (vm->pt_kmap_wc)
382 set_pages_array_wb(vm->free_pages.pages,
383 pagevec_count(&vm->free_pages));
384
385 __pagevec_release(&vm->free_pages);
386}
387
388static void vm_free_page(struct i915_address_space *vm, struct page *page)
389{
390 if (!pagevec_add(&vm->free_pages, page))
391 vm_free_pages_release(vm);
392}
678d96fb 393
8448661d
CW
394static int __setup_page_dma(struct i915_address_space *vm,
395 struct i915_page_dma *p,
396 gfp_t gfp)
397{
398 p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
399 if (unlikely(!p->page))
400 return -ENOMEM;
678d96fb 401
8448661d
CW
402 p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
403 PCI_DMA_BIDIRECTIONAL);
404 if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
405 vm_free_page(vm, p->page);
406 return -ENOMEM;
44159ddb 407 }
1266cdb1
MT
408
409 return 0;
678d96fb
BW
410}
411
8448661d 412static int setup_page_dma(struct i915_address_space *vm,
275a991c 413 struct i915_page_dma *p)
c114f76a 414{
8448661d 415 return __setup_page_dma(vm, p, I915_GFP_DMA);
c114f76a
MK
416}
417
8448661d 418static void cleanup_page_dma(struct i915_address_space *vm,
275a991c 419 struct i915_page_dma *p)
06fda602 420{
8448661d
CW
421 dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
422 vm_free_page(vm, p->page);
44159ddb
MK
423}
424
9231da70 425#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
d1c54acd 426
8448661d
CW
427#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
428#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
429#define fill_px(ppgtt, px, v) fill_page_dma((vm), px_base(px), (v))
430#define fill32_px(ppgtt, px, v) fill_page_dma_32((vm), px_base(px), (v))
567047be 431
8448661d
CW
432static void fill_page_dma(struct i915_address_space *vm,
433 struct i915_page_dma *p,
434 const u64 val)
d1c54acd 435{
9231da70 436 u64 * const vaddr = kmap_atomic(p->page);
d1c54acd 437 int i;
d1c54acd
MK
438
439 for (i = 0; i < 512; i++)
440 vaddr[i] = val;
441
9231da70 442 kunmap_atomic(vaddr);
d1c54acd
MK
443}
444
8448661d
CW
445static void fill_page_dma_32(struct i915_address_space *vm,
446 struct i915_page_dma *p,
447 const u32 v)
73eeea53 448{
8448661d 449 fill_page_dma(vm, p, (u64)v << 32 | v);
73eeea53
MK
450}
451
8bcdd0f7 452static int
8448661d 453setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
4ad2af1e 454{
8448661d 455 return __setup_page_dma(vm, &vm->scratch_page, gfp | __GFP_ZERO);
4ad2af1e
MK
456}
457
8448661d 458static void cleanup_scratch_page(struct i915_address_space *vm)
4ad2af1e 459{
8448661d 460 cleanup_page_dma(vm, &vm->scratch_page);
4ad2af1e
MK
461}
462
8448661d 463static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
06fda602 464{
ec565b3c 465 struct i915_page_table *pt;
06fda602 466
dd19674b
CW
467 pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
468 if (unlikely(!pt))
06fda602
BW
469 return ERR_PTR(-ENOMEM);
470
dd19674b
CW
471 if (unlikely(setup_px(vm, pt))) {
472 kfree(pt);
473 return ERR_PTR(-ENOMEM);
474 }
06fda602 475
dd19674b 476 pt->used_ptes = 0;
06fda602
BW
477 return pt;
478}
479
8448661d 480static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
06fda602 481{
8448661d 482 cleanup_px(vm, pt);
2e906bea
MK
483 kfree(pt);
484}
485
486static void gen8_initialize_pt(struct i915_address_space *vm,
487 struct i915_page_table *pt)
488{
dd19674b
CW
489 fill_px(vm, pt,
490 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
2e906bea
MK
491}
492
493static void gen6_initialize_pt(struct i915_address_space *vm,
494 struct i915_page_table *pt)
495{
dd19674b
CW
496 fill32_px(vm, pt,
497 vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
06fda602
BW
498}
499
8448661d 500static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
06fda602 501{
ec565b3c 502 struct i915_page_directory *pd;
06fda602 503
fe52e37f
CW
504 pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
505 if (unlikely(!pd))
06fda602
BW
506 return ERR_PTR(-ENOMEM);
507
fe52e37f
CW
508 if (unlikely(setup_px(vm, pd))) {
509 kfree(pd);
510 return ERR_PTR(-ENOMEM);
511 }
e5815a2e 512
fe52e37f 513 pd->used_pdes = 0;
06fda602
BW
514 return pd;
515}
516
8448661d 517static void free_pd(struct i915_address_space *vm,
275a991c 518 struct i915_page_directory *pd)
2e906bea 519{
fe52e37f
CW
520 cleanup_px(vm, pd);
521 kfree(pd);
2e906bea
MK
522}
523
524static void gen8_initialize_pd(struct i915_address_space *vm,
525 struct i915_page_directory *pd)
526{
dd19674b 527 unsigned int i;
2e906bea 528
dd19674b
CW
529 fill_px(vm, pd,
530 gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
531 for (i = 0; i < I915_PDES; i++)
532 pd->page_table[i] = vm->scratch_pt;
2e906bea
MK
533}
534
fe52e37f 535static int __pdp_init(struct i915_address_space *vm,
6ac18502
MT
536 struct i915_page_directory_pointer *pdp)
537{
3e490042 538 const unsigned int pdpes = i915_pdpes_per_pdp(vm);
e2b763ca 539 unsigned int i;
6ac18502 540
fe52e37f 541 pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
e2b763ca
CW
542 GFP_KERNEL | __GFP_NOWARN);
543 if (unlikely(!pdp->page_directory))
6ac18502 544 return -ENOMEM;
6ac18502 545
fe52e37f
CW
546 for (i = 0; i < pdpes; i++)
547 pdp->page_directory[i] = vm->scratch_pd;
548
6ac18502
MT
549 return 0;
550}
551
552static void __pdp_fini(struct i915_page_directory_pointer *pdp)
553{
6ac18502
MT
554 kfree(pdp->page_directory);
555 pdp->page_directory = NULL;
556}
557
1e6437b0
MK
558static inline bool use_4lvl(const struct i915_address_space *vm)
559{
560 return i915_vm_is_48bit(vm);
561}
562
8448661d
CW
563static struct i915_page_directory_pointer *
564alloc_pdp(struct i915_address_space *vm)
762d9936
MT
565{
566 struct i915_page_directory_pointer *pdp;
567 int ret = -ENOMEM;
568
1e6437b0 569 WARN_ON(!use_4lvl(vm));
762d9936
MT
570
571 pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
572 if (!pdp)
573 return ERR_PTR(-ENOMEM);
574
fe52e37f 575 ret = __pdp_init(vm, pdp);
762d9936
MT
576 if (ret)
577 goto fail_bitmap;
578
8448661d 579 ret = setup_px(vm, pdp);
762d9936
MT
580 if (ret)
581 goto fail_page_m;
582
583 return pdp;
584
585fail_page_m:
586 __pdp_fini(pdp);
587fail_bitmap:
588 kfree(pdp);
589
590 return ERR_PTR(ret);
591}
592
8448661d 593static void free_pdp(struct i915_address_space *vm,
6ac18502
MT
594 struct i915_page_directory_pointer *pdp)
595{
596 __pdp_fini(pdp);
1e6437b0
MK
597
598 if (!use_4lvl(vm))
599 return;
600
601 cleanup_px(vm, pdp);
602 kfree(pdp);
762d9936
MT
603}
604
69ab76fd
MT
605static void gen8_initialize_pdp(struct i915_address_space *vm,
606 struct i915_page_directory_pointer *pdp)
607{
608 gen8_ppgtt_pdpe_t scratch_pdpe;
609
610 scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
611
8448661d 612 fill_px(vm, pdp, scratch_pdpe);
69ab76fd
MT
613}
614
615static void gen8_initialize_pml4(struct i915_address_space *vm,
616 struct i915_pml4 *pml4)
617{
e2b763ca 618 unsigned int i;
762d9936 619
e2b763ca
CW
620 fill_px(vm, pml4,
621 gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
622 for (i = 0; i < GEN8_PML4ES_PER_PML4; i++)
623 pml4->pdps[i] = vm->scratch_pdp;
6ac18502
MT
624}
625
94e409c1 626/* Broadwell Page Directory Pointer Descriptors */
e85b26dc 627static int gen8_write_pdp(struct drm_i915_gem_request *req,
7cb6d7ac
MT
628 unsigned entry,
629 dma_addr_t addr)
94e409c1 630{
4a570db5 631 struct intel_engine_cs *engine = req->engine;
73dec95e 632 u32 *cs;
94e409c1
BW
633
634 BUG_ON(entry >= 4);
635
73dec95e
TU
636 cs = intel_ring_begin(req, 6);
637 if (IS_ERR(cs))
638 return PTR_ERR(cs);
94e409c1 639
73dec95e
TU
640 *cs++ = MI_LOAD_REGISTER_IMM(1);
641 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
642 *cs++ = upper_32_bits(addr);
643 *cs++ = MI_LOAD_REGISTER_IMM(1);
644 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
645 *cs++ = lower_32_bits(addr);
646 intel_ring_advance(req, cs);
94e409c1
BW
647
648 return 0;
649}
650
e7167769
MK
651static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
652 struct drm_i915_gem_request *req)
94e409c1 653{
eeb9488e 654 int i, ret;
94e409c1 655
e7167769 656 for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
d852c7bf
MK
657 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
658
e85b26dc 659 ret = gen8_write_pdp(req, i, pd_daddr);
eeb9488e
BW
660 if (ret)
661 return ret;
94e409c1 662 }
d595bd4b 663
eeb9488e 664 return 0;
94e409c1
BW
665}
666
e7167769
MK
667static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
668 struct drm_i915_gem_request *req)
2dba3239
MT
669{
670 return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
671}
672
fce93755
MK
673/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
674 * the page table structures, we mark them dirty so that
675 * context switching/execlist queuing code takes extra steps
676 * to ensure that tlbs are flushed.
677 */
678static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
679{
49d73912 680 ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
fce93755
MK
681}
682
2ce5179f
MW
683/* Removes entries from a single page table, releasing it if it's empty.
684 * Caller can use the return value to update higher-level entries.
685 */
686static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
d209b9c3 687 struct i915_page_table *pt,
dd19674b 688 u64 start, u64 length)
459108b8 689{
d209b9c3 690 unsigned int num_entries = gen8_pte_count(start, length);
37c63934
MK
691 unsigned int pte = gen8_pte_index(start);
692 unsigned int pte_end = pte + num_entries;
894ccebe
CW
693 const gen8_pte_t scratch_pte =
694 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
695 gen8_pte_t *vaddr;
459108b8 696
dd19674b 697 GEM_BUG_ON(num_entries > pt->used_ptes);
37c63934 698
dd19674b
CW
699 pt->used_ptes -= num_entries;
700 if (!pt->used_ptes)
701 return true;
2ce5179f 702
9231da70 703 vaddr = kmap_atomic_px(pt);
37c63934 704 while (pte < pte_end)
894ccebe 705 vaddr[pte++] = scratch_pte;
9231da70 706 kunmap_atomic(vaddr);
2ce5179f
MW
707
708 return false;
d209b9c3 709}
06fda602 710
dd19674b
CW
711static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
712 struct i915_page_directory *pd,
713 struct i915_page_table *pt,
714 unsigned int pde)
715{
716 gen8_pde_t *vaddr;
717
718 pd->page_table[pde] = pt;
719
720 vaddr = kmap_atomic_px(pd);
721 vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
722 kunmap_atomic(vaddr);
723}
724
2ce5179f 725static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
d209b9c3 726 struct i915_page_directory *pd,
dd19674b 727 u64 start, u64 length)
d209b9c3
MW
728{
729 struct i915_page_table *pt;
dd19674b 730 u32 pde;
d209b9c3
MW
731
732 gen8_for_each_pde(pt, pd, start, length, pde) {
bf75d59e
CW
733 GEM_BUG_ON(pt == vm->scratch_pt);
734
dd19674b
CW
735 if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
736 continue;
06fda602 737
dd19674b 738 gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
bf75d59e 739 GEM_BUG_ON(!pd->used_pdes);
fe52e37f 740 pd->used_pdes--;
dd19674b
CW
741
742 free_pt(vm, pt);
2ce5179f
MW
743 }
744
fe52e37f
CW
745 return !pd->used_pdes;
746}
2ce5179f 747
fe52e37f
CW
748static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
749 struct i915_page_directory_pointer *pdp,
750 struct i915_page_directory *pd,
751 unsigned int pdpe)
752{
753 gen8_ppgtt_pdpe_t *vaddr;
754
755 pdp->page_directory[pdpe] = pd;
1e6437b0 756 if (!use_4lvl(vm))
fe52e37f
CW
757 return;
758
759 vaddr = kmap_atomic_px(pdp);
760 vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
761 kunmap_atomic(vaddr);
d209b9c3 762}
06fda602 763
2ce5179f
MW
764/* Removes entries from a single page dir pointer, releasing it if it's empty.
765 * Caller can use the return value to update higher-level entries
766 */
767static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
d209b9c3 768 struct i915_page_directory_pointer *pdp,
fe52e37f 769 u64 start, u64 length)
d209b9c3
MW
770{
771 struct i915_page_directory *pd;
fe52e37f 772 unsigned int pdpe;
06fda602 773
d209b9c3 774 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
bf75d59e
CW
775 GEM_BUG_ON(pd == vm->scratch_pd);
776
fe52e37f
CW
777 if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
778 continue;
459108b8 779
fe52e37f 780 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
bf75d59e 781 GEM_BUG_ON(!pdp->used_pdpes);
e2b763ca 782 pdp->used_pdpes--;
2ce5179f 783
fe52e37f
CW
784 free_pd(vm, pd);
785 }
fce93755 786
e2b763ca 787 return !pdp->used_pdpes;
d209b9c3 788}
459108b8 789
fe52e37f
CW
790static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
791 u64 start, u64 length)
792{
793 gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
794}
795
e2b763ca
CW
796static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
797 struct i915_page_directory_pointer *pdp,
798 unsigned int pml4e)
799{
800 gen8_ppgtt_pml4e_t *vaddr;
801
802 pml4->pdps[pml4e] = pdp;
803
804 vaddr = kmap_atomic_px(pml4);
805 vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
806 kunmap_atomic(vaddr);
807}
808
2ce5179f
MW
809/* Removes entries from a single pml4.
810 * This is the top-level structure in 4-level page tables used on gen8+.
811 * Empty entries are always scratch pml4e.
812 */
fe52e37f
CW
813static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
814 u64 start, u64 length)
d209b9c3 815{
fe52e37f
CW
816 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
817 struct i915_pml4 *pml4 = &ppgtt->pml4;
d209b9c3 818 struct i915_page_directory_pointer *pdp;
e2b763ca 819 unsigned int pml4e;
2ce5179f 820
1e6437b0 821 GEM_BUG_ON(!use_4lvl(vm));
459108b8 822
d209b9c3 823 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
bf75d59e
CW
824 GEM_BUG_ON(pdp == vm->scratch_pdp);
825
e2b763ca
CW
826 if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
827 continue;
459108b8 828
e2b763ca 829 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
e2b763ca
CW
830
831 free_pdp(vm, pdp);
459108b8
BW
832 }
833}
834
423a8a94 835static inline struct sgt_dma {
894ccebe
CW
836 struct scatterlist *sg;
837 dma_addr_t dma, max;
423a8a94
CW
838} sgt_dma(struct i915_vma *vma) {
839 struct scatterlist *sg = vma->pages->sgl;
840 dma_addr_t addr = sg_dma_address(sg);
841 return (struct sgt_dma) { sg, addr, addr + sg->length };
842}
894ccebe 843
9e89f9ee
CW
844struct gen8_insert_pte {
845 u16 pml4e;
846 u16 pdpe;
847 u16 pde;
848 u16 pte;
849};
850
851static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
852{
853 return (struct gen8_insert_pte) {
854 gen8_pml4e_index(start),
855 gen8_pdpe_index(start),
856 gen8_pde_index(start),
857 gen8_pte_index(start),
858 };
859}
860
894ccebe
CW
861static __always_inline bool
862gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
f9b5b782 863 struct i915_page_directory_pointer *pdp,
894ccebe 864 struct sgt_dma *iter,
9e89f9ee 865 struct gen8_insert_pte *idx,
f9b5b782
MT
866 enum i915_cache_level cache_level)
867{
894ccebe
CW
868 struct i915_page_directory *pd;
869 const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
870 gen8_pte_t *vaddr;
871 bool ret;
9df15b49 872
3e490042 873 GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
9e89f9ee
CW
874 pd = pdp->page_directory[idx->pdpe];
875 vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
894ccebe 876 do {
9e89f9ee
CW
877 vaddr[idx->pte] = pte_encode | iter->dma;
878
894ccebe
CW
879 iter->dma += PAGE_SIZE;
880 if (iter->dma >= iter->max) {
881 iter->sg = __sg_next(iter->sg);
882 if (!iter->sg) {
883 ret = false;
884 break;
885 }
7ad47cf2 886
894ccebe
CW
887 iter->dma = sg_dma_address(iter->sg);
888 iter->max = iter->dma + iter->sg->length;
d7b3de91 889 }
9df15b49 890
9e89f9ee
CW
891 if (++idx->pte == GEN8_PTES) {
892 idx->pte = 0;
893
894 if (++idx->pde == I915_PDES) {
895 idx->pde = 0;
896
894ccebe 897 /* Limited by sg length for 3lvl */
9e89f9ee
CW
898 if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
899 idx->pdpe = 0;
894ccebe 900 ret = true;
de5ba8eb 901 break;
894ccebe
CW
902 }
903
3e490042 904 GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
9e89f9ee 905 pd = pdp->page_directory[idx->pdpe];
7ad47cf2 906 }
894ccebe 907
9231da70 908 kunmap_atomic(vaddr);
9e89f9ee 909 vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
9df15b49 910 }
894ccebe 911 } while (1);
9231da70 912 kunmap_atomic(vaddr);
d1c54acd 913
894ccebe 914 return ret;
9df15b49
BW
915}
916
894ccebe 917static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
4a234c5f 918 struct i915_vma *vma,
894ccebe
CW
919 enum i915_cache_level cache_level,
920 u32 unused)
f9b5b782 921{
17369ba0 922 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
423a8a94 923 struct sgt_dma iter = sgt_dma(vma);
4a234c5f 924 struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
f9b5b782 925
9e89f9ee
CW
926 gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
927 cache_level);
894ccebe 928}
de5ba8eb 929
894ccebe 930static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
4a234c5f 931 struct i915_vma *vma,
894ccebe
CW
932 enum i915_cache_level cache_level,
933 u32 unused)
934{
935 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
423a8a94 936 struct sgt_dma iter = sgt_dma(vma);
894ccebe 937 struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
4a234c5f 938 struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
de5ba8eb 939
9e89f9ee
CW
940 while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
941 &idx, cache_level))
942 GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
f9b5b782
MT
943}
944
8448661d 945static void gen8_free_page_tables(struct i915_address_space *vm,
f37c0505 946 struct i915_page_directory *pd)
7ad47cf2
BW
947{
948 int i;
949
567047be 950 if (!px_page(pd))
7ad47cf2
BW
951 return;
952
fe52e37f
CW
953 for (i = 0; i < I915_PDES; i++) {
954 if (pd->page_table[i] != vm->scratch_pt)
955 free_pt(vm, pd->page_table[i]);
06fda602 956 }
d7b3de91
BW
957}
958
8776f02b
MK
959static int gen8_init_scratch(struct i915_address_space *vm)
960{
64c050db 961 int ret;
8776f02b 962
8448661d 963 ret = setup_scratch_page(vm, I915_GFP_DMA);
8bcdd0f7
CW
964 if (ret)
965 return ret;
8776f02b 966
8448661d 967 vm->scratch_pt = alloc_pt(vm);
8776f02b 968 if (IS_ERR(vm->scratch_pt)) {
64c050db
MA
969 ret = PTR_ERR(vm->scratch_pt);
970 goto free_scratch_page;
8776f02b
MK
971 }
972
8448661d 973 vm->scratch_pd = alloc_pd(vm);
8776f02b 974 if (IS_ERR(vm->scratch_pd)) {
64c050db
MA
975 ret = PTR_ERR(vm->scratch_pd);
976 goto free_pt;
8776f02b
MK
977 }
978
1e6437b0 979 if (use_4lvl(vm)) {
8448661d 980 vm->scratch_pdp = alloc_pdp(vm);
69ab76fd 981 if (IS_ERR(vm->scratch_pdp)) {
64c050db
MA
982 ret = PTR_ERR(vm->scratch_pdp);
983 goto free_pd;
69ab76fd
MT
984 }
985 }
986
8776f02b
MK
987 gen8_initialize_pt(vm, vm->scratch_pt);
988 gen8_initialize_pd(vm, vm->scratch_pd);
1e6437b0 989 if (use_4lvl(vm))
69ab76fd 990 gen8_initialize_pdp(vm, vm->scratch_pdp);
8776f02b
MK
991
992 return 0;
64c050db
MA
993
994free_pd:
8448661d 995 free_pd(vm, vm->scratch_pd);
64c050db 996free_pt:
8448661d 997 free_pt(vm, vm->scratch_pt);
64c050db 998free_scratch_page:
8448661d 999 cleanup_scratch_page(vm);
64c050db
MA
1000
1001 return ret;
8776f02b
MK
1002}
1003
650da34c
ZL
1004static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
1005{
1e6437b0
MK
1006 struct i915_address_space *vm = &ppgtt->base;
1007 struct drm_i915_private *dev_priv = vm->i915;
650da34c 1008 enum vgt_g2v_type msg;
650da34c
ZL
1009 int i;
1010
1e6437b0
MK
1011 if (use_4lvl(vm)) {
1012 const u64 daddr = px_dma(&ppgtt->pml4);
650da34c 1013
ab75bb5d
VS
1014 I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
1015 I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
650da34c
ZL
1016
1017 msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
1018 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
1019 } else {
e7167769 1020 for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1e6437b0 1021 const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
650da34c 1022
ab75bb5d
VS
1023 I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
1024 I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
650da34c
ZL
1025 }
1026
1027 msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
1028 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
1029 }
1030
1031 I915_WRITE(vgtif_reg(g2v_notify), msg);
1032
1033 return 0;
1034}
1035
8776f02b
MK
1036static void gen8_free_scratch(struct i915_address_space *vm)
1037{
1e6437b0 1038 if (use_4lvl(vm))
8448661d
CW
1039 free_pdp(vm, vm->scratch_pdp);
1040 free_pd(vm, vm->scratch_pd);
1041 free_pt(vm, vm->scratch_pt);
1042 cleanup_scratch_page(vm);
8776f02b
MK
1043}
1044
8448661d 1045static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
762d9936 1046 struct i915_page_directory_pointer *pdp)
b45a6715 1047{
3e490042 1048 const unsigned int pdpes = i915_pdpes_per_pdp(vm);
b45a6715
BW
1049 int i;
1050
3e490042 1051 for (i = 0; i < pdpes; i++) {
fe52e37f 1052 if (pdp->page_directory[i] == vm->scratch_pd)
06fda602
BW
1053 continue;
1054
8448661d
CW
1055 gen8_free_page_tables(vm, pdp->page_directory[i]);
1056 free_pd(vm, pdp->page_directory[i]);
7ad47cf2 1057 }
69876bed 1058
8448661d 1059 free_pdp(vm, pdp);
762d9936
MT
1060}
1061
1062static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
1063{
1064 int i;
1065
c5d092a4
CW
1066 for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
1067 if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
762d9936
MT
1068 continue;
1069
8448661d 1070 gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
762d9936
MT
1071 }
1072
8448661d 1073 cleanup_px(&ppgtt->base, &ppgtt->pml4);
762d9936
MT
1074}
1075
1076static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
1077{
49d73912 1078 struct drm_i915_private *dev_priv = vm->i915;
e5716f55 1079 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
762d9936 1080
275a991c 1081 if (intel_vgpu_active(dev_priv))
650da34c
ZL
1082 gen8_ppgtt_notify_vgt(ppgtt, false);
1083
1e6437b0 1084 if (use_4lvl(vm))
762d9936 1085 gen8_ppgtt_cleanup_4lvl(ppgtt);
1e6437b0
MK
1086 else
1087 gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);
d4ec9da0 1088
8776f02b 1089 gen8_free_scratch(vm);
b45a6715
BW
1090}
1091
fe52e37f
CW
1092static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
1093 struct i915_page_directory *pd,
1094 u64 start, u64 length)
bf2b4ed2 1095{
d7b2633d 1096 struct i915_page_table *pt;
dd19674b 1097 u64 from = start;
fe52e37f 1098 unsigned int pde;
bf2b4ed2 1099
e8ebd8e2 1100 gen8_for_each_pde(pt, pd, start, length, pde) {
fe52e37f 1101 if (pt == vm->scratch_pt) {
dd19674b
CW
1102 pt = alloc_pt(vm);
1103 if (IS_ERR(pt))
1104 goto unwind;
5441f0cb 1105
dd19674b 1106 gen8_initialize_pt(vm, pt);
fe52e37f
CW
1107
1108 gen8_ppgtt_set_pde(vm, pd, pt, pde);
1109 pd->used_pdes++;
bf75d59e 1110 GEM_BUG_ON(pd->used_pdes > I915_PDES);
dd19674b 1111 }
fe52e37f 1112
dd19674b 1113 pt->used_ptes += gen8_pte_count(start, length);
7ad47cf2 1114 }
bf2b4ed2 1115 return 0;
7ad47cf2 1116
dd19674b
CW
1117unwind:
1118 gen8_ppgtt_clear_pd(vm, pd, from, start - from);
d7b3de91 1119 return -ENOMEM;
bf2b4ed2
BW
1120}
1121
c5d092a4
CW
1122static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
1123 struct i915_page_directory_pointer *pdp,
1124 u64 start, u64 length)
bf2b4ed2 1125{
5441f0cb 1126 struct i915_page_directory *pd;
e2b763ca
CW
1127 u64 from = start;
1128 unsigned int pdpe;
bf2b4ed2
BW
1129 int ret;
1130
e8ebd8e2 1131 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
e2b763ca
CW
1132 if (pd == vm->scratch_pd) {
1133 pd = alloc_pd(vm);
1134 if (IS_ERR(pd))
1135 goto unwind;
5441f0cb 1136
e2b763ca 1137 gen8_initialize_pd(vm, pd);
fe52e37f 1138 gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
e2b763ca 1139 pdp->used_pdpes++;
3e490042 1140 GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
75afcf72
CW
1141
1142 mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
e2b763ca
CW
1143 }
1144
1145 ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
bf75d59e
CW
1146 if (unlikely(ret))
1147 goto unwind_pd;
fe52e37f 1148 }
33c8819f 1149
d7b3de91 1150 return 0;
bf2b4ed2 1151
bf75d59e
CW
1152unwind_pd:
1153 if (!pd->used_pdes) {
1154 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1155 GEM_BUG_ON(!pdp->used_pdpes);
1156 pdp->used_pdpes--;
1157 free_pd(vm, pd);
1158 }
e2b763ca
CW
1159unwind:
1160 gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
1161 return -ENOMEM;
bf2b4ed2
BW
1162}
1163
c5d092a4
CW
1164static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
1165 u64 start, u64 length)
762d9936 1166{
c5d092a4
CW
1167 return gen8_ppgtt_alloc_pdp(vm,
1168 &i915_vm_to_ppgtt(vm)->pdp, start, length);
1169}
762d9936 1170
c5d092a4
CW
1171static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
1172 u64 start, u64 length)
1173{
1174 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1175 struct i915_pml4 *pml4 = &ppgtt->pml4;
1176 struct i915_page_directory_pointer *pdp;
1177 u64 from = start;
1178 u32 pml4e;
1179 int ret;
762d9936 1180
e8ebd8e2 1181 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
c5d092a4
CW
1182 if (pml4->pdps[pml4e] == vm->scratch_pdp) {
1183 pdp = alloc_pdp(vm);
1184 if (IS_ERR(pdp))
1185 goto unwind;
762d9936 1186
c5d092a4
CW
1187 gen8_initialize_pdp(vm, pdp);
1188 gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
1189 }
762d9936 1190
c5d092a4 1191 ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
bf75d59e
CW
1192 if (unlikely(ret))
1193 goto unwind_pdp;
762d9936
MT
1194 }
1195
762d9936
MT
1196 return 0;
1197
bf75d59e
CW
1198unwind_pdp:
1199 if (!pdp->used_pdpes) {
1200 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
1201 free_pdp(vm, pdp);
1202 }
c5d092a4
CW
1203unwind:
1204 gen8_ppgtt_clear_4lvl(vm, from, start - from);
1205 return -ENOMEM;
762d9936
MT
1206}
1207
8448661d
CW
1208static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
1209 struct i915_page_directory_pointer *pdp,
75c7b0b8 1210 u64 start, u64 length,
ea91e401
MT
1211 gen8_pte_t scratch_pte,
1212 struct seq_file *m)
1213{
3e490042 1214 struct i915_address_space *vm = &ppgtt->base;
ea91e401 1215 struct i915_page_directory *pd;
75c7b0b8 1216 u32 pdpe;
ea91e401 1217
e8ebd8e2 1218 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
ea91e401 1219 struct i915_page_table *pt;
75c7b0b8
CW
1220 u64 pd_len = length;
1221 u64 pd_start = start;
1222 u32 pde;
ea91e401 1223
e2b763ca 1224 if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
ea91e401
MT
1225 continue;
1226
1227 seq_printf(m, "\tPDPE #%d\n", pdpe);
e8ebd8e2 1228 gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
75c7b0b8 1229 u32 pte;
ea91e401
MT
1230 gen8_pte_t *pt_vaddr;
1231
fe52e37f 1232 if (pd->page_table[pde] == ppgtt->base.scratch_pt)
ea91e401
MT
1233 continue;
1234
9231da70 1235 pt_vaddr = kmap_atomic_px(pt);
ea91e401 1236 for (pte = 0; pte < GEN8_PTES; pte += 4) {
75c7b0b8
CW
1237 u64 va = (pdpe << GEN8_PDPE_SHIFT |
1238 pde << GEN8_PDE_SHIFT |
1239 pte << GEN8_PTE_SHIFT);
ea91e401
MT
1240 int i;
1241 bool found = false;
1242
1243 for (i = 0; i < 4; i++)
1244 if (pt_vaddr[pte + i] != scratch_pte)
1245 found = true;
1246 if (!found)
1247 continue;
1248
1249 seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
1250 for (i = 0; i < 4; i++) {
1251 if (pt_vaddr[pte + i] != scratch_pte)
1252 seq_printf(m, " %llx", pt_vaddr[pte + i]);
1253 else
1254 seq_puts(m, " SCRATCH ");
1255 }
1256 seq_puts(m, "\n");
1257 }
ea91e401
MT
1258 kunmap_atomic(pt_vaddr);
1259 }
1260 }
1261}
1262
1263static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1264{
1265 struct i915_address_space *vm = &ppgtt->base;
894ccebe
CW
1266 const gen8_pte_t scratch_pte =
1267 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
381b943b 1268 u64 start = 0, length = ppgtt->base.total;
ea91e401 1269
1e6437b0 1270 if (use_4lvl(vm)) {
75c7b0b8 1271 u64 pml4e;
ea91e401
MT
1272 struct i915_pml4 *pml4 = &ppgtt->pml4;
1273 struct i915_page_directory_pointer *pdp;
1274
e8ebd8e2 1275 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
c5d092a4 1276 if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
ea91e401
MT
1277 continue;
1278
1279 seq_printf(m, " PML4E #%llu\n", pml4e);
8448661d 1280 gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
ea91e401 1281 }
1e6437b0
MK
1282 } else {
1283 gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
ea91e401
MT
1284 }
1285}
1286
e2b763ca 1287static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
331f38e7 1288{
e2b763ca
CW
1289 struct i915_address_space *vm = &ppgtt->base;
1290 struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
1291 struct i915_page_directory *pd;
1292 u64 start = 0, length = ppgtt->base.total;
1293 u64 from = start;
1294 unsigned int pdpe;
331f38e7 1295
e2b763ca
CW
1296 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1297 pd = alloc_pd(vm);
1298 if (IS_ERR(pd))
1299 goto unwind;
331f38e7 1300
e2b763ca
CW
1301 gen8_initialize_pd(vm, pd);
1302 gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1303 pdp->used_pdpes++;
1304 }
331f38e7 1305
e2b763ca
CW
1306 pdp->used_pdpes++; /* never remove */
1307 return 0;
331f38e7 1308
e2b763ca
CW
1309unwind:
1310 start -= from;
1311 gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
1312 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1313 free_pd(vm, pd);
1314 }
1315 pdp->used_pdpes = 0;
1316 return -ENOMEM;
331f38e7
ZL
1317}
1318
eb0b44ad 1319/*
f3a964b9
BW
1320 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
1321 * with a net effect resembling a 2-level page table in normal x86 terms. Each
1322 * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
1323 * space.
37aca44a 1324 *
f3a964b9 1325 */
5c5f6457 1326static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
37aca44a 1327{
1e6437b0
MK
1328 struct i915_address_space *vm = &ppgtt->base;
1329 struct drm_i915_private *dev_priv = vm->i915;
8776f02b 1330 int ret;
7cb6d7ac 1331
1e6437b0
MK
1332 ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
1333 1ULL << 48 :
1334 1ULL << 32;
1335
8776f02b 1336 ret = gen8_init_scratch(&ppgtt->base);
1e6437b0
MK
1337 if (ret) {
1338 ppgtt->base.total = 0;
8776f02b 1339 return ret;
1e6437b0 1340 }
69876bed 1341
8448661d
CW
1342 /* There are only few exceptions for gen >=6. chv and bxt.
1343 * And we are not sure about the latter so play safe for now.
1344 */
1345 if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
1346 ppgtt->base.pt_kmap_wc = true;
1347
1e6437b0 1348 if (use_4lvl(vm)) {
8448661d 1349 ret = setup_px(&ppgtt->base, &ppgtt->pml4);
762d9936
MT
1350 if (ret)
1351 goto free_scratch;
6ac18502 1352
69ab76fd
MT
1353 gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
1354
e7167769 1355 ppgtt->switch_mm = gen8_mm_switch_4lvl;
c5d092a4 1356 ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
894ccebe 1357 ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
fe52e37f 1358 ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
762d9936 1359 } else {
fe52e37f 1360 ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
81ba8aef
MT
1361 if (ret)
1362 goto free_scratch;
1363
275a991c 1364 if (intel_vgpu_active(dev_priv)) {
e2b763ca
CW
1365 ret = gen8_preallocate_top_level_pdp(ppgtt);
1366 if (ret) {
1367 __pdp_fini(&ppgtt->pdp);
331f38e7 1368 goto free_scratch;
e2b763ca 1369 }
331f38e7 1370 }
894ccebe 1371
e7167769 1372 ppgtt->switch_mm = gen8_mm_switch_3lvl;
c5d092a4 1373 ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
894ccebe 1374 ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
fe52e37f 1375 ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
81ba8aef 1376 }
6ac18502 1377
275a991c 1378 if (intel_vgpu_active(dev_priv))
650da34c
ZL
1379 gen8_ppgtt_notify_vgt(ppgtt, true);
1380
054b9acd
MK
1381 ppgtt->base.cleanup = gen8_ppgtt_cleanup;
1382 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1383 ppgtt->base.bind_vma = ppgtt_bind_vma;
1384 ppgtt->debug_dump = gen8_dump_ppgtt;
1385
d7b2633d 1386 return 0;
6ac18502
MT
1387
1388free_scratch:
1389 gen8_free_scratch(&ppgtt->base);
1390 return ret;
d7b2633d
MT
1391}
1392
87d60b63
BW
1393static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1394{
87d60b63 1395 struct i915_address_space *vm = &ppgtt->base;
09942c65 1396 struct i915_page_table *unused;
07749ef3 1397 gen6_pte_t scratch_pte;
381b943b
CW
1398 u32 pd_entry, pte, pde;
1399 u32 start = 0, length = ppgtt->base.total;
87d60b63 1400
8bcdd0f7 1401 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
4fb84d99 1402 I915_CACHE_LLC, 0);
87d60b63 1403
731f74c5 1404 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
87d60b63 1405 u32 expected;
07749ef3 1406 gen6_pte_t *pt_vaddr;
567047be 1407 const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
09942c65 1408 pd_entry = readl(ppgtt->pd_addr + pde);
87d60b63
BW
1409 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
1410
1411 if (pd_entry != expected)
1412 seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
1413 pde,
1414 pd_entry,
1415 expected);
1416 seq_printf(m, "\tPDE: %x\n", pd_entry);
1417
9231da70 1418 pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);
d1c54acd 1419
07749ef3 1420 for (pte = 0; pte < GEN6_PTES; pte+=4) {
87d60b63 1421 unsigned long va =
07749ef3 1422 (pde * PAGE_SIZE * GEN6_PTES) +
87d60b63
BW
1423 (pte * PAGE_SIZE);
1424 int i;
1425 bool found = false;
1426 for (i = 0; i < 4; i++)
1427 if (pt_vaddr[pte + i] != scratch_pte)
1428 found = true;
1429 if (!found)
1430 continue;
1431
1432 seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
1433 for (i = 0; i < 4; i++) {
1434 if (pt_vaddr[pte + i] != scratch_pte)
1435 seq_printf(m, " %08x", pt_vaddr[pte + i]);
1436 else
1437 seq_puts(m, " SCRATCH ");
1438 }
1439 seq_puts(m, "\n");
1440 }
9231da70 1441 kunmap_atomic(pt_vaddr);
87d60b63
BW
1442 }
1443}
1444
678d96fb 1445/* Write pde (index) from the page directory @pd to the page table @pt */
16a011c8
CW
1446static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
1447 const unsigned int pde,
1448 const struct i915_page_table *pt)
6197349b 1449{
678d96fb 1450 /* Caller needs to make sure the write completes if necessary */
16a011c8
CW
1451 writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
1452 ppgtt->pd_addr + pde);
678d96fb 1453}
6197349b 1454
678d96fb
BW
1455/* Write all the page tables found in the ppgtt structure to incrementing page
1456 * directories. */
16a011c8 1457static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
75c7b0b8 1458 u32 start, u32 length)
678d96fb 1459{
ec565b3c 1460 struct i915_page_table *pt;
16a011c8 1461 unsigned int pde;
678d96fb 1462
16a011c8
CW
1463 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
1464 gen6_write_pde(ppgtt, pde, pt);
678d96fb 1465
16a011c8 1466 mark_tlbs_dirty(ppgtt);
dd19674b 1467 wmb();
3e302542
BW
1468}
1469
75c7b0b8 1470static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
3e302542 1471{
dd19674b
CW
1472 GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
1473 return ppgtt->pd.base.ggtt_offset << 10;
b4a74e3a
BW
1474}
1475
90252e5c 1476static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1477 struct drm_i915_gem_request *req)
90252e5c 1478{
4a570db5 1479 struct intel_engine_cs *engine = req->engine;
73dec95e 1480 u32 *cs;
90252e5c 1481
90252e5c 1482 /* NB: TLBs must be flushed and invalidated before a switch */
73dec95e
TU
1483 cs = intel_ring_begin(req, 6);
1484 if (IS_ERR(cs))
1485 return PTR_ERR(cs);
90252e5c 1486
73dec95e
TU
1487 *cs++ = MI_LOAD_REGISTER_IMM(2);
1488 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1489 *cs++ = PP_DIR_DCLV_2G;
1490 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1491 *cs++ = get_pd_offset(ppgtt);
1492 *cs++ = MI_NOOP;
1493 intel_ring_advance(req, cs);
90252e5c
BW
1494
1495 return 0;
1496}
1497
48a10389 1498static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1499 struct drm_i915_gem_request *req)
48a10389 1500{
4a570db5 1501 struct intel_engine_cs *engine = req->engine;
73dec95e 1502 u32 *cs;
48a10389 1503
48a10389 1504 /* NB: TLBs must be flushed and invalidated before a switch */
73dec95e
TU
1505 cs = intel_ring_begin(req, 6);
1506 if (IS_ERR(cs))
1507 return PTR_ERR(cs);
1508
1509 *cs++ = MI_LOAD_REGISTER_IMM(2);
1510 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1511 *cs++ = PP_DIR_DCLV_2G;
1512 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1513 *cs++ = get_pd_offset(ppgtt);
1514 *cs++ = MI_NOOP;
1515 intel_ring_advance(req, cs);
48a10389
BW
1516
1517 return 0;
1518}
1519
eeb9488e 1520static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1521 struct drm_i915_gem_request *req)
eeb9488e 1522{
4a570db5 1523 struct intel_engine_cs *engine = req->engine;
8eb95204 1524 struct drm_i915_private *dev_priv = req->i915;
48a10389 1525
e2f80391
TU
1526 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
1527 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
eeb9488e
BW
1528 return 0;
1529}
1530
c6be607a 1531static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
eeb9488e 1532{
e2f80391 1533 struct intel_engine_cs *engine;
3b3f1650 1534 enum intel_engine_id id;
3e302542 1535
3b3f1650 1536 for_each_engine(engine, dev_priv, id) {
c6be607a
TU
1537 u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
1538 GEN8_GFX_PPGTT_48B : 0;
e2f80391 1539 I915_WRITE(RING_MODE_GEN7(engine),
2dba3239 1540 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
eeb9488e 1541 }
eeb9488e 1542}
6197349b 1543
c6be607a 1544static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
3e302542 1545{
e2f80391 1546 struct intel_engine_cs *engine;
75c7b0b8 1547 u32 ecochk, ecobits;
3b3f1650 1548 enum intel_engine_id id;
6197349b 1549
b4a74e3a
BW
1550 ecobits = I915_READ(GAC_ECO_BITS);
1551 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
a65c2fcd 1552
b4a74e3a 1553 ecochk = I915_READ(GAM_ECOCHK);
772c2a51 1554 if (IS_HASWELL(dev_priv)) {
b4a74e3a
BW
1555 ecochk |= ECOCHK_PPGTT_WB_HSW;
1556 } else {
1557 ecochk |= ECOCHK_PPGTT_LLC_IVB;
1558 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1559 }
1560 I915_WRITE(GAM_ECOCHK, ecochk);
a65c2fcd 1561
3b3f1650 1562 for_each_engine(engine, dev_priv, id) {
6197349b 1563 /* GFX_MODE is per-ring on gen7+ */
e2f80391 1564 I915_WRITE(RING_MODE_GEN7(engine),
b4a74e3a 1565 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
6197349b 1566 }
b4a74e3a 1567}
6197349b 1568
c6be607a 1569static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
b4a74e3a 1570{
75c7b0b8 1571 u32 ecochk, gab_ctl, ecobits;
a65c2fcd 1572
b4a74e3a
BW
1573 ecobits = I915_READ(GAC_ECO_BITS);
1574 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1575 ECOBITS_PPGTT_CACHE64B);
6197349b 1576
b4a74e3a
BW
1577 gab_ctl = I915_READ(GAB_CTL);
1578 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
1579
1580 ecochk = I915_READ(GAM_ECOCHK);
1581 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1582
1583 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
6197349b
BW
1584}
1585
1d2a314c 1586/* PPGTT support for Sandybdrige/Gen6 and later */
853ba5d2 1587static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
dd19674b 1588 u64 start, u64 length)
1d2a314c 1589{
e5716f55 1590 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
dd19674b
CW
1591 unsigned int first_entry = start >> PAGE_SHIFT;
1592 unsigned int pde = first_entry / GEN6_PTES;
1593 unsigned int pte = first_entry % GEN6_PTES;
1594 unsigned int num_entries = length >> PAGE_SHIFT;
1595 gen6_pte_t scratch_pte =
1596 vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
1d2a314c 1597
7bddb01f 1598 while (num_entries) {
dd19674b
CW
1599 struct i915_page_table *pt = ppgtt->pd.page_table[pde++];
1600 unsigned int end = min(pte + num_entries, GEN6_PTES);
1601 gen6_pte_t *vaddr;
7bddb01f 1602
dd19674b 1603 num_entries -= end - pte;
1d2a314c 1604
dd19674b
CW
1605 /* Note that the hw doesn't support removing PDE on the fly
1606 * (they are cached inside the context with no means to
1607 * invalidate the cache), so we can only reset the PTE
1608 * entries back to scratch.
1609 */
1d2a314c 1610
dd19674b
CW
1611 vaddr = kmap_atomic_px(pt);
1612 do {
1613 vaddr[pte++] = scratch_pte;
1614 } while (pte < end);
1615 kunmap_atomic(vaddr);
1d2a314c 1616
dd19674b 1617 pte = 0;
7bddb01f 1618 }
1d2a314c
DV
1619}
1620
853ba5d2 1621static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
4a234c5f 1622 struct i915_vma *vma,
75c7b0b8
CW
1623 enum i915_cache_level cache_level,
1624 u32 flags)
def886c3 1625{
e5716f55 1626 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
4a234c5f 1627 unsigned first_entry = vma->node.start >> PAGE_SHIFT;
07749ef3
MT
1628 unsigned act_pt = first_entry / GEN6_PTES;
1629 unsigned act_pte = first_entry % GEN6_PTES;
b31144c0 1630 const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
423a8a94 1631 struct sgt_dma iter = sgt_dma(vma);
b31144c0
CW
1632 gen6_pte_t *vaddr;
1633
9231da70 1634 vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
b31144c0
CW
1635 do {
1636 vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
6e995e23 1637
b31144c0
CW
1638 iter.dma += PAGE_SIZE;
1639 if (iter.dma == iter.max) {
1640 iter.sg = __sg_next(iter.sg);
1641 if (!iter.sg)
1642 break;
6e995e23 1643
b31144c0
CW
1644 iter.dma = sg_dma_address(iter.sg);
1645 iter.max = iter.dma + iter.sg->length;
1646 }
24f3a8cf 1647
07749ef3 1648 if (++act_pte == GEN6_PTES) {
9231da70
CW
1649 kunmap_atomic(vaddr);
1650 vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
6e995e23 1651 act_pte = 0;
def886c3 1652 }
b31144c0 1653 } while (1);
9231da70 1654 kunmap_atomic(vaddr);
def886c3
DV
1655}
1656
678d96fb 1657static int gen6_alloc_va_range(struct i915_address_space *vm,
dd19674b 1658 u64 start, u64 length)
678d96fb 1659{
e5716f55 1660 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
ec565b3c 1661 struct i915_page_table *pt;
dd19674b
CW
1662 u64 from = start;
1663 unsigned int pde;
1664 bool flush = false;
4933d519 1665
731f74c5 1666 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
dd19674b
CW
1667 if (pt == vm->scratch_pt) {
1668 pt = alloc_pt(vm);
1669 if (IS_ERR(pt))
1670 goto unwind_out;
4933d519 1671
dd19674b
CW
1672 gen6_initialize_pt(vm, pt);
1673 ppgtt->pd.page_table[pde] = pt;
1674 gen6_write_pde(ppgtt, pde, pt);
1675 flush = true;
4933d519 1676 }
4933d519
MT
1677 }
1678
dd19674b
CW
1679 if (flush) {
1680 mark_tlbs_dirty(ppgtt);
1681 wmb();
678d96fb
BW
1682 }
1683
1684 return 0;
4933d519
MT
1685
1686unwind_out:
dd19674b
CW
1687 gen6_ppgtt_clear_range(vm, from, start);
1688 return -ENOMEM;
678d96fb
BW
1689}
1690
8776f02b
MK
1691static int gen6_init_scratch(struct i915_address_space *vm)
1692{
8bcdd0f7 1693 int ret;
8776f02b 1694
8448661d 1695 ret = setup_scratch_page(vm, I915_GFP_DMA);
8bcdd0f7
CW
1696 if (ret)
1697 return ret;
8776f02b 1698
8448661d 1699 vm->scratch_pt = alloc_pt(vm);
8776f02b 1700 if (IS_ERR(vm->scratch_pt)) {
8448661d 1701 cleanup_scratch_page(vm);
8776f02b
MK
1702 return PTR_ERR(vm->scratch_pt);
1703 }
1704
1705 gen6_initialize_pt(vm, vm->scratch_pt);
1706
1707 return 0;
1708}
1709
1710static void gen6_free_scratch(struct i915_address_space *vm)
1711{
8448661d
CW
1712 free_pt(vm, vm->scratch_pt);
1713 cleanup_scratch_page(vm);
8776f02b
MK
1714}
1715
061dd493 1716static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
a00d825d 1717{
e5716f55 1718 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
731f74c5 1719 struct i915_page_directory *pd = &ppgtt->pd;
09942c65 1720 struct i915_page_table *pt;
75c7b0b8 1721 u32 pde;
4933d519 1722
061dd493
DV
1723 drm_mm_remove_node(&ppgtt->node);
1724
731f74c5 1725 gen6_for_all_pdes(pt, pd, pde)
79ab9370 1726 if (pt != vm->scratch_pt)
8448661d 1727 free_pt(vm, pt);
06fda602 1728
8776f02b 1729 gen6_free_scratch(vm);
3440d265
DV
1730}
1731
b146520f 1732static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
3440d265 1733{
8776f02b 1734 struct i915_address_space *vm = &ppgtt->base;
49d73912 1735 struct drm_i915_private *dev_priv = ppgtt->base.i915;
72e96d64 1736 struct i915_ggtt *ggtt = &dev_priv->ggtt;
b146520f 1737 int ret;
1d2a314c 1738
c8d4c0d6
BW
1739 /* PPGTT PDEs reside in the GGTT and consists of 512 entries. The
1740 * allocator works in address space sizes, so it's multiplied by page
1741 * size. We allocate at the top of the GTT to avoid fragmentation.
1742 */
72e96d64 1743 BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
4933d519 1744
8776f02b
MK
1745 ret = gen6_init_scratch(vm);
1746 if (ret)
1747 return ret;
4933d519 1748
e007b19d
CW
1749 ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
1750 GEN6_PD_SIZE, GEN6_PD_ALIGN,
1751 I915_COLOR_UNEVICTABLE,
1752 0, ggtt->base.total,
1753 PIN_HIGH);
c8c26622 1754 if (ret)
678d96fb
BW
1755 goto err_out;
1756
72e96d64 1757 if (ppgtt->node.start < ggtt->mappable_end)
c8d4c0d6 1758 DRM_DEBUG("Forced to use aperture for PDEs\n");
1d2a314c 1759
52c126ee
CW
1760 ppgtt->pd.base.ggtt_offset =
1761 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
1762
1763 ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
1764 ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
1765
c8c26622 1766 return 0;
678d96fb
BW
1767
1768err_out:
8776f02b 1769 gen6_free_scratch(vm);
678d96fb 1770 return ret;
b146520f
BW
1771}
1772
b146520f
BW
1773static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
1774{
2f2cf682 1775 return gen6_ppgtt_allocate_page_directories(ppgtt);
4933d519 1776}
06dc68d6 1777
4933d519 1778static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
75c7b0b8 1779 u64 start, u64 length)
4933d519 1780{
ec565b3c 1781 struct i915_page_table *unused;
75c7b0b8 1782 u32 pde;
1d2a314c 1783
731f74c5 1784 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
79ab9370 1785 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
b146520f
BW
1786}
1787
5c5f6457 1788static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
b146520f 1789{
49d73912 1790 struct drm_i915_private *dev_priv = ppgtt->base.i915;
72e96d64 1791 struct i915_ggtt *ggtt = &dev_priv->ggtt;
b146520f
BW
1792 int ret;
1793
72e96d64 1794 ppgtt->base.pte_encode = ggtt->base.pte_encode;
5db94019 1795 if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
b146520f 1796 ppgtt->switch_mm = gen6_mm_switch;
772c2a51 1797 else if (IS_HASWELL(dev_priv))
b146520f 1798 ppgtt->switch_mm = hsw_mm_switch;
5db94019 1799 else if (IS_GEN7(dev_priv))
b146520f 1800 ppgtt->switch_mm = gen7_mm_switch;
8eb95204 1801 else
b146520f
BW
1802 BUG();
1803
1804 ret = gen6_ppgtt_alloc(ppgtt);
1805 if (ret)
1806 return ret;
1807
09942c65 1808 ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
1d2a314c 1809
5c5f6457 1810 gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
16a011c8 1811 gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
678d96fb 1812
52c126ee
CW
1813 ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
1814 if (ret) {
1815 gen6_ppgtt_cleanup(&ppgtt->base);
1816 return ret;
1817 }
1818
054b9acd
MK
1819 ppgtt->base.clear_range = gen6_ppgtt_clear_range;
1820 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
1821 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1822 ppgtt->base.bind_vma = ppgtt_bind_vma;
1823 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
1824 ppgtt->debug_dump = gen6_dump_ppgtt;
1825
440fd528 1826 DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
b146520f
BW
1827 ppgtt->node.size >> 20,
1828 ppgtt->node.start / PAGE_SIZE);
3440d265 1829
52c126ee
CW
1830 DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
1831 ppgtt->pd.base.ggtt_offset << 10);
fa76da34 1832
b146520f 1833 return 0;
3440d265
DV
1834}
1835
2bfa996e
CW
1836static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
1837 struct drm_i915_private *dev_priv)
3440d265 1838{
49d73912 1839 ppgtt->base.i915 = dev_priv;
8448661d 1840 ppgtt->base.dma = &dev_priv->drm.pdev->dev;
3440d265 1841
2bfa996e 1842 if (INTEL_INFO(dev_priv)->gen < 8)
5c5f6457 1843 return gen6_ppgtt_init(ppgtt);
3ed124b2 1844 else
d7b2633d 1845 return gen8_ppgtt_init(ppgtt);
fa76da34 1846}
c114f76a 1847
a2cad9df 1848static void i915_address_space_init(struct i915_address_space *vm,
80b204bc
CW
1849 struct drm_i915_private *dev_priv,
1850 const char *name)
a2cad9df 1851{
80b204bc 1852 i915_gem_timeline_init(dev_priv, &vm->timeline, name);
47db922f 1853
381b943b 1854 drm_mm_init(&vm->mm, 0, vm->total);
47db922f
CW
1855 vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
1856
a2cad9df
MW
1857 INIT_LIST_HEAD(&vm->active_list);
1858 INIT_LIST_HEAD(&vm->inactive_list);
50e046b6 1859 INIT_LIST_HEAD(&vm->unbound_list);
47db922f 1860
a2cad9df 1861 list_add_tail(&vm->global_link, &dev_priv->vm_list);
8448661d 1862 pagevec_init(&vm->free_pages, false);
a2cad9df
MW
1863}
1864
ed9724dd
MA
1865static void i915_address_space_fini(struct i915_address_space *vm)
1866{
8448661d
CW
1867 if (pagevec_count(&vm->free_pages))
1868 vm_free_pages_release(vm);
1869
ed9724dd
MA
1870 i915_gem_timeline_fini(&vm->timeline);
1871 drm_mm_takedown(&vm->mm);
1872 list_del(&vm->global_link);
1873}
1874
c6be607a 1875static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
d5165ebd 1876{
d5165ebd
TG
1877 /* This function is for gtt related workarounds. This function is
1878 * called on driver load and after a GPU reset, so you can place
1879 * workarounds here even if they get overwritten by GPU reset.
1880 */
46c26662 1881 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl */
8652744b 1882 if (IS_BROADWELL(dev_priv))
d5165ebd 1883 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
920a14b2 1884 else if (IS_CHERRYVIEW(dev_priv))
d5165ebd 1885 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
b976dc53 1886 else if (IS_GEN9_BC(dev_priv))
d5165ebd 1887 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
9fb5026f 1888 else if (IS_GEN9_LP(dev_priv))
d5165ebd
TG
1889 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
1890}
1891
c6be607a 1892int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
82460d97 1893{
c6be607a 1894 gtt_write_workarounds(dev_priv);
d5165ebd 1895
671b5013
TD
1896 /* In the case of execlists, PPGTT is enabled by the context descriptor
1897 * and the PDPs are contained within the context itself. We don't
1898 * need to do anything here. */
1899 if (i915.enable_execlists)
1900 return 0;
1901
c6be607a 1902 if (!USES_PPGTT(dev_priv))
82460d97
DV
1903 return 0;
1904
5db94019 1905 if (IS_GEN6(dev_priv))
c6be607a 1906 gen6_ppgtt_enable(dev_priv);
5db94019 1907 else if (IS_GEN7(dev_priv))
c6be607a
TU
1908 gen7_ppgtt_enable(dev_priv);
1909 else if (INTEL_GEN(dev_priv) >= 8)
1910 gen8_ppgtt_enable(dev_priv);
82460d97 1911 else
c6be607a 1912 MISSING_CASE(INTEL_GEN(dev_priv));
82460d97 1913
4ad2fd88
JH
1914 return 0;
1915}
1d2a314c 1916
4d884705 1917struct i915_hw_ppgtt *
2bfa996e 1918i915_ppgtt_create(struct drm_i915_private *dev_priv,
80b204bc
CW
1919 struct drm_i915_file_private *fpriv,
1920 const char *name)
4d884705
DV
1921{
1922 struct i915_hw_ppgtt *ppgtt;
1923 int ret;
1924
1925 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1926 if (!ppgtt)
1927 return ERR_PTR(-ENOMEM);
1928
1188bc66 1929 ret = __hw_ppgtt_init(ppgtt, dev_priv);
4d884705
DV
1930 if (ret) {
1931 kfree(ppgtt);
1932 return ERR_PTR(ret);
1933 }
1934
1188bc66
CW
1935 kref_init(&ppgtt->ref);
1936 i915_address_space_init(&ppgtt->base, dev_priv, name);
1937 ppgtt->base.file = fpriv;
1938
198c974d
DCS
1939 trace_i915_ppgtt_create(&ppgtt->base);
1940
4d884705
DV
1941 return ppgtt;
1942}
1943
0c7eeda1
CW
1944void i915_ppgtt_close(struct i915_address_space *vm)
1945{
1946 struct list_head *phases[] = {
1947 &vm->active_list,
1948 &vm->inactive_list,
1949 &vm->unbound_list,
1950 NULL,
1951 }, **phase;
1952
1953 GEM_BUG_ON(vm->closed);
1954 vm->closed = true;
1955
1956 for (phase = phases; *phase; phase++) {
1957 struct i915_vma *vma, *vn;
1958
1959 list_for_each_entry_safe(vma, vn, *phase, vm_link)
1960 if (!i915_vma_is_closed(vma))
1961 i915_vma_close(vma);
1962 }
1963}
1964
ed9724dd 1965void i915_ppgtt_release(struct kref *kref)
ee960be7
DV
1966{
1967 struct i915_hw_ppgtt *ppgtt =
1968 container_of(kref, struct i915_hw_ppgtt, ref);
1969
198c974d
DCS
1970 trace_i915_ppgtt_release(&ppgtt->base);
1971
50e046b6 1972 /* vmas should already be unbound and destroyed */
ee960be7
DV
1973 WARN_ON(!list_empty(&ppgtt->base.active_list));
1974 WARN_ON(!list_empty(&ppgtt->base.inactive_list));
50e046b6 1975 WARN_ON(!list_empty(&ppgtt->base.unbound_list));
ee960be7
DV
1976
1977 ppgtt->base.cleanup(&ppgtt->base);
8448661d 1978 i915_address_space_fini(&ppgtt->base);
ee960be7
DV
1979 kfree(ppgtt);
1980}
1d2a314c 1981
a81cc00c
BW
1982/* Certain Gen5 chipsets require require idling the GPU before
1983 * unmapping anything from the GTT when VT-d is enabled.
1984 */
97d6d7ab 1985static bool needs_idle_maps(struct drm_i915_private *dev_priv)
a81cc00c 1986{
a81cc00c
BW
1987 /* Query intel_iommu to see if we need the workaround. Presumably that
1988 * was loaded first.
1989 */
80debff8 1990 return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
a81cc00c
BW
1991}
1992
dc97997a 1993void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
828c7908 1994{
e2f80391 1995 struct intel_engine_cs *engine;
3b3f1650 1996 enum intel_engine_id id;
828c7908 1997
dc97997a 1998 if (INTEL_INFO(dev_priv)->gen < 6)
828c7908
BW
1999 return;
2000
3b3f1650 2001 for_each_engine(engine, dev_priv, id) {
828c7908 2002 u32 fault_reg;
e2f80391 2003 fault_reg = I915_READ(RING_FAULT_REG(engine));
828c7908
BW
2004 if (fault_reg & RING_FAULT_VALID) {
2005 DRM_DEBUG_DRIVER("Unexpected fault\n"
59a5d290 2006 "\tAddr: 0x%08lx\n"
828c7908
BW
2007 "\tAddress space: %s\n"
2008 "\tSource ID: %d\n"
2009 "\tType: %d\n",
2010 fault_reg & PAGE_MASK,
2011 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2012 RING_FAULT_SRCID(fault_reg),
2013 RING_FAULT_FAULT_TYPE(fault_reg));
e2f80391 2014 I915_WRITE(RING_FAULT_REG(engine),
828c7908
BW
2015 fault_reg & ~RING_FAULT_VALID);
2016 }
2017 }
3b3f1650
AG
2018
2019 /* Engine specific init may not have been done till this point. */
2020 if (dev_priv->engine[RCS])
2021 POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
828c7908
BW
2022}
2023
275a991c 2024void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
828c7908 2025{
72e96d64 2026 struct i915_ggtt *ggtt = &dev_priv->ggtt;
828c7908
BW
2027
2028 /* Don't bother messing with faults pre GEN6 as we have little
2029 * documentation supporting that it's a good idea.
2030 */
275a991c 2031 if (INTEL_GEN(dev_priv) < 6)
828c7908
BW
2032 return;
2033
dc97997a 2034 i915_check_and_clear_faults(dev_priv);
828c7908 2035
381b943b 2036 ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
91e56499 2037
7c3f86b6 2038 i915_ggtt_invalidate(dev_priv);
828c7908
BW
2039}
2040
03ac84f1
CW
2041int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2042 struct sg_table *pages)
7c2e6fdf 2043{
1a292fa5
CW
2044 do {
2045 if (dma_map_sg(&obj->base.dev->pdev->dev,
2046 pages->sgl, pages->nents,
2047 PCI_DMA_BIDIRECTIONAL))
2048 return 0;
2049
2050 /* If the DMA remap fails, one cause can be that we have
2051 * too many objects pinned in a small remapping table,
2052 * such as swiotlb. Incrementally purge all other objects and
2053 * try again - if there are no more pages to remove from
2054 * the DMA remapper, i915_gem_shrink will return 0.
2055 */
2056 GEM_BUG_ON(obj->mm.pages == pages);
2057 } while (i915_gem_shrink(to_i915(obj->base.dev),
912d572d 2058 obj->base.size >> PAGE_SHIFT, NULL,
1a292fa5
CW
2059 I915_SHRINK_BOUND |
2060 I915_SHRINK_UNBOUND |
2061 I915_SHRINK_ACTIVE));
9da3da66 2062
03ac84f1 2063 return -ENOSPC;
7c2e6fdf
DV
2064}
2065
2c642b07 2066static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
94ec8f61 2067{
94ec8f61 2068 writeq(pte, addr);
94ec8f61
BW
2069}
2070
d6473f56
CW
2071static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2072 dma_addr_t addr,
75c7b0b8 2073 u64 offset,
d6473f56
CW
2074 enum i915_cache_level level,
2075 u32 unused)
2076{
7c3f86b6 2077 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
d6473f56 2078 gen8_pte_t __iomem *pte =
7c3f86b6 2079 (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
d6473f56 2080
4fb84d99 2081 gen8_set_pte(pte, gen8_pte_encode(addr, level));
d6473f56 2082
7c3f86b6 2083 ggtt->invalidate(vm->i915);
d6473f56
CW
2084}
2085
94ec8f61 2086static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
4a234c5f 2087 struct i915_vma *vma,
75c7b0b8
CW
2088 enum i915_cache_level level,
2089 u32 unused)
94ec8f61 2090{
ce7fda2e 2091 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
85d1225e
DG
2092 struct sgt_iter sgt_iter;
2093 gen8_pte_t __iomem *gtt_entries;
894ccebe 2094 const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
85d1225e 2095 dma_addr_t addr;
be69459a 2096
894ccebe 2097 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
4a234c5f
MA
2098 gtt_entries += vma->node.start >> PAGE_SHIFT;
2099 for_each_sgt_dma(addr, sgt_iter, vma->pages)
894ccebe 2100 gen8_set_pte(gtt_entries++, pte_encode | addr);
85d1225e 2101
894ccebe 2102 wmb();
94ec8f61 2103
94ec8f61
BW
2104 /* This next bit makes the above posting read even more important. We
2105 * want to flush the TLBs only after we're certain all the PTE updates
2106 * have finished.
2107 */
7c3f86b6 2108 ggtt->invalidate(vm->i915);
94ec8f61
BW
2109}
2110
d6473f56
CW
2111static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2112 dma_addr_t addr,
75c7b0b8 2113 u64 offset,
d6473f56
CW
2114 enum i915_cache_level level,
2115 u32 flags)
2116{
7c3f86b6 2117 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
d6473f56 2118 gen6_pte_t __iomem *pte =
7c3f86b6 2119 (gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
d6473f56 2120
4fb84d99 2121 iowrite32(vm->pte_encode(addr, level, flags), pte);
d6473f56 2122
7c3f86b6 2123 ggtt->invalidate(vm->i915);
d6473f56
CW
2124}
2125
e76e9aeb
BW
2126/*
2127 * Binds an object into the global gtt with the specified cache level. The object
2128 * will be accessible to the GPU via commands whose operands reference offsets
2129 * within the global GTT as well as accessible by the GPU through the GMADR
2130 * mapped BAR (dev_priv->mm.gtt->gtt).
2131 */
853ba5d2 2132static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
4a234c5f 2133 struct i915_vma *vma,
75c7b0b8
CW
2134 enum i915_cache_level level,
2135 u32 flags)
e76e9aeb 2136{
ce7fda2e 2137 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
b31144c0 2138 gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
4a234c5f 2139 unsigned int i = vma->node.start >> PAGE_SHIFT;
b31144c0 2140 struct sgt_iter iter;
85d1225e 2141 dma_addr_t addr;
4a234c5f 2142 for_each_sgt_dma(addr, iter, vma->pages)
b31144c0
CW
2143 iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
2144 wmb();
0f9b91c7
BW
2145
2146 /* This next bit makes the above posting read even more important. We
2147 * want to flush the TLBs only after we're certain all the PTE updates
2148 * have finished.
2149 */
7c3f86b6 2150 ggtt->invalidate(vm->i915);
e76e9aeb
BW
2151}
2152
f7770bfd 2153static void nop_clear_range(struct i915_address_space *vm,
75c7b0b8 2154 u64 start, u64 length)
f7770bfd
CW
2155{
2156}
2157
94ec8f61 2158static void gen8_ggtt_clear_range(struct i915_address_space *vm,
75c7b0b8 2159 u64 start, u64 length)
94ec8f61 2160{
ce7fda2e 2161 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
782f1495
BW
2162 unsigned first_entry = start >> PAGE_SHIFT;
2163 unsigned num_entries = length >> PAGE_SHIFT;
894ccebe
CW
2164 const gen8_pte_t scratch_pte =
2165 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
2166 gen8_pte_t __iomem *gtt_base =
72e96d64
JL
2167 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2168 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
94ec8f61
BW
2169 int i;
2170
2171 if (WARN(num_entries > max_entries,
2172 "First entry = %d; Num entries = %d (max=%d)\n",
2173 first_entry, num_entries, max_entries))
2174 num_entries = max_entries;
2175
94ec8f61
BW
2176 for (i = 0; i < num_entries; i++)
2177 gen8_set_pte(&gtt_base[i], scratch_pte);
94ec8f61
BW
2178}
2179
0ef34ad6
JB
2180static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
2181{
2182 struct drm_i915_private *dev_priv = vm->i915;
2183
2184 /*
2185 * Make sure the internal GAM fifo has been cleared of all GTT
2186 * writes before exiting stop_machine(). This guarantees that
2187 * any aperture accesses waiting to start in another process
2188 * cannot back up behind the GTT writes causing a hang.
2189 * The register can be any arbitrary GAM register.
2190 */
2191 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2192}
2193
2194struct insert_page {
2195 struct i915_address_space *vm;
2196 dma_addr_t addr;
2197 u64 offset;
2198 enum i915_cache_level level;
2199};
2200
2201static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
2202{
2203 struct insert_page *arg = _arg;
2204
2205 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2206 bxt_vtd_ggtt_wa(arg->vm);
2207
2208 return 0;
2209}
2210
2211static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2212 dma_addr_t addr,
2213 u64 offset,
2214 enum i915_cache_level level,
2215 u32 unused)
2216{
2217 struct insert_page arg = { vm, addr, offset, level };
2218
2219 stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
2220}
2221
2222struct insert_entries {
2223 struct i915_address_space *vm;
4a234c5f 2224 struct i915_vma *vma;
0ef34ad6
JB
2225 enum i915_cache_level level;
2226};
2227
2228static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
2229{
2230 struct insert_entries *arg = _arg;
2231
4a234c5f 2232 gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
0ef34ad6
JB
2233 bxt_vtd_ggtt_wa(arg->vm);
2234
2235 return 0;
2236}
2237
2238static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
4a234c5f 2239 struct i915_vma *vma,
0ef34ad6
JB
2240 enum i915_cache_level level,
2241 u32 unused)
2242{
17369ba0 2243 struct insert_entries arg = { vm, vma, level };
0ef34ad6
JB
2244
2245 stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
2246}
2247
2248struct clear_range {
2249 struct i915_address_space *vm;
2250 u64 start;
2251 u64 length;
2252};
2253
2254static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
2255{
2256 struct clear_range *arg = _arg;
2257
2258 gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2259 bxt_vtd_ggtt_wa(arg->vm);
2260
2261 return 0;
2262}
2263
2264static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2265 u64 start,
2266 u64 length)
2267{
2268 struct clear_range arg = { vm, start, length };
2269
2270 stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
2271}
2272
853ba5d2 2273static void gen6_ggtt_clear_range(struct i915_address_space *vm,
75c7b0b8 2274 u64 start, u64 length)
7faf1ab2 2275{
ce7fda2e 2276 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
782f1495
BW
2277 unsigned first_entry = start >> PAGE_SHIFT;
2278 unsigned num_entries = length >> PAGE_SHIFT;
07749ef3 2279 gen6_pte_t scratch_pte, __iomem *gtt_base =
72e96d64
JL
2280 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2281 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
7faf1ab2
DV
2282 int i;
2283
2284 if (WARN(num_entries > max_entries,
2285 "First entry = %d; Num entries = %d (max=%d)\n",
2286 first_entry, num_entries, max_entries))
2287 num_entries = max_entries;
2288
8bcdd0f7 2289 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
4fb84d99 2290 I915_CACHE_LLC, 0);
828c7908 2291
7faf1ab2
DV
2292 for (i = 0; i < num_entries; i++)
2293 iowrite32(scratch_pte, &gtt_base[i]);
7faf1ab2
DV
2294}
2295
d6473f56
CW
2296static void i915_ggtt_insert_page(struct i915_address_space *vm,
2297 dma_addr_t addr,
75c7b0b8 2298 u64 offset,
d6473f56
CW
2299 enum i915_cache_level cache_level,
2300 u32 unused)
2301{
d6473f56
CW
2302 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2303 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
d6473f56
CW
2304
2305 intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
d6473f56
CW
2306}
2307
d369d2d9 2308static void i915_ggtt_insert_entries(struct i915_address_space *vm,
4a234c5f 2309 struct i915_vma *vma,
75c7b0b8
CW
2310 enum i915_cache_level cache_level,
2311 u32 unused)
7faf1ab2
DV
2312{
2313 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2314 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2315
4a234c5f
MA
2316 intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
2317 flags);
7faf1ab2
DV
2318}
2319
853ba5d2 2320static void i915_ggtt_clear_range(struct i915_address_space *vm,
75c7b0b8 2321 u64 start, u64 length)
7faf1ab2 2322{
2eedfc7d 2323 intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
7faf1ab2
DV
2324}
2325
70b9f6f8
DV
2326static int ggtt_bind_vma(struct i915_vma *vma,
2327 enum i915_cache_level cache_level,
2328 u32 flags)
0a878716 2329{
49d73912 2330 struct drm_i915_private *i915 = vma->vm->i915;
0a878716 2331 struct drm_i915_gem_object *obj = vma->obj;
ba7a5741 2332 u32 pte_flags;
0a878716 2333
ba7a5741
CW
2334 if (unlikely(!vma->pages)) {
2335 int ret = i915_get_ggtt_vma_pages(vma);
2336 if (ret)
2337 return ret;
2338 }
0a878716
DV
2339
2340 /* Currently applicable only to VLV */
ba7a5741 2341 pte_flags = 0;
0a878716
DV
2342 if (obj->gt_ro)
2343 pte_flags |= PTE_READ_ONLY;
2344
9c870d03 2345 intel_runtime_pm_get(i915);
4a234c5f 2346 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
9c870d03 2347 intel_runtime_pm_put(i915);
0a878716
DV
2348
2349 /*
2350 * Without aliasing PPGTT there's no difference between
2351 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2352 * upgrade to both bound if we bind either to avoid double-binding.
2353 */
3272db53 2354 vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
0a878716
DV
2355
2356 return 0;
2357}
2358
cbc4e9e6
CW
2359static void ggtt_unbind_vma(struct i915_vma *vma)
2360{
2361 struct drm_i915_private *i915 = vma->vm->i915;
2362
2363 intel_runtime_pm_get(i915);
2364 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2365 intel_runtime_pm_put(i915);
2366}
2367
0a878716
DV
2368static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2369 enum i915_cache_level cache_level,
2370 u32 flags)
d5bd1449 2371{
49d73912 2372 struct drm_i915_private *i915 = vma->vm->i915;
321d178e 2373 u32 pte_flags;
ff685975 2374 int ret;
70b9f6f8 2375
ba7a5741 2376 if (unlikely(!vma->pages)) {
ff685975 2377 ret = i915_get_ggtt_vma_pages(vma);
ba7a5741
CW
2378 if (ret)
2379 return ret;
2380 }
7faf1ab2 2381
24f3a8cf 2382 /* Currently applicable only to VLV */
321d178e
CW
2383 pte_flags = 0;
2384 if (vma->obj->gt_ro)
f329f5f6 2385 pte_flags |= PTE_READ_ONLY;
24f3a8cf 2386
ff685975
CW
2387 if (flags & I915_VMA_LOCAL_BIND) {
2388 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
2389
1f23475c
MA
2390 if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
2391 appgtt->base.allocate_va_range) {
ff685975
CW
2392 ret = appgtt->base.allocate_va_range(&appgtt->base,
2393 vma->node.start,
d567232c 2394 vma->size);
ff685975 2395 if (ret)
2f7399af 2396 goto err_pages;
ff685975
CW
2397 }
2398
4a234c5f
MA
2399 appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
2400 pte_flags);
ff685975
CW
2401 }
2402
3272db53 2403 if (flags & I915_VMA_GLOBAL_BIND) {
9c870d03 2404 intel_runtime_pm_get(i915);
4a234c5f 2405 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
9c870d03 2406 intel_runtime_pm_put(i915);
6f65e29a 2407 }
d5bd1449 2408
70b9f6f8 2409 return 0;
2f7399af
CW
2410
2411err_pages:
2412 if (!(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND))) {
2413 if (vma->pages != vma->obj->mm.pages) {
2414 GEM_BUG_ON(!vma->pages);
2415 sg_free_table(vma->pages);
2416 kfree(vma->pages);
2417 }
2418 vma->pages = NULL;
2419 }
2420 return ret;
d5bd1449
CW
2421}
2422
cbc4e9e6 2423static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
74163907 2424{
49d73912 2425 struct drm_i915_private *i915 = vma->vm->i915;
6f65e29a 2426
9c870d03
CW
2427 if (vma->flags & I915_VMA_GLOBAL_BIND) {
2428 intel_runtime_pm_get(i915);
cbc4e9e6 2429 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
9c870d03
CW
2430 intel_runtime_pm_put(i915);
2431 }
06615ee5 2432
cbc4e9e6
CW
2433 if (vma->flags & I915_VMA_LOCAL_BIND) {
2434 struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
2435
2436 vm->clear_range(vm, vma->node.start, vma->size);
2437 }
74163907
DV
2438}
2439
03ac84f1
CW
2440void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2441 struct sg_table *pages)
7c2e6fdf 2442{
52a05c30
DW
2443 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2444 struct device *kdev = &dev_priv->drm.pdev->dev;
307dc25b 2445 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5c042287 2446
307dc25b 2447 if (unlikely(ggtt->do_idle_maps)) {
228ec87c 2448 if (i915_gem_wait_for_idle(dev_priv, 0)) {
307dc25b
CW
2449 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2450 /* Wait a bit, in hopes it avoids the hang */
2451 udelay(10);
2452 }
2453 }
5c042287 2454
03ac84f1 2455 dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
7c2e6fdf 2456}
644ec02b 2457
45b186f1 2458static void i915_gtt_color_adjust(const struct drm_mm_node *node,
42d6ab48 2459 unsigned long color,
440fd528
TR
2460 u64 *start,
2461 u64 *end)
42d6ab48 2462{
a6508ded 2463 if (node->allocated && node->color != color)
f51455d4 2464 *start += I915_GTT_PAGE_SIZE;
42d6ab48 2465
a6508ded
CW
2466 /* Also leave a space between the unallocated reserved node after the
2467 * GTT and any objects within the GTT, i.e. we use the color adjustment
2468 * to insert a guard page to prevent prefetches crossing over the
2469 * GTT boundary.
2470 */
b44f97fd 2471 node = list_next_entry(node, node_list);
a6508ded 2472 if (node->color != color)
f51455d4 2473 *end -= I915_GTT_PAGE_SIZE;
42d6ab48 2474}
fbe5d36e 2475
6cde9a02
CW
2476int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
2477{
2478 struct i915_ggtt *ggtt = &i915->ggtt;
2479 struct i915_hw_ppgtt *ppgtt;
2480 int err;
2481
57202f47 2482 ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
1188bc66
CW
2483 if (IS_ERR(ppgtt))
2484 return PTR_ERR(ppgtt);
6cde9a02 2485
e565ceb0
CW
2486 if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
2487 err = -ENODEV;
2488 goto err_ppgtt;
2489 }
2490
6cde9a02 2491 if (ppgtt->base.allocate_va_range) {
e565ceb0
CW
2492 /* Note we only pre-allocate as far as the end of the global
2493 * GTT. On 48b / 4-level page-tables, the difference is very,
2494 * very significant! We have to preallocate as GVT/vgpu does
2495 * not like the page directory disappearing.
2496 */
6cde9a02 2497 err = ppgtt->base.allocate_va_range(&ppgtt->base,
e565ceb0 2498 0, ggtt->base.total);
6cde9a02 2499 if (err)
1188bc66 2500 goto err_ppgtt;
6cde9a02
CW
2501 }
2502
6cde9a02 2503 i915->mm.aliasing_ppgtt = ppgtt;
cbc4e9e6 2504
6cde9a02
CW
2505 WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
2506 ggtt->base.bind_vma = aliasing_gtt_bind_vma;
2507
cbc4e9e6
CW
2508 WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
2509 ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
2510
6cde9a02
CW
2511 return 0;
2512
6cde9a02 2513err_ppgtt:
1188bc66 2514 i915_ppgtt_put(ppgtt);
6cde9a02
CW
2515 return err;
2516}
2517
2518void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
2519{
2520 struct i915_ggtt *ggtt = &i915->ggtt;
2521 struct i915_hw_ppgtt *ppgtt;
2522
2523 ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
2524 if (!ppgtt)
2525 return;
2526
1188bc66 2527 i915_ppgtt_put(ppgtt);
6cde9a02
CW
2528
2529 ggtt->base.bind_vma = ggtt_bind_vma;
cbc4e9e6 2530 ggtt->base.unbind_vma = ggtt_unbind_vma;
6cde9a02
CW
2531}
2532
f6b9d5ca 2533int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
644ec02b 2534{
e78891ca
BW
2535 /* Let GEM Manage all of the aperture.
2536 *
2537 * However, leave one page at the end still bound to the scratch page.
2538 * There are a number of places where the hardware apparently prefetches
2539 * past the end of the object, and we've seen multiple hangs with the
2540 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2541 * aperture. One page should be enough to keep any prefetching inside
2542 * of the aperture.
2543 */
72e96d64 2544 struct i915_ggtt *ggtt = &dev_priv->ggtt;
ed2f3452 2545 unsigned long hole_start, hole_end;
f6b9d5ca 2546 struct drm_mm_node *entry;
fa76da34 2547 int ret;
644ec02b 2548
b02d22a3
ZW
2549 ret = intel_vgt_balloon(dev_priv);
2550 if (ret)
2551 return ret;
5dda8fa3 2552
95374d75 2553 /* Reserve a mappable slot for our lockless error capture */
4e64e553
CW
2554 ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
2555 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2556 0, ggtt->mappable_end,
2557 DRM_MM_INSERT_LOW);
95374d75
CW
2558 if (ret)
2559 return ret;
2560
ed2f3452 2561 /* Clear any non-preallocated blocks */
72e96d64 2562 drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
ed2f3452
CW
2563 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2564 hole_start, hole_end);
72e96d64 2565 ggtt->base.clear_range(&ggtt->base, hole_start,
4fb84d99 2566 hole_end - hole_start);
ed2f3452
CW
2567 }
2568
2569 /* And finally clear the reserved guard page */
f6b9d5ca 2570 ggtt->base.clear_range(&ggtt->base,
4fb84d99 2571 ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
6c5566a8 2572
97d6d7ab 2573 if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
6cde9a02 2574 ret = i915_gem_init_aliasing_ppgtt(dev_priv);
95374d75 2575 if (ret)
6cde9a02 2576 goto err;
fa76da34
DV
2577 }
2578
6c5566a8 2579 return 0;
95374d75 2580
95374d75
CW
2581err:
2582 drm_mm_remove_node(&ggtt->error_capture);
2583 return ret;
e76e9aeb
BW
2584}
2585
d85489d3
JL
2586/**
2587 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
97d6d7ab 2588 * @dev_priv: i915 device
d85489d3 2589 */
97d6d7ab 2590void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
90d0a0e8 2591{
72e96d64 2592 struct i915_ggtt *ggtt = &dev_priv->ggtt;
94d4a2a9
CW
2593 struct i915_vma *vma, *vn;
2594
2595 ggtt->base.closed = true;
2596
2597 mutex_lock(&dev_priv->drm.struct_mutex);
2598 WARN_ON(!list_empty(&ggtt->base.active_list));
2599 list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
2600 WARN_ON(i915_vma_unbind(vma));
2601 mutex_unlock(&dev_priv->drm.struct_mutex);
90d0a0e8 2602
97d6d7ab 2603 i915_gem_cleanup_stolen(&dev_priv->drm);
a4eba47b 2604
1188bc66
CW
2605 mutex_lock(&dev_priv->drm.struct_mutex);
2606 i915_gem_fini_aliasing_ppgtt(dev_priv);
2607
95374d75
CW
2608 if (drm_mm_node_allocated(&ggtt->error_capture))
2609 drm_mm_remove_node(&ggtt->error_capture);
2610
72e96d64 2611 if (drm_mm_initialized(&ggtt->base.mm)) {
b02d22a3 2612 intel_vgt_deballoon(dev_priv);
ed9724dd 2613 i915_address_space_fini(&ggtt->base);
90d0a0e8
DV
2614 }
2615
72e96d64 2616 ggtt->base.cleanup(&ggtt->base);
1188bc66 2617 mutex_unlock(&dev_priv->drm.struct_mutex);
f6b9d5ca
CW
2618
2619 arch_phys_wc_del(ggtt->mtrr);
f7bbe788 2620 io_mapping_fini(&ggtt->mappable);
90d0a0e8 2621}
70e32544 2622
2c642b07 2623static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
e76e9aeb
BW
2624{
2625 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2626 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2627 return snb_gmch_ctl << 20;
2628}
2629
2c642b07 2630static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
9459d252
BW
2631{
2632 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2633 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2634 if (bdw_gmch_ctl)
2635 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
562d55d9
BW
2636
2637#ifdef CONFIG_X86_32
2638 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
2639 if (bdw_gmch_ctl > 4)
2640 bdw_gmch_ctl = 4;
2641#endif
2642
9459d252
BW
2643 return bdw_gmch_ctl << 20;
2644}
2645
2c642b07 2646static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
d7f25f23
DL
2647{
2648 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2649 gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2650
2651 if (gmch_ctrl)
2652 return 1 << (20 + gmch_ctrl);
2653
2654 return 0;
2655}
2656
2c642b07 2657static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
e76e9aeb
BW
2658{
2659 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
2660 snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
a92d1a91 2661 return (size_t)snb_gmch_ctl << 25; /* 32 MB units */
e76e9aeb
BW
2662}
2663
2c642b07 2664static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
9459d252
BW
2665{
2666 bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2667 bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
a92d1a91 2668 return (size_t)bdw_gmch_ctl << 25; /* 32 MB units */
9459d252
BW
2669}
2670
d7f25f23
DL
2671static size_t chv_get_stolen_size(u16 gmch_ctrl)
2672{
2673 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
2674 gmch_ctrl &= SNB_GMCH_GMS_MASK;
2675
2676 /*
2677 * 0x0 to 0x10: 32MB increments starting at 0MB
2678 * 0x11 to 0x16: 4MB increments starting at 8MB
2679 * 0x17 to 0x1d: 4MB increments start at 36MB
2680 */
2681 if (gmch_ctrl < 0x11)
a92d1a91 2682 return (size_t)gmch_ctrl << 25;
d7f25f23 2683 else if (gmch_ctrl < 0x17)
a92d1a91 2684 return (size_t)(gmch_ctrl - 0x11 + 2) << 22;
d7f25f23 2685 else
a92d1a91 2686 return (size_t)(gmch_ctrl - 0x17 + 9) << 22;
d7f25f23
DL
2687}
2688
66375014
DL
2689static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
2690{
2691 gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2692 gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
2693
2694 if (gen9_gmch_ctl < 0xf0)
a92d1a91 2695 return (size_t)gen9_gmch_ctl << 25; /* 32 MB units */
66375014
DL
2696 else
2697 /* 4MB increments starting at 0xf0 for 4MB */
a92d1a91 2698 return (size_t)(gen9_gmch_ctl - 0xf0 + 1) << 22;
66375014
DL
2699}
2700
34c998b4 2701static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
63340133 2702{
49d73912
CW
2703 struct drm_i915_private *dev_priv = ggtt->base.i915;
2704 struct pci_dev *pdev = dev_priv->drm.pdev;
34c998b4 2705 phys_addr_t phys_addr;
8bcdd0f7 2706 int ret;
63340133
BW
2707
2708 /* For Modern GENs the PTEs and register space are split in the BAR */
34c998b4 2709 phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
63340133 2710
2a073f89
ID
2711 /*
2712 * On BXT writes larger than 64 bit to the GTT pagetable range will be
2713 * dropped. For WC mappings in general we have 64 byte burst writes
2714 * when the WC buffer is flushed, so we can't use it, but have to
2715 * resort to an uncached mapping. The WC issue is easily caught by the
2716 * readback check when writing GTT PTE entries.
2717 */
cc3f90f0 2718 if (IS_GEN9_LP(dev_priv))
34c998b4 2719 ggtt->gsm = ioremap_nocache(phys_addr, size);
2a073f89 2720 else
34c998b4 2721 ggtt->gsm = ioremap_wc(phys_addr, size);
72e96d64 2722 if (!ggtt->gsm) {
34c998b4 2723 DRM_ERROR("Failed to map the ggtt page table\n");
63340133
BW
2724 return -ENOMEM;
2725 }
2726
8448661d 2727 ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
8bcdd0f7 2728 if (ret) {
63340133
BW
2729 DRM_ERROR("Scratch setup failed\n");
2730 /* iounmap will also get called at remove, but meh */
72e96d64 2731 iounmap(ggtt->gsm);
8bcdd0f7 2732 return ret;
63340133
BW
2733 }
2734
4ad2af1e 2735 return 0;
63340133
BW
2736}
2737
4e34935f
RV
2738static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv)
2739{
2740 /* XXX: spec is unclear if this is still needed for CNL+ */
2741 if (!USES_PPGTT(dev_priv)) {
2742 I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_UC);
2743 return;
2744 }
2745
2746 I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC);
2747 I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
2748 I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
2749 I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC);
a338d5f8
ZW
2750 I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
2751 I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
2752 I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
2753 I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
4e34935f
RV
2754}
2755
fbe5d36e
BW
2756/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
2757 * bits. When using advanced contexts each context stores its own PAT, but
2758 * writing this data shouldn't be harmful even in those cases. */
ee0ce478 2759static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
fbe5d36e 2760{
75c7b0b8 2761 u64 pat;
fbe5d36e
BW
2762
2763 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
2764 GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
2765 GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
2766 GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
2767 GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
2768 GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
2769 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
2770 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
2771
2d1fe073 2772 if (!USES_PPGTT(dev_priv))
d6a8b72e
RV
2773 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
2774 * so RTL will always use the value corresponding to
2775 * pat_sel = 000".
2776 * So let's disable cache for GGTT to avoid screen corruptions.
2777 * MOCS still can be used though.
2778 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
2779 * before this patch, i.e. the same uncached + snooping access
2780 * like on gen6/7 seems to be in effect.
2781 * - So this just fixes blitter/render access. Again it looks
2782 * like it's not just uncached access, but uncached + snooping.
2783 * So we can still hold onto all our assumptions wrt cpu
2784 * clflushing on LLC machines.
2785 */
2786 pat = GEN8_PPAT(0, GEN8_PPAT_UC);
2787
fbe5d36e
BW
2788 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
2789 * write would work. */
7e435ad2
VS
2790 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
2791 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
fbe5d36e
BW
2792}
2793
ee0ce478
VS
2794static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
2795{
75c7b0b8 2796 u64 pat;
ee0ce478
VS
2797
2798 /*
2799 * Map WB on BDW to snooped on CHV.
2800 *
2801 * Only the snoop bit has meaning for CHV, the rest is
2802 * ignored.
2803 *
cf3d262e
VS
2804 * The hardware will never snoop for certain types of accesses:
2805 * - CPU GTT (GMADR->GGTT->no snoop->memory)
2806 * - PPGTT page tables
2807 * - some other special cycles
2808 *
2809 * As with BDW, we also need to consider the following for GT accesses:
2810 * "For GGTT, there is NO pat_sel[2:0] from the entry,
2811 * so RTL will always use the value corresponding to
2812 * pat_sel = 000".
2813 * Which means we must set the snoop bit in PAT entry 0
2814 * in order to keep the global status page working.
ee0ce478
VS
2815 */
2816 pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
2817 GEN8_PPAT(1, 0) |
2818 GEN8_PPAT(2, 0) |
2819 GEN8_PPAT(3, 0) |
2820 GEN8_PPAT(4, CHV_PPAT_SNOOP) |
2821 GEN8_PPAT(5, CHV_PPAT_SNOOP) |
2822 GEN8_PPAT(6, CHV_PPAT_SNOOP) |
2823 GEN8_PPAT(7, CHV_PPAT_SNOOP);
2824
7e435ad2
VS
2825 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
2826 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
ee0ce478
VS
2827}
2828
34c998b4
CW
2829static void gen6_gmch_remove(struct i915_address_space *vm)
2830{
2831 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2832
2833 iounmap(ggtt->gsm);
8448661d 2834 cleanup_scratch_page(vm);
34c998b4
CW
2835}
2836
d507d735 2837static int gen8_gmch_probe(struct i915_ggtt *ggtt)
63340133 2838{
49d73912 2839 struct drm_i915_private *dev_priv = ggtt->base.i915;
97d6d7ab 2840 struct pci_dev *pdev = dev_priv->drm.pdev;
34c998b4 2841 unsigned int size;
63340133 2842 u16 snb_gmch_ctl;
4519290a 2843 int err;
63340133
BW
2844
2845 /* TODO: We're not aware of mappable constraints on gen8 yet */
97d6d7ab
CW
2846 ggtt->mappable_base = pci_resource_start(pdev, 2);
2847 ggtt->mappable_end = pci_resource_len(pdev, 2);
63340133 2848
4519290a
ID
2849 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
2850 if (!err)
2851 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
2852 if (err)
2853 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
63340133 2854
97d6d7ab 2855 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
63340133 2856
97d6d7ab 2857 if (INTEL_GEN(dev_priv) >= 9) {
d507d735 2858 ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
34c998b4 2859 size = gen8_get_total_gtt_size(snb_gmch_ctl);
97d6d7ab 2860 } else if (IS_CHERRYVIEW(dev_priv)) {
d507d735 2861 ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
34c998b4 2862 size = chv_get_total_gtt_size(snb_gmch_ctl);
d7f25f23 2863 } else {
d507d735 2864 ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
34c998b4 2865 size = gen8_get_total_gtt_size(snb_gmch_ctl);
d7f25f23 2866 }
63340133 2867
34c998b4 2868 ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
63340133 2869
4e34935f
RV
2870 if (INTEL_GEN(dev_priv) >= 10)
2871 cnl_setup_private_ppat(dev_priv);
2872 else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
ee0ce478
VS
2873 chv_setup_private_ppat(dev_priv);
2874 else
2875 bdw_setup_private_ppat(dev_priv);
fbe5d36e 2876
34c998b4 2877 ggtt->base.cleanup = gen6_gmch_remove;
d507d735
JL
2878 ggtt->base.bind_vma = ggtt_bind_vma;
2879 ggtt->base.unbind_vma = ggtt_unbind_vma;
d6473f56 2880 ggtt->base.insert_page = gen8_ggtt_insert_page;
f7770bfd 2881 ggtt->base.clear_range = nop_clear_range;
48f112fe 2882 if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
f7770bfd
CW
2883 ggtt->base.clear_range = gen8_ggtt_clear_range;
2884
2885 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
f7770bfd 2886
0ef34ad6
JB
2887 /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
2888 if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
2889 ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
2890 ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
2891 if (ggtt->base.clear_range != nop_clear_range)
2892 ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
2893 }
2894
7c3f86b6
CW
2895 ggtt->invalidate = gen6_ggtt_invalidate;
2896
34c998b4 2897 return ggtt_probe_common(ggtt, size);
63340133
BW
2898}
2899
d507d735 2900static int gen6_gmch_probe(struct i915_ggtt *ggtt)
e76e9aeb 2901{
49d73912 2902 struct drm_i915_private *dev_priv = ggtt->base.i915;
97d6d7ab 2903 struct pci_dev *pdev = dev_priv->drm.pdev;
34c998b4 2904 unsigned int size;
e76e9aeb 2905 u16 snb_gmch_ctl;
4519290a 2906 int err;
e76e9aeb 2907
97d6d7ab
CW
2908 ggtt->mappable_base = pci_resource_start(pdev, 2);
2909 ggtt->mappable_end = pci_resource_len(pdev, 2);
41907ddc 2910
baa09f5f
BW
2911 /* 64/512MB is the current min/max we actually know of, but this is just
2912 * a coarse sanity check.
e76e9aeb 2913 */
34c998b4 2914 if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
d507d735 2915 DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
baa09f5f 2916 return -ENXIO;
e76e9aeb
BW
2917 }
2918
4519290a
ID
2919 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
2920 if (!err)
2921 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
2922 if (err)
2923 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
97d6d7ab 2924 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
e76e9aeb 2925
d507d735 2926 ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
e76e9aeb 2927
34c998b4
CW
2928 size = gen6_get_total_gtt_size(snb_gmch_ctl);
2929 ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
e76e9aeb 2930
d507d735 2931 ggtt->base.clear_range = gen6_ggtt_clear_range;
d6473f56 2932 ggtt->base.insert_page = gen6_ggtt_insert_page;
d507d735
JL
2933 ggtt->base.insert_entries = gen6_ggtt_insert_entries;
2934 ggtt->base.bind_vma = ggtt_bind_vma;
2935 ggtt->base.unbind_vma = ggtt_unbind_vma;
34c998b4
CW
2936 ggtt->base.cleanup = gen6_gmch_remove;
2937
7c3f86b6
CW
2938 ggtt->invalidate = gen6_ggtt_invalidate;
2939
34c998b4
CW
2940 if (HAS_EDRAM(dev_priv))
2941 ggtt->base.pte_encode = iris_pte_encode;
2942 else if (IS_HASWELL(dev_priv))
2943 ggtt->base.pte_encode = hsw_pte_encode;
2944 else if (IS_VALLEYVIEW(dev_priv))
2945 ggtt->base.pte_encode = byt_pte_encode;
2946 else if (INTEL_GEN(dev_priv) >= 7)
2947 ggtt->base.pte_encode = ivb_pte_encode;
2948 else
2949 ggtt->base.pte_encode = snb_pte_encode;
7faf1ab2 2950
34c998b4 2951 return ggtt_probe_common(ggtt, size);
e76e9aeb
BW
2952}
2953
34c998b4 2954static void i915_gmch_remove(struct i915_address_space *vm)
e76e9aeb 2955{
34c998b4 2956 intel_gmch_remove();
644ec02b 2957}
baa09f5f 2958
d507d735 2959static int i915_gmch_probe(struct i915_ggtt *ggtt)
baa09f5f 2960{
49d73912 2961 struct drm_i915_private *dev_priv = ggtt->base.i915;
baa09f5f
BW
2962 int ret;
2963
91c8a326 2964 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
baa09f5f
BW
2965 if (!ret) {
2966 DRM_ERROR("failed to set up gmch\n");
2967 return -EIO;
2968 }
2969
edd1f2fe
CW
2970 intel_gtt_get(&ggtt->base.total,
2971 &ggtt->stolen_size,
2972 &ggtt->mappable_base,
2973 &ggtt->mappable_end);
baa09f5f 2974
97d6d7ab 2975 ggtt->do_idle_maps = needs_idle_maps(dev_priv);
d6473f56 2976 ggtt->base.insert_page = i915_ggtt_insert_page;
d507d735
JL
2977 ggtt->base.insert_entries = i915_ggtt_insert_entries;
2978 ggtt->base.clear_range = i915_ggtt_clear_range;
2979 ggtt->base.bind_vma = ggtt_bind_vma;
2980 ggtt->base.unbind_vma = ggtt_unbind_vma;
34c998b4 2981 ggtt->base.cleanup = i915_gmch_remove;
baa09f5f 2982
7c3f86b6
CW
2983 ggtt->invalidate = gmch_ggtt_invalidate;
2984
d507d735 2985 if (unlikely(ggtt->do_idle_maps))
c0a7f818
CW
2986 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
2987
baa09f5f
BW
2988 return 0;
2989}
2990
d85489d3 2991/**
0088e522 2992 * i915_ggtt_probe_hw - Probe GGTT hardware location
97d6d7ab 2993 * @dev_priv: i915 device
d85489d3 2994 */
97d6d7ab 2995int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
baa09f5f 2996{
62106b4f 2997 struct i915_ggtt *ggtt = &dev_priv->ggtt;
baa09f5f
BW
2998 int ret;
2999
49d73912 3000 ggtt->base.i915 = dev_priv;
8448661d 3001 ggtt->base.dma = &dev_priv->drm.pdev->dev;
c114f76a 3002
34c998b4
CW
3003 if (INTEL_GEN(dev_priv) <= 5)
3004 ret = i915_gmch_probe(ggtt);
3005 else if (INTEL_GEN(dev_priv) < 8)
3006 ret = gen6_gmch_probe(ggtt);
3007 else
3008 ret = gen8_gmch_probe(ggtt);
a54c0c27 3009 if (ret)
baa09f5f 3010 return ret;
baa09f5f 3011
db9309a5
CW
3012 /* Trim the GGTT to fit the GuC mappable upper range (when enabled).
3013 * This is easier than doing range restriction on the fly, as we
3014 * currently don't have any bits spare to pass in this upper
3015 * restriction!
3016 */
3017 if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
3018 ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
3019 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3020 }
3021
c890e2d5
CW
3022 if ((ggtt->base.total - 1) >> 32) {
3023 DRM_ERROR("We never expected a Global GTT with more than 32bits"
f6b9d5ca 3024 " of address space! Found %lldM!\n",
c890e2d5
CW
3025 ggtt->base.total >> 20);
3026 ggtt->base.total = 1ULL << 32;
3027 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3028 }
3029
f6b9d5ca
CW
3030 if (ggtt->mappable_end > ggtt->base.total) {
3031 DRM_ERROR("mappable aperture extends past end of GGTT,"
3032 " aperture=%llx, total=%llx\n",
3033 ggtt->mappable_end, ggtt->base.total);
3034 ggtt->mappable_end = ggtt->base.total;
3035 }
3036
baa09f5f 3037 /* GMADR is the PCI mmio aperture into the global GTT. */
c44ef60e 3038 DRM_INFO("Memory usable by graphics device = %lluM\n",
62106b4f
JL
3039 ggtt->base.total >> 20);
3040 DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
edd1f2fe 3041 DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
80debff8 3042 if (intel_vtd_active())
5db6c735 3043 DRM_INFO("VT-d active for gfx access\n");
baa09f5f
BW
3044
3045 return 0;
0088e522
CW
3046}
3047
3048/**
3049 * i915_ggtt_init_hw - Initialize GGTT hardware
97d6d7ab 3050 * @dev_priv: i915 device
0088e522 3051 */
97d6d7ab 3052int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
0088e522 3053{
0088e522
CW
3054 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3055 int ret;
3056
f6b9d5ca
CW
3057 INIT_LIST_HEAD(&dev_priv->vm_list);
3058
a6508ded
CW
3059 /* Note that we use page colouring to enforce a guard page at the
3060 * end of the address space. This is required as the CS may prefetch
3061 * beyond the end of the batch buffer, across the page boundary,
3062 * and beyond the end of the GTT if we do not provide a guard.
f6b9d5ca 3063 */
80b204bc 3064 mutex_lock(&dev_priv->drm.struct_mutex);
80b204bc 3065 i915_address_space_init(&ggtt->base, dev_priv, "[global]");
a6508ded 3066 if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
f6b9d5ca 3067 ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
80b204bc 3068 mutex_unlock(&dev_priv->drm.struct_mutex);
f6b9d5ca 3069
f7bbe788
CW
3070 if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
3071 dev_priv->ggtt.mappable_base,
3072 dev_priv->ggtt.mappable_end)) {
f6b9d5ca
CW
3073 ret = -EIO;
3074 goto out_gtt_cleanup;
3075 }
3076
3077 ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
3078
0088e522
CW
3079 /*
3080 * Initialise stolen early so that we may reserve preallocated
3081 * objects for the BIOS to KMS transition.
3082 */
7ace3d30 3083 ret = i915_gem_init_stolen(dev_priv);
0088e522
CW
3084 if (ret)
3085 goto out_gtt_cleanup;
3086
3087 return 0;
a4eba47b
ID
3088
3089out_gtt_cleanup:
72e96d64 3090 ggtt->base.cleanup(&ggtt->base);
a4eba47b 3091 return ret;
baa09f5f 3092}
6f65e29a 3093
97d6d7ab 3094int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
ac840ae5 3095{
97d6d7ab 3096 if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
ac840ae5
VS
3097 return -EIO;
3098
3099 return 0;
3100}
3101
7c3f86b6
CW
3102void i915_ggtt_enable_guc(struct drm_i915_private *i915)
3103{
04f7b24e
CW
3104 GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
3105
7c3f86b6
CW
3106 i915->ggtt.invalidate = guc_ggtt_invalidate;
3107}
3108
3109void i915_ggtt_disable_guc(struct drm_i915_private *i915)
3110{
04f7b24e
CW
3111 /* We should only be called after i915_ggtt_enable_guc() */
3112 GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
3113
3114 i915->ggtt.invalidate = gen6_ggtt_invalidate;
7c3f86b6
CW
3115}
3116
275a991c 3117void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
fa42331b 3118{
72e96d64 3119 struct i915_ggtt *ggtt = &dev_priv->ggtt;
fbb30a5c 3120 struct drm_i915_gem_object *obj, *on;
fa42331b 3121
dc97997a 3122 i915_check_and_clear_faults(dev_priv);
fa42331b
DV
3123
3124 /* First fill our portion of the GTT with scratch pages */
381b943b 3125 ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
fa42331b 3126
fbb30a5c
CW
3127 ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
3128
3129 /* clflush objects bound into the GGTT and rebind them. */
3130 list_for_each_entry_safe(obj, on,
56cea323 3131 &dev_priv->mm.bound_list, global_link) {
fbb30a5c
CW
3132 bool ggtt_bound = false;
3133 struct i915_vma *vma;
3134
1c7f4bca 3135 list_for_each_entry(vma, &obj->vma_list, obj_link) {
72e96d64 3136 if (vma->vm != &ggtt->base)
2c3d9984 3137 continue;
fa42331b 3138
fbb30a5c
CW
3139 if (!i915_vma_unbind(vma))
3140 continue;
3141
2c3d9984
TU
3142 WARN_ON(i915_vma_bind(vma, obj->cache_level,
3143 PIN_UPDATE));
fbb30a5c 3144 ggtt_bound = true;
2c3d9984
TU
3145 }
3146
fbb30a5c 3147 if (ggtt_bound)
975f7ff4 3148 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
2c3d9984 3149 }
fa42331b 3150
fbb30a5c
CW
3151 ggtt->base.closed = false;
3152
275a991c 3153 if (INTEL_GEN(dev_priv) >= 8) {
4e34935f
RV
3154 if (INTEL_GEN(dev_priv) >= 10)
3155 cnl_setup_private_ppat(dev_priv);
3156 else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
fa42331b
DV
3157 chv_setup_private_ppat(dev_priv);
3158 else
3159 bdw_setup_private_ppat(dev_priv);
3160
3161 return;
3162 }
3163
275a991c 3164 if (USES_PPGTT(dev_priv)) {
72e96d64
JL
3165 struct i915_address_space *vm;
3166
fa42331b 3167 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
e5716f55 3168 struct i915_hw_ppgtt *ppgtt;
fa42331b 3169
2bfa996e 3170 if (i915_is_ggtt(vm))
fa42331b 3171 ppgtt = dev_priv->mm.aliasing_ppgtt;
e5716f55
JL
3172 else
3173 ppgtt = i915_vm_to_ppgtt(vm);
fa42331b 3174
16a011c8 3175 gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
fa42331b
DV
3176 }
3177 }
3178
7c3f86b6 3179 i915_ggtt_invalidate(dev_priv);
fa42331b
DV
3180}
3181
804beb4b 3182static struct scatterlist *
2d7f3bdb 3183rotate_pages(const dma_addr_t *in, unsigned int offset,
804beb4b 3184 unsigned int width, unsigned int height,
87130255 3185 unsigned int stride,
804beb4b 3186 struct sg_table *st, struct scatterlist *sg)
50470bb0
TU
3187{
3188 unsigned int column, row;
3189 unsigned int src_idx;
50470bb0 3190
50470bb0 3191 for (column = 0; column < width; column++) {
87130255 3192 src_idx = stride * (height - 1) + column;
50470bb0
TU
3193 for (row = 0; row < height; row++) {
3194 st->nents++;
3195 /* We don't need the pages, but need to initialize
3196 * the entries so the sg list can be happily traversed.
3197 * The only thing we need are DMA addresses.
3198 */
3199 sg_set_page(sg, NULL, PAGE_SIZE, 0);
804beb4b 3200 sg_dma_address(sg) = in[offset + src_idx];
50470bb0
TU
3201 sg_dma_len(sg) = PAGE_SIZE;
3202 sg = sg_next(sg);
87130255 3203 src_idx -= stride;
50470bb0
TU
3204 }
3205 }
804beb4b
TU
3206
3207 return sg;
50470bb0
TU
3208}
3209
ba7a5741
CW
3210static noinline struct sg_table *
3211intel_rotate_pages(struct intel_rotation_info *rot_info,
3212 struct drm_i915_gem_object *obj)
50470bb0 3213{
75c7b0b8 3214 const unsigned long n_pages = obj->base.size / PAGE_SIZE;
6687c906 3215 unsigned int size = intel_rotation_info_size(rot_info);
85d1225e
DG
3216 struct sgt_iter sgt_iter;
3217 dma_addr_t dma_addr;
50470bb0
TU
3218 unsigned long i;
3219 dma_addr_t *page_addr_list;
3220 struct sg_table *st;
89e3e142 3221 struct scatterlist *sg;
1d00dad5 3222 int ret = -ENOMEM;
50470bb0 3223
50470bb0 3224 /* Allocate a temporary list of source pages for random access. */
2098105e 3225 page_addr_list = kvmalloc_array(n_pages,
f2a85e19 3226 sizeof(dma_addr_t),
0ee931c4 3227 GFP_KERNEL);
50470bb0
TU
3228 if (!page_addr_list)
3229 return ERR_PTR(ret);
3230
3231 /* Allocate target SG list. */
3232 st = kmalloc(sizeof(*st), GFP_KERNEL);
3233 if (!st)
3234 goto err_st_alloc;
3235
6687c906 3236 ret = sg_alloc_table(st, size, GFP_KERNEL);
50470bb0
TU
3237 if (ret)
3238 goto err_sg_alloc;
3239
3240 /* Populate source page list from the object. */
3241 i = 0;
a4f5ea64 3242 for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
85d1225e 3243 page_addr_list[i++] = dma_addr;
50470bb0 3244
85d1225e 3245 GEM_BUG_ON(i != n_pages);
11f20322
VS
3246 st->nents = 0;
3247 sg = st->sgl;
3248
6687c906
VS
3249 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
3250 sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
3251 rot_info->plane[i].width, rot_info->plane[i].height,
3252 rot_info->plane[i].stride, st, sg);
89e3e142
TU
3253 }
3254
6687c906
VS
3255 DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
3256 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
50470bb0 3257
2098105e 3258 kvfree(page_addr_list);
50470bb0
TU
3259
3260 return st;
3261
3262err_sg_alloc:
3263 kfree(st);
3264err_st_alloc:
2098105e 3265 kvfree(page_addr_list);
50470bb0 3266
6687c906
VS
3267 DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3268 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3269
50470bb0
TU
3270 return ERR_PTR(ret);
3271}
ec7adb6e 3272
ba7a5741 3273static noinline struct sg_table *
8bd7ef16
JL
3274intel_partial_pages(const struct i915_ggtt_view *view,
3275 struct drm_i915_gem_object *obj)
3276{
3277 struct sg_table *st;
d2a84a76 3278 struct scatterlist *sg, *iter;
8bab1193 3279 unsigned int count = view->partial.size;
d2a84a76 3280 unsigned int offset;
8bd7ef16
JL
3281 int ret = -ENOMEM;
3282
3283 st = kmalloc(sizeof(*st), GFP_KERNEL);
3284 if (!st)
3285 goto err_st_alloc;
3286
d2a84a76 3287 ret = sg_alloc_table(st, count, GFP_KERNEL);
8bd7ef16
JL
3288 if (ret)
3289 goto err_sg_alloc;
3290
8bab1193 3291 iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
d2a84a76
CW
3292 GEM_BUG_ON(!iter);
3293
8bd7ef16
JL
3294 sg = st->sgl;
3295 st->nents = 0;
d2a84a76
CW
3296 do {
3297 unsigned int len;
8bd7ef16 3298
d2a84a76
CW
3299 len = min(iter->length - (offset << PAGE_SHIFT),
3300 count << PAGE_SHIFT);
3301 sg_set_page(sg, NULL, len, 0);
3302 sg_dma_address(sg) =
3303 sg_dma_address(iter) + (offset << PAGE_SHIFT);
3304 sg_dma_len(sg) = len;
8bd7ef16 3305
8bd7ef16 3306 st->nents++;
d2a84a76
CW
3307 count -= len >> PAGE_SHIFT;
3308 if (count == 0) {
3309 sg_mark_end(sg);
3310 return st;
3311 }
8bd7ef16 3312
d2a84a76
CW
3313 sg = __sg_next(sg);
3314 iter = __sg_next(iter);
3315 offset = 0;
3316 } while (1);
8bd7ef16
JL
3317
3318err_sg_alloc:
3319 kfree(st);
3320err_st_alloc:
3321 return ERR_PTR(ret);
3322}
3323
70b9f6f8 3324static int
50470bb0 3325i915_get_ggtt_vma_pages(struct i915_vma *vma)
fe14d5f4 3326{
ba7a5741 3327 int ret;
50470bb0 3328
2c3a3f44
CW
3329 /* The vma->pages are only valid within the lifespan of the borrowed
3330 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
3331 * must be the vma->pages. A simple rule is that vma->pages must only
3332 * be accessed when the obj->mm.pages are pinned.
3333 */
3334 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3335
ba7a5741
CW
3336 switch (vma->ggtt_view.type) {
3337 case I915_GGTT_VIEW_NORMAL:
3338 vma->pages = vma->obj->mm.pages;
fe14d5f4
TU
3339 return 0;
3340
ba7a5741 3341 case I915_GGTT_VIEW_ROTATED:
247177dd 3342 vma->pages =
ba7a5741
CW
3343 intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
3344 break;
3345
3346 case I915_GGTT_VIEW_PARTIAL:
247177dd 3347 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
ba7a5741
CW
3348 break;
3349
3350 default:
fe14d5f4
TU
3351 WARN_ONCE(1, "GGTT view %u not implemented!\n",
3352 vma->ggtt_view.type);
ba7a5741
CW
3353 return -EINVAL;
3354 }
fe14d5f4 3355
ba7a5741
CW
3356 ret = 0;
3357 if (unlikely(IS_ERR(vma->pages))) {
247177dd
CW
3358 ret = PTR_ERR(vma->pages);
3359 vma->pages = NULL;
50470bb0
TU
3360 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3361 vma->ggtt_view.type, ret);
fe14d5f4 3362 }
50470bb0 3363 return ret;
fe14d5f4
TU
3364}
3365
625d988a
CW
3366/**
3367 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
a4dbf7cf
CW
3368 * @vm: the &struct i915_address_space
3369 * @node: the &struct drm_mm_node (typically i915_vma.mode)
3370 * @size: how much space to allocate inside the GTT,
3371 * must be #I915_GTT_PAGE_SIZE aligned
3372 * @offset: where to insert inside the GTT,
3373 * must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
3374 * (@offset + @size) must fit within the address space
3375 * @color: color to apply to node, if this node is not from a VMA,
3376 * color must be #I915_COLOR_UNEVICTABLE
3377 * @flags: control search and eviction behaviour
625d988a
CW
3378 *
3379 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
3380 * the address space (using @size and @color). If the @node does not fit, it
3381 * tries to evict any overlapping nodes from the GTT, including any
3382 * neighbouring nodes if the colors do not match (to ensure guard pages between
3383 * differing domains). See i915_gem_evict_for_node() for the gory details
3384 * on the eviction algorithm. #PIN_NONBLOCK may used to prevent waiting on
3385 * evicting active overlapping objects, and any overlapping node that is pinned
3386 * or marked as unevictable will also result in failure.
3387 *
3388 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3389 * asked to wait for eviction and interrupted.
3390 */
3391int i915_gem_gtt_reserve(struct i915_address_space *vm,
3392 struct drm_mm_node *node,
3393 u64 size, u64 offset, unsigned long color,
3394 unsigned int flags)
3395{
3396 int err;
3397
3398 GEM_BUG_ON(!size);
3399 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3400 GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
3401 GEM_BUG_ON(range_overflows(offset, size, vm->total));
3fec7ec4 3402 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
9734ad13 3403 GEM_BUG_ON(drm_mm_node_allocated(node));
625d988a
CW
3404
3405 node->size = size;
3406 node->start = offset;
3407 node->color = color;
3408
3409 err = drm_mm_reserve_node(&vm->mm, node);
3410 if (err != -ENOSPC)
3411 return err;
3412
616d9cee
CW
3413 if (flags & PIN_NOEVICT)
3414 return -ENOSPC;
3415
625d988a
CW
3416 err = i915_gem_evict_for_node(vm, node, flags);
3417 if (err == 0)
3418 err = drm_mm_reserve_node(&vm->mm, node);
3419
3420 return err;
3421}
3422
606fec95
CW
3423static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
3424{
3425 u64 range, addr;
3426
3427 GEM_BUG_ON(range_overflows(start, len, end));
3428 GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
3429
3430 range = round_down(end - len, align) - round_up(start, align);
3431 if (range) {
3432 if (sizeof(unsigned long) == sizeof(u64)) {
3433 addr = get_random_long();
3434 } else {
3435 addr = get_random_int();
3436 if (range > U32_MAX) {
3437 addr <<= 32;
3438 addr |= get_random_int();
3439 }
3440 }
3441 div64_u64_rem(addr, range, &addr);
3442 start += addr;
3443 }
3444
3445 return round_up(start, align);
3446}
3447
e007b19d
CW
3448/**
3449 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
a4dbf7cf
CW
3450 * @vm: the &struct i915_address_space
3451 * @node: the &struct drm_mm_node (typically i915_vma.node)
3452 * @size: how much space to allocate inside the GTT,
3453 * must be #I915_GTT_PAGE_SIZE aligned
3454 * @alignment: required alignment of starting offset, may be 0 but
3455 * if specified, this must be a power-of-two and at least
3456 * #I915_GTT_MIN_ALIGNMENT
3457 * @color: color to apply to node
3458 * @start: start of any range restriction inside GTT (0 for all),
e007b19d 3459 * must be #I915_GTT_PAGE_SIZE aligned
a4dbf7cf
CW
3460 * @end: end of any range restriction inside GTT (U64_MAX for all),
3461 * must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
3462 * @flags: control search and eviction behaviour
e007b19d
CW
3463 *
3464 * i915_gem_gtt_insert() first searches for an available hole into which
3465 * is can insert the node. The hole address is aligned to @alignment and
3466 * its @size must then fit entirely within the [@start, @end] bounds. The
3467 * nodes on either side of the hole must match @color, or else a guard page
3468 * will be inserted between the two nodes (or the node evicted). If no
606fec95
CW
3469 * suitable hole is found, first a victim is randomly selected and tested
3470 * for eviction, otherwise then the LRU list of objects within the GTT
e007b19d
CW
3471 * is scanned to find the first set of replacement nodes to create the hole.
3472 * Those old overlapping nodes are evicted from the GTT (and so must be
3473 * rebound before any future use). Any node that is currently pinned cannot
3474 * be evicted (see i915_vma_pin()). Similar if the node's VMA is currently
3475 * active and #PIN_NONBLOCK is specified, that node is also skipped when
3476 * searching for an eviction candidate. See i915_gem_evict_something() for
3477 * the gory details on the eviction algorithm.
3478 *
3479 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3480 * asked to wait for eviction and interrupted.
3481 */
3482int i915_gem_gtt_insert(struct i915_address_space *vm,
3483 struct drm_mm_node *node,
3484 u64 size, u64 alignment, unsigned long color,
3485 u64 start, u64 end, unsigned int flags)
3486{
4e64e553 3487 enum drm_mm_insert_mode mode;
606fec95 3488 u64 offset;
e007b19d
CW
3489 int err;
3490
3491 lockdep_assert_held(&vm->i915->drm.struct_mutex);
3492 GEM_BUG_ON(!size);
3493 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3494 GEM_BUG_ON(alignment && !is_power_of_2(alignment));
3495 GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
3496 GEM_BUG_ON(start >= end);
3497 GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
3498 GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
3fec7ec4 3499 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
9734ad13 3500 GEM_BUG_ON(drm_mm_node_allocated(node));
e007b19d
CW
3501
3502 if (unlikely(range_overflows(start, size, end)))
3503 return -ENOSPC;
3504
3505 if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
3506 return -ENOSPC;
3507
4e64e553
CW
3508 mode = DRM_MM_INSERT_BEST;
3509 if (flags & PIN_HIGH)
3510 mode = DRM_MM_INSERT_HIGH;
3511 if (flags & PIN_MAPPABLE)
3512 mode = DRM_MM_INSERT_LOW;
e007b19d
CW
3513
3514 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3515 * so we know that we always have a minimum alignment of 4096.
3516 * The drm_mm range manager is optimised to return results
3517 * with zero alignment, so where possible use the optimal
3518 * path.
3519 */
3520 BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
3521 if (alignment <= I915_GTT_MIN_ALIGNMENT)
3522 alignment = 0;
3523
4e64e553
CW
3524 err = drm_mm_insert_node_in_range(&vm->mm, node,
3525 size, alignment, color,
3526 start, end, mode);
e007b19d
CW
3527 if (err != -ENOSPC)
3528 return err;
3529
616d9cee
CW
3530 if (flags & PIN_NOEVICT)
3531 return -ENOSPC;
3532
606fec95
CW
3533 /* No free space, pick a slot at random.
3534 *
3535 * There is a pathological case here using a GTT shared between
3536 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
3537 *
3538 * |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
3539 * (64k objects) (448k objects)
3540 *
3541 * Now imagine that the eviction LRU is ordered top-down (just because
3542 * pathology meets real life), and that we need to evict an object to
3543 * make room inside the aperture. The eviction scan then has to walk
3544 * the 448k list before it finds one within range. And now imagine that
3545 * it has to search for a new hole between every byte inside the memcpy,
3546 * for several simultaneous clients.
3547 *
3548 * On a full-ppgtt system, if we have run out of available space, there
3549 * will be lots and lots of objects in the eviction list! Again,
3550 * searching that LRU list may be slow if we are also applying any
3551 * range restrictions (e.g. restriction to low 4GiB) and so, for
3552 * simplicity and similarilty between different GTT, try the single
3553 * random replacement first.
3554 */
3555 offset = random_offset(start, end,
3556 size, alignment ?: I915_GTT_MIN_ALIGNMENT);
3557 err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
3558 if (err != -ENOSPC)
3559 return err;
3560
3561 /* Randomly selected placement is pinned, do a search */
e007b19d
CW
3562 err = i915_gem_evict_something(vm, size, alignment, color,
3563 start, end, flags);
3564 if (err)
3565 return err;
3566
4e64e553
CW
3567 return drm_mm_insert_node_in_range(&vm->mm, node,
3568 size, alignment, color,
3569 start, end, DRM_MM_INSERT_EVICT);
e007b19d 3570}
3b5bb0a3
CW
3571
3572#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
3573#include "selftests/mock_gtt.c"
1c42819a 3574#include "selftests/i915_gem_gtt.c"
3b5bb0a3 3575#endif