/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <linux/io.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <drm/drm_cache.h>
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_trace.h"

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions are calling it.
 */

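/**
 * radeon_update_memory_usage - track per-domain memory usage
 *
 * @bo: buffer object whose size is being accounted
 * @mem_type: TTM memory type (TTM_PL_TT or TTM_PL_VRAM)
 * @sign: +1 to add the BO's size to the counter, -1 to subtract it
 *
 * Updates the rdev->gtt_usage / rdev->vram_usage atomic counters.
 */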
static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

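/**
 * radeon_ttm_bo_destroy - TTM destroy callback for radeon BOs
 *
 * @tbo: TTM buffer object whose last reference was dropped
 *
 * Drops the memory-usage accounting, unlinks the BO from the GEM object
 * list, releases its surface register and frees the radeon_bo structure.
 */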
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON_ONCE(!list_empty(&bo->va));
	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

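/**
 * radeon_ttm_placement_from_domain - translate a radeon domain mask into
 * TTM placements
 *
 * @rbo: buffer object to set up
 * @domain: mask of RADEON_GEM_DOMAIN_VRAM/GTT/CPU
 *
 * Fills rbo->placements according to the requested domains and the BO's
 * caching flags; falls back to a system placement when no domain bit is
 * set, and restricts CPU-accessible BOs to the visible part of VRAM.
 */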
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM) {
		/* Try placing BOs which don't need CPU access outside of the
		 * CPU accessible part of VRAM
		 */
		if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
			rbo->placements[c].fpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
						     TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_VRAM;
		}

		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED |
					     TTM_PL_FLAG_VRAM;
	}

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_TT;
		}
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
		    rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;
	}

	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !rbo->placements[i].fpfn)
			rbo->placements[i].lpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}
}

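/**
 * radeon_bo_create - allocate and initialize a radeon buffer object
 *
 * @rdev: radeon device
 * @size: requested size in bytes, rounded up to page granularity
 * @byte_align: byte alignment of the buffer
 * @kernel: true for kernel-internal allocations (ttm_bo_type_kernel)
 * @domain: initial RADEON_GEM_DOMAIN_* placement mask
 * @flags: RADEON_GEM_* caching/access flags
 * @sg: optional scatter/gather table for imported dma-bufs
 * @resv: optional reservation object to share
 * @bo_ptr: where to return the new buffer object
 *
 * Returns 0 on success or a negative error code on failure.
 */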
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel,
		     u32 domain, u32 flags, struct sg_table *sg,
		     struct dma_resv *resv,
		     struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size);
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

	/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
	 */
	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */
#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & RADEON_GEM_GTT_WC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~RADEON_GEM_GTT_WC;
#endif

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, acc_size,
			sg, resv, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

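/**
 * radeon_bo_kmap - map a buffer object into kernel address space
 *
 * @bo: buffer object to map (must be reserved)
 * @ptr: optional return for the kernel virtual address
 *
 * Reuses an existing kernel mapping if one is already set up.
 * Returns 0 on success or a negative error code on failure.
 */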
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;
	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}

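/**
 * radeon_bo_pin_restricted - pin a buffer object within an address range
 *
 * @bo: buffer object to pin (must be reserved)
 * @domain: RADEON_GEM_DOMAIN_* the BO must end up in
 * @max_offset: highest GPU address the BO may occupy, 0 for no limit
 * @gpu_addr: optional return for the pinned GPU offset
 *
 * Increments the pin count if the BO is already pinned, otherwise
 * validates it into @domain with eviction disabled.  Returns 0 on
 * success or a negative error code on failure.
 */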
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
		return -EPERM;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
		/* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}

	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force pinning into visible VRAM */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

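/**
 * radeon_bo_unpin - decrement a buffer object's pin count
 *
 * @bo: buffer object to unpin (must be reserved)
 *
 * When the pin count drops to zero, clears TTM_PL_FLAG_NO_EVICT and
 * revalidates the BO so it becomes evictable again.  Returns 0 on
 * success or a negative error code on failure.
 */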
int radeon_bo_unpin(struct radeon_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
#ifndef CONFIG_HIBERNATION
	if (rdev->flags & RADEON_IS_IGP) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
#endif
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

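/**
 * radeon_bo_force_delete - release BOs userspace leaked at teardown
 *
 * @rdev: radeon device
 *
 * Called on driver unload; complains about and drops the last reference
 * of any GEM object still on the device's object list.
 */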
void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
			*((unsigned long *)&bo->tbo.base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put_unlocked(&bo->tbo.base);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(rdev->mc.aper_base,
				   rdev->mc.aper_size);

	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 rdev->mc.mc_vram_size >> 20,
		 (unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
	arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
}

/* Returns how many bytes TTM can move per IB. */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *    __________________
	 *   1/4 of -|\               |
	 *   VRAM    | \              |
	 *           |  \             |
	 *           |   \            |
	 *           |    \           |
	 *           |     \          |
	 *           |      \         |
	 *           |       \________|1 MB
	 *           |----------------|
	 *    VRAM 0 %             100 %
	 *         used            used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

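	/* Worked example (illustrative numbers): with 1024 MB of VRAM and
	 * 256 MB in use, half_vram = 512 MB, half_free_vram = 256 MB and
	 * the threshold is 128 MB; once usage reaches 512 MB or more the
	 * threshold bottoms out at the 1 MB floor below.
	 */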
	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}

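/**
 * radeon_bo_list_validate - place all BOs of a command submission
 *
 * @rdev: radeon device
 * @ticket: ww_mutex acquire context used to reserve the list
 * @head: list of radeon_bo_list entries to validate
 * @ring: ring index the command submission targets
 *
 * Reserves every BO on the list and validates each one into its
 * preferred domain, falling back to the allowed domains on failure and
 * limiting the bytes moved per IB to the threshold computed above.
 */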
int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_bo_list *lobj;
	struct list_head duplicates;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	INIT_LIST_HEAD(&duplicates);
	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		struct radeon_bo *bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->preferred_domains;
			u32 allowed = lobj->allowed_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((allowed & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo, allowed);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}

	list_for_each_entry(lobj, &duplicates, tv.head) {
		lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
		lobj->tiling_flags = lobj->robj->tiling_flags;
	}

	return 0;
}

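/**
 * radeon_bo_get_surface_reg - assign a hardware surface register to a BO
 *
 * @bo: tiled buffer object needing a surface register (must be reserved)
 *
 * Reuses the BO's existing register when possible, otherwise grabs a
 * free slot or steals one from an unpinned BO.  Returns 0 on success or
 * -ENOMEM when all RADEON_GEM_MAX_SURFACES slots are pinned.
 */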
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	dma_resv_assert_held(bo->tbo.base.resv);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
			       uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

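/**
 * radeon_bo_check_tiling - keep a BO's surface register in sync with
 * its placement
 *
 * @bo: buffer object to check
 * @has_moved: true if the BO was just moved
 * @force_drop: true to unconditionally release the surface register
 *
 * Allocates a surface register when a tiled BO sits in VRAM and drops
 * it when the BO leaves VRAM or the register is forcibly released.
 */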
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
			   bool force_drop)
{
	if (!force_drop)
		dma_resv_assert_held(bo->tbo.base.resv);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

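/**
 * radeon_bo_fault_reserve_notify - handle a CPU fault on VRAM that is
 * not CPU accessible
 *
 * @bo: TTM buffer object being faulted in
 *
 * If the BO lies beyond the CPU-visible part of VRAM, revalidates it
 * into visible VRAM, falling back to GTT when VRAM is exhausted.
 * Returns 0 on success, -EINVAL for pinned BOs that cannot be moved,
 * or another negative error code from validation.
 */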
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (rbo->pin_count > 0)
		return -EINVAL;

	/* hurrah the memory is not visible ! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < rbo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
			rbo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &rbo->placement, &ctx);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &rbo->placement, &ctx);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > rdev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
	if (unlikely(r != 0))
		return r;
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;

	r = ttm_bo_wait(&bo->tbo, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}

/**
 * radeon_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;

	if (shared)
		dma_resv_add_shared_fence(resv, &fence->base);
	else
		dma_resv_add_excl_fence(resv, &fence->base);
}