/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "radeon_reg.h"
#include "radeon.h"

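/*
 * TTM buffer objects are mmapped via fake offsets placed above the
 * range used by legacy DRM maps; everything below this page offset is
 * forwarded to drm_mmap() in radeon_mmap() below.
 */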
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);

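/*
 * TTM callbacks hand us only the ttm_bo_device; walk back through the
 * enclosing radeon_mman to recover the radeon_device.
 */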
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}

/*
 * Global memory.
 */
static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

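/*
 * Take references on TTM's global memory-accounting and BO-subsystem
 * objects, which are shared between all TTM drivers; the first
 * reference initializes them, the last unref releases them.
 */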
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
	struct ttm_global_reference *global_ref;
	int r;

	rdev->mman.mem_global_referenced = false;
	global_ref = &rdev->mman.mem_global_ref;
	global_ref->global_type = TTM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &radeon_ttm_mem_global_init;
	global_ref->release = &radeon_ttm_mem_global_release;
	r = ttm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	rdev->mman.bo_global_ref.mem_glob =
		rdev->mman.mem_global_ref.object;
	global_ref = &rdev->mman.bo_global_ref.ref;
	global_ref->global_type = TTM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = ttm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		ttm_global_item_unref(&rdev->mman.mem_global_ref);
		return r;
	}

	rdev->mman.mem_global_referenced = true;
	return 0;
}

static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
	if (rdev->mman.mem_global_referenced) {
		ttm_global_item_unref(&rdev->mman.bo_global_ref.ref);
		ttm_global_item_unref(&rdev->mman.mem_global_ref);
		rdev->mman.mem_global_referenced = false;
	}
}

struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);

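/*
 * Select the GTT backend for a buffer: the generic TTM AGP backend on
 * AGP boards, otherwise the radeon GART backend defined at the end of
 * this file.
 */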
static struct ttm_backend*
radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
	} else
#endif
	{
		return radeon_ttm_backend_create(rdev);
	}
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

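/*
 * Describe each memory domain to TTM: system RAM, GTT (GART or AGP
 * aperture) and VRAM, along with GPU offsets, CPU-visible apertures
 * and the caching modes each domain supports.
 */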
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->gpu_offset = rdev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			man->io_offset = rdev->mc.agp_base;
			man->io_size = rdev->mc.gtt_size;
			man->io_addr = NULL;
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
					     TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else
#endif
		{
			man->io_offset = 0;
			man->io_size = 0;
			man->io_addr = NULL;
		}
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->gpu_offset = rdev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		man->io_addr = NULL;
		man->io_offset = rdev->mc.aper_base;
		man->io_size = rdev->mc.aper_size;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

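/*
 * Pick eviction placements. VRAM buffers are evicted to GTT while the
 * CP is up so the move can be done with a GPU blit, and to system
 * memory otherwise; everything else goes to the CPU domain.
 */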
static void radeon_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	struct radeon_bo *rbo;
	static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->fpfn = 0;
		placement->lpfn = 0;
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (rbo->rdev->cp.ready == false)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

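/*
 * Perform a "move" that needs no data copy, e.g. between system memory
 * and an unbound GTT placement: just adopt the new memory region.
 */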
static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

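/*
 * Copy a buffer with the GPU blitter. Source and destination must be
 * GPU-reachable (VRAM or GTT); the fence created here lets TTM wait
 * for the copy before the old backing store is released.
 */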
static int radeon_move_blit(struct ttm_buffer_object *bo,
			    bool evict, int no_wait,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	r = radeon_fence_create(rdev, &fence);
	if (unlikely(r)) {
		return r;
	}
	old_start = old_mem->mm_node->start << PAGE_SHIFT;
	new_start = new_mem->mm_node->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->cp.ready) {
		DRM_ERROR("Trying to move memory with CP turned off.\n");
		return -EINVAL;
	}
	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
	/* FIXME: handle copy error */
	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
				      evict, no_wait, new_mem);
	radeon_fence_unref(&fence);
	return r;
}

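/*
 * VRAM -> system move: blit the contents into a temporary GTT
 * placement first, then let ttm_bo_move_ttm() finish the move to
 * system memory.
 */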
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible, bool no_wait,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	u32 placements;
	struct ttm_placement placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
		return r;
	}
	return r;
}

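/*
 * System -> VRAM move: the mirror image of radeon_move_vram_ram();
 * stage the pages in GTT, then blit GTT -> VRAM.
 */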
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible, bool no_wait,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	u32 placements;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
		return r;
	}
	return r;
}

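/*
 * Top-level move hook. Trivial cases are handled with a null move,
 * GPU blits are used when the CP and a copy method are available, and
 * anything else falls back to a CPU memcpy.
 */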
static int radeon_bo_move(struct ttm_buffer_object *bo,
			  bool evict, bool interruptible, bool no_wait,
			  struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->cp.ready || rdev->asic->copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, interruptible,
					 no_wait, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, interruptible,
					 no_wait, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}

	return r;
}

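/*
 * Sync-object hooks: thin wrappers that let TTM treat a radeon_fence
 * as its opaque synchronization object.
 */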
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
				bool lazy, bool interruptible)
{
	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}

static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}

static void radeon_sync_obj_unref(void **sync_obj)
{
	radeon_fence_unref((struct radeon_fence **)sync_obj);
}

static void *radeon_sync_obj_ref(void *sync_obj)
{
	return radeon_fence_ref((struct radeon_fence *)sync_obj);
}

static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}

static struct ttm_bo_driver radeon_bo_driver = {
	.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.sync_obj_signaled = &radeon_sync_obj_signaled,
	.sync_obj_wait = &radeon_sync_obj_wait,
	.sync_obj_flush = &radeon_sync_obj_flush,
	.sync_obj_unref = &radeon_sync_obj_unref,
	.sync_obj_ref = &radeon_sync_obj_ref,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
};

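/*
 * Bring up TTM for this device: global references, the bo_device, the
 * VRAM and GTT heaps, a pinned buffer covering the stolen VGA memory,
 * and the debugfs files.
 */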
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_ttm_global_init(rdev);
	if (r) {
		return r;
	}
	/* No other user of the address space, so set the offset to 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
			       &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
			       rdev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
			   rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
			     RADEON_GEM_DOMAIN_VRAM,
			     &rdev->stollen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stollen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
			   rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

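/* Tear down everything radeon_ttm_init() set up, in reverse order. */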
void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;
	if (rdev->stollen_vga_memory) {
		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stollen_vga_memory);
			radeon_bo_unreserve(rdev->stollen_vga_memory);
		}
		radeon_bo_unref(&rdev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	radeon_ttm_global_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}

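/*
 * mmap support: offsets below DRM_FILE_PAGE_OFFSET go to the legacy
 * drm_mmap() path, everything else to ttm_bo_mmap(). TTM's vm_ops are
 * copied once and the fault handler interposed so page faults can be
 * intercepted by the driver.
 */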
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	int r;

	bo = (struct ttm_buffer_object *)vma->vm_private_data;
	if (bo == NULL) {
		return VM_FAULT_NOPAGE;
	}
	r = ttm_vm_ops->fault(vma, vmf);
	return r;
}

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return drm_mmap(filp, vma);
	}

	file_priv = (struct drm_file *)filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_backend {
	struct ttm_backend backend;
	struct radeon_device *rdev;
	unsigned long num_pages;
	struct page **pages;
	struct page *dummy_read_page;
	bool populated;
	bool bound;
	unsigned offset;
};

static int radeon_ttm_backend_populate(struct ttm_backend *backend,
				       unsigned long num_pages,
				       struct page **pages,
				       struct page *dummy_read_page)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = pages;
	gtt->num_pages = num_pages;
	gtt->dummy_read_page = dummy_read_page;
	gtt->populated = true;
	return 0;
}

static void radeon_ttm_backend_clear(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = NULL;
	gtt->num_pages = 0;
	gtt->dummy_read_page = NULL;
	gtt->populated = false;
	gtt->bound = false;
}

static int radeon_ttm_backend_bind(struct ttm_backend *backend,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_backend *gtt;
	int r;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
	if (!gtt->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     gtt->num_pages, bo_mem, backend);
	}
	r = radeon_gart_bind(gtt->rdev, gtt->offset,
			     gtt->num_pages, gtt->pages);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  gtt->num_pages, gtt->offset);
		return r;
	}
	gtt->bound = true;
	return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
	gtt->bound = false;
	return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	if (gtt->bound) {
		radeon_ttm_backend_unbind(backend);
	}
	kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
	.populate = &radeon_ttm_backend_populate,
	.clear = &radeon_ttm_backend_clear,
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};

struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
{
	struct radeon_ttm_backend *gtt;

	gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->backend.bdev = &rdev->mman.bdev;
	gtt->backend.flags = 0;
	gtt->backend.func = &radeon_backend_func;
	gtt->rdev = rdev;
	gtt->pages = NULL;
	gtt->num_pages = 0;
	gtt->dummy_read_page = NULL;
	gtt->populated = false;
	gtt->bound = false;
	return &gtt->backend;
}

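/*
 * debugfs: dump the VRAM and GTT drm_mm allocators through the
 * "radeon_vram_mm" and "radeon_gtt_mm" files.
 */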
#define RADEON_DEBUGFS_MEM_TYPES 2

#if defined(CONFIG_DEBUG_FS)
static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ret;
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	return ret;
}
#endif

static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
	unsigned i;

	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
		if (i == 0)
			sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
		else
			sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
		radeon_mem_types_list[i].driver_features = 0;
		if (i == 0)
			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager;
		else
			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
	}
	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES);

#endif
	return 0;
}