30 * HZ);
}
- if (bo->bdev->funcs->release_notify)
- bo->bdev->funcs->release_notify(bo);
+ if (bdev->funcs->release_notify)
+ bdev->funcs->release_notify(bo);
drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
ttm_mem_io_free(bdev, bo->resource);
ttm_bo_flush_all_fences(bo);
bo->deleted = true;
- spin_lock(&bo->bdev->lru_lock);
+ spin_lock(&bdev->lru_lock);
/*
* Make pinned bos immediately available to
}
kref_init(&bo->kref);
- spin_unlock(&bo->bdev->lru_lock);
+ spin_unlock(&bdev->lru_lock);
INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete);
static int ttm_bo_evict(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx)
{
- struct ttm_device *bdev = bo->bdev;
struct ttm_resource *evict_mem;
struct ttm_placement placement;
struct ttm_place hop;
dma_resv_assert_held(bo->base.resv);
placement.num_placement = 0;
- bdev->funcs->evict_flags(bo, &placement);
+ bo->bdev->funcs->evict_flags(bo, &placement);
if (!placement.num_placement) {
ret = ttm_bo_wait_ctx(bo, ctx);
const struct ttm_place *place)
{
struct ttm_resource *res = bo->resource;
- struct ttm_device *bdev = bo->bdev;
dma_resv_assert_held(bo->base.resv);
- if (bo->resource->mem_type == TTM_PL_SYSTEM)
+
+ if (res->mem_type == TTM_PL_SYSTEM)
return true;
/* Don't evict this BO if it's outside of the
* requested placement range
*/
- return ttm_resource_intersects(bdev, res, place, bo->base.size);
+ return ttm_resource_intersects(bo->bdev, res, place, bo->base.size);
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
static s64
ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
- struct ttm_place place = {.mem_type = bo->resource->mem_type};
+ struct ttm_resource *res = bo->resource;
+ struct ttm_place place = { .mem_type = res->mem_type };
struct ttm_bo_swapout_walk *swapout_walk =
container_of(walk, typeof(*swapout_walk), walk);
struct ttm_operation_ctx *ctx = walk->arg.ctx;
+ struct ttm_device *bdev = bo->bdev;
+ struct ttm_tt *tt = bo->ttm;
s64 ret;
/*
* The driver may use the fact that we're moving from SYSTEM
* as an indication that we're about to swap out.
*/
- if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, &place)) {
+ if (bo->pin_count || !bdev->funcs->eviction_valuable(bo, &place)) {
ret = -EBUSY;
goto out;
}
- if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
- bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
- bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED) {
+ if (!tt || !ttm_tt_is_populated(tt) ||
+ tt->page_flags & (TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_SWAPPED)) {
ret = -EBUSY;
goto out;
}
if (bo->deleted) {
- pgoff_t num_pages = bo->ttm->num_pages;
+ pgoff_t num_pages = tt->num_pages;
ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
/*
* Move to system cached
*/
- if (bo->resource->mem_type != TTM_PL_SYSTEM) {
+ if (res->mem_type != TTM_PL_SYSTEM) {
struct ttm_resource *evict_mem;
struct ttm_place hop;
goto out;
ttm_bo_unmap_virtual(bo);
- if (bo->bdev->funcs->swap_notify)
- bo->bdev->funcs->swap_notify(bo);
+ if (bdev->funcs->swap_notify)
+ bdev->funcs->swap_notify(bo);
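+ /*
+ * The move to system above may have replaced bo->resource, so do not
+ * reuse the res pointer cached at function entry from here on.
+ */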
- if (ttm_tt_is_populated(bo->ttm)) {
- spin_lock(&bo->bdev->lru_lock);
+ if (ttm_tt_is_populated(tt)) {
+ spin_lock(&bdev->lru_lock);
ttm_resource_del_bulk_move(bo->resource, bo);
- spin_unlock(&bo->bdev->lru_lock);
+ spin_unlock(&bdev->lru_lock);
- ret = ttm_tt_swapout(bo->bdev, bo->ttm, swapout_walk->gfp_flags);
+ ret = ttm_tt_swapout(bdev, tt, swapout_walk->gfp_flags);
- spin_lock(&bo->bdev->lru_lock);
+ spin_lock(&bdev->lru_lock);
if (ret)
ttm_resource_add_bulk_move(bo->resource, bo);
ttm_resource_move_to_lru_tail(bo->resource);
- spin_unlock(&bo->bdev->lru_lock);
+ spin_unlock(&bdev->lru_lock);
}
out:
int ttm_bo_populate(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx)
{
+ struct ttm_device *bdev = bo->bdev;
struct ttm_tt *tt = bo->ttm;
bool swapped;
int ret;
return 0;
swapped = ttm_tt_is_swapped(tt);
- ret = ttm_tt_populate(bo->bdev, tt, ctx);
+ ret = ttm_tt_populate(bdev, tt, ctx);
if (ret)
return ret;
if (swapped && !ttm_tt_is_swapped(tt) && !bo->pin_count &&
bo->resource) {
- spin_lock(&bo->bdev->lru_lock);
+ spin_lock(&bdev->lru_lock);
ttm_resource_add_bulk_move(bo->resource, bo);
ttm_resource_move_to_lru_tail(bo->resource);
- spin_unlock(&bo->bdev->lru_lock);
+ spin_unlock(&bdev->lru_lock);
}
return 0;
dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
- dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
+ dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, ttm);
if (IS_ERR(dst_iter))
return PTR_ERR(dst_iter);
src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
- src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
+ src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, ttm);
if (IS_ERR(src_iter)) {
ret = PTR_ERR(src_iter);
goto out_src_iter;
{
struct ttm_resource *mem = bo->resource;
- if (bo->resource->bus.addr) {
+ if (mem->bus.addr) {
map->bo_kmap_type = ttm_bo_map_premapped;
- map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
+ map->virtual = ((u8 *)mem->bus.addr) + offset;
} else {
- resource_size_t res = bo->resource->bus.offset + offset;
+ resource_size_t res = mem->bus.offset + offset;
map->bo_kmap_type = ttm_bo_map_iomap;
if (mem->bus.caching == ttm_write_combined)
struct ttm_operation_ctx ctx = { };
struct ttm_tt *ttm = bo->ttm;
struct ttm_resource_manager *man =
- ttm_manager_type(bo->bdev, bo->resource->mem_type);
+ ttm_manager_type(bo->bdev, mem->mem_type);
pgprot_t prot;
int ret;
unsigned long start_page, unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
+ struct ttm_resource *res = bo->resource;
unsigned long offset, size;
int ret;
map->virtual = NULL;
map->bo = bo;
- if (num_pages > PFN_UP(bo->resource->size))
+ if (num_pages > PFN_UP(res->size))
return -EINVAL;
- if ((start_page + num_pages) > PFN_UP(bo->resource->size))
+ if ((start_page + num_pages) > PFN_UP(res->size))
return -EINVAL;
- ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
+ ret = ttm_mem_io_reserve(bo->bdev, res);
if (ret)
return ret;
- if (!bo->resource->bus.is_iomem) {
+ if (!res->bus.is_iomem) {
return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
} else {
offset = start_page << PAGE_SHIFT;
iounmap(map->vaddr_iomem);
iosys_map_clear(map);
- ttm_mem_io_free(bo->bdev, bo->resource);
+ ttm_mem_io_free(bo->bdev, mem);
}
EXPORT_SYMBOL(ttm_bo_vunmap);
static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
struct dma_fence *fence)
{
- struct ttm_device *bdev = bo->bdev;
struct ttm_resource_manager *from;
struct dma_fence *tmp;
int i;
- from = ttm_manager_type(bdev, bo->resource->mem_type);
+ from = ttm_manager_type(bo->bdev, bo->resource->mem_type);
/**
* BO doesn't have a TTM we need to bind/unbind. Just remember
void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem)
{
- struct ttm_device *bdev = bo->bdev;
- struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
+ struct ttm_resource_manager *man =
+ ttm_manager_type(bo->bdev, new_mem->mem_type);
int ret;
ret = ttm_bo_wait_free_node(bo, man->use_tt);
struct ttm_buffer_object *bo)
{
struct ttm_lru_walk_arg *arg = curs->arg;
- struct dma_resv *resv = bo->base.resv;
int ret;
if (arg->ctx->interruptible)
- ret = dma_resv_lock_interruptible(resv, arg->ticket);
+ ret = dma_resv_lock_interruptible(bo->base.resv, arg->ticket);
else
- ret = dma_resv_lock(resv, arg->ticket);
+ ret = dma_resv_lock(bo->base.resv, arg->ticket);
if (!ret) {
curs->needs_unlock = true;
.num_placement = 1,
.placement = &sys_placement_flags,
};
- struct ttm_tt *tt = bo->ttm;
+ struct ttm_device *bdev = bo->bdev;
long lret;
dma_resv_assert_held(bo->base.resv);
return lret;
if (bo->bulk_move) {
- spin_lock(&bo->bdev->lru_lock);
+ spin_lock(&bdev->lru_lock);
ttm_resource_del_bulk_move(bo->resource, bo);
- spin_unlock(&bo->bdev->lru_lock);
+ spin_unlock(&bdev->lru_lock);
}
- lret = ttm_tt_backup(bo->bdev, tt, (struct ttm_backup_flags)
+ lret = ttm_tt_backup(bdev, bo->ttm, (struct ttm_backup_flags)
{.purge = flags.purge,
.writeback = flags.writeback});
if (lret <= 0 && bo->bulk_move) {
- spin_lock(&bo->bdev->lru_lock);
+ spin_lock(&bdev->lru_lock);
ttm_resource_add_bulk_move(bo->resource, bo);
- spin_unlock(&bo->bdev->lru_lock);
+ spin_unlock(&bdev->lru_lock);
}
if (lret < 0 && lret != -EINTR)
{
struct vm_area_struct *vma = vmf->vma;
struct ttm_buffer_object *bo = vma->vm_private_data;
- struct ttm_device *bdev = bo->bdev;
unsigned long page_offset;
unsigned long page_last;
unsigned long pfn;
if (unlikely(ret != 0))
return ret;
- err = ttm_mem_io_reserve(bdev, bo->resource);
+ err = ttm_mem_io_reserve(bo->bdev, bo->resource);
if (unlikely(err != 0))
return VM_FAULT_SIGBUS;
{
struct vm_area_struct *vma = vmf->vma;
struct ttm_buffer_object *bo = vma->vm_private_data;
- struct drm_device *ddev = bo->base.dev;
vm_fault_t ret = VM_FAULT_NOPAGE;
unsigned long address;
unsigned long pfn;
return VM_FAULT_OOM;
/* Set the page to be freed using drmm release action */
- if (drmm_add_action_or_reset(ddev, ttm_bo_release_dummy_page, page))
+ if (drmm_add_action_or_reset(bo->base.dev, ttm_bo_release_dummy_page,
+ page))
return VM_FAULT_OOM;
pfn = page_to_pfn(page);
vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- pgprot_t prot;
struct ttm_buffer_object *bo = vma->vm_private_data;
- struct drm_device *ddev = bo->base.dev;
vm_fault_t ret;
+ pgprot_t prot;
int idx;
ret = ttm_bo_vm_reserve(bo, vmf);
return ret;
prot = vma->vm_page_prot;
- if (drm_dev_enter(ddev, &idx)) {
+ if (drm_dev_enter(bo->base.dev, &idx)) {
ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
drm_dev_exit(idx);
} else {
int ttm_pool_restore_and_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
const struct ttm_operation_ctx *ctx)
{
+ struct ttm_pool_tt_restore *restore = tt->restore;
struct ttm_pool_alloc_state alloc;
if (WARN_ON(!ttm_tt_is_backed_up(tt)))
return -EINVAL;
- if (!tt->restore) {
+ if (!restore) {
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
ttm_pool_alloc_state_init(tt, &alloc);
if (ctx->gfp_retry_mayfail)
gfp |= __GFP_RETRY_MAYFAIL;
- tt->restore = kzalloc(sizeof(*tt->restore), gfp);
- if (!tt->restore)
+ restore = kzalloc(sizeof(*restore), gfp);
+ if (!restore)
return -ENOMEM;
- tt->restore->snapshot_alloc = alloc;
- tt->restore->pool = pool;
- tt->restore->restored_pages = 1;
- } else {
- struct ttm_pool_tt_restore *restore = tt->restore;
- int ret;
+ restore->snapshot_alloc = alloc;
+ restore->pool = pool;
+ restore->restored_pages = 1;
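+ /* Install the newly initialized restore state on the tt. */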
+ tt->restore = restore;
+ } else {
alloc = restore->snapshot_alloc;
- if (ttm_pool_restore_valid(tt->restore)) {
- ret = ttm_pool_restore_commit(restore, tt->backup, ctx, &alloc);
+ if (ttm_pool_restore_valid(restore)) {
+ int ret = ttm_pool_restore_commit(restore, tt->backup,
+ ctx, &alloc);
+
if (ret)
return ret;
}
return 0;
}
- return __ttm_pool_alloc(pool, tt, ctx, &alloc, tt->restore);
+ return __ttm_pool_alloc(pool, tt, ctx, &alloc, restore);
}
/**
struct ttm_lru_item *next_lru)
{
struct ttm_resource *next = ttm_lru_item_to_res(next_lru);
- struct ttm_lru_bulk_move *bulk = NULL;
- struct ttm_buffer_object *bo = next->bo;
+ struct ttm_lru_bulk_move *bulk;
lockdep_assert_held(&cursor->man->bdev->lru_lock);
- bulk = bo->bulk_move;
+
+ bulk = next->bo->bulk_move;
if (cursor->bulk != bulk) {
if (bulk) {