return 0;
if (bo->deleted) {
- lret = ttm_bo_wait_ctx(bo, walk->ctx);
+ lret = ttm_bo_wait_ctx(bo, walk->arg.ctx);
if (!lret)
ttm_bo_cleanup_memtype_use(bo);
} else {
- lret = ttm_bo_evict(bo, walk->ctx);
+ lret = ttm_bo_evict(bo, walk->arg.ctx);
}
if (lret)
struct ttm_bo_evict_walk evict_walk = {
.walk = {
.ops = &ttm_evict_walk_ops,
- .ctx = ctx,
- .ticket = ticket,
+ .arg = {
+ .ctx = ctx,
+ .ticket = ticket,
+ }
},
.place = place,
.evictor = evictor,
};
s64 lret;
- evict_walk.walk.trylock_only = true;
+ evict_walk.walk.arg.trylock_only = true;
lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
/* One more attempt if we hit low limit? */
/* Reset low limit */
evict_walk.try_low = evict_walk.hit_low = false;
/* If ticket-locking, repeat while making progress. */
- evict_walk.walk.trylock_only = false;
+ evict_walk.walk.arg.trylock_only = false;
retry:
do {
- /* The walk may clear the evict_walk.walk.ticket field */
+ /* The walk may clear the evict_walk.walk.arg.ticket field */
- evict_walk.walk.ticket = ticket;
+ evict_walk.walk.arg.ticket = ticket;
evict_walk.evicted = 0;
lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
} while (!lret && evict_walk.evicted);
struct ttm_place place = {.mem_type = bo->resource->mem_type};
struct ttm_bo_swapout_walk *swapout_walk =
container_of(walk, typeof(*swapout_walk), walk);
- struct ttm_operation_ctx *ctx = walk->ctx;
+ struct ttm_operation_ctx *ctx = walk->arg.ctx;
s64 ret;
/*
struct ttm_bo_swapout_walk swapout_walk = {
.walk = {
.ops = &ttm_swap_ops,
- .ctx = ctx,
- .trylock_only = true,
+ .arg = {
+ .ctx = ctx,
+ .trylock_only = true,
+ },
},
.gfp_flags = gfp_flags,
};
return ret;
}
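The hunks below switch the two locking helpers from taking the walk (respectively a bare ttm_operation_ctx) to taking the shared struct ttm_lru_walk_arg, which is what lets the LRU walk and the BO LRU cursor reuse them. As a purely illustrative sketch (the helper name is hypothetical and not part of this patch), a locking-policy check can now be expressed against the shared argument struct alone, mirroring the ticket/no_wait_gpu/trylock_only condition used by the eviction walk further down:

/* Hypothetical helper, not in this patch: decide whether a contended
 * resv lock may be waited on, using only the shared argument struct.
 * Assumes the TTM declarations from this series are in scope.
 */
static bool ttm_walk_arg_may_block(const struct ttm_lru_walk_arg *arg)
{
	return arg->ticket && !arg->ctx->no_wait_gpu && !arg->trylock_only;
}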
-static bool ttm_lru_walk_trylock(struct ttm_operation_ctx *ctx,
+static bool ttm_lru_walk_trylock(struct ttm_lru_walk_arg *arg,
struct ttm_buffer_object *bo,
bool *needs_unlock)
{
+ struct ttm_operation_ctx *ctx = arg->ctx;
+
*needs_unlock = false;
if (dma_resv_trylock(bo->base.resv)) {
return false;
}
-static int ttm_lru_walk_ticketlock(struct ttm_lru_walk *walk,
+static int ttm_lru_walk_ticketlock(struct ttm_lru_walk_arg *arg,
struct ttm_buffer_object *bo,
bool *needs_unlock)
{
struct dma_resv *resv = bo->base.resv;
int ret;
- if (walk->ctx->interruptible)
- ret = dma_resv_lock_interruptible(resv, walk->ticket);
+ if (arg->ctx->interruptible)
+ ret = dma_resv_lock_interruptible(resv, arg->ticket);
else
- ret = dma_resv_lock(resv, walk->ticket);
+ ret = dma_resv_lock(resv, arg->ticket);
if (!ret) {
*needs_unlock = true;
* after waiting for the ticketlock, revert back to
* trylocking for this walk.
*/
- walk->ticket = NULL;
+ arg->ticket = NULL;
} else if (ret == -EDEADLK) {
/* Caller needs to exit the ww transaction. */
ret = -ENOSPC;
* since if we do it the other way around, and the trylock fails,
* we need to drop the lru lock to put the bo.
*/
- if (ttm_lru_walk_trylock(walk->ctx, bo, &bo_needs_unlock))
+ if (ttm_lru_walk_trylock(&walk->arg, bo, &bo_needs_unlock))
bo_locked = true;
- else if (!walk->ticket || walk->ctx->no_wait_gpu ||
- walk->trylock_only)
+ else if (!walk->arg.ticket || walk->arg.ctx->no_wait_gpu ||
+ walk->arg.trylock_only)
continue;
if (!ttm_bo_get_unless_zero(bo)) {
lret = 0;
if (!bo_locked)
- lret = ttm_lru_walk_ticketlock(walk, bo, &bo_needs_unlock);
+ lret = ttm_lru_walk_ticketlock(&walk->arg, bo, &bo_needs_unlock);
/*
* Note that in between the release of the lru lock and the
{
memset(curs, 0, sizeof(*curs));
ttm_resource_cursor_init(&curs->res_curs, man);
- curs->ctx = ctx;
+ curs->arg.ctx = ctx;
return curs;
}
{
struct ttm_buffer_object *bo = res->bo;
- if (!ttm_lru_walk_trylock(curs->ctx, bo, &curs->needs_unlock))
+ if (!ttm_lru_walk_trylock(&curs->arg, bo, &curs->needs_unlock))
return NULL;
if (!ttm_bo_get_unless_zero(bo)) {
};
/**
- * struct ttm_lru_walk - Structure describing a LRU walk.
+ * struct ttm_lru_walk_arg - Common part for the variants of BO LRU walk.
*/
-struct ttm_lru_walk {
- /** @ops: Pointer to the ops structure. */
- const struct ttm_lru_walk_ops *ops;
+struct ttm_lru_walk_arg {
/** @ctx: Pointer to the struct ttm_operation_ctx. */
struct ttm_operation_ctx *ctx;
/** @ticket: The struct ww_acquire_ctx if any. */
struct ww_acquire_ctx *ticket;
/** @trylock_only: Only use trylock for locking. */
bool trylock_only;
};
+/**
+ * struct ttm_lru_walk - Structure describing a LRU walk.
+ */
+struct ttm_lru_walk {
+ /** @ops: Pointer to the ops structure. */
+ const struct ttm_lru_walk_ops *ops;
+ /** @arg: Common BO LRU walk arguments. */
+ struct ttm_lru_walk_arg arg;
+};
+
s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
struct ttm_resource_manager *man, s64 target);
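Taken together, the header changes mean callers now populate a walk through the embedded arg member instead of the former top-level ctx, ticket and trylock_only fields. A minimal caller-side sketch; my_walk_ops, my_trylock_only_walk, bdev, man and ctx are placeholder names, not from the patch:

/* Sketch only; assumes the TTM declarations shown above are in scope. */
static const struct ttm_lru_walk_ops my_walk_ops;	/* callbacks omitted in this sketch */

static s64 my_trylock_only_walk(struct ttm_device *bdev,
				struct ttm_resource_manager *man,
				struct ttm_operation_ctx *ctx)
{
	struct ttm_lru_walk walk = {
		.ops = &my_walk_ops,
		.arg = {
			.ctx = ctx,
			.ticket = NULL,		/* no ww ticket: trylock only */
			.trylock_only = true,
		},
	};

	return ttm_lru_walk_for_evict(&walk, bdev, man, 1);
}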
struct ttm_bo_lru_cursor {
/** @res_curs: Embedded struct ttm_resource_cursor. */
struct ttm_resource_cursor res_curs;
- /**
- * @ctx: The struct ttm_operation_ctx used while looping.
- * governs the locking mode.
- */
- struct ttm_operation_ctx *ctx;
/**
* @bo: Buffer object pointer if a buffer object is refcounted,
* NULL otherwise.
* unlock before the next iteration or after loop exit.
*/
bool needs_unlock;
+ /** @arg: Common BO LRU walk arguments. */
+ struct ttm_lru_walk_arg arg;
};
void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs);
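On the cursor side, the operation context passed to ttm_bo_lru_cursor_init() now ends up in curs->arg.ctx (see the hunk in the .c file above) and governs the trylocking done while iterating. A rough usage sketch with placeholder names, based on the init and fini declarations at this point in the series; the per-BO iteration helpers are elided:

/* Sketch only: after init, the context lives in curs.arg.ctx. */
static void my_scan_lru(struct ttm_resource_manager *man)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = true,
	};
	struct ttm_bo_lru_cursor curs;

	ttm_bo_lru_cursor_init(&curs, man, &ctx);
	/* ... walk the LRU with the cursor iteration helpers ... */
	ttm_bo_lru_cursor_fini(&curs);
}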