static struct kmem_cache *slab_blocks;
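+/*
+ * Free-tree nodes embed in struct drm_buddy_block as @rb, so resolving a
+ * node back to its block is a plain container_of() via rb_entry().
+ */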
+#define rbtree_get_free_block(node) rb_entry((node), struct drm_buddy_block, rb)
+
static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
struct drm_buddy_block *parent,
unsigned int order,
block->header |= order;
block->parent = parent;
+ RB_CLEAR_NODE(&block->rb);
+
BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
return block;
}
kmem_cache_free(slab_blocks, block);
}
-static void list_insert_sorted(struct drm_buddy *mm,
- struct drm_buddy_block *block)
+static bool drm_buddy_block_offset_less(const struct drm_buddy_block *block,
+ const struct drm_buddy_block *node)
{
- struct drm_buddy_block *node;
- struct list_head *head;
+ return drm_buddy_block_offset(block) < drm_buddy_block_offset(node);
+}
- head = &mm->free_list[drm_buddy_block_order(block)];
- if (list_empty(head)) {
- list_add(&block->link, head);
- return;
- }
+static bool rbtree_block_offset_less(struct rb_node *block,
+ const struct rb_node *node)
+{
+ return drm_buddy_block_offset_less(rbtree_get_free_block(block),
+ rbtree_get_free_block(node));
+}
- list_for_each_entry(node, head, link)
- if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
- break;
+static void rbtree_insert(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
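+	/*
+	 * rb_add() descends the tree using the less() helper, keeping each
+	 * per-order tree sorted by block offset.
+	 */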
+ rb_add(&block->rb,
+ &mm->free_tree[drm_buddy_block_order(block)],
+ rbtree_block_offset_less);
+}
+
+static void rbtree_remove(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ struct rb_root *root;
+
+ root = &mm->free_tree[drm_buddy_block_order(block)];
+ rb_erase(&block->rb, root);
+
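+	/* Mark the node detached; RB_EMPTY_NODE() now reports true for it. */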
+ RB_CLEAR_NODE(&block->rb);
+}
+
+static struct drm_buddy_block *
+rbtree_last_entry(struct drm_buddy *mm, unsigned int order)
+{
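+	/* rb_last() returns the rightmost node, i.e. the highest block offset. */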
+ struct rb_node *node = rb_last(&mm->free_tree[order]);
+
+	return node ? rbtree_get_free_block(node) : NULL;
+}
- __list_add(&block->link, node->link.prev, &node->link);
+static bool rbtree_is_empty(struct drm_buddy *mm, unsigned int order)
+{
+ return RB_EMPTY_ROOT(&mm->free_tree[order]);
}
static void clear_reset(struct drm_buddy_block *block)
block->header |= DRM_BUDDY_HEADER_CLEAR;
}
-static void mark_allocated(struct drm_buddy_block *block)
+static void mark_allocated(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
{
block->header &= ~DRM_BUDDY_HEADER_STATE;
block->header |= DRM_BUDDY_ALLOCATED;
- list_del(&block->link);
+ rbtree_remove(mm, block);
}
static void mark_free(struct drm_buddy *mm,
block->header &= ~DRM_BUDDY_HEADER_STATE;
block->header |= DRM_BUDDY_FREE;
- list_insert_sorted(mm, block);
+ rbtree_insert(mm, block);
}
-static void mark_split(struct drm_buddy_block *block)
+static void mark_split(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
{
block->header &= ~DRM_BUDDY_HEADER_STATE;
block->header |= DRM_BUDDY_SPLIT;
- list_del(&block->link);
+ rbtree_remove(mm, block);
}
static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
mark_cleared(parent);
}
- list_del(&buddy->link);
+ rbtree_remove(mm, buddy);
if (force_merge && drm_buddy_block_is_clear(buddy))
mm->clear_avail -= drm_buddy_block_size(mm, buddy);
return -EINVAL;
for (i = min_order - 1; i >= 0; i--) {
- struct drm_buddy_block *block, *prev;
+ struct rb_root *root = &mm->free_tree[i];
+ struct rb_node *iter;
+
+ iter = rb_last(root);
- list_for_each_entry_safe_reverse(block, prev, &mm->free_list[i], link) {
- struct drm_buddy_block *buddy;
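+		/* Visit the free blocks from the highest offset downwards. */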
+ while (iter) {
+ struct drm_buddy_block *block, *buddy;
u64 block_start, block_end;
- if (!block->parent)
+ block = rbtree_get_free_block(iter);
+ iter = rb_prev(iter);
+
+			if (!block->parent)
continue;
block_start = drm_buddy_block_offset(block);
WARN_ON(drm_buddy_block_is_clear(block) ==
drm_buddy_block_is_clear(buddy));
-			/*
-			 * If the prev block is same as buddy, don't access the
-			 * block in the next iteration as we would free the
-			 * buddy block as part of the free function.
-			 */
-			if (prev == buddy)
-				prev = list_prev_entry(prev, link);
+			/*
+			 * If the buddy is the next node we are about to visit,
+			 * skip past it now, since it is freed as part of the
+			 * merge below.
+			 */
+			if (iter == &buddy->rb)
+				iter = rb_prev(iter);
- list_del(&block->link);
+ rbtree_remove(mm, block);
if (drm_buddy_block_is_clear(block))
mm->clear_avail -= drm_buddy_block_size(mm, block);
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
unsigned int i;
- u64 offset;
+ u64 offset = 0;
if (size < chunk_size)
return -EINVAL;
BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);
- mm->free_list = kmalloc_array(mm->max_order + 1,
- sizeof(struct list_head),
+ mm->free_tree = kmalloc_array(mm->max_order + 1,
+ sizeof(struct rb_root),
GFP_KERNEL);
- if (!mm->free_list)
+ if (!mm->free_tree)
return -ENOMEM;
for (i = 0; i <= mm->max_order; ++i)
- INIT_LIST_HEAD(&mm->free_list[i]);
+ mm->free_tree[i] = RB_ROOT;
mm->n_roots = hweight64(size);
sizeof(struct drm_buddy_block *),
GFP_KERNEL);
if (!mm->roots)
- goto out_free_list;
+ goto out_free_tree;
- offset = 0;
i = 0;
/*
while (i--)
drm_block_free(mm, mm->roots[i]);
kfree(mm->roots);
-out_free_list:
- kfree(mm->free_list);
+out_free_tree:
+ kfree(mm->free_tree);
return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);
*
* @mm: DRM buddy manager to free
*
- * Cleanup memory manager resources and the freelist
+ * Clean up memory manager resources and the freetree
*/
void drm_buddy_fini(struct drm_buddy *mm)
{
WARN_ON(mm->avail != mm->size);
kfree(mm->roots);
- kfree(mm->free_list);
+ kfree(mm->free_tree);
}
EXPORT_SYMBOL(drm_buddy_fini);
clear_reset(block);
}
- mark_split(block);
+ mark_split(mm, block);
return 0;
}
* @is_clear: blocks clear state
*
* Reset the clear state based on @is_clear value for each block
- * in the freelist.
+ * in the freetree.
*/
void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
{
}
for (i = 0; i <= mm->max_order; ++i) {
- struct drm_buddy_block *block;
+ struct drm_buddy_block *block, *tmp;
- list_for_each_entry_reverse(block, &mm->free_list[i], link) {
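+		/*
+		 * Traversal order does not matter here, so take the cheap
+		 * post-order walk over the whole tree.
+		 */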
+ rbtree_postorder_for_each_entry_safe(block, tmp, &mm->free_tree[i], rb) {
if (is_clear != drm_buddy_block_is_clear(block)) {
if (is_clear) {
mark_cleared(block);
unsigned int i;
for (i = order; i <= mm->max_order; ++i) {
+ struct rb_node *iter = rb_last(&mm->free_tree[i]);
struct drm_buddy_block *tmp_block;
- list_for_each_entry_reverse(tmp_block, &mm->free_list[i], link) {
- if (block_incompatible(tmp_block, flags))
- continue;
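+		/* Scan from the highest offset down for a compatible block. */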
+ while (iter) {
+ tmp_block = rbtree_get_free_block(iter);
- block = tmp_block;
- break;
+ if (!block_incompatible(tmp_block, flags)) {
+ block = tmp_block;
+ break;
+ }
+
+ iter = rb_prev(iter);
}
if (!block)
}
static struct drm_buddy_block *
-alloc_from_freelist(struct drm_buddy *mm,
+alloc_from_freetree(struct drm_buddy *mm,
unsigned int order,
unsigned long flags)
{
tmp = drm_buddy_block_order(block);
} else {
for (tmp = order; tmp <= mm->max_order; ++tmp) {
+ struct rb_node *iter = rb_last(&mm->free_tree[tmp]);
struct drm_buddy_block *tmp_block;
- list_for_each_entry_reverse(tmp_block, &mm->free_list[tmp], link) {
- if (block_incompatible(tmp_block, flags))
- continue;
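+			/* Same highest-offset-first scan as the loop above. */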
+ while (iter) {
+ tmp_block = rbtree_get_free_block(iter);
- block = tmp_block;
- break;
+ if (!block_incompatible(tmp_block, flags)) {
+ block = tmp_block;
+ break;
+ }
+
+ iter = rb_prev(iter);
}
if (block)
if (!block) {
/* Fallback method */
for (tmp = order; tmp <= mm->max_order; ++tmp) {
- if (!list_empty(&mm->free_list[tmp])) {
- block = list_last_entry(&mm->free_list[tmp],
- struct drm_buddy_block,
- link);
- if (block)
- break;
- }
+ block = rbtree_last_entry(mm, tmp);
+ if (block)
+ break;
}
if (!block)
if (contains(start, end, block_start, block_end)) {
if (drm_buddy_block_is_free(block)) {
- mark_allocated(block);
+ mark_allocated(mm, block);
total_allocated += drm_buddy_block_size(mm, block);
mm->avail -= drm_buddy_block_size(mm, block);
if (drm_buddy_block_is_clear(block))
{
u64 rhs_offset, lhs_offset, lhs_size, filled;
struct drm_buddy_block *block;
- struct list_head *list;
LIST_HEAD(blocks_lhs);
+ struct rb_node *iter;
unsigned long pages;
unsigned int order;
u64 modify_size;
if (order == 0)
return -ENOSPC;
- list = &mm->free_list[order];
- if (list_empty(list))
+ if (rbtree_is_empty(mm, order))
return -ENOSPC;
- list_for_each_entry_reverse(block, list, link) {
+ iter = rb_last(&mm->free_tree[order]);
+
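+	/* Try each free block of this order, highest offset first. */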
+ while (iter) {
+ block = rbtree_get_free_block(iter);
+
/* Allocate blocks traversing RHS */
rhs_offset = drm_buddy_block_offset(block);
err = __drm_buddy_alloc_range(mm, rhs_offset, size,
}
/* Free blocks for the next iteration */
drm_buddy_free_list_internal(mm, blocks);
+
+ iter = rb_prev(iter);
}
return -ENOSPC;
list_add(&block->tmp_link, &dfs);
err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
if (err) {
- mark_allocated(block);
+ mark_allocated(mm, block);
mm->avail -= drm_buddy_block_size(mm, block);
if (drm_buddy_block_is_clear(block))
mm->clear_avail -= drm_buddy_block_size(mm, block);
return __drm_buddy_alloc_range_bias(mm, start, end,
order, flags);
else
- /* Allocate from freelist */
- return alloc_from_freelist(mm, order, flags);
+ /* Allocate from freetree */
+ return alloc_from_freetree(mm, order, flags);
}
/**
* alloc_range_bias() called on range limitations, which traverses
* the tree and returns the desired block.
*
- * alloc_from_freelist() called when *no* range restrictions
- * are enforced, which picks the block from the freelist.
+ * alloc_from_freetree() called when *no* range restrictions
+ * are enforced, which picks the block from the freetree.
*
* Returns:
* 0 on success, error code on failure.
}
} while (1);
- mark_allocated(block);
+ mark_allocated(mm, block);
mm->avail -= drm_buddy_block_size(mm, block);
if (drm_buddy_block_is_clear(block))
mm->clear_avail -= drm_buddy_block_size(mm, block);
mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20);
for (order = mm->max_order; order >= 0; order--) {
- struct drm_buddy_block *block;
+ struct drm_buddy_block *block, *tmp;
u64 count = 0, free;
- list_for_each_entry(block, &mm->free_list[order], link) {
+ rbtree_postorder_for_each_entry_safe(block, tmp, &mm->free_tree[order], rb) {
BUG_ON(!drm_buddy_block_is_free(block));
count++;
}