while ((parent = block->parent)) { struct drm_buddy_block *buddy;
buddy = __get_buddy(block);
if (!drm_buddy_block_is_free(buddy)) break;
if (!force_merge) { /* * Check the block and its buddy clear state and exit * the loop if they both have the dissimilar state.
*/ if (drm_buddy_block_is_clear(block) !=
drm_buddy_block_is_clear(buddy)) break;
if (drm_buddy_block_is_clear(block))
mark_cleared(parent);
}
list_del(&buddy->link); if (force_merge && drm_buddy_block_is_clear(buddy))
mm->clear_avail -= drm_buddy_block_size(mm, buddy);
/* * If the prev block is same as buddy, don't access the * block in the next iteration as we would free the * buddy block as part of the free function.
*/ if (prev == buddy)
prev = list_prev_entry(prev, link);
list_del(&block->link); if (drm_buddy_block_is_clear(block))
mm->clear_avail -= drm_buddy_block_size(mm, block);
order = __drm_buddy_free(mm, block, true); if (order >= min_order) return 0;
}
}
return -ENOMEM;
}
/** * drm_buddy_init - init memory manager * * @mm: DRM buddy manager to initialize * @size: size in bytes to manage * @chunk_size: minimum page size in bytes for our allocations * * Initializes the memory manager and its resources. * * Returns: * 0 on success, error code on failure.
*/ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{ unsignedint i;
u64 offset;
/* * Split into power-of-two blocks, in case we are given a size that is * not itself a power-of-two.
*/ do { struct drm_buddy_block *root; unsignedint order;
u64 root_size;
order = ilog2(size) - ilog2(chunk_size);
root_size = chunk_size << order;
root = drm_block_alloc(mm, NULL, order, offset); if (!root) goto out_free_roots;
if (drm_buddy_block_is_clear(block)) {
mark_cleared(block->left);
mark_cleared(block->right);
clear_reset(block);
}
mark_split(block);
return 0;
}
/**
 * drm_get_buddy - get buddy address
 *
 * @block: DRM buddy block
 *
 * Returns the corresponding buddy block for @block, or NULL
 * if this is a root block and can't be merged further.
 * Requires some kind of locking to protect against
 * any concurrent allocate and free operations.
 */
struct drm_buddy_block *
drm_get_buddy(struct drm_buddy_block *block)
{
	return __get_buddy(block);
}
EXPORT_SYMBOL(drm_get_buddy);
/** * drm_buddy_reset_clear - reset blocks clear state * * @mm: DRM buddy manager * @is_clear: blocks clear state * * Reset the clear state based on @is_clear value for each block * in the freelist.
*/ void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
{
u64 root_size, size, start; unsignedint order; int i;
size = mm->size; for (i = 0; i < mm->n_roots; ++i) {
order = ilog2(size) - ilog2(mm->chunk_size);
start = drm_buddy_block_offset(mm->roots[i]);
__force_merge(mm, start, start + size, order);
list_for_each_entry_safe(block, on, objects, link) { if (mark_clear)
mark_cleared(block); elseif (mark_dirty)
clear_reset(block);
drm_buddy_free_block(mm, block);
cond_resched();
}
INIT_LIST_HEAD(objects);
}
/*
 * Free a list of blocks back to the allocator without touching the
 * clear/dirty tracking.
 */
static void drm_buddy_free_list_internal(struct drm_buddy *mm,
					 struct list_head *objects)
{
	/*
	 * Don't touch the clear/dirty bit, since allocation is still internal
	 * at this point. For example we might have just failed part of the
	 * allocation.
	 */
	__drm_buddy_free_list(mm, objects, false, false);
}
if (round_down(adjusted_end + 1, req_size) <=
round_up(adjusted_start, req_size)) continue;
}
if (!fallback && block_incompatible(block, flags)) continue;
if (contains(start, end, block_start, block_end) &&
order == drm_buddy_block_order(block)) { /* * Find the free block within the range.
*/ if (drm_buddy_block_is_free(block)) return block;
continue;
}
if (!drm_buddy_block_is_split(block)) {
err = split_block(mm, block); if (unlikely(err)) goto err_undo;
}
list_add(&block->right->tmp_link, &dfs);
list_add(&block->left->tmp_link, &dfs);
} while (1);
return ERR_PTR(-ENOSPC);
err_undo: /* * We really don't want to leave around a bunch of split blocks, since * bigger is better, so make sure we merge everything back before we * free the allocated blocks.
*/
buddy = __get_buddy(block); if (buddy &&
(drm_buddy_block_is_free(block) &&
drm_buddy_block_is_free(buddy)))
__drm_buddy_free(mm, block, false); return ERR_PTR(err);
}
err_undo: /* * We really don't want to leave around a bunch of split blocks, since * bigger is better, so make sure we merge everything back before we * free the allocated blocks.
*/
buddy = __get_buddy(block); if (buddy &&
(drm_buddy_block_is_free(block) &&
drm_buddy_block_is_free(buddy)))
__drm_buddy_free(mm, block, false);
/** * drm_buddy_block_trim - free unused pages * * @mm: DRM buddy manager * @start: start address to begin the trimming. * @new_size: original size requested * @blocks: Input and output list of allocated blocks. * MUST contain single block as input to be trimmed. * On success will contain the newly allocated blocks * making up the @new_size. Blocks always appear in * ascending order * * For contiguous allocation, we round up the size to the nearest * power of two value, drivers consume *actual* size, so remaining * portions are unused and can be optionally freed with this function * * Returns: * 0 on success, error code on failure.
*/ int drm_buddy_block_trim(struct drm_buddy *mm,
u64 *start,
u64 new_size, struct list_head *blocks)
{ struct drm_buddy_block *parent; struct drm_buddy_block *block;
u64 block_start, block_end;
LIST_HEAD(dfs);
u64 new_start; int err;
staticstruct drm_buddy_block *
__drm_buddy_alloc_blocks(struct drm_buddy *mm,
u64 start, u64 end, unsignedint order, unsignedlong flags)
{ if (flags & DRM_BUDDY_RANGE_ALLOCATION) /* Allocate traversing within the range */ return __drm_buddy_alloc_range_bias(mm, start, end,
order, flags); else /* Allocate from freelist */ return alloc_from_freelist(mm, order, flags);
}
/** * drm_buddy_alloc_blocks - allocate power-of-two blocks * * @mm: DRM buddy manager to allocate from * @start: start of the allowed range for this block * @end: end of the allowed range for this block * @size: size of the allocation in bytes * @min_block_size: alignment of the allocation * @blocks: output list head to add allocated blocks * @flags: DRM_BUDDY_*_ALLOCATION flags * * alloc_range_bias() called on range limitations, which traverses * the tree and returns the desired block. * * alloc_from_freelist() called when *no* range restrictions * are enforced, which picks the block from the freelist. * * Returns: * 0 on success, error code on failure.
*/ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
u64 start, u64 end, u64 size,
u64 min_block_size, struct list_head *blocks, unsignedlong flags)
{ struct drm_buddy_block *block = NULL;
u64 original_size, original_min_size; unsignedint min_order, order;
LIST_HEAD(allocated); unsignedlong pages; int err;
if (size < mm->chunk_size) return -EINVAL;
if (min_block_size < mm->chunk_size) return -EINVAL;
if (!is_power_of_2(min_block_size)) return -EINVAL;
if (!IS_ALIGNED(start | end | size, mm->chunk_size)) return -EINVAL;
if (end > mm->size) return -EINVAL;
if (range_overflows(start, size, mm->size)) return -EINVAL;
/* Actual range allocation */ if (start + size == end) { if (!IS_ALIGNED(start | end, min_block_size)) return -EINVAL;
/* Trim the allocated block to the required size */ if (!(flags & DRM_BUDDY_TRIM_DISABLE) &&
original_size != size) { struct list_head *trim_list;
LIST_HEAD(temp);
u64 trim_size;
/*
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 * zugesichert.
 *
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */