// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */
static void check_element(mempool_t *pool, void *element)
{
	/* Skip checking: KASAN might save its metadata in the element. */
	if (kasan_enabled())
		return;

	/* Mempools backed by slab allocator */
	if (pool->free == mempool_kfree) {
		__check_element(pool, element, (size_t)pool->pool_data);
	} else if (pool->free == mempool_free_slab) {
		__check_element(pool, element, kmem_cache_size(pool->pool_data));
	} else if (pool->free == mempool_free_pages) {
		/* Mempools backed by page allocator */
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_local_page((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_local(addr);
	}
}
static void poison_element(mempool_t *pool, void *element)
{
	/* Skip poisoning: KASAN might save its metadata in the element. */
	if (kasan_enabled())
		return;

	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_kmalloc) {
		__poison_element(element, (size_t)pool->pool_data);
	} else if (pool->alloc == mempool_alloc_slab) {
		__poison_element(element, kmem_cache_size(pool->pool_data));
	} else if (pool->alloc == mempool_alloc_pages) {
		/* Mempools backed by page allocator */
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_local_page((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_local(addr);
	}
}
/**
 * mempool_exit - exit a mempool initialized with mempool_init()
 * @pool: pointer to the memory pool which was initialized with
 *        mempool_init().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 *
 * May be called on a zeroed but uninitialized mempool (i.e. allocated with
 * kzalloc()).
 */
void mempool_exit(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);

pool->free(element, pool->pool_data);
}
kfree(pool->elements);
pool->elements = NULL;
}
EXPORT_SYMBOL(mempool_exit);
/**
 * mempool_destroy - deallocate a memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	if (unlikely(!pool))
		return;

	mempool_exit(pool);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);
int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data,
gfp_t gfp_mask, int node_id)
{
spin_lock_init(&pool->lock);
pool->min_nr = min_nr;
pool->pool_data = pool_data;
pool->alloc = alloc_fn;
pool->free = free_fn;
	init_waitqueue_head(&pool->wait);
	/*
	 * max() used here to ensure storage for at least 1 element to support
	 * zero minimum pool
	 */
pool->elements = kmalloc_array_node(max(1, min_nr), sizeof(void *),
					    gfp_mask, node_id);
	if (!pool->elements)
		return -ENOMEM;

	/*
	 * First pre-allocate the guaranteed number of buffers,
	 * also pre-allocate 1 element for zero minimum pool.
	 */
	while (pool->curr_nr < max(1, pool->min_nr)) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_exit(pool);
			return -ENOMEM;
}
add_element(pool, element);
}
return 0;
}
EXPORT_SYMBOL(mempool_init_node);
/**
 * mempool_init - initialize a memory pool
 * @pool: pointer to the memory pool that should be initialized
 * @min_nr: the minimum number of elements guaranteed to be
 *          allocated for this pool.
 * @alloc_fn: user-defined element-allocation function.
 * @free_fn: user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * Like mempool_create(), but initializes the pool in-place (i.e. embedded in
 * another structure).
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_init_noprof(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data)
{
	return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
pool_data, GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_init_noprof);
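
/*
 * Usage sketch (illustrative only, not part of mempool.c): a pool embedded
 * in a larger object, initialized with mempool_init() and torn down with
 * mempool_exit().  All "my_*" names are hypothetical.
 *
 *	struct my_ctx {
 *		struct kmem_cache *slab;
 *		mempool_t pool;
 *	};
 *
 *	static int my_ctx_init(struct my_ctx *ctx)
 *	{
 *		int err;
 *
 *		ctx->slab = KMEM_CACHE(my_request, 0);
 *		if (!ctx->slab)
 *			return -ENOMEM;
 *		// Keep at least 16 requests allocatable under memory pressure.
 *		err = mempool_init(&ctx->pool, 16, mempool_alloc_slab,
 *				   mempool_free_slab, ctx->slab);
 *		if (err)
 *			kmem_cache_destroy(ctx->slab);
 *		return err;
 *	}
 *
 *	static void my_ctx_exit(struct my_ctx *ctx)
 *	{
 *		mempool_exit(&ctx->pool);
 *		kmem_cache_destroy(ctx->slab);
 *	}
 */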
/**
 * mempool_create_node - create a memory pool
 * @min_nr: the minimum number of elements guaranteed to be
 *          allocated for this pool.
 * @alloc_fn: user-defined element-allocation function.
 * @free_fn: user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 * @gfp_mask: memory allocation flags
 * @node_id: numa node to allocate on
 *
 * this function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 * functions might sleep - as long as the mempool_alloc() function is not called
 * from IRQ contexts.
 *
 * Return: pointer to the created memory pool object or %NULL on error.
*/
mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data,
gfp_t gfp_mask, int node_id)
{
mempool_t *pool;
	pool = kmalloc_node_noprof(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id);
	if (!pool)
		return NULL;

	if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
			      gfp_mask, node_id)) {
		kfree(pool);
		return NULL;
	}

	return pool;
}
EXPORT_SYMBOL(mempool_create_node_noprof);
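
/*
 * Usage sketch (illustrative only): most callers go through the
 * mempool_create() wrapper rather than the *_node variant.  A pool of
 * eight 128-byte kmalloc buffers could be set up and torn down like this
 * ("buf_pool" is hypothetical); mempool_create_kmalloc_pool() in
 * <linux/mempool.h> wraps the same pattern.
 *
 *	mempool_t *buf_pool;
 *
 *	buf_pool = mempool_create(8, mempool_kmalloc, mempool_kfree,
 *				  (void *)(size_t)128);
 *	if (!buf_pool)
 *		return -ENOMEM;
 *	...
 *	mempool_destroy(buf_pool);
 */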
/**
 * mempool_resize - resize an existing memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (eg. from IRQ contexts) while this function executes.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

BUG_ON(new_min_nr <= 0);
might_sleep();
	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
element = remove_element(pool);
spin_unlock_irqrestore(&pool->lock, flags);
pool->free(element, pool->pool_data);
spin_lock_irqsave(&pool->lock, flags);
}
		pool->min_nr = new_min_nr;
		goto out_unlock;
}
spin_unlock_irqrestore(&pool->lock, flags);
/* Grow the pool */
new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				      GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
}
memcpy(new_elements, pool->elements,
pool->curr_nr * sizeof(*new_elements));
kfree(pool->elements);
pool->elements = new_elements;
	pool->min_nr = new_min_nr;

out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
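
/*
 * Usage sketch (illustrative only): resizing the reserve when the required
 * depth changes, e.g. a device reporting a deeper queue ("pool" and
 * "new_depth" are hypothetical).  This may sleep, so call it from process
 * context.
 *
 *	err = mempool_resize(pool, new_depth);
 *	if (err)
 *		return err;
 */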
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 * @gfp_mask: the usual allocation bitmask.
 *
 * this function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 *
 * Return: pointer to the allocated element or %NULL on error.
 */
void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
wait_queue_entry_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_alloc(gfp_mask);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
}
	/*
	 * We use gfp mask w/o direct reclaim or IO for the first round.  If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
}
	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
}
/* Let's wait for someone else to return an element to @pool */
init_wait(&wait);
prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock_irqrestore(&pool->lock, flags);
	/*
	 * FIXME: this should be io_schedule().  The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc_noprof);
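
/*
 * Usage sketch (illustrative only): an I/O submission path allocating from
 * the pool with GFP_NOIO and returning the element on completion ("pool"
 * and "req" are hypothetical).  Because __GFP_DIRECT_RECLAIM is set, the
 * allocation sleeps for a returned element instead of failing.
 *
 *	struct my_req *req;
 *
 *	req = mempool_alloc(pool, GFP_NOIO);
 *	... submit req ...
 *	// in the completion handler:
 *	mempool_free(req, pool);
 */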
/**
 * mempool_alloc_preallocated - allocate an element from preallocated elements
 *                              belonging to a specific memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * This function is similar to mempool_alloc, but it only attempts allocating
 * an element from the preallocated elements. It does not sleep and immediately
 * returns if no preallocated elements are available.
 *
 * Return: pointer to the allocated element or %NULL if no elements are
 * available.
 */
void *mempool_alloc_preallocated(mempool_t *pool)
{
	void *element;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
}
	spin_unlock_irqrestore(&pool->lock, flags);

	return NULL;
}
EXPORT_SYMBOL(mempool_alloc_preallocated);
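
/*
 * Usage sketch (illustrative only): a context that must not sleep can try
 * the reserve directly and defer the work if it is empty ("pool" and "req"
 * are hypothetical).
 *
 *	req = mempool_alloc_preallocated(pool);
 *	if (!req)
 *		return -EBUSY;	// retry later rather than sleeping
 */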
/**
 * mempool_free - return an element to the pool.
 * @element: pool element pointer.
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * this function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc().  The preceding read is
	 * for @element and the following @pool->curr_nr.  This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element.  This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
smp_rmb();
	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards.  If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			if (wq_has_sleeper(&pool->wait))
				wake_up(&pool->wait);
			return;
}
spin_unlock_irqrestore(&pool->lock, flags);
}
	/*
	 * Handle the min_nr = 0 edge case:
	 *
	 * For zero-minimum pools, curr_nr < min_nr (0 < 0) never succeeds,
	 * so waiters sleeping on pool->wait would never be woken by the
	 * wake-up path of the previous test. This explicit check ensures the
	 * element is added back when both min_nr and curr_nr are 0, and
	 * any active waiters are properly awakened.
	 */
	if (unlikely(pool->min_nr == 0 &&
		     READ_ONCE(pool->curr_nr) == 0)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr == 0)) {
add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			if (wq_has_sleeper(&pool->wait))
				wake_up(&pool->wait);
			return;
}
spin_unlock_irqrestore(&pool->lock, flags);
	}

	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;

	return kmalloc_noprof(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;

	return alloc_pages_noprof(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);
void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;

__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
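
/*
 * Usage sketch (illustrative only): a reserve of 32 order-0 pages built on
 * the two helpers above ("pg_pool" is hypothetical);
 * mempool_create_page_pool() in <linux/mempool.h> wraps the same pattern.
 *
 *	mempool_t *pg_pool;
 *
 *	pg_pool = mempool_create(32, mempool_alloc_pages, mempool_free_pages,
 *				 (void *)(long)0);
 *	if (!pg_pool)
 *		return -ENOMEM;
 */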