/*
 * A Remote Heap.  Remote means that we don't touch the memory that the
 * heap points to. Normal heap implementations use the memory they manage
 * to place their list. We cannot do that because the memory we manage may
 * have special properties, for example it is uncachable or of different
 * endianess.
 *
 * Author: Pantelis Antoniou <panto@intracom.gr>
 *
 * 2004 (c) INTRACOM S.A. Greece. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/rheap.h>
/* * Fixup a list_head, needed when copying lists. If the pointers fall * between s and e, apply the delta. This assumes that * sizeof(struct list_head *) == sizeof(unsigned long *).
*/ staticinlinevoid fixup(unsignedlong s, unsignedlong e, int d, struct list_head *l)
{ unsignedlong *pp;
pp = (unsignedlong *)&l->next; if (*pp >= s && *pp < e)
*pp += d;
pp = (unsignedlong *)&l->prev; if (*pp >= s && *pp < e)
*pp += d;
}
/*
 * Grow the allocated block array to max_blocks entries.  Returns 0 on
 * success, -EINVAL if max_blocks does not actually grow the area, or
 * -ENOMEM on allocation failure.  Note: on success every pointer held
 * into the old block area is invalidated (the area is reallocated and
 * the embedded list pointers are fixed up to the new location).
 */
static int grow(rh_info_t * info, int max_blocks)
{
	rh_block_t *block, *blk;
	int i, new_blocks;
	int delta;
	unsigned long blks, blke;

	if (max_blocks <= info->max_blocks)
		return -EINVAL;

	new_blocks = max_blocks - info->max_blocks;

	block = kmalloc_array(max_blocks, sizeof(rh_block_t), GFP_ATOMIC);
	if (block == NULL)
		return -ENOMEM;

	if (info->max_blocks > 0) {

		/* copy old block area */
		memcpy(block, info->block,
		       sizeof(rh_block_t) * info->max_blocks);

		delta = (char *)block - (char *)info->block;

		/* and fixup list pointers */
		blks = (unsigned long)info->block;
		blke = (unsigned long)(info->block + info->max_blocks);

		for (i = 0, blk = block; i < info->max_blocks; i++, blk++)
			fixup(blks, blke, delta, &blk->list);

		fixup(blks, blke, delta, &info->empty_list);
		fixup(blks, blke, delta, &info->free_list);
		fixup(blks, blke, delta, &info->taken_list);

		/* free the old allocated memory */
		if ((info->flags & RHIF_STATIC_BLOCK) == 0)
			kfree(info->block);
	}

	info->block = block;
	info->empty_slots += new_blocks;
	info->max_blocks = max_blocks;
	info->flags &= ~RHIF_STATIC_BLOCK;

	/* add all new blocks to the free list */
	blk = block + info->max_blocks - new_blocks;
	for (i = 0; i < new_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);

	return 0;
}
/* * Assure at least the required amount of empty slots. If this function * causes a grow in the block area then all pointers kept to the block * area are invalid!
*/ staticint assure_empty(rh_info_t * info, int slots)
{ int max_blocks;
/* This function is not meant to be used to grow uncontrollably */ if (slots >= 4) return -EINVAL;
/* Enough space */ if (info->empty_slots >= slots) return 0;
/* If no more free slots, and failure to extend. */ /* XXX: You should have called assure_empty before */ if (info->empty_slots == 0) {
printk(KERN_ERR "rh: out of slots; crash is imminent.\n"); return NULL;
}
/* Get empty slot to use */
blk = list_entry(info->empty_list.next, rh_block_t, list);
list_del_init(&blk->list);
info->empty_slots--;
/* Find the block immediately before the given one (if any) */
list_for_each(l, &info->taken_list) {
blk = list_entry(l, rh_block_t, list); if (blk->start > blkn->start) {
list_add_tail(&blkn->list, &blk->list); return;
}
}
list_add_tail(&blkn->list, &info->taken_list);
}
/* * Create a remote heap dynamically. Note that no memory for the blocks * are allocated. It will upon the first allocation
*/
rh_info_t *rh_create(unsignedint alignment)
{
rh_info_t *info;
/* Alignment must be a power of two */ if ((alignment & (alignment - 1)) != 0) return ERR_PTR(-EINVAL);
info = kmalloc(sizeof(*info), GFP_ATOMIC); if (info == NULL) return ERR_PTR(-ENOMEM);
/*
 * Destroy a dynamically created remote heap.  Deallocate only if the areas
 * are not static
 */
void rh_destroy(rh_info_t * info)
{
	/* Only release what was kmalloc'ed on our behalf; statically
	 * supplied areas belong to the caller. */
	if (!(info->flags & RHIF_STATIC_BLOCK))
		kfree(info->block);

	if (!(info->flags & RHIF_STATIC_INFO))
		kfree(info);
}
EXPORT_SYMBOL_GPL(rh_destroy);
/*
 * Initialize in place a remote heap info block.  This is needed to support
 * operation very early in the startup of the kernel, when it is not yet safe
 * to call kmalloc.
 */
void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
	     rh_block_t * block)
{
	int i;
	rh_block_t *blk;

	/* Alignment must be a power of two */
	if ((alignment & (alignment - 1)) != 0)
		return;

	info->alignment = alignment;

	/* Initially everything as empty */
	info->block = block;
	info->max_blocks = max_blocks;
	info->empty_slots = max_blocks;
	/* caller owns both structures; rh_destroy() must not kfree them */
	info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK;

	/* the list heads must be valid before any list_add below */
	INIT_LIST_HEAD(&info->empty_list);
	INIT_LIST_HEAD(&info->free_list);
	INIT_LIST_HEAD(&info->taken_list);

	/* Add all new blocks to the free list */
	for (i = 0, blk = block; i < max_blocks; i++, blk++)
		list_add(&blk->list, &info->empty_list);
}
EXPORT_SYMBOL_GPL(rh_init);
/* Attach a free memory region, coalesces regions if adjacent */
int rh_attach_region(rh_info_t * info, unsigned long start, int size)
{
	rh_block_t *blk;
	unsigned long s, e, m;
	int r;

	/* The region must be aligned */
	s = start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (IS_ERR_VALUE(e) || (e < s))
		return -ERANGE;

	/* Take final values */
	start = s;
	size = e - s;

	/* Grow the blocks, if needed */
	r = assure_empty(info, 1);
	if (r < 0)
		return r;

	blk = get_slot(info);
	blk->start = start;
	blk->size = size;
	blk->owner = NULL;

	attach_free_block(info, blk);

	return 0;
}
EXPORT_SYMBOL_GPL(rh_attach_region);

/* Detach a free region from the heap.  The region must lie entirely
 * inside one free block; returns its (aligned) start, or a negative
 * errno cast to unsigned long on failure. */
unsigned long rh_detach_region(rh_info_t * info, unsigned long start, int size)
{
	struct list_head *l;
	rh_block_t *blk, *newblk;
	unsigned long s, e, m, bs = 0, be = 0;

	/* Validate size */
	if (size <= 0)
		return (unsigned long) -EINVAL;

	/* The region must be aligned */
	s = start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	if (assure_empty(info, 1) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = blk->start;
		be = blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Delete from free list, release slot */
		list_del(&blk->list);
		release_slot(info, blk);
		return s;
	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start += size;
		blk->size -= size;

	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* the back free fragment */
		newblk = get_slot(info);
		newblk->start = e;
		newblk->size = be - e;

		list_add(&newblk->list, &blk->list);
	}

	return s;
}
EXPORT_SYMBOL_GPL(rh_detach_region);
/* Allocate a block of memory at the specified alignment.  The value returned
 * is an offset into the buffer initialized by rh_init(), or a negative number
 * if there is an error.
 */
unsigned long rh_alloc_align(rh_info_t * info, int size, int alignment, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk;
	rh_block_t *newblk;
	unsigned long start, sp_size;

	/* Validate size, and alignment must be power of two */
	if (size <= 0 || (alignment & (alignment - 1)) != 0)
		return (unsigned long) -EINVAL;

	/* Align to configured alignment */
	size = (size + (info->alignment - 1)) & ~(info->alignment - 1);

	/* a split may consume up to two fresh slots */
	if (assure_empty(info, 2) < 0)
		return (unsigned long) -ENOMEM;

	/* First-fit search: a block fits when the aligned start plus
	 * size still lies within the block. */
	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		if (size <= blk->size) {
			start = (blk->start + alignment - 1) & ~(alignment - 1);
			if (start + size <= blk->start + blk->size)
				break;
		}
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Just fits */
	if (blk->size == size) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		newblk = blk;
	} else {
		/* Fragment caused, split if needed */
		/* Create block for fragment in the beginning */
		sp_size = start - blk->start;
		if (sp_size) {
			rh_block_t *spblk;

			spblk = get_slot(info);
			spblk->start = blk->start;
			spblk->size = sp_size;
			/* add before the blk */
			list_add(&spblk->list, blk->list.prev);
		}
		newblk = get_slot(info);
		newblk->start = start;
		newblk->size = size;

		/* blk still in free list, with updated start and size
		 * for fragment in the end */
		blk->start = start + size;
		blk->size -= sp_size + size;
		/* No fragment in the end, remove blk */
		if (blk->size == 0) {
			list_del(&blk->list);
			release_slot(info, blk);
		}
	}

	newblk->owner = owner;
	start = newblk->start;
	attach_taken_block(info, newblk);

	return start;
}
EXPORT_SYMBOL_GPL(rh_alloc_align);
/* Allocate a block of memory at the default alignment.  The value returned is
 * an offset into the buffer initialized by rh_init(), or a negative number if
 * there is an error.
 */
unsigned long rh_alloc(rh_info_t * info, int size, const char *owner)
{
	return rh_alloc_align(info, size, info->alignment, owner);
}
EXPORT_SYMBOL_GPL(rh_alloc);
/* Allocate a block of memory at the given offset, rounded up to the default
 * alignment.  The value returned is an offset into the buffer initialized by
 * rh_init(), or a negative number if there is an error.
 */
unsigned long rh_alloc_fixed(rh_info_t * info, unsigned long start, int size, const char *owner)
{
	struct list_head *l;
	rh_block_t *blk, *newblk1, *newblk2;
	unsigned long s, e, m, bs = 0, be = 0;

	/* Validate size */
	if (size <= 0)
		return (unsigned long) -EINVAL;

	/* The region must be aligned */
	s = start;
	e = s + size;
	m = info->alignment - 1;

	/* Round start up */
	s = (s + m) & ~m;

	/* Round end down */
	e = e & ~m;

	/* a split may consume up to two fresh slots */
	if (assure_empty(info, 2) < 0)
		return (unsigned long) -ENOMEM;

	blk = NULL;
	list_for_each(l, &info->free_list) {
		blk = list_entry(l, rh_block_t, list);
		/* The range must lie entirely inside one free block */
		bs = blk->start;
		be = blk->start + blk->size;
		if (s >= bs && e <= be)
			break;
		blk = NULL;
	}

	if (blk == NULL)
		return (unsigned long) -ENOMEM;

	/* Perfect fit */
	if (bs == s && be == e) {
		/* Move from free list to taken list */
		list_del(&blk->list);
		blk->owner = owner;

		start = blk->start;
		attach_taken_block(info, blk);

		return start;

	}

	/* blk still in free list, with updated start and/or size */
	if (bs == s || be == e) {
		if (bs == s)
			blk->start += size;
		blk->size -= size;

	} else {
		/* The front free fragment */
		blk->size = s - bs;

		/* The back free fragment */
		newblk2 = get_slot(info);
		newblk2->start = e;
		newblk2->size = be - e;

		list_add(&newblk2->list, &blk->list);
	}

	newblk1 = get_slot(info);
	newblk1->start = s;
	newblk1->size = e - s;
	newblk1->owner = owner;

	start = newblk1->start;
	attach_taken_block(info, newblk1);

	return start;
}
EXPORT_SYMBOL_GPL(rh_alloc_fixed);
/* Deallocate the memory previously allocated by one of the rh_alloc functions.
 * The return value is the size of the deallocated block, or a negative number
 * if there is an error.
 */
int rh_free(rh_info_t * info, unsigned long start)
{
	rh_block_t *blk, *blk2;
	struct list_head *l;
	int size;

	/* Linear search for block; taken_list is sorted by start, so
	 * the last block with block->start <= start is the candidate. */
	blk = NULL;
	list_for_each(l, &info->taken_list) {
		blk2 = list_entry(l, rh_block_t, list);
		if (start < blk2->start)
			break;
		blk = blk2;
	}

	if (blk == NULL || start > (blk->start + blk->size))
		return -EINVAL;

	/* Remove from taken list */
	list_del(&blk->list);

	/* Get size of freed block */
	size = blk->size;
	attach_free_block(info, blk);

	return size;
}
EXPORT_SYMBOL_GPL(rh_free);