/* initialize elements of percpu objpool_slot */
slot->mask = pool->capacity - 1;
for (i = 0; i < nodes; i++) { if (objinit) { int rc = objinit(obj, context); if (rc) return rc;
}
slot->entries[slot->tail & slot->mask] = obj;
obj = obj + pool->obj_size;
slot->tail++;
slot->last = slot->tail;
pool->nr_objs++;
}
return 0;
}
/* allocate and initialize percpu slots */
static int
objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
			  void *context, objpool_init_obj_cb objinit)
{
	int i, cpu_count = 0;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct objpool_slot *slot;
		int nodes, size, rc;

		/* skip the cpu node which could never be present */
		if (!cpu_possible(i))
			continue;

		/*
		 * compute how many objects to be allocated with this slot:
		 * spread nr_objs evenly, giving one extra object to each of
		 * the first (nr_objs % nr_possible_cpus) present cpus
		 */
		nodes = nr_objs / pool->nr_possible_cpus;
		if (cpu_count < (nr_objs % pool->nr_possible_cpus))
			nodes++;
		cpu_count++;

		/*
		 * here we allocate percpu-slot & objs together in a single
		 * allocation to make it more compact, taking advantage of
		 * warm caches and TLB hits: the slot header, the ring of
		 * 'capacity' entry pointers and the objects themselves are
		 * laid out back to back.
		 *
		 * NOTE(review): upstream prefers vmalloc here and falls back
		 * to kmalloc when it is unavailable (e.g. GFP_ATOMIC);
		 * kmalloc_node is kept as the single path to avoid changing
		 * allocator semantics — kvfree() in
		 * objpool_fini_percpu_slots() frees either kind.
		 */
		size = sizeof(struct objpool_slot) +
		       sizeof(void *) * pool->capacity +
		       pool->obj_size * nodes;
		slot = kmalloc_node(size, pool->gfp, cpu_to_node(i));
		if (!slot)
			return -ENOMEM;
		memset(slot, 0, size);
		pool->cpu_slots[i] = slot;

		/* initialize the objpool_slot of cpu node i */
		rc = objpool_init_percpu_slot(pool, slot, nodes, context, objinit);
		if (rc)
			return rc;
	}

	return 0;
}
/* cleanup all percpu slots of the object pool */
static void objpool_fini_percpu_slots(struct objpool_head *pool)
{
	int cpu;

	/* nothing to do if the slot array was never allocated */
	if (!pool->cpu_slots)
		return;

	/* each slot may be vmalloc'd or kmalloc'd; kvfree handles both */
	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		kvfree(pool->cpu_slots[cpu]);

	kfree(pool->cpu_slots);
}
/* initialize object pool and pre-allocate objects */ int objpool_init(struct objpool_head *pool, int nr_objs, int object_size,
gfp_t gfp, void *context, objpool_init_obj_cb objinit,
objpool_fini_cb release)
{ int rc, capacity, slot_size;
/* call user's cleanup callback if provided */ if (pool->release)
pool->release(pool, pool->context);
}
EXPORT_SYMBOL_GPL(objpool_free);
/* drop the allocated object, rather reclaim it to objpool */ int objpool_drop(void *obj, struct objpool_head *pool)
{ if (!obj || !pool) return -EINVAL;
if (refcount_dec_and_test(&pool->ref)) {
objpool_free(pool); return 0;
}
/* drop unused objects and deref objpool for releasing */
void objpool_fini(struct objpool_head *pool)
{
	/* the pool itself holds one extra reference */
	int refs = 1;

	/* pop and count every object still sitting in the pool */
	while (objpool_pop(pool))
		refs++;

	/* release all counted references at once; free on the last one */
	if (refcount_sub_and_test(refs, &pool->ref))
		objpool_free(pool);
}
EXPORT_SYMBOL_GPL(objpool_fini);
Messung V0.5
¤ Dauer der Verarbeitung: 0.12 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.