/*
 * Objects waiting on a single grace-period sequence number.
 */
struct rcu_pending_seq {
	/*
	 * We're using a radix tree like a vector - we're just pushing elements
	 * onto the end; we're using a radix tree instead of an actual vector to
	 * avoid reallocation overhead
	 */
	GENRADIX(struct rcu_head *) objs;
	size_t			nr;	/* number of objects pushed so far */
	/* next free slot within the current radix tree node, NULL when a new
	 * node must be allocated: */
	struct rcu_head		**cursor;
	rcu_gp_poll_state_t	seq;	/* grace-period cookie these objects wait on */
};
/*
 * Per-CPU pending-object state, protected by @lock.
 * NOTE(review): this struct is truncated in this chunk — the closing brace and
 * any trailing members (e.g. the callback state read elsewhere as p->cb /
 * p->cb_armed) are outside the visible region.
 */
struct rcu_pending_pcpu {
	struct rcu_pending	*parent;
	spinlock_t		lock;
	int			cpu;

	/*
	 * We can't bound the number of unprocessed gp sequence numbers, and we
	 * can't efficiently merge radix trees for expired grace periods, so we
	 * need darray/vector:
	 */
	DARRAY_PREALLOCATED(struct rcu_pending_seq, 4) objs;

	/* Third entry is for expired objects: */
	struct rcu_pending_list	lists[NUM_ACTIVE_RCU_POLL_OLDSTATE + 1];
	/*
	 * NOTE(review): fragment — the enclosing function header and switch
	 * statement begin outside the visible region. This is the dispatch on
	 * the pending-object processing mode.
	 */
	case RCU_PENDING_CALL_RCU:
		/* Radix-tree objects: invoke each rcu_head's callback directly. */
		for (size_t i = 0; i < objs.nr; i++) {
			struct rcu_head *obj = *genradix_ptr(&objs.objs, i);
			obj->func(obj);
		}
		genradix_free(&objs.objs);

		/* Linked-list objects: walk the singly linked list. */
		while (list) {
			struct rcu_head *obj = list;
#ifdef __KERNEL__
			list = obj->next;
#else
			/* userspace rcu_head has no direct ->next member */
			list = (void *) obj->next.next;
#endif
			obj->func(obj);
		}
		break;

	default:
		/* Generic mode: hand each object to the owner's process callback. */
		for (size_t i = 0; i < objs.nr; i++)
			pending->process(pending, *genradix_ptr(&objs.objs, i));
		genradix_free(&objs.objs);

		while (list) {
			struct rcu_head *obj = list;
#ifdef __KERNEL__
			list = obj->next;
#else
			list = (void *) obj->next.next;
#endif
			pending->process(pending, obj);
		}
		break;
	}
}
staticbool process_finished_items(struct rcu_pending *pending, struct rcu_pending_pcpu *p, unsignedlong flags)
{ /* * XXX: we should grab the gp seq once and avoid multiple function * calls, this is called from __rcu_pending_enqueue() fastpath in * may_sleep==true mode
*/ if ((p->objs.nr && __poll_state_synchronize_rcu(pending->srcu, p->objs.data[0].seq)) ||
(p->lists[0].head && __poll_state_synchronize_rcu(pending->srcu, p->lists[0].seq)) ||
(p->lists[1].head && __poll_state_synchronize_rcu(pending->srcu, p->lists[1].seq)) ||
p->lists[2].head) {
__process_finished_items(pending, p, flags); returntrue;
}
/*
 * rcu_pending_enqueue_list - slow path: enqueue onto a per-gp-cookie linked list
 *
 * Returns true if @head was added to a previously empty list (i.e. the caller
 * must ensure a grace period is started for @seq), false if it was added to an
 * already-populated list or if the grace period expired while the lock was
 * dropped (in which case the object has already been freed).
 *
 * May temporarily drop and retake @p->lock (via *@flags) for the
 * kvfree_rcu_mightsleep() GFP_KERNEL allocation.
 */
static noinline bool
rcu_pending_enqueue_list(struct rcu_pending_pcpu *p, rcu_gp_poll_state_t seq,
			 struct rcu_head *head, void *ptr,
			 unsigned long *flags)
{
	if (ptr) {
		if (!head) {
			/*
			 * kvfree_rcu_mightsleep(): we weren't passed an
			 * rcu_head, but we need one: use the low bit of the
			 * pointer to free to flag that the head needs to be
			 * freed as well:
			 */
			ptr = (void *)(((unsigned long) ptr)|1UL);
			head = kmalloc(sizeof(*head), __GFP_NOWARN);
			if (!head) {
				spin_unlock_irqrestore(&p->lock, *flags);
				head = kmalloc(sizeof(*head), GFP_KERNEL|__GFP_NOFAIL);
				/*
				 * dropped lock, did GFP_KERNEL allocation,
				 * check for gp expiration
				 */
				if (unlikely(__poll_state_synchronize_rcu(p->parent->srcu, seq))) {
					/* undo the low-bit tag before freeing */
					kvfree(--ptr);
					kfree(head);
					spin_lock_irqsave(&p->lock, *flags);
					return false;
				}
			}
		}

		head->func = ptr;
	}
again:
	/* First, look for an existing list for this grace-period cookie: */
	for (struct rcu_pending_list *i = p->lists;
	     i < p->lists + NUM_ACTIVE_RCU_POLL_OLDSTATE; i++) {
		if (rcu_gp_poll_cookie_eq(i->seq, seq)) {
			rcu_pending_list_add(i, head);
			return false;
		}
	}

	/* No match - claim an empty slot: */
	for (struct rcu_pending_list *i = p->lists;
	     i < p->lists + NUM_ACTIVE_RCU_POLL_OLDSTATE; i++) {
		if (!i->head) {
			i->seq = seq;
			rcu_pending_list_add(i, head);
			return true;
		}
	}

	/* All slots busy: fold expired lists together to free one, retry: */
	merge_expired_lists(p);
	goto again;
}
/*
 * __rcu_pending_enqueue: enqueue a pending RCU item, to be processed (via
 * pending->process) once grace period elapses.
 *
 * Attempt to enqueue items onto a radix tree; if memory allocation fails, fall
 * back to a linked list.
 *
 * - If @ptr is NULL, we're enqueuing an item for a generic @pending with a
 *   process callback
 *
 * - If @ptr and @head are both not NULL, we're kvfree_rcu()
 *
 * - If @ptr is not NULL and @head is, we're kvfree_rcu_mightsleep()
 *
 * - If @may_sleep is true, will do GFP_KERNEL memory allocations and process
 *   expired items.
 */
static __always_inline void
__rcu_pending_enqueue(struct rcu_pending *pending, struct rcu_head *head,
		      void *ptr, bool may_sleep)
{
	/*
	 * NOTE(review): the local declarations, the allocation-retry body and
	 * the list_add: label were lost in this copy of the file (three
	 * "goto list_add" statements had no target); restored below — confirm
	 * against upstream.
	 */
	struct rcu_pending_pcpu *p;
	struct rcu_pending_seq *objs;
	struct genradix_node *new_node = NULL;
	unsigned long flags;
	bool start_gp = false;

	/* We could technically be scheduled before taking the lock and end up
	 * using a different cpu's rcu_pending_pcpu: that's ok, it needs a lock
	 * anyways
	 *
	 * And we have to do it this way to avoid breaking PREEMPT_RT, which
	 * redefines how spinlocks work:
	 */
	p = raw_cpu_ptr(pending->p);
	spin_lock_irqsave(&p->lock, flags);
	rcu_gp_poll_state_t seq = __get_state_synchronize_rcu(pending->srcu);
restart:
	if (may_sleep &&
	    unlikely(process_finished_items(pending, p, flags)))
		goto check_expired;

	/*
	 * In kvfree_rcu() mode, the radix tree is only for slab pointers so
	 * that we can do kfree_bulk() - vmalloc pointers always use the linked
	 * list:
	 */
	if (ptr && unlikely(is_vmalloc_addr(ptr)))
		goto list_add;

	objs = get_object_radix(p, seq);
	if (unlikely(!objs))
		goto list_add;

	if (unlikely(!objs->cursor)) {
		/*
		 * New radix tree nodes must be added under @p->lock because the
		 * tree root is in a darray that can be resized (typically,
		 * genradix supports concurrent unlocked allocation of new
		 * nodes) - hence preallocation and the retry loop:
		 */
		objs->cursor = genradix_ptr_alloc_preallocated_inlined(&objs->objs,
						objs->nr, &new_node, GFP_ATOMIC|__GFP_NOWARN);
		if (unlikely(!objs->cursor)) {
			if (may_sleep) {
				spin_unlock_irqrestore(&p->lock, flags);

				gfp_t gfp = GFP_KERNEL;
				if (!head)
					gfp |= __GFP_NOFAIL;

				/*
				 * Preallocate a node outside the lock; if that
				 * fails (only possible without __GFP_NOFAIL),
				 * give up on sleeping allocations and fall back
				 * to the list path on retry:
				 */
				new_node = genradix_alloc_node(gfp);
				if (!new_node)
					may_sleep = false;
				goto check_expired;
			}
			goto list_add;
		}
	}

	*objs->cursor++ = ptr ?: head;
	/* zero cursor if we hit the end of a radix tree node: */
	if (!(((ulong) objs->cursor) & (GENRADIX_NODE_SIZE - 1)))
		objs->cursor = NULL;
	start_gp = !objs->nr;
	objs->nr++;
start_gp:
	if (unlikely(start_gp)) {
		/*
		 * We only have one callback (ideally, we would have one for
		 * every outstanding graceperiod) - so if our callback is
		 * already in flight, we may still have to start a grace period
		 * (since we used get_state() above, not start_poll())
		 */
		if (!p->cb_armed) {
			p->cb_armed = true;
			__call_rcu(pending->srcu, &p->cb, rcu_pending_rcu_cb);
		} else {
			__start_poll_synchronize_rcu(pending->srcu);
		}
	}
	spin_unlock_irqrestore(&p->lock, flags);
free_node:
	if (new_node)
		genradix_free_node(new_node);
	return;

list_add:
	start_gp = rcu_pending_enqueue_list(p, seq, head, ptr, &flags);
	goto start_gp;

check_expired:
	if (unlikely(__poll_state_synchronize_rcu(pending->srcu, seq))) {
		/* Grace period already elapsed: process the item immediately. */
		switch ((ulong) pending->process) {
		case RCU_PENDING_KVFREE:
			kvfree(ptr);
			break;
		case RCU_PENDING_CALL_RCU:
			head->func(head);
			break;
		default:
			pending->process(pending, head);
			break;
		}
		goto free_node;
	}

	/* Retake the lock (we may have been migrated: re-read the pcpu ptr): */
	p = raw_cpu_ptr(pending->p);
	spin_lock_irqsave(&p->lock, flags);
	goto restart;
}
/**
 * rcu_pending_init: - initialize a rcu_pending
 *
 * @pending:	Object to init
 * @srcu:	May optionally be used with an srcu_struct; if NULL, uses normal
 *		RCU flavor
 * @process:	Callback function invoked on objects once their RCU barriers
 *		have completed; if NULL, kvfree() is used.
 *
 * NOTE(review): this function is truncated in this chunk — the per-cpu
 * initialization loop and the trailing assignments/return are outside the
 * visible region.
 */
int rcu_pending_init(struct rcu_pending *pending,
		     struct srcu_struct *srcu,
		     rcu_pending_process_fn process)
{
	pending->p = alloc_percpu(struct rcu_pending_pcpu);
	if (!pending->p)
		return -ENOMEM;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.