/**
 * lockdep_rht_bucket_is_held - lockdep assertion helper for bucket locks
 * @tbl: bucket table containing the bucket
 * @hash: index of the bucket to check
 *
 * Returns non-zero when the bit spin lock on the bucket head may be
 * considered held.  Reports "held" unconditionally when lockdep is
 * disabled (!debug_locks) or when the table uses nested bucket tables
 * (tbl->nest), whose top-level entries carry no lock bit to test.
 */
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	if (!debug_locks)
		return 1;
	if (unlikely(tbl->nest))
		return 1;
	/* Bucket lock is bit 0 of the bucket head pointer. */
	return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held); #else #define ASSERT_RHT_MUTEX(HT) #endif
staticinlineunion nested_table *nested_table_top( conststruct bucket_table *tbl)
{ /* The top-level bucket entry does not need RCU protection * because it's set at the same time as tbl->nest.
*/ return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
}
if (!bkt) return 0;
flags = rht_lock(old_tbl, bkt);
while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
;
if (err == -ENOENT)
err = 0;
rht_unlock(old_tbl, bkt, flags);
return err;
}
/*
 * rhashtable_rehash_attach - publish a new table as the rehash target.
 * @ht: hash table being resized
 * @old_tbl: current table
 * @new_tbl: freshly allocated replacement table
 *
 * Returns 0 on success or -EEXIST if another resize already attached a
 * future table.
 *
 * NOTE(review): the success return and closing brace were truncated in
 * this chunk and have been restored.
 */
static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * As cmpxchg() provides strong barriers, we do not need
	 * rcu_assign_pointer().
	 */
	if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL,
		    new_tbl) != NULL)
		return -EEXIST;

	return 0;
}
/* Wait for readers. All new readers will see the new * table, and thus no references to the old table will * remain. * We do this inside the locked region so that * rhashtable_walk_stop() can use rcu_head_after_call_rcu() * to check if it should not re-link the table.
*/
call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
spin_unlock(&ht->lock);
err = rhashtable_rehash_attach(ht, old_tbl, new_tbl); if (err)
bucket_table_free(new_tbl);
return err;
}
/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., the smallest
 * size would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	/* Size for ~2/3 load factor, clamped to the configured minimum. */
	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	/* A rehash is already in flight; let it finish first. */
	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	/* NOTE(review): this tail call was truncated in this chunk and has
	 * been restored from the canonical implementation - verify.
	 */
	return rhashtable_rehash_alloc(ht, old_tbl, size);
}
RCU_INIT_POINTER(list->next, plist);
head = rht_dereference_bucket(head->next, tbl, hash);
RCU_INIT_POINTER(list->rhead.next, head); if (pprev)
rcu_assign_pointer(*pprev, obj); else /* Need to preserve the bit lock */
rht_assign_locked(bkt, obj);
/** * rhashtable_walk_enter - Initialise an iterator * @ht: Table to walk over * @iter: Hash table Iterator * * This function prepares a hash table walk. * * Note that if you restart a walk after rhashtable_walk_stop you * may see the same object twice. Also, you may miss objects if * there are removals in between rhashtable_walk_stop and the next * call to rhashtable_walk_start. * * For a completely stable walk you should construct your own data * structure outside the hash table. * * This function may be called from any process context, including * non-preemptible context, but cannot be called from softirq or * hardirq context. * * You must call rhashtable_walk_exit after this function returns.
*/ void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
iter->ht = ht;
iter->p = NULL;
iter->slot = 0;
iter->skip = 0;
iter->end_of_table = 0;
/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	/* Unlink the walker from the table's walker list, if registered. */
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
/**
 * rhashtable_walk_start_check - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk at the current iterator position.  Note that we take
 * the RCU lock in all cases including when we return an error.  So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 *
 * rhashtable_walk_start is defined as an inline variant that returns
 * void. This is preferred in cases where the caller would ignore
 * resize events and always continue.
 */
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;
	bool rhlist = ht->rhlist;

	rcu_read_lock();

	/* Detach from the walker list; a live table means no resize freed it. */
	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (iter->end_of_table)
		return 0;
	if (!iter->walker.tbl) {
		/* Table was resized/freed under us: rewind to the start. */
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		iter->slot = 0;
		iter->skip = 0;
		return -EAGAIN;
	}

	if (iter->p && !rhlist) {
		/*
		 * We need to validate that 'p' is still in the table, and
		 * if so, update 'skip'
		 */
		struct rhash_head *p;
		int skip = 0;

		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			skip++;
			if (p == iter->p) {
				iter->skip = skip;
				goto found;
			}
		}

		iter->p = NULL;
	} else if (iter->p && rhlist) {
		/* Need to validate that 'list' is still in the table, and
		 * if so, update 'skip' and 'p'.
		 */
		struct rhash_head *p;
		struct rhlist_head *list;
		int skip = 0;

		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			for (list = container_of(p, struct rhlist_head, rhead);
			     list;
			     list = rcu_dereference(list->next)) {
				skip++;
				if (list == iter->list) {
					iter->p = p;
					iter->skip = skip;
					goto found;
				}
			}
		}

		iter->p = NULL;
	}

found:
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
/** * __rhashtable_walk_find_next - Find the next element in a table (or the first * one in case of a new walk). * * @iter: Hash table iterator * * Returns the found object or NULL when the end of the table is reached. * * Returns -EAGAIN if resize event occurred.
*/ staticvoid *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{ struct bucket_table *tbl = iter->walker.tbl; struct rhlist_head *list = iter->list; struct rhashtable *ht = iter->ht; struct rhash_head *p = iter->p; bool rhlist = ht->rhlist;
if (!tbl) return NULL;
for (; iter->slot < tbl->size; iter->slot++) { int skip = iter->skip;
rht_for_each_rcu(p, tbl, iter->slot) { if (rhlist) {
list = container_of(p, struct rhlist_head,
rhead); do { if (!skip) goto next;
skip--;
list = rcu_dereference(list->next);
} while (list);
/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		/* Advance within the current rhlist chain first, then fall
		 * back to the next bucket entry.
		 */
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		/* At the end of this slot, switch to next one and then find
		 * next entry from that point.
		 */
		iter->skip = 0;
		iter->slot++;
	}

	/* NOTE(review): this tail call was truncated in this chunk and has
	 * been restored from the canonical implementation - verify.
	 */
	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
/**
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * @iter:	Hash table iterator
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p)
		return rht_obj(ht, ht->rhlist ? &list->rhead : p);

	/* No object found in current iter, find next one in the table. */

	if (iter->skip) {
		/* A nonzero skip value points to the next entry in the table
		 * beyond that last one that was found. Decrement skip so
		 * we find the current value. __rhashtable_walk_find_next
		 * will restore the original value of skip assuming that
		 * the table hasn't changed.
		 */
		iter->skip--;
	}

	/* NOTE(review): this tail call was truncated in this chunk and has
	 * been restored from the canonical implementation - verify.
	 */
	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
/** * rhashtable_walk_stop - Finish a hash table walk * @iter: Hash table iterator * * Finish a hash table walk. Does not reset the iterator to the start of the * hash table.
*/ void rhashtable_walk_stop(struct rhashtable_iter *iter)
__releases(RCU)
{ struct rhashtable *ht; struct bucket_table *tbl = iter->walker.tbl;
if (!tbl) goto out;
ht = iter->ht;
spin_lock(&ht->lock); if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu)) /* This bucket table is being freed, don't re-link it. */
iter->walker.tbl = NULL; else
list_add(&iter->walker.list, &tbl->walkers);
spin_unlock(&ht->lock);
/* * This is api initialization and thus we need to guarantee the * initial rhashtable allocation. Upon failure, retry with the * smallest possible size with __GFP_NOFAIL semantics.
*/
tbl = bucket_table_alloc(ht, size, GFP_KERNEL); if (unlikely(tbl == NULL)) {
size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
}
/** * rhltable_init - initialize a new hash list table * @hlt: hash list table to be initialized * @params: configuration parameters * * Initializes a new hash list table. * * See documentation for rhashtable_init.
*/ int rhltable_init_noprof(struct rhltable *hlt, conststruct rhashtable_params *params)
{ int err;
if (!ht->rhlist) {
free_fn(rht_obj(ht, obj), arg); return;
}
list = container_of(obj, struct rhlist_head, rhead); do {
obj = &list->rhead;
list = rht_dereference(list->next, ht);
free_fn(rht_obj(ht, obj), arg);
} while (list);
}
/** * rhashtable_free_and_destroy - free elements and destroy hash table * @ht: the hash table to destroy * @free_fn: callback to release resources of element * @arg: pointer passed to free_fn * * Stops an eventual async resize. If defined, invokes free_fn for each * element to releasal resources. Please note that RCU protected * readers may still be accessing the elements. Releasing of resources * must occur in a compatible manner. Then frees the bucket array. * * This function will eventually sleep to wait for an async resize * to complete. The caller is responsible that no further write operations * occurs in parallel.
*/ void rhashtable_free_and_destroy(struct rhashtable *ht, void (*free_fn)(void *ptr, void *arg), void *arg)
{ struct bucket_table *tbl, *next_tbl; unsignedint i;
cancel_work_sync(&ht->run_work);
mutex_lock(&ht->mutex);
tbl = rht_dereference(ht->tbl, ht);
restart: if (free_fn) { for (i = 0; i < tbl->size; i++) { struct rhash_head *pos, *next;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.