/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
 */
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/string.h> /* for memset */
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/lru_cache.h>
MODULE_AUTHOR("Philipp Reisner , " "Lars Ellenberg ");
MODULE_DESCRIPTION("lru_cache - Track sets of hot objects");
MODULE_LICENSE("GPL");
/* this is developers aid only.
 * it catches concurrent access (lack of locking on the users part) */
#define PARANOIA_ENTRY() do {		\
BUG_ON(!lc); \
BUG_ON(!lc->nr_elements); \
BUG_ON(test_and_set_bit(__LC_PARANOIA, &lc->flags)); \
} while (0)
#define RETURN(x...)     do { \
	clear_bit_unlock(__LC_PARANOIA, &lc->flags); \
	return x ; } while (0)
/* BUG() if e is not one of the elements tracked by lc */
#define PARANOIA_LC_ELEMENT(lc, e) do {	\
	struct lru_cache *lc_ = (lc);	\
	struct lc_element *e_ = (e);	\
	unsigned i = e_->lc_index;	\
	BUG_ON(i >= lc_->nr_elements);	\
	BUG_ON(lc_->lc_element[i] != e_); } while (0)
/* We need to atomically
 *  - try to grab the lock (set LC_LOCKED)
 *  - only if there is no pending transaction
 *    (neither LC_DIRTY nor LC_STARVING is set)
 * Because of PARANOIA_ENTRY() above abusing lc->flags as well,
 * it is not sufficient to just say
 *	return 0 == cmpxchg(&lc->flags, 0, LC_LOCKED);
 */
int lc_try_lock(struct lru_cache *lc)
{
	unsigned long val;

	do {
		val = cmpxchg(&lc->flags, 0, LC_LOCKED);
	} while (unlikely(val == LC_PARANOIA));
	/* Spin until no-one is inside a PARANOIA_ENTRY()/RETURN() section. */

	return 0 == val;
}
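/*
 * Usage sketch (illustrative, not part of the original file; "my_cache" is a
 * hypothetical pointer obtained from lc_create()): lc_try_lock() only
 * succeeds while no change is pending, so it can be used to briefly freeze
 * the active set, e.g. to dump or resize it.  lc_unlock() from
 * <linux/lru_cache.h> is the counterpart.
 *
 *	if (lc_try_lock(my_cache)) {
 *		... inspect or rewrite the whole active set ...
 *		lc_unlock(my_cache);
 *	} else {
 *		... a transaction is pending, try again later ...
 *	}
 */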
/**
 * lc_create - prepares to track objects in an active set
 * @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details
 * @cache: cache root pointer
 * @max_pending_changes: maximum changes to accumulate until a transaction is required
 * @e_count: number of elements allowed to be active simultaneously
 * @e_size: size of the tracked objects
 * @e_off: offset to the &struct lc_element member in a tracked object
 *
 * Returns a pointer to a newly initialized struct lru_cache on success,
 * or NULL on (allocation) failure.
 */
struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
		unsigned max_pending_changes,
		unsigned e_count, size_t e_size, size_t e_off)
{
	struct hlist_head *slot = NULL;
	struct lc_element **element = NULL;
	struct lru_cache *lc;
	struct lc_element *e;
	unsigned cache_obj_size = kmem_cache_size(cache);
	unsigned i;

	WARN_ON(cache_obj_size < e_size);
	if (cache_obj_size < e_size)
		return NULL;

	/* e_count too big; would probably fail the allocation below anyways.
	 * for typical use cases, e_count should be few thousand at most. */
	if (e_count > LC_MAX_ACTIVE)
		return NULL;

	slot = kcalloc(e_count, sizeof(struct hlist_head), GFP_KERNEL);
	if (!slot)
		goto out_fail;
	element = kcalloc(e_count, sizeof(struct lc_element *), GFP_KERNEL);
	if (!element)
		goto out_fail;
	lc = kzalloc(sizeof(*lc), GFP_KERNEL);
	if (!lc)
		goto out_fail;

	INIT_LIST_HEAD(&lc->in_use);
	INIT_LIST_HEAD(&lc->lru);
	INIT_LIST_HEAD(&lc->free);
	INIT_LIST_HEAD(&lc->to_be_changed);

	lc->name = name;
	lc->element_size = e_size;
	lc->element_off = e_off;
	lc->nr_elements = e_count;
	lc->max_pending_changes = max_pending_changes;
	lc->lc_cache = cache;
	lc->lc_element = element;
	lc->lc_slot = slot;
	/* preallocate all objects */
	for (i = 0; i < e_count; i++) {
		void *p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			break;
memset(p, 0, lc->element_size);
e = p + e_off;
e->lc_index = i;
e->lc_number = LC_FREE;
e->lc_new_number = LC_FREE;
list_add(&e->list, &lc->free);
element[i] = e;
	}
	if (i == e_count)
		return lc;
	/* else: could not allocate all elements, give up */
	while (i) {
		void *p = element[--i];
kmem_cache_free(cache, p - e_off);
}
kfree(lc);
out_fail:
kfree(element);
	kfree(slot);
	return NULL;
}
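/*
 * Usage sketch (illustrative, not part of the original file): the tracked
 * object embeds a struct lc_element, and its size and offset are passed to
 * lc_create().  All identifiers prefixed with "my_" are hypothetical.
 *
 *	struct my_extent {
 *		struct lc_element lce;
 *		unsigned long flags;
 *	};
 *
 *	struct kmem_cache *my_pool;
 *	struct lru_cache *my_lru;
 *
 *	my_pool = kmem_cache_create("my_extents", sizeof(struct my_extent),
 *				    0, 0, NULL);
 *	if (!my_pool)
 *		return -ENOMEM;
 *	... 16 pending changes per transaction, 61 simultaneously active
 *	    elements ...
 *	my_lru = lc_create("my-extents", my_pool, 16, 61,
 *			   sizeof(struct my_extent),
 *			   offsetof(struct my_extent, lce));
 *	if (!my_lru)
 *		return -ENOMEM;
 *	...
 *	lc_destroy(my_lru);
 *	kmem_cache_destroy(my_pool);
 */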
/**
 * lc_destroy - frees memory allocated by lc_create()
 * @lc: the lru cache to destroy
 */
void lc_destroy(struct lru_cache *lc)
{
	unsigned i;

	if (!lc)
		return;
	for (i = 0; i < lc->nr_elements; i++)
lc_free_by_index(lc, i);
kfree(lc->lc_element);
kfree(lc->lc_slot);
kfree(lc);
}
/**
 * lc_reset - does a full reset for @lc and the hash table slots.
 * @lc: the lru cache to operate on
 *
 * It is roughly the equivalent of re-allocating a fresh lru_cache object,
 * basically a short cut to lc_destroy(lc); lc = lc_create(...);
 */
void lc_reset(struct lru_cache *lc)
{
	unsigned i;

	lc->used = 0;
	lc->hits = 0;
	lc->misses = 0;
	lc->starving = 0;
	lc->locked = 0;
	lc->changed = 0;
	lc->pending_changes = 0;
	lc->flags = 0;
	INIT_LIST_HEAD(&lc->in_use);
	INIT_LIST_HEAD(&lc->lru);
	INIT_LIST_HEAD(&lc->free);
	INIT_LIST_HEAD(&lc->to_be_changed);
	memset(lc->lc_slot, 0, sizeof(struct hlist_head) * lc->nr_elements);

	for (i = 0; i < lc->nr_elements; i++) {
		struct lc_element *e = lc->lc_element[i];
		void *p = e;
p -= lc->element_off;
memset(p, 0, lc->element_size); /* re-init it */
e->lc_index = i;
e->lc_number = LC_FREE;
e->lc_new_number = LC_FREE;
list_add(&e->list, &lc->free);
}
}
/**
 * lc_seq_printf_stats - print stats about @lc into @seq
 * @seq: the seq_file to print into
 * @lc: the lru cache to print statistics of
 */
void lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc)
{
	/* NOTE:
	 * total calls to lc_get are
	 * (starving + hits + misses)
	 * misses include "locked" count (update from another thread in
	 * progress) and "changed", when this in fact led to a successful
	 * update of the cache.
	 */
seq_printf(seq, "\t%s: used:%u/%u hits:%lu misses:%lu starving:%lu locked:%lu changed:%lu\n",
lc->name, lc->used, lc->nr_elements,
lc->hits, lc->misses, lc->starving, lc->locked, lc->changed);
}
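/*
 * Usage sketch (illustrative, not part of the original file): a procfs or
 * debugfs show routine registered through single_open() can simply forward
 * to lc_seq_printf_stats().  "my_lru" is hypothetical.
 *
 *	static int my_stats_show(struct seq_file *seq, void *unused)
 *	{
 *		lc_seq_printf_stats(seq, my_lru);
 *		return 0;
 *	}
 */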
static struct lc_element *__lc_find(struct lru_cache *lc, unsigned int enr,
		bool include_changing)
{
	struct lc_element *e;

	BUG_ON(!lc);
BUG_ON(!lc->nr_elements);
	hlist_for_each_entry(e, lc_hash_slot(lc, enr), collision) {
		/* "about to be changed" elements, pending transaction commit,
		 * are hashed by their "new number". "Normal" elements have
		 * lc_number == lc_new_number. */
		if (e->lc_new_number != enr)
			continue;
		if (e->lc_new_number == e->lc_number || include_changing)
			return e;
		break;
	}
	return NULL;
}
/**
 * lc_find - find element by label, if present in the hash table
 * @lc: The lru_cache object
 * @enr: element number
 *
 * Returns the pointer to an element, if the element with the requested
 * "label" or element number is present in the hash table,
 * or NULL if not found. Does not change the refcnt.
 * Ignores elements that are "about to be used", i.e. not yet in the active
 * set, but still pending transaction commit.
 */
struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr)
{
	return __lc_find(lc, enr, 0);
}
/**
 * lc_is_used - find element by label
 * @lc: The lru_cache object
 * @enr: element number
 *
 * Returns true, if the element with the requested "label" or element number is
 * present in the hash table, and is used (refcnt > 0).
 * Also finds elements that are not _currently_ used but only "about to be
 * used", i.e. on the "to_be_changed" list, pending transaction commit.
 */
bool lc_is_used(struct lru_cache *lc, unsigned int enr)
{
	struct lc_element *e = __lc_find(lc, enr, 1);

	return e && e->refcnt;
}
/**
 * lc_del - removes an element from the cache
 * @lc: The lru_cache object
 * @e: The element to remove
 *
 * @e must be unused (refcnt == 0). Moves @e from "lru" to "free" list,
 * sets @e->enr to %LC_FREE.
 */
void lc_del(struct lru_cache *lc, struct lc_element *e)
{
PARANOIA_ENTRY();
PARANOIA_LC_ELEMENT(lc, e);
	BUG_ON(e->refcnt);

	e->lc_number = e->lc_new_number = LC_FREE;
	hlist_del_init(&e->collision);
	list_move(&e->list, &lc->free);
	RETURN();
}
static int lc_unused_element_available(struct lru_cache *lc)
{
	if (!list_empty(&lc->free))
		return 1; /* something on the free list */
	if (!list_empty(&lc->lru))
		return 1; /* something to evict */

	return 0;
}
/* used as internal flags to __lc_get */
enum {
LC_GET_MAY_CHANGE = 1,
LC_GET_MAY_USE_UNCOMMITTED = 2,
};
static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, unsigned int flags)
{
	struct lc_element *e;

	PARANOIA_ENTRY();
	if (test_bit(__LC_STARVING, &lc->flags)) {
		++lc->starving;
		RETURN(NULL);
	}
	e = __lc_find(lc, enr, 1);
	/* if lc_new_number != lc_number,
	 * this enr is currently being pulled in already,
	 * and will be available once the pending transaction
	 * has been committed. */
	if (e) {
		if (e->lc_new_number != e->lc_number) {
			/* It has been found above, but on the "to_be_changed"
			 * list, not yet committed. Don't pull it in twice,
			 * wait for the transaction, then try again...
			 */
			if (!(flags & LC_GET_MAY_USE_UNCOMMITTED))
				RETURN(NULL);
			/* ... unless the caller is aware of the implications,
			 * probably preparing a cumulative transaction. */
			++e->refcnt;
			++lc->hits;
			RETURN(e);
		}
		/* else: lc_new_number == lc_number; a real hit. */
		++lc->hits;
		if (e->refcnt++ == 0)
			lc->used++;
		list_move(&e->list, &lc->in_use); /* Not evictable... */
		RETURN(e);
	}
	/* e == NULL */

	++lc->misses;
	if (!(flags & LC_GET_MAY_CHANGE))
		RETURN(NULL);
/* To avoid races with lc_try_lock(), first, mark us dirty
* (using test_and_set_bit, as it implies memory barriers), ... */
test_and_set_bit(__LC_DIRTY, &lc->flags);
	/* ... only then check if it is locked anyways. If lc_unlock clears
	 * the dirty bit again, that's not a problem, we will come here again.
	 */
	if (test_bit(__LC_LOCKED, &lc->flags)) {
		++lc->locked;
		RETURN(NULL);
}
/* In case there is nothing available and we can not kick out * the LRU element, we have to wait ...
	 */
	if (!lc_unused_element_available(lc)) {
		set_bit(__LC_STARVING, &lc->flags);
		RETURN(NULL);
}
	/* It was not present in the active set. We are going to recycle an
	 * unused (or even "free") element, but we won't accumulate more than
	 * max_pending_changes changes. */
	if (lc->pending_changes >= lc->max_pending_changes)
		RETURN(NULL);

	/* Recycle an unused (or free) element: lc_prepare_for_change() hashes
	 * it under the new number and puts it on the "to_be_changed" list. */
	e = lc_prepare_for_change(lc, enr);
	BUG_ON(!e);

	clear_bit(__LC_STARVING, &lc->flags);
	BUG_ON(++e->refcnt != 1);
	lc->used++;
	lc->pending_changes++;

	RETURN(e);
}
/**
 * lc_get - get element by label, maybe change the active set
 * @lc: the lru cache to operate on
 * @enr: the label to look up
 *
 * Finds an element in the cache, increases its usage count,
 * "touches" and returns it.
 *
 * In case the requested number is not present, it needs to be added to the
 * cache. Therefore it is possible that another element becomes evicted from
 * the cache. In either case, the user is notified, so he is able to e.g. keep
 * a persistent log of the cache changes, and therefore the objects in use.
 *
 * Return values:
 *  NULL
 *     The cache was marked %LC_STARVING,
 *     or the requested label was not in the active set
 *     and a changing transaction is still pending (@lc was marked %LC_DIRTY).
 *     Or no unused or free element could be recycled (@lc will then be
 *     marked as %LC_STARVING, blocking further lc_get() operations).
 *
 *  pointer to the element with the REQUESTED element number.
 *     In this case, it can be used right away
 *
 *  pointer to an UNUSED element with some different element number,
 *          where that different number may also be %LC_FREE.
 *
 *          In this case, the cache is marked %LC_DIRTY,
 *          so lc_try_lock() will no longer succeed.
 *          The returned element pointer is moved to the "to_be_changed" list,
 *          and registered with the new element number on the hash collision chains,
 *          so it is possible to pick it up from lc_is_used().
 *          Up to "max_pending_changes" (see lc_create()) can be accumulated.
 *          The user now should do whatever housekeeping is necessary,
 *          typically serialize on lc_try_lock_for_transaction(), then call
 *          lc_committed(lc) and lc_unlock(), to finish the change.
 *
 * NOTE: The user needs to check the lc_number on EACH use, so he recognizes
 *       any cache set change.
 */
struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr)
{
	return __lc_get(lc, enr, LC_GET_MAY_CHANGE);
}
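/*
 * Usage sketch (illustrative, not part of the original file): the typical
 * lc_get() cycle as described above.  "my_lru", "enr", "struct my_extent"
 * and my_write_transaction() are hypothetical; lc_entry(),
 * lc_try_lock_for_transaction() and lc_unlock() come from <linux/lru_cache.h>.
 *
 *	struct lc_element *e = lc_get(my_lru, enr);
 *
 *	if (!e)
 *		return -EBUSY;	... starving, locked, or too many pending changes ...
 *	if (e->lc_number != enr) {
 *		... cache miss: e was recycled onto the "to_be_changed" list;
 *		    record the change before using it ...
 *		if (lc_try_lock_for_transaction(my_lru)) {
 *			my_write_transaction(my_lru);
 *			lc_committed(my_lru);
 *			lc_unlock(my_lru);
 *		}
 *	}
 *	... use the containing object: lc_entry(e, struct my_extent, lce) ...
 *	lc_put(my_lru, e);
 */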
/**
 * lc_get_cumulative - like lc_get; also finds to-be-changed elements
 * @lc: the lru cache to operate on
 * @enr: the label to look up
 *
 * Unlike lc_get this also returns the element for @enr, if it belongs to
 * a pending transaction, so the return values are like for lc_get(),
 * plus:
 *
 * pointer to an element already on the "to_be_changed" list.
 *	In this case, the cache was already marked %LC_DIRTY.
 *
 * Caller needs to make sure that the pending transaction is completed,
 * before proceeding to actually use this element.
 */
struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr)
{
	return __lc_get(lc, enr, LC_GET_MAY_CHANGE|LC_GET_MAY_USE_UNCOMMITTED);
}
/**
 * lc_try_get - get element by label, if present; do not change the active set
 * @lc: the lru cache to operate on
 * @enr: the label to look up
 *
 * Finds an element in the cache, increases its usage count,
 * "touches" and returns it.
 *
 * Return values:
 *  NULL
 *     The cache was marked %LC_STARVING,
 *     or the requested label was not in the active set
 *
 *  pointer to the element with the REQUESTED element number.
 *     In this case, it can be used right away
 */
struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr)
{
	return __lc_get(lc, enr, 0);
}
/**
 * lc_committed - tell @lc that pending changes have been recorded
 * @lc: the lru cache to operate on
 *
 * User is expected to serialize on explicit lc_try_lock_for_transaction()
 * before the transaction is started, and later needs to lc_unlock() explicitly
 * as well.
 */
void lc_committed(struct lru_cache *lc)
{
	struct lc_element *e, *tmp;
PARANOIA_ENTRY();
	list_for_each_entry_safe(e, tmp, &lc->to_be_changed, list) {
		/* count number of changes, not number of transactions */
++lc->changed;
e->lc_number = e->lc_new_number;
list_move(&e->list, &lc->in_use);
}
	lc->pending_changes = 0;
	RETURN();
}
/**
 * lc_put - give up refcnt of @e
 * @lc: the lru cache to operate on
 * @e: the element to put
 *
 * If refcnt reaches zero, the element is moved to the lru list,
 * and a %LC_STARVING (if set) is cleared.
 * Returns the new (post-decrement) refcnt.
 */
unsigned int lc_put(struct lru_cache *lc, struct lc_element *e)
{
PARANOIA_ENTRY();
PARANOIA_LC_ELEMENT(lc, e);
BUG_ON(e->refcnt == 0);
	BUG_ON(e->lc_number != e->lc_new_number);
	if (--e->refcnt == 0) {
		/* move it to the front of LRU. */
list_move(&e->list, &lc->lru);
lc->used--;
clear_bit_unlock(__LC_STARVING, &lc->flags);
	}
	RETURN(e->refcnt);
}
/**
 * lc_element_by_index
 * @lc: the lru cache to operate on
 * @i: the index of the element to return
 */
struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i)
{
BUG_ON(i >= lc->nr_elements);
BUG_ON(lc->lc_element[i] == NULL);
	BUG_ON(lc->lc_element[i]->lc_index != i);
	return lc->lc_element[i];
}
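/*
 * Usage sketch (illustrative, not part of the original file): walking all
 * slots by index, e.g. to write the current active set to stable storage.
 * "my_lru" and my_record_slot() are hypothetical; slots not in the active
 * set carry the label %LC_FREE.
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < my_lru->nr_elements; i++) {
 *		struct lc_element *e = lc_element_by_index(my_lru, i);
 *		my_record_slot(i, e->lc_number);
 *	}
 */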
/**
 * lc_seq_dump_details - Dump a complete LRU cache to seq in textual form.
 * @lc: the lru cache to operate on
 * @seq: the &struct seq_file pointer to seq_printf into
 * @utext: user supplied additional "heading" or other info
 * @detail: function pointer the user may provide to dump further details
 * of the object the lc_element is embedded in. May be NULL.
 * Note: a leading space ' ' and trailing newline '\n' is implied.
 */
void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext,
	     void (*detail) (struct seq_file *, struct lc_element *))
{
	unsigned int nr_elements = lc->nr_elements;
	struct lc_element *e;
	int i;

	seq_printf(seq, "\tnn: lc_number (new nr) refcnt %s\n ", utext);
	for (i = 0; i < nr_elements; i++) {
		e = lc_element_by_index(lc, i);
		if (e->lc_number != e->lc_new_number)
			seq_printf(seq, "\t%5d: %6d %8d %6d ",
				   i, e->lc_number, e->lc_new_number, e->refcnt);
		else
			seq_printf(seq, "\t%5d: %6d %-8s %6d ",
				   i, e->lc_number, "-\"-", e->refcnt);
		if (detail)
			detail(seq, e);
seq_putc(seq, '\n');
}
}