/* Helpers to get the local list index.
 *
 * Local list types live above BPF_LOCAL_LIST_T_OFFSET in the shared
 * bpf_lru_list_type numbering space; subtracting the offset maps them
 * to indices into bpf_lru_locallist::lists[].
 */
#define LOCAL_LIST_IDX(t)	((t) - BPF_LOCAL_LIST_T_OFFSET)
#define LOCAL_FREE_LIST_IDX	LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_FREE)
#define LOCAL_PENDING_LIST_IDX	LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_PENDING)
#define IS_LOCAL_LIST_TYPE(t)	((t) >= BPF_LOCAL_LIST_T_OFFSET)
/* Return the next possible CPU after @cpu, wrapping around to the
 * first possible CPU once the end of cpu_possible_mask is reached.
 * Used for round-robin iteration over all possible CPUs.
 */
static int get_next_cpu(int cpu)
{
	cpu = cpumask_next(cpu, cpu_possible_mask);
	/* cpumask_next() returns >= nr_cpu_ids when no further bit is set. */
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_possible_mask);
	return cpu;
}
/* Local list helpers */ staticstruct list_head *local_free_list(struct bpf_lru_locallist *loc_l)
{ return &loc_l->lists[LOCAL_FREE_LIST_IDX];
}
/* NOTE(review): truncated fragment — the enclosing function's header and
 * remaining body are missing from this chunk (presumably a node-removal
 * helper). Code kept byte-intact; recover the full definition from the
 * original source before building.
 */
/* If the removing node is the next_inactive_rotation candidate,
 * move the next_inactive_rotation pointer also.
 */
if (&node->list == l->next_inactive_rotation)
	l->next_inactive_rotation = l->next_inactive_rotation->prev;
/* Move nodes from local list to the LRU list */
/* NOTE(review): truncated fragment — the body after the sanity guard is
 * missing from this chunk; recover it from the original source.
 */
static void __bpf_lru_node_move_in(struct bpf_lru_list *l,
				   struct bpf_lru_node *node,
				   enum bpf_lru_list_type tgt_type)
{
	/* Source must be a local list type; target must be a global LRU
	 * list type.
	 */
	if (WARN_ON_ONCE(!IS_LOCAL_LIST_TYPE(node->type)) ||
	    WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
		return;
/* Move nodes between or within active and inactive list (like
 * active to inactive, inactive to active or tail of active back to
 * the head of active).
 */
/* NOTE(review): truncated fragment — the actual list_move/type-update
 * tail of this function is missing from this chunk; recover it from the
 * original source before building.
 */
static void __bpf_lru_node_move(struct bpf_lru_list *l,
				struct bpf_lru_node *node,
				enum bpf_lru_list_type tgt_type)
{
	/* Both source and target must be global LRU list types. */
	if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)) ||
	    WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
		return;

	/* If the moving node is the next_inactive_rotation candidate,
	 * move the next_inactive_rotation pointer also.
	 */
	if (&node->list == l->next_inactive_rotation)
		l->next_inactive_rotation = l->next_inactive_rotation->prev;
/* Rotate the active list:
 * 1. Start from tail
 * 2. If the node has the ref bit set, it will be rotated
 *    back to the head of active list with the ref bit cleared.
 *    Give this node one more chance to survive in the active list.
 * 3. If the ref bit is not set, move it to the head of the
 *    inactive list.
 * 4. It will at most scan nr_scans nodes
 */
/* NOTE(review): truncated fragment — only the local declarations survive
 * in this chunk; the scan loop is missing. Recover it from the original
 * source.
 */
static void __bpf_lru_list_rotate_active(struct bpf_lru *lru,
					 struct bpf_lru_list *l)
{
	struct list_head *active = &l->lists[BPF_LRU_LIST_T_ACTIVE];
	struct bpf_lru_node *node, *tmp_node, *first_node;
	unsigned int i = 0;
/* Rotate the inactive list. It starts from the next_inactive_rotation
 * candidate and walks backwards (->prev) so that successive calls
 * eventually visit every inactive node:
 * 1. If the node has the ref bit set, it will be moved to the head
 *    of the active list with the ref bit cleared.
 * 2. If the node does not have the ref bit set, it will leave it
 *    at its current location (i.e. do nothing) so that it can
 *    be considered during the next inactive_shrink.
 * 3. It will at most scan nr_scans nodes.
 */
static void __bpf_lru_list_rotate_inactive(struct bpf_lru *lru,
					   struct bpf_lru_list *l)
{
	struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	struct list_head *cur, *last, *next = inactive;
	struct bpf_lru_node *node;
	unsigned int i = 0;

	if (list_empty(inactive))
		return;

	/* Stop at the node just after the rotation cursor, skipping the
	 * list head sentinel so one pass can cover the whole list.
	 */
	last = l->next_inactive_rotation->next;
	if (last == inactive)
		last = last->next;

	cur = l->next_inactive_rotation;
	while (i < lru->nr_scans) {
		/* Step over the list head sentinel without counting it. */
		if (cur == inactive) {
			cur = cur->prev;
			continue;
		}

		node = list_entry(cur, struct bpf_lru_node, list);
		/* Capture the predecessor before a potential move unlinks
		 * "cur" from the inactive list.
		 */
		next = cur->prev;
		if (bpf_lru_node_is_ref(node))
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
		if (cur == last)
			break;
		cur = next;
		i++;
	}

	/* Remember where this pass stopped so the next rotation resumes
	 * from the same spot.
	 */
	l->next_inactive_rotation = next;
}
/* Shrink the inactive list. It starts from the tail of the
 * inactive list and only move the nodes without the ref bit
 * set to the designated free list.
 */
/* NOTE(review): truncated fragment — the scan loop and the return of
 * nshrinked are missing from this chunk; recover them from the original
 * source.
 */
static unsigned int
__bpf_lru_list_shrink_inactive(struct bpf_lru *lru,
			       struct bpf_lru_list *l,
			       unsigned int tgt_nshrink,
			       struct list_head *free_list,
			       enum bpf_lru_list_type tgt_free_type)
{
	struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	struct bpf_lru_node *node, *tmp_node;
	unsigned int nshrinked = 0;
	unsigned int i = 0;
/* 1. Rotate the active list (only when the inactive list is running low,
 *    as reported by bpf_lru_list_inactive_low())
 * 2. Always rotate the inactive list
 */
static void __bpf_lru_list_rotate(struct bpf_lru *lru, struct bpf_lru_list *l)
{
	if (bpf_lru_list_inactive_low(l))
		__bpf_lru_list_rotate_active(lru, l);

	__bpf_lru_list_rotate_inactive(lru, l);
}
/* Calls __bpf_lru_list_shrink_inactive() to shrink some
 * ref-bit-cleared nodes and move them to the designated
 * free list.
 *
 * If it cannot get a free node after calling
 * __bpf_lru_list_shrink_inactive(). It will just remove
 * one node from either inactive or active list without
 * honoring the ref-bit. It prefers inactive list to active
 * list in this situation.
 */
/* NOTE(review): truncated fragment — the opening brace, the local
 * declarations (incl. force_shrink_list) and the first-pass shrink
 * attempt are missing between the signature and the force-shrink branch
 * below; recover them from the original source.
 */
static unsigned int __bpf_lru_list_shrink(struct bpf_lru *lru,
					  struct bpf_lru_list *l,
					  unsigned int tgt_nshrink,
					  struct list_head *free_list,
					  enum bpf_lru_list_type tgt_free_type)

	/* Do a force shrink by ignoring the reference bit */
	if (!list_empty(&l->lists[BPF_LRU_LIST_T_INACTIVE]))
		force_shrink_list = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	else
		force_shrink_list = &l->lists[BPF_LRU_LIST_T_ACTIVE];
/* Flush the nodes from the local pending list to the LRU list */
/* NOTE(review): truncated fragment — only the declarations survive in
 * this chunk; the flush loop is missing. Recover it from the original
 * source.
 */
static void __local_list_flush(struct bpf_lru_list *l,
			       struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_node *node, *tmp_node;
/* NOTE(review): orphaned fragment — these statements belong to a
 * pop-free/alloc path whose surrounding lines (function header, locking
 * setup, and the steal loop announced by the comment below) are missing
 * from this chunk; recover the full function from the original source.
 */
if (node)
	__local_list_add_pending(lru, loc_l, cpu, node, hash);

raw_spin_unlock_irqrestore(&loc_l->lock, flags);

if (node)
	return node;

/* No free nodes found from the local free list and
 * the global LRU list.
 *
 * Steal from the local free/pending list of the
 * current CPU and remote CPU in RR. It starts
 * with the loc_l->next_steal CPU.
 */
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder die Vollständigkeit noch die Richtigkeit
noch die Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.