/*
 * Freelist pointer and counter to cmpxchg together, avoids the typical ABA
 * problems with cmpxchg of just a pointer.
 */
typedef union {
	struct {
		void *freelist;			/* head of the freelist */
		unsigned long counter;		/* bumped on update to detect ABA */
	};
	/* Both words viewed as a single unit for a double-word cmpxchg. */
	freelist_full_t full;	/* NOTE(review): freelist_full_t is declared elsewhere in this header */
} freelist_aba_t;
/* Reuses the bits in struct page */
struct slab {
	unsigned long flags;		/* must mirror struct page's flags field */

	struct kmem_cache *slab_cache;	/* cache this slab belongs to */
	union {
		struct {
			union {
				struct list_head slab_list;	/* partial/full list linkage */
#ifdef CONFIG_SLUB_CPU_PARTIAL
				struct {
					struct slab *next;	/* singly-linked per-CPU partial list */
					int slabs;		/* Nr of slabs left */
				};
#endif
			};
			/* Double-word boundary */
			union {
				struct {
					void *freelist;		/* first free object */
					union {
						/* all counters as one word, for cmpxchg */
						unsigned long counters;
						struct {
							unsigned inuse:16;	/* objects in use */
							unsigned objects:15;	/* total objects */
							/*
							 * If slab debugging is enabled then the
							 * frozen bit can be reused to indicate
							 * that the slab was corrupted
							 */
							unsigned frozen:1;
						};
					};
				};
#ifdef system_has_freelist_aba
				/* freelist pointer + counter updated as one unit */
				freelist_aba_t freelist_counter;
#endif
			};
		};
		struct rcu_head rcu_head;	/* used when the slab is freed via RCU */
	};
	/*
	 * NOTE(review): the remainder of struct slab (including its closing
	 * brace) appears to be missing from this chunk — slab_obj_exts()
	 * below reads slab->obj_exts, which is not declared above. Restore
	 * the missing members from the full source.
	 */
/** * folio_slab - Converts from folio to slab. * @folio: The folio. * * Currently struct slab is a different representation of a folio where * folio_test_slab() is true. * * Return: The slab which contains this folio.
*/ #define folio_slab(folio) (_Generic((folio), \ conststruct folio *: (conststruct slab *)(folio), \ struct folio *: (struct slab *)(folio)))
/**
 * slab_folio - The folio allocated for a slab
 * @s: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and are
 * using some fields in the first struct page of the folio - those fields are
 * now accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may change
 * in the future.
 */
/* Parenthesize the macro argument in the expansion, matching folio_slab()
 * and page_slab(), so a non-primary expression argument cannot mis-bind. */
#define slab_folio(s) (_Generic((s),				\
	const struct slab *:	(const struct folio *)(s),	\
	struct slab *:		(struct folio *)(s)))
/** * page_slab - Converts from first struct page to slab. * @p: The first (either head of compound or single) page of slab. * * A temporary wrapper to convert struct page to struct slab in situations where * we know the page is the compound head, or single order-0 page. * * Long-term ideally everything would work with struct slab directly or go * through folio to struct slab. * * Return: The slab which contains this page
*/ #define page_slab(p) (_Generic((p), \ conststruct page *: (conststruct slab *)(p), \ struct page *: (struct slab *)(p)))
/**
 * slab_page - The first struct page allocated for a slab
 * @s: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)
/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;		/* order and object count packed into one word */
};
/*
 * Slab cache management.
 */
struct kmem_cache {
#ifndef CONFIG_SLUB_TINY
	struct kmem_cache_cpu __percpu *cpu_slab;	/* per-CPU fast-path state */
#endif
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;	/* min nr of partial slabs kept per node */
	unsigned int size;		/* Object size including metadata */
	unsigned int object_size;	/* Object size without metadata */
	struct reciprocal_value reciprocal_size;	/* for fast divide by size */
	unsigned int offset;		/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
	/* Number of per cpu partial slabs to keep around */
	unsigned int cpu_partial_slabs;
#endif
	struct kmem_cache_order_objects oo;	/* preferred order + object count */

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects min;	/* minimum acceptable order + objects */
	gfp_t allocflags;		/* gfp flags to use on each alloc */
	int refcount;			/* Refcount for slab cache destroy */
	void (*ctor)(void *object);	/* Object constructor */
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;		/* Name (only for display!) */
	struct list_head list;		/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;		/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;		/* per-cache value for freelist hardening */
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif
	/*
	 * NOTE(review): the two statements below are NOT members of
	 * struct kmem_cache — the struct is truncated here and the tail
	 * of an unrelated function has been spliced in. The struct's
	 * remaining members, its closing brace, and the header of the
	 * function ending below must be restored from the full source.
	 */
	result = fixup_red_left(cache, result);
	return result;
}
/* Determine object index from a given position */ staticinlineunsignedint __obj_to_index(conststruct kmem_cache *cache, void *addr, void *obj)
{ return reciprocal_divide(kasan_reset_tag(obj) - addr,
cache->reciprocal_size);
}
/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};
/* Current bootstrap state of the allocator; see enum slab_state above. */
extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;
/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];	/* one cache name per kmalloc type */
	unsigned int size;			/* object size served by this entry */
} kmalloc_info[];
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(void);
/* * Find the kmem_cache structure that serves a given size of * allocation * * This assumes size is larger than zero and not larger than * KMALLOC_MAX_CACHE_SIZE and the caller must check that.
*/ staticinlinestruct kmem_cache *
kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsignedlong caller)
{ unsignedint index;
if (!b)
b = &kmalloc_caches[kmalloc_type(flags, caller)]; if (size <= 192)
index = kmalloc_size_index[size_index_elem(size)]; else
index = fls(size - 1);
return (*b)[index];
}
gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int do_kmem_cache_create(struct kmem_cache *s, const char *name,
			 unsigned int size, struct kmem_cache_args *args,
			 slab_flags_t flags);
/* * Returns true if any of the specified slab_debug flags is enabled for the * cache. Use only for flags parsed by setup_slub_debug() as it also enables * the static key.
*/ staticinlinebool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{ if (IS_ENABLED(CONFIG_SLUB_DEBUG))
VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS)); if (__slub_debug_enabled()) return s->flags & flags; returnfalse;
}
/*
 * slab_obj_exts - get the pointer to the slab object extension vector
 * associated with a slab.
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object extension vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
{
	/* READ_ONCE: obj_exts may be updated concurrently by another CPU. */
	unsigned long obj_exts = READ_ONCE(slab->obj_exts);

#ifdef CONFIG_MEMCG
	/*
	 * obj_exts should be either NULL, a valid pointer with
	 * MEMCG_DATA_OBJEXTS bit set or be equal to OBJEXTS_ALLOC_FAIL.
	 */
	VM_BUG_ON_PAGE(obj_exts && !(obj_exts & MEMCG_DATA_OBJEXTS) &&
			obj_exts != OBJEXTS_ALLOC_FAIL, slab_page(slab));
	VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab));
#endif
	/* Strip the flag bits kept in the low bits of the stored pointer. */
	return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK);
}
/*
 * slab_ksize - number of bytes of an object the caller may actually use,
 * given the cache's debugging and metadata layout.
 */
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
#endif
	/* KASAN places its metadata after the object as well. */
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.