/**
 * SMP_CACHE_ALIGN - align a value to the L2 cacheline size
 * @x: value to align
 *
 * On some architectures, L2 ("SMP") CL size is bigger than L1, and sometimes,
 * this needs to be accounted.
 *
 * Return: aligned value.
 */
#ifndef SMP_CACHE_ALIGN
#define SMP_CACHE_ALIGN(x)	ALIGN(x, SMP_CACHE_BYTES)
#endif
/*
 * ``__aligned_largest`` aligns a field to the value most optimal for the
 * target architecture to perform memory operations. Get the actual value
 * to be able to use it anywhere else.
 */
#ifndef __LARGEST_ALIGN
/* sizeof() of a one-long struct carrying the attribute yields the alignment */
#define __LARGEST_ALIGN		sizeof(struct { long x; } __aligned_largest)
#endif
/*
 * __read_mostly is used to keep rarely changing variables out of frequently
 * updated cachelines. Its use should be reserved for data that is used
 * frequently in hot paths. Performance traces can help decide when to use
 * this. You want __read_mostly data to be tightly packed, so that in the
 * best case multiple frequently read variables for a hot path will be next
 * to each other in order to reduce the number of cachelines needed to
 * execute a critical path. We should be mindful and selective of its use.
 * ie: if you're going to use it please supply a *good* justification in your
 * commit log
 */
#ifndef __read_mostly
/* Fallback: arches without a dedicated section make this a no-op. */
#define __read_mostly
#endif
/*
 * __ro_after_init is used to mark things that are read-only after init (i.e.
 * after mark_rodata_ro() has been called). These are effectively read-only,
 * but may get written to during init, so can't live in .rodata (via "const").
 */
#ifndef __ro_after_init
#define __ro_after_init	__section(".data..ro_after_init")
#endif
/*
 * The maximum alignment needed for some critical structures
 * These could be inter-node cacheline sizes/L3 cacheline
 * size etc. Define this in asm/cache.h for your arch
 */
#ifndef INTERNODE_CACHE_SHIFT
/* Default: assume inter-node cachelines are no bigger than L1 lines. */
#define INTERNODE_CACHE_SHIFT	L1_CACHE_SHIFT
#endif
/**
 * __cacheline_group_begin_aligned - declare an aligned group start
 * @GROUP: name of the group
 * @...: optional group alignment
 *
 * The following block inside a struct:
 *
 *	__cacheline_group_begin_aligned(grp);
 *	field a;
 *	field b;
 *	__cacheline_group_end_aligned(grp);
 *
 * will always be aligned to either the specified alignment or
 * ``SMP_CACHE_BYTES``.
 */
#define __cacheline_group_begin_aligned(GROUP, ...)		\
	__cacheline_group_begin(GROUP)				\
	__aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES)
/**
 * __cacheline_group_end_aligned - declare an aligned group end
 * @GROUP: name of the group
 * @...: optional alignment (same as was in __cacheline_group_begin_aligned())
 *
 * Note that the end marker is aligned to sizeof(long) to allow more precise
 * size assertion. It also declares a padding at the end to avoid next field
 * falling into this cacheline.
 */
#define __cacheline_group_end_aligned(GROUP, ...)		\
	__cacheline_group_end(GROUP) __aligned(sizeof(long));	\
	struct { } __cacheline_group_pad__##GROUP		\
	__aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES)
/*
 * Helper to add padding within a struct to ensure data fall into separate
 * cachelines.
 *
 * On SMP builds the padding member is aligned to the inter-node cacheline
 * size, pushing the next field into a new cacheline; on UP builds it
 * expands to nothing (no space cost).
 */
/* Fix: "#ifdefined(CONFIG_SMP)" is not a valid directive — use #ifdef. */
#ifdef CONFIG_SMP
struct cacheline_padding {
	char x[0];	/* zero-size member: padding comes from alignment only */
} ____cacheline_internodealigned_in_smp;
#define CACHELINE_PADDING(name)	struct cacheline_padding name
#else
#define CACHELINE_PADDING(name)
#endif