/**
 * is_aligned - is this pointer & size okay for word-wide copying?
 * @base: pointer to data
 * @size: size of each element
 * @align: required alignment (typically 4 or 8)
 *
 * Returns true if elements can be copied using word loads and stores.
 * The size must be a multiple of the alignment, and the base address must
 * be if we do not have CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.
 *
 * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
 * to "if ((a | b) & mask)", so we do that by hand.
 */
__attribute_const__ __always_inline
static bool is_aligned(const void *base, size_t size, unsigned char align)
{
	unsigned char lsbits = (unsigned char)size;

	(void)base;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	lsbits |= (unsigned char)(uintptr_t)base;
#endif
	return (lsbits & (align - 1)) == 0;
}
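/*
 * Illustrative sketch, not part of the original file: the example function
 * and its SORT_EXAMPLES guard are hypothetical.  It shows what the single
 * combined mask test accepts and rejects: the low bits of the size (and,
 * without CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, of the base address) are
 * folded into one value and checked against the alignment mask.
 */
#ifdef SORT_EXAMPLES	/* hypothetical guard, never defined */
static void is_aligned_example(void)
{
	u64 buf[4] __aligned(8);

	/* 8-byte elements at an 8-byte-aligned base: 64-bit copies are safe. */
	BUG_ON(!is_aligned(buf, sizeof(u64), 8));

	/* Size 12 is not a multiple of 8, so 64-bit copies would overrun... */
	BUG_ON(is_aligned(buf, 12, 8));

	/* ...but it is a multiple of 4, so 32-bit copies are fine. */
	BUG_ON(!is_aligned(buf, 12, 4));
}
#endif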
/**
 * swap_words_32 - swap two elements in 32-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 4)
 *
 * Exchange the two objects in memory.  This exploits base+index addressing,
 * which basically all CPUs have, to minimize loop overhead computations.
 *
 * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
 * bottom of the loop, even though the zero flag is still valid from the
 * subtract (since the intervening mov instructions don't alter the flags).
 * Gcc 8.1.0 doesn't have that problem.
 */
static void swap_words_32(void *a, void *b, size_t n)
{
	do {
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
	} while (n);
}
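/*
 * Illustrative sketch, not part of the original file (hypothetical guard
 * and function): the countdown offset n doubles as both the loop counter
 * and the base+index displacement, so the loop needs no extra induction
 * variable.  swap_words_64() and swap_bytes() below follow the same pattern.
 */
#ifdef SORT_EXAMPLES	/* hypothetical guard, never defined */
static void swap_words_32_example(void)
{
	u32 x[] = { 1, 2, 3 };
	u32 y[] = { 7, 8, 9 };

	/* n = 12 bytes: three 4-byte chunks, swapped from the end downward. */
	swap_words_32(x, y, sizeof(x));
	BUG_ON(x[0] != 7 || y[2] != 3);
}
#endif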
/**
 * swap_words_64 - swap two elements in 64-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 8)
 *
 * Exchange the two objects in memory.  This exploits base+index
 * addressing, which basically all CPUs have, to minimize loop overhead
 * computations.
 *
 * We'd like to use 64-bit loads if possible.  If they're not available,
 * emulating one requires base+index+4 addressing which x86 has but most
 * other processors do not.  If CONFIG_64BIT, we definitely have 64-bit
 * loads, but it's possible to have 64-bit loads without 64-bit pointers
 * (e.g. x32 ABI).  Are there any cases the kernel needs to worry about?
 */
static void swap_words_64(void *a, void *b, size_t n)
{
	do {
#ifdef CONFIG_64BIT
		u64 t = *(u64 *)(a + (n -= 8));
		*(u64 *)(a + n) = *(u64 *)(b + n);
		*(u64 *)(b + n) = t;
#else
		/* Use two 32-bit transfers to avoid base+index+4 addressing */
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;

		t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
#endif
	} while (n);
}
/**
 * swap_bytes - swap two elements a byte at a time
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size
 *
 * This is the fallback if alignment doesn't allow using larger chunks.
 */
static void swap_bytes(void *a, void *b, size_t n)
{
	do {
		char t = ((char *)a)[--n];
		((char *)a)[n] = ((char *)b)[n];
		((char *)b)[n] = t;
	} while (n);
}
/*
 * The values are arbitrary as long as they can't be confused with
 * a pointer, but small integers make for the smallest compare
 * instructions.
 */
#define SWAP_WORDS_64 (swap_r_func_t)0
#define SWAP_WORDS_32 (swap_r_func_t)1
#define SWAP_BYTES    (swap_r_func_t)2
#define SWAP_WRAPPER  (swap_r_func_t)3
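/*
 * For illustration only (not part of the original file): struct wrapper is
 * defined elsewhere in this file.  From its uses below it presumably boxes
 * legacy non-reentrant callbacks behind priv, along the lines of
 *
 *	struct wrapper {
 *		cmp_func_t cmp;
 *		swap_func_t swap_func;
 *	};
 *
 * so SWAP_WRAPPER means "the real swap function lives in priv".
 */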
/*
 * The function pointer is last to make tail calls most efficient if the
 * compiler decides not to inline this function.
 */
static void do_swap(void *a, void *b, size_t size, swap_r_func_t swap_func,
		    const void *priv)
{
	if (swap_func == SWAP_WRAPPER) {
		((const struct wrapper *)priv)->swap_func(a, b, (int)size);
		return;
	}

	if (swap_func == SWAP_WORDS_64)
		swap_words_64(a, b, size);
	else if (swap_func == SWAP_WORDS_32)
		swap_words_32(a, b, size);
	else if (swap_func == SWAP_BYTES)
		swap_bytes(a, b, size);
	else
		swap_func(a, b, (int)size, priv);
}
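/*
 * Illustrative sketch, not part of the original file (hypothetical guard
 * and function): a genuine function pointer never compares equal to the
 * small integers 0..3, so do_swap() can tell the built-in swaps apart from
 * a caller-supplied swap_r_func_t.
 */
#ifdef SORT_EXAMPLES	/* hypothetical guard, never defined */
static void do_swap_example(void)
{
	u64 a = 1, b = 2;

	/* Tag dispatch: this runs swap_words_64(), no indirect call is made. */
	do_swap(&a, &b, sizeof(a), SWAP_WORDS_64, NULL);
	BUG_ON(a != 2 || b != 1);
}
#endif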
static void eytzinger1_sort_r(void *base1, size_t n, size_t size,
			      cmp_r_func_t cmp_func,
			      swap_r_func_t swap_func, const void *priv)
{
	unsigned i, j, k;

	/* called from 'sort' without swap function, let's pick the default */
	if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap_func)
		swap_func = NULL;

	if (!swap_func) {
		if (is_aligned(base1, size, 8))
			swap_func = SWAP_WORDS_64;
		else if (is_aligned(base1, size, 4))
			swap_func = SWAP_WORDS_32;
		else
			swap_func = SWAP_BYTES;
	}
	/* heapify */
	for (i = n / 2; i >= 1; --i) {
		/* Find the sift-down path all the way to the leaves. */
		for (j = i; k = j * 2, k < n;)
			j = eytzinger1_do_cmp(base1, n, size, cmp_func, priv, k, k + 1) > 0 ? k : k + 1;

		/* Special case for the last leaf with no sibling. */
		if (j * 2 == n)
			j *= 2;

		/* Backtrack to the correct location. */
		while (j != i && eytzinger1_do_cmp(base1, n, size, cmp_func, priv, i, j) >= 0)
			j /= 2;

		/* Shift the element into its correct place. */
		for (k = j; j != i;) {
			j /= 2;
			eytzinger1_do_swap(base1, n, size, swap_func, priv, j, k);
		}
	}
	/* sort */
	for (i = n; i > 1; --i) {
		eytzinger1_do_swap(base1, n, size, swap_func, priv, 1, i);

		/* Find the sift-down path all the way to the leaves. */
		for (j = 1; k = j * 2, k + 1 < i;)
			j = eytzinger1_do_cmp(base1, n, size, cmp_func, priv, k, k + 1) > 0 ? k : k + 1;

		/* Special case for the last leaf with no sibling. */
		if (j * 2 + 1 == i)
			j *= 2;

		/* Backtrack to the correct location. */
		while (j >= 1 && eytzinger1_do_cmp(base1, n, size, cmp_func, priv, 1, j) >= 0)
			j /= 2;

		/* Shift the element into its correct place. */
		for (k = j; j > 1;) {
			j /= 2;
			eytzinger1_do_swap(base1, n, size, swap_func, priv, j, k);
		}
	}
}
void eytzinger0_sort_r(void *base, size_t n, size_t size,
cmp_r_func_t cmp_func,
		       swap_r_func_t swap_func, const void *priv)
{
	void *base1 = base - size;
return eytzinger1_sort_r(base1, n, size, cmp_func, swap_func, priv);
}
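/*
 * Hypothetical usage sketch, not part of the original file: the comparator,
 * example function, and guard are illustrative.  eytzinger0_sort_r() leaves
 * the array in eytzinger (BFS search-tree) order, i.e. it is sorted when
 * read through the eytzinger in-order traversal rather than element by
 * element: the eytzinger1_do_cmp()/eytzinger1_do_swap() helpers, defined
 * elsewhere in this file, compare and swap through that index mapping.
 */
#ifdef SORT_EXAMPLES	/* hypothetical guard, never defined */
static int cmp_u32_example(const void *a, const void *b, const void *priv)
{
	u32 l = *(const u32 *)a, r = *(const u32 *)b;

	return l < r ? -1 : l > r;
}

static void eytzinger0_sort_example(void)
{
	u32 v[] = { 3, 1, 4, 1, 5, 9, 2, 6 };

	/* NULL swap_func: a built-in swap is chosen based on alignment. */
	eytzinger0_sort_r(v, ARRAY_SIZE(v), sizeof(v[0]), cmp_u32_example,
			  NULL, NULL);
}
#endif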