/* * We over-read the buffer and this makes KASAN unhappy. Instead, disable * instrumentation and call kasan explicitly.
*/ unsignedint __no_sanitize_address do_csum(constunsignedchar *buff, int len)
{ unsignedint offset, shift, sum; const u64 *ptr;
u64 data, sum64 = 0;
if (unlikely(len <= 0)) return 0;
offset = (unsignedlong)buff & 7; /* * This is to all intents and purposes safe, since rounding down cannot * result in a different page or cache line being accessed, and @buff * should absolutely not be pointing to anything read-sensitive. We do, * however, have to be careful not to piss off KASAN, which means using * unchecked reads to accommodate the head and tail, for which we'll * compensate with an explicit check up-front.
*/
kasan_check_read(buff, len);
ptr = (u64 *)(buff - offset);
len = len + offset - 8;
/* * Head: zero out any excess leading bytes. Shifting back by the same * amount should be at least as fast as any other way of handling the * odd/even alignment, and means we can ignore it until the very end.
*/
shift = offset * 8;
data = *ptr++; #ifdef __LITTLE_ENDIAN
data = (data >> shift) << shift; #else
data = (data << shift) >> shift; #endif
/* * Body: straightforward aligned loads from here on (the paired loads * underlying the quadword type still only need dword alignment). The * main loop strictly excludes the tail, so the second loop will always * run at least once.
*/ while (unlikely(len > 64)) {
__uint128_t tmp1, tmp2, tmp3, tmp4;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.