static __always_inline __init void mult_sh_align_mod(long *v1, long *v2, long *w, constint align, constint mod)
{ unsignedlong flags; int m1, m2; long p, s, lv1, lv2, lw;
/* * We want the multiply and the shift to be isolated from the * rest of the code to disable gcc optimizations. Hence the * asm statements that execute nothing, but make gcc not know * what the values of m1, m2 and s are and what lv2 and p are * used for.
*/
local_irq_save(flags); /* * The following code leads to a wrong result of the first * dsll32 when executed on R4000 rev. 2.2 or 3.0 (PRId * 00000422 or 00000430, respectively). * * See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and * 3.0" by MIPS Technologies, Inc., errata #16 and #28 for * details. I got no permission to duplicate them here, * sigh... --macro
*/ asmvolatile( ""
: "=r" (m1), "=r" (m2), "=r" (s)
: "0" (5), "1" (8), "2" (5));
align_mod(align, mod); /* * The trailing nop is needed to fulfill the two-instruction * requirement between reading hi/lo and staring a mult/div. * Leaving it out may cause gas insert a nop itself breaking * the desired alignment of the next chunk.
*/ asmvolatile( ".set push\n\t" ".set noat\n\t" ".set noreorder\n\t" ".set nomacro\n\t" "mult %2, %3\n\t" "dsll32 %0, %4, %5\n\t" "mflo $0\n\t" "dsll32 %1, %4, %5\n\t" "nop\n\t" ".set pop"
: "=&r" (lv1), "=r" (lw)
: "r" (m1), "r" (m2), "r" (s), "I" (0)
: "hi", "lo", "$0"); /* We have to use single integers for m1 and m2 and a double * one for p to be sure the mulsidi3 gcc's RTL multiplication * instruction has the workaround applied. Older versions of * gcc have correct umulsi3 and mulsi3, but other * multiplication variants lack the workaround.
*/ asmvolatile( ""
: "=r" (m1), "=r" (m2), "=r" (s)
: "0" (m1), "1" (m2), "2" (s));
align_mod(align, mod);
p = m1 * m2;
lv2 = s << 32; asmvolatile( ""
: "=r" (lv2)
: "0" (lv2), "r" (p));
local_irq_restore(flags);
*v1 = lv1;
*v2 = lv2;
*w = lw;
}
static __always_inline __init void check_mult_sh(void)
{ long v1[8], v2[8], w[8]; int bug, fix, i;
printk("Checking for the multiply/shift bug... ");
/* * Testing discovered false negatives for certain code offsets * into cache lines. Hence we test all possible offsets for * the worst assumption of an R4000 I-cache line width of 32 * bytes. * * We can't use a loop as alignment directives need to be * immediates.
*/
mult_sh_align_mod(&v1[0], &v2[0], &w[0], 32, 0);
mult_sh_align_mod(&v1[1], &v2[1], &w[1], 32, 1);
mult_sh_align_mod(&v1[2], &v2[2], &w[2], 32, 2);
mult_sh_align_mod(&v1[3], &v2[3], &w[3], 32, 3);
mult_sh_align_mod(&v1[4], &v2[4], &w[4], 32, 4);
mult_sh_align_mod(&v1[5], &v2[5], &w[5], 32, 5);
mult_sh_align_mod(&v1[6], &v2[6], &w[6], 32, 6);
mult_sh_align_mod(&v1[7], &v2[7], &w[7], 32, 7);
bug = 0; for (i = 0; i < 8; i++) if (v1[i] != w[i])
bug = 1;
if (bug == 0) {
pr_cont("no.\n"); return;
}
pr_cont("yes, workaround... ");
fix = 1; for (i = 0; i < 8; i++) if (v2[i] != w[i])
fix = 0;
/*
 * NOTE(review): the following is German website boilerplate that was pasted
 * over the end of check_mult_sh() — the function's closing code (the final
 * "fix" evaluation and closing brace) is missing and must be restored from
 * the original source.  Translated for reference:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge.  However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */