#ifdef CONFIG_32BIT #define runtime_const_ptr(sym) \
({ \
typeof(sym) __ret; \
asm_inline(".option push\n\t" \ ".option norvc\n\t" \ "1:\t" \ "lui %[__ret],0x89abd\n\t" \ "addi %[__ret],%[__ret],-0x211\n\t" \ ".option pop\n\t" \ ".pushsection runtime_ptr_"#sym",\"a\"\n\t" \ ".long 1b - .\n\t" \ ".popsection" \
: [__ret] "=r" (__ret)); \
__ret; \
}) #else /* * Loading 64-bit constants into a register from immediates is a non-trivial * task on riscv64. To get it somewhat performant, load 32 bits into two * different registers and then combine the results. * * If the processor supports the Zbkb extension, we can combine the final * "slli,slli,srli,add" into the single "pack" instruction. If the processor * doesn't support Zbkb but does support the Zbb extension, we can * combine the final "slli,srli,add" into one instruction "add.uw".
*/ #define RISCV_RUNTIME_CONST_64_PREAMBLE \ ".option push\n\t" \ ".option norvc\n\t" \ "1:\t" \ "lui %[__ret],0x89abd\n\t" \ "lui %[__tmp],0x1234\n\t" \ "addiw %[__ret],%[__ret],-0x211\n\t" \ "addiw %[__tmp],%[__tmp],0x567\n\t" \
staticinlinevoid __runtime_fixup_caches(void *where, unsignedint insns)
{ /* On riscv there are currently only cache-wide flushes so va is ignored. */
__always_unused uintptr_t va = (uintptr_t)where;
flush_icache_range(va, va + 4 * insns);
}
/*
 * The 32-bit immediate is stored in a lui+addi pairing.
 * lui holds the upper 20 bits of the immediate in the first 20 bits of the instruction.
 * addi holds the lower 12 bits of the immediate in the first 12 bits of the instruction.
 */
static inline void __runtime_fixup_32(__le16 *lui_parcel, __le16 *addi_parcel, unsigned int val)
{
	unsigned int lower_immediate, upper_immediate;
	u32 lui_insn, addi_insn, addi_insn_mask;
	__le32 lui_res, addi_res;

	/* Mask out upper 12 bit of addi */
	addi_insn_mask = 0x000fffff;

	/* Instructions are stored as two little-endian 16-bit parcels. */
	lui_insn = (u32)le16_to_cpu(lui_parcel[0]) | (u32)le16_to_cpu(lui_parcel[1]) << 16;
	addi_insn = (u32)le16_to_cpu(addi_parcel[0]) | (u32)le16_to_cpu(addi_parcel[1]) << 16;

	/*
	 * addi sign-extends its 12-bit immediate, so the lui part must
	 * compensate: split val so that upper + sign-extended lower == val.
	 */
	lower_immediate = sign_extend32(val, 11);
	upper_immediate = (val - lower_immediate);

	if (upper_immediate & 0xfffff000) {
		/* replace upper 20 bits of lui with upper immediate */
		lui_insn &= 0x00000fff;
		lui_insn |= upper_immediate & 0xfffff000;
	} else {
		/* replace lui with nop if immediate is small enough to fit in addi */
		lui_insn = RISCV_INSN_NOP4;
		/*
		 * lui is being skipped, so do a load instead of an add. A load
		 * is performed by adding with the x0 register. Setting rs to
		 * zero with the following mask will accomplish this goal.
		 */
		addi_insn_mask &= 0x07fff;
	}

	if (lower_immediate & 0x00000fff || lui_insn == RISCV_INSN_NOP4) {
		/* replace upper 12 bits of addi with lower 12 bits of val */
		addi_insn &= addi_insn_mask;
		addi_insn |= (lower_immediate & 0x00000fff) << 20;
	} else {
		/* replace addi with nop if lower_immediate is empty */
		addi_insn = RISCV_INSN_NOP4;
	}

	/* Write both patched instructions back under the text patching lock. */
	addi_res = cpu_to_le32(addi_insn);
	lui_res = cpu_to_le32(lui_insn);
	mutex_lock(&text_mutex);
	patch_insn_write(addi_parcel, &addi_res, sizeof(addi_res));
	patch_insn_write(lui_parcel, &lui_res, sizeof(lui_res));
	mutex_unlock(&text_mutex);
}
/*
 * Patch the runtime-constant load sequence at @where with the real
 * pointer value @val: one lui/addi pair on riscv32, two 32-bit halves
 * on riscv64, then flush the icache over the patched instructions.
 */
static inline void __runtime_fixup_ptr(void *where, unsigned long val)
{
#ifdef CONFIG_32BIT
	__runtime_fixup_32(where, where + 4, val);
	__runtime_fixup_caches(where, 2);
#else
	/* Low half at offset 0/8, high half at offset 4/12 (see preamble). */
	__runtime_fixup_32(where, where + 8, val);
	__runtime_fixup_32(where + 4, where + 12, val >> 32);
	__runtime_fixup_caches(where, 4);
#endif
}
/* * Replace the least significant 5 bits of the srli/srliw immediate that is * located at bits 20-24
*/ staticinlinevoid __runtime_fixup_shift(void *where, unsignedlong val)
{
__le16 *parcel = where;
__le32 res;
u32 insn;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.