staticinlineint bpf2la_offset(int bpf_insn, int off, conststruct jit_ctx *ctx)
{ /* BPF JMP offset is relative to the next instruction */
bpf_insn++; /* * Whereas LoongArch branch instructions encode the offset * from the branch itself, so we must subtract 1 from the * instruction offset.
*/ return (ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1));
}
staticinlineint epilogue_offset(conststruct jit_ctx *ctx)
{ int from = ctx->idx; int to = ctx->epilogue_offset;
/* ori rd, rd, imm_11_0 */
imm_11_0 = imm & 0xfff; if (imm_11_0 != 0)
emit_insn(ctx, ori, rd, rd, imm_11_0);
if (!is_signed_imm32(imm)) { if (imm_51_0 != 0) { /* * If bit[51:31] is all 0 or all 1, * it means bit[51:32] is sign extended by lu12iw, * no need to call lu32id to do a new filled operation.
*/
imm_51_31 = (imm >> 31) & 0x1fffff; if (imm_51_31 != 0 && imm_51_31 != 0x1fffff) { /* lu32id rd, imm_51_32 */
imm_51_32 = (imm >> 32) & 0xfffff;
emit_insn(ctx, lu32id, rd, imm_51_32);
}
}
/*
 * Return the BPF jump condition that tests the opposite outcome of @cond,
 * so a branch can be emitted in inverted form.  Note that both BPF_JNE and
 * BPF_JSET are materialized with bne (see cond_jmp_offset()), hence both
 * invert to BPF_JEQ.  Returns -1 for conditions with no inversion here.
 */
static inline int invert_jmp_cond(u8 cond)
{
	int inverted = -1;

	switch (cond) {
	case BPF_JEQ:
		inverted = BPF_JNE;
		break;
	case BPF_JNE:
	case BPF_JSET:
		inverted = BPF_JEQ;
		break;
	case BPF_JGT:
		inverted = BPF_JLE;
		break;
	case BPF_JGE:
		inverted = BPF_JLT;
		break;
	case BPF_JLT:
		inverted = BPF_JGE;
		break;
	case BPF_JLE:
		inverted = BPF_JGT;
		break;
	case BPF_JSGT:
		inverted = BPF_JSLE;
		break;
	case BPF_JSGE:
		inverted = BPF_JSLT;
		break;
	case BPF_JSLT:
		inverted = BPF_JSGE;
		break;
	case BPF_JSLE:
		inverted = BPF_JSGT;
		break;
	}

	return inverted;
}
/*
 * Emit the native LoongArch conditional branch implementing BPF jump
 * condition @cond on registers @rj and @rd; @jmp_offset is relative to
 * the branch instruction itself.  Strict inequalities are realized by
 * swapping the register operands of blt/bltu; no instruction is emitted
 * for an unrecognized condition.
 */
static inline void cond_jmp_offset(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				   enum loongarch_gpr rd, int jmp_offset)
{
	switch (cond) {
	case BPF_JEQ:
		/* PC += jmp_offset if rj == rd */
		emit_insn(ctx, beq, rj, rd, jmp_offset);
		break;
	case BPF_JNE:
	case BPF_JSET:
		/* PC += jmp_offset if rj != rd */
		emit_insn(ctx, bne, rj, rd, jmp_offset);
		break;
	case BPF_JGT:
		/* PC += jmp_offset if rj > rd (unsigned), i.e. rd < rj */
		emit_insn(ctx, bltu, rd, rj, jmp_offset);
		break;
	case BPF_JLT:
		/* PC += jmp_offset if rj < rd (unsigned) */
		emit_insn(ctx, bltu, rj, rd, jmp_offset);
		break;
	case BPF_JGE:
		/* PC += jmp_offset if rj >= rd (unsigned) */
		emit_insn(ctx, bgeu, rj, rd, jmp_offset);
		break;
	case BPF_JLE:
		/* PC += jmp_offset if rj <= rd (unsigned), i.e. rd >= rj */
		emit_insn(ctx, bgeu, rd, rj, jmp_offset);
		break;
	case BPF_JSGT:
		/* PC += jmp_offset if rj > rd (signed), i.e. rd < rj */
		emit_insn(ctx, blt, rd, rj, jmp_offset);
		break;
	case BPF_JSLT:
		/* PC += jmp_offset if rj < rd (signed) */
		emit_insn(ctx, blt, rj, rd, jmp_offset);
		break;
	case BPF_JSGE:
		/* PC += jmp_offset if rj >= rd (signed) */
		emit_insn(ctx, bge, rj, rd, jmp_offset);
		break;
	case BPF_JSLE:
		/* PC += jmp_offset if rj <= rd (signed), i.e. rd >= rj */
		emit_insn(ctx, bge, rd, rj, jmp_offset);
		break;
	}
}
/*
 * Emit a conditional jump, or fail if the offset does not fit.
 *
 * A large PC-relative jump offset may overflow the immediate field of the
 * native conditional branch instruction, which would force a conversion to
 * an absolute jump — a particularly nasty sequence.  For now we rely on
 * cond_jmp_offs26() alone to keep things simple.  Far-branch support could
 * be added later, but branch relaxation needs more than two passes to
 * converge and the resulting code is hard to follow; it is not clearly
 * worth the extra pain, so leave it as is for readability.
 *
 * Returns 0 on success, -EINVAL if @jmp_offset exceeds the signed 26-bit
 * range.
 */
static inline int emit_cond_jmp(struct jit_ctx *ctx, u8 cond, enum loongarch_gpr rj,
				enum loongarch_gpr rd, int jmp_offset)
{
	if (!is_signed_imm26(jmp_offset))
		return -EINVAL;

	cond_jmp_offs26(ctx, cond, rj, rd, jmp_offset);

	return 0;
}
staticinlineint emit_uncond_jmp(struct jit_ctx *ctx, int jmp_offset)
{ if (is_signed_imm26(jmp_offset)) {
uncond_jmp_offs26(ctx, jmp_offset); return 0;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.