/* Caller-saved CPU registers available for JIT use */ #define JIT_CALLER_REGS \
(BIT(MIPS_R_A5) | \
BIT(MIPS_R_A6) | \
BIT(MIPS_R_A7)) /* * Mapping of 64-bit eBPF registers to 64-bit native MIPS registers. * MIPS registers t4 - t7 may be used by the JIT as temporary registers. * MIPS registers t8 - t9 are reserved for single-register common functions.
*/ staticconst u8 bpf2mips64[] = { /* Return value from in-kernel function, and exit value from eBPF */
[BPF_REG_0] = MIPS_R_V0, /* Arguments from eBPF program to in-kernel function */
[BPF_REG_1] = MIPS_R_A0,
[BPF_REG_2] = MIPS_R_A1,
[BPF_REG_3] = MIPS_R_A2,
[BPF_REG_4] = MIPS_R_A3,
[BPF_REG_5] = MIPS_R_A4, /* Callee-saved registers that in-kernel function will preserve */
[BPF_REG_6] = MIPS_R_S0,
[BPF_REG_7] = MIPS_R_S1,
[BPF_REG_8] = MIPS_R_S2,
[BPF_REG_9] = MIPS_R_S3, /* Read-only frame pointer to access the eBPF stack */
[BPF_REG_FP] = MIPS_R_FP, /* Temporary register for blinding constants */
[BPF_REG_AX] = MIPS_R_AT, /* Tail call count register, caller-saved */
[JIT_REG_TC] = MIPS_R_A5, /* Constant for register zero-extension */
[JIT_REG_ZX] = MIPS_R_V1,
};
/*
 * MIPS 32-bit operations on 64-bit registers generate a sign-extended
 * result. However, the eBPF ISA mandates zero-extension, so we rely on the
 * verifier to add that for us (emit_zext_ver). In addition, ALU arithmetic
 * operations, right shift and byte swap require properly sign-extended
 * operands or the result is unpredictable. We emit explicit sign-extensions
 * in those cases.
 */
/* Zero extension */
static void emit_zext(struct jit_context *ctx, u8 dst)
{
	if (cpu_has_mips64r2 || cpu_has_mips64r6) {
		/* R2+: clear the upper 32 bits of dst in one instruction */
		emit(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
	} else {
		/* Pre-R2: mask dst with the constant held in the ZX register */
		emit(ctx, and, dst, dst, bpf2mips64[JIT_REG_ZX]);
		access_reg(ctx, JIT_REG_ZX); /* We need the ZX register */
	}
	clobber_reg(ctx, dst);
}
/* Zero extension, if verifier does not do it for us */
static void emit_zext_ver(struct jit_context *ctx, u8 dst)
{
	if (!ctx->program->aux->verifier_zext)
		emit_zext(ctx, dst);
}
emit(ctx, lui, t2, 0xff); /* t2 = 0x00ff0000 */
emit(ctx, ori, t2, t2, 0xff); /* t2 = t2 | 0x00ff */
emit(ctx, dsll32, t1, t2, 0); /* t1 = t2 << 32 */
emit(ctx, or, t1, t1, t2); /* t1 = t1 | t2 */
emit_swap_r64(ctx, dst, t1, 8); /* dst = swap8(dst) */
} break; /* Swap bytes in a half word */ /* Swap bytes in a word */ case 32: case 16:
emit_sext(ctx, dst, dst);
emit_bswap_r(ctx, dst, width); if (cpu_has_mips64r2 || cpu_has_mips64r6)
emit_zext(ctx, dst); break;
}
clobber_reg(ctx, dst);
}
/* Truncate a register double word, word or half word */
static void emit_trunc_r64(struct jit_context *ctx, u8 dst, u32 width)
{
	switch (width) {
	case 64:
		break;
	/* Zero-extend a word */
	case 32:
		emit_zext(ctx, dst);
		break;
	/* Zero-extend a half word */
	case 16:
		emit(ctx, andi, dst, dst, 0xffff);
		break;
	}
	clobber_reg(ctx, dst);
}
/* Load operation: dst = *(size *)(src + off) */
static void emit_ldx(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 size)
{
	switch (size) {
	/* Load a byte */
	case BPF_B:
		emit(ctx, lbu, dst, off, src);
		break;
	/* Load a half word */
	case BPF_H:
		emit(ctx, lhu, dst, off, src);
		break;
	/* Load a word */
	case BPF_W:
		emit(ctx, lwu, dst, off, src);
		break;
	/* Load a double word */
	case BPF_DW:
		emit(ctx, ld, dst, off, src);
		break;
	}
	clobber_reg(ctx, dst);
}
/* Store operation: *(size *)(dst + off) = src */
static void emit_stx(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 size)
{
	switch (size) {
	/* Store a byte */
	case BPF_B:
		emit(ctx, sb, src, off, dst);
		break;
	/* Store a half word */
	case BPF_H:
		emit(ctx, sh, src, off, dst);
		break;
	/* Store a word */
	case BPF_W:
		emit(ctx, sw, src, off, dst);
		break;
	/* Store a double word */
	case BPF_DW:
		emit(ctx, sd, src, off, dst);
		break;
	}
}
/* Function tail call */ staticint emit_tail_call(struct jit_context *ctx)
{
u8 ary = bpf2mips64[BPF_REG_2];
u8 ind = bpf2mips64[BPF_REG_3];
u8 tcc = bpf2mips64[JIT_REG_TC];
u8 tmp = MIPS_R_T6; int off;
/* * Tail call: * eBPF R1 - function argument (context ptr), passed in a0-a1 * eBPF R2 - ptr to object with array of function entry points * eBPF R3 - array index of function to be called
*/
/* if (ind >= ary->map.max_entries) goto out */
off = offsetof(struct bpf_array, map.max_entries); if (off > 0x7fff) return -1;
emit(ctx, lwu, tmp, off, ary); /* tmp = ary->map.max_entrs*/
emit(ctx, sltu, tmp, ind, tmp); /* tmp = ind < t1 */
emit(ctx, beqz, tmp, get_offset(ctx, 1)); /* PC += off(1) if tmp == 0*/
/*
 * Stack frame layout for a JITed program (stack grows down).
 *
 * Higher address  : Previous stack frame      :
 *                 +===========================+ <--- MIPS sp before call
 *                 | Callee-saved registers,   |
 *                 | including RA and FP       |
 *                 +---------------------------+ <--- eBPF FP (MIPS fp)
 *                 | Local eBPF variables      |
 *                 | allocated by program      |
 *                 +---------------------------+
 *                 | Reserved for caller-saved |
 *                 | registers                 |
 * Lower address   +===========================+ <--- MIPS sp
 */
/* Build program prologue to set up the stack and registers */ void build_prologue(struct jit_context *ctx)
{
u8 fp = bpf2mips64[BPF_REG_FP];
u8 tc = bpf2mips64[JIT_REG_TC];
u8 zx = bpf2mips64[JIT_REG_ZX]; int stack, saved, locals, reserved;
/* * In the unlikely event that the TCC limit is raised to more * than 16 bits, it is clamped to the maximum value allowed for * the generated code (0xffff). It is better fail to compile * instead of degrading gracefully.
*/
BUILD_BUG_ON(MAX_TAIL_CALL_CNT > 0xffff);
/* * The first instruction initializes the tail call count register. * On a tail call, the calling function jumps into the prologue * after this instruction.
*/
emit(ctx, ori, tc, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
/* === Entry-point for tail calls === */
/* * If the eBPF frame pointer and tail call count registers were * accessed they must be preserved. Mark them as clobbered here * to save and restore them on the stack as needed.
*/ if (ctx->accessed & BIT(BPF_REG_FP))
clobber_reg(ctx, fp); if (ctx->accessed & BIT(JIT_REG_TC))
clobber_reg(ctx, tc); if (ctx->accessed & BIT(JIT_REG_ZX))
clobber_reg(ctx, zx);
/* Compute the stack space needed for callee-saved registers */
saved = hweight32(ctx->clobbered & JIT_CALLEE_REGS) * sizeof(u64);
saved = ALIGN(saved, MIPS_STACK_ALIGNMENT);
/* Stack space used by eBPF program local data */
locals = ALIGN(ctx->program->aux->stack_depth, MIPS_STACK_ALIGNMENT);
/* * If we are emitting function calls, reserve extra stack space for * caller-saved registers needed by the JIT. The required space is * computed automatically during resource usage discovery (pass 1).
*/
reserved = ctx->stack_used;
/* Build the program epilogue to restore the stack and registers */
void build_epilogue(struct jit_context *ctx, int dest_reg)
{
	/* Restore callee-saved registers from stack */
	pop_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0,
		 ctx->stack_size - ctx->saved_size);

	/* Release the stack frame */
	if (ctx->stack_size)
		emit(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, ctx->stack_size);

	/* Jump to return address and sign-extend the 32-bit return value */
	emit(ctx, jr, dest_reg);
	emit(ctx, sll, MIPS_R_V0, MIPS_R_V0, 0); /* Delay slot */
}
if (res == dst) /* Don't overwrite dst */
tmp = MIPS_R_T4;
emit_sext(ctx, tmp, res);
emit_sext(ctx, MIPS_R_T5, src);
emit_cmpxchg_r(ctx, dst, MIPS_R_T5, tmp, off); if (res == dst) /* Restore result */
emit_mov_r(ctx, res, MIPS_R_T4); /* Result zext inserted by verifier */
} break; default: goto notyet;
} break; /* PC += off if dst == src */ /* PC += off if dst != src */ /* PC += off if dst & src */ /* PC += off if dst > src */ /* PC += off if dst >= src */ /* PC += off if dst < src */ /* PC += off if dst <= src */ /* PC += off if dst > src (signed) */ /* PC += off if dst >= src (signed) */ /* PC += off if dst < src (signed) */ /* PC += off if dst <= src (signed) */ case BPF_JMP32 | BPF_JEQ | BPF_X: case BPF_JMP32 | BPF_JNE | BPF_X: case BPF_JMP32 | BPF_JSET | BPF_X: case BPF_JMP32 | BPF_JGT | BPF_X: case BPF_JMP32 | BPF_JGE | BPF_X: case BPF_JMP32 | BPF_JLT | BPF_X: case BPF_JMP32 | BPF_JLE | BPF_X: case BPF_JMP32 | BPF_JSGT | BPF_X: case BPF_JMP32 | BPF_JSGE | BPF_X: case BPF_JMP32 | BPF_JSLT | BPF_X: case BPF_JMP32 | BPF_JSLE | BPF_X: if (off == 0) break;
setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel);
emit_sext(ctx, MIPS_R_T4, dst); /* Sign-extended dst */
emit_sext(ctx, MIPS_R_T5, src); /* Sign-extended src */
emit_jmp_r(ctx, MIPS_R_T4, MIPS_R_T5, rel, jmp); if (finish_jmp(ctx, jmp, off) < 0) goto toofar; break; /* PC += off if dst == imm */ /* PC += off if dst != imm */ /* PC += off if dst & imm */ /* PC += off if dst > imm */ /* PC += off if dst >= imm */ /* PC += off if dst < imm */ /* PC += off if dst <= imm */ /* PC += off if dst > imm (signed) */ /* PC += off if dst >= imm (signed) */ /* PC += off if dst < imm (signed) */ /* PC += off if dst <= imm (signed) */ case BPF_JMP32 | BPF_JEQ | BPF_K: case BPF_JMP32 | BPF_JNE | BPF_K: case BPF_JMP32 | BPF_JSET | BPF_K: case BPF_JMP32 | BPF_JGT | BPF_K: case BPF_JMP32 | BPF_JGE | BPF_K: case BPF_JMP32 | BPF_JLT | BPF_K: case BPF_JMP32 | BPF_JLE | BPF_K: case BPF_JMP32 | BPF_JSGT | BPF_K: case BPF_JMP32 | BPF_JSGE | BPF_K: case BPF_JMP32 | BPF_JSLT | BPF_K: case BPF_JMP32 | BPF_JSLE | BPF_K: if (off == 0) break;
setup_jmp_i(ctx, imm, 32, BPF_OP(code), off, &jmp, &rel);
emit_sext(ctx, MIPS_R_T4, dst); /* Sign-extended dst */ if (valid_jmp_i(jmp, imm)) {
emit_jmp_i(ctx, MIPS_R_T4, imm, rel, jmp);
} else { /* Move large immediate to register, sign-extended */
emit_mov_i(ctx, MIPS_R_T5, imm);
emit_jmp_r(ctx, MIPS_R_T4, MIPS_R_T5, rel, jmp);
} if (finish_jmp(ctx, jmp, off) < 0) goto toofar; break; /* PC += off if dst == src */ /* PC += off if dst != src */ /* PC += off if dst & src */ /* PC += off if dst > src */ /* PC += off if dst >= src */ /* PC += off if dst < src */ /* PC += off if dst <= src */ /* PC += off if dst > src (signed) */ /* PC += off if dst >= src (signed) */ /* PC += off if dst < src (signed) */ /* PC += off if dst <= src (signed) */ case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP | BPF_JNE | BPF_X: case BPF_JMP | BPF_JSET | BPF_X: case BPF_JMP | BPF_JGT | BPF_X: case BPF_JMP | BPF_JGE | BPF_X: case BPF_JMP | BPF_JLT | BPF_X: case BPF_JMP | BPF_JLE | BPF_X: case BPF_JMP | BPF_JSGT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSLE | BPF_X: if (off == 0) break;
setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel);
emit_jmp_r(ctx, dst, src, rel, jmp); if (finish_jmp(ctx, jmp, off) < 0) goto toofar; break; /* PC += off if dst == imm */ /* PC += off if dst != imm */ /* PC += off if dst & imm */ /* PC += off if dst > imm */ /* PC += off if dst >= imm */ /* PC += off if dst < imm */ /* PC += off if dst <= imm */ /* PC += off if dst > imm (signed) */ /* PC += off if dst >= imm (signed) */ /* PC += off if dst < imm (signed) */ /* PC += off if dst <= imm (signed) */ case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JNE | BPF_K: case BPF_JMP | BPF_JSET | BPF_K: case BPF_JMP | BPF_JGT | BPF_K: case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JLE | BPF_K: case BPF_JMP | BPF_JSGT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSLE | BPF_K: if (off == 0) break;
setup_jmp_i(ctx, imm, 64, BPF_OP(code), off, &jmp, &rel); if (valid_jmp_i(jmp, imm)) {
emit_jmp_i(ctx, dst, imm, rel, jmp);
} else { /* Move large immediate to register */
emit_mov_i(ctx, MIPS_R_T4, imm);
emit_jmp_r(ctx, dst, MIPS_R_T4, rel, jmp);
} if (finish_jmp(ctx, jmp, off) < 0) goto toofar; break; /* PC += off */ case BPF_JMP | BPF_JA: if (off == 0) break; if (emit_ja(ctx, off) < 0) goto toofar; break; /* Tail call */ case BPF_JMP | BPF_TAIL_CALL: if (emit_tail_call(ctx) < 0) goto invalid; break; /* Function call */ case BPF_JMP | BPF_CALL: if (emit_call(ctx, insn) < 0) goto invalid; break; /* Function return */ case BPF_JMP | BPF_EXIT: /* * Optimization: when last instruction is EXIT * simply continue to epilogue.
*/ if (ctx->bpf_index == ctx->program->len - 1) break; if (emit_exit(ctx) < 0) goto toofar; break;
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.0.34Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.