WARN(true, "BPF JIT is asking about unknown registers, will crash the stack"); /* Use the hole we have left for alignment */ return BPF_PPC_STACKFRAME(ctx) - 4;
}
staticinlinebool bpf_has_stack_frame(struct codegen_context *ctx)
{ /* * We only need a stack frame if: * - we call other functions (kernel helpers), or * - we use non volatile registers, or * - we use tail call counter * - the bpf program uses its stack area * The latter condition is deduced from the usage of BPF_REG_FP
*/ return ctx->seen & (SEEN_FUNC | SEEN_TAILCALL | SEEN_NVREG_FULL_MASK) ||
bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
}
while (ctx->seen & nvreg_mask &&
(ctx->seen & SEEN_VREG_MASK) != SEEN_VREG_MASK) { int old = 32 - fls(ctx->seen & (nvreg_mask & 0xaaaaaaab)); intnew = 32 - fls(~ctx->seen & (SEEN_VREG_MASK & 0xaaaaaaaa)); int i;
for (i = BPF_REG_0; i <= TMP_REG; i++) { if (ctx->b2p[i] != old) continue;
ctx->b2p[i] = new;
bpf_set_seen_register(ctx, new);
bpf_clear_seen_register(ctx, old); if (i != TMP_REG) {
bpf_set_seen_register(ctx, new - 1);
bpf_clear_seen_register(ctx, old - 1);
} break;
}
}
}
/*
 * Emit the JITed program's prologue: reserve a stack frame when one is
 * needed, save LR and the non-volatile GPRs the program uses, initialise
 * the tail call counter, and set up the BPF frame pointer.
 */
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Instruction for trampoline attach */
	EMIT(PPC_RAW_NOP());

	/* Initialize tail_call_cnt, to be skipped if we do tail calls. */
	if (ctx->seen & SEEN_TAILCALL)
		EMIT(PPC_RAW_LI(_R4, 0));
	else
		EMIT(PPC_RAW_NOP());

/* Size in bytes of the two 4-byte instructions emitted above */
#define BPF_TAILCALL_PROLOGUE_SIZE 8

	/* Allocate our frame in one go (STWU updates r1 and stores the back chain) */
	if (bpf_has_stack_frame(ctx))
		EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));

	/* Spill the tail call counter into its dedicated stack slot */
	if (ctx->seen & SEEN_TAILCALL)
		EMIT(PPC_RAW_STW(_R4, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));

	/* First arg comes in as a 32 bits pointer. */
	EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_1), _R3));
	/* Zero the register holding the higher 32 bits of the BPF_REG_1 pair */
	EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_1) - 1, 0));

	/*
	 * We need a stack frame, but we don't necessarily need to
	 * save/restore LR unless we call other functions
	 */
	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_MFLR(_R0));

	/*
	 * Back up non-volatile regs -- registers r18-r31
	 */
	for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
		if (bpf_is_seen_register(ctx, i))
			EMIT(PPC_RAW_STW(i, _R1, bpf_jit_stack_offsetof(ctx, i)));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP))) {
		/* Higher 32 bits of the 64-bit BPF FP are always zero */
		EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_FP) - 1, 0));
		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
				  STACK_FRAME_MIN_SIZE + ctx->stack_size));
	}

	/* Save LR in the caller's frame, above the frame we just allocated */
	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_STW(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
}
/*
 * Common epilogue: restore the saved non-volatile GPRs and the link
 * register, then tear down the stack frame set up by the prologue.
 * Mirrors bpf_jit_build_prologue() in reverse order.
 */
static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
		if (bpf_is_seen_register(ctx, i))
			EMIT(PPC_RAW_LWZ(i, _R1, bpf_jit_stack_offsetof(ctx, i)));

	/* Reload saved LR from the caller's frame while r1 still points here */
	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx))
		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_MTLR(_R0));
}
/* Relative offset needs to be calculated based on final image location */
int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
	s32 offset = (s32)func - (s32)(fimage + ctx->idx);

	if (!image || offset >= 0x2000000 || offset < -0x2000000) {
		/*
		 * Target is out of `bl` range (or this is the sizing pass):
		 * materialize the full address in r0 and branch via CTR.
		 */
		EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
		EMIT(PPC_RAW_ORI(_R0, _R0, IMM_L(func)));
		EMIT(PPC_RAW_MTCTR(_R0));
		EMIT(PPC_RAW_BCTRL());
	} else {
		/* Close enough for a direct relative branch-and-link */
		EMIT(PPC_RAW_BL(offset));
	}

	return 0;
}
staticint bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{ /* * By now, the eBPF program has already setup parameters in r3-r6 * r3-r4/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program * r5-r6/BPF_REG_2 - pointer to bpf_array * r7-r8/BPF_REG_3 - index in bpf_array
*/ int b2p_bpf_array = bpf_to_ppc(BPF_REG_2); int b2p_index = bpf_to_ppc(BPF_REG_3);
/* Assemble the body code between the prologue & epilogue */ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
u32 *addrs, int pass, bool extra_pass)
{ conststruct bpf_insn *insn = fp->insnsi; int flen = fp->len; int i, ret;
/* Start of epilogue code - will only be valid 2nd pass onwards */
u32 exit_addr = addrs[flen];
/* * addrs[] maps a BPF bytecode address into a real offset from * the start of the body code.
*/
addrs[i] = ctx->idx * 4;
/* * As an optimization, we note down which registers * are used so that we can only save/restore those in our * prologue and epilogue. We do this here regardless of whether * the actual BPF instruction uses src/dst registers or not * (for instance, BPF_CALL does not use them). The expectation * is that those instructions will have src_reg/dst_reg set to * 0. Even otherwise, we just lose some prologue/epilogue * optimization but everything else should work without * any issues.
*/ if (dst_reg >= 3 && dst_reg < 32) {
bpf_set_seen_register(ctx, dst_reg);
bpf_set_seen_register(ctx, dst_reg_h);
}
/* Get offset into TMP_REG */
EMIT(PPC_RAW_LI(tmp_reg, off)); /* * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync' * before and after the operation. * * This is a requirement in the Linux Kernel Memory Model. * See __cmpxchg_u32() in asm/cmpxchg.h as an example.
*/ if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
EMIT(PPC_RAW_SYNC());
tmp_idx = ctx->idx * 4; /* load value from memory into r0 */
EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
/* Save old value in BPF_REG_AX */ if (imm & BPF_FETCH)
EMIT(PPC_RAW_MR(ax_reg, _R0));
switch (imm) { case BPF_ADD: case BPF_ADD | BPF_FETCH:
EMIT(PPC_RAW_ADD(_R0, _R0, src_reg)); break; case BPF_AND: case BPF_AND | BPF_FETCH:
EMIT(PPC_RAW_AND(_R0, _R0, src_reg)); break; case BPF_OR: case BPF_OR | BPF_FETCH:
EMIT(PPC_RAW_OR(_R0, _R0, src_reg)); break; case BPF_XOR: case BPF_XOR | BPF_FETCH:
EMIT(PPC_RAW_XOR(_R0, _R0, src_reg)); break; case BPF_CMPXCHG: /* * Return old value in BPF_REG_0 for BPF_CMPXCHG & * in src_reg for other cases.
*/
ret_reg = bpf_to_ppc(BPF_REG_0);
/* Compare with old value in BPF_REG_0 */
EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), _R0)); /* Don't set if different from old value */
PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
fallthrough; case BPF_XCHG:
save_reg = src_reg; break; default:
pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
code, i); return -EOPNOTSUPP;
}
/* store new value */
EMIT(PPC_RAW_STWCX(save_reg, tmp_reg, dst_reg)); /* we're done if this succeeded */
PPC_BCC_SHORT(COND_NE, tmp_idx);
/* For the BPF_FETCH variant, get old data into src_reg */ if (imm & BPF_FETCH) { /* Emit 'sync' to enforce full ordering */ if (IS_ENABLED(CONFIG_SMP))
EMIT(PPC_RAW_SYNC());
EMIT(PPC_RAW_MR(ret_reg, ax_reg)); if (!fp->aux->verifier_zext)
EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
} break;
/* * BPF_LDX
*/ case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */ case BPF_LDX | BPF_MEMSX | BPF_B: case BPF_LDX | BPF_PROBE_MEM | BPF_B: case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */ case BPF_LDX | BPF_MEMSX | BPF_H: case BPF_LDX | BPF_PROBE_MEM | BPF_H: case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */ case BPF_LDX | BPF_MEMSX | BPF_W: case BPF_LDX | BPF_PROBE_MEM | BPF_W: case BPF_LDX | BPF_PROBE_MEMSX | BPF_W: case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEM | BPF_DW: /* * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM * load only if addr is kernel address (see is_kernel_addr()), otherwise * set dst_reg=0 and move on.
*/ if (BPF_MODE(code) == BPF_PROBE_MEM || BPF_MODE(code) == BPF_PROBE_MEMSX) {
PPC_LI32(_R0, TASK_SIZE - off);
EMIT(PPC_RAW_CMPLW(src_reg, _R0));
PPC_BCC_SHORT(COND_GT, (ctx->idx + 4) * 4);
EMIT(PPC_RAW_LI(dst_reg, 0)); /* * For BPF_DW case, "li reg_h,0" would be needed when * !fp->aux->verifier_zext. Emit NOP otherwise. * * Note that "li reg_h,0" is emitted for BPF_B/H/W case, * if necessary. So, jump there instead of emitting an * additional "li reg_h,0" instruction.
*/ if (size == BPF_DW && !fp->aux->verifier_zext)
EMIT(PPC_RAW_LI(dst_reg_h, 0)); else
EMIT(PPC_RAW_NOP()); /* * Need to jump two instructions instead of one for BPF_DW case * as there are two load instructions for dst_reg_h & dst_reg * respectively.
*/ if (size == BPF_DW ||
(size == BPF_B && BPF_MODE(code) == BPF_PROBE_MEMSX))
PPC_JMP((ctx->idx + 3) * 4); else
PPC_JMP((ctx->idx + 2) * 4);
}
if (BPF_MODE(code) == BPF_MEMSX || BPF_MODE(code) == BPF_PROBE_MEMSX) { switch (size) { case BPF_B:
EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
EMIT(PPC_RAW_EXTSB(dst_reg, dst_reg)); break; case BPF_H:
EMIT(PPC_RAW_LHA(dst_reg, src_reg, off)); break; case BPF_W:
EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off)); break;
} if (!fp->aux->verifier_zext)
EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31));
} else { switch (size) { case BPF_B:
EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off)); break; case BPF_H:
EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off)); break; case BPF_W:
EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off)); break; case BPF_DW:
EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4)); break;
} if (size != BPF_DW && !fp->aux->verifier_zext)
EMIT(PPC_RAW_LI(dst_reg_h, 0));
}
if (BPF_MODE(code) == BPF_PROBE_MEM) { int insn_idx = ctx->idx - 1; int jmp_off = 4;
/* * In case of BPF_DW, two lwz instructions are emitted, one * for higher 32-bit and another for lower 32-bit. So, set * ex->insn to the first of the two and jump over both * instructions in fixup. * * Similarly, with !verifier_zext, two instructions are * emitted for BPF_B/H/W case. So, set ex->insn to the * instruction that could fault and skip over both * instructions.
*/ if (size == BPF_DW || !fp->aux->verifier_zext) {
insn_idx -= 1;
jmp_off += 4;
}
ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx, insn_idx,
jmp_off, dst_reg); if (ret) return ret;
} break;
/* * Doubleword load * 16 byte instruction that uses two 'struct bpf_insn'
*/ case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm);
PPC_LI32(dst_reg, (u32)insn[i].imm); /* Adjust for two bpf instructions */
addrs[++i] = ctx->idx * 4; break;
/* * Return/Exit
*/ case BPF_JMP | BPF_EXIT: /* * If this isn't the very last instruction, branch to * the epilogue. If we _are_ the last instruction, * we'll just fall through to the epilogue.
*/ if (i != flen - 1) {
ret = bpf_jit_emit_exit_insn(image, ctx, _R0, exit_addr); if (ret) return ret;
} /* else fall through to the epilogue */ break;
/* * Call kernel helper or bpf function
*/ case BPF_JMP | BPF_CALL:
ctx->seen |= SEEN_FUNC;
ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
&func_addr, &func_addr_fixed); if (ret < 0) return ret;
/* * Jumps and branches
*/ case BPF_JMP | BPF_JA:
PPC_JMP(addrs[i + 1 + off]); break; case BPF_JMP32 | BPF_JA:
PPC_JMP(addrs[i + 1 + imm]); break;
case BPF_JMP | BPF_JGT | BPF_K: case BPF_JMP | BPF_JGT | BPF_X: case BPF_JMP | BPF_JSGT | BPF_K: case BPF_JMP | BPF_JSGT | BPF_X: case BPF_JMP32 | BPF_JGT | BPF_K: case BPF_JMP32 | BPF_JGT | BPF_X: case BPF_JMP32 | BPF_JSGT | BPF_K: case BPF_JMP32 | BPF_JSGT | BPF_X:
true_cond = COND_GT; goto cond_branch; case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JLT | BPF_X: case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP32 | BPF_JLT | BPF_K: case BPF_JMP32 | BPF_JLT | BPF_X: case BPF_JMP32 | BPF_JSLT | BPF_K: case BPF_JMP32 | BPF_JSLT | BPF_X:
true_cond = COND_LT; goto cond_branch; case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JGE | BPF_X: case BPF_JMP | BPF_JSGE | BPF_K: case BPF_JMP | BPF_JSGE | BPF_X: case BPF_JMP32 | BPF_JGE | BPF_K: case BPF_JMP32 | BPF_JGE | BPF_X: case BPF_JMP32 | BPF_JSGE | BPF_K: case BPF_JMP32 | BPF_JSGE | BPF_X:
true_cond = COND_GE; goto cond_branch; case BPF_JMP | BPF_JLE | BPF_K: case BPF_JMP | BPF_JLE | BPF_X: case BPF_JMP | BPF_JSLE | BPF_K: case BPF_JMP | BPF_JSLE | BPF_X: case BPF_JMP32 | BPF_JLE | BPF_K: case BPF_JMP32 | BPF_JLE | BPF_X: case BPF_JMP32 | BPF_JSLE | BPF_K: case BPF_JMP32 | BPF_JSLE | BPF_X:
true_cond = COND_LE; goto cond_branch; case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP32 | BPF_JEQ | BPF_K: case BPF_JMP32 | BPF_JEQ | BPF_X:
true_cond = COND_EQ; goto cond_branch; case BPF_JMP | BPF_JNE | BPF_K: case BPF_JMP | BPF_JNE | BPF_X: case BPF_JMP32 | BPF_JNE | BPF_K: case BPF_JMP32 | BPF_JNE | BPF_X:
true_cond = COND_NE; goto cond_branch; case BPF_JMP | BPF_JSET | BPF_K: case BPF_JMP | BPF_JSET | BPF_X: case BPF_JMP32 | BPF_JSET | BPF_K: case BPF_JMP32 | BPF_JSET | BPF_X:
true_cond = COND_NE; /* fallthrough; */
cond_branch: switch (code) { case BPF_JMP | BPF_JGT | BPF_X: case BPF_JMP | BPF_JLT | BPF_X: case BPF_JMP | BPF_JGE | BPF_X: case BPF_JMP | BPF_JLE | BPF_X: case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP | BPF_JNE | BPF_X: /* unsigned comparison */
EMIT(PPC_RAW_CMPLW(dst_reg_h, src_reg_h));
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
EMIT(PPC_RAW_CMPLW(dst_reg, src_reg)); break; case BPF_JMP32 | BPF_JGT | BPF_X: case BPF_JMP32 | BPF_JLT | BPF_X: case BPF_JMP32 | BPF_JGE | BPF_X: case BPF_JMP32 | BPF_JLE | BPF_X: case BPF_JMP32 | BPF_JEQ | BPF_X: case BPF_JMP32 | BPF_JNE | BPF_X: /* unsigned comparison */
EMIT(PPC_RAW_CMPLW(dst_reg, src_reg)); break; case BPF_JMP | BPF_JSGT | BPF_X: case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: case BPF_JMP | BPF_JSLE | BPF_X: /* signed comparison */
EMIT(PPC_RAW_CMPW(dst_reg_h, src_reg_h));
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
EMIT(PPC_RAW_CMPLW(dst_reg, src_reg)); break; case BPF_JMP32 | BPF_JSGT | BPF_X: case BPF_JMP32 | BPF_JSLT | BPF_X: case BPF_JMP32 | BPF_JSGE | BPF_X: case BPF_JMP32 | BPF_JSLE | BPF_X: /* signed comparison */
EMIT(PPC_RAW_CMPW(dst_reg, src_reg)); break; case BPF_JMP | BPF_JSET | BPF_X:
EMIT(PPC_RAW_AND_DOT(_R0, dst_reg_h, src_reg_h));
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg)); break; case BPF_JMP32 | BPF_JSET | BPF_X: {
EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg)); break; case BPF_JMP | BPF_JNE | BPF_K: case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JGT | BPF_K: case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JLE | BPF_K: /* * Need sign-extended load, so only positive * values can be used as imm in cmplwi
*/ if (imm >= 0 && imm < 32768) {
EMIT(PPC_RAW_CMPLWI(dst_reg_h, 0));
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
} else { /* sign-extending load ... but unsigned comparison */
PPC_EX32(_R0, imm);
EMIT(PPC_RAW_CMPLW(dst_reg_h, _R0));
PPC_LI32(_R0, imm);
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
} break; case BPF_JMP32 | BPF_JNE | BPF_K: case BPF_JMP32 | BPF_JEQ | BPF_K: case BPF_JMP32 | BPF_JGT | BPF_K: case BPF_JMP32 | BPF_JLT | BPF_K: case BPF_JMP32 | BPF_JGE | BPF_K: case BPF_JMP32 | BPF_JLE | BPF_K: if (imm >= 0 && imm < 65536) {
EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
} else {
PPC_LI32(_R0, imm);
EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
} break;
} case BPF_JMP | BPF_JSGT | BPF_K: case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: case BPF_JMP | BPF_JSLE | BPF_K: if (imm >= 0 && imm < 65536) {
EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
} else { /* sign-extending load */
EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
PPC_LI32(_R0, imm);
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
} break; case BPF_JMP32 | BPF_JSGT | BPF_K: case BPF_JMP32 | BPF_JSLT | BPF_K: case BPF_JMP32 | BPF_JSGE | BPF_K: case BPF_JMP32 | BPF_JSLE | BPF_K: /* * signed comparison, so any 16-bit value * can be used in cmpwi
*/ if (imm >= -32768 && imm < 32768) {
EMIT(PPC_RAW_CMPWI(dst_reg, imm));
} else { /* sign-extending load */
PPC_LI32(_R0, imm);
EMIT(PPC_RAW_CMPW(dst_reg, _R0));
} break; case BPF_JMP | BPF_JSET | BPF_K: /* andi does not sign-extend the immediate */ if (imm >= 0 && imm < 32768) { /* PPC_ANDI is _only/always_ dot-form */
EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
} else {
PPC_LI32(_R0, imm); if (imm < 0) {
EMIT(PPC_RAW_CMPWI(dst_reg_h, 0));
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
}
EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
} break; case BPF_JMP32 | BPF_JSET | BPF_K: /* andi does not sign-extend the immediate */ if (imm >= 0 && imm < 32768) { /* PPC_ANDI is _only/always_ dot-form */
EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
} else {
PPC_LI32(_R0, imm);
EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
} break;
}
PPC_BCC(true_cond, addrs[i + 1 + off]); break;
/* * Tail call
*/ case BPF_JMP | BPF_TAIL_CALL:
ctx->seen |= SEEN_TAILCALL;
ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]); if (ret < 0) return ret; break;
default: /* * The filter contains something cruel & unusual. * We don't handle it, but also there shouldn't be * anything missing from our list.
*/
pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n", code, i); return -EOPNOTSUPP;
} if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext &&
!insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64))
EMIT(PPC_RAW_LI(dst_reg_h, 0));
}
/* Set end-of-body-code address for exit. */
addrs[i] = ctx->idx * 4;
return 0;
}
Messung V0.5
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert. (Messwert: 0.46)
Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.