/* A note about branch offset calculations.  The addrs[] array,
 * indexed by BPF instruction, records the address after all the
 * sparc instructions emitted for that BPF instruction.
 *
 * The most common case is to emit a branch at the end of such
 * a code sequence.  So this would be two instructions, the
 * branch and its delay slot.
 *
 * Therefore by default the branch emitters calculate the branch
 * offset field as:
 *
 *	destination - (addrs[i] - 8)
 *
 * This "addrs[i] - 8" is the address of the branch itself or
 * what "." would be in assembler notation.  The "8" part is
 * how we take into consideration the branch and its delay
 * slot mentioned above.
 *
 * Sometimes we need to emit a branch earlier in the code
 * sequence.  And in these situations we adjust "destination"
 * to accommodate this difference.  For example, if we needed
 * to emit a branch (and its delay slot) right before the
 * final instruction emitted for a BPF opcode, we'd use
 * "destination + 4" instead of just plain "destination" above.
 *
 * This is why you see all of these funny emit_branch() and
 * emit_jump() calls with adjusted offsets.
 */
/* Make sure we don't leak kernel memory. */
if (seen_or_pass0 & SEEN_XREG)
	emit_clear(r_X);

/* If this filter needs to access skb data,
 * load %o4 and %o5 with:
 *  %o4 = skb->len - skb->data_len
 *  %o5 = skb->data
 * And also back up %o7 into r_saved_O7 so we can
 * invoke the stubs using 'call'.
 */
if (seen_or_pass0 & SEEN_DATAREF) {
	/* %o4 = linear data length = skb->len - skb->data_len */
	emit_load32(r_SKB, struct sk_buff, len, r_HEADLEN);
	emit_load32(r_SKB, struct sk_buff, data_len, r_TMP);
	emit_sub(r_HEADLEN, r_TMP, r_HEADLEN);
	/* %o5 = start of packet data */
	emit_loadptr(r_SKB, struct sk_buff, data, r_SKB_DATA);
}
}
/* Preserve the return address so the load stubs can be
 * invoked with 'call' without losing our own caller.
 */
emit_reg_move(O7, r_saved_O7);

/* Make sure we don't leak kernel information to the user. */
if (bpf_needs_clear_a(&filter[0]))
	emit_clear(r_A); /* A = 0 */
/* Translate each classic BPF instruction into sparc code.
 * 'seen' accumulates which resources (X register, scratch
 * memory, packet data) this program actually uses.
 */
for (i = 0; i < flen; i++) {
	unsigned int K = filter[i].k;	/* immediate operand */
	unsigned int t_offset;		/* branch-taken target */
	unsigned int f_offset;		/* branch-not-taken target */
	u32 t_op, f_op;
	u16 code = bpf_anc_helper(&filter[i]);
	int ilen;

	switch (code) {
	case BPF_ALU | BPF_ADD | BPF_X:	/* A += X; */
		emit_alu_X(ADD);
		break;
	case BPF_ALU | BPF_ADD | BPF_K:	/* A += K; */
		emit_alu_K(ADD, K);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:	/* A -= X */
		emit_alu_X(SUB);
		break;
	case BPF_ALU | BPF_SUB | BPF_K:	/* A -= K */
		emit_alu_K(SUB, K);
		break;
	case BPF_ALU | BPF_AND | BPF_X:	/* A &= X */
		emit_alu_X(AND);
		break;
	case BPF_ALU | BPF_AND | BPF_K:	/* A &= K */
		emit_alu_K(AND, K);
		break;
	case BPF_ALU | BPF_OR | BPF_X:	/* A |= X */
		emit_alu_X(OR);
		break;
	case BPF_ALU | BPF_OR | BPF_K:	/* A |= K */
		emit_alu_K(OR, K);
		break;
	case BPF_ANC | SKF_AD_ALU_XOR_X:	/* A ^= X; */
	case BPF_ALU | BPF_XOR | BPF_X:
		emit_alu_X(XOR);
		break;
	case BPF_ALU | BPF_XOR | BPF_K:	/* A ^= K */
		emit_alu_K(XOR, K);
		break;
	case BPF_ALU | BPF_LSH | BPF_X:	/* A <<= X */
		emit_alu_X(SLL);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:	/* A <<= K */
		emit_alu_K(SLL, K);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:	/* A >>= X */
		emit_alu_X(SRL);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:	/* A >>= K */
		emit_alu_K(SRL, K);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:	/* A *= X; */
		emit_alu_X(MUL);
		break;
	case BPF_ALU | BPF_MUL | BPF_K:	/* A *= K */
		emit_alu_K(MUL, K);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:	/* A /= K with K != 0 */
		if (K == 1)
			break;	/* division by 1 is a no-op */
		emit_write_y(G0);
		/* The Sparc v8 architecture requires
		 * three instructions between a %y
		 * register write and the first use.
		 */
		emit_nop();
		emit_nop();
		emit_nop();
		emit_alu_K(DIV, K);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:	/* A /= X; */
		/* Division by zero must return 0 from the filter;
		 * branch to an existing "return 0" if one was
		 * emitted, else synthesize A = 0 and jump to the
		 * cleanup epilogue.
		 */
		emit_cmpi(r_X, 0);
		if (pc_ret0 > 0) {
			t_offset = addrs[pc_ret0 - 1];
			emit_branch(BE, t_offset + 20);
			emit_nop(); /* delay slot */
		} else {
			emit_branch_off(BNE, 16);
			emit_nop();
			emit_jump(cleanup_addr + 20);
			emit_clear(r_A);
		}
		emit_write_y(G0);
		/* The Sparc v8 architecture requires
		 * three instructions between a %y
		 * register write and the first use.
		 */
		emit_nop();
		emit_nop();
		emit_nop();
		emit_alu_X(DIV);
		break;
	case BPF_ALU | BPF_NEG:
		emit_neg();
		break;
	case BPF_RET | BPF_K:
		if (!K) {
			/* Remember the first "return 0" so later
			 * opcodes (e.g. DIV by zero) can branch to it.
			 */
			if (pc_ret0 == -1)
				pc_ret0 = i;
			emit_clear(r_A);
		} else {
			emit_loadimm(K, r_A);
		}
		fallthrough;
	case BPF_RET | BPF_A:
		if (seen_or_pass0) {
			if (i != flen - 1) {
				/* Not the last instruction: jump to the
				 * shared cleanup epilogue instead of
				 * duplicating it.
				 */
				emit_jump(cleanup_addr);
				emit_nop();
				break;
			}
			if (seen_or_pass0 & SEEN_MEM) {
				unsigned int sz = BASE_STACKFRAME;

				sz += BPF_MEMWORDS * sizeof(u32);
				emit_release_stack(sz);
			}
		}
		/* jmpl %r_saved_O7 + 8, %g0 */
		emit_jmpl(r_saved_O7, 8, G0);
		emit_reg_move(r_A, O0); /* delay slot */
		break;
	case BPF_MISC | BPF_TAX:	/* X = A */
		seen |= SEEN_XREG;
		emit_reg_move(r_A, r_X);
		break;
	case BPF_MISC | BPF_TXA:	/* A = X */
		seen |= SEEN_XREG;
		emit_reg_move(r_X, r_A);
		break;
	case BPF_ANC | SKF_AD_CPU:
		emit_load_cpu(r_A);
		break;
	case BPF_ANC | SKF_AD_PROTOCOL:
		emit_skb_load16(protocol, r_A);
		break;
	case BPF_ANC | SKF_AD_PKTTYPE:
		__emit_skb_load8(__pkt_type_offset, r_A);
		emit_andi(r_A, PKT_TYPE_MAX, r_A);
		emit_alu_K(SRL, 5);
		break;
	case BPF_ANC | SKF_AD_IFINDEX:
		emit_skb_loadptr(dev, r_A);
		/* skb->dev may be NULL; bail out via the epilogue. */
		emit_cmpi(r_A, 0);
		emit_branch(BE_PTR, cleanup_addr + 4);
		emit_nop();
		emit_load32(r_A, struct net_device, ifindex, r_A);
		break;
	case BPF_ANC | SKF_AD_MARK:
		emit_skb_load32(mark, r_A);
		break;
	case BPF_ANC | SKF_AD_QUEUE:
		emit_skb_load16(queue_mapping, r_A);
		break;
	case BPF_ANC | SKF_AD_HATYPE:
		emit_skb_loadptr(dev, r_A);
		/* skb->dev may be NULL; bail out via the epilogue. */
		emit_cmpi(r_A, 0);
		emit_branch(BE_PTR, cleanup_addr + 4);
		emit_nop();
		emit_load16(r_A, struct net_device, type, r_A);
		break;
	case BPF_ANC | SKF_AD_RXHASH:
		emit_skb_load32(hash, r_A);
		break;
	case BPF_ANC | SKF_AD_VLAN_TAG:
		emit_skb_load16(vlan_tci, r_A);
		break;
	case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
		/* A = (vlan_all != 0) ? 1 : 0 */
		emit_skb_load32(vlan_all, r_A);
		emit_cmpi(r_A, 0);
		emit_branch_off(BE, 12);
		emit_nop();
		emit_loadimm(1, r_A);
		break;
	case BPF_LD | BPF_W | BPF_LEN:
		emit_skb_load32(len, r_A);
		break;
	case BPF_LDX | BPF_W | BPF_LEN:
		emit_skb_load32(len, r_X);
		break;
	case BPF_LD | BPF_IMM:
		emit_loadimm(K, r_A);
		break;
	case BPF_LDX | BPF_IMM:
		emit_loadimm(K, r_X);
		break;
	case BPF_LD | BPF_MEM:
		seen |= SEEN_MEM;
		emit_ldmem(K * 4, r_A);
		break;
	case BPF_LDX | BPF_MEM:
		seen |= SEEN_MEM | SEEN_XREG;
		emit_ldmem(K * 4, r_X);
		break;
	case BPF_ST:
		seen |= SEEN_MEM;
		emit_stmem(K * 4, r_A);
		break;
	case BPF_STX:
		seen |= SEEN_MEM | SEEN_XREG;
		emit_stmem(K * 4, r_X);
		break;
/* NOTE(review): the following German website-disclaimer text appears to
 * be stray boilerplate accidentally pasted into this source file; it is
 * not code and should be removed.  English translation of the original:
 * "The information on this website has been carefully compiled to the
 * best of our knowledge.  However, no guarantee is given of the
 * completeness, correctness, or quality of the information provided.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */