Diffstat (limited to 'arch/powerpc/net/bpf_jit_comp.c')
-rw-r--r-- | arch/powerpc/net/bpf_jit_comp.c | 110
1 file changed, 52 insertions(+), 58 deletions(-)
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 82e82cadcde5..17cea18a09d3 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -1,8 +1,9 @@
-/* bpf_jit_comp.c: BPF JIT compiler for PPC64
+/* bpf_jit_comp.c: BPF JIT compiler
  *
  * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
  *
  * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
+ * Ported to ppc32 by Denis Kirjanov <kda@linux-powerpc.org>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -25,7 +26,7 @@ static inline void bpf_flush_icache(void *start, void *end)
 	flush_icache_range((unsigned long)start, (unsigned long)end);
 }
 
-static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
+static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
 				   struct codegen_context *ctx)
 {
 	int i;
@@ -36,11 +37,11 @@ static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
 		if (ctx->seen & SEEN_DATAREF) {
 			/* If we call any helpers (for loads), save LR */
 			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
-			PPC_STD(0, 1, 16);
+			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
 
 			/* Back up non-volatile regs. */
-			PPC_STD(r_D, 1, -(8*(32-r_D)));
-			PPC_STD(r_HL, 1, -(8*(32-r_HL)));
+			PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D)));
+			PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL)));
 		}
 		if (ctx->seen & SEEN_MEM) {
 			/*
@@ -49,11 +50,10 @@ static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
 			 */
 			for (i = r_M; i < (r_M+16); i++) {
 				if (ctx->seen & (1 << (i-r_M)))
-					PPC_STD(i, 1, -(8*(32-i)));
+					PPC_BPF_STL(i, 1, -(REG_SZ*(32-i)));
 			}
 		}
-		EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
-		     (-BPF_PPC_STACKFRAME & 0xfffc));
+		PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
 	}
 
 	if (ctx->seen & SEEN_DATAREF) {
@@ -67,7 +67,7 @@ static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
 							 data_len));
 		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
 		PPC_SUB(r_HL, r_HL, r_scratch1);
-		PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
+		PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
 	}
 
 	if (ctx->seen & SEEN_XREG) {
@@ -99,16 +99,16 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
 		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
 		if (ctx->seen & SEEN_DATAREF) {
-			PPC_LD(0, 1, 16);
+			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
 			PPC_MTLR(0);
-			PPC_LD(r_D, 1, -(8*(32-r_D)));
-			PPC_LD(r_HL, 1, -(8*(32-r_HL)));
+			PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D)));
+			PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL)));
 		}
 		if (ctx->seen & SEEN_MEM) {
 			/* Restore any saved non-vol registers */
 			for (i = r_M; i < (r_M+16); i++) {
 				if (ctx->seen & (1 << (i-r_M)))
-					PPC_LD(i, 1, -(8*(32-i)));
+					PPC_BPF_LL(i, 1, -(REG_SZ*(32-i)));
 			}
 		}
 	}
@@ -121,7 +121,7 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
 
 /* Assemble the body code between the prologue & epilogue. */
-static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
+static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 			      struct codegen_context *ctx,
 			      unsigned int *addrs)
 {
@@ -181,6 +181,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			}
 			break;
 		case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
+		case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
 			ctx->seen |= SEEN_XREG;
 			PPC_CMPWI(r_X, 0);
 			if (ctx->pc_ret0 != -1) {
@@ -190,9 +191,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 				PPC_LI(r_ret, 0);
 				PPC_JMP(exit_addr);
 			}
-			PPC_DIVWU(r_scratch1, r_A, r_X);
-			PPC_MUL(r_scratch1, r_X, r_scratch1);
-			PPC_SUB(r_A, r_A, r_scratch1);
+			if (code == (BPF_ALU | BPF_MOD | BPF_X)) {
+				PPC_DIVWU(r_scratch1, r_A, r_X);
+				PPC_MUL(r_scratch1, r_X, r_scratch1);
+				PPC_SUB(r_A, r_A, r_scratch1);
+			} else {
+				PPC_DIVWU(r_A, r_A, r_X);
+			}
 			break;
 		case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
 			PPC_LI32(r_scratch2, K);
@@ -200,22 +205,6 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
 			PPC_SUB(r_A, r_A, r_scratch1);
 			break;
-		case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
-			ctx->seen |= SEEN_XREG;
-			PPC_CMPWI(r_X, 0);
-			if (ctx->pc_ret0 != -1) {
-				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
-			} else {
-				/*
-				 * Exit, returning 0; first pass hits here
-				 * (longer worst-case code size).
-				 */
-				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
-				PPC_LI(r_ret, 0);
-				PPC_JMP(exit_addr);
-			}
-			PPC_DIVWU(r_A, r_A, r_X);
-			break;
 		case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
 			if (K == 1)
 				break;
@@ -361,21 +350,30 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 						  protocol));
 			break;
 		case BPF_ANC | SKF_AD_IFINDEX:
-			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
+		case BPF_ANC | SKF_AD_HATYPE:
+			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
+						ifindex) != 4);
+			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
+						type) != 2);
+			PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
 								dev));
 			PPC_CMPDI(r_scratch1, 0);
 			if (ctx->pc_ret0 != -1) {
 				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
 			} else {
 				/* Exit, returning 0; first pass hits here. */
-				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
+				PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12);
 				PPC_LI(r_ret, 0);
 				PPC_JMP(exit_addr);
 			}
-			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
-						  ifindex) != 4);
-			PPC_LWZ_OFFS(r_A, r_scratch1,
+			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
+				PPC_LWZ_OFFS(r_A, r_scratch1,
 					offsetof(struct net_device, ifindex));
+			} else {
+				PPC_LHZ_OFFS(r_A, r_scratch1,
+					offsetof(struct net_device, type));
+			}
+			break;
 		case BPF_ANC | SKF_AD_MARK:
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
 						  mark) != 4);
@@ -407,21 +405,14 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
 							  queue_mapping));
 			break;
+		case BPF_ANC | SKF_AD_PKTTYPE:
+			PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET());
+			PPC_ANDI(r_A, r_A, PKT_TYPE_MAX);
+			PPC_SRWI(r_A, r_A, 5);
+			break;
 		case BPF_ANC | SKF_AD_CPU:
-#ifdef CONFIG_SMP
-			/*
-			 * PACA ptr is r13:
-			 * raw_smp_processor_id() = local_paca->paca_index
-			 */
-			BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
-						  paca_index) != 2);
-			PPC_LHZ_OFFS(r_A, 13,
-				offsetof(struct paca_struct, paca_index));
-#else
-			PPC_LI(r_A, 0);
-#endif
+			PPC_BPF_LOAD_CPU(r_A);
 			break;
-
 		/*** Absolute loads from packet header/data ***/
 		case BPF_LD | BPF_W | BPF_ABS:
 			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
@@ -434,7 +425,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 		common_load:
 			/* Load from [K]. */
 			ctx->seen |= SEEN_DATAREF;
-			PPC_LI64(r_scratch1, func);
+			PPC_FUNC_ADDR(r_scratch1, func);
 			PPC_MTLR(r_scratch1);
 			PPC_LI32(r_addr, K);
 			PPC_BLRL();
@@ -460,7 +451,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			 * in the helper functions.
 			 */
 			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
-			PPC_LI64(r_scratch1, func);
+			PPC_FUNC_ADDR(r_scratch1, func);
 			PPC_MTLR(r_scratch1);
 			PPC_ADDI(r_addr, r_X, IMM_L(K));
 			if (K >= 32768)
@@ -569,7 +560,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 	return 0;
 }
 
-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
 {
 	unsigned int proglen;
 	unsigned int alloclen;
@@ -682,20 +673,23 @@ void bpf_jit_compile(struct sk_filter *fp)
 	if (image) {
 		bpf_flush_icache(code_base, code_base + (proglen/4));
+#ifdef CONFIG_PPC64
 		/* Function descriptor nastiness: Address + TOC */
 		((u64 *)image)[0] = (u64)code_base;
 		((u64 *)image)[1] = local_paca->kernel_toc;
+#endif
 		fp->bpf_func = (void *)image;
-		fp->jited = 1;
+		fp->jited = true;
 	}
 out:
 	kfree(addrs);
 	return;
 }
 
-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
 	if (fp->jited)
-		module_free(NULL, fp->bpf_func);
-	kfree(fp);
+		module_memfree(fp->bpf_func);
+
+	bpf_prog_unlock_free(fp);
 }
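
The PPC_BPF_STL / PPC_BPF_LL / PPC_BPF_STLU and REG_SZ names used above are word-size abstractions: the same JIT body emits doubleword stores/loads on ppc64 and word stores/loads on ppc32. A minimal sketch of how such wrappers could be defined (the real definitions live in the companion header arch/powerpc/net/bpf_jit.h; the exact spellings below are an assumption, not quoted from that header):

/*
 * Sketch only: word-size wrappers implied by the diff above (assumed
 * definitions, not copied from arch/powerpc/net/bpf_jit.h).
 * On ppc64 a GPR is 8 bytes, so std/ld/stdu are emitted; on ppc32 a
 * GPR is 4 bytes, so stw/lwz/stwu are emitted instead.
 */
#ifdef CONFIG_PPC64
#define REG_SZ				8	/* bytes per saved GPR */
#define PPC_BPF_LL(r, base, i)		PPC_LD(r, base, i)	/* ld */
#define PPC_BPF_STL(r, base, i)		PPC_STD(r, base, i)	/* std */
#define PPC_BPF_STLU(r, base, i)	PPC_STDU(r, base, i)	/* stdu */
#else
#define REG_SZ				4	/* bytes per saved GPR */
#define PPC_BPF_LL(r, base, i)		PPC_LWZ(r, base, i)	/* lwz */
#define PPC_BPF_STL(r, base, i)		PPC_STW(r, base, i)	/* stw */
#define PPC_BPF_STLU(r, base, i)	PPC_STWU(r, base, i)	/* stwu */
#endif

In the same spirit, PPC_FUNC_ADDR replaces PPC_LI64 so a helper's entry address can be materialized as a 32-bit immediate on ppc32 and as the full 64-bit sequence on ppc64, and PPC_BPF_LOAD_CPU hides the SMP/ppc32/ppc64 differences in reading the CPU number behind one macro.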