author		Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>	2022-02-14 13:41:41 +0300
committer	Michael Ellerman <mpe@ellerman.id.au>	2022-03-07 16:04:58 +0300
commit		b10cb163c4b31b03ac5014abbfd0b868913fd8e3 (patch)
tree		4b37da73efaf0aabb1fd086489cd022d1ea8cc8e /arch/powerpc
parent		4eeac2b0aaadc3d1943d348d8565f7cfb93272b9 (diff)
download	linux-b10cb163c4b31b03ac5014abbfd0b868913fd8e3.tar.xz
powerpc64/bpf elfv2: Setup kernel TOC in r2 on entry
In preparation for using the kernel TOC, load it in r2 on entry. With
elfv1, the kernel TOC is already set up by our caller.

We adjust the number of instructions to skip on a tail call
accordingly. We get rid of the #ifdef in bpf_jit_emit_tail_call() since
FUNCTION_DESCR_SIZE is itself under a #ifdef.

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/18a05a4ceec14a8617c9dd4b7128d0afa83fd14e.1644834730.git.naveen.n.rao@linux.vnet.ibm.com
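As a rough, stand-alone sketch of the skip arithmetic described above (not kernel code: the helpers function_descr_size() and tailcall_prologue_size() are illustrative stand-ins for FUNCTION_DESCR_SIZE and bpf_tailcall_prologue_size, and the 24-byte ELFv1 descriptor size is assumed from the powerpc BPF JIT headers):

/* Illustrative only: models the tail-call branch offset after this patch. */
#include <stdbool.h>
#include <stdio.h>

/* ELFv1 function descriptors are three doublewords; ELFv2 has no descriptor. */
static unsigned int function_descr_size(bool elfv2)
{
	return elfv2 ? 0 : 24;
}

/* Two 4-byte instructions set up (or nop out) tail_call_cnt; under ELFv2
 * this patch emits one more instruction to load the kernel TOC from the
 * paca into r2, so the tail call must skip 4 more bytes.
 */
static unsigned int tailcall_prologue_size(bool elfv2)
{
	unsigned int size = 8;

	if (elfv2)
		size += 4;	/* skip past the toc load */
	return size;
}

int main(void)
{
	for (int v2 = 0; v2 <= 1; v2++)
		printf("ELFv%d: tail call branches to bpf_func + %u bytes\n",
		       v2 ? 2 : 1,
		       function_descr_size(v2) + tailcall_prologue_size(v2));
	return 0;
}

Run, this prints a 32-byte skip for ELFv1 (24-byte descriptor plus 8-byte prologue) and a 12-byte skip for ELFv2 (no descriptor, 8-byte prologue plus the 4-byte TOC load).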
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/net/bpf_jit_comp64.c	16
1 file changed, 8 insertions, 8 deletions
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 27ac2fc76702..44314ee60155 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -73,6 +73,9 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 {
 	int i;
 
+	if (__is_defined(PPC64_ELF_ABI_v2))
+		PPC_BPF_LL(_R2, _R13, offsetof(struct paca_struct, kernel_toc));
+
 	/*
 	 * Initialize tail_call_cnt if we do tail calls.
 	 * Otherwise, put in NOPs so that it can be skipped when we are
@@ -87,8 +90,6 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
 		EMIT(PPC_RAW_NOP());
 	}
 
-#define BPF_TAILCALL_PROLOGUE_SIZE	8
-
 	if (bpf_has_stack_frame(ctx)) {
 		/*
 		 * We need a stack frame, but we don't necessarily need to
@@ -217,6 +218,10 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
 	 */
 	int b2p_bpf_array = b2p[BPF_REG_2];
 	int b2p_index = b2p[BPF_REG_3];
+	int bpf_tailcall_prologue_size = 8;
+
+	if (__is_defined(PPC64_ELF_ABI_v2))
+		bpf_tailcall_prologue_size += 4; /* skip past the toc load */
 
 	/*
 	 * if (index >= array->map.max_entries)
@@ -255,13 +260,8 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
 
 	/* goto *(prog->bpf_func + prologue_size); */
 	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
-#ifdef PPC64_ELF_ABI_v1
-	/* skip past the function descriptor */
 	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
-			FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE));
-#else
-	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE));
-#endif
+			FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
 	EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
 
 	/* tear down stack, restore NVRs, ... */