
Commit 55302bc
Author: Alexei Starovoitov
Merge branch 'bpf-inline-helpers-in-arm64-and-riscv-jits'
Puranjay Mohan says:

====================
bpf: Inline helpers in arm64 and riscv JITs

Changes in v5 -> v6:
arm64 v5: https://lore.kernel.org/all/[email protected]/
riscv v2: https://lore.kernel.org/all/[email protected]/
- Combine riscv and arm64 changes in a single series
- Some coding style fixes

Changes in v4 -> v5:
v4: https://lore.kernel.org/all/[email protected]/
- Implement the inlining of bpf_get_smp_processor_id() in the JIT.

NOTE: This needs to be based on:
https://lore.kernel.org/all/[email protected]/
to be built.

Manual run of bpf-ci with this series rebased on the above:
kernel-patches/bpf#6929

Changes in v3 -> v4:
v3: https://lore.kernel.org/all/[email protected]/
- Fix coding style issue related to C89 standards.

Changes in v2 -> v3:
v2: https://lore.kernel.org/all/[email protected]/
- Fixed the xlated dump of percpu mov to "r0 = &(void __percpu *)(r0)"
- Made ARM64 and x86-64 use the same code for inlining. The only
  difference that remains is the per-cpu address of the cpu_number.

Changes in v1 -> v2:
v1: https://lore.kernel.org/all/[email protected]/
- Add a patch to inline bpf_get_smp_processor_id()
- Fix an issue in MRS instruction encoding as pointed out by Will
- Remove CONFIG_SMP check because the arm64 kernel always compiles
  with CONFIG_SMP

This series adds support for internal-only per-CPU instructions and
inlines the bpf_get_smp_processor_id() helper call for the ARM64 and
RISC-V BPF JITs.

Here is an example of calls to bpf_get_smp_processor_id() and
percpu_array_map_lookup_elem() before and after this series on ARM64.

BPF
=====

BEFORE:

    int cpu = bpf_get_smp_processor_id();
    (85) call bpf_get_smp_processor_id#229032

    p = bpf_map_lookup_elem(map, &zero);
    (18) r1 = map[id:78]
    (18) r2 = map[id:82][0]+65536
    (85) call percpu_array_map_lookup_elem#313512

AFTER:

    int cpu = bpf_get_smp_processor_id();
    (85) call bpf_get_smp_processor_id#8

    p = bpf_map_lookup_elem(map, &zero);
    (18) r1 = map[id:153]
    (18) r2 = map[id:157][0]+65536
    (07) r1 += 496
    (61) r0 = *(u32 *)(r2 +0)
    (35) if r0 >= 0x1 goto pc+5
    (67) r0 <<= 3
    (0f) r0 += r1
    (79) r0 = *(u64 *)(r0 +0)
    (bf) r0 = &(void __percpu *)(r0)
    (05) goto pc+1
    (b7) r0 = 0

ARM64 JIT
===========

BEFORE:

    int cpu = bpf_get_smp_processor_id();
    mov x10, #0xfffffffffffff4d0
    movk x10, #0x802b, lsl #16
    movk x10, #0x8000, lsl #32
    blr x10
    add x7, x0, #0x0

    p = bpf_map_lookup_elem(map, &zero);
    mov x0, #0xffff0003ffffffff
    movk x0, #0xce5c, lsl #16
    movk x0, #0xca00
    mov x1, #0xffff8000ffffffff
    movk x1, #0x8bdb, lsl #16
    movk x1, #0x6000
    mov x10, #0xffffffffffff3ed0
    movk x10, #0x802d, lsl #16
    movk x10, #0x8000, lsl #32
    blr x10
    add x7, x0, #0x0

AFTER:

    int cpu = bpf_get_smp_processor_id();
    mrs x10, sp_el0
    ldr w7, [x10, #24]

    p = bpf_map_lookup_elem(map, &zero);
    mov x0, #0xffff0003ffffffff
    movk x0, #0xe0f3, lsl #16
    movk x0, #0x7c00
    mov x1, #0xffff8000ffffffff
    movk x1, #0xb0c7, lsl #16
    movk x1, #0xe000
    add x0, x0, #0x1f0
    ldr w7, [x1]
    cmp x7, #0x1
    b.cs 0x0000000000000090
    lsl x7, x7, #3
    add x7, x7, x0
    ldr x7, [x7]
    mrs x10, tpidr_el1
    add x7, x7, x10
    b 0x0000000000000094
    mov x7, #0x0

Performance improvement found using benchmark [1]:

./benchs/run_bench_trigger.sh glob-arr-inc arr-inc hash-inc

+---------------+-------------------+-------------------+--------------+
| Name          | Before            | After             | % change     |
|---------------+-------------------+-------------------+--------------|
| glob-arr-inc  | 23.380 ± 1.675M/s | 25.893 ± 0.026M/s | + 10.74%     |
| arr-inc       | 23.928 ± 0.034M/s | 25.213 ± 0.063M/s | + 5.37%      |
| hash-inc      | 12.352 ± 0.005M/s | 12.609 ± 0.013M/s | + 2.08%      |
+---------------+-------------------+-------------------+--------------+

[1] anakryiko/linux@8dec900975ef

RISCV64 JIT output for `call bpf_get_smp_processor_id`
=======================================================

BEFORE:

    auipc t1,0x848c
    jalr 604(t1)
    mv a5,a0

AFTER:

    ld a5,32(tp)

Benchmark using [1] on Qemu:

./benchs/run_bench_trigger.sh glob-arr-inc arr-inc hash-inc

+---------------+------------------+------------------+--------------+
| Name          | Before           | After            | % change     |
|---------------+------------------+------------------+--------------|
| glob-arr-inc  | 1.077 ± 0.006M/s | 1.336 ± 0.010M/s | + 24.04%     |
| arr-inc       | 1.078 ± 0.002M/s | 1.332 ± 0.015M/s | + 23.56%     |
| hash-inc      | 0.494 ± 0.004M/s | 0.653 ± 0.001M/s | + 32.18%     |
+---------------+------------------+------------------+--------------+
====================

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
2 parents: f122668 + 75fe4c0

8 files changed, +132 -0 lines

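Before the per-file diffs, here is a minimal sketch of a BPF program that hits both code paths shown in the cover letter's dumps. It is illustrative only and not part of the series; the map and function names (counter_map, on_enter) are invented:

/* Sketch: exercises bpf_get_smp_processor_id() and a per-CPU array
 * lookup, the two call sites whose codegen the dumps above compare. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
    __uint(max_entries, 1);
    __type(key, __u32);
    __type(value, __u64);
} counter_map SEC(".maps");

SEC("raw_tp/sys_enter")
int on_enter(void *ctx)
{
    __u32 zero = 0;
    __u64 *p;

    /* After this series, the arm64/riscv JITs emit this inline
     * instead of an out-of-line helper call. */
    int cpu = bpf_get_smp_processor_id();

    /* Per the AFTER xlated dump, the verifier-inlined lookup ends in
     * the "r0 = &(void __percpu *)(r0)" instruction. */
    p = bpf_map_lookup_elem(&counter_map, &zero);
    if (p)
        *p += cpu;
    return 0;
}

char LICENSE[] SEC("license") = "GPL";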

arch/arm64/include/asm/insn.h

Lines changed: 8 additions & 0 deletions

@@ -135,6 +135,12 @@ enum aarch64_insn_special_register {
     AARCH64_INSN_SPCLREG_SP_EL2 = 0xF210
 };
 
+enum aarch64_insn_system_register {
+    AARCH64_INSN_SYSREG_TPIDR_EL1 = 0x4684,
+    AARCH64_INSN_SYSREG_TPIDR_EL2 = 0x6682,
+    AARCH64_INSN_SYSREG_SP_EL0    = 0x4208,
+};
+
 enum aarch64_insn_variant {
     AARCH64_INSN_VARIANT_32BIT,
     AARCH64_INSN_VARIANT_64BIT
@@ -686,6 +692,8 @@ u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
 }
 #endif
 u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type);
+u32 aarch64_insn_gen_mrs(enum aarch64_insn_register result,
+                         enum aarch64_insn_system_register sysreg);
 
 s32 aarch64_get_branch_offset(u32 insn);
 u32 aarch64_set_branch_offset(u32 insn, s32 offset);
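These enum values pack the MRS system-register operand fields o0:op1:CRn:CRm:op2 into the 15 bits that aarch64_insn_gen_mrs() (next file) shifts into bits [19:5] of the instruction. A quick sanity check of the three constants, as a sketch (SYSREG_PACK is an invented helper; the field values come from the architectural S3_x_Cn_Cm_y register names):

/* Sketch: decode the sysreg constants above. TPIDR_EL1 is
 * S3_0_C13_C0_4 (o0=1 since op0=3, op1=0, CRn=13, CRm=0, op2=4),
 * TPIDR_EL2 is S3_4_C13_C0_2, SP_EL0 is S3_0_C4_C1_0. */
#define SYSREG_PACK(o0, op1, crn, crm, op2) \
    (((o0) << 14) | ((op1) << 11) | ((crn) << 7) | ((crm) << 3) | (op2))

_Static_assert(SYSREG_PACK(1, 0, 13, 0, 4) == 0x4684, "TPIDR_EL1");
_Static_assert(SYSREG_PACK(1, 4, 13, 0, 2) == 0x6682, "TPIDR_EL2");
_Static_assert(SYSREG_PACK(1, 0, 4, 1, 0)  == 0x4208, "SP_EL0");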

arch/arm64/lib/insn.c

Lines changed: 11 additions & 0 deletions

@@ -1515,3 +1515,14 @@ u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
 
     return insn;
 }
+
+u32 aarch64_insn_gen_mrs(enum aarch64_insn_register result,
+                         enum aarch64_insn_system_register sysreg)
+{
+    u32 insn = aarch64_insn_get_mrs_value();
+
+    insn &= ~GENMASK(19, 0);
+    insn |= sysreg << 5;
+    return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT,
+                                        insn, result);
+}
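For example (a sketch, with the register choice taken from the JIT dump above rather than from this patch):

/* Encode "mrs x10, sp_el0", the first instruction in the AFTER
 * column of the ARM64 JIT dump in the cover letter. */
u32 insn = aarch64_insn_gen_mrs(AARCH64_INSN_REG_10,
                                AARCH64_INSN_SYSREG_SP_EL0);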

arch/arm64/net/bpf_jit.h

Lines changed: 8 additions & 0 deletions

@@ -297,4 +297,12 @@
 #define A64_ADR(Rd, offset) \
     aarch64_insn_gen_adr(0, offset, Rd, AARCH64_INSN_ADR_TYPE_ADR)
 
+/* MRS */
+#define A64_MRS_TPIDR_EL1(Rt) \
+    aarch64_insn_gen_mrs(Rt, AARCH64_INSN_SYSREG_TPIDR_EL1)
+#define A64_MRS_TPIDR_EL2(Rt) \
+    aarch64_insn_gen_mrs(Rt, AARCH64_INSN_SYSREG_TPIDR_EL2)
+#define A64_MRS_SP_EL0(Rt) \
+    aarch64_insn_gen_mrs(Rt, AARCH64_INSN_SYSREG_SP_EL0)
+
 #endif /* _BPF_JIT_H */
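These wrappers are what bpf_jit_comp.c below emits: emit(A64_MRS_SP_EL0(tmp), ctx) for the processor-id fast path, and emit(A64_MRS_TPIDR_EL1(tmp), ctx) or emit(A64_MRS_TPIDR_EL2(tmp), ctx) for per-CPU address formation, chosen by whether the kernel runs with VHE (ARM64_HAS_VIRT_HOST_EXTN).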

arch/arm64/net/bpf_jit_comp.c

Lines changed: 39 additions & 0 deletions

@@ -890,6 +890,15 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
             emit(A64_ORR(1, tmp, dst, tmp), ctx);
             emit(A64_MOV(1, dst, tmp), ctx);
             break;
+        } else if (insn_is_mov_percpu_addr(insn)) {
+            if (dst != src)
+                emit(A64_MOV(1, dst, src), ctx);
+            if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
+                emit(A64_MRS_TPIDR_EL2(tmp), ctx);
+            else
+                emit(A64_MRS_TPIDR_EL1(tmp), ctx);
+            emit(A64_ADD(1, dst, dst, tmp), ctx);
+            break;
         }
         switch (insn->off) {
         case 0:
@@ -1219,6 +1228,21 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
         const u8 r0 = bpf2a64[BPF_REG_0];
         bool func_addr_fixed;
         u64 func_addr;
+        u32 cpu_offset;
+
+        /* Implement helper call to bpf_get_smp_processor_id() inline */
+        if (insn->src_reg == 0 && insn->imm == BPF_FUNC_get_smp_processor_id) {
+            cpu_offset = offsetof(struct thread_info, cpu);
+
+            emit(A64_MRS_SP_EL0(tmp), ctx);
+            if (is_lsi_offset(cpu_offset, 2)) {
+                emit(A64_LDR32I(r0, tmp, cpu_offset), ctx);
+            } else {
+                emit_a64_mov_i(1, tmp2, cpu_offset, ctx);
+                emit(A64_LDR32(r0, tmp, tmp2), ctx);
+            }
+            break;
+        }
 
         ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
                                     &func_addr, &func_addr_fixed);
@@ -2559,6 +2583,21 @@ bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
     return true;
 }
 
+bool bpf_jit_supports_percpu_insn(void)
+{
+    return true;
+}
+
+bool bpf_jit_inlines_helper_call(s32 imm)
+{
+    switch (imm) {
+    case BPF_FUNC_get_smp_processor_id:
+        return true;
+    default:
+        return false;
+    }
+}
+
 void bpf_jit_free(struct bpf_prog *prog)
 {
     if (prog->jited) {
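In C terms, the two inlined arm64 sequences above compute roughly the following (a semantics-only sketch with invented function names; on arm64, sp_el0 holds the current task_struct, whose first member is thread_info, and TPIDR_EL1, or TPIDR_EL2 under VHE, holds this CPU's per-CPU offset):

/* Sketch: what the emitted instructions compute. */
static inline u32 a64_inlined_cpu(void)
{
    /* "mrs x10, sp_el0 ; ldr w7, [x10, #24]" in the dump above */
    return ((struct thread_info *)read_sysreg(sp_el0))->cpu;
}

static inline void *a64_inlined_percpu_addr(void *base)
{
    /* BPF_MOV64_PERCPU_REG: per-CPU base -> this CPU's instance */
    return base + read_sysreg(tpidr_el1); /* tpidr_el2 under VHE */
}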

arch/riscv/net/bpf_jit_comp64.c

Lines changed: 50 additions & 0 deletions

@@ -12,6 +12,7 @@
 #include <linux/stop_machine.h>
 #include <asm/patch.h>
 #include <asm/cfi.h>
+#include <asm/percpu.h>
 #include "bpf_jit.h"
 
 #define RV_FENTRY_NINSNS 2
@@ -1089,6 +1090,24 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
             emit_or(RV_REG_T1, rd, RV_REG_T1, ctx);
             emit_mv(rd, RV_REG_T1, ctx);
             break;
+        } else if (insn_is_mov_percpu_addr(insn)) {
+            if (rd != rs)
+                emit_mv(rd, rs, ctx);
+#ifdef CONFIG_SMP
+            /* Load current CPU number in T1 */
+            emit_ld(RV_REG_T1, offsetof(struct thread_info, cpu),
+                    RV_REG_TP, ctx);
+            /* << 3 because offsets are 8 bytes */
+            emit_slli(RV_REG_T1, RV_REG_T1, 3, ctx);
+            /* Load address of __per_cpu_offset array in T2 */
+            emit_addr(RV_REG_T2, (u64)&__per_cpu_offset, extra_pass, ctx);
+            /* Add offset of current CPU to __per_cpu_offset */
+            emit_add(RV_REG_T1, RV_REG_T2, RV_REG_T1, ctx);
+            /* Load __per_cpu_offset[cpu] in T1 */
+            emit_ld(RV_REG_T1, 0, RV_REG_T1, ctx);
+            /* Add the offset to Rd */
+            emit_add(rd, rd, RV_REG_T1, ctx);
+#endif
         }
         if (imm == 1) {
             /* Special mov32 for zext */
@@ -1474,6 +1493,22 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
         bool fixed_addr;
         u64 addr;
 
+        /* Inline calls to bpf_get_smp_processor_id()
+         *
+         * RV_REG_TP holds the address of the current CPU's task_struct and thread_info is
+         * at offset 0 in task_struct.
+         * Load cpu from thread_info:
+         * Set R0 to ((struct thread_info *)(RV_REG_TP))->cpu
+         *
+         * This replicates the implementation of raw_smp_processor_id() on RISCV
+         */
+        if (insn->src_reg == 0 && insn->imm == BPF_FUNC_get_smp_processor_id) {
+            /* Load current CPU number in R0 */
+            emit_ld(bpf_to_rv_reg(BPF_REG_0, ctx), offsetof(struct thread_info, cpu),
+                    RV_REG_TP, ctx);
+            break;
+        }
+
         mark_call(ctx);
         ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
                                     &addr, &fixed_addr);
@@ -2038,3 +2073,18 @@ bool bpf_jit_supports_arena(void)
 {
     return true;
 }
+
+bool bpf_jit_supports_percpu_insn(void)
+{
+    return true;
+}
+
+bool bpf_jit_inlines_helper_call(s32 imm)
+{
+    switch (imm) {
+    case BPF_FUNC_get_smp_processor_id:
+        return true;
+    default:
+        return false;
+    }
+}
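The riscv64 equivalent in C terms (again a semantics-only sketch with invented names): tp points at the current task_struct, whose first member is thread_info, so a single load yields the CPU id; the per-CPU offset then comes from the generic __per_cpu_offset[] table, whose 8-byte entries explain the "<< 3" scaling in the emitted code:

/* Sketch: what the riscv64 JIT now emits inline. */
static inline int rv_inlined_cpu(void)
{
    /* the single "ld a5,32(tp)" in the dump above */
    return current_thread_info()->cpu;
}

static inline void *rv_inlined_percpu_addr(void *base)
{
    /* array indexing does the cpu * 8 scaling implicitly */
    return base + __per_cpu_offset[rv_inlined_cpu()];
}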

include/linux/filter.h

Lines changed: 1 addition & 0 deletions

@@ -993,6 +993,7 @@ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 void bpf_jit_compile(struct bpf_prog *prog);
 bool bpf_jit_needs_zext(void);
+bool bpf_jit_inlines_helper_call(s32 imm);
 bool bpf_jit_supports_subprog_tailcalls(void);
 bool bpf_jit_supports_percpu_insn(void);
 bool bpf_jit_supports_kfunc_call(void);

kernel/bpf/core.c

Lines changed: 11 additions & 0 deletions

@@ -2941,6 +2941,17 @@ bool __weak bpf_jit_needs_zext(void)
     return false;
 }
 
+/* Return true if the JIT inlines the call to the helper corresponding to
+ * the imm.
+ *
+ * The verifier will not patch the insn->imm for the call to the helper if
+ * this returns true.
+ */
+bool __weak bpf_jit_inlines_helper_call(s32 imm)
+{
+    return false;
+}
+
 /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
 bool __weak bpf_jit_supports_subprog_tailcalls(void)
 {
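Being __weak, this default is overridden by the non-weak arm64 and riscv definitions added above; every other architecture keeps the verifier's existing helper-patching behavior unchanged.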

kernel/bpf/verifier.c

Lines changed: 4 additions & 0 deletions

@@ -19996,6 +19996,10 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
             goto next_insn;
         }
 
+        /* Skip inlining the helper call if the JIT does it. */
+        if (bpf_jit_inlines_helper_call(insn->imm))
+            goto next_insn;
+
         if (insn->imm == BPF_FUNC_get_route_realm)
             prog->dst_needed = 1;
         if (insn->imm == BPF_FUNC_get_prandom_u32)
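This is the other half of the contract stated in the core.c comment: when a JIT claims a helper, do_misc_fixups() leaves the call instruction unpatched, which is why the AFTER xlated dump in the cover letter still shows "call bpf_get_smp_processor_id#8" (the raw UAPI helper ID) instead of the patched #229032 seen in the BEFORE dump.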
