 #include <linux/stop_machine.h>
 #include <asm/patch.h>
 #include <asm/cfi.h>
+#include <asm/percpu.h>
 #include "bpf_jit.h"
 
 #define RV_FENTRY_NINSNS 2
@@ -1089,6 +1090,24 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 			emit_or(RV_REG_T1, rd, RV_REG_T1, ctx);
 			emit_mv(rd, RV_REG_T1, ctx);
 			break;
+		} else if (insn_is_mov_percpu_addr(insn)) {
+			if (rd != rs)
+				emit_mv(rd, rs, ctx);
+#ifdef CONFIG_SMP
+			/* Load current CPU number in T1 */
+			emit_ld(RV_REG_T1, offsetof(struct thread_info, cpu),
+				RV_REG_TP, ctx);
+			/* << 3 because offsets are 8 bytes */
+			emit_slli(RV_REG_T1, RV_REG_T1, 3, ctx);
+			/* Load address of __per_cpu_offset array in T2 */
+			emit_addr(RV_REG_T2, (u64)&__per_cpu_offset, extra_pass, ctx);
+			/* Add offset of current CPU to __per_cpu_offset */
+			emit_add(RV_REG_T1, RV_REG_T2, RV_REG_T1, ctx);
+			/* Load __per_cpu_offset[cpu] in T1 */
+			emit_ld(RV_REG_T1, 0, RV_REG_T1, ctx);
+			/* Add the offset to Rd */
+			emit_add(rd, rd, RV_REG_T1, ctx);
+#endif
 		}
 		if (imm == 1) {
 			/* Special mov32 for zext */
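The hunk above lowers the per-CPU address cast (insn_is_mov_percpu_addr()) entirely in generated code: it reads the CPU index from thread_info (reachable through the tp register), scales it by 8 to index the 64-bit __per_cpu_offset[] table, and adds the selected offset to the base already in rd. The user-space C sketch below only models that arithmetic on mocked-up data; mock_thread_info, mock_per_cpu_offset, percpu_addr() and the constants are stand-ins for illustration, not the kernel's objects, and a 64-bit target is assumed.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel structures the JIT consults (illustration only). */
struct mock_thread_info { uint64_t cpu; };

static uint64_t mock_per_cpu_offset[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };

/* Mirrors the emitted sequence: rd += __per_cpu_offset[thread_info->cpu]. */
static uint64_t percpu_addr(uint64_t rd, const struct mock_thread_info *tp)
{
	uint64_t t1 = tp->cpu;                           /* emit_ld   T1, cpu(TP)             */
	uint64_t t2;

	t1 <<= 3;                                        /* emit_slli T1, T1, 3 (8-byte slots) */
	t2 = (uint64_t)(uintptr_t)mock_per_cpu_offset;   /* emit_addr T2, &__per_cpu_offset   */
	t1 = t2 + t1;                                    /* emit_add  T1, T2, T1              */
	t1 = *(const uint64_t *)(uintptr_t)t1;           /* emit_ld   T1, 0(T1)               */
	return rd + t1;                                  /* emit_add  rd, rd, T1              */
}

int main(void)
{
	struct mock_thread_info ti = { .cpu = 2 };

	/* Base 0x8000 on "CPU 2" resolves to 0x8000 + mock_per_cpu_offset[2]. */
	printf("0x%llx\n", (unsigned long long)percpu_addr(0x8000, &ti));
	return 0;
}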
@@ -1474,6 +1493,22 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 		bool fixed_addr;
 		u64 addr;
 
+		/* Inline calls to bpf_get_smp_processor_id()
+		 *
+		 * RV_REG_TP holds the address of the current CPU's task_struct and thread_info is
+		 * at offset 0 in task_struct.
+		 * Load cpu from thread_info:
+		 * Set R0 to ((struct thread_info *)(RV_REG_TP))->cpu
+		 *
+		 * This replicates the implementation of raw_smp_processor_id() on RISCV
+		 */
+		if (insn->src_reg == 0 && insn->imm == BPF_FUNC_get_smp_processor_id) {
+			/* Load current CPU number in R0 */
+			emit_ld(bpf_to_rv_reg(BPF_REG_0, ctx), offsetof(struct thread_info, cpu),
+				RV_REG_TP, ctx);
+			break;
+		}
+
 		mark_call(ctx);
 		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
 					    &addr, &fixed_addr);
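In the BPF_CALL path, src_reg == 0 identifies a plain helper call (as opposed to a bpf-to-bpf or kfunc call) and imm names the helper, so the check above catches every call to bpf_get_smp_processor_id() and replaces the out-of-line call with one load of thread_info->cpu into R0. The BPF program below is only an illustration of code that benefits from this: the tracepoint, section name, and log_cpu() are arbitrary choices, and it assumes libbpf's bpf_helpers.h for the SEC() and bpf_printk() macros.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative program: the bpf_get_smp_processor_id() call below is now
 * JITed on RISC-V as a single load from thread_info->cpu, not a helper call.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tp/sched/sched_switch")
int log_cpu(void *ctx)
{
	__u32 cpu = bpf_get_smp_processor_id();

	bpf_printk("sched_switch on cpu %u", cpu);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";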
@@ -2038,3 +2073,18 @@ bool bpf_jit_supports_arena(void)
 {
 	return true;
 }
+
+bool bpf_jit_supports_percpu_insn(void)
+{
+	return true;
+}
+
+bool bpf_jit_inlines_helper_call(s32 imm)
+{
+	switch (imm) {
+	case BPF_FUNC_get_smp_processor_id:
+		return true;
+	default:
+		return false;
+	}
+}
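These two functions advertise the new capabilities to the BPF core: bpf_jit_supports_percpu_insn() says this JIT can lower the per-CPU address-cast mov, and bpf_jit_inlines_helper_call() tells the core not to apply its own generic helper inlining for helpers the JIT already expands. The sketch below is a simplified model of the core-side defaults being overridden, assuming they are provided as __weak stubs returning false in the generic BPF code; it is not verbatim kernel source.

/* Rough model of the generic defaults (simplified): an arch JIT opts in by
 * supplying its own definitions, as the RISC-V JIT does above.
 */
bool __weak bpf_jit_supports_percpu_insn(void)
{
	return false;	/* JIT cannot lower the per-CPU address-cast mov */
}

bool __weak bpf_jit_inlines_helper_call(s32 imm)
{
	return false;	/* JIT emits a real call for every helper */
}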