mirror of
https://github.com/torvalds/linux.git
synced 2026-05-12 16:18:45 +02:00
Merge branch 'emit-endbr-bti-instructions-for-indirect'
Xu Kuohai says: ==================== emit ENDBR/BTI instructions for indirect On architectures with CFI protection enabled that require landing pad instructions at indirect jump targets, such as x86 with CET/IBT enabled and arm64 with BTI enabled, kernel panics when an indirect jump lands on a target without landing pad. Therefore, the JIT must emit landing pad instructions for indirect jump targets. The verifier already recognizes which instructions are indirect jump targets during the verification phase. So we can store this information in env->insn_aux_data and pass it to the JIT as new parameter, allowing the JIT to consult env->insn_aux_data to determine which instructions are indirect jump targets. During JIT, constants blinding is performed. It rewrites the private copy of instructions for the JITed program, but it does not adjust the global env->insn_aux_data array. As a result, after constants blinding, the instruction indexes used by JIT may no longer match the indexes in env->insn_aux_data, so the JIT can not use env->insn_aux_data directly. To avoid this mismatch, and given that all existing arch-specific JITs already implement constants blinding with largely duplicated code, move constants blinding from JIT to generic code. 
v15: - Rebase and target bpf tree - Restore subprog_start of the fake 'exit' subprog on failure - Fix wrong function name used in comment v14: https://lore.kernel.org/all/cover.1776062885.git.xukuohai@hotmail.com/ - Rebase - Fix comment style - Fix incorrect variable and function name used in commit message v13: https://lore.kernel.org/bpf/20260411133847.1042658-1-xukuohai@huaweicloud.com - Use vmalloc to allocate memory for insn_aux_data copies to match with vfree - Do not free the copied memory of insn_aux_data when restoring from failure - Code cleanup v12: https://lore.kernel.org/bpf/20260403132811.753894-1-xukuohai@huaweicloud.com - Restore env->insn_aux_data on JIT failure - Fix incorrect error code sign (-EFAULT vs EFAULT) - Fix incorrect prog used in the restore path v11: https://lore.kernel.org/bpf/20260403090915.473493-1-xukuohai@huaweicloud.com - Restore env->subprog_info after jit_subprogs() fails - Clear prog->jit_requested and prog->blinding_requested on failure - Use the actual env->insn_aux_data size in clear_insn_aux_data() on failure v10: https://lore.kernel.org/bpf/20260324122052.342751-1-xukuohai@huaweicloud.com - Fix the incorrect call_imm restore in jit_subprogs - Define a dummy void version of bpf_jit_prog_release_other and bpf_patch_insn_data when the corresponding config is not set - Remove the unnecessary #ifdef in x86_64 JIT (Leon Hwang) v9: https://lore.kernel.org/bpf/20260312170255.3427799-1-xukuohai@huaweicloud.com - Make constant blinding available for classic bpf (Eduard) - Clear prog->bpf_func, prog->jited ... 
on the error path of extra pass (Eduard) - Fix spelling errors and remove unused parameter (Anton Protopopov) v8: https://lore.kernel.org/bpf/20260309140044.2652538-1-xukuohai@huaweicloud.com - Define void bpf_jit_blind_constants() function when CONFIG_BPF_JIT is not set - Move indirect_target fixup for insn patching from bpf_jit_blind_constants() to adjust_insn_aux_data() v7: https://lore.kernel.org/bpf/20260307103949.2340104-1-xukuohai@huaweicloud.com - Move constants blinding logic back to bpf/core.c - Compute ip address before switch statement in x86 JIT - Clear JIT state from error path on arm64 and loongarch v6: https://lore.kernel.org/bpf/20260306102329.2056216-1-xukuohai@huaweicloud.com - Move constants blinding from JIT to verifier - Move call to bpf_prog_select_runtime from bpf_prog_load to verifier v5: https://lore.kernel.org/bpf/20260302102726.1126019-1-xukuohai@huaweicloud.com - Switch to pass env to JIT directly to get rid of copying private insn_aux_data for each prog v4: https://lore.kernel.org/all/20260114093914.2403982-1-xukuohai@huaweicloud.com - Switch to the approach proposed by Eduard, using insn_aux_data to identify indirect jump targets, and emit ENDBR on x86 v3: https://lore.kernel.org/bpf/20251227081033.240336-1-xukuohai@huaweicloud.com - Get rid of unnecessary enum definition (Yonghong Song, Anton Protopopov) v2: https://lore.kernel.org/bpf/20251223085447.139301-1-xukuohai@huaweicloud.com - Exclude instruction arrays not used for indirect jumps (Anton Protopopov) v1: https://lore.kernel.org/bpf/20251127140318.3944249-1-xukuohai@huaweicloud.com ==================== Link: https://patch.msgid.link/20260416064341.151802-1-xukuohai@huaweicloud.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
commit
1cedfe17ba
|
|
@ -79,7 +79,6 @@ struct arc_jit_data {
|
|||
* The JIT pertinent context that is used by different functions.
|
||||
*
|
||||
* prog: The current eBPF program being handled.
|
||||
* orig_prog: The original eBPF program before any possible change.
|
||||
* jit: The JIT buffer and its length.
|
||||
* bpf_header: The JITed program header. "jit.buf" points inside it.
|
||||
* emit: If set, opcodes are written to memory; else, a dry-run.
|
||||
|
|
@ -94,12 +93,10 @@ struct arc_jit_data {
|
|||
* need_extra_pass: A forecast if an "extra_pass" will occur.
|
||||
* is_extra_pass: Indicates if the current pass is an extra pass.
|
||||
* user_bpf_prog: True, if VM opcodes come from a real program.
|
||||
* blinded: True if "constant blinding" step returned a new "prog".
|
||||
* success: Indicates if the whole JIT went OK.
|
||||
*/
|
||||
struct jit_context {
|
||||
struct bpf_prog *prog;
|
||||
struct bpf_prog *orig_prog;
|
||||
struct jit_buffer jit;
|
||||
struct bpf_binary_header *bpf_header;
|
||||
bool emit;
|
||||
|
|
@ -114,7 +111,6 @@ struct jit_context {
|
|||
bool need_extra_pass;
|
||||
bool is_extra_pass;
|
||||
bool user_bpf_prog;
|
||||
bool blinded;
|
||||
bool success;
|
||||
};
|
||||
|
||||
|
|
@ -161,13 +157,7 @@ static int jit_ctx_init(struct jit_context *ctx, struct bpf_prog *prog)
|
|||
{
|
||||
memset(ctx, 0, sizeof(*ctx));
|
||||
|
||||
ctx->orig_prog = prog;
|
||||
|
||||
/* If constant blinding was requested but failed, scram. */
|
||||
ctx->prog = bpf_jit_blind_constants(prog);
|
||||
if (IS_ERR(ctx->prog))
|
||||
return PTR_ERR(ctx->prog);
|
||||
ctx->blinded = (ctx->prog != ctx->orig_prog);
|
||||
ctx->prog = prog;
|
||||
|
||||
/* If the verifier doesn't zero-extend, then we have to do it. */
|
||||
ctx->do_zext = !ctx->prog->aux->verifier_zext;
|
||||
|
|
@ -214,14 +204,6 @@ static inline void maybe_free(struct jit_context *ctx, void **mem)
|
|||
*/
|
||||
static void jit_ctx_cleanup(struct jit_context *ctx)
|
||||
{
|
||||
if (ctx->blinded) {
|
||||
/* if all went well, release the orig_prog. */
|
||||
if (ctx->success)
|
||||
bpf_jit_prog_release_other(ctx->prog, ctx->orig_prog);
|
||||
else
|
||||
bpf_jit_prog_release_other(ctx->orig_prog, ctx->prog);
|
||||
}
|
||||
|
||||
maybe_free(ctx, (void **)&ctx->bpf2insn);
|
||||
maybe_free(ctx, (void **)&ctx->jit_data);
|
||||
|
||||
|
|
@ -229,12 +211,19 @@ static void jit_ctx_cleanup(struct jit_context *ctx)
|
|||
ctx->bpf2insn_valid = false;
|
||||
|
||||
/* Freeing "bpf_header" is enough. "jit.buf" is a sub-array of it. */
|
||||
if (!ctx->success && ctx->bpf_header) {
|
||||
bpf_jit_binary_free(ctx->bpf_header);
|
||||
ctx->bpf_header = NULL;
|
||||
ctx->jit.buf = NULL;
|
||||
ctx->jit.index = 0;
|
||||
ctx->jit.len = 0;
|
||||
if (!ctx->success) {
|
||||
if (ctx->bpf_header) {
|
||||
bpf_jit_binary_free(ctx->bpf_header);
|
||||
ctx->bpf_header = NULL;
|
||||
ctx->jit.buf = NULL;
|
||||
ctx->jit.index = 0;
|
||||
ctx->jit.len = 0;
|
||||
}
|
||||
if (ctx->is_extra_pass) {
|
||||
ctx->prog->bpf_func = NULL;
|
||||
ctx->prog->jited = 0;
|
||||
ctx->prog->jited_len = 0;
|
||||
}
|
||||
}
|
||||
|
||||
ctx->emit = false;
|
||||
|
|
@ -1411,7 +1400,7 @@ static struct bpf_prog *do_extra_pass(struct bpf_prog *prog)
|
|||
* (re)locations involved that their addresses are not known
|
||||
* during the first run.
|
||||
*/
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog)
|
||||
{
|
||||
vm_dump(prog);
|
||||
|
||||
|
|
|
|||
|
|
@ -2142,11 +2142,9 @@ bool bpf_jit_needs_zext(void)
|
|||
return true;
|
||||
}
|
||||
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog)
|
||||
{
|
||||
struct bpf_prog *tmp, *orig_prog = prog;
|
||||
struct bpf_binary_header *header;
|
||||
bool tmp_blinded = false;
|
||||
struct jit_ctx ctx;
|
||||
unsigned int tmp_idx;
|
||||
unsigned int image_size;
|
||||
|
|
@ -2156,20 +2154,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
* the interpreter.
|
||||
*/
|
||||
if (!prog->jit_requested)
|
||||
return orig_prog;
|
||||
|
||||
/* If constant blinding was enabled and we failed during blinding
|
||||
* then we must fall back to the interpreter. Otherwise, we save
|
||||
* the new JITed code.
|
||||
*/
|
||||
tmp = bpf_jit_blind_constants(prog);
|
||||
|
||||
if (IS_ERR(tmp))
|
||||
return orig_prog;
|
||||
if (tmp != prog) {
|
||||
tmp_blinded = true;
|
||||
prog = tmp;
|
||||
}
|
||||
return prog;
|
||||
|
||||
memset(&ctx, 0, sizeof(ctx));
|
||||
ctx.prog = prog;
|
||||
|
|
@ -2179,10 +2164,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
* we must fall back to the interpreter
|
||||
*/
|
||||
ctx.offsets = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
|
||||
if (ctx.offsets == NULL) {
|
||||
prog = orig_prog;
|
||||
goto out;
|
||||
}
|
||||
if (ctx.offsets == NULL)
|
||||
return prog;
|
||||
|
||||
/* 1) fake pass to find in the length of the JITed code,
|
||||
* to compute ctx->offsets and other context variables
|
||||
|
|
@ -2194,10 +2177,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
* being successful in the second pass, so just fall back
|
||||
* to the interpreter.
|
||||
*/
|
||||
if (build_body(&ctx)) {
|
||||
prog = orig_prog;
|
||||
if (build_body(&ctx))
|
||||
goto out_off;
|
||||
}
|
||||
|
||||
tmp_idx = ctx.idx;
|
||||
build_prologue(&ctx);
|
||||
|
|
@ -2213,10 +2194,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
ctx.idx += ctx.imm_count;
|
||||
if (ctx.imm_count) {
|
||||
ctx.imms = kcalloc(ctx.imm_count, sizeof(u32), GFP_KERNEL);
|
||||
if (ctx.imms == NULL) {
|
||||
prog = orig_prog;
|
||||
if (ctx.imms == NULL)
|
||||
goto out_off;
|
||||
}
|
||||
}
|
||||
#else
|
||||
/* there's nothing about the epilogue on ARMv7 */
|
||||
|
|
@ -2238,10 +2217,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
/* Not able to allocate memory for the structure then
|
||||
* we must fall back to the interpretation
|
||||
*/
|
||||
if (header == NULL) {
|
||||
prog = orig_prog;
|
||||
if (header == NULL)
|
||||
goto out_imms;
|
||||
}
|
||||
|
||||
/* 2.) Actual pass to generate final JIT code */
|
||||
ctx.target = (u32 *) image_ptr;
|
||||
|
|
@ -2278,16 +2255,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
#endif
|
||||
out_off:
|
||||
kfree(ctx.offsets);
|
||||
out:
|
||||
if (tmp_blinded)
|
||||
bpf_jit_prog_release_other(prog, prog == orig_prog ?
|
||||
tmp : orig_prog);
|
||||
|
||||
return prog;
|
||||
|
||||
out_free:
|
||||
image_ptr = NULL;
|
||||
bpf_jit_binary_free(header);
|
||||
prog = orig_prog;
|
||||
goto out_imms;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1197,8 +1197,8 @@ static int add_exception_handler(const struct bpf_insn *insn,
|
|||
* >0 - successfully JITed a 16-byte eBPF instruction.
|
||||
* <0 - failed to JIT.
|
||||
*/
|
||||
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
||||
bool extra_pass)
|
||||
static int build_insn(const struct bpf_verifier_env *env, const struct bpf_insn *insn,
|
||||
struct jit_ctx *ctx, bool extra_pass)
|
||||
{
|
||||
const u8 code = insn->code;
|
||||
u8 dst = bpf2a64[insn->dst_reg];
|
||||
|
|
@ -1223,6 +1223,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
|||
int ret;
|
||||
bool sign_extend;
|
||||
|
||||
if (bpf_insn_is_indirect_target(env, ctx->prog, i))
|
||||
emit_bti(A64_BTI_J, ctx);
|
||||
|
||||
switch (code) {
|
||||
/* dst = src */
|
||||
case BPF_ALU | BPF_MOV | BPF_X:
|
||||
|
|
@ -1898,7 +1901,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int build_body(struct jit_ctx *ctx, bool extra_pass)
|
||||
static int build_body(struct bpf_verifier_env *env, struct jit_ctx *ctx, bool extra_pass)
|
||||
{
|
||||
const struct bpf_prog *prog = ctx->prog;
|
||||
int i;
|
||||
|
|
@ -1917,7 +1920,7 @@ static int build_body(struct jit_ctx *ctx, bool extra_pass)
|
|||
int ret;
|
||||
|
||||
ctx->offset[i] = ctx->idx;
|
||||
ret = build_insn(insn, ctx, extra_pass);
|
||||
ret = build_insn(env, insn, ctx, extra_pass);
|
||||
if (ret > 0) {
|
||||
i++;
|
||||
ctx->offset[i] = ctx->idx;
|
||||
|
|
@ -2000,17 +2003,15 @@ struct arm64_jit_data {
|
|||
struct jit_ctx ctx;
|
||||
};
|
||||
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog)
|
||||
{
|
||||
int image_size, prog_size, extable_size, extable_align, extable_offset;
|
||||
struct bpf_prog *tmp, *orig_prog = prog;
|
||||
struct bpf_binary_header *header;
|
||||
struct bpf_binary_header *ro_header = NULL;
|
||||
struct arm64_jit_data *jit_data;
|
||||
void __percpu *priv_stack_ptr = NULL;
|
||||
bool was_classic = bpf_prog_was_classic(prog);
|
||||
int priv_stack_alloc_sz;
|
||||
bool tmp_blinded = false;
|
||||
bool extra_pass = false;
|
||||
struct jit_ctx ctx;
|
||||
u8 *image_ptr;
|
||||
|
|
@ -2019,26 +2020,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
int exentry_idx;
|
||||
|
||||
if (!prog->jit_requested)
|
||||
return orig_prog;
|
||||
|
||||
tmp = bpf_jit_blind_constants(prog);
|
||||
/* If blinding was requested and we failed during blinding,
|
||||
* we must fall back to the interpreter.
|
||||
*/
|
||||
if (IS_ERR(tmp))
|
||||
return orig_prog;
|
||||
if (tmp != prog) {
|
||||
tmp_blinded = true;
|
||||
prog = tmp;
|
||||
}
|
||||
return prog;
|
||||
|
||||
jit_data = prog->aux->jit_data;
|
||||
if (!jit_data) {
|
||||
jit_data = kzalloc_obj(*jit_data);
|
||||
if (!jit_data) {
|
||||
prog = orig_prog;
|
||||
goto out;
|
||||
}
|
||||
if (!jit_data)
|
||||
return prog;
|
||||
prog->aux->jit_data = jit_data;
|
||||
}
|
||||
priv_stack_ptr = prog->aux->priv_stack_ptr;
|
||||
|
|
@ -2050,10 +2038,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 16) +
|
||||
2 * PRIV_STACK_GUARD_SZ;
|
||||
priv_stack_ptr = __alloc_percpu_gfp(priv_stack_alloc_sz, 16, GFP_KERNEL);
|
||||
if (!priv_stack_ptr) {
|
||||
prog = orig_prog;
|
||||
if (!priv_stack_ptr)
|
||||
goto out_priv_stack;
|
||||
}
|
||||
|
||||
priv_stack_init_guard(priv_stack_ptr, priv_stack_alloc_sz);
|
||||
prog->aux->priv_stack_ptr = priv_stack_ptr;
|
||||
|
|
@ -2073,10 +2059,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
ctx.prog = prog;
|
||||
|
||||
ctx.offset = kvzalloc_objs(int, prog->len + 1);
|
||||
if (ctx.offset == NULL) {
|
||||
prog = orig_prog;
|
||||
if (ctx.offset == NULL)
|
||||
goto out_off;
|
||||
}
|
||||
|
||||
ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
|
||||
ctx.arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
|
||||
|
|
@ -2089,15 +2073,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
* BPF line info needs ctx->offset[i] to be the offset of
|
||||
* instruction[i] in jited image, so build prologue first.
|
||||
*/
|
||||
if (build_prologue(&ctx, was_classic)) {
|
||||
prog = orig_prog;
|
||||
if (build_prologue(&ctx, was_classic))
|
||||
goto out_off;
|
||||
}
|
||||
|
||||
if (build_body(&ctx, extra_pass)) {
|
||||
prog = orig_prog;
|
||||
if (build_body(env, &ctx, extra_pass))
|
||||
goto out_off;
|
||||
}
|
||||
|
||||
ctx.epilogue_offset = ctx.idx;
|
||||
build_epilogue(&ctx, was_classic);
|
||||
|
|
@ -2115,10 +2095,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
ro_header = bpf_jit_binary_pack_alloc(image_size, &ro_image_ptr,
|
||||
sizeof(u64), &header, &image_ptr,
|
||||
jit_fill_hole);
|
||||
if (!ro_header) {
|
||||
prog = orig_prog;
|
||||
if (!ro_header)
|
||||
goto out_off;
|
||||
}
|
||||
|
||||
/* Pass 2: Determine jited position and result for each instruction */
|
||||
|
||||
|
|
@ -2146,10 +2124,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
/* Dont write body instructions to memory for now */
|
||||
ctx.write = false;
|
||||
|
||||
if (build_body(&ctx, extra_pass)) {
|
||||
prog = orig_prog;
|
||||
if (build_body(env, &ctx, extra_pass))
|
||||
goto out_free_hdr;
|
||||
}
|
||||
|
||||
ctx.epilogue_offset = ctx.idx;
|
||||
ctx.exentry_idx = exentry_idx;
|
||||
|
|
@ -2157,20 +2133,16 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
ctx.write = true;
|
||||
|
||||
/* Pass 3: Adjust jump offset and write final image */
|
||||
if (build_body(&ctx, extra_pass) ||
|
||||
WARN_ON_ONCE(ctx.idx != ctx.epilogue_offset)) {
|
||||
prog = orig_prog;
|
||||
if (build_body(env, &ctx, extra_pass) ||
|
||||
WARN_ON_ONCE(ctx.idx != ctx.epilogue_offset))
|
||||
goto out_free_hdr;
|
||||
}
|
||||
|
||||
build_epilogue(&ctx, was_classic);
|
||||
build_plt(&ctx);
|
||||
|
||||
/* Extra pass to validate JITed code. */
|
||||
if (validate_ctx(&ctx)) {
|
||||
prog = orig_prog;
|
||||
if (validate_ctx(&ctx))
|
||||
goto out_free_hdr;
|
||||
}
|
||||
|
||||
/* update the real prog size */
|
||||
prog_size = sizeof(u32) * ctx.idx;
|
||||
|
|
@ -2187,16 +2159,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
if (extra_pass && ctx.idx > jit_data->ctx.idx) {
|
||||
pr_err_once("multi-func JIT bug %d > %d\n",
|
||||
ctx.idx, jit_data->ctx.idx);
|
||||
prog->bpf_func = NULL;
|
||||
prog->jited = 0;
|
||||
prog->jited_len = 0;
|
||||
goto out_free_hdr;
|
||||
}
|
||||
if (WARN_ON(bpf_jit_binary_pack_finalize(ro_header, header))) {
|
||||
/* ro_header has been freed */
|
||||
/* ro_header and header has been freed */
|
||||
ro_header = NULL;
|
||||
prog = orig_prog;
|
||||
goto out_off;
|
||||
header = NULL;
|
||||
goto out_free_hdr;
|
||||
}
|
||||
} else {
|
||||
jit_data->ctx = ctx;
|
||||
|
|
@ -2233,13 +2202,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
kfree(jit_data);
|
||||
prog->aux->jit_data = NULL;
|
||||
}
|
||||
out:
|
||||
if (tmp_blinded)
|
||||
bpf_jit_prog_release_other(prog, prog == orig_prog ?
|
||||
tmp : orig_prog);
|
||||
|
||||
return prog;
|
||||
|
||||
out_free_hdr:
|
||||
if (extra_pass) {
|
||||
prog->bpf_func = NULL;
|
||||
prog->jited = 0;
|
||||
prog->jited_len = 0;
|
||||
}
|
||||
if (header) {
|
||||
bpf_arch_text_copy(&ro_header->size, &header->size,
|
||||
sizeof(header->size));
|
||||
|
|
|
|||
|
|
@ -1920,45 +1920,28 @@ int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
|
|||
return ret < 0 ? ret : ret * LOONGARCH_INSN_SIZE;
|
||||
}
|
||||
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog)
|
||||
{
|
||||
bool tmp_blinded = false, extra_pass = false;
|
||||
bool extra_pass = false;
|
||||
u8 *image_ptr, *ro_image_ptr;
|
||||
int image_size, prog_size, extable_size;
|
||||
struct jit_ctx ctx;
|
||||
struct jit_data *jit_data;
|
||||
struct bpf_binary_header *header;
|
||||
struct bpf_binary_header *ro_header;
|
||||
struct bpf_prog *tmp, *orig_prog = prog;
|
||||
|
||||
/*
|
||||
* If BPF JIT was not enabled then we must fall back to
|
||||
* the interpreter.
|
||||
*/
|
||||
if (!prog->jit_requested)
|
||||
return orig_prog;
|
||||
|
||||
tmp = bpf_jit_blind_constants(prog);
|
||||
/*
|
||||
* If blinding was requested and we failed during blinding,
|
||||
* we must fall back to the interpreter. Otherwise, we save
|
||||
* the new JITed code.
|
||||
*/
|
||||
if (IS_ERR(tmp))
|
||||
return orig_prog;
|
||||
|
||||
if (tmp != prog) {
|
||||
tmp_blinded = true;
|
||||
prog = tmp;
|
||||
}
|
||||
return prog;
|
||||
|
||||
jit_data = prog->aux->jit_data;
|
||||
if (!jit_data) {
|
||||
jit_data = kzalloc_obj(*jit_data);
|
||||
if (!jit_data) {
|
||||
prog = orig_prog;
|
||||
goto out;
|
||||
}
|
||||
if (!jit_data)
|
||||
return prog;
|
||||
prog->aux->jit_data = jit_data;
|
||||
}
|
||||
if (jit_data->ctx.offset) {
|
||||
|
|
@ -1978,17 +1961,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
|
||||
|
||||
ctx.offset = kvcalloc(prog->len + 1, sizeof(u32), GFP_KERNEL);
|
||||
if (ctx.offset == NULL) {
|
||||
prog = orig_prog;
|
||||
if (ctx.offset == NULL)
|
||||
goto out_offset;
|
||||
}
|
||||
|
||||
/* 1. Initial fake pass to compute ctx->idx and set ctx->flags */
|
||||
build_prologue(&ctx);
|
||||
if (build_body(&ctx, extra_pass)) {
|
||||
prog = orig_prog;
|
||||
if (build_body(&ctx, extra_pass))
|
||||
goto out_offset;
|
||||
}
|
||||
ctx.epilogue_offset = ctx.idx;
|
||||
build_epilogue(&ctx);
|
||||
|
||||
|
|
@ -2004,10 +1983,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
/* Now we know the size of the structure to make */
|
||||
ro_header = bpf_jit_binary_pack_alloc(image_size, &ro_image_ptr, sizeof(u32),
|
||||
&header, &image_ptr, jit_fill_hole);
|
||||
if (!ro_header) {
|
||||
prog = orig_prog;
|
||||
if (!ro_header)
|
||||
goto out_offset;
|
||||
}
|
||||
|
||||
/* 2. Now, the actual pass to generate final JIT code */
|
||||
/*
|
||||
|
|
@ -2027,17 +2004,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
ctx.num_exentries = 0;
|
||||
|
||||
build_prologue(&ctx);
|
||||
if (build_body(&ctx, extra_pass)) {
|
||||
prog = orig_prog;
|
||||
if (build_body(&ctx, extra_pass))
|
||||
goto out_free;
|
||||
}
|
||||
build_epilogue(&ctx);
|
||||
|
||||
/* 3. Extra pass to validate JITed code */
|
||||
if (validate_ctx(&ctx)) {
|
||||
prog = orig_prog;
|
||||
if (validate_ctx(&ctx))
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
/* And we're done */
|
||||
if (bpf_jit_enable > 1)
|
||||
|
|
@ -2050,9 +2023,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
goto out_free;
|
||||
}
|
||||
if (WARN_ON(bpf_jit_binary_pack_finalize(ro_header, header))) {
|
||||
/* ro_header has been freed */
|
||||
/* ro_header and header have been freed */
|
||||
ro_header = NULL;
|
||||
prog = orig_prog;
|
||||
header = NULL;
|
||||
goto out_free;
|
||||
}
|
||||
/*
|
||||
|
|
@ -2084,13 +2057,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
prog->aux->jit_data = NULL;
|
||||
}
|
||||
|
||||
out:
|
||||
if (tmp_blinded)
|
||||
bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog);
|
||||
|
||||
return prog;
|
||||
|
||||
out_free:
|
||||
if (extra_pass) {
|
||||
prog->bpf_func = NULL;
|
||||
prog->jited = 0;
|
||||
prog->jited_len = 0;
|
||||
}
|
||||
|
||||
if (header) {
|
||||
bpf_arch_text_copy(&ro_header->size, &header->size, sizeof(header->size));
|
||||
bpf_jit_binary_pack_free(ro_header, header);
|
||||
|
|
|
|||
|
|
@ -909,12 +909,10 @@ bool bpf_jit_needs_zext(void)
|
|||
return true;
|
||||
}
|
||||
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog)
|
||||
{
|
||||
struct bpf_prog *tmp, *orig_prog = prog;
|
||||
struct bpf_binary_header *header = NULL;
|
||||
struct jit_context ctx;
|
||||
bool tmp_blinded = false;
|
||||
unsigned int tmp_idx;
|
||||
unsigned int image_size;
|
||||
u8 *image_ptr;
|
||||
|
|
@ -925,19 +923,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
* the interpreter.
|
||||
*/
|
||||
if (!prog->jit_requested)
|
||||
return orig_prog;
|
||||
/*
|
||||
* If constant blinding was enabled and we failed during blinding
|
||||
* then we must fall back to the interpreter. Otherwise, we save
|
||||
* the new JITed code.
|
||||
*/
|
||||
tmp = bpf_jit_blind_constants(prog);
|
||||
if (IS_ERR(tmp))
|
||||
return orig_prog;
|
||||
if (tmp != prog) {
|
||||
tmp_blinded = true;
|
||||
prog = tmp;
|
||||
}
|
||||
return prog;
|
||||
|
||||
memset(&ctx, 0, sizeof(ctx));
|
||||
ctx.program = prog;
|
||||
|
|
@ -1025,14 +1011,10 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
prog->jited_len = image_size;
|
||||
|
||||
out:
|
||||
if (tmp_blinded)
|
||||
bpf_jit_prog_release_other(prog, prog == orig_prog ?
|
||||
tmp : orig_prog);
|
||||
kfree(ctx.descriptors);
|
||||
return prog;
|
||||
|
||||
out_err:
|
||||
prog = orig_prog;
|
||||
if (header)
|
||||
bpf_jit_binary_free(header);
|
||||
goto out;
|
||||
|
|
|
|||
|
|
@ -41,33 +41,22 @@ bool bpf_jit_needs_zext(void)
|
|||
return true;
|
||||
}
|
||||
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog)
|
||||
{
|
||||
unsigned int prog_size = 0, extable_size = 0;
|
||||
bool tmp_blinded = false, extra_pass = false;
|
||||
struct bpf_prog *tmp, *orig_prog = prog;
|
||||
bool extra_pass = false;
|
||||
int pass = 0, prev_ninsns = 0, prologue_len, i;
|
||||
struct hppa_jit_data *jit_data;
|
||||
struct hppa_jit_context *ctx;
|
||||
|
||||
if (!prog->jit_requested)
|
||||
return orig_prog;
|
||||
|
||||
tmp = bpf_jit_blind_constants(prog);
|
||||
if (IS_ERR(tmp))
|
||||
return orig_prog;
|
||||
if (tmp != prog) {
|
||||
tmp_blinded = true;
|
||||
prog = tmp;
|
||||
}
|
||||
return prog;
|
||||
|
||||
jit_data = prog->aux->jit_data;
|
||||
if (!jit_data) {
|
||||
jit_data = kzalloc_obj(*jit_data);
|
||||
if (!jit_data) {
|
||||
prog = orig_prog;
|
||||
goto out;
|
||||
}
|
||||
if (!jit_data)
|
||||
return prog;
|
||||
prog->aux->jit_data = jit_data;
|
||||
}
|
||||
|
||||
|
|
@ -81,10 +70,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
|
||||
ctx->prog = prog;
|
||||
ctx->offset = kzalloc_objs(int, prog->len);
|
||||
if (!ctx->offset) {
|
||||
prog = orig_prog;
|
||||
goto out_offset;
|
||||
}
|
||||
if (!ctx->offset)
|
||||
goto out_err;
|
||||
for (i = 0; i < prog->len; i++) {
|
||||
prev_ninsns += 20;
|
||||
ctx->offset[i] = prev_ninsns;
|
||||
|
|
@ -93,10 +80,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
for (i = 0; i < NR_JIT_ITERATIONS; i++) {
|
||||
pass++;
|
||||
ctx->ninsns = 0;
|
||||
if (build_body(ctx, extra_pass, ctx->offset)) {
|
||||
prog = orig_prog;
|
||||
goto out_offset;
|
||||
}
|
||||
if (build_body(ctx, extra_pass, ctx->offset))
|
||||
goto out_err;
|
||||
ctx->body_len = ctx->ninsns;
|
||||
bpf_jit_build_prologue(ctx);
|
||||
ctx->prologue_len = ctx->ninsns - ctx->body_len;
|
||||
|
|
@ -116,10 +101,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
&jit_data->image,
|
||||
sizeof(long),
|
||||
bpf_fill_ill_insns);
|
||||
if (!jit_data->header) {
|
||||
prog = orig_prog;
|
||||
goto out_offset;
|
||||
}
|
||||
if (!jit_data->header)
|
||||
goto out_err;
|
||||
|
||||
ctx->insns = (u32 *)jit_data->image;
|
||||
/*
|
||||
|
|
@ -134,8 +117,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
|
||||
if (jit_data->header)
|
||||
bpf_jit_binary_free(jit_data->header);
|
||||
prog = orig_prog;
|
||||
goto out_offset;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
if (extable_size)
|
||||
|
|
@ -148,8 +130,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
bpf_jit_build_prologue(ctx);
|
||||
if (build_body(ctx, extra_pass, NULL)) {
|
||||
bpf_jit_binary_free(jit_data->header);
|
||||
prog = orig_prog;
|
||||
goto out_offset;
|
||||
goto out_err;
|
||||
}
|
||||
bpf_jit_build_epilogue(ctx);
|
||||
|
||||
|
|
@ -160,20 +141,19 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
{ extern int machine_restart(char *); machine_restart(""); }
|
||||
}
|
||||
|
||||
if (!prog->is_func || extra_pass) {
|
||||
if (bpf_jit_binary_lock_ro(jit_data->header)) {
|
||||
bpf_jit_binary_free(jit_data->header);
|
||||
goto out_err;
|
||||
}
|
||||
bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);
|
||||
}
|
||||
|
||||
prog->bpf_func = (void *)ctx->insns;
|
||||
prog->jited = 1;
|
||||
prog->jited_len = prog_size;
|
||||
|
||||
bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);
|
||||
|
||||
if (!prog->is_func || extra_pass) {
|
||||
if (bpf_jit_binary_lock_ro(jit_data->header)) {
|
||||
bpf_jit_binary_free(jit_data->header);
|
||||
prog->bpf_func = NULL;
|
||||
prog->jited = 0;
|
||||
prog->jited_len = 0;
|
||||
goto out_offset;
|
||||
}
|
||||
prologue_len = ctx->epilogue_offset - ctx->body_len;
|
||||
for (i = 0; i < prog->len; i++)
|
||||
ctx->offset[i] += prologue_len;
|
||||
|
|
@ -183,14 +163,19 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
kfree(jit_data);
|
||||
prog->aux->jit_data = NULL;
|
||||
}
|
||||
out:
|
||||
|
||||
if (HPPA_JIT_REBOOT)
|
||||
{ extern int machine_restart(char *); machine_restart(""); }
|
||||
|
||||
if (tmp_blinded)
|
||||
bpf_jit_prog_release_other(prog, prog == orig_prog ?
|
||||
tmp : orig_prog);
|
||||
return prog;
|
||||
|
||||
out_err:
|
||||
if (extra_pass) {
|
||||
prog->bpf_func = NULL;
|
||||
prog->jited = 0;
|
||||
prog->jited_len = 0;
|
||||
}
|
||||
goto out_offset;
|
||||
}
|
||||
|
||||
u64 hppa_div64(u64 div, u64 divisor)
|
||||
|
|
|
|||
|
|
@ -162,7 +162,7 @@ static void priv_stack_check_guard(void __percpu *priv_stack_ptr, int alloc_size
|
|||
}
|
||||
}
|
||||
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *fp)
|
||||
{
|
||||
u32 proglen;
|
||||
u32 alloclen;
|
||||
|
|
@ -177,9 +177,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
|
|||
void __percpu *priv_stack_ptr = NULL;
|
||||
struct bpf_binary_header *fhdr = NULL;
|
||||
struct bpf_binary_header *hdr = NULL;
|
||||
struct bpf_prog *org_fp = fp;
|
||||
struct bpf_prog *tmp_fp = NULL;
|
||||
bool bpf_blinded = false;
|
||||
bool extra_pass = false;
|
||||
u8 *fimage = NULL;
|
||||
u32 *fcode_base = NULL;
|
||||
|
|
@ -187,24 +184,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
|
|||
u32 fixup_len;
|
||||
|
||||
if (!fp->jit_requested)
|
||||
return org_fp;
|
||||
|
||||
tmp_fp = bpf_jit_blind_constants(org_fp);
|
||||
if (IS_ERR(tmp_fp))
|
||||
return org_fp;
|
||||
|
||||
if (tmp_fp != org_fp) {
|
||||
bpf_blinded = true;
|
||||
fp = tmp_fp;
|
||||
}
|
||||
return fp;
|
||||
|
||||
jit_data = fp->aux->jit_data;
|
||||
if (!jit_data) {
|
||||
jit_data = kzalloc_obj(*jit_data);
|
||||
if (!jit_data) {
|
||||
fp = org_fp;
|
||||
goto out;
|
||||
}
|
||||
if (!jit_data)
|
||||
return fp;
|
||||
fp->aux->jit_data = jit_data;
|
||||
}
|
||||
|
||||
|
|
@ -219,10 +205,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
|
|||
priv_stack_alloc_size = round_up(fp->aux->stack_depth, 16) +
|
||||
2 * PRIV_STACK_GUARD_SZ;
|
||||
priv_stack_ptr = __alloc_percpu_gfp(priv_stack_alloc_size, 16, GFP_KERNEL);
|
||||
if (!priv_stack_ptr) {
|
||||
fp = org_fp;
|
||||
if (!priv_stack_ptr)
|
||||
goto out_priv_stack;
|
||||
}
|
||||
|
||||
priv_stack_init_guard(priv_stack_ptr, priv_stack_alloc_size);
|
||||
fp->aux->priv_stack_ptr = priv_stack_ptr;
|
||||
|
|
@ -249,10 +233,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
|
|||
}
|
||||
|
||||
addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
|
||||
if (addrs == NULL) {
|
||||
fp = org_fp;
|
||||
goto out_addrs;
|
||||
}
|
||||
if (addrs == NULL)
|
||||
goto out_err;
|
||||
|
||||
memset(&cgctx, 0, sizeof(struct codegen_context));
|
||||
bpf_jit_init_reg_mapping(&cgctx);
|
||||
|
|
@ -279,11 +261,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
|
|||
}
|
||||
|
||||
/* Scouting faux-generate pass 0 */
|
||||
if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
|
||||
if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false))
|
||||
/* We hit something illegal or unsupported. */
|
||||
fp = org_fp;
|
||||
goto out_addrs;
|
||||
}
|
||||
goto out_err;
|
||||
|
||||
/*
|
||||
* If we have seen a tail call, we need a second pass.
|
||||
|
|
@ -294,10 +274,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
|
|||
*/
|
||||
if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) {
|
||||
cgctx.idx = 0;
|
||||
if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false)) {
|
||||
fp = org_fp;
|
||||
goto out_addrs;
|
||||
}
|
||||
if (bpf_jit_build_body(fp, NULL, NULL, &cgctx, addrs, 0, false))
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
bpf_jit_realloc_regs(&cgctx);
|
||||
|
|
@ -318,10 +296,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
|
|||
|
||||
fhdr = bpf_jit_binary_pack_alloc(alloclen, &fimage, 4, &hdr, &image,
|
||||
bpf_jit_fill_ill_insns);
|
||||
if (!fhdr) {
|
||||
fp = org_fp;
|
||||
goto out_addrs;
|
||||
}
|
||||
if (!fhdr)
|
||||
goto out_err;
|
||||
|
||||
if (extable_len)
|
||||
fp->aux->extable = (void *)fimage + FUNCTION_DESCR_SIZE + proglen + fixup_len;
|
||||
|
|
@ -340,8 +316,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
|
|||
extra_pass)) {
|
||||
bpf_arch_text_copy(&fhdr->size, &hdr->size, sizeof(hdr->size));
|
||||
bpf_jit_binary_pack_free(fhdr, hdr);
|
||||
fp = org_fp;
|
||||
goto out_addrs;
|
||||
goto out_err;
|
||||
}
|
||||
bpf_jit_build_epilogue(code_base, &cgctx);
|
||||
|
||||
|
|
@ -363,15 +338,16 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
|
|||
((u64 *)image)[1] = local_paca->kernel_toc;
|
||||
#endif
|
||||
|
||||
if (!fp->is_func || extra_pass) {
|
||||
if (bpf_jit_binary_pack_finalize(fhdr, hdr))
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
fp->bpf_func = (void *)fimage;
|
||||
fp->jited = 1;
|
||||
fp->jited_len = cgctx.idx * 4 + FUNCTION_DESCR_SIZE;
|
||||
|
||||
if (!fp->is_func || extra_pass) {
|
||||
if (bpf_jit_binary_pack_finalize(fhdr, hdr)) {
|
||||
fp = org_fp;
|
||||
goto out_addrs;
|
||||
}
|
||||
bpf_prog_fill_jited_linfo(fp, addrs);
|
||||
/*
|
||||
* On ABI V1, executable code starts after the function
|
||||
|
|
@ -398,11 +374,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
|
|||
jit_data->hdr = hdr;
|
||||
}
|
||||
|
||||
out:
|
||||
if (bpf_blinded)
|
||||
bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);
|
||||
|
||||
return fp;
|
||||
|
||||
out_err:
|
||||
if (extra_pass) {
|
||||
fp->bpf_func = NULL;
|
||||
fp->jited = 0;
|
||||
fp->jited_len = 0;
|
||||
}
|
||||
goto out_addrs;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
|||
|
|
@ -41,32 +41,22 @@ bool bpf_jit_needs_zext(void)
|
|||
return true;
|
||||
}
|
||||
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog)
|
||||
{
|
||||
unsigned int prog_size = 0, extable_size = 0;
|
||||
bool tmp_blinded = false, extra_pass = false;
|
||||
struct bpf_prog *tmp, *orig_prog = prog;
|
||||
bool extra_pass = false;
|
||||
int pass = 0, prev_ninsns = 0, i;
|
||||
struct rv_jit_data *jit_data;
|
||||
struct rv_jit_context *ctx;
|
||||
|
||||
if (!prog->jit_requested)
|
||||
return orig_prog;
|
||||
|
||||
tmp = bpf_jit_blind_constants(prog);
|
||||
if (IS_ERR(tmp))
|
||||
return orig_prog;
|
||||
if (tmp != prog) {
|
||||
tmp_blinded = true;
|
||||
prog = tmp;
|
||||
}
|
||||
return prog;
|
||||
|
||||
jit_data = prog->aux->jit_data;
|
||||
if (!jit_data) {
|
||||
jit_data = kzalloc_obj(*jit_data);
|
||||
if (!jit_data) {
|
||||
prog = orig_prog;
|
||||
goto out;
|
||||
return prog;
|
||||
}
|
||||
prog->aux->jit_data = jit_data;
|
||||
}
|
||||
|
|
@ -83,15 +73,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
ctx->user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
|
||||
ctx->prog = prog;
|
||||
ctx->offset = kzalloc_objs(int, prog->len);
|
||||
if (!ctx->offset) {
|
||||
prog = orig_prog;
|
||||
if (!ctx->offset)
|
||||
goto out_offset;
|
||||
}
|
||||
|
||||
if (build_body(ctx, extra_pass, NULL)) {
|
||||
prog = orig_prog;
|
||||
if (build_body(ctx, extra_pass, NULL))
|
||||
goto out_offset;
|
||||
}
|
||||
|
||||
for (i = 0; i < prog->len; i++) {
|
||||
prev_ninsns += 32;
|
||||
|
|
@ -105,10 +91,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
bpf_jit_build_prologue(ctx, bpf_is_subprog(prog));
|
||||
ctx->prologue_len = ctx->ninsns;
|
||||
|
||||
if (build_body(ctx, extra_pass, ctx->offset)) {
|
||||
prog = orig_prog;
|
||||
if (build_body(ctx, extra_pass, ctx->offset))
|
||||
goto out_offset;
|
||||
}
|
||||
|
||||
ctx->epilogue_offset = ctx->ninsns;
|
||||
bpf_jit_build_epilogue(ctx);
|
||||
|
|
@ -126,10 +110,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
&jit_data->ro_image, sizeof(u32),
|
||||
&jit_data->header, &jit_data->image,
|
||||
bpf_fill_ill_insns);
|
||||
if (!jit_data->ro_header) {
|
||||
prog = orig_prog;
|
||||
if (!jit_data->ro_header)
|
||||
goto out_offset;
|
||||
}
|
||||
|
||||
/*
|
||||
* Use the image(RW) for writing the JITed instructions. But also save
|
||||
|
|
@ -150,7 +132,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
|
||||
if (i == NR_JIT_ITERATIONS) {
|
||||
pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
|
||||
prog = orig_prog;
|
||||
goto out_free_hdr;
|
||||
}
|
||||
|
||||
|
|
@ -163,26 +144,27 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
ctx->nexentries = 0;
|
||||
|
||||
bpf_jit_build_prologue(ctx, bpf_is_subprog(prog));
|
||||
if (build_body(ctx, extra_pass, NULL)) {
|
||||
prog = orig_prog;
|
||||
if (build_body(ctx, extra_pass, NULL))
|
||||
goto out_free_hdr;
|
||||
}
|
||||
bpf_jit_build_epilogue(ctx);
|
||||
|
||||
if (bpf_jit_enable > 1)
|
||||
bpf_jit_dump(prog->len, prog_size, pass, ctx->insns);
|
||||
|
||||
if (!prog->is_func || extra_pass) {
|
||||
if (WARN_ON(bpf_jit_binary_pack_finalize(jit_data->ro_header, jit_data->header))) {
|
||||
/* ro_header has been freed */
|
||||
jit_data->ro_header = NULL;
|
||||
jit_data->header = NULL;
|
||||
goto out_free_hdr;
|
||||
}
|
||||
}
|
||||
|
||||
prog->bpf_func = (void *)ctx->ro_insns + cfi_get_offset();
|
||||
prog->jited = 1;
|
||||
prog->jited_len = prog_size - cfi_get_offset();
|
||||
|
||||
if (!prog->is_func || extra_pass) {
|
||||
if (WARN_ON(bpf_jit_binary_pack_finalize(jit_data->ro_header, jit_data->header))) {
|
||||
/* ro_header has been freed */
|
||||
jit_data->ro_header = NULL;
|
||||
prog = orig_prog;
|
||||
goto out_offset;
|
||||
}
|
||||
for (i = 0; i < prog->len; i++)
|
||||
ctx->offset[i] = ninsns_rvoff(ctx->offset[i]);
|
||||
bpf_prog_fill_jited_linfo(prog, ctx->offset);
|
||||
|
|
@ -191,14 +173,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
kfree(jit_data);
|
||||
prog->aux->jit_data = NULL;
|
||||
}
|
||||
out:
|
||||
|
||||
if (tmp_blinded)
|
||||
bpf_jit_prog_release_other(prog, prog == orig_prog ?
|
||||
tmp : orig_prog);
|
||||
return prog;
|
||||
|
||||
out_free_hdr:
|
||||
if (extra_pass) {
|
||||
prog->bpf_func = NULL;
|
||||
prog->jited = 0;
|
||||
prog->jited_len = 0;
|
||||
}
|
||||
if (jit_data->header) {
|
||||
bpf_arch_text_copy(&jit_data->ro_header->size, &jit_data->header->size,
|
||||
sizeof(jit_data->header->size));
|
||||
|
|
|
|||
|
|
@ -2312,38 +2312,22 @@ static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit,
|
|||
/*
|
||||
* Compile eBPF program "fp"
|
||||
*/
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *fp)
|
||||
{
|
||||
struct bpf_prog *tmp, *orig_fp = fp;
|
||||
struct bpf_binary_header *header;
|
||||
struct s390_jit_data *jit_data;
|
||||
bool tmp_blinded = false;
|
||||
bool extra_pass = false;
|
||||
struct bpf_jit jit;
|
||||
int pass;
|
||||
|
||||
if (!fp->jit_requested)
|
||||
return orig_fp;
|
||||
|
||||
tmp = bpf_jit_blind_constants(fp);
|
||||
/*
|
||||
* If blinding was requested and we failed during blinding,
|
||||
* we must fall back to the interpreter.
|
||||
*/
|
||||
if (IS_ERR(tmp))
|
||||
return orig_fp;
|
||||
if (tmp != fp) {
|
||||
tmp_blinded = true;
|
||||
fp = tmp;
|
||||
}
|
||||
return fp;
|
||||
|
||||
jit_data = fp->aux->jit_data;
|
||||
if (!jit_data) {
|
||||
jit_data = kzalloc_obj(*jit_data);
|
||||
if (!jit_data) {
|
||||
fp = orig_fp;
|
||||
goto out;
|
||||
}
|
||||
if (!jit_data)
|
||||
return fp;
|
||||
fp->aux->jit_data = jit_data;
|
||||
}
|
||||
if (jit_data->ctx.addrs) {
|
||||
|
|
@ -2356,34 +2340,27 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
|
|||
|
||||
memset(&jit, 0, sizeof(jit));
|
||||
jit.addrs = kvcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL);
|
||||
if (jit.addrs == NULL) {
|
||||
fp = orig_fp;
|
||||
goto free_addrs;
|
||||
}
|
||||
if (jit.addrs == NULL)
|
||||
goto out_err;
|
||||
/*
|
||||
* Three initial passes:
|
||||
* - 1/2: Determine clobbered registers
|
||||
* - 3: Calculate program size and addrs array
|
||||
*/
|
||||
for (pass = 1; pass <= 3; pass++) {
|
||||
if (bpf_jit_prog(&jit, fp, extra_pass)) {
|
||||
fp = orig_fp;
|
||||
goto free_addrs;
|
||||
}
|
||||
if (bpf_jit_prog(&jit, fp, extra_pass))
|
||||
goto out_err;
|
||||
}
|
||||
/*
|
||||
* Final pass: Allocate and generate program
|
||||
*/
|
||||
header = bpf_jit_alloc(&jit, fp);
|
||||
if (!header) {
|
||||
fp = orig_fp;
|
||||
goto free_addrs;
|
||||
}
|
||||
if (!header)
|
||||
goto out_err;
|
||||
skip_init_ctx:
|
||||
if (bpf_jit_prog(&jit, fp, extra_pass)) {
|
||||
bpf_jit_binary_free(header);
|
||||
fp = orig_fp;
|
||||
goto free_addrs;
|
||||
goto out_err;
|
||||
}
|
||||
if (bpf_jit_enable > 1) {
|
||||
bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
|
||||
|
|
@ -2392,8 +2369,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
|
|||
if (!fp->is_func || extra_pass) {
|
||||
if (bpf_jit_binary_lock_ro(header)) {
|
||||
bpf_jit_binary_free(header);
|
||||
fp = orig_fp;
|
||||
goto free_addrs;
|
||||
goto out_err;
|
||||
}
|
||||
} else {
|
||||
jit_data->header = header;
|
||||
|
|
@ -2411,11 +2387,16 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
|
|||
kfree(jit_data);
|
||||
fp->aux->jit_data = NULL;
|
||||
}
|
||||
out:
|
||||
if (tmp_blinded)
|
||||
bpf_jit_prog_release_other(fp, fp == orig_fp ?
|
||||
tmp : orig_fp);
|
||||
|
||||
return fp;
|
||||
|
||||
out_err:
|
||||
if (extra_pass) {
|
||||
fp->bpf_func = NULL;
|
||||
fp->jited = 0;
|
||||
fp->jited_len = 0;
|
||||
}
|
||||
goto free_addrs;
|
||||
}
|
||||
|
||||
bool bpf_jit_supports_kfunc_call(void)
|
||||
|
|
|
|||
|
|
@ -1477,39 +1477,24 @@ struct sparc64_jit_data {
|
|||
struct jit_ctx ctx;
|
||||
};
|
||||
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog)
|
||||
{
|
||||
struct bpf_prog *tmp, *orig_prog = prog;
|
||||
struct sparc64_jit_data *jit_data;
|
||||
struct bpf_binary_header *header;
|
||||
u32 prev_image_size, image_size;
|
||||
bool tmp_blinded = false;
|
||||
bool extra_pass = false;
|
||||
struct jit_ctx ctx;
|
||||
u8 *image_ptr;
|
||||
int pass, i;
|
||||
|
||||
if (!prog->jit_requested)
|
||||
return orig_prog;
|
||||
|
||||
tmp = bpf_jit_blind_constants(prog);
|
||||
/* If blinding was requested and we failed during blinding,
|
||||
* we must fall back to the interpreter.
|
||||
*/
|
||||
if (IS_ERR(tmp))
|
||||
return orig_prog;
|
||||
if (tmp != prog) {
|
||||
tmp_blinded = true;
|
||||
prog = tmp;
|
||||
}
|
||||
return prog;
|
||||
|
||||
jit_data = prog->aux->jit_data;
|
||||
if (!jit_data) {
|
||||
jit_data = kzalloc_obj(*jit_data);
|
||||
if (!jit_data) {
|
||||
prog = orig_prog;
|
||||
goto out;
|
||||
}
|
||||
if (!jit_data)
|
||||
return prog;
|
||||
prog->aux->jit_data = jit_data;
|
||||
}
|
||||
if (jit_data->ctx.offset) {
|
||||
|
|
@ -1527,10 +1512,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
ctx.prog = prog;
|
||||
|
||||
ctx.offset = kmalloc_array(prog->len, sizeof(unsigned int), GFP_KERNEL);
|
||||
if (ctx.offset == NULL) {
|
||||
prog = orig_prog;
|
||||
goto out_off;
|
||||
}
|
||||
if (ctx.offset == NULL)
|
||||
goto out_err;
|
||||
|
||||
/* Longest sequence emitted is for bswap32, 12 instructions. Pre-cook
|
||||
* the offset array so that we converge faster.
|
||||
|
|
@ -1543,10 +1526,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
ctx.idx = 0;
|
||||
|
||||
build_prologue(&ctx);
|
||||
if (build_body(&ctx)) {
|
||||
prog = orig_prog;
|
||||
goto out_off;
|
||||
}
|
||||
if (build_body(&ctx))
|
||||
goto out_err;
|
||||
build_epilogue(&ctx);
|
||||
|
||||
if (bpf_jit_enable > 1)
|
||||
|
|
@ -1569,10 +1550,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
image_size = sizeof(u32) * ctx.idx;
|
||||
header = bpf_jit_binary_alloc(image_size, &image_ptr,
|
||||
sizeof(u32), jit_fill_hole);
|
||||
if (header == NULL) {
|
||||
prog = orig_prog;
|
||||
goto out_off;
|
||||
}
|
||||
if (header == NULL)
|
||||
goto out_err;
|
||||
|
||||
ctx.image = (u32 *)image_ptr;
|
||||
skip_init_ctx:
|
||||
|
|
@ -1582,8 +1561,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
|
||||
if (build_body(&ctx)) {
|
||||
bpf_jit_binary_free(header);
|
||||
prog = orig_prog;
|
||||
goto out_off;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
build_epilogue(&ctx);
|
||||
|
|
@ -1592,8 +1570,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
pr_err("bpf_jit: Failed to converge, prev_size=%u size=%d\n",
|
||||
prev_image_size, ctx.idx * 4);
|
||||
bpf_jit_binary_free(header);
|
||||
prog = orig_prog;
|
||||
goto out_off;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
if (bpf_jit_enable > 1)
|
||||
|
|
@ -1604,8 +1581,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
if (!prog->is_func || extra_pass) {
|
||||
if (bpf_jit_binary_lock_ro(header)) {
|
||||
bpf_jit_binary_free(header);
|
||||
prog = orig_prog;
|
||||
goto out_off;
|
||||
goto out_err;
|
||||
}
|
||||
} else {
|
||||
jit_data->ctx = ctx;
|
||||
|
|
@ -1624,9 +1600,14 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
kfree(jit_data);
|
||||
prog->aux->jit_data = NULL;
|
||||
}
|
||||
out:
|
||||
if (tmp_blinded)
|
||||
bpf_jit_prog_release_other(prog, prog == orig_prog ?
|
||||
tmp : orig_prog);
|
||||
|
||||
return prog;
|
||||
|
||||
out_err:
|
||||
if (extra_pass) {
|
||||
prog->bpf_func = NULL;
|
||||
prog->jited = 0;
|
||||
prog->jited_len = 0;
|
||||
}
|
||||
goto out_off;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -58,8 +58,8 @@ static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
|
|||
#define EMIT_ENDBR() EMIT(gen_endbr(), 4)
|
||||
#define EMIT_ENDBR_POISON() EMIT(gen_endbr_poison(), 4)
|
||||
#else
|
||||
#define EMIT_ENDBR()
|
||||
#define EMIT_ENDBR_POISON()
|
||||
#define EMIT_ENDBR() do { } while (0)
|
||||
#define EMIT_ENDBR_POISON() do { } while (0)
|
||||
#endif
|
||||
|
||||
static bool is_imm8(int value)
|
||||
|
|
@ -1649,8 +1649,8 @@ static int emit_spectre_bhb_barrier(u8 **pprog, u8 *ip,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
|
||||
int oldproglen, struct jit_context *ctx, bool jmp_padding)
|
||||
static int do_jit(struct bpf_verifier_env *env, struct bpf_prog *bpf_prog, int *addrs, u8 *image,
|
||||
u8 *rw_image, int oldproglen, struct jit_context *ctx, bool jmp_padding)
|
||||
{
|
||||
bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
|
||||
struct bpf_insn *insn = bpf_prog->insnsi;
|
||||
|
|
@ -1663,7 +1663,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
|
|||
void __percpu *priv_stack_ptr;
|
||||
int i, excnt = 0;
|
||||
int ilen, proglen = 0;
|
||||
u8 *prog = temp;
|
||||
u8 *ip, *prog = temp;
|
||||
u32 stack_depth;
|
||||
int err;
|
||||
|
||||
|
|
@ -1734,6 +1734,11 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image
|
|||
dst_reg = X86_REG_R9;
|
||||
}
|
||||
|
||||
if (bpf_insn_is_indirect_target(env, bpf_prog, i - 1))
|
||||
EMIT_ENDBR();
|
||||
|
||||
ip = image + addrs[i - 1] + (prog - temp);
|
||||
|
||||
switch (insn->code) {
|
||||
/* ALU */
|
||||
case BPF_ALU | BPF_ADD | BPF_X:
|
||||
|
|
@ -2440,8 +2445,6 @@ st: if (is_imm8(insn->off))
|
|||
|
||||
/* call */
|
||||
case BPF_JMP | BPF_CALL: {
|
||||
u8 *ip = image + addrs[i - 1];
|
||||
|
||||
func = (u8 *) __bpf_call_base + imm32;
|
||||
if (src_reg == BPF_PSEUDO_CALL && tail_call_reachable) {
|
||||
LOAD_TAIL_CALL_CNT_PTR(stack_depth);
|
||||
|
|
@ -2465,7 +2468,8 @@ st: if (is_imm8(insn->off))
|
|||
if (imm32)
|
||||
emit_bpf_tail_call_direct(bpf_prog,
|
||||
&bpf_prog->aux->poke_tab[imm32 - 1],
|
||||
&prog, image + addrs[i - 1],
|
||||
&prog,
|
||||
ip,
|
||||
callee_regs_used,
|
||||
stack_depth,
|
||||
ctx);
|
||||
|
|
@ -2474,7 +2478,7 @@ st: if (is_imm8(insn->off))
|
|||
&prog,
|
||||
callee_regs_used,
|
||||
stack_depth,
|
||||
image + addrs[i - 1],
|
||||
ip,
|
||||
ctx);
|
||||
break;
|
||||
|
||||
|
|
@ -2639,7 +2643,7 @@ st: if (is_imm8(insn->off))
|
|||
break;
|
||||
|
||||
case BPF_JMP | BPF_JA | BPF_X:
|
||||
emit_indirect_jump(&prog, insn->dst_reg, image + addrs[i - 1]);
|
||||
emit_indirect_jump(&prog, insn->dst_reg, ip);
|
||||
break;
|
||||
case BPF_JMP | BPF_JA:
|
||||
case BPF_JMP32 | BPF_JA:
|
||||
|
|
@ -2729,8 +2733,6 @@ st: if (is_imm8(insn->off))
|
|||
ctx->cleanup_addr = proglen;
|
||||
if (bpf_prog_was_classic(bpf_prog) &&
|
||||
!ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) {
|
||||
u8 *ip = image + addrs[i - 1];
|
||||
|
||||
if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
|
@ -3713,17 +3715,15 @@ struct x64_jit_data {
|
|||
#define MAX_PASSES 20
|
||||
#define PADDING_PASSES (MAX_PASSES - 5)
|
||||
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog)
|
||||
{
|
||||
struct bpf_binary_header *rw_header = NULL;
|
||||
struct bpf_binary_header *header = NULL;
|
||||
struct bpf_prog *tmp, *orig_prog = prog;
|
||||
void __percpu *priv_stack_ptr = NULL;
|
||||
struct x64_jit_data *jit_data;
|
||||
int priv_stack_alloc_sz;
|
||||
int proglen, oldproglen = 0;
|
||||
struct jit_context ctx = {};
|
||||
bool tmp_blinded = false;
|
||||
bool extra_pass = false;
|
||||
bool padding = false;
|
||||
u8 *rw_image = NULL;
|
||||
|
|
@ -3733,27 +3733,13 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
int i;
|
||||
|
||||
if (!prog->jit_requested)
|
||||
return orig_prog;
|
||||
|
||||
tmp = bpf_jit_blind_constants(prog);
|
||||
/*
|
||||
* If blinding was requested and we failed during blinding,
|
||||
* we must fall back to the interpreter.
|
||||
*/
|
||||
if (IS_ERR(tmp))
|
||||
return orig_prog;
|
||||
if (tmp != prog) {
|
||||
tmp_blinded = true;
|
||||
prog = tmp;
|
||||
}
|
||||
return prog;
|
||||
|
||||
jit_data = prog->aux->jit_data;
|
||||
if (!jit_data) {
|
||||
jit_data = kzalloc_obj(*jit_data);
|
||||
if (!jit_data) {
|
||||
prog = orig_prog;
|
||||
goto out;
|
||||
}
|
||||
if (!jit_data)
|
||||
return prog;
|
||||
prog->aux->jit_data = jit_data;
|
||||
}
|
||||
priv_stack_ptr = prog->aux->priv_stack_ptr;
|
||||
|
|
@ -3765,10 +3751,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 8) +
|
||||
2 * PRIV_STACK_GUARD_SZ;
|
||||
priv_stack_ptr = __alloc_percpu_gfp(priv_stack_alloc_sz, 8, GFP_KERNEL);
|
||||
if (!priv_stack_ptr) {
|
||||
prog = orig_prog;
|
||||
if (!priv_stack_ptr)
|
||||
goto out_priv_stack;
|
||||
}
|
||||
|
||||
priv_stack_init_guard(priv_stack_ptr, priv_stack_alloc_sz);
|
||||
prog->aux->priv_stack_ptr = priv_stack_ptr;
|
||||
|
|
@ -3786,10 +3770,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
goto skip_init_addrs;
|
||||
}
|
||||
addrs = kvmalloc_objs(*addrs, prog->len + 1);
|
||||
if (!addrs) {
|
||||
prog = orig_prog;
|
||||
if (!addrs)
|
||||
goto out_addrs;
|
||||
}
|
||||
|
||||
/*
|
||||
* Before first pass, make a rough estimation of addrs[]
|
||||
|
|
@ -3811,7 +3793,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
for (pass = 0; pass < MAX_PASSES || image; pass++) {
|
||||
if (!padding && pass >= PADDING_PASSES)
|
||||
padding = true;
|
||||
proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
|
||||
proglen = do_jit(env, prog, addrs, image, rw_image, oldproglen, &ctx, padding);
|
||||
if (proglen <= 0) {
|
||||
out_image:
|
||||
image = NULL;
|
||||
|
|
@ -3820,8 +3802,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
sizeof(rw_header->size));
|
||||
bpf_jit_binary_pack_free(header, rw_header);
|
||||
}
|
||||
/* Fall back to interpreter mode */
|
||||
prog = orig_prog;
|
||||
if (extra_pass) {
|
||||
prog->bpf_func = NULL;
|
||||
prog->jited = 0;
|
||||
|
|
@ -3852,10 +3832,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
|
||||
&image, align, &rw_header, &rw_image,
|
||||
jit_fill_hole);
|
||||
if (!header) {
|
||||
prog = orig_prog;
|
||||
if (!header)
|
||||
goto out_addrs;
|
||||
}
|
||||
prog->aux->extable = (void *) image + roundup(proglen, align);
|
||||
}
|
||||
oldproglen = proglen;
|
||||
|
|
@ -3908,8 +3886,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
prog->bpf_func = (void *)image + cfi_get_offset();
|
||||
prog->jited = 1;
|
||||
prog->jited_len = proglen - cfi_get_offset();
|
||||
} else {
|
||||
prog = orig_prog;
|
||||
}
|
||||
|
||||
if (!image || !prog->is_func || extra_pass) {
|
||||
|
|
@ -3925,10 +3901,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
kfree(jit_data);
|
||||
prog->aux->jit_data = NULL;
|
||||
}
|
||||
out:
|
||||
if (tmp_blinded)
|
||||
bpf_jit_prog_release_other(prog, prog == orig_prog ?
|
||||
tmp : orig_prog);
|
||||
|
||||
return prog;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -2518,38 +2518,22 @@ bool bpf_jit_needs_zext(void)
|
|||
return true;
|
||||
}
|
||||
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog)
|
||||
{
|
||||
struct bpf_binary_header *header = NULL;
|
||||
struct bpf_prog *tmp, *orig_prog = prog;
|
||||
int proglen, oldproglen = 0;
|
||||
struct jit_context ctx = {};
|
||||
bool tmp_blinded = false;
|
||||
u8 *image = NULL;
|
||||
int *addrs;
|
||||
int pass;
|
||||
int i;
|
||||
|
||||
if (!prog->jit_requested)
|
||||
return orig_prog;
|
||||
|
||||
tmp = bpf_jit_blind_constants(prog);
|
||||
/*
|
||||
* If blinding was requested and we failed during blinding,
|
||||
* we must fall back to the interpreter.
|
||||
*/
|
||||
if (IS_ERR(tmp))
|
||||
return orig_prog;
|
||||
if (tmp != prog) {
|
||||
tmp_blinded = true;
|
||||
prog = tmp;
|
||||
}
|
||||
return prog;
|
||||
|
||||
addrs = kmalloc_objs(*addrs, prog->len);
|
||||
if (!addrs) {
|
||||
prog = orig_prog;
|
||||
goto out;
|
||||
}
|
||||
if (!addrs)
|
||||
return prog;
|
||||
|
||||
/*
|
||||
* Before first pass, make a rough estimation of addrs[]
|
||||
|
|
@ -2574,7 +2558,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
image = NULL;
|
||||
if (header)
|
||||
bpf_jit_binary_free(header);
|
||||
prog = orig_prog;
|
||||
goto out_addrs;
|
||||
}
|
||||
if (image) {
|
||||
|
|
@ -2588,10 +2571,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
if (proglen == oldproglen) {
|
||||
header = bpf_jit_binary_alloc(proglen, &image,
|
||||
1, jit_fill_hole);
|
||||
if (!header) {
|
||||
prog = orig_prog;
|
||||
if (!header)
|
||||
goto out_addrs;
|
||||
}
|
||||
}
|
||||
oldproglen = proglen;
|
||||
cond_resched();
|
||||
|
|
@ -2604,16 +2585,10 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
prog->bpf_func = (void *)image;
|
||||
prog->jited = 1;
|
||||
prog->jited_len = proglen;
|
||||
} else {
|
||||
prog = orig_prog;
|
||||
}
|
||||
|
||||
out_addrs:
|
||||
kfree(addrs);
|
||||
out:
|
||||
if (tmp_blinded)
|
||||
bpf_jit_prog_release_other(prog, prog == orig_prog ?
|
||||
tmp : orig_prog);
|
||||
return prog;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1541,6 +1541,8 @@ bool bpf_has_frame_pointer(unsigned long ip);
|
|||
int bpf_jit_charge_modmem(u32 size);
|
||||
void bpf_jit_uncharge_modmem(u32 size);
|
||||
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
|
||||
bool bpf_insn_is_indirect_target(const struct bpf_verifier_env *env, const struct bpf_prog *prog,
|
||||
int insn_idx);
|
||||
#else
|
||||
static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
|
||||
struct bpf_trampoline *tr,
|
||||
|
|
|
|||
|
|
@ -630,16 +630,17 @@ struct bpf_insn_aux_data {
|
|||
|
||||
/* below fields are initialized once */
|
||||
unsigned int orig_idx; /* original instruction index */
|
||||
bool jmp_point;
|
||||
bool prune_point;
|
||||
u32 jmp_point:1;
|
||||
u32 prune_point:1;
|
||||
/* ensure we check state equivalence and save state checkpoint and
|
||||
* this instruction, regardless of any heuristics
|
||||
*/
|
||||
bool force_checkpoint;
|
||||
u32 force_checkpoint:1;
|
||||
/* true if instruction is a call to a helper function that
|
||||
* accepts callback function as a parameter.
|
||||
*/
|
||||
bool calls_callback;
|
||||
u32 calls_callback:1;
|
||||
u32 indirect_target:1; /* if it is an indirect jump target */
|
||||
/*
|
||||
* CFG strongly connected component this instruction belongs to,
|
||||
* zero if it is a singleton SCC.
|
||||
|
|
|
|||
|
|
@ -1108,6 +1108,8 @@ sk_filter_reason(struct sock *sk, struct sk_buff *skb)
|
|||
return sk_filter_trim_cap(sk, skb, 1);
|
||||
}
|
||||
|
||||
struct bpf_prog *__bpf_prog_select_runtime(struct bpf_verifier_env *env, struct bpf_prog *fp,
|
||||
int *err);
|
||||
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
|
||||
void bpf_prog_free(struct bpf_prog *fp);
|
||||
|
||||
|
|
@ -1153,7 +1155,7 @@ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
|
|||
((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
|
||||
(void *)__bpf_call_base)
|
||||
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
|
||||
struct bpf_prog *bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog);
|
||||
void bpf_jit_compile(struct bpf_prog *prog);
|
||||
bool bpf_jit_needs_zext(void);
|
||||
bool bpf_jit_inlines_helper_call(s32 imm);
|
||||
|
|
@ -1184,6 +1186,31 @@ static inline bool bpf_dump_raw_ok(const struct cred *cred)
|
|||
|
||||
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
|
||||
const struct bpf_insn *patch, u32 len);
|
||||
|
||||
#ifdef CONFIG_BPF_SYSCALL
|
||||
struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
|
||||
const struct bpf_insn *patch, u32 len);
|
||||
struct bpf_insn_aux_data *bpf_dup_insn_aux_data(struct bpf_verifier_env *env);
|
||||
void bpf_restore_insn_aux_data(struct bpf_verifier_env *env,
|
||||
struct bpf_insn_aux_data *orig_insn_aux);
|
||||
#else
|
||||
static inline struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
|
||||
const struct bpf_insn *patch, u32 len)
|
||||
{
|
||||
return ERR_PTR(-ENOTSUPP);
|
||||
}
|
||||
|
||||
static inline struct bpf_insn_aux_data *bpf_dup_insn_aux_data(struct bpf_verifier_env *env)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void bpf_restore_insn_aux_data(struct bpf_verifier_env *env,
|
||||
struct bpf_insn_aux_data *orig_insn_aux)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_BPF_SYSCALL */
|
||||
|
||||
int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);
|
||||
|
||||
static inline bool xdp_return_frame_no_direct(void)
|
||||
|
|
@ -1310,9 +1337,14 @@ int bpf_jit_get_func_addr(const struct bpf_prog *prog,
|
|||
|
||||
const char *bpf_jit_get_prog_name(struct bpf_prog *prog);
|
||||
|
||||
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
|
||||
struct bpf_prog *bpf_jit_blind_constants(struct bpf_verifier_env *env, struct bpf_prog *prog);
|
||||
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
|
||||
|
||||
static inline bool bpf_prog_need_blind(const struct bpf_prog *prog)
|
||||
{
|
||||
return prog->blinding_requested && !prog->blinded;
|
||||
}
|
||||
|
||||
static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
|
||||
u32 pass, void *image)
|
||||
{
|
||||
|
|
@ -1451,6 +1483,20 @@ static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
|
|||
{
|
||||
}
|
||||
|
||||
static inline bool bpf_prog_need_blind(const struct bpf_prog *prog)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline
|
||||
struct bpf_prog *bpf_jit_blind_constants(struct bpf_verifier_env *env, struct bpf_prog *prog)
|
||||
{
|
||||
return prog;
|
||||
}
|
||||
|
||||
static inline void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_BPF_JIT */
|
||||
|
||||
void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
|
||||
|
|
|
|||
|
|
@ -1491,24 +1491,11 @@ void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
|
|||
bpf_prog_clone_free(fp_other);
|
||||
}
|
||||
|
||||
static void adjust_insn_arrays(struct bpf_prog *prog, u32 off, u32 len)
|
||||
{
|
||||
#ifdef CONFIG_BPF_SYSCALL
|
||||
struct bpf_map *map;
|
||||
int i;
|
||||
|
||||
if (len <= 1)
|
||||
return;
|
||||
|
||||
for (i = 0; i < prog->aux->used_map_cnt; i++) {
|
||||
map = prog->aux->used_maps[i];
|
||||
if (map->map_type == BPF_MAP_TYPE_INSN_ARRAY)
|
||||
bpf_insn_array_adjust(map, off, len);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
|
||||
/*
|
||||
* Now this function is used only to blind the main prog and must be invoked only when
|
||||
* bpf_prog_need_blind() returns true.
|
||||
*/
|
||||
struct bpf_prog *bpf_jit_blind_constants(struct bpf_verifier_env *env, struct bpf_prog *prog)
|
||||
{
|
||||
struct bpf_insn insn_buff[16], aux[2];
|
||||
struct bpf_prog *clone, *tmp;
|
||||
|
|
@ -1516,13 +1503,17 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
|
|||
struct bpf_insn *insn;
|
||||
int i, rewritten;
|
||||
|
||||
if (!prog->blinding_requested || prog->blinded)
|
||||
return prog;
|
||||
if (WARN_ON_ONCE(env && env->prog != prog))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
clone = bpf_prog_clone_create(prog, GFP_USER);
|
||||
if (!clone)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
/* make sure bpf_patch_insn_data() patches the correct prog */
|
||||
if (env)
|
||||
env->prog = clone;
|
||||
|
||||
insn_cnt = clone->len;
|
||||
insn = clone->insnsi;
|
||||
|
||||
|
|
@ -1550,21 +1541,28 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
|
|||
if (!rewritten)
|
||||
continue;
|
||||
|
||||
tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
|
||||
if (IS_ERR(tmp)) {
|
||||
if (env)
|
||||
tmp = bpf_patch_insn_data(env, i, insn_buff, rewritten);
|
||||
else
|
||||
tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
|
||||
|
||||
if (IS_ERR_OR_NULL(tmp)) {
|
||||
if (env)
|
||||
/* restore the original prog */
|
||||
env->prog = prog;
|
||||
/* Patching may have repointed aux->prog during
|
||||
* realloc from the original one, so we need to
|
||||
* fix it up here on error.
|
||||
*/
|
||||
bpf_jit_prog_release_other(prog, clone);
|
||||
return tmp;
|
||||
return IS_ERR(tmp) ? tmp : ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
clone = tmp;
|
||||
insn_delta = rewritten - 1;
|
||||
|
||||
/* Instructions arrays must be updated using absolute xlated offsets */
|
||||
adjust_insn_arrays(clone, prog->aux->subprog_start + i, rewritten);
|
||||
if (env)
|
||||
env->prog = clone;
|
||||
|
||||
/* Walk new program and skip insns we just inserted. */
|
||||
insn = clone->insnsi + i + insn_delta;
|
||||
|
|
@ -1575,6 +1573,15 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
|
|||
clone->blinded = 1;
|
||||
return clone;
|
||||
}
|
||||
|
||||
bool bpf_insn_is_indirect_target(const struct bpf_verifier_env *env, const struct bpf_prog *prog,
|
||||
int insn_idx)
|
||||
{
|
||||
if (!env)
|
||||
return false;
|
||||
insn_idx += prog->aux->subprog_start;
|
||||
return env->insn_aux_data[insn_idx].indirect_target;
|
||||
}
|
||||
#endif /* CONFIG_BPF_JIT */
|
||||
|
||||
/* Base function for offset calculation. Needs to go into .text section,
|
||||
|
|
@ -2533,18 +2540,55 @@ static bool bpf_prog_select_interpreter(struct bpf_prog *fp)
|
|||
return select_interpreter;
|
||||
}
|
||||
|
||||
/**
|
||||
* bpf_prog_select_runtime - select exec runtime for BPF program
|
||||
* @fp: bpf_prog populated with BPF program
|
||||
* @err: pointer to error variable
|
||||
*
|
||||
* Try to JIT eBPF program, if JIT is not available, use interpreter.
|
||||
* The BPF program will be executed via bpf_prog_run() function.
|
||||
*
|
||||
* Return: the &fp argument along with &err set to 0 for success or
|
||||
* a negative errno code on failure
|
||||
*/
|
||||
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
|
||||
static struct bpf_prog *bpf_prog_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog)
|
||||
{
|
||||
#ifdef CONFIG_BPF_JIT
|
||||
struct bpf_prog *orig_prog;
|
||||
struct bpf_insn_aux_data *orig_insn_aux;
|
||||
|
||||
if (!bpf_prog_need_blind(prog))
|
||||
return bpf_int_jit_compile(env, prog);
|
||||
|
||||
if (env) {
|
||||
/*
|
||||
* If env is not NULL, we are called from the end of bpf_check(), at this
|
||||
* point, only insn_aux_data is used after failure, so it should be restored
|
||||
* on failure.
|
||||
*/
|
||||
orig_insn_aux = bpf_dup_insn_aux_data(env);
|
||||
if (!orig_insn_aux)
|
||||
return prog;
|
||||
}
|
||||
|
||||
orig_prog = prog;
|
||||
prog = bpf_jit_blind_constants(env, prog);
|
||||
/*
|
||||
* If blinding was requested and we failed during blinding, we must fall
|
||||
* back to the interpreter.
|
||||
*/
|
||||
if (IS_ERR(prog))
|
||||
goto out_restore;
|
||||
|
||||
prog = bpf_int_jit_compile(env, prog);
|
||||
if (prog->jited) {
|
||||
bpf_jit_prog_release_other(prog, orig_prog);
|
||||
if (env)
|
||||
vfree(orig_insn_aux);
|
||||
return prog;
|
||||
}
|
||||
|
||||
bpf_jit_prog_release_other(orig_prog, prog);
|
||||
|
||||
out_restore:
|
||||
prog = orig_prog;
|
||||
if (env)
|
||||
bpf_restore_insn_aux_data(env, orig_insn_aux);
|
||||
#endif
|
||||
return prog;
|
||||
}
|
||||
|
||||
struct bpf_prog *__bpf_prog_select_runtime(struct bpf_verifier_env *env, struct bpf_prog *fp,
|
||||
int *err)
|
||||
{
|
||||
/* In case of BPF to BPF calls, verifier did all the prep
|
||||
* work with regards to JITing, etc.
|
||||
|
|
@ -2572,7 +2616,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
|
|||
if (*err)
|
||||
return fp;
|
||||
|
||||
fp = bpf_int_jit_compile(fp);
|
||||
fp = bpf_prog_jit_compile(env, fp);
|
||||
bpf_prog_jit_attempt_done(fp);
|
||||
if (!fp->jited && jit_needed) {
|
||||
*err = -ENOTSUPP;
|
||||
|
|
@ -2598,6 +2642,22 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
|
|||
|
||||
return fp;
|
||||
}
|
||||
|
||||
/**
|
||||
* bpf_prog_select_runtime - select exec runtime for BPF program
|
||||
* @fp: bpf_prog populated with BPF program
|
||||
* @err: pointer to error variable
|
||||
*
|
||||
* Try to JIT eBPF program, if JIT is not available, use interpreter.
|
||||
* The BPF program will be executed via bpf_prog_run() function.
|
||||
*
|
||||
* Return: the &fp argument along with &err set to 0 for success or
|
||||
* a negative errno code on failure
|
||||
*/
|
||||
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
|
||||
{
|
||||
return __bpf_prog_select_runtime(NULL, fp, err);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
|
||||
|
||||
static unsigned int __bpf_prog_ret1(const void *ctx,
|
||||
|
|
@ -3085,7 +3145,7 @@ const struct bpf_func_proto bpf_tail_call_proto = {
|
|||
* It is encouraged to implement bpf_int_jit_compile() instead, so that
|
||||
* eBPF and implicitly also cBPF can get JITed!
|
||||
*/
|
||||
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
|
||||
struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_verifier_env *env, struct bpf_prog *prog)
|
||||
{
|
||||
return prog;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -183,6 +183,18 @@ static void adjust_insn_aux_data(struct bpf_verifier_env *env,
|
|||
data[i].seen = old_seen;
|
||||
data[i].zext_dst = insn_has_def32(insn + i);
|
||||
}
|
||||
|
||||
/*
|
||||
* The indirect_target flag of the original instruction was moved to the last of the
|
||||
* new instructions by the above memmove and memset, but the indirect jump target is
|
||||
* actually the first instruction, so move it back. This also matches with the behavior
|
||||
* of bpf_insn_array_adjust(), which preserves xlated_off to point to the first new
|
||||
* instruction.
|
||||
*/
|
||||
if (data[off + cnt - 1].indirect_target) {
|
||||
data[off].indirect_target = 1;
|
||||
data[off + cnt - 1].indirect_target = 0;
|
||||
}
|
||||
}
|
||||
|
||||
static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
|
||||
|
|
@ -232,8 +244,8 @@ static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
|
|||
}
|
||||
}
|
||||
|
||||
static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
|
||||
const struct bpf_insn *patch, u32 len)
|
||||
struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
|
||||
const struct bpf_insn *patch, u32 len)
|
||||
{
|
||||
struct bpf_prog *new_prog;
|
||||
struct bpf_insn_aux_data *new_data = NULL;
|
||||
|
|
@ -973,7 +985,47 @@ int bpf_convert_ctx_accesses(struct bpf_verifier_env *env)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int bpf_jit_subprogs(struct bpf_verifier_env *env)
|
||||
static u32 *bpf_dup_subprog_starts(struct bpf_verifier_env *env)
|
||||
{
|
||||
u32 *starts = NULL;
|
||||
|
||||
starts = kvmalloc_objs(u32, env->subprog_cnt, GFP_KERNEL_ACCOUNT);
|
||||
if (starts) {
|
||||
for (int i = 0; i < env->subprog_cnt; i++)
|
||||
starts[i] = env->subprog_info[i].start;
|
||||
}
|
||||
return starts;
|
||||
}
|
||||
|
||||
static void bpf_restore_subprog_starts(struct bpf_verifier_env *env, u32 *orig_starts)
|
||||
{
|
||||
for (int i = 0; i < env->subprog_cnt; i++)
|
||||
env->subprog_info[i].start = orig_starts[i];
|
||||
/* restore the start of fake 'exit' subprog as well */
|
||||
env->subprog_info[env->subprog_cnt].start = env->prog->len;
|
||||
}
|
||||
|
||||
struct bpf_insn_aux_data *bpf_dup_insn_aux_data(struct bpf_verifier_env *env)
|
||||
{
|
||||
size_t size;
|
||||
void *new_aux;
|
||||
|
||||
size = array_size(sizeof(struct bpf_insn_aux_data), env->prog->len);
|
||||
new_aux = __vmalloc(size, GFP_KERNEL_ACCOUNT);
|
||||
if (new_aux)
|
||||
memcpy(new_aux, env->insn_aux_data, size);
|
||||
return new_aux;
|
||||
}
|
||||
|
||||
void bpf_restore_insn_aux_data(struct bpf_verifier_env *env,
|
||||
struct bpf_insn_aux_data *orig_insn_aux)
|
||||
{
|
||||
/* the expanded elements are zero-filled, so no special handling is required */
|
||||
vfree(env->insn_aux_data);
|
||||
env->insn_aux_data = orig_insn_aux;
|
||||
}
|
||||
|
||||
static int jit_subprogs(struct bpf_verifier_env *env)
|
||||
{
|
||||
struct bpf_prog *prog = env->prog, **func, *tmp;
|
||||
int i, j, subprog_start, subprog_end = 0, len, subprog;
|
||||
|
|
@ -981,10 +1033,6 @@ int bpf_jit_subprogs(struct bpf_verifier_env *env)
|
|||
struct bpf_insn *insn;
|
||||
void *old_bpf_func;
|
||||
int err, num_exentries;
|
||||
int old_len, subprog_start_adjustment = 0;
|
||||
|
||||
if (env->subprog_cnt <= 1)
|
||||
return 0;
|
||||
|
||||
for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
|
||||
if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn))
|
||||
|
|
@ -1053,10 +1101,11 @@ int bpf_jit_subprogs(struct bpf_verifier_env *env)
|
|||
goto out_free;
|
||||
func[i]->is_func = 1;
|
||||
func[i]->sleepable = prog->sleepable;
|
||||
func[i]->blinded = prog->blinded;
|
||||
func[i]->aux->func_idx = i;
|
||||
/* Below members will be freed only at prog->aux */
|
||||
func[i]->aux->btf = prog->aux->btf;
|
||||
func[i]->aux->subprog_start = subprog_start + subprog_start_adjustment;
|
||||
func[i]->aux->subprog_start = subprog_start;
|
||||
func[i]->aux->func_info = prog->aux->func_info;
|
||||
func[i]->aux->func_info_cnt = prog->aux->func_info_cnt;
|
||||
func[i]->aux->poke_tab = prog->aux->poke_tab;
|
||||
|
|
@ -1113,15 +1162,7 @@ int bpf_jit_subprogs(struct bpf_verifier_env *env)
|
|||
func[i]->aux->token = prog->aux->token;
|
||||
if (!i)
|
||||
func[i]->aux->exception_boundary = env->seen_exception;
|
||||
|
||||
/*
|
||||
* To properly pass the absolute subprog start to jit
|
||||
* all instruction adjustments should be accumulated
|
||||
*/
|
||||
old_len = func[i]->len;
|
||||
func[i] = bpf_int_jit_compile(func[i]);
|
||||
subprog_start_adjustment += func[i]->len - old_len;
|
||||
|
||||
func[i] = bpf_int_jit_compile(env, func[i]);
|
||||
if (!func[i]->jited) {
|
||||
err = -ENOTSUPP;
|
||||
goto out_free;
|
||||
|
|
@ -1165,7 +1206,7 @@ int bpf_jit_subprogs(struct bpf_verifier_env *env)
|
|||
}
|
||||
for (i = 0; i < env->subprog_cnt; i++) {
|
||||
old_bpf_func = func[i]->bpf_func;
|
||||
tmp = bpf_int_jit_compile(func[i]);
|
||||
tmp = bpf_int_jit_compile(env, func[i]);
|
||||
if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
|
||||
verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
|
||||
err = -ENOTSUPP;
|
||||
|
|
@ -1247,16 +1288,87 @@ int bpf_jit_subprogs(struct bpf_verifier_env *env)
|
|||
}
|
||||
kfree(func);
|
||||
out_undo_insn:
|
||||
bpf_prog_jit_attempt_done(prog);
|
||||
return err;
|
||||
}
|
||||
|
||||
int bpf_jit_subprogs(struct bpf_verifier_env *env)
|
||||
{
|
||||
int err, i;
|
||||
bool blinded = false;
|
||||
struct bpf_insn *insn;
|
||||
struct bpf_prog *prog, *orig_prog;
|
||||
struct bpf_insn_aux_data *orig_insn_aux;
|
||||
u32 *orig_subprog_starts;
|
||||
|
||||
if (env->subprog_cnt <= 1)
|
||||
return 0;
|
||||
|
||||
prog = orig_prog = env->prog;
|
||||
if (bpf_prog_need_blind(prog)) {
|
||||
orig_insn_aux = bpf_dup_insn_aux_data(env);
|
||||
if (!orig_insn_aux) {
|
||||
err = -ENOMEM;
|
||||
goto out_cleanup;
|
||||
}
|
||||
orig_subprog_starts = bpf_dup_subprog_starts(env);
|
||||
if (!orig_subprog_starts) {
|
||||
vfree(orig_insn_aux);
|
||||
err = -ENOMEM;
|
||||
goto out_cleanup;
|
||||
}
|
||||
prog = bpf_jit_blind_constants(env, prog);
|
||||
if (IS_ERR(prog)) {
|
||||
err = -ENOMEM;
|
||||
prog = orig_prog;
|
||||
goto out_restore;
|
||||
}
|
||||
blinded = true;
|
||||
}
|
||||
|
||||
err = jit_subprogs(env);
|
||||
if (err)
|
||||
goto out_jit_err;
|
||||
|
||||
if (blinded) {
|
||||
bpf_jit_prog_release_other(prog, orig_prog);
|
||||
kvfree(orig_subprog_starts);
|
||||
vfree(orig_insn_aux);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
out_jit_err:
|
||||
if (blinded) {
|
||||
bpf_jit_prog_release_other(orig_prog, prog);
|
||||
/* roll back to the clean original prog */
|
||||
prog = env->prog = orig_prog;
|
||||
goto out_restore;
|
||||
} else {
|
||||
if (err != -EFAULT) {
|
||||
/*
|
||||
* We will fall back to interpreter mode when err is not -EFAULT, before
|
||||
* that, insn->off and insn->imm should be restored to their original
|
||||
* values since they were modified by jit_subprogs.
|
||||
*/
|
||||
for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
|
||||
if (!bpf_pseudo_call(insn))
|
||||
continue;
|
||||
insn->off = 0;
|
||||
insn->imm = env->insn_aux_data[i].call_imm;
|
||||
}
|
||||
}
|
||||
goto out_cleanup;
|
||||
}
|
||||
|
||||
out_restore:
|
||||
bpf_restore_subprog_starts(env, orig_subprog_starts);
|
||||
bpf_restore_insn_aux_data(env, orig_insn_aux);
|
||||
kvfree(orig_subprog_starts);
|
||||
out_cleanup:
|
||||
/* cleanup main prog to be interpreted */
|
||||
prog->jit_requested = 0;
|
||||
prog->blinding_requested = 0;
|
||||
for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
|
||||
if (!bpf_pseudo_call(insn))
|
||||
continue;
|
||||
insn->off = 0;
|
||||
insn->imm = env->insn_aux_data[i].call_imm;
|
||||
}
|
||||
bpf_prog_jit_attempt_done(prog);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -3083,10 +3083,6 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
|
|||
if (err < 0)
|
||||
goto free_used_maps;
|
||||
|
||||
prog = bpf_prog_select_runtime(prog, &err);
|
||||
if (err < 0)
|
||||
goto free_used_maps;
|
||||
|
||||
err = bpf_prog_mark_insn_arrays_ready(prog);
|
||||
if (err < 0)
|
||||
goto free_used_maps;
|
||||
|
|
|
|||
|
|
@ -3497,6 +3497,11 @@ static int insn_stack_access_flags(int frameno, int spi)
|
|||
return INSN_F_STACK_ACCESS | (spi << INSN_F_SPI_SHIFT) | frameno;
|
||||
}
|
||||
|
||||
static void mark_indirect_target(struct bpf_verifier_env *env, int idx)
|
||||
{
|
||||
env->insn_aux_data[idx].indirect_target = true;
|
||||
}
|
||||
|
||||
#define LR_FRAMENO_BITS 3
|
||||
#define LR_SPI_BITS 6
|
||||
#define LR_ENTRY_BITS (LR_SPI_BITS + LR_FRAMENO_BITS + 1)
|
||||
|
|
@ -17545,12 +17550,14 @@ static int check_indirect_jump(struct bpf_verifier_env *env, struct bpf_insn *in
|
|||
}
|
||||
|
||||
for (i = 0; i < n - 1; i++) {
|
||||
mark_indirect_target(env, env->gotox_tmp_buf->items[i]);
|
||||
other_branch = push_stack(env, env->gotox_tmp_buf->items[i],
|
||||
env->insn_idx, env->cur_state->speculative);
|
||||
if (IS_ERR(other_branch))
|
||||
return PTR_ERR(other_branch);
|
||||
}
|
||||
env->insn_idx = env->gotox_tmp_buf->items[n-1];
|
||||
mark_indirect_target(env, env->insn_idx);
|
||||
return INSN_IDX_UPDATED;
|
||||
}
|
||||
|
||||
|
|
@ -20155,6 +20162,14 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
|
|||
|
||||
adjust_btf_func(env);
|
||||
|
||||
/* extension progs temporarily inherit the attach_type of their targets
|
||||
for verification purposes, so set it back to zero before returning
|
||||
*/
|
||||
if (env->prog->type == BPF_PROG_TYPE_EXT)
|
||||
env->prog->expected_attach_type = 0;
|
||||
|
||||
env->prog = __bpf_prog_select_runtime(env, env->prog, &ret);
|
||||
|
||||
err_release_maps:
|
||||
if (ret)
|
||||
release_insn_arrays(env);
|
||||
|
|
@ -20166,12 +20181,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
|
|||
if (!env->prog->aux->used_btfs)
|
||||
release_btfs(env);
|
||||
|
||||
/* extension progs temporarily inherit the attach_type of their targets
|
||||
for verification purposes, so set it back to zero before returning
|
||||
*/
|
||||
if (env->prog->type == BPF_PROG_TYPE_EXT)
|
||||
env->prog->expected_attach_type = 0;
|
||||
|
||||
*prog = env->prog;
|
||||
|
||||
module_put(env->attach_btf_mod);
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user