mirror of
https://github.com/torvalds/linux.git
synced 2026-05-13 00:28:54 +02:00
selftests/bpf: Improve test coverage for kfunc call
On powerpc, immediate load instructions are sign extended. For unsigned types, arguments should be explicitly zero-extended by the caller. For kfunc calls, this needs to be handled in the JIT code. In bpf_kfunc_call_test4(), which tests for sign-extension of signed argument types in kfunc calls, add some additional failure checks. Also add bpf_kfunc_call_test5() to test zero-extension of unsigned argument types in kfunc calls. Signed-off-by: Hari Bathini <hbathini@linux.ibm.com> Acked-by: Yonghong Song <yonghong.song@linux.dev> Link: https://lore.kernel.org/r/20260312080113.843408-1-hbathini@linux.ibm.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent
ca0f39a369
commit
2af3aa702c
|
|
@ -74,6 +74,8 @@ static struct kfunc_test_params kfunc_tests[] = {
|
|||
TC_TEST(kfunc_call_test1, 12),
|
||||
TC_TEST(kfunc_call_test2, 3),
|
||||
TC_TEST(kfunc_call_test4, -1234),
|
||||
TC_TEST(kfunc_call_test5, 0),
|
||||
TC_TEST(kfunc_call_test5_asm, 0),
|
||||
TC_TEST(kfunc_call_test_ref_btf_id, 0),
|
||||
TC_TEST(kfunc_call_test_get_mem, 42),
|
||||
SYSCALL_TEST(kfunc_syscall_test, 0),
|
||||
|
|
|
|||
|
|
@ -2,8 +2,106 @@
|
|||
/* Copyright (c) 2021 Facebook */
|
||||
#include <vmlinux.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include "bpf_misc.h"
|
||||
#include "../test_kmods/bpf_testmod_kfunc.h"
|
||||
|
||||
/*
 * Exercise bpf_kfunc_call_test5() with u8/u16/u32 arguments to verify that
 * unsigned kfunc arguments are zero-extended (not sign-extended) before the
 * call — the JIT must handle this on ISAs whose immediate loads sign-extend
 * (e.g. powerpc).  Returns 0 on success, -1 if no socket is available, or
 * the kfunc's non-zero failure code otherwise.
 */
SEC("tc")
int kfunc_call_test5(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;
	int ret;
	u32 val32;
	u16 val16;
	u8 val8;

	/* Need a full socket for the program to be meaningful/loadable */
	if (!sk)
		return -1;

	sk = bpf_sk_fullsock(sk);
	if (!sk)
		return -1;

	/*
	 * Test with constant values to verify zero-extension.
	 * ISA-dependent BPF asm:
	 * With ALU32: w1 = 0xFF; w2 = 0xFFFF; w3 = 0xFFFFffff
	 * Without ALU32: r1 = 0xFF; r2 = 0xFFFF; r3 = 0xFFFFffff
	 * Both zero-extend to 64-bit before the kfunc call.
	 */
	ret = bpf_kfunc_call_test5(0xFF, 0xFFFF, 0xFFFFffffULL);
	if (ret)
		return ret;

	/* Repeat with runtime (non-constant) values the verifier can't fold */
	val32 = bpf_get_prandom_u32();
	val16 = val32 & 0xFFFF;
	val8 = val32 & 0xFF;
	ret = bpf_kfunc_call_test5(val8, val16, val32);
	if (ret)
		return ret;

	/*
	 * Test multiplication with different operand sizes:
	 *
	 * val8 * 0xFF:
	 * - Both operands promote to int (32-bit signed)
	 * - Result: 32-bit multiplication, truncated to u8, then zero-extended
	 *
	 * val16 * 0xFFFF:
	 * - Both operands promote to int (32-bit signed)
	 * - Result: 32-bit multiplication, truncated to u16, then zero-extended
	 *
	 * val32 * 0xFFFFffffULL:
	 * - val32 (u32) promotes to unsigned long long (due to ULL suffix)
	 * - Result: 64-bit unsigned multiplication, truncated to u32, then zero-extended
	 */
	ret = bpf_kfunc_call_test5(val8 * 0xFF, val16 * 0xFFFF, val32 * 0xFFFFffffULL);
	if (ret)
		return ret;

	return 0;
}
|
||||
|
||||
/*
 * Assembly version testing the multiplication edge case explicitly.
 * This ensures consistent testing across different ISA versions.
 *
 * Hand-written BPF asm so the exact instruction sequence (64-bit masks and
 * multiplies feeding the kfunc argument registers r1-r3) is fixed regardless
 * of compiler/ALU32 codegen.  Returns the kfunc's result in r0 (0 on
 * success).
 */
SEC("tc")
__naked int kfunc_call_test5_asm(void)
{
	asm volatile (
		/* Get a random u32 value */
		"call %[bpf_get_prandom_u32];"
		"r6 = r0;"		/* Save val32 in r6 */

		/* Prepare first argument: val8 * 0xFF */
		"r1 = r6;"
		"r1 &= 0xFF;"		/* val8 = val32 & 0xFF */
		"r7 = 0xFF;"
		"r1 *= r7;"		/* 64-bit mult: r1 = r1 * r7 */

		/* Prepare second argument: val16 * 0xFFFF */
		"r2 = r6;"
		"r2 &= 0xFFFF;"		/* val16 = val32 & 0xFFFF */
		"r7 = 0xFFFF;"
		"r2 *= r7;"		/* 64-bit mult: r2 = r2 * r7 */

		/* Prepare third argument: val32 * 0xFFFFffff */
		"r3 = r6;"		/* val32 */
		"r7 = 0xFFFFffff;"
		"r3 *= r7;"		/* 64-bit mult: r3 = r3 * r7 */

		/* Call kfunc with multiplication results */
		"call bpf_kfunc_call_test5;"

		/* Check return value: propagate non-zero failure codes */
		"if r0 != 0 goto exit_%=;"
		"r0 = 0;"
		"exit_%=: exit;"
		:
		: __imm(bpf_get_prandom_u32)
		: __clobber_all);
}
|
||||
|
||||
SEC("tc")
|
||||
int kfunc_call_test4(struct __sk_buff *skb)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -760,12 +760,63 @@ __bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
|
|||
|
||||
/*
 * Kernel-side kfunc verifying that the BPF JIT sign-extends signed argument
 * types (s8/s16/s32) to 64 bits at the call boundary.  Callers are expected
 * to pass negative values in a, b and c; returns 1/2/3 identifying which
 * argument arrived non-negative (i.e. was zero-extended by a buggy JIT),
 * otherwise returns the sum (long)a + (long)b + (long)c + d.
 */
__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
	/*
	 * Make val as volatile to avoid compiler optimizations.
	 * Verify that negative signed values remain negative after
	 * sign-extension (JIT must sign-extend, not zero-extend).
	 */
	volatile long val;

	/* val will be positive, if JIT does zero-extension instead of sign-extension */
	val = a;
	if (val >= 0)
		return 1;

	val = b;
	if (val >= 0)
		return 2;

	val = c;
	if (val >= 0)
		return 3;

	/*
	 * Provoke the compiler to assume that the caller has sign-extended a,
	 * b and c on platforms where this is required (e.g. s390x).
	 */
	return (long)a + (long)b + (long)c + d;
}
|
||||
|
||||
/*
 * Kernel-side kfunc verifying that the BPF JIT zero-extends unsigned
 * argument types (u8/u16/u32) to 64 bits at the call boundary.  Returns 0 on
 * success; 1/3/5 if the widened value does not equal the zero-extended
 * argument, or 2/4/6 if a sign bit leaked in (value went negative),
 * identifying which argument failed.
 */
__bpf_kfunc int bpf_kfunc_call_test5(u8 a, u16 b, u32 c)
{
	/*
	 * Make val as volatile to avoid compiler optimizations on the below checks
	 * In C, assigning u8/u16/u32 to long performs zero-extension.
	 */
	volatile long val = a;

	/* Check zero-extension */
	if (val != (unsigned long)a)
		return 1;
	/* Check no sign-extension */
	if (val < 0)
		return 2;

	val = b;
	if (val != (unsigned long)b)
		return 3;
	if (val < 0)
		return 4;

	val = c;
	if (val != (unsigned long)c)
		return 5;
	if (val < 0)
		return 6;

	return 0;
}
|
||||
|
||||
static struct prog_test_ref_kfunc prog_test_struct = {
|
||||
.a = 42,
|
||||
.b = 108,
|
||||
|
|
@ -1228,6 +1279,7 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
|
|||
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
|
||||
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
|
||||
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
|
||||
BTF_ID_FLAGS(func, bpf_kfunc_call_test5)
|
||||
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
|
||||
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
|
||||
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
|
||||
|
|
|
|||
|
|
@ -110,6 +110,7 @@ __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b,
|
|||
int bpf_kfunc_call_test2(struct sock *sk, __u32 a, __u32 b) __ksym;
|
||||
struct sock *bpf_kfunc_call_test3(struct sock *sk) __ksym;
|
||||
long bpf_kfunc_call_test4(signed char a, short b, int c, long d) __ksym;
|
||||
int bpf_kfunc_call_test5(__u8 a, __u16 b, __u32 c) __ksym;
|
||||
|
||||
void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb) __ksym;
|
||||
void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p) __ksym;
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user