bpf-fixes

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEE+soXsSLHKoYyzcli6rmadz2vbToFAmhcdnsACgkQ6rmadz2v
 bTqkRA//f024qEkYGrnnkRk1ZoOuKWk7DEUvw/J+us9dhPvJABmUHL3ZuMmDp1D/
 EgGWAMg1q8tsXvlnAR4mV25T1DLpfMmo6hzwZgVeGl3X9YqTCPbgBONRr6F1HXP4
 OXHnm9vHVcki8z0vPIUHsAbudp0PrXx9lSUssT3kCoZuV0xeQKTznvUS9HwGC8vP
 ex59XrkNaUeEyVozsa0YFHtT57NAH/77QSj1A5HC/x0u9SJroao18ct3b/5t7QdQ
 N4hcc/GH+xoGDyXPFYFlst9kXmYwCpz26w8bCpBY5x0Red+LhkvHwRv6KM1Czl3J
 f9da+S2qbetqeiGJwg8/lNLnHQcgqUifYu5lr35ijpxf7Qgyw0jbT+Cy2kd68GcC
 J0GCminZep+bsKARriq9+ZBcm282xBTfzBN4936HTxC6zh41J+jdbOC62Gw+pXju
 9EJwQmY59KPUyDKz5mUm48NmY4g7Zcvk2y7kCaiD5Np+WR1eFbWT7v6eAchA+JRi
 tRfTR5eqSS17GybfrPntto2aoydEC2rPublMTu2OT3bjJe2WPf4aFZaGmOoQZwX2
 97sa0hpMSbf4zS7h1mqHQ9y3p9qvXTwzWikm1fjFeukvb53GiRYxax5LutpePxEU
 OFHREy4InWHdCet0Irr8u44UbrAkxiNUYBD5KLQO/ZUlrMmsrBI=
 =Buaz
 -----END PGP SIGNATURE-----

Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Pull bpf fixes from Alexei Starovoitov:

 - Fix use-after-free in libbpf when map is resized (Adin Scannell)

 - Fix verifier assumptions about 2nd argument of bpf_sysctl_get_name
   (Jerome Marchand)

 - Fix verifier assumption of nullness of d_inode in dentry (Song Liu)

 - Fix global starvation of LRU map (Willem de Bruijn)

 - Fix potential NULL dereference in btf_dump__free (Yuan Chen)

* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  selftests/bpf: adapt one more case in test_lru_map to the new target_free
  libbpf: Fix possible use-after-free for externs
  selftests/bpf: Convert test_sysctl to prog_tests
  bpf: Specify access type of bpf_sysctl_get_name args
  libbpf: Fix null pointer dereference in btf_dump__free on allocation failure
  bpf: Adjust free target to avoid global starvation of LRU map
  bpf: Mark dentry->d_inode as trusted_or_null
commit ee88bddf7f
Author: Linus Torvalds
Date:   2025-06-25 21:09:02 -07:00

15 changed files with 142 additions and 99 deletions


@@ -233,10 +233,16 @@ attempts in order to enforce the LRU property which have increasing impacts on
other CPUs involved in the following operation attempts:
- Attempt to use CPU-local state to batch operations
- Attempt to fetch free nodes from global lists
- Attempt to fetch ``target_free`` free nodes from global lists
- Attempt to pull any node from a global list and remove it from the hashmap
- Attempt to pull any node from any CPU's list and remove it from the hashmap
The number of nodes to borrow from the global list in a batch, ``target_free``,
depends on the size of the map. A larger batch size reduces lock contention, but
may also exhaust the global structure. The value is computed at map init to
avoid exhaustion, by limiting the aggregate reservation by all CPUs to half the
map size, with a minimum of a single element and a maximum budget of 128 at a time.
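
To make the sizing rule concrete, here is a minimal userspace sketch of the
same computation. LOCAL_FREE_TARGET is 128 in kernel/bpf/bpf_lru_list.h; the
function name and the plain nr_cpus parameter are illustrative stand-ins for
the kernel's num_possible_cpus() and clamp(), not kernel API:

#include <stdio.h>

#define LOCAL_FREE_TARGET 128	/* kernel's per-batch upper bound */

/* Mirrors: clamp((nr_elems / num_possible_cpus()) / 2, 1, LOCAL_FREE_TARGET) */
static unsigned int compute_target_free(unsigned int nr_elems,
					unsigned int nr_cpus)
{
	unsigned int t = (nr_elems / nr_cpus) / 2;	/* half of each CPU's share */

	if (t < 1)
		t = 1;			/* minimum of a single element */
	if (t > LOCAL_FREE_TARGET)
		t = LOCAL_FREE_TARGET;	/* maximum budget of 128 at a time */
	return t;
}

int main(void)
{
	/* 512-element map on 64 CPUs: (512 / 64) / 2 = 4 nodes per batch */
	printf("%u\n", compute_target_free(512, 64));
	/* 1M-element map on 4 CPUs: (1048576 / 4) / 2 = 131072, clamped to 128 */
	printf("%u\n", compute_target_free(1 << 20, 4));
	return 0;
}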
This algorithm is described visually in the following diagram. See the
description in commit 3a08c2fd7634 ("bpf: LRU List") for a full explanation of
the corresponding operations:


@@ -35,18 +35,18 @@ digraph {
fn_bpf_lru_list_pop_free_to_local [shape=rectangle,fillcolor=2,
label="Flush local pending,
Rotate Global list, move
LOCAL_FREE_TARGET
target_free
from global -> local"]
// Also corresponds to:
// fn__local_list_flush()
// fn_bpf_lru_list_rotate()
fn___bpf_lru_node_move_to_free[shape=diamond,fillcolor=2,
label="Able to free\nLOCAL_FREE_TARGET\nnodes?"]
label="Able to free\ntarget_free\nnodes?"]
fn___bpf_lru_list_shrink_inactive [shape=rectangle,fillcolor=3,
label="Shrink inactive list
up to remaining
LOCAL_FREE_TARGET
target_free
(global LRU -> local)"]
fn___bpf_lru_list_shrink [shape=diamond,fillcolor=2,
label="> 0 entries in\nlocal free list?"]


@@ -337,12 +337,12 @@ static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
list) {
__bpf_lru_node_move_to_free(l, node, local_free_list(loc_l),
BPF_LRU_LOCAL_LIST_T_FREE);
if (++nfree == LOCAL_FREE_TARGET)
if (++nfree == lru->target_free)
break;
}
if (nfree < LOCAL_FREE_TARGET)
__bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree,
if (nfree < lru->target_free)
__bpf_lru_list_shrink(lru, l, lru->target_free - nfree,
local_free_list(loc_l),
BPF_LRU_LOCAL_LIST_T_FREE);
@@ -577,6 +577,9 @@ static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf,
list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
buf += elem_size;
}
lru->target_free = clamp((nr_elems / num_possible_cpus()) / 2,
1, LOCAL_FREE_TARGET);
}
static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf,


@@ -58,6 +58,7 @@ struct bpf_lru {
del_from_htab_func del_from_htab;
void *del_arg;
unsigned int hash_offset;
unsigned int target_free;
unsigned int nr_scans;
bool percpu;
};


@@ -2134,7 +2134,7 @@ static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM,
.arg2_type = ARG_PTR_TO_MEM | MEM_WRITE,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
};
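
For context, the helper's second argument is a destination buffer that the
kernel fills with the sysctl name, which is what the MEM_WRITE annotation now
conveys to the verifier. A minimal sketch of a cgroup sysctl program using the
helper (the program name is illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/sysctl")
int sysctl_allow(struct bpf_sysctl *ctx)
{
	char name[64] = {};	/* destination: the helper writes here */

	/* Copies the sysctl's name into 'name'; returns the name length on
	 * success, or -E2BIG if the buffer is too small. */
	if (bpf_sysctl_get_name(ctx, name, sizeof(name), 0) < 0)
		return 1;	/* allow; name unavailable */

	return 1;		/* allow the sysctl access */
}

char _license[] SEC("license") = "GPL";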


@@ -7027,8 +7027,7 @@ BTF_TYPE_SAFE_TRUSTED(struct file) {
struct inode *f_inode;
};
BTF_TYPE_SAFE_TRUSTED(struct dentry) {
/* no negative dentry-s in places where bpf can see it */
BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct dentry) {
struct inode *d_inode;
};
@@ -7066,7 +7065,6 @@ static bool type_is_trusted(struct bpf_verifier_env *env,
BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task));
BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct linux_binprm));
BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct file));
BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct dentry));
return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_trusted");
}
@@ -7076,6 +7074,7 @@ static bool type_is_trusted_or_null(struct bpf_verifier_env *env,
const char *field_name, u32 btf_id)
{
BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct socket));
BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct dentry));
return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id,
"__safe_trusted_or_null");


@@ -226,6 +226,9 @@ static void btf_dump_free_names(struct hashmap *map)
size_t bkt;
struct hashmap_entry *cur;
if (!map)
return;
hashmap__for_each_entry(map, cur, bkt)
free((void *)cur->pkey);
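
This guard makes btf_dump_free_names() safe on maps that were never allocated,
mirroring free(NULL). A caller-side sketch of the failure path it protects;
dump_printf() and dump_all_types() are illustrative, not libbpf API:

#include <stdarg.h>
#include <stdio.h>
#include <bpf/btf.h>

/* printf callback that btf_dump__new() requires */
static void dump_printf(void *ctx, const char *fmt, va_list args)
{
	vfprintf(stderr, fmt, args);
}

static int dump_all_types(const struct btf *btf)
{
	struct btf_dump *d = btf_dump__new(btf, dump_printf, NULL, NULL);

	if (!d)		/* construction failed, possibly partway through */
		return -1;
	/* ... btf_dump__dump_type() for each type id ... */

	/* btf_dump__new()'s own error path also calls btf_dump__free() on a
	 * partially constructed object, so destructors such as
	 * btf_dump_free_names() must tolerate NULL members. */
	btf_dump__free(d);
	return 0;
}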


@@ -597,7 +597,7 @@ struct extern_desc {
int sym_idx;
int btf_id;
int sec_btf_id;
const char *name;
char *name;
char *essent_name;
bool is_set;
bool is_weak;
@@ -4259,7 +4259,9 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
return ext->btf_id;
}
t = btf__type_by_id(obj->btf, ext->btf_id);
ext->name = btf__name_by_offset(obj->btf, t->name_off);
ext->name = strdup(btf__name_by_offset(obj->btf, t->name_off));
if (!ext->name)
return -ENOMEM;
ext->sym_idx = i;
ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
@@ -9138,8 +9140,10 @@ void bpf_object__close(struct bpf_object *obj)
zfree(&obj->btf_custom_path);
zfree(&obj->kconfig);
for (i = 0; i < obj->nr_extern; i++)
for (i = 0; i < obj->nr_extern; i++) {
zfree(&obj->externs[i].name);
zfree(&obj->externs[i].essent_name);
}
zfree(&obj->externs);
obj->nr_extern = 0;
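
The underlying rule: btf__name_by_offset() returns a pointer into the BTF
object's internal string section, and later mutations of the BTF (such as the
resize exercised by the selftest below) can realloc that section. A minimal
sketch of the ownership pattern the fix adopts; take_extern_name() is a
hypothetical helper, not libbpf code:

#include <errno.h>
#include <string.h>
#include <bpf/btf.h>

static int take_extern_name(const struct btf *btf, const struct btf_type *t,
			    char **name_out)
{
	/* Borrowed pointer into btf's string section: only valid until the
	 * next mutation reallocates that section. */
	const char *borrowed = btf__name_by_offset(btf, t->name_off);

	if (!borrowed)
		return -EINVAL;

	/* Duplicate to take ownership, as the fix above does; the copy is
	 * released later via zfree() in bpf_object__close(). */
	*name_out = strdup(borrowed);
	return *name_out ? 0 : -ENOMEM;
}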


@@ -21,7 +21,6 @@ test_lirc_mode2_user
flow_dissector_load
test_tcpnotify_user
test_libbpf
test_sysctl
xdping
test_cpp
*.d


@@ -73,7 +73,7 @@ endif
# Order correspond to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_progs \
test_sockmap \
test_tcpnotify_user test_sysctl \
test_tcpnotify_user \
test_progs-no_alu32
TEST_INST_SUBDIRS := no_alu32
@@ -220,7 +220,7 @@ ifeq ($(VMLINUX_BTF),)
$(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)")
endif
# Define simple and short `make test_progs`, `make test_sysctl`, etc targets
# Define simple and short `make test_progs`, `make test_maps`, etc targets
# to build individual tests.
# NOTE: Semicolon at the end is critical to override lib.mk's default static
# rule for binaries.
@@ -329,7 +329,6 @@ NETWORK_HELPERS := $(OUTPUT)/network_helpers.o
$(OUTPUT)/test_sockmap: $(CGROUP_HELPERS) $(TESTING_HELPERS)
$(OUTPUT)/test_tcpnotify_user: $(CGROUP_HELPERS) $(TESTING_HELPERS) $(TRACE_HELPERS)
$(OUTPUT)/test_sock_fields: $(CGROUP_HELPERS) $(TESTING_HELPERS)
$(OUTPUT)/test_sysctl: $(CGROUP_HELPERS) $(TESTING_HELPERS)
$(OUTPUT)/test_tag: $(TESTING_HELPERS)
$(OUTPUT)/test_lirc_mode2_user: $(TESTING_HELPERS)
$(OUTPUT)/xdping: $(TESTING_HELPERS)


@@ -1,22 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <linux/filter.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/bpf_endian.h>
#include "bpf_util.h"
#include "test_progs.h"
#include "cgroup_helpers.h"
#include "testing_helpers.h"
#define CG_PATH "/foo"
#define MAX_INSNS 512
@@ -1608,26 +1594,19 @@ static int run_tests(int cgfd)
return fails ? -1 : 0;
}
int main(int argc, char **argv)
void test_sysctl(void)
{
int cgfd = -1;
int err = 0;
int cgfd;
cgfd = cgroup_setup_and_join(CG_PATH);
if (cgfd < 0)
goto err;
if (!ASSERT_OK_FD(cgfd, "create_cgroup"))
goto out;
/* Use libbpf 1.0 API mode */
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
if (!ASSERT_OK(run_tests(cgfd), "run_tests"))
goto out;
if (run_tests(cgfd))
goto err;
goto out;
err:
err = -1;
out:
close(cgfd);
cleanup_cgroup_environment();
return err;
return;
}


@@ -32,6 +32,16 @@ int my_int_last SEC(".data.array_not_last");
int percpu_arr[1] SEC(".data.percpu_arr");
/* at least one extern is included, to ensure that a specific
* regression is tested whereby resizing resulted in a use-after-free
* bug after type information is invalidated by the resize operation.
*
* There isn't a particularly good API to test for this specific condition,
* but having externs in the resizing tests covers this path.
*/
extern int LINUX_KERNEL_VERSION __kconfig;
long version_sink;
SEC("tp/syscalls/sys_enter_getpid")
int bss_array_sum(void *ctx)
{
@@ -44,6 +54,9 @@ int bss_array_sum(void *ctx)
for (size_t i = 0; i < bss_array_len; ++i)
sum += array[i];
/* see above; ensure this is not optimized out */
version_sink = LINUX_KERNEL_VERSION;
return 0;
}
@@ -59,6 +72,9 @@ int data_array_sum(void *ctx)
for (size_t i = 0; i < data_array_len; ++i)
sum += my_array[i];
/* see above; ensure this is not optimized out */
version_sink = LINUX_KERNEL_VERSION;
return 0;
}


@@ -2,6 +2,7 @@
/* Copyright (c) 2024 Google LLC. */
#include <vmlinux.h>
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
@@ -82,4 +83,21 @@ int BPF_PROG(path_d_path_from_file_argument, struct file *file)
return 0;
}
SEC("lsm.s/inode_rename")
__success
int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
struct inode *inode = new_dentry->d_inode;
ino_t ino;
if (!inode)
return 0;
ino = inode->i_ino;
if (ino == 0)
return -EACCES;
return 0;
}
char _license[] SEC("license") = "GPL";


@@ -2,6 +2,7 @@
/* Copyright (c) 2024 Google LLC. */
#include <vmlinux.h>
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <linux/limits.h>
@@ -158,4 +159,18 @@ int BPF_PROG(path_d_path_kfunc_non_lsm, struct path *path, struct file *f)
return 0;
}
SEC("lsm.s/inode_rename")
__failure __msg("invalid mem access 'trusted_ptr_or_null_'")
int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
struct inode *inode = new_dentry->d_inode;
ino_t ino;
ino = inode->i_ino;
if (ino == 0)
return -EACCES;
return 0;
}
char _license[] SEC("license") = "GPL";


@@ -138,6 +138,18 @@ static int sched_next_online(int pid, int *next_to_try)
return ret;
}
/* Derive target_free from map_size, same as bpf_common_lru_populate */
static unsigned int __tgt_size(unsigned int map_size)
{
return (map_size / nr_cpus) / 2;
}
/* Inverse of how bpf_common_lru_populate derives target_free from map_size. */
static unsigned int __map_size(unsigned int tgt_free)
{
return tgt_free * nr_cpus * 2;
}
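
A quick arithmetic check of the round-trip, assuming a hypothetical run with
nr_cpus = 4: __map_size(128) = 128 * 4 * 2 = 1024, and __tgt_size(1024) =
(1024 / 4) / 2 = 128. The two helpers invert each other exactly whenever
map_size is a multiple of 2 * nr_cpus, mirroring the clamp() computation in
bpf_common_lru_populate() before its bounds of 1 and LOCAL_FREE_TARGET apply.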
/* Size of the LRU map is 2
* Add key=1 (+1 key)
* Add key=2 (+1 key)
@@ -231,11 +243,11 @@ static void test_lru_sanity0(int map_type, int map_flags)
printf("Pass\n");
}
/* Size of the LRU map is 1.5*tgt_free
* Insert 1 to tgt_free (+tgt_free keys)
* Lookup 1 to tgt_free/2
* Insert 1+tgt_free to 2*tgt_free (+tgt_free keys)
* => 1+tgt_free/2 to LOCALFREE_TARGET will be removed by LRU
/* Verify that unreferenced elements are recycled before referenced ones.
* Insert elements.
* Reference a subset of these.
* Insert more, enough to trigger recycling.
* Verify that unreferenced are recycled.
*/
static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
{
@@ -257,7 +269,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
batch_size = tgt_free / 2;
assert(batch_size * 2 == tgt_free);
map_size = tgt_free + batch_size;
map_size = __map_size(tgt_free) + batch_size;
lru_map_fd = create_map(map_type, map_flags, map_size);
assert(lru_map_fd != -1);
@@ -266,13 +278,13 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
value[0] = 1234;
/* Insert 1 to tgt_free (+tgt_free keys) */
end_key = 1 + tgt_free;
/* Insert map_size - batch_size keys */
end_key = 1 + __map_size(tgt_free);
for (key = 1; key < end_key; key++)
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
/* Lookup 1 to tgt_free/2 */
/* Lookup 1 to batch_size */
end_key = 1 + batch_size;
for (key = 1; key < end_key; key++) {
assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
@@ -280,12 +292,13 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
BPF_NOEXIST));
}
/* Insert 1+tgt_free to 2*tgt_free
* => 1+tgt_free/2 to LOCALFREE_TARGET will be
/* Insert another map_size - batch_size keys
* Map will contain keys 1 to batch_size plus these latest ones;
* the previous keys 1+batch_size to map_size - batch_size will have
* been removed by LRU
*/
key = 1 + tgt_free;
end_key = key + tgt_free;
key = 1 + __map_size(tgt_free);
end_key = key + __map_size(tgt_free);
for (; key < end_key; key++) {
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
@@ -301,17 +314,8 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
printf("Pass\n");
}
/* Size of the LRU map 1.5 * tgt_free
* Insert 1 to tgt_free (+tgt_free keys)
* Update 1 to tgt_free/2
* => The original 1 to tgt_free/2 will be removed due to
* the LRU shrink process
* Re-insert 1 to tgt_free/2 again and do a lookup immeidately
* Insert 1+tgt_free to tgt_free*3/2
* Insert 1+tgt_free*3/2 to tgt_free*5/2
* => Key 1+tgt_free to tgt_free*3/2
* will be removed from LRU because it has never
* been lookup and ref bit is not set
/* Verify that insertions exceeding map size will recycle the oldest.
* Verify that unreferenced elements are recycled before referenced.
*/
static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
{
@@ -334,7 +338,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
batch_size = tgt_free / 2;
assert(batch_size * 2 == tgt_free);
map_size = tgt_free + batch_size;
map_size = __map_size(tgt_free) + batch_size;
lru_map_fd = create_map(map_type, map_flags, map_size);
assert(lru_map_fd != -1);
@@ -343,8 +347,8 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
value[0] = 1234;
/* Insert 1 to tgt_free (+tgt_free keys) */
end_key = 1 + tgt_free;
/* Insert map_size - batch_size keys */
end_key = 1 + __map_size(tgt_free);
for (key = 1; key < end_key; key++)
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
@@ -357,8 +361,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
* shrink the inactive list to get tgt_free
* number of free nodes.
*
* Hence, the oldest key 1 to tgt_free/2
* are removed from the LRU list.
* Hence, the oldest key is removed from the LRU list.
*/
key = 1;
if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
@@ -370,8 +373,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
BPF_EXIST));
}
/* Re-insert 1 to tgt_free/2 again and do a lookup
* immeidately.
/* Re-insert 1 to batch_size again and do a lookup immediately.
*/
end_key = 1 + batch_size;
value[0] = 4321;
@@ -387,17 +389,18 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
value[0] = 1234;
/* Insert 1+tgt_free to tgt_free*3/2 */
end_key = 1 + tgt_free + batch_size;
for (key = 1 + tgt_free; key < end_key; key++)
/* Insert batch_size new elements */
key = 1 + __map_size(tgt_free);
end_key = key + batch_size;
for (; key < end_key; key++)
/* These newly added but not referenced keys will be
* gone during the next LRU shrink.
*/
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
/* Insert 1+tgt_free*3/2 to tgt_free*5/2 */
end_key = key + tgt_free;
/* Insert map_size - batch_size elements */
end_key += __map_size(tgt_free);
for (; key < end_key; key++) {
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
@@ -413,12 +416,12 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
printf("Pass\n");
}
/* Size of the LRU map is 2*tgt_free
* It is to test the active/inactive list rotation
* Insert 1 to 2*tgt_free (+2*tgt_free keys)
* Lookup key 1 to tgt_free*3/2
* Add 1+2*tgt_free to tgt_free*5/2 (+tgt_free/2 keys)
* => key 1+tgt_free*3/2 to 2*tgt_free are removed from LRU
/* Test the active/inactive list rotation
*
* Fill the whole map, deplete the free list.
* Reference all except the last lru->target_free elements.
* Insert lru->target_free new elements. This triggers one shrink.
* Verify that the non-referenced elements are replaced.
*/
static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
{
@@ -437,8 +440,7 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
assert(sched_next_online(0, &next_cpu) != -1);
batch_size = tgt_free / 2;
assert(batch_size * 2 == tgt_free);
batch_size = __tgt_size(tgt_free);
map_size = tgt_free * 2;
lru_map_fd = create_map(map_type, map_flags, map_size);
@@ -449,23 +451,21 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
value[0] = 1234;
/* Insert 1 to 2*tgt_free (+2*tgt_free keys) */
end_key = 1 + (2 * tgt_free);
/* Fill the map */
end_key = 1 + map_size;
for (key = 1; key < end_key; key++)
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
/* Lookup key 1 to tgt_free*3/2 */
end_key = tgt_free + batch_size;
/* Reference all but the last batch_size */
end_key = 1 + map_size - batch_size;
for (key = 1; key < end_key; key++) {
assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
assert(!bpf_map_update_elem(expected_map_fd, &key, value,
BPF_NOEXIST));
}
/* Add 1+2*tgt_free to tgt_free*5/2
* (+tgt_free/2 keys)
*/
/* Insert new batch_size: replaces the non-referenced elements */
key = 2 * tgt_free + 1;
end_key = key + batch_size;
for (; key < end_key; key++) {
@@ -500,7 +500,8 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
lru_map_fd = create_map(map_type, map_flags,
3 * tgt_free * nr_cpus);
else
lru_map_fd = create_map(map_type, map_flags, 3 * tgt_free);
lru_map_fd = create_map(map_type, map_flags,
3 * __map_size(tgt_free));
assert(lru_map_fd != -1);
expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0,