Merge branch 'slab/for-7.1/misc' into slab/for-next

Merge misc slab changes that are not related to sheaves. Various
improvements for sysfs, debugging and testing.
This commit is contained in:
Vlastimil Babka (SUSE) 2026-04-07 14:39:34 +02:00
commit 44e0ebe4ac
4 changed files with 102 additions and 16 deletions

View File

@@ -24354,6 +24354,7 @@ F: Documentation/admin-guide/mm/slab.rst
F: Documentation/mm/slab.rst
F: include/linux/mempool.h
F: include/linux/slab.h
F: lib/tests/slub_kunit.c
F: mm/failslab.c
F: mm/mempool.c
F: mm/slab.h

View File

@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <linux/perf_event.h>
#include "../mm/slab.h"
static struct kunit_resource resource;
@@ -291,6 +292,94 @@ static void test_krealloc_redzone_zeroing(struct kunit *test)
kmem_cache_destroy(s);
}
#ifdef CONFIG_PERF_EVENTS
#define NR_ITERATIONS 1000
#define NR_OBJECTS 1000
static void *objects[NR_OBJECTS];
/*
 * State shared between test_kmalloc_kfree_nolock() and the perf overflow
 * handler that it installs; passed via the perf event's
 * overflow_handler_context.
 */
struct test_nolock_context {
struct kunit *test;		/* owning kunit test instance */
int callback_count;		/* number of overflow handler invocations */
int alloc_ok;			/* kmalloc_nolock() successes in the handler */
int alloc_fail;			/* kmalloc_nolock() failures in the handler */
struct perf_event *event;	/* the kernel counter driving the handler */
};
/*
 * Hardware CPU-cycles event, created disabled and in frequency mode at a
 * high sample rate so the overflow handler fires often while the test's
 * kmalloc/kfree loop runs.
 */
static struct perf_event_attr hw_attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
.size = sizeof(struct perf_event_attr),
.pinned = 1,		/* must be on the PMU; don't multiplex */
.disabled = 1,		/* enabled explicitly by the test */
.freq = 1,		/* sample_freq is a frequency, not a period */
.sample_freq = 100000,
};
/*
 * Perf overflow handler: runs in a restricted (IRQ/NMI-like) context where
 * regular kmalloc()/kfree() are not safe, exercising kmalloc_nolock() and
 * kfree_nolock() instead.  Alternates gfp between 0 and __GFP_ACCOUNT on
 * successive invocations and tallies results in the shared context.
 */
static void overflow_handler_test_kmalloc_kfree_nolock(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
void *objp;
gfp_t gfp;
struct test_nolock_context *ctx = event->overflow_handler_context;
/* __GFP_ACCOUNT to test kmalloc_nolock() in alloc_slab_obj_exts() */
gfp = (ctx->callback_count % 2) ? 0 : __GFP_ACCOUNT;
objp = kmalloc_nolock(64, gfp, NUMA_NO_NODE);
if (objp)
ctx->alloc_ok++;
else
ctx->alloc_fail++;
/* kfree_nolock(NULL) is expected to be a no-op, like kfree(NULL) */
kfree_nolock(objp);
ctx->callback_count++;
}
/*
 * Stress kmalloc_nolock()/kfree_nolock() from perf overflow context while
 * this task concurrently hammers the same-size slab cache with ordinary
 * kmalloc()/kfree().  The overflow handler (installed on a pinned per-task
 * CPU-cycles event) interrupts the loop at a high frequency, so the nolock
 * variants race against the regular allocation paths.
 *
 * The test is skipped (not failed) when the perf event cannot be created
 * (e.g. no hardware PMU) or when a regular allocation fails; otherwise it
 * expects no slab debugging errors to have been recorded.
 */
static void test_kmalloc_kfree_nolock(struct kunit *test)
{
int i, j;
struct test_nolock_context ctx = { .test = test };
struct perf_event *event;
bool alloc_fail = false;
/* Per-task counter on any CPU (-1); handler gets &ctx as its context. */
event = perf_event_create_kernel_counter(&hw_attr, -1, current,
overflow_handler_test_kmalloc_kfree_nolock,
&ctx);
if (IS_ERR(event))
kunit_skip(test, "Failed to create perf event");
ctx.event = event;
perf_event_enable(ctx.event);
for (i = 0; i < NR_ITERATIONS; i++) {
for (j = 0; j < NR_OBJECTS; j++) {
/* Alternate accounting mode per iteration, mirroring the handler. */
gfp_t gfp = (i % 2) ? GFP_KERNEL : GFP_KERNEL_ACCOUNT;
objects[j] = kmalloc(64, gfp);
if (!objects[j]) {
/* Free everything allocated so far, then bail out. */
j--;
while (j >= 0)
kfree(objects[j--]);
alloc_fail = true;
goto cleanup;
}
}
for (j = 0; j < NR_OBJECTS; j++)
kfree(objects[j]);
}
cleanup:
/* Quiesce and release the event before inspecting the results. */
perf_event_disable(ctx.event);
perf_event_release_kernel(ctx.event);
kunit_info(test, "callback_count: %d, alloc_ok: %d, alloc_fail: %d\n",
ctx.callback_count, ctx.alloc_ok, ctx.alloc_fail);
if (alloc_fail)
kunit_skip(test, "Allocation failed");
KUNIT_EXPECT_EQ(test, 0, slab_errors);
}
#endif
static int test_init(struct kunit *test)
{
slab_errors = 0;
@@ -315,6 +404,9 @@ static struct kunit_case test_cases[] = {
KUNIT_CASE(test_kfree_rcu_wq_destroy),
KUNIT_CASE(test_leak_destroy),
KUNIT_CASE(test_krealloc_redzone_zeroing),
#ifdef CONFIG_PERF_EVENTS
KUNIT_CASE_SLOW(test_kmalloc_kfree_nolock),
#endif
{}
};

View File

@@ -172,6 +172,7 @@ config SLUB
config KVFREE_RCU_BATCHED
def_bool y
depends on !SLUB_TINY && !TINY_RCU
depends on !RCU_STRICT_GRACE_PERIOD
config SLUB_TINY
bool "Configure for minimal memory footprint"

View File

@@ -8975,7 +8975,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
return len;
}
#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
#define to_slab_attr(n) container_of_const(n, struct slab_attribute, attr)
#define to_slab(n) container_of(n, struct kmem_cache, kobj)
struct slab_attribute {
@@ -8985,10 +8985,10 @@ struct slab_attribute {
};
#define SLAB_ATTR_RO(_name) \
static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400)
static const struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400)
#define SLAB_ATTR(_name) \
static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600)
static const struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600)
static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
{
@@ -9382,7 +9382,7 @@ static ssize_t skip_kfence_store(struct kmem_cache *s,
SLAB_ATTR(skip_kfence);
#endif
static struct attribute *slab_attrs[] = {
static const struct attribute *const slab_attrs[] = {
&slab_size_attr.attr,
&object_size_attr.attr,
&objs_per_slab_attr.attr,
@@ -9459,15 +9459,13 @@ static struct attribute *slab_attrs[] = {
NULL
};
static const struct attribute_group slab_attr_group = {
.attrs = slab_attrs,
};
ATTRIBUTE_GROUPS(slab);
static ssize_t slab_attr_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
{
struct slab_attribute *attribute;
const struct slab_attribute *attribute;
struct kmem_cache *s;
attribute = to_slab_attr(attr);
@@ -9483,7 +9481,7 @@ static ssize_t slab_attr_store(struct kobject *kobj,
struct attribute *attr,
const char *buf, size_t len)
{
struct slab_attribute *attribute;
const struct slab_attribute *attribute;
struct kmem_cache *s;
attribute = to_slab_attr(attr);
@@ -9508,6 +9506,7 @@ static const struct sysfs_ops slab_sysfs_ops = {
static const struct kobj_type slab_ktype = {
.sysfs_ops = &slab_sysfs_ops,
.release = kmem_cache_release,
.default_groups = slab_groups,
};
static struct kset *slab_kset;
@@ -9595,10 +9594,6 @@ static int sysfs_slab_add(struct kmem_cache *s)
if (err)
goto out;
err = sysfs_create_group(&s->kobj, &slab_attr_group);
if (err)
goto out_del_kobj;
if (!unmergeable) {
/* Setup first alias */
sysfs_slab_alias(s, s->name);
@@ -9607,9 +9602,6 @@ static int sysfs_slab_add(struct kmem_cache *s)
if (!unmergeable)
kfree(name);
return err;
out_del_kobj:
kobject_del(&s->kobj);
goto out;
}
void sysfs_slab_unlink(struct kmem_cache *s)