Merge branch 'for-7.1/dax-hmem' into cxl-for-next

The series addresses conflicts between HMEM and CXL when handling Soft
Reserved memory ranges. CXL makes a best-effort attempt to claim the Soft
Reserved memory ranges that correspond to CXL regions. If that fails, the
ranges are punted back to HMEM.

tools/testing/cxl: Test dax_hmem takeover of CXL regions
tools/testing/cxl: Simulate auto-assembly failure
dax/hmem: Parent dax_hmem devices
dax/hmem: Fix singleton confusion between dax_hmem_work and hmem devices
dax/hmem: Reduce visibility of dax_cxl coordination symbols
cxl/region: Constify cxl_region_resource_contains()
cxl/region: Limit visibility of cxl_region_contains_resource()
dax/cxl: Fix HMEM dependencies
cxl/region: Fix use-after-free from auto assembly failure
dax/hmem, cxl: Defer and resolve Soft Reserved ownership
cxl/region: Add helper to check Soft Reserved containment by CXL regions
dax: Track all dax_region allocations under a global resource tree
dax/cxl, hmem: Initialize hmem early and defer dax_cxl binding
dax/hmem: Gate Soft Reserved deferral on DEV_DAX_CXL
dax/hmem: Request cxl_acpi and cxl_pci before walking Soft Reserved ranges
dax/hmem: Factor HMEM registration into __hmem_register_device()
dax/bus: Use dax_region_put() in alloc_dax_region() error path
This commit is contained in:
Dave Jiang 2026-04-03 12:21:27 -07:00
commit 303d32843b
16 changed files with 462 additions and 35 deletions

View File

@ -1103,6 +1103,14 @@ static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
if (!cxld->region) {
cxld->region = cxlr;
/*
* Now that cxld->region is set the intermediate staging state
* can be cleared.
*/
if (cxld == &cxled->cxld &&
cxled->state == CXL_DECODER_STATE_AUTO_STAGED)
cxled->state = CXL_DECODER_STATE_AUTO;
get_device(&cxlr->dev);
}
@ -1844,6 +1852,7 @@ static int cxl_region_attach_auto(struct cxl_region *cxlr,
pos = p->nr_targets;
p->targets[pos] = cxled;
cxled->pos = pos;
cxled->state = CXL_DECODER_STATE_AUTO_STAGED;
p->nr_targets++;
return 0;
@ -2193,6 +2202,47 @@ static int cxl_region_attach(struct cxl_region *cxlr,
return 0;
}
/*
 * Bus-walk match callback: returns non-zero when @dev is a CXL region
 * whose target list holds @data (an endpoint decoder) at the decoder's
 * recorded position (cxled->pos).
 */
static int cxl_region_by_target(struct device *dev, const void *data)
{
const struct cxl_endpoint_decoder *cxled = data;
struct cxl_region_params *p;
struct cxl_region *cxlr;
/* skip bus devices that are not regions */
if (!is_cxl_region(dev))
return 0;
cxlr = to_cxl_region(dev);
p = &cxlr->params;
return p->targets[cxled->pos] == cxled;
}
/*
 * When an auto-region fails to assemble the decoder may be listed as a target,
 * but not fully attached (state still AUTO_STAGED). Roll back the partial
 * bookkeeping done by cxl_region_attach_auto() so the decoder is reusable.
 */
static void cxl_cancel_auto_attach(struct cxl_endpoint_decoder *cxled)
{
struct cxl_region_params *p;
struct cxl_region *cxlr;
int pos = cxled->pos; /* cache: cxled->pos is reset to -1 below */
/* nothing to undo unless the decoder is mid-assembly */
if (cxled->state != CXL_DECODER_STATE_AUTO_STAGED)
return;
/* __free(put_device) drops the bus_find_device() reference at scope exit */
struct device *dev __free(put_device) =
bus_find_device(&cxl_bus_type, NULL, cxled, cxl_region_by_target);
if (!dev)
return;
cxlr = to_cxl_region(dev);
p = &cxlr->params;
/* reverse the staging performed by cxl_region_attach_auto() */
p->nr_targets--;
cxled->state = CXL_DECODER_STATE_AUTO;
cxled->pos = -1;
p->targets[pos] = NULL;
}
static struct cxl_region *
__cxl_decoder_detach(struct cxl_region *cxlr,
struct cxl_endpoint_decoder *cxled, int pos,
@ -2216,8 +2266,10 @@ __cxl_decoder_detach(struct cxl_region *cxlr,
cxled = p->targets[pos];
} else {
cxlr = cxled->cxld.region;
if (!cxlr)
if (!cxlr) {
cxl_cancel_auto_attach(cxled);
return NULL;
}
p = &cxlr->params;
}
@ -4217,6 +4269,36 @@ static int cxl_region_setup_poison(struct cxl_region *cxlr)
return devm_add_action_or_reset(dev, remove_debugfs, dentry);
}
/*
 * Bus-walk match callback: non-zero when @dev is a committed CXL region
 * whose HPA resource fully contains @data (a struct resource).
 */
static int region_contains_resource(struct device *dev, const void *data)
{
const struct resource *res = data;
struct cxl_region *cxlr;
struct cxl_region_params *p;
if (!is_cxl_region(dev))
return 0;
cxlr = to_cxl_region(dev);
p = &cxlr->params;
/* only fully committed regions count as owning an address range */
if (p->state != CXL_CONFIG_COMMIT)
return 0;
/* region may not have an HPA resource assigned yet */
if (!p->res)
return 0;
return resource_contains(p->res, res) ? 1 : 0;
}
/*
 * cxl_region_contains_resource() - report whether any committed CXL region
 * fully contains @res. Used by dax_hmem to decide whether a Soft Reserved
 * range has been claimed by CXL. Holds the region rwsem for read so region
 * commit state is stable during the walk.
 */
bool cxl_region_contains_resource(const struct resource *res)
{
guard(rwsem_read)(&cxl_rwsem.region);
struct device *dev __free(put_device) = bus_find_device(
&cxl_bus_type, NULL, res, region_contains_resource);
return !!dev;
}
/* exported only for the dax_hmem module, not general consumers */
EXPORT_SYMBOL_FOR_MODULES(cxl_region_contains_resource, "dax_hmem");
static int cxl_region_can_probe(struct cxl_region *cxlr)
{
struct cxl_region_params *p = &cxlr->params;

View File

@ -287,12 +287,14 @@ struct cxl_decoder {
};
/*
* Track whether this decoder is reserved for region autodiscovery, or
* free for userspace provisioning.
* Track whether this decoder is free for userspace provisioning, reserved for
* region autodiscovery, whether it is started connecting (awaiting other
* peers), or has completed auto assembly.
*/
enum cxl_decoder_state {
CXL_DECODER_STATE_MANUAL,
CXL_DECODER_STATE_AUTO,
CXL_DECODER_STATE_AUTO_STAGED,
};
/**
@ -843,6 +845,7 @@ struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev);
int cxl_add_to_region(struct cxl_endpoint_decoder *cxled);
struct cxl_dax_region *to_cxl_dax_region(struct device *dev);
u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint, u64 spa);
bool cxl_region_contains_resource(const struct resource *res);
#else
static inline bool is_cxl_pmem_region(struct device *dev)
{
@ -865,6 +868,10 @@ static inline u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint,
{
return 0;
}
static inline bool cxl_region_contains_resource(const struct resource *res)
{
return false;
}
#endif
void cxl_endpoint_parse_cdat(struct cxl_port *port);

View File

@ -32,6 +32,9 @@ config DEV_DAX_HMEM
depends on EFI_SOFT_RESERVE
select NUMA_KEEP_MEMINFO if NUMA_MEMBLKS
default DEV_DAX
depends on CXL_ACPI || !CXL_ACPI
depends on CXL_PCI || !CXL_PCI
depends on CXL_BUS || !CXL_BUS
help
EFI 2.8 platforms, and others, may advertise 'specific purpose'
memory. For example, a high bandwidth memory pool. The
@ -48,6 +51,7 @@ config DEV_DAX_CXL
tristate "CXL DAX: direct access to CXL RAM regions"
depends on CXL_BUS && CXL_REGION && DEV_DAX
default CXL_REGION && DEV_DAX
depends on DEV_DAX_HMEM || !DEV_DAX_HMEM
help
CXL RAM regions are either mapped by platform-firmware
and published in the initial system-memory map as "System RAM", mapped

View File

@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += hmem/
obj-$(CONFIG_DAX) += dax.o
obj-$(CONFIG_DEV_DAX) += device_dax.o
obj-$(CONFIG_DEV_DAX_KMEM) += kmem.o
@ -10,5 +11,3 @@ dax-y += bus.o
device_dax-y := device.o
dax_pmem-y := pmem.o
dax_cxl-y := cxl.o
obj-y += hmem/

View File

@ -10,6 +10,7 @@
#include "dax-private.h"
#include "bus.h"
static struct resource dax_regions = DEFINE_RES_MEM_NAMED(0, -1, "DAX Regions");
static DEFINE_MUTEX(dax_bus_lock);
/*
@ -627,6 +628,7 @@ static void dax_region_unregister(void *region)
sysfs_remove_groups(&dax_region->dev->kobj,
dax_region_attribute_groups);
release_resource(&dax_region->res);
dax_region_put(dax_region);
}
@ -635,6 +637,7 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
unsigned long flags)
{
struct dax_region *dax_region;
int rc;
/*
* The DAX core assumes that it can store its private data in
@ -667,14 +670,25 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
.flags = IORESOURCE_MEM | flags,
};
if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
kfree(dax_region);
return NULL;
rc = request_resource(&dax_regions, &dax_region->res);
if (rc) {
dev_dbg(parent, "dax_region resource conflict for %pR\n",
&dax_region->res);
goto err_res;
}
if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups))
goto err_sysfs;
if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
return NULL;
return dax_region;
err_sysfs:
release_resource(&dax_region->res);
err_res:
dax_region_put(dax_region);
return NULL;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);

View File

@ -3,7 +3,9 @@
#ifndef __DAX_BUS_H__
#define __DAX_BUS_H__
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/range.h>
#include <linux/workqueue.h>
struct dev_dax;
struct resource;
@ -49,6 +51,24 @@ void dax_driver_unregister(struct dax_device_driver *dax_drv);
void kill_dev_dax(struct dev_dax *dev_dax);
bool static_dev_dax(struct dev_dax *dev_dax);
/*
 * struct hmem_platform_device - platform device plus deferred-work state
 * used to coordinate Soft Reserved range ownership between dax_hmem and
 * dax_cxl.
 */
struct hmem_platform_device {
struct platform_device pdev;
struct work_struct work; /* deferred CXL-coordination pass */
bool did_probe; /* set once the deferred walk has run */
};

/* recover the wrapper from an embedded platform_device */
static inline struct hmem_platform_device *
to_hmem_platform_device(struct platform_device *pdev)
{
return container_of(pdev, struct hmem_platform_device, pdev);
}

/* stub out the flush when dax_hmem is not built */
#if IS_ENABLED(CONFIG_DEV_DAX_HMEM)
void dax_hmem_flush_work(void);
#else
static inline void dax_hmem_flush_work(void) { }
#endif
#define MODULE_ALIAS_DAX_DEVICE(type) \
MODULE_ALIAS("dax:t" __stringify(type) "*")
#define DAX_DEVICE_MODALIAS_FMT "dax:t%d"

View File

@ -38,10 +38,36 @@ static struct cxl_driver cxl_dax_region_driver = {
.id = CXL_DEVICE_DAX_REGION,
.drv = {
.suppress_bind_attrs = true,
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
module_cxl_driver(cxl_dax_region_driver);
/*
 * Work callback: wait for dax_hmem's pending work to drain, then register
 * the dax_region driver. Running this from a workqueue keeps module init
 * fast while still ordering after dax_hmem.
 */
static void cxl_dax_region_driver_register(struct work_struct *work)
{
dax_hmem_flush_work();
cxl_driver_register(&cxl_dax_region_driver);
}
static DECLARE_WORK(cxl_dax_region_driver_work, cxl_dax_region_driver_register);
static int __init cxl_dax_region_init(void)
{
/*
 * Need to resolve a race with dax_hmem wanting to drive regions
 * instead of CXL
 */
queue_work(system_long_wq, &cxl_dax_region_driver_work);
return 0;
}
module_init(cxl_dax_region_init);
static void __exit cxl_dax_region_exit(void)
{
/* ensure the deferred registration completed before unregistering */
flush_work(&cxl_dax_region_driver_work);
cxl_driver_unregister(&cxl_dax_region_driver);
}
module_exit(cxl_dax_region_exit);
MODULE_ALIAS_CXL(CXL_DEVICE_DAX_REGION);
MODULE_DESCRIPTION("CXL DAX: direct access to CXL regions");
MODULE_LICENSE("GPL");

View File

@ -4,6 +4,7 @@
#include <linux/module.h>
#include <linux/dax.h>
#include <linux/mm.h>
#include "../bus.h"
static bool nohmem;
module_param_named(disable, nohmem, bool, 0444);
@ -33,9 +34,21 @@ int walk_hmem_resources(struct device *host, walk_hmem_fn fn)
}
EXPORT_SYMBOL_GPL(walk_hmem_resources);
/* Initial no-op work callback; replaced when the dax_hmem driver binds */
static void hmem_work(struct work_struct *work)
{
/* place holder until dax_hmem driver attaches */
}

/* singleton hmem_platform device, statically allocated (id 0) */
static struct hmem_platform_device hmem_platform = {
.pdev = {
.name = "hmem_platform",
.id = 0,
},
.work = __WORK_INITIALIZER(hmem_platform.work, hmem_work),
};
static void __hmem_register_resource(int target_nid, struct resource *res)
{
struct platform_device *pdev;
struct resource *new;
int rc;
@ -51,17 +64,13 @@ static void __hmem_register_resource(int target_nid, struct resource *res)
if (platform_initialized)
return;
pdev = platform_device_alloc("hmem_platform", 0);
if (!pdev) {
rc = platform_device_register(&hmem_platform.pdev);
if (rc) {
pr_err_once("failed to register device-dax hmem_platform device\n");
return;
}
rc = platform_device_add(pdev);
if (rc)
platform_device_put(pdev);
else
platform_initialized = true;
platform_initialized = true;
}
void hmem_register_resource(int target_nid, struct resource *res)

View File

@ -3,6 +3,7 @@
#include <linux/memregion.h>
#include <linux/module.h>
#include <linux/dax.h>
#include "../../cxl/cxl.h"
#include "../bus.h"
static bool region_idle;
@ -58,21 +59,22 @@ static void release_hmem(void *pdev)
platform_device_unregister(pdev);
}
static int hmem_register_device(struct device *host, int target_nid,
const struct resource *res)
/* ordered workqueue hosting the deferred Soft Reserved coordination work */
static struct workqueue_struct *dax_hmem_wq;

/*
 * dax_hmem_flush_work() - drain all pending dax_hmem deferral work. Called
 * by dax_cxl before registering its region driver so ownership decisions
 * are settled first.
 */
void dax_hmem_flush_work(void)
{
flush_workqueue(dax_hmem_wq);
}
/* exported only for the dax_cxl module */
EXPORT_SYMBOL_FOR_MODULES(dax_hmem_flush_work, "dax_cxl");
static int __hmem_register_device(struct device *host, int target_nid,
const struct resource *res)
{
struct platform_device *pdev;
struct memregion_info info;
long id;
int rc;
if (IS_ENABLED(CONFIG_CXL_REGION) &&
region_intersects(res->start, resource_size(res), IORESOURCE_MEM,
IORES_DESC_CXL) != REGION_DISJOINT) {
dev_dbg(host, "deferring range to CXL: %pr\n", res);
return 0;
}
rc = region_intersects_soft_reserve(res->start, resource_size(res));
if (rc != REGION_INTERSECTS)
return 0;
@ -94,6 +96,7 @@ static int hmem_register_device(struct device *host, int target_nid,
return -ENOMEM;
}
pdev->dev.parent = host;
pdev->dev.numa_node = numa_map_to_online_node(target_nid);
info = (struct memregion_info) {
.target_node = target_nid,
@ -123,8 +126,74 @@ static int hmem_register_device(struct device *host, int target_nid,
return rc;
}
/*
 * Second-pass walk callback: register an hmem device for @res only when
 * CXL advertised the range (IORES_DESC_CXL) but no committed CXL region
 * ended up containing it -- i.e. CXL failed to claim it.
 */
static int hmem_register_cxl_device(struct device *host, int target_nid,
const struct resource *res)
{
/* ranges with no CXL intersection were handled in the first pass */
if (region_intersects(res->start, resource_size(res), IORESOURCE_MEM,
IORES_DESC_CXL) == REGION_DISJOINT)
return 0;
/* CXL successfully assembled a region covering @res; leave it alone */
if (cxl_region_contains_resource(res)) {
dev_dbg(host, "CXL claims resource, dropping: %pr\n", res);
return 0;
}
dev_dbg(host, "CXL did not claim resource, registering: %pr\n", res);
return __hmem_register_device(host, target_nid, res);
}
/*
 * Deferred work: after cxl_acpi/cxl_pci have had a chance to probe,
 * re-walk Soft Reserved ranges and register any CXL-advertised range
 * that CXL did not claim. Drops the device reference taken when the
 * work was queued by hmem_register_device().
 */
static void process_defer_work(struct work_struct *w)
{
struct hmem_platform_device *hpdev = container_of(w, typeof(*hpdev), work);
struct device *dev = &hpdev->pdev.dev;

/* Relies on cxl_acpi and cxl_pci having had a chance to load */
wait_for_device_probe();

/* hold the device lock while checking ->driver and walking resources */
guard(device)(dev);
if (!dev->driver)
goto out;

/* did_probe guards against re-running the walk on rebind */
if (!hpdev->did_probe) {
hpdev->did_probe = true;
walk_hmem_resources(dev, hmem_register_cxl_device);
}

out:
/* balance the get_device() from hmem_register_device() */
put_device(dev);
}
/*
 * First-pass walk callback: register non-CXL Soft Reserved ranges
 * immediately; for CXL-advertised ranges, defer the decision once via
 * process_defer_work() so CXL region assembly can be attempted first.
 */
static int hmem_register_device(struct device *host, int target_nid,
const struct resource *res)
{
struct platform_device *pdev = to_platform_device(host);
struct hmem_platform_device *hpdev = to_hmem_platform_device(pdev);

/* only defer when dax_cxl is built and the range is CXL-advertised */
if (IS_ENABLED(CONFIG_DEV_DAX_CXL) &&
region_intersects(res->start, resource_size(res), IORESOURCE_MEM,
IORES_DESC_CXL) != REGION_DISJOINT) {
if (!hpdev->did_probe) {
dev_dbg(host, "await CXL initial probe: %pr\n", res);
hpdev->work.func = process_defer_work;
/* reference is dropped in process_defer_work() */
get_device(host);
/* queue_work returns false if already queued; drop extra ref */
if (!queue_work(dax_hmem_wq, &hpdev->work))
put_device(host);
return 0;
}
/* deferral already resolved once; CXL keeps the range */
dev_dbg(host, "deferring range to CXL: %pr\n", res);
return 0;
}

return __hmem_register_device(host, target_nid, res);
}
/*
 * Probe for the hmem_platform device: walk Soft Reserved ranges and
 * register hmem devices (possibly deferring CXL ranges). Refuses to
 * rebind while deferred work is still pending.
 */
static int dax_hmem_platform_probe(struct platform_device *pdev)
{
struct hmem_platform_device *hpdev = to_hmem_platform_device(pdev);

/* queue is only flushed on module unload, fail rebind with pending work */
if (work_pending(&hpdev->work))
return -EBUSY;

return walk_hmem_resources(&pdev->dev, hmem_register_device);
}
@ -139,13 +208,34 @@ static __init int dax_hmem_init(void)
{
int rc;
/*
* Ensure that cxl_acpi and cxl_pci have a chance to kick off
* CXL topology discovery at least once before scanning the
* iomem resource tree for IORES_DESC_CXL resources.
*/
if (IS_ENABLED(CONFIG_DEV_DAX_CXL)) {
request_module("cxl_acpi");
request_module("cxl_pci");
}
dax_hmem_wq = alloc_ordered_workqueue("dax_hmem_wq", 0);
if (!dax_hmem_wq)
return -ENOMEM;
rc = platform_driver_register(&dax_hmem_platform_driver);
if (rc)
return rc;
goto err_platform_driver;
rc = platform_driver_register(&dax_hmem_driver);
if (rc)
platform_driver_unregister(&dax_hmem_platform_driver);
goto err_driver;
return 0;
err_driver:
platform_driver_unregister(&dax_hmem_platform_driver);
err_platform_driver:
destroy_workqueue(dax_hmem_wq);
return rc;
}
@ -154,18 +244,12 @@ static __exit void dax_hmem_exit(void)
{
platform_driver_unregister(&dax_hmem_driver);
platform_driver_unregister(&dax_hmem_platform_driver);
destroy_workqueue(dax_hmem_wq);
}
module_init(dax_hmem_init);
module_exit(dax_hmem_exit);
/* Allow for CXL to define its own dax regions */
#if IS_ENABLED(CONFIG_CXL_REGION)
#if IS_MODULE(CONFIG_CXL_ACPI)
MODULE_SOFTDEP("pre: cxl_acpi");
#endif
#endif
MODULE_ALIAS("platform:hmem*");
MODULE_ALIAS("platform:hmem_platform*");
MODULE_DESCRIPTION("HMEM DAX: direct access to 'specific purpose' memory");

View File

@ -11,8 +11,12 @@ ldflags-y += --wrap=devm_cxl_endpoint_decoders_setup
ldflags-y += --wrap=hmat_get_extended_linear_cache_size
ldflags-y += --wrap=devm_cxl_add_dport_by_dev
ldflags-y += --wrap=devm_cxl_switch_port_decoders_setup
ldflags-y += --wrap=walk_hmem_resources
ldflags-y += --wrap=region_intersects
ldflags-y += --wrap=region_intersects_soft_reserve
DRIVERS := ../../../drivers
DAX_HMEM_SRC := $(DRIVERS)/dax/hmem
CXL_SRC := $(DRIVERS)/cxl
CXL_CORE_SRC := $(DRIVERS)/cxl/core
ccflags-y := -I$(srctree)/drivers/cxl/
@ -70,6 +74,9 @@ cxl_core-y += config_check.o
cxl_core-y += cxl_core_test.o
cxl_core-y += cxl_core_exports.o
obj-m += dax_hmem.o
dax_hmem-y := $(DAX_HMEM_SRC)/hmem.o
KBUILD_CFLAGS := $(filter-out -Wmissing-prototypes -Wmissing-declarations, $(KBUILD_CFLAGS))
obj-m += test/

View File

@ -7,6 +7,7 @@ obj-m += cxl_mock_mem.o
obj-m += cxl_translate.o
cxl_test-y := cxl.o
cxl_test-y += hmem_test.o
cxl_mock-y := mock.o
cxl_mock_mem-y := mem.o

View File

@ -16,6 +16,7 @@
static int interleave_arithmetic;
static bool extended_linear_cache;
static bool fail_autoassemble;
#define FAKE_QTG_ID 42
@ -1135,6 +1136,12 @@ static bool mock_init_hdm_decoder(struct cxl_decoder *cxld)
return false;
}
/* Simulate missing cxl_mem.4 configuration */
if (hb0 && pdev->id == 4 && cxld->id == 0 && fail_autoassemble) {
default_mock_decoder(cxld);
return false;
}
base = window->base_hpa;
if (extended_linear_cache)
base += mock_auto_region_size;
@ -1436,6 +1443,53 @@ static void mock_cxl_endpoint_parse_cdat(struct cxl_port *port)
cxl_endpoint_get_perf_coordinates(port, ep_c);
}
/*
 * Simulate that the first half of mock CXL Window 0 is "Soft Reserve" capacity
 * by invoking @fn once with a synthetic resource covering that half.
 */
static int mock_walk_hmem_resources(struct device *host, walk_hmem_fn fn)
{
struct acpi_cedt_cfmws *cfmws = mock_cfmws[0];
struct resource window =
DEFINE_RES_MEM(cfmws->base_hpa, cfmws->window_size / 2);

dev_dbg(host, "walk cxl_test resource: %pr\n", &window);
/* target_nid 0 is arbitrary for the mock environment */
return fn(host, 0, &window);
}
/*
 * This should only be called by the dax_hmem case, treat mismatches (negative
 * result) as "fallback to base region_intersects()". Simulate that the first
 * half of mock CXL Window 0 is IORES_DESC_CXL capacity.
 */
static int mock_region_intersects(resource_size_t start, size_t size,
unsigned long flags, unsigned long desc)
{
struct resource res = DEFINE_RES_MEM(start, size);
struct acpi_cedt_cfmws *cfmws = mock_cfmws[0];
struct resource window =
DEFINE_RES_MEM(cfmws->base_hpa, cfmws->window_size / 2);

if (resource_overlaps(&res, &window))
return REGION_INTERSECTS;

/* negative return tells the __wrap_ layer to use the real helper */
pr_debug("warning: no cxl_test CXL intersection for %pr\n", &res);
return -1;
}
/*
 * Mock Soft Reserve lookup: reports intersection for the first half of
 * mock CXL Window 0; negative result means "fall back to the real
 * region_intersects_soft_reserve()" in the wrap layer.
 */
static int
mock_region_intersects_soft_reserve(resource_size_t start, size_t size)
{
struct resource res = DEFINE_RES_MEM(start, size);
struct acpi_cedt_cfmws *cfmws = mock_cfmws[0];
struct resource window =
DEFINE_RES_MEM(cfmws->base_hpa, cfmws->window_size / 2);

if (resource_overlaps(&res, &window))
return REGION_INTERSECTS;

pr_debug("warning: no cxl_test soft reserve intersection for %pr\n", &res);
return -1;
}
static struct cxl_mock_ops cxl_mock_ops = {
.is_mock_adev = is_mock_adev,
.is_mock_bridge = is_mock_bridge,
@ -1451,6 +1505,9 @@ static struct cxl_mock_ops cxl_mock_ops = {
.devm_cxl_add_dport_by_dev = mock_cxl_add_dport_by_dev,
.hmat_get_extended_linear_cache_size =
mock_hmat_get_extended_linear_cache_size,
.walk_hmem_resources = mock_walk_hmem_resources,
.region_intersects = mock_region_intersects,
.region_intersects_soft_reserve = mock_region_intersects_soft_reserve,
.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};
@ -1904,8 +1961,14 @@ static __init int cxl_test_init(void)
if (rc)
goto err_root;
rc = hmem_test_init();
if (rc)
goto err_mem;
return 0;
err_mem:
cxl_mem_exit();
err_root:
platform_device_put(cxl_acpi);
err_rch:
@ -1954,6 +2017,7 @@ static __exit void cxl_test_exit(void)
{
int i;
hmem_test_exit();
cxl_mem_exit();
platform_device_unregister(cxl_acpi);
cxl_rch_topo_exit();
@ -1983,6 +2047,8 @@ module_param(interleave_arithmetic, int, 0444);
MODULE_PARM_DESC(interleave_arithmetic, "Modulo:0, XOR:1");
module_param(extended_linear_cache, bool, 0444);
MODULE_PARM_DESC(extended_linear_cache, "Enable extended linear cache support");
module_param(fail_autoassemble, bool, 0444);
MODULE_PARM_DESC(fail_autoassemble, "Simulate missing member of an auto-region");
module_init(cxl_test_init);
module_exit(cxl_test_exit);
MODULE_LICENSE("GPL v2");

View File

@ -0,0 +1,47 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2026 Intel Corporation */
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include "../../../drivers/dax/bus.h"
/* opt-in: the test device is only registered when hmem_test=1 */
static bool hmem_test;

/* no-op work callback; the device never queues real deferral work */
static void hmem_test_work(struct work_struct *work)
{
}

/*
 * Release callback for the statically allocated test device: scrub the
 * struct so a subsequent module load starts from a clean slate.
 */
static void hmem_test_release(struct device *dev)
{
struct hmem_platform_device *hpdev =
container_of(dev, typeof(*hpdev), pdev.dev);

memset(hpdev, 0, sizeof(*hpdev));
}

/* second hmem_platform instance (id 1) so it coexists with the real id 0 */
static struct hmem_platform_device hmem_test_device = {
.pdev = {
.name = "hmem_platform",
.id = 1,
.dev = {
.release = hmem_test_release,
},
},
.work = __WORK_INITIALIZER(hmem_test_device.work, hmem_test_work),
};

/* register the test device when enabled; no-op (success) otherwise */
int hmem_test_init(void)
{
if (!hmem_test)
return 0;
return platform_device_register(&hmem_test_device.pdev);
}

/* tear down the test device iff it was registered */
void hmem_test_exit(void)
{
if (hmem_test)
platform_device_unregister(&hmem_test_device.pdev);
}

module_param(hmem_test, bool, 0444);
MODULE_PARM_DESC(hmem_test, "Enable/disable the dax_hmem test platform device");

View File

@ -1695,6 +1695,9 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
struct cxl_dpa_info range_info = { 0 };
int rc;
/* Increase async probe race window */
usleep_range(500*1000, 1000*1000);
mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
if (!mdata)
return -ENOMEM;

View File

@ -251,6 +251,56 @@ struct cxl_dport *__wrap_devm_cxl_add_dport_by_dev(struct cxl_port *port,
}
EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_add_dport_by_dev, "CXL");
/*
 * Linker-wrapped region_intersects(): consult the registered mock ops
 * first; a negative mock result means "no opinion", fall through to the
 * real implementation.
 */
int __wrap_region_intersects(resource_size_t start, size_t size,
unsigned long flags, unsigned long desc)
{
int rc = -1;
int index;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);

if (ops)
rc = ops->region_intersects(start, size, flags, desc);
if (rc < 0)
rc = region_intersects(start, size, flags, desc);

put_cxl_mock_ops(index);

return rc;
}
EXPORT_SYMBOL_GPL(__wrap_region_intersects);
/*
 * Linker-wrapped region_intersects_soft_reserve(): same mock-first,
 * fall-back-to-real pattern as __wrap_region_intersects().
 */
int __wrap_region_intersects_soft_reserve(resource_size_t start, size_t size)
{
int rc = -1;
int index;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);

if (ops)
rc = ops->region_intersects_soft_reserve(start, size);
if (rc < 0)
rc = region_intersects_soft_reserve(start, size);

put_cxl_mock_ops(index);

return rc;
}
EXPORT_SYMBOL_GPL(__wrap_region_intersects_soft_reserve);
/*
 * Linker-wrapped walk_hmem_resources(): route the walk through the mock
 * ops only for the dedicated test device ("hmem_platform.1"); all other
 * hosts use the real implementation.
 */
int __wrap_walk_hmem_resources(struct device *host, walk_hmem_fn fn)
{
int index, rc = 0;
/* the cxl_test device is registered with platform id 1 */
bool is_mock = strcmp(dev_name(host), "hmem_platform.1") == 0;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);

if (is_mock) {
if (ops)
rc = ops->walk_hmem_resources(host, fn);
} else {
rc = walk_hmem_resources(host, fn);
}
put_cxl_mock_ops(index);

return rc;
}
EXPORT_SYMBOL_GPL(__wrap_walk_hmem_resources);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("cxl_test: emulation module");
MODULE_IMPORT_NS("ACPI");

View File

@ -2,6 +2,7 @@
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/dax.h>
#include <cxl.h>
struct cxl_mock_ops {
@ -27,8 +28,15 @@ struct cxl_mock_ops {
int (*hmat_get_extended_linear_cache_size)(struct resource *backing_res,
int nid,
resource_size_t *cache_size);
int (*walk_hmem_resources)(struct device *host, walk_hmem_fn fn);
int (*region_intersects)(resource_size_t start, size_t size,
unsigned long flags, unsigned long desc);
int (*region_intersects_soft_reserve)(resource_size_t start,
size_t size);
};
int hmem_test_init(void);
void hmem_test_exit(void);
void register_cxl_mock_ops(struct cxl_mock_ops *ops);
void unregister_cxl_mock_ops(struct cxl_mock_ops *ops);
struct cxl_mock_ops *get_cxl_mock_ops(int *index);