drm/msm: Support pgtable preallocation

Introduce a mechanism to count the worst-case number of pages required by a
VM_BIND op.

Note that previously we would have had to somehow account for
allocations in unmap, when splitting a block.  This behavior was removed
in commit 33729a5fc0 ("iommu/io-pgtable-arm: Remove split on unmap
behavior").
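
As a worked example (hypothetical numbers): a 4MB mapping starting at a
2MB-aligned iova that does not cross a 1GB or 512GB boundary touches one
512GB region, one 1GB region and two 2MB regions, so at most
1 + 1 + 2 = 4 page-table pages are reserved for the op (the root table
already exists and is not counted).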

Signed-off-by: Rob Clark <robdclark@chromium.org>
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
Tested-by: Antonino Maniscalco <antomani103@gmail.com>
Reviewed-by: Antonino Maniscalco <antomani103@gmail.com>
Patchwork: https://patchwork.freedesktop.org/patch/661515/
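
To illustrate the intended flow, here is a minimal sketch (not part of the
patch; the function name is hypothetical and locking/error handling are
simplified) of how a VM_BIND operation would drive the new hooks:

	static int example_vm_bind_map(struct msm_mmu *mmu, uint64_t iova,
				       struct sg_table *sgt, size_t len, int prot)
	{
		struct msm_mmu_prealloc p = {};
		int ret;

		/* Worst-case number of page-table pages this map could need: */
		mmu->funcs->prealloc_count(mmu, &p, iova, len);

		/* Reserve them up front, where failure can still be reported: */
		ret = mmu->funcs->prealloc_allocate(mmu, &p);
		if (ret)
			return ret;	/* error unwinding elided in this sketch */

		/* Make the reservation visible to the custom io-pgtable allocator
		 * (serialization against other jobs elided):
		 */
		mmu->prealloc = &p;
		ret = mmu->funcs->map(mmu, iova, sgt, 0, len, prot);
		mmu->prealloc = NULL;

		/* Return any unused pages to the pt_cache: */
		mmu->funcs->prealloc_cleanup(mmu, &p);

		return ret;
	}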

drivers/gpu/drm/msm/msm_gem.h

@@ -7,6 +7,7 @@
#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__
#include "msm_mmu.h"
#include <linux/kref.h>
#include <linux/dma-resv.h>
#include "drm/drm_exec.h"

drivers/gpu/drm/msm/msm_iommu.c

@@ -6,6 +6,7 @@
#include <linux/adreno-smmu-priv.h>
#include <linux/io-pgtable.h>
#include <linux/kmemleak.h>
#include "msm_drv.h"
#include "msm_mmu.h"
@@ -14,6 +15,8 @@ struct msm_iommu {
struct iommu_domain *domain;
atomic_t pagetables;
struct page *prr_page;
struct kmem_cache *pt_cache;
};
#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
@@ -27,6 +30,9 @@ struct msm_iommu_pagetable {
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
phys_addr_t ttbr;
u32 asid;
/** @root_page_table: Stores the root page table pointer. */
void *root_page_table;
};
static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
{
@@ -282,7 +288,145 @@ msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[
return 0;
}
static void
msm_iommu_pagetable_prealloc_count(struct msm_mmu *mmu, struct msm_mmu_prealloc *p,
uint64_t iova, size_t len)
{
u64 pt_count;
/*
* L1, L2 and L3 page tables.
*
* We could optimize L3 allocation by iterating over the sgt and merging
* 2M contiguous blocks, but it's simpler to over-provision and return
* the pages if they're not used.
*
* The first level descriptor (v8 / v7-lpae page table format) encodes
* 30 bits of address. The second level encodes 29. For the 3rd it is
* 39.
*
* https://developer.arm.com/documentation/ddi0406/c/System-Level-Architecture/Virtual-Memory-System-Architecture--VMSA-/Long-descriptor-translation-table-format/Long-descriptor-translation-table-format-descriptors?lang=en#BEIHEFFB
*/
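/*
 * Each term below counts one level of tables: a table is needed for
 * every naturally aligned 512GB, 1GB or 2MB region that the range
 * [iova, iova + len) touches.
 */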
pt_count = ((ALIGN(iova + len, 1ull << 39) - ALIGN_DOWN(iova, 1ull << 39)) >> 39) +
((ALIGN(iova + len, 1ull << 30) - ALIGN_DOWN(iova, 1ull << 30)) >> 30) +
((ALIGN(iova + len, 1ull << 21) - ALIGN_DOWN(iova, 1ull << 21)) >> 21);
p->count += pt_count;
}
static struct kmem_cache *
get_pt_cache(struct msm_mmu *mmu)
{
struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
return to_msm_iommu(pagetable->parent)->pt_cache;
}
static int
msm_iommu_pagetable_prealloc_allocate(struct msm_mmu *mmu, struct msm_mmu_prealloc *p)
{
struct kmem_cache *pt_cache = get_pt_cache(mmu);
int ret;
p->pages = kvmalloc_array(p->count, sizeof(p->pages), GFP_KERNEL);
if (!p->pages)
return -ENOMEM;
ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages);
if (ret != p->count) {
p->count = ret;
return -ENOMEM;
}
return 0;
}
static void
msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_prealloc *p)
{
struct kmem_cache *pt_cache = get_pt_cache(mmu);
uint32_t remaining_pt_count = p->count - p->ptr;
kmem_cache_free_bulk(pt_cache, remaining_pt_count, &p->pages[p->ptr]);
kvfree(p->pages);
}
/**
* msm_iommu_pagetable_alloc_pt() - Custom page table allocator
* @cookie: Cookie passed at page table allocation time.
* @size: Size of the page table. This size should be fixed,
* and determined at creation time based on the granule size.
* @gfp: GFP flags.
*
* We want a custom allocator so we can use a cache for page table
* allocations and amortize the cost of the over-reservation that's
* done to allow asynchronous VM operations.
*
* Return: non-NULL on success, NULL if the allocation failed for any
* reason.
*/
static void *
msm_iommu_pagetable_alloc_pt(void *cookie, size_t size, gfp_t gfp)
{
struct msm_iommu_pagetable *pagetable = cookie;
struct msm_mmu_prealloc *p = pagetable->base.prealloc;
void *page;
/* Allocation of the root page table happens during init. */
if (unlikely(!pagetable->root_page_table)) {
struct page *p;
p = alloc_pages_node(dev_to_node(pagetable->iommu_dev),
gfp | __GFP_ZERO, get_order(size));
page = p ? page_address(p) : NULL;
pagetable->root_page_table = page;
return page;
}
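/* Every other table comes out of the per-op preallocated pool: */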
if (WARN_ON(!p) || WARN_ON(p->ptr >= p->count))
return NULL;
page = p->pages[p->ptr++];
memset(page, 0, size);
/*
* Page table entries don't use virtual addresses, which trips out
* kmemleak. kmemleak_alloc_phys() might work, but physical addresses
* are mixed with other fields, and I fear kmemleak won't detect that
* either.
*
* Let's just ignore memory passed to the page-table driver for now.
*/
kmemleak_ignore(page);
return page;
}
/**
* msm_iommu_pagetable_free_pt() - Custom page table free function
* @cookie: Cookie passed at page table allocation time.
* @data: Page table to free.
* @size: Size of the page table. This size should be fixed,
* and determined at creation time based on the granule size.
*/
static void
msm_iommu_pagetable_free_pt(void *cookie, void *data, size_t size)
{
struct msm_iommu_pagetable *pagetable = cookie;
if (unlikely(pagetable->root_page_table == data)) {
free_pages((unsigned long)data, get_order(size));
pagetable->root_page_table = NULL;
return;
}
kmem_cache_free(get_pt_cache(&pagetable->base), data);
}
static const struct msm_mmu_funcs pagetable_funcs = {
.prealloc_count = msm_iommu_pagetable_prealloc_count,
.prealloc_allocate = msm_iommu_pagetable_prealloc_allocate,
.prealloc_cleanup = msm_iommu_pagetable_prealloc_cleanup,
.map = msm_iommu_pagetable_map,
.unmap = msm_iommu_pagetable_unmap,
.destroy = msm_iommu_pagetable_destroy,
@@ -333,6 +477,17 @@ static const struct iommu_flush_ops tlb_ops = {
static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *arg);
static size_t get_tblsz(const struct io_pgtable_cfg *cfg)
{
int pg_shift, bits_per_level;
pg_shift = __ffs(cfg->pgsize_bitmap);
/* arm_lpae_iopte is u64: */
bits_per_level = pg_shift - ilog2(sizeof(u64));
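/* e.g. a 4K granule gives pg_shift = 12, bits_per_level = 9, i.e. 512 u64 entries (4KB) per table */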
return sizeof(u64) << bits_per_level;
}
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
@@ -369,8 +524,34 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_m
if (!kernel_managed) {
ttbr0_cfg.quirks |= IO_PGTABLE_QUIRK_NO_WARN;
/*
* With userspace managed VM (aka VM_BIND), we need to pre-
* allocate pages ahead of time for map/unmap operations,
* handing them to io-pgtable via custom alloc/free ops as
* needed:
*/
ttbr0_cfg.alloc = msm_iommu_pagetable_alloc_pt;
ttbr0_cfg.free = msm_iommu_pagetable_free_pt;
/*
* Restrict to single page granules. Otherwise we may run
* into a situation where userspace wants to unmap/remap
* only a part of a larger block mapping, which is not
* possible without unmapping the entire block. Which in
* turn could cause faults if the GPU is accessing other
* parts of the block mapping.
*
* Note that prior to commit 33729a5fc0ca ("iommu/io-pgtable-arm:
* Remove split on unmap behavior") this was handled in
* io-pgtable-arm. But this apparently does not work
* correctly on SMMUv3.
*/
WARN_ON(!(ttbr0_cfg.pgsize_bitmap & PAGE_SIZE));
ttbr0_cfg.pgsize_bitmap = PAGE_SIZE;
}
pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
&ttbr0_cfg, pagetable);
@@ -414,7 +595,6 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_m
/* Needed later for TLB flush */
pagetable->parent = parent;
pagetable->tlb = ttbr1_cfg->tlb;
pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
@@ -510,6 +690,7 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
iommu_domain_free(iommu->domain);
kmem_cache_destroy(iommu->pt_cache);
kfree(iommu);
}
@@ -583,6 +764,14 @@ struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsig
return mmu;
iommu = to_msm_iommu(mmu);
if (adreno_smmu && adreno_smmu->cookie) {
const struct io_pgtable_cfg *cfg =
adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
size_t tblsz = get_tblsz(cfg);
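/*
 * tblsz doubles as the alignment: page-table pages must stay
 * naturally aligned for the ARM LPAE table walker.
 */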
iommu->pt_cache =
kmem_cache_create("msm-mmu-pt", tblsz, tblsz, 0, NULL);
}
iommu_set_fault_handler(iommu->domain, msm_gpu_fault_handler, iommu);
/* Enable stall on iommu fault: */

drivers/gpu/drm/msm/msm_mmu.h

@@ -9,8 +9,16 @@
#include <linux/iommu.h>
struct msm_mmu_prealloc;
struct msm_mmu;
struct msm_gpu;
struct msm_mmu_funcs {
void (*detach)(struct msm_mmu *mmu);
void (*prealloc_count)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p,
uint64_t iova, size_t len);
int (*prealloc_allocate)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
void (*prealloc_cleanup)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p);
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
size_t off, size_t len, int prot);
int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
@@ -24,12 +32,38 @@ enum msm_mmu_type {
MSM_MMU_IOMMU_PAGETABLE,
};
/**
* struct msm_mmu_prealloc - Tracking for pre-allocated pages for MMU updates.
*/
struct msm_mmu_prealloc {
/** @count: Number of pages reserved. */
uint32_t count;
/** @ptr: Index of first unused page in @pages */
uint32_t ptr;
/**
* @pages: Array of pages preallocated for MMU table updates.
*
* After a VM operation, there might be free pages remaining in this
* array (since the amount allocated is a worst-case). These are
* returned to the pt_cache at mmu->prealloc_cleanup().
*/
void **pages;
};
struct msm_mmu {
const struct msm_mmu_funcs *funcs;
struct device *dev;
int (*handler)(void *arg, unsigned long iova, int flags, void *data);
void *arg;
enum msm_mmu_type type;
/**
* @prealloc: pre-allocated pages for pgtable
*
* Set while a VM_BIND job is running, serialized under
* msm_gem_vm::mmu_lock.
*/
struct msm_mmu_prealloc *prealloc;
};
static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,