panthor: use drm_gem_object.gpuva.lock instead of gpuva_list_lock

Now that drm_gem_object has a dedicated mutex for the gpuva list that is
intended to be used in cases that must be fence signalling safe, use it
in Panthor.

Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Alice Ryhl <aliceryhl@google.com>
Link: https://lore.kernel.org/r/20250827-gpuva-mutex-in-gem-v3-2-bd89f5a82c0d@google.com
Signed-off-by: Danilo Krummrich <dakr@kernel.org>
Alice Ryhl, 2025-08-27 13:38:38 +00:00, committed by Danilo Krummrich
parent e7fa80e293
commit 69013f52b4
3 changed files with 9 additions and 23 deletions
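
For context, the locking pattern this change switches to looks roughly like the sketch below. It is a minimal illustration, not code from the patch: the helper name link_vma_fence_safe() and its parameters are made up, while drm_gpuva_link(), struct drm_gpuvm_bo and the per-GEM gpuva.lock mutex come from drm_gpuvm/drm_gem (gpuva.lock being the mutex added by the parent commit of this series). The point is that drm_gpuva_link() may be called from a dma-fence signalling path, where taking the GEM's dma_resv lock is not allowed, so the gpuva list is protected by the dedicated mutex instead.

#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>

/*
 * Minimal sketch (hypothetical helper, not part of the patch): link a VMA
 * to its GEM's gpuva list from a path that may run in dma-fence signalling
 * context. A plain mutex is safe to take here; dma_resv_lock() is not.
 */
static void link_vma_fence_safe(struct drm_gpuvm_bo *vm_bo, struct drm_gpuva *va)
{
	struct drm_gem_object *obj = vm_bo->obj;

	mutex_lock(&obj->gpuva.lock);	/* common per-GEM gpuva list lock */
	drm_gpuva_link(va, vm_bo);	/* insert va into obj's gpuva list */
	mutex_unlock(&obj->gpuva.lock);
}

Compare with panthor_vma_link() in the panthor_mmu.c hunks below, which now does the same thing under bo->base.base.gpuva.lock instead of the driver-private gpuva_list_lock.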

drivers/gpu/drm/panthor/panthor_gem.c

@@ -74,7 +74,6 @@ static void panthor_gem_free_object(struct drm_gem_object *obj)
 	mutex_destroy(&bo->label.lock);
 	drm_gem_free_mmap_offset(&bo->base.base);
-	mutex_destroy(&bo->gpuva_list_lock);
 	drm_gem_shmem_free(&bo->base);
 	drm_gem_object_put(vm_root_gem);
 }
@@ -246,8 +245,7 @@ struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t
 	obj->base.base.funcs = &panthor_gem_funcs;
 	obj->base.map_wc = !ptdev->coherent;
-	mutex_init(&obj->gpuva_list_lock);
-	drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock);
+	drm_gem_gpuva_set_lock(&obj->base.base, &obj->base.base.gpuva.lock);
 	mutex_init(&obj->label.lock);
 	panthor_gem_debugfs_bo_init(obj);

drivers/gpu/drm/panthor/panthor_gem.h

@@ -79,18 +79,6 @@ struct panthor_gem_object {
 	 */
 	struct drm_gem_object *exclusive_vm_root_gem;
 
-	/**
-	 * @gpuva_list_lock: Custom GPUVA lock.
-	 *
-	 * Used to protect insertion of drm_gpuva elements to the
-	 * drm_gem_object.gpuva.list list.
-	 *
-	 * We can't use the GEM resv for that, because drm_gpuva_link() is
-	 * called in a dma-signaling path, where we're not allowed to take
-	 * resv locks.
-	 */
-	struct mutex gpuva_list_lock;
-
 	/** @flags: Combination of drm_panthor_bo_flags flags. */
 	u32 flags;

drivers/gpu/drm/panthor/panthor_mmu.c

@@ -1107,9 +1107,9 @@ static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
 	 * GEM vm_bo list.
 	 */
 	dma_resv_lock(drm_gpuvm_resv(vm), NULL);
-	mutex_lock(&bo->gpuva_list_lock);
+	mutex_lock(&bo->base.base.gpuva.lock);
 	unpin = drm_gpuvm_bo_put(vm_bo);
-	mutex_unlock(&bo->gpuva_list_lock);
+	mutex_unlock(&bo->base.base.gpuva.lock);
 	dma_resv_unlock(drm_gpuvm_resv(vm));
 
 	/* If the vm_bo object was destroyed, release the pin reference that
@@ -1282,9 +1282,9 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
 	 * calling this function.
 	 */
 	dma_resv_lock(panthor_vm_resv(vm), NULL);
-	mutex_lock(&bo->gpuva_list_lock);
+	mutex_lock(&bo->base.base.gpuva.lock);
 	op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
-	mutex_unlock(&bo->gpuva_list_lock);
+	mutex_unlock(&bo->base.base.gpuva.lock);
 	dma_resv_unlock(panthor_vm_resv(vm));
 
 	/* If the a vm_bo for this <VM,BO> combination exists, it already
@@ -2036,10 +2036,10 @@ static void panthor_vma_link(struct panthor_vm *vm,
 {
 	struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
 
-	mutex_lock(&bo->gpuva_list_lock);
+	mutex_lock(&bo->base.base.gpuva.lock);
 	drm_gpuva_link(&vma->base, vm_bo);
 	drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo));
-	mutex_unlock(&bo->gpuva_list_lock);
+	mutex_unlock(&bo->base.base.gpuva.lock);
 }
 
 static void panthor_vma_unlink(struct panthor_vm *vm,
@@ -2048,9 +2048,9 @@ static void panthor_vma_unlink(struct panthor_vm *vm,
 	struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
 	struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo);
 
-	mutex_lock(&bo->gpuva_list_lock);
+	mutex_lock(&bo->base.base.gpuva.lock);
 	drm_gpuva_unlink(&vma->base);
-	mutex_unlock(&bo->gpuva_list_lock);
+	mutex_unlock(&bo->base.base.gpuva.lock);
 
 	/* drm_gpuva_unlink() release the vm_bo, but we manually retained it
 	 * when entering this function, so we can implement deferred VMA