drm/amdgpu/userq: fix access to stale wptr mapping

Use drm_exec to take both locks, i.e. the vm root bo lock and the
wptr_obj bo lock, so the mapping data is accessed properly.

This fixes a security issue where the wptr_obj could be unmapped
while queue creation was in progress, allowing a different bo to be
substituted at the same address.

Signed-off-by: Sunil Khatri <sunil.khatri@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
(cherry picked from commit 1fc6c8ab45dbee096469c08c13f6099d57a52d6c)
Cc: stable@vger.kernel.org
This commit is contained in:
Sunil Khatri 2026-05-04 18:21:17 +05:30 committed by Alex Deucher
parent 81665e35f1
commit 6da7b1242d

View File

@ -30,34 +30,6 @@
#define AMDGPU_USERQ_PROC_CTX_SZ PAGE_SIZE
#define AMDGPU_USERQ_GANG_CTX_SZ PAGE_SIZE
static int
mes_userq_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
{
	int ret;

	/* Reserve the bo so the GART binding below is done under the
	 * bo reservation lock.
	 */
	ret = amdgpu_bo_reserve(bo, true);
	if (ret) {
		DRM_ERROR("Failed to reserve bo. ret %d\n", ret);
		goto err_reserve_bo_failed;
	}

	/* Bind the bo's backing pages into the GART address space. */
	ret = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (ret) {
		DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
		goto err_map_bo_gart_failed;
	}

	amdgpu_bo_unreserve(bo);

	/* Take a reference to keep the bo alive while it stays bound to
	 * GART. amdgpu_bo_ref() returns its argument, so the original
	 * "bo = amdgpu_bo_ref(bo);" was a dead store to the local
	 * parameter; only the refcount side effect is needed.
	 */
	amdgpu_bo_ref(bo);
	return 0;

err_map_bo_gart_failed:
	amdgpu_bo_unreserve(bo);
err_reserve_bo_failed:
	return ret;
}
static int
mes_userq_create_wptr_mapping(struct amdgpu_device *adev,
struct amdgpu_userq_mgr *uq_mgr,
@ -65,55 +37,62 @@ mes_userq_create_wptr_mapping(struct amdgpu_device *adev,
uint64_t wptr)
{
struct amdgpu_bo_va_mapping *wptr_mapping;
struct amdgpu_vm *wptr_vm;
struct amdgpu_userq_obj *wptr_obj = &queue->wptr_obj;
struct amdgpu_bo *obj;
struct amdgpu_vm *vm = queue->vm;
struct drm_exec exec;
int ret;
wptr_vm = queue->vm;
ret = amdgpu_bo_reserve(wptr_vm->root.bo, false);
if (ret)
return ret;
wptr &= AMDGPU_GMC_HOLE_MASK;
wptr_mapping = amdgpu_vm_bo_lookup_mapping(wptr_vm, wptr >> PAGE_SHIFT);
amdgpu_bo_unreserve(wptr_vm->root.bo);
if (!wptr_mapping) {
DRM_ERROR("Failed to lookup wptr bo\n");
return -EINVAL;
drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 2);
drm_exec_until_all_locked(&exec) {
ret = amdgpu_vm_lock_pd(vm, &exec, 1);
drm_exec_retry_on_contention(&exec);
if (unlikely(ret))
goto fail_lock;
wptr_mapping = amdgpu_vm_bo_lookup_mapping(vm, wptr >> PAGE_SHIFT);
if (!wptr_mapping) {
ret = -EINVAL;
goto fail_lock;
}
obj = wptr_mapping->bo_va->base.bo;
ret = drm_exec_lock_obj(&exec, &obj->tbo.base);
drm_exec_retry_on_contention(&exec);
if (unlikely(ret))
goto fail_lock;
}
wptr_obj->obj = wptr_mapping->bo_va->base.bo;
wptr_obj->obj = amdgpu_bo_ref(wptr_mapping->bo_va->base.bo);
if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) {
DRM_ERROR("Requested GART mapping for wptr bo larger than one page\n");
return -EINVAL;
}
ret = mes_userq_map_gtt_bo_to_gart(wptr_obj->obj);
if (ret) {
DRM_ERROR("Failed to map wptr bo to GART\n");
return ret;
}
ret = amdgpu_bo_reserve(wptr_obj->obj, true);
if (ret) {
DRM_ERROR("Failed to reserve wptr bo\n");
return ret;
ret = -EINVAL;
goto fail_map;
}
/* TODO use eviction fence instead of pinning. */
ret = amdgpu_bo_pin(wptr_obj->obj, AMDGPU_GEM_DOMAIN_GTT);
if (ret) {
drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin wptr bo\n");
goto unresv_bo;
DRM_ERROR("Failed to pin wptr bo. ret %d\n", ret);
goto fail_map;
}
ret = amdgpu_ttm_alloc_gart(&wptr_obj->obj->tbo);
if (ret) {
DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
goto fail_map;
}
queue->wptr_obj.gpu_addr = amdgpu_bo_gpu_offset(wptr_obj->obj);
amdgpu_bo_unreserve(wptr_obj->obj);
drm_exec_fini(&exec);
return 0;
unresv_bo:
amdgpu_bo_unreserve(wptr_obj->obj);
fail_map:
amdgpu_bo_unref(&wptr_obj->obj);
fail_lock:
drm_exec_fini(&exec);
return ret;
}