drm fixes for 7.1-rc3

core:
 - fix race condition in handle change ioctl
 
 fb-helper:
 - fix clipping
 
 rust:
 - fix unsound initialization
 - fix GEM state cleanup
 - fix wrong ARef import
 
 ttm:
 - update GPU MM stats on pool shrinking
 
 i915:
 - Re-enable ccs modifiers on dg2
 
 nova:
 - fix mailing list
 
 xe:
 - Add NULL check for media_gt in intel_hdcp_gsc_check_status
 - Fix EAGAIN sign in pf_migration_consume
 - Fix MMIO access using PF view instead of VF view during migration
 - Exclude indirect ring state page from ADS engine state size
 
 amdgpu:
 - GFX9 fixes
 - Hawaii SMU fixes
 - SDMA4 fix
 - GART fix
 - Userq fixes
 
 amdkfd:
 - GPUVM TLB flush fix
 - Hotplug fix
 
 radeon:
 - Hawaii SMU fixes
 
 bochs:
 - fix managed cleanup
 
 bridge:
 - tda998x: fix sparse type-correctness warnings
 
 etnaviv:
 - schedule armed jobs
 
 exynos:
 - managed bridge cleanup
 
 ivpu:
 - disallow reexport of GEM buffer objects
 
 nouveau:
 - revert support for GA100
 
 panel:
 - boe-tv101wum-nl6: use correct MIPI_DSI mode
 - feiyang-fy07024di26a30d: fix error reporting
 - himax-hx83102: use correct MIPI_DSI mode
 - himax-hx83121a: fix error checks
 - himax-hx83121a: select DRM_DISPLAY_DSC_HELPER
 
 qaic:
 - fix RAS message handling
 
 qxl:
 - clean up polling
 
 sti:
 - managed bridge cleanup
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmn9mggACgkQDHTzWXnE
 hr5UkQ//Yb4HiE1jN3Ji0ulR+VlYuyXQj3rrIbZMiGafL7xK32qM0hWSwXUGIZI2
 Hbe6fgQGc9yDt9svd56xVjCWgU72gwxVcd2x9nAj3UbzMreBol3A0s19TLmKyuU2
 7O5HSQ4dcLJc42rlJ18vqSnsErynhKTjnFqF0y3Mz12DOqakDSK0a0FxNit5qJlu
 3t6O5bJohIRlwTVRkgM3YxtY0Mx8sg0XHz2JhlzerwfiRe3eXIOX5GmIWODjfGPy
 sjGICWTaO/zCTBbgVtGKfIKJkhbCvm1LkZJUXQypcT0EpGaHMFkOxj3cyG88QwnQ
 8DQfQKuWO5H7jMle+K6nuJNiEYK7TJ7NUr7IzL5ksqGtN/Vo414BFnqbx0nY57Xb
 kaYUth/Zka4eHi9wRueKXb5i/Vh2NJUbj+zdEawW0gbgIexMl5CD14LrxaNRL0xP
 a/Tf4FbG81Y/0+3/ohjE/n149UgcWC2Td/+cLvHxAfp3yFyGMQAo0sKUxOC0E4wW
 IPRfVHZIkeZgdMBUI0KNg83aABLZTtXNDKzjNTa8pCrrHbVlpg7egQaLUGgEtlK2
 fuy9gTMVISAAu309acrGcE26o6Dv9MpkLab74W98ynfNLB+WSifbA9xhD4MIeg2h
 k5OS2r7g0zoLSOhDhG0tzCvZT7Ky6bFkapWVD3ooCMgSQya13ZE=
 =t9m/
 -----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2026-05-08-1' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "Weekly fixes, lots of them but all pretty small, amdgpu and xe are the
  usual but then a large amount of fixes all over.

  core:
   - fix race condition in handle change ioctl

  fb-helper:
   - fix clipping

  rust:
   - fix unsound initialization
   - fix GEM state cleanup
   - fix wrong ARef import

  ttm:
   - update GPU MM stats on pool shrinking

  i915:
   - Re-enable ccs modifiers on dg2

  nova:
   - fix mailing list

  xe:
   - Add NULL check for media_gt in intel_hdcp_gsc_check_status
   - Fix EAGAIN sign in pf_migration_consume
   - Fix MMIO access using PF view instead of VF view during migration
   - Exclude indirect ring state page from ADS engine state size

  amdgpu:
   - GFX9 fixes
   - Hawaii SMU fixes
   - SDMA4 fix
   - GART fix
   - Userq fixes

  amdkfd:
   - GPUVM TLB flush fix
   - Hotplug fix

  radeon:
   - Hawaii SMU fixes

  bochs:
   - fix managed cleanup

  bridge:
   - tda998x: fix sparse type-correctness warnings

  etnaviv:
   - schedule armed jobs

  exynos:
   - managed bridge cleanup

  ivpu:
   - disallow reexport of GEM buffer objects

  nouveau:
   - revert support for GA100

  panel:
   - boe-tv101wum-nl6: use correct MIPI_DSI mode
   - feiyang-fy07024di26a30d: fix error reporting
   - himax-hx83102: use correct MIPI_DSI mode
   - himax-hx83121a: fix error checks
   - himax-hx83121a: select DRM_DISPLAY_DSC_HELPER

  qaic:
   - fix RAS message handling

  qxl:
   - clean up polling

  sti:
   - managed bridge cleanup

* tag 'drm-fixes-2026-05-08-1' of https://gitlab.freedesktop.org/drm/kernel: (37 commits)
  drm: Set old handle to NULL before prime swap in change_handle
  drm/bochs: Drop manual put on probe error path
  drm/xe/guc: Exclude indirect ring state page from ADS engine state size
  drm/xe/pf: Fix MMIO access using PF view instead of VF view during migration
  drm/xe/pf: Fix EAGAIN sign in pf_migration_consume()
  drm/xe/hdcp: Add NULL check for media_gt in intel_hdcp_gsc_check_status()
  drm/exynos: remove bridge when component_add fails
  drm/amdgpu: nuke amdgpu_userq_fence_slab v2
  drm/amdgpu/userq: fix access to stale wptr mapping
  drm/amdkfd: Check if there are kfd processes using adev by kfd_processes_count
  drm/amdgpu: zero-initialize GART table on allocation
  drm/amdgpu/sdma4: replace BUG_ON with WARN_ON in fence emission
  drm/radeon: add missing revision check for CI
  drm/amdgpu/pm: align Hawaii mclk workaround with radeon
  drm/amdgpu/pm: add missing revision check for CI
  drm/amdgpu/gfx9: drop unnecessary 64-bit fence flag check in KIQ
  drm/amdkfd: Make all TLB-flushes heavy-weight
  drm/panel: himax-hx83102: restore MODE_LPM after sending disable cmds
  drm/panel: boe-tv101wum-nl6: restore MODE_LPM after sending disable cmds
  drm/panel: feiyang-fy07024di26a30d: return display-on error
  ...
Linus Torvalds 2026-05-08 08:23:06 -07:00
commit 51d24842ac
44 changed files with 254 additions and 247 deletions

View File

@ -8199,10 +8199,9 @@ F: include/uapi/drm/nouveau_drm.h
CORE DRIVER FOR NVIDIA GPUS [RUST]
M: Danilo Krummrich <dakr@kernel.org>
M: Alexandre Courbot <acourbot@nvidia.com>
L: nouveau@lists.freedesktop.org
L: nova-gpu@lists.linux.dev
S: Supported
W: https://rust-for-linux.com/nova-gpu-driver
Q: https://patchwork.freedesktop.org/project/nouveau/
B: https://gitlab.freedesktop.org/drm/nova/-/issues
C: irc://irc.oftc.net/nouveau
T: git https://gitlab.freedesktop.org/drm/rust/kernel.git drm-rust-next
@ -8211,10 +8210,9 @@ F: drivers/gpu/nova-core/
DRM DRIVER FOR NVIDIA GPUS [RUST]
M: Danilo Krummrich <dakr@kernel.org>
L: nouveau@lists.freedesktop.org
L: nova-gpu@lists.linux.dev
S: Supported
W: https://rust-for-linux.com/nova-gpu-driver
Q: https://patchwork.freedesktop.org/project/nouveau/
B: https://gitlab.freedesktop.org/drm/nova/-/issues
C: irc://irc.oftc.net/nouveau
T: git https://gitlab.freedesktop.org/drm/rust/kernel.git drm-rust-next

View File

@ -537,6 +537,26 @@ static const struct file_operations ivpu_fops = {
#endif
};
static int ivpu_gem_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv,
u32 handle, u32 flags, int *prime_fd)
{
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(file_priv, handle);
if (!obj)
return -ENOENT;
if (drm_gem_is_imported(obj)) {
/* Do not allow re-exporting */
drm_gem_object_put(obj);
return -EOPNOTSUPP;
}
drm_gem_object_put(obj);
return drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);
}
static const struct drm_driver driver = {
.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,
@ -545,6 +565,7 @@ static const struct drm_driver driver = {
.gem_create_object = ivpu_gem_create_object,
.gem_prime_import = ivpu_gem_prime_import,
.prime_handle_to_fd = ivpu_gem_prime_handle_to_fd,
.ioctls = ivpu_drm_ioctls,
.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
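
A minimal userspace sketch of the behavior this adds: re-exporting a handle that was itself imported via PRIME now fails with EOPNOTSUPP. The device path and handle value are assumptions for illustration; drmPrimeHandleToFD() is libdrm's wrapper around DRM_IOCTL_PRIME_HANDLE_TO_FD.

    #include <errno.h>
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <xf86drm.h> /* libdrm */

    int main(void)
    {
            int fd = open("/dev/accel/accel0", O_RDWR); /* assumed ivpu accel node */
            uint32_t handle = 1; /* assume: created earlier via drmPrimeFDToHandle() */
            int prime_fd = -1;

            if (fd < 0)
                    return 1;
            /* With this patch the driver's .prime_handle_to_fd hook refuses
             * handles backed by imported objects, so this now fails. */
            if (drmPrimeHandleToFD(fd, handle, DRM_CLOEXEC, &prime_fd))
                    printf("re-export rejected: %s\n", strerror(errno));
            return 0;
    }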

View File

@ -497,11 +497,11 @@ static void decode_ras_msg(struct qaic_device *qdev, struct ras_data *msg)
qdev->ce_count++;
break;
case UE:
if (qdev->ce_count != UINT_MAX)
if (qdev->ue_count != UINT_MAX)
qdev->ue_count++;
break;
case UE_NF:
if (qdev->ce_count != UINT_MAX)
if (qdev->ue_nf_count != UINT_MAX)
qdev->ue_nf_count++;
break;
default:

View File

@ -3149,11 +3149,7 @@ static int __init amdgpu_init(void)
r = amdgpu_sync_init();
if (r)
goto error_sync;
r = amdgpu_userq_fence_slab_init();
if (r)
goto error_fence;
return r;
amdgpu_register_atpx_handler();
amdgpu_acpi_detect();
@ -3161,7 +3157,7 @@ static int __init amdgpu_init(void)
/* Ignore KFD init failures when CONFIG_HSA_AMD is not set. */
r = amdgpu_amdkfd_init();
if (r && r != -ENOENT)
goto error_fence;
goto error_fini_sync;
if (amdgpu_pp_feature_mask & PP_OVERDRIVE_MASK) {
add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
@ -3172,10 +3168,8 @@ static int __init amdgpu_init(void)
/* let modprobe override vga console setting */
return pci_register_driver(&amdgpu_kms_pci_driver);
error_fence:
error_fini_sync:
amdgpu_sync_fini();
error_sync:
return r;
}
@ -3186,7 +3180,6 @@ static void __exit amdgpu_exit(void)
amdgpu_unregister_atpx_handler();
amdgpu_acpi_release();
amdgpu_sync_fini();
amdgpu_userq_fence_slab_fini();
mmu_notifier_synchronize();
amdgpu_xcp_drv_release();
}

View File

@ -262,12 +262,19 @@ void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
*/
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
{
int r;
if (adev->gart.bo != NULL)
return 0;
return amdgpu_bo_create_kernel(adev, adev->gart.table_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, &adev->gart.bo,
NULL, (void *)&adev->gart.ptr);
r = amdgpu_bo_create_kernel(adev, adev->gart.table_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, &adev->gart.bo,
NULL, (void *)&adev->gart.ptr);
if (r)
return r;
memset_io(adev->gart.ptr, adev->gart.gart_pte_flags, adev->gart.table_size);
return 0;
}
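
Note the use of memset_io() rather than memset(): the GART table lives in VRAM and its CPU mapping is I/O memory, so the I/O-safe accessor is required. Initializing the freshly allocated table means the GPU never walks whatever stale data happened to be left in that VRAM.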
/**

View File

@ -32,29 +32,9 @@
#include "amdgpu.h"
#include "amdgpu_userq_fence.h"
static const struct dma_fence_ops amdgpu_userq_fence_ops;
static struct kmem_cache *amdgpu_userq_fence_slab;
#define AMDGPU_USERQ_MAX_HANDLES (1U << 16)
int amdgpu_userq_fence_slab_init(void)
{
amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
sizeof(struct amdgpu_userq_fence),
0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!amdgpu_userq_fence_slab)
return -ENOMEM;
return 0;
}
void amdgpu_userq_fence_slab_fini(void)
{
rcu_barrier();
kmem_cache_destroy(amdgpu_userq_fence_slab);
}
static const struct dma_fence_ops amdgpu_userq_fence_ops;
static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
{
@ -231,7 +211,7 @@ void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
{
*userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
*userq_fence = kmalloc(sizeof(**userq_fence), GFP_KERNEL);
return *userq_fence ? 0 : -ENOMEM;
}
@ -342,7 +322,7 @@ static void amdgpu_userq_fence_free(struct rcu_head *rcu)
amdgpu_userq_fence_driver_put(fence_drv);
kvfree(userq_fence->fence_drv_array);
kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
kfree(userq_fence);
}
static void amdgpu_userq_fence_release(struct dma_fence *f)
@ -545,7 +525,7 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
if (r) {
mutex_unlock(&userq_mgr->userq_mutex);
kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
kfree(userq_fence);
goto put_gobj_write;
}

View File

@ -58,9 +58,6 @@ struct amdgpu_userq_fence_driver {
char timeline_name[TASK_COMM_LEN];
};
int amdgpu_userq_fence_slab_init(void);
void amdgpu_userq_fence_slab_fini(void);
void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv);
void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv);
int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,

View File

@ -5660,9 +5660,6 @@ static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
{
struct amdgpu_device *adev = ring->adev;
/* we only allocate 32bit for each seq wb address */
BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
/* write fence seq to the "addr" */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |

View File

@ -30,34 +30,6 @@
#define AMDGPU_USERQ_PROC_CTX_SZ PAGE_SIZE
#define AMDGPU_USERQ_GANG_CTX_SZ PAGE_SIZE
static int
mes_userq_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
{
int ret;
ret = amdgpu_bo_reserve(bo, true);
if (ret) {
DRM_ERROR("Failed to reserve bo. ret %d\n", ret);
goto err_reserve_bo_failed;
}
ret = amdgpu_ttm_alloc_gart(&bo->tbo);
if (ret) {
DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
goto err_map_bo_gart_failed;
}
amdgpu_bo_unreserve(bo);
bo = amdgpu_bo_ref(bo);
return 0;
err_map_bo_gart_failed:
amdgpu_bo_unreserve(bo);
err_reserve_bo_failed:
return ret;
}
static int
mes_userq_create_wptr_mapping(struct amdgpu_device *adev,
struct amdgpu_userq_mgr *uq_mgr,
@ -65,55 +37,62 @@ mes_userq_create_wptr_mapping(struct amdgpu_device *adev,
uint64_t wptr)
{
struct amdgpu_bo_va_mapping *wptr_mapping;
struct amdgpu_vm *wptr_vm;
struct amdgpu_userq_obj *wptr_obj = &queue->wptr_obj;
struct amdgpu_bo *obj;
struct amdgpu_vm *vm = queue->vm;
struct drm_exec exec;
int ret;
wptr_vm = queue->vm;
ret = amdgpu_bo_reserve(wptr_vm->root.bo, false);
if (ret)
return ret;
wptr &= AMDGPU_GMC_HOLE_MASK;
wptr_mapping = amdgpu_vm_bo_lookup_mapping(wptr_vm, wptr >> PAGE_SHIFT);
amdgpu_bo_unreserve(wptr_vm->root.bo);
if (!wptr_mapping) {
DRM_ERROR("Failed to lookup wptr bo\n");
return -EINVAL;
drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 2);
drm_exec_until_all_locked(&exec) {
ret = amdgpu_vm_lock_pd(vm, &exec, 1);
drm_exec_retry_on_contention(&exec);
if (unlikely(ret))
goto fail_lock;
wptr_mapping = amdgpu_vm_bo_lookup_mapping(vm, wptr >> PAGE_SHIFT);
if (!wptr_mapping) {
ret = -EINVAL;
goto fail_lock;
}
obj = wptr_mapping->bo_va->base.bo;
ret = drm_exec_lock_obj(&exec, &obj->tbo.base);
drm_exec_retry_on_contention(&exec);
if (unlikely(ret))
goto fail_lock;
}
wptr_obj->obj = wptr_mapping->bo_va->base.bo;
wptr_obj->obj = amdgpu_bo_ref(wptr_mapping->bo_va->base.bo);
if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) {
DRM_ERROR("Requested GART mapping for wptr bo larger than one page\n");
return -EINVAL;
}
ret = mes_userq_map_gtt_bo_to_gart(wptr_obj->obj);
if (ret) {
DRM_ERROR("Failed to map wptr bo to GART\n");
return ret;
}
ret = amdgpu_bo_reserve(wptr_obj->obj, true);
if (ret) {
DRM_ERROR("Failed to reserve wptr bo\n");
return ret;
ret = -EINVAL;
goto fail_map;
}
/* TODO use eviction fence instead of pinning. */
ret = amdgpu_bo_pin(wptr_obj->obj, AMDGPU_GEM_DOMAIN_GTT);
if (ret) {
drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin wptr bo\n");
goto unresv_bo;
DRM_ERROR("Failed to pin wptr bo. ret %d\n", ret);
goto fail_map;
}
ret = amdgpu_ttm_alloc_gart(&wptr_obj->obj->tbo);
if (ret) {
DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
goto fail_map;
}
queue->wptr_obj.gpu_addr = amdgpu_bo_gpu_offset(wptr_obj->obj);
amdgpu_bo_unreserve(wptr_obj->obj);
drm_exec_fini(&exec);
return 0;
unresv_bo:
amdgpu_bo_unreserve(wptr_obj->obj);
fail_map:
amdgpu_bo_unref(&wptr_obj->obj);
fail_lock:
drm_exec_fini(&exec);
return ret;
}
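
Two things fix the stale-wptr access here: the wptr BO is now actually referenced (amdgpu_bo_ref()) instead of borrowing the mapping's pointer, and the VM lookup plus BO lock happen atomically under a drm_exec transaction rather than only reserving the root page directory, so the mapping cannot be torn down between lookup and pin.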

View File

@ -889,7 +889,7 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
/* write the fence */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
/* zero in first two bits */
BUG_ON(addr & 0x3);
WARN_ON(addr & 0x3);
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, lower_32_bits(seq));
@ -899,7 +899,7 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
addr += 4;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
/* zero in first two bits */
BUG_ON(addr & 0x3);
WARN_ON(addr & 0x3);
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(seq));
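
Net effect of the BUG_ON() -> WARN_ON() change: a misaligned fence address now logs a backtrace and continues (worst case a malformed fence packet) instead of halting the whole machine, a more proportionate response to a driver-internal invariant.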

View File

@ -1360,7 +1360,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
if (WARN_ON_ONCE(!peer_pdd))
continue;
kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
kfd_flush_tlb(peer_pdd);
}
kfree(devices_arr);
@ -1455,7 +1455,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
if (WARN_ON_ONCE(!peer_pdd))
continue;
if (flush_tlb)
kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
kfd_flush_tlb(peer_pdd);
/* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);

View File

@ -1737,37 +1737,6 @@ bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entr
return false;
}
/* check if there is kfd process still uses adev */
static bool kgd2kfd_check_device_idle(struct amdgpu_device *adev)
{
struct kfd_process *p;
struct hlist_node *p_temp;
unsigned int temp;
struct kfd_node *dev;
mutex_lock(&kfd_processes_mutex);
if (hash_empty(kfd_processes_table)) {
mutex_unlock(&kfd_processes_mutex);
return true;
}
/* check if there is device still use adev */
hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) {
for (int i = 0; i < p->n_pdds; i++) {
dev = p->pdds[i]->dev;
if (dev->adev == adev) {
mutex_unlock(&kfd_processes_mutex);
return false;
}
}
}
mutex_unlock(&kfd_processes_mutex);
return true;
}
/** kgd2kfd_teardown_processes - gracefully tear down existing
* kfd processes that use adev
*
@ -1800,7 +1769,7 @@ void kgd2kfd_teardown_processes(struct amdgpu_device *adev)
mutex_unlock(&kfd_processes_mutex);
/* wait for all kfd processes using adev to terminate */
while (!kgd2kfd_check_device_idle(adev))
while (!!atomic_read(&adev->kfd.dev->kfd_processes_count))
cond_resched();
}
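
A hedged sketch of the other half of this scheme, which is not part of this hunk: the counter read above presumably gets incremented and decremented where KFD processes bind to and detach from the device, roughly:

    /* assumed bind site: a KFD process starts using this device */
    atomic_inc(&adev->kfd.dev->kfd_processes_count);

    /* assumed teardown site: the process releases the device */
    atomic_dec(&adev->kfd.dev->kfd_processes_count);

This replaces the O(processes) hash-table walk under kfd_processes_mutex with a lock-free read in the wait loop.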

View File

@ -572,7 +572,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
qpd->vmid,
qpd->page_table_base);
/* invalidate the VM context after pasid and vmid mapping is set up */
kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
kfd_flush_tlb(qpd_to_pdd(qpd));
if (dqm->dev->kfd2kgd->set_scratch_backing_va)
dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->adev,
@ -610,7 +610,7 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
if (flush_texture_cache_nocpsch(q->device, qpd))
dev_err(dev, "Failed to flush TC\n");
kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
kfd_flush_tlb(qpd_to_pdd(qpd));
/* Release the vmid mapping */
set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
@ -1284,7 +1284,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
dqm->dev->adev,
qpd->vmid,
qpd->page_table_base);
kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
kfd_flush_tlb(pdd);
}
/* Take a safe reference to the mm_struct, which may otherwise

View File

@ -1554,13 +1554,13 @@ void kfd_signal_reset_event(struct kfd_node *dev);
void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid);
void kfd_signal_process_terminate_event(struct kfd_process *p);
static inline void kfd_flush_tlb(struct kfd_process_device *pdd,
enum TLB_FLUSH_TYPE type)
static inline void kfd_flush_tlb(struct kfd_process_device *pdd)
{
struct amdgpu_device *adev = pdd->dev->adev;
struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
amdgpu_vm_flush_compute_tlb(adev, vm, type, pdd->dev->xcc_mask);
amdgpu_vm_flush_compute_tlb(adev, vm, TLB_FLUSH_HEAVYWEIGHT,
pdd->dev->xcc_mask);
}
static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)

View File

@ -1424,7 +1424,7 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
if (r)
break;
}
kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
kfd_flush_tlb(pdd);
}
return r;
@ -1571,7 +1571,7 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
}
}
kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
kfd_flush_tlb(pdd);
}
return r;

View File

@ -1333,12 +1333,13 @@ static int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
dev_id = adev->pdev->device;
if ((dpm_table->mclk_table.count >= 2)
&& ((dev_id == 0x67B0) || (dev_id == 0x67B1))) {
smu_data->smc_state_table.MemoryLevel[1].MinVddci =
smu_data->smc_state_table.MemoryLevel[0].MinVddci;
smu_data->smc_state_table.MemoryLevel[1].MinMvdd =
smu_data->smc_state_table.MemoryLevel[0].MinMvdd;
if ((dpm_table->mclk_table.count >= 2) &&
((dev_id == 0x67B0) || (dev_id == 0x67B1)) &&
(adev->pdev->revision == 0)) {
smu_data->smc_state_table.MemoryLevel[1].MinVddc =
smu_data->smc_state_table.MemoryLevel[0].MinVddc;
smu_data->smc_state_table.MemoryLevel[1].MinVddcPhases =
smu_data->smc_state_table.MemoryLevel[0].MinVddcPhases;
}
smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);

View File

@ -1293,7 +1293,7 @@ static const struct drm_edid *tda998x_edid_read(struct tda998x_priv *priv,
* can't handle signals gracefully.
*/
if (tda998x_edid_delay_wait(priv))
return 0;
return NULL;
if (priv->rev == TDA19988)
reg_clear(priv, REG_TX4, TX4_PD_RAM);
@ -1762,7 +1762,7 @@ static const struct drm_bridge_funcs tda998x_bridge_funcs = {
static int tda998x_get_audio_ports(struct tda998x_priv *priv,
struct device_node *np)
{
const u32 *port_data;
const __be32 *port_data;
u32 size;
int i;

View File

@ -490,7 +490,7 @@ static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off,
* the number of horizontal pixels that need an update.
*/
off_t bit_off = (off % line_length) * 8;
off_t bit_end = (end % line_length) * 8;
off_t bit_end = bit_off + len * 8;
x1 = bit_off / info->var.bits_per_pixel;
x2 = DIV_ROUND_UP(bit_end, info->var.bits_per_pixel);
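
Worked example with assumed numbers (bits_per_pixel = 32, line_length = 4096): a len = 16 write at off = 4088 wraps past a scanline boundary, so end = 4104. The old bit_end = (end % line_length) * 8 = 64 gave x2 = 2 while x1 = bit_off / 32 = 1022, collapsing the clip rectangle; the new bit_end = bit_off + len * 8 = 32832 gives x2 = 1026, which (subject to the usual clamping to the framebuffer width) covers the damaged pixels.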

View File

@ -1019,7 +1019,7 @@ int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_change_handle *args = data;
struct drm_gem_object *obj;
struct drm_gem_object *obj, *idrobj;
int handle, ret;
if (!drm_core_check_feature(dev, DRIVER_GEM))
@ -1042,8 +1042,29 @@ int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
mutex_lock(&file_priv->prime.lock);
spin_lock(&file_priv->table_lock);
/* When create_tail allocates an object's idr entry, it must first
* allocate it as NULL and only later replace it with the correct
* object. That is not necessary here, because the only operations
* that could race are the drm_prime bookkeeping, and we hold the
* prime lock.
*/
ret = idr_alloc(&file_priv->object_idr, obj, handle, handle + 1,
GFP_NOWAIT);
if (ret < 0) {
spin_unlock(&file_priv->table_lock);
goto out_unlock;
}
idrobj = idr_replace(&file_priv->object_idr, NULL, handle);
if (idrobj != obj) {
idr_replace(&file_priv->object_idr, idrobj, handle);
idr_remove(&file_priv->object_idr, args->new_handle);
spin_unlock(&file_priv->table_lock);
ret = -ENOENT;
goto out_unlock;
}
spin_unlock(&file_priv->table_lock);
if (ret < 0)
@ -1055,6 +1076,8 @@ int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
if (ret < 0) {
spin_lock(&file_priv->table_lock);
idr_remove(&file_priv->object_idr, handle);
idrobj = idr_replace(&file_priv->object_idr, obj, handle);
WARN_ON(idrobj != NULL);
spin_unlock(&file_priv->table_lock);
goto out_unlock;
}
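
For reference, a minimal sketch of the reserve-then-publish pattern the comment above refers to, in the shape drm_gem_handle_create_tail() uses (names illustrative, kernel context assumed):

    int handle = idr_alloc(&file_priv->object_idr, NULL, 1, 0, GFP_KERNEL);

    if (handle < 0)
            return handle;
    /* Concurrent lookups of 'handle' now see NULL ("no object")
     * while the object is still being set up. */
    idr_replace(&file_priv->object_idr, obj, handle);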

View File

@ -116,16 +116,18 @@ int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
*/
mutex_lock(&gpu->sched_lock);
ret = xa_alloc_cyclic(&gpu->user_fences, &submit->out_fence_id,
NULL, xa_limit_32b, &gpu->next_user_fence,
GFP_KERNEL);
if (ret < 0)
goto out_unlock;
drm_sched_job_arm(&submit->sched_job);
submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
ret = xa_alloc_cyclic(&gpu->user_fences, &submit->out_fence_id,
submit->out_fence, xa_limit_32b,
&gpu->next_user_fence, GFP_KERNEL);
if (ret < 0) {
drm_sched_job_cleanup(&submit->sched_job);
goto out_unlock;
}
xa_store(&gpu->user_fences, submit->out_fence_id,
submit->out_fence, GFP_KERNEL);
/* the scheduler holds on to the job now */
kref_get(&submit->refcount);
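
Same reserve-then-publish idea as the GEM handle fix above: the fence id is now reserved (with a NULL entry) before the job is armed, and the armed finished-fence is published afterwards with xa_store(), so the failure path no longer has to unwind an armed job and waiters never observe a fence id without its armed fence.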

View File

@ -423,7 +423,9 @@ static int exynos_mic_probe(struct platform_device *pdev)
mic->bridge.of_node = dev->of_node;
drm_bridge_add(&mic->bridge);
ret = devm_drm_bridge_add(dev, &mic->bridge);
if (ret)
goto err;
pm_runtime_enable(dev);
@ -443,12 +445,8 @@ static int exynos_mic_probe(struct platform_device *pdev)
static void exynos_mic_remove(struct platform_device *pdev)
{
struct exynos_mic *mic = platform_get_drvdata(pdev);
component_del(&pdev->dev, &exynos_mic_component_ops);
pm_runtime_disable(&pdev->dev);
drm_bridge_remove(&mic->bridge);
}
static const struct of_device_id exynos_mic_of_match[] = {
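
devm_drm_bridge_add() ties the bridge registration to the device's lifetime, which is why both the error path and exynos_mic_remove() lose their manual drm_bridge_remove() calls; the sti_hda conversion further down follows the same pattern.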

View File

@ -750,9 +750,8 @@ static bool has_auxccs(struct drm_device *drm)
{
struct drm_i915_private *i915 = to_i915(drm);
return IS_GRAPHICS_VER(i915, 9, 12) ||
IS_ALDERLAKE_P(i915) ||
IS_METEORLAKE(i915);
return IS_GRAPHICS_VER(i915, 9, 12) &&
!HAS_FLAT_CCS(i915);
}
static bool has_fenced_regions(struct drm_device *drm)

View File

@ -2513,6 +2513,7 @@ static const struct nvkm_device_chip
nv170_chipset = {
.name = "GA100",
.bar = { 0x00000001, tu102_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.devinit = { 0x00000001, ga100_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
.fb = { 0x00000001, ga100_fb_new },
@ -2529,7 +2530,6 @@ nv170_chipset = {
.vfn = { 0x00000001, ga100_vfn_new },
.ce = { 0x000003ff, ga100_ce_new },
.fifo = { 0x00000001, ga100_fifo_new },
.sec2 = { 0x00000001, tu102_sec2_new },
};
static const struct nvkm_device_chip
@ -3341,7 +3341,6 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x166: device->chip = &nv166_chipset; break;
case 0x167: device->chip = &nv167_chipset; break;
case 0x168: device->chip = &nv168_chipset; break;
case 0x170: device->chip = &nv170_chipset; break;
case 0x172: device->chip = &nv172_chipset; break;
case 0x173: device->chip = &nv173_chipset; break;
case 0x174: device->chip = &nv174_chipset; break;
@ -3361,6 +3360,14 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x1b6: device->chip = &nv1b6_chipset; break;
case 0x1b7: device->chip = &nv1b7_chipset; break;
default:
if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
switch (device->chipset) {
case 0x170: device->chip = &nv170_chipset; break;
default:
break;
}
}
if (!device->chip) {
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
ret = -ENODEV;
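
With the revert, GA100 (chipset 0x170) binds only when the user opts in through the NvEnableUnsupportedChipsets option shown above; that is typically passed via nouveau's config string, e.g. something like nouveau.config=NvEnableUnsupportedChipsets=1 (the module-parameter spelling is an assumption here; the option name comes from the hunk).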

View File

@ -41,11 +41,15 @@ ga100_gsp_flcn = {
static const struct nvkm_gsp_func
ga100_gsp = {
.flcn = &ga100_gsp_flcn,
.fwsec = &tu102_gsp_fwsec,
.sig_section = ".fwsignature_ga100",
.booter.ctor = tu102_gsp_booter_ctor,
.fwsec_sb.ctor = tu102_gsp_fwsec_sb_ctor,
.fwsec_sb.dtor = tu102_gsp_fwsec_sb_dtor,
.dtor = r535_gsp_dtor,
.oneinit = tu102_gsp_oneinit,
.init = tu102_gsp_init,

View File

@ -318,13 +318,8 @@ tu102_gsp_oneinit(struct nvkm_gsp *gsp)
if (ret)
return ret;
/*
* Calculate FB layout. FRTS is a memory region created by the FWSEC-FRTS firmware.
* FWSEC comes from VBIOS. So on systems with no VBIOS (e.g. GA100), the FRTS does
* not exist. Therefore, use the existence of VBIOS to determine whether to reserve
* an FRTS region.
*/
gsp->fb.wpr2.frts.size = device->bios ? 0x100000 : 0;
/* Calculate FB layout. */
gsp->fb.wpr2.frts.size = 0x100000;
gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size;
gsp->fb.wpr2.boot.size = gsp->boot.fw.size;
@ -348,12 +343,9 @@ tu102_gsp_oneinit(struct nvkm_gsp *gsp)
if (ret)
return ret;
/* Only boot FWSEC-FRTS if it actually exists */
if (gsp->fb.wpr2.frts.size) {
ret = nvkm_gsp_fwsec_frts(gsp);
if (WARN_ON(ret))
return ret;
}
ret = nvkm_gsp_fwsec_frts(gsp);
if (WARN_ON(ret))
return ret;
/* Reset GSP into RISC-V mode. */
ret = gsp->func->reset(gsp);

View File

@ -208,6 +208,7 @@ config DRM_PANEL_HIMAX_HX83121A
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
select DRM_DISPLAY_DSC_HELPER
select DRM_KMS_HELPER
help
Say Y here if you want to enable support for Himax HX83121A-based

View File

@ -1324,6 +1324,8 @@ static int boe_panel_disable(struct drm_panel *panel)
mipi_dsi_dcs_set_display_off_multi(&ctx);
mipi_dsi_dcs_enter_sleep_mode_multi(&ctx);
boe->dsi->mode_flags |= MIPI_DSI_MODE_LPM;
mipi_dsi_msleep(&ctx, 150);
return ctx.accum_err;

View File

@ -98,9 +98,7 @@ static int feiyang_enable(struct drm_panel *panel)
/* T12 (video & logic signal rise + backlight rise) T12 >= 200ms */
msleep(200);
mipi_dsi_dcs_set_display_on(ctx->dsi);
return 0;
return mipi_dsi_dcs_set_display_on(ctx->dsi);
}
static int feiyang_disable(struct drm_panel *panel)

View File

@ -937,6 +937,8 @@ static int hx83102_disable(struct drm_panel *panel)
mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
mipi_dsi_msleep(&dsi_ctx, 150);
return dsi_ctx.accum_err;

View File

@ -596,8 +596,8 @@ static int himax_probe(struct mipi_dsi_device *dsi)
ctx = devm_drm_panel_alloc(dev, struct himax, panel, &himax_panel_funcs,
DRM_MODE_CONNECTOR_DSI);
if (!ctx)
return -ENOMEM;
if (IS_ERR(ctx))
return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(himax_supplies),

View File

@ -118,12 +118,13 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Complete initialization. */
ret = drm_dev_register(&qdev->ddev, ent->driver_data);
if (ret)
goto modeset_cleanup;
goto poll_fini;
drm_client_setup(&qdev->ddev, NULL);
return 0;
modeset_cleanup:
poll_fini:
drm_kms_helper_poll_fini(&qdev->ddev);
qxl_modeset_fini(qdev);
unload:
qxl_device_fini(qdev);
@ -154,6 +155,7 @@ qxl_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
drm_kms_helper_poll_fini(dev);
drm_dev_unregister(dev);
drm_atomic_helper_shutdown(dev);
if (pci_is_vga(pdev) && pdev->revision < 5)

View File

@ -2461,7 +2461,8 @@ static void ci_register_patching_mc_arb(struct radeon_device *rdev,
if (patch &&
((rdev->pdev->device == 0x67B0) ||
(rdev->pdev->device == 0x67B1))) {
(rdev->pdev->device == 0x67B1)) &&
(rdev->pdev->revision == 0)) {
if ((memory_clock > 100000) && (memory_clock <= 125000)) {
tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
*dram_timimg2 &= ~0x00ff0000;
@ -3304,7 +3305,8 @@ static int ci_populate_all_memory_levels(struct radeon_device *rdev)
pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
if ((dpm_table->mclk_table.count >= 2) &&
((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) {
((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1)) &&
(rdev->pdev->revision == 0)) {
pi->smc_state_table.MemoryLevel[1].MinVddc =
pi->smc_state_table.MemoryLevel[0].MinVddc;
pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
@ -4493,7 +4495,8 @@ static int ci_register_patching_mc_seq(struct radeon_device *rdev,
if (patch &&
((rdev->pdev->device == 0x67B0) ||
(rdev->pdev->device == 0x67B1))) {
(rdev->pdev->device == 0x67B1)) &&
(rdev->pdev->revision == 0)) {
for (i = 0; i < table->last; i++) {
if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
return -EINVAL;

View File

@ -741,6 +741,7 @@ static int sti_hda_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct sti_hda *hda;
struct resource *res;
int ret;
DRM_INFO("%s\n", __func__);
@ -779,7 +780,9 @@ static int sti_hda_probe(struct platform_device *pdev)
return PTR_ERR(hda->clk_hddac);
}
drm_bridge_add(&hda->bridge);
ret = devm_drm_bridge_add(dev, &hda->bridge);
if (ret)
return ret;
platform_set_drvdata(pdev, hda);
@ -788,10 +791,7 @@ static int sti_hda_probe(struct platform_device *pdev)
static void sti_hda_remove(struct platform_device *pdev)
{
struct sti_hda *hda = platform_get_drvdata(pdev);
component_del(&pdev->dev, &sti_hda_ops);
drm_bridge_remove(&hda->bridge);
}
static const struct of_device_id hda_of_match[] = {

View File

@ -761,25 +761,21 @@ static int bochs_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent
ret = pcim_enable_device(pdev);
if (ret)
goto err_free_dev;
return ret;
pci_set_drvdata(pdev, dev);
ret = bochs_load(bochs);
if (ret)
goto err_free_dev;
return ret;
ret = drm_dev_register(dev, 0);
if (ret)
goto err_free_dev;
return ret;
drm_client_setup(dev, NULL);
return ret;
err_free_dev:
drm_dev_put(dev);
return ret;
}
static void bochs_pci_remove(struct pci_dev *pdev)

View File

@ -206,6 +206,14 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
return NULL;
}
static void __free_pages_gpu_account(struct page *p, unsigned int order,
bool reclaim)
{
mod_lruvec_page_state(p, reclaim ? NR_GPU_RECLAIM : NR_GPU_ACTIVE,
-(1 << order));
__free_pages(p, order);
}
/* Reset the caching and pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
unsigned int order, struct page *p, bool reclaim)
@ -223,9 +231,7 @@ static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
#endif
if (!pool || !ttm_pool_uses_dma_alloc(pool)) {
mod_lruvec_page_state(p, reclaim ? NR_GPU_RECLAIM : NR_GPU_ACTIVE,
-(1 << order));
__free_pages(p, order);
__free_pages_gpu_account(p, order, reclaim);
return;
}
@ -606,7 +612,7 @@ static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore,
*/
ttm_pool_split_for_swap(restore->pool, p);
copy_highpage(restore->alloced_page + i, p);
__free_pages(p, 0);
__free_pages_gpu_account(p, 0, false);
}
restore->restored_pages++;
@ -1068,7 +1074,7 @@ long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
if (flags->purge) {
shrunken += num_pages;
page->private = 0;
__free_pages(page, order);
__free_pages_gpu_account(page, order, false);
memset(tt->pages + i, 0,
num_pages * sizeof(*tt->pages));
}
@ -1109,7 +1115,7 @@ long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
}
handle = shandle;
tt->pages[i] = ttm_backup_handle_to_page_ptr(handle);
put_page(page);
__free_pages_gpu_account(page, 0, false);
shrunken++;
}
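
The common thread in this file: every page leaving the pool, whether freed, purged, or handed to the backup/swap path, must drop its NR_GPU_ACTIVE / NR_GPU_RECLAIM accounting, which the new __free_pages_gpu_account() helper centralizes; the old put_page() in the backup path in particular freed the page without updating the GPU MM counters.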

View File

@ -37,9 +37,17 @@ static bool intel_hdcp_gsc_check_status(struct drm_device *drm)
struct xe_device *xe = to_xe_device(drm);
struct xe_tile *tile = xe_device_get_root_tile(xe);
struct xe_gt *gt = tile->media_gt;
struct xe_gsc *gsc = &gt->uc.gsc;
struct xe_gsc *gsc;
if (!gsc || !xe_uc_fw_is_available(&gsc->fw)) {
if (!gt) {
drm_dbg_kms(&xe->drm,
"not checking GSC status for HDCP2.x: media GT not present or disabled\n");
return false;
}
gsc = &gt->uc.gsc;
if (!xe_uc_fw_is_available(&gsc->fw)) {
drm_dbg_kms(&xe->drm,
"GSC Components not ready for HDCP2.x\n");
return false;

View File

@ -385,10 +385,10 @@ static int pf_migration_mmio_save(struct xe_gt *gt, unsigned int vfid, void *buf
if (xe_gt_is_media_type(gt))
for (n = 0; n < MED_VF_SW_FLAG_COUNT; n++)
regs[n] = xe_mmio_read32(&gt->mmio, MED_VF_SW_FLAG(n));
regs[n] = xe_mmio_read32(&mmio, MED_VF_SW_FLAG(n));
else
for (n = 0; n < VF_SW_FLAG_COUNT; n++)
regs[n] = xe_mmio_read32(&gt->mmio, VF_SW_FLAG(n));
regs[n] = xe_mmio_read32(&mmio, VF_SW_FLAG(n));
return 0;
}
@ -407,10 +407,10 @@ static int pf_migration_mmio_restore(struct xe_gt *gt, unsigned int vfid,
if (xe_gt_is_media_type(gt))
for (n = 0; n < MED_VF_SW_FLAG_COUNT; n++)
xe_mmio_write32(&gt->mmio, MED_VF_SW_FLAG(n), regs[n]);
xe_mmio_write32(&mmio, MED_VF_SW_FLAG(n), regs[n]);
else
for (n = 0; n < VF_SW_FLAG_COUNT; n++)
xe_mmio_write32(&gt->mmio, VF_SW_FLAG(n), regs[n]);
xe_mmio_write32(&mmio, VF_SW_FLAG(n), regs[n]);
return 0;
}

View File

@ -512,12 +512,9 @@ static void guc_golden_lrc_init(struct xe_guc_ads *ads)
* that starts after the execlists LRC registers. This is
* required to allow the GuC to restore just the engine state
* when a watchdog reset occurs.
* We calculate the engine state size by removing the size of
* what comes before it in the context image (which is identical
* on all engines).
*/
ads_blob_write(ads, ads.eng_state_size[guc_class],
real_size - xe_lrc_skip_size(xe));
xe_lrc_engine_state_size(gt, class));
ads_blob_write(ads, ads.golden_context_lrca[guc_class],
addr_ggtt);

View File

@ -746,9 +746,16 @@ size_t xe_lrc_reg_size(struct xe_device *xe)
return 80 * sizeof(u32);
}
size_t xe_lrc_skip_size(struct xe_device *xe)
/**
* xe_lrc_engine_state_size() - Get size of the engine state within LRC
* @gt: the &xe_gt struct instance
* @class: Hardware engine class
*
* Returns: Size of the engine state
*/
size_t xe_lrc_engine_state_size(struct xe_gt *gt, enum xe_engine_class class)
{
return LRC_PPHWSP_SIZE + xe_lrc_reg_size(xe);
return xe_gt_lrc_hang_replay_size(gt, class) - xe_lrc_reg_size(gt_to_xe(gt));
}
static inline u32 __xe_lrc_seqno_offset(struct xe_lrc *lrc)

View File

@ -130,7 +130,7 @@ u32 xe_lrc_parallel_ggtt_addr(struct xe_lrc *lrc);
struct iosys_map xe_lrc_parallel_map(struct xe_lrc *lrc);
size_t xe_lrc_reg_size(struct xe_device *xe);
size_t xe_lrc_skip_size(struct xe_device *xe);
size_t xe_lrc_engine_state_size(struct xe_gt *gt, enum xe_engine_class class);
void xe_lrc_dump_default(struct drm_printer *p,
struct xe_gt *gt,

View File

@ -149,10 +149,11 @@ pf_migration_consume(struct xe_device *xe, unsigned int vfid)
for_each_gt(gt, xe, gt_id) {
data = xe_gt_sriov_pf_migration_save_consume(gt, vfid);
if (data && PTR_ERR(data) != EAGAIN)
if (!data)
continue;
if (!IS_ERR(data) || PTR_ERR(data) != -EAGAIN)
return data;
if (PTR_ERR(data) == -EAGAIN)
more_data = true;
more_data = true;
}
if (!more_data)
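
The sign confusion is easy to demonstrate outside the kernel. A self-contained userspace sketch, with the ERR_PTR()/PTR_ERR() helpers re-implemented for illustration, showing why a comparison against positive EAGAIN can never match:

    #include <errno.h>
    #include <stdio.h>

    /* Userspace re-implementation of the kernel helpers, for illustration. */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }

    int main(void)
    {
            void *data = ERR_PTR(-EAGAIN); /* the kernel encodes errors as negative */

            /* The old check compared against positive EAGAIN: never true. */
            printf("== EAGAIN:  %d\n", PTR_ERR(data) == EAGAIN);
            /* The fixed check uses -EAGAIN: true. */
            printf("== -EAGAIN: %d\n", PTR_ERR(data) == -EAGAIN);
            return 0;
    }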

View File

@ -119,13 +119,20 @@ pub fn new(dev: &device::Device, data: impl PinInit<T::Data, Error>) -> Result<A
// compatible `Layout`.
let layout = Kmalloc::aligned_layout(Layout::new::<Self>());
// Use a temporary vtable without a `release` callback until `data` is initialized, so
// init failure can release the DRM device without dropping uninitialized fields.
let alloc_vtable = bindings::drm_driver {
release: None,
..Self::VTABLE
};
// SAFETY:
// - `VTABLE`, as a `const` is pinned to the read-only section of the compilation,
// - `alloc_vtable` reference remains valid until no longer used,
// - `dev` is valid by its type invariants,
let raw_drm: *mut Self = unsafe {
bindings::__drm_dev_alloc(
dev.as_raw(),
&Self::VTABLE,
&alloc_vtable,
layout.size(),
mem::offset_of!(Self, dev),
)
@ -133,6 +140,10 @@ pub fn new(dev: &device::Device, data: impl PinInit<T::Data, Error>) -> Result<A
.cast();
let raw_drm = NonNull::new(from_err_ptr(raw_drm)?).ok_or(ENOMEM)?;
// SAFETY: `raw_drm` is a valid pointer to `Self`, given that `__drm_dev_alloc` was
// successful.
let drm_dev = unsafe { Self::into_drm_device(raw_drm) };
// SAFETY: `raw_drm` is a valid pointer to `Self`.
let raw_data = unsafe { ptr::addr_of_mut!((*raw_drm.as_ptr()).data) };
@ -140,15 +151,14 @@ pub fn new(dev: &device::Device, data: impl PinInit<T::Data, Error>) -> Result<A
// - `raw_data` is a valid pointer to uninitialized memory.
// - `raw_data` will not move until it is dropped.
unsafe { data.__pinned_init(raw_data) }.inspect_err(|_| {
// SAFETY: `raw_drm` is a valid pointer to `Self`, given that `__drm_dev_alloc` was
// successful.
let drm_dev = unsafe { Self::into_drm_device(raw_drm) };
// SAFETY: `__drm_dev_alloc()` was successful, hence `drm_dev` must be valid and the
// refcount must be non-zero.
unsafe { bindings::drm_dev_put(drm_dev) };
})?;
// SAFETY: `drm_dev` is still private to this function.
unsafe { (*drm_dev).driver = const { &Self::VTABLE } };
// SAFETY: The reference count is one, and now we take ownership of that reference as a
// `drm::Device`.
Ok(unsafe { ARef::from_raw(raw_drm) })
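
The shape of the fix: allocate the device with a vtable whose release is NULL, so that if __pinned_init() of data fails, the drm_dev_put() in the error path frees the device without running a release callback that would drop still-uninitialized fields; the real VTABLE (including release) is installed only once data is fully initialized.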

View File

@ -277,8 +277,17 @@ pub fn new(dev: &drm::Device<T::Driver>, size: usize, args: T::Args) -> Result<A
// SAFETY: `obj.as_raw()` is guaranteed to be valid by the initialization above.
unsafe { (*obj.as_raw()).funcs = &Self::OBJECT_FUNCS };
// SAFETY: The arguments are all valid per the type invariants.
to_result(unsafe { bindings::drm_gem_object_init(dev.as_raw(), obj.obj.get(), size) })?;
if let Err(err) =
// SAFETY: The arguments are all valid per the type invariants.
to_result(unsafe {
bindings::drm_gem_object_init(dev.as_raw(), obj.obj.get(), size)
})
{
// SAFETY: `drm_gem_object_init()` initializes the private GEM object state before
// failing, so `drm_gem_private_object_fini()` is the matching cleanup.
unsafe { bindings::drm_gem_private_object_fini(obj.obj.get()) };
return Err(err);
}
// SAFETY: We will never move out of `Self` as `ARef<Self>` is always treated as pinned.
let ptr = KBox::into_raw(unsafe { Pin::into_inner_unchecked(obj) });

View File

@ -19,10 +19,8 @@
},
error::to_result,
prelude::*,
types::{
ARef,
Opaque, //
}, //
sync::aref::ARef,
types::Opaque, //
};
use core::{
ops::{