drm/xe: Drop registration of guc_submit_wedged_fini from xe_guc_submit_wedge()
xe_guc_submit_wedge() runs in the DMA-fence signaling path, where
GFP_KERNEL memory allocations are not permitted. However, registering
guc_submit_wedged_fini() via devm_add_action_or_reset() triggers exactly
such an allocation.
Avoid this by moving the logic from guc_submit_wedged_fini() into
guc_submit_fini(), where wedged exec queue references are dropped during
normal teardown.
Fixes: 8ed9aaae39 ("drm/xe: Force wedged state and block GT reset upon any GPU hang")
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patch.msgid.link/20260326210116.202585-3-matthew.brost@intel.com
(cherry picked from commit 4a706bd93c4fb156a13477e26ffdf2e633edeb10)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 254f49634e
commit a0fc362f09
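Background, not part of the patch: devm_add_action_or_reset() allocates its
devres bookkeeping record with GFP_KERNEL, and lockdep's fence-signalling
annotations flag exactly that class of allocation. Below is a minimal sketch
of the problem pattern; the names hypothetical_cleanup and bad_pattern are
made up for illustration and do not appear in the patch.

#include <linux/device.h>
#include <linux/dma-fence.h>

static void hypothetical_cleanup(void *data)
{
	/* teardown work would go here */
}

static int bad_pattern(struct device *dev)
{
	bool cookie = dma_fence_begin_signalling();
	int err;

	/* GFP_KERNEL allocation inside a signalling section: lockdep splat */
	err = devm_add_action_or_reset(dev, hypothetical_cleanup, dev);

	dma_fence_end_signalling(cookie);
	return err;
}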
@@ -259,24 +259,12 @@ static void guc_submit_sw_fini(struct drm_device *drm, void *arg)
 }
 
 static void guc_submit_fini(void *arg)
-{
-	struct xe_guc *guc = arg;
-
-	/* Forcefully kill any remaining exec queues */
-	xe_guc_ct_stop(&guc->ct);
-	guc_submit_reset_prepare(guc);
-	xe_guc_softreset(guc);
-	xe_guc_submit_stop(guc);
-	xe_uc_fw_sanitize(&guc->fw);
-	xe_guc_submit_pause_abort(guc);
-}
-
-static void guc_submit_wedged_fini(void *arg)
 {
 	struct xe_guc *guc = arg;
 	struct xe_exec_queue *q;
 	unsigned long index;
 
 	/* Drop any wedged queue refs */
 	mutex_lock(&guc->submission_state.lock);
 	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
 		if (exec_queue_wedged(q)) {
@@ -286,6 +274,14 @@ static void guc_submit_wedged_fini(void *arg)
 		}
 	}
 	mutex_unlock(&guc->submission_state.lock);
+
+	/* Forcefully kill any remaining exec queues */
+	xe_guc_ct_stop(&guc->ct);
+	guc_submit_reset_prepare(guc);
+	xe_guc_softreset(guc);
+	xe_guc_submit_stop(guc);
+	xe_uc_fw_sanitize(&guc->fw);
+	xe_guc_submit_pause_abort(guc);
 }
 
 static const struct xe_exec_queue_ops guc_exec_queue_ops;
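Taken together, the two hunks above leave the merged finalizer looking
roughly as follows. This is a reconstruction from the diff, not verbatim
kernel code; the if-body hidden between the hunks is elided.

static void guc_submit_fini(void *arg)
{
	struct xe_guc *guc = arg;
	struct xe_exec_queue *q;
	unsigned long index;

	/* Drop any wedged queue refs */
	mutex_lock(&guc->submission_state.lock);
	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
		if (exec_queue_wedged(q)) {
			/* ... per-queue cleanup elided between the hunks ... */
		}
	}
	mutex_unlock(&guc->submission_state.lock);

	/* Forcefully kill any remaining exec queues */
	xe_guc_ct_stop(&guc->ct);
	guc_submit_reset_prepare(guc);
	xe_guc_softreset(guc);
	xe_guc_submit_stop(guc);
	xe_uc_fw_sanitize(&guc->fw);
	xe_guc_submit_pause_abort(guc);
}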
@@ -1320,10 +1316,8 @@ static void disable_scheduling_deregister(struct xe_guc *guc,
 void xe_guc_submit_wedge(struct xe_guc *guc)
 {
 	struct xe_device *xe = guc_to_xe(guc);
-	struct xe_gt *gt = guc_to_gt(guc);
 	struct xe_exec_queue *q;
 	unsigned long index;
-	int err;
 
 	xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);
 
@@ -1335,15 +1329,6 @@ void xe_guc_submit_wedge(struct xe_guc *guc)
 		return;
 
-	if (xe->wedged.mode == XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET) {
-		err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev,
-					       guc_submit_wedged_fini, guc);
-		if (err) {
-			xe_gt_err(gt, "Failed to register clean-up on wedged.mode=%s; "
				  "Although device is wedged.\n",
-				  xe_wedged_mode_to_string(XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET));
-			return;
-		}
-	}
 
 	mutex_lock(&guc->submission_state.lock);
 	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
 		if (xe_exec_queue_get_unless_zero(q))
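For completeness, a rough sketch of the wedge path after the patch, assembled
from the last two hunks; lines cut off in this excerpt are elided, and the
closing mutex_unlock() is assumed from the mutex_lock() above it. The point is
that nothing left on this path allocates memory.

void xe_guc_submit_wedge(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_exec_queue *q;
	unsigned long index;

	xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);

	/* ... early-return check elided in this excerpt ... */

	/* Take refs so wedged queues survive until guc_submit_fini() */
	mutex_lock(&guc->submission_state.lock);
	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
		if (xe_exec_queue_get_unless_zero(q))
			; /* ... remainder cut off in the excerpt ... */
	mutex_unlock(&guc->submission_state.lock); /* assumed */
}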