From: Lizhi Hou <lizhi.hou@amd.com>
To: Mario Limonciello <mario.limonciello@amd.com>,
<ogabbay@kernel.org>, <quic_jhugo@quicinc.com>,
<dri-devel@lists.freedesktop.org>,
<maciej.falkowski@linux.intel.com>
Cc: <linux-kernel@vger.kernel.org>, <max.zhen@amd.com>,
<sonal.santan@amd.com>
Subject: Re: [PATCH V1] accel/amdxdna: Fix dead lock for suspend and resume
Date: Wed, 11 Feb 2026 11:29:54 -0800 [thread overview]
Message-ID: <34ab9aee-30b4-d4b2-c23f-cd97da35f133@amd.com> (raw)
In-Reply-To: <86576aff-a280-4529-9976-da87f5b67d4a@amd.com>
On 2/11/26 11:23, Mario Limonciello wrote:
> On 2/10/26 1:16 PM, Lizhi Hou wrote:
>> When an application issues a query IOCTL while auto suspend is running,
>> a deadlock can occur. The query path holds dev_lock and then calls
>> pm_runtime_resume_and_get(), which waits for the ongoing suspend to
>> complete. Meanwhile, the suspend callback attempts to acquire dev_lock
>> and blocks, resulting in a deadlock.
>>
>> Fix this by releasing dev_lock before calling pm_runtime_resume_and_get()
>> and reacquiring it after the call completes. Also acquire dev_lock in the
>> resume callback to keep the locking consistent.
>>
>> Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
> No Fixes tag?
Oh, I forgot that. I will add the Fixes tag and post a V2 patch.
Thanks,
Lizhi
>
>> ---
>> drivers/accel/amdxdna/aie2_ctx.c | 4 ++--
>> drivers/accel/amdxdna/aie2_pci.c | 7 +++----
>> drivers/accel/amdxdna/aie2_pm.c | 2 +-
>> drivers/accel/amdxdna/amdxdna_ctx.c | 19 +++++++------------
>> drivers/accel/amdxdna/amdxdna_pm.c | 2 ++
>> drivers/accel/amdxdna/amdxdna_pm.h | 11 +++++++++++
>> 6 files changed, 26 insertions(+), 19 deletions(-)
>>
>> diff --git a/drivers/accel/amdxdna/aie2_ctx.c
>> b/drivers/accel/amdxdna/aie2_ctx.c
>> index 37d05f2e986f..58e146172b61 100644
>> --- a/drivers/accel/amdxdna/aie2_ctx.c
>> +++ b/drivers/accel/amdxdna/aie2_ctx.c
>> @@ -629,7 +629,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
>> goto free_entity;
>> }
>> - ret = amdxdna_pm_resume_get(xdna);
>> + ret = amdxdna_pm_resume_get_locked(xdna);
>> if (ret)
>> goto free_col_list;
>> @@ -760,7 +760,7 @@ static int aie2_hwctx_cu_config(struct
>> amdxdna_hwctx *hwctx, void *buf, u32 size
>> if (!hwctx->cus)
>> return -ENOMEM;
>> - ret = amdxdna_pm_resume_get(xdna);
>> + ret = amdxdna_pm_resume_get_locked(xdna);
>> if (ret)
>> goto free_cus;
>> diff --git a/drivers/accel/amdxdna/aie2_pci.c
>> b/drivers/accel/amdxdna/aie2_pci.c
>> index f70ccf0f3c01..5b326e4610e6 100644
>> --- a/drivers/accel/amdxdna/aie2_pci.c
>> +++ b/drivers/accel/amdxdna/aie2_pci.c
>> @@ -451,7 +451,6 @@ static int aie2_hw_suspend(struct amdxdna_dev *xdna)
>> {
>> struct amdxdna_client *client;
>> - guard(mutex)(&xdna->dev_lock);
>> list_for_each_entry(client, &xdna->client_list, node)
>> aie2_hwctx_suspend(client);
>> @@ -951,7 +950,7 @@ static int aie2_get_info(struct amdxdna_client
>> *client, struct amdxdna_drm_get_i
>> if (!drm_dev_enter(&xdna->ddev, &idx))
>> return -ENODEV;
>> - ret = amdxdna_pm_resume_get(xdna);
>> + ret = amdxdna_pm_resume_get_locked(xdna);
>> if (ret)
>> goto dev_exit;
>> @@ -1044,7 +1043,7 @@ static int aie2_get_array(struct
>> amdxdna_client *client,
>> if (!drm_dev_enter(&xdna->ddev, &idx))
>> return -ENODEV;
>> - ret = amdxdna_pm_resume_get(xdna);
>> + ret = amdxdna_pm_resume_get_locked(xdna);
>> if (ret)
>> goto dev_exit;
>> @@ -1134,7 +1133,7 @@ static int aie2_set_state(struct
>> amdxdna_client *client,
>> if (!drm_dev_enter(&xdna->ddev, &idx))
>> return -ENODEV;
>> - ret = amdxdna_pm_resume_get(xdna);
>> + ret = amdxdna_pm_resume_get_locked(xdna);
>> if (ret)
>> goto dev_exit;
>> diff --git a/drivers/accel/amdxdna/aie2_pm.c
>> b/drivers/accel/amdxdna/aie2_pm.c
>> index 579b8be13b18..29bd4403a94d 100644
>> --- a/drivers/accel/amdxdna/aie2_pm.c
>> +++ b/drivers/accel/amdxdna/aie2_pm.c
>> @@ -31,7 +31,7 @@ int aie2_pm_set_dpm(struct amdxdna_dev_hdl *ndev,
>> u32 dpm_level)
>> {
>> int ret;
>> - ret = amdxdna_pm_resume_get(ndev->xdna);
>> + ret = amdxdna_pm_resume_get_locked(ndev->xdna);
>> if (ret)
>> return ret;
>> diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c
>> b/drivers/accel/amdxdna/amdxdna_ctx.c
>> index d17aef89a0ad..db3aa26fb55f 100644
>> --- a/drivers/accel/amdxdna/amdxdna_ctx.c
>> +++ b/drivers/accel/amdxdna/amdxdna_ctx.c
>> @@ -266,9 +266,9 @@ int amdxdna_drm_config_hwctx_ioctl(struct
>> drm_device *dev, void *data, struct dr
>> struct amdxdna_drm_config_hwctx *args = data;
>> struct amdxdna_dev *xdna = to_xdna_dev(dev);
>> struct amdxdna_hwctx *hwctx;
>> - int ret, idx;
>> u32 buf_size;
>> void *buf;
>> + int ret;
>> u64 val;
>> if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
>> @@ -310,20 +310,17 @@ int amdxdna_drm_config_hwctx_ioctl(struct
>> drm_device *dev, void *data, struct dr
>> return -EINVAL;
>> }
>> - mutex_lock(&xdna->dev_lock);
>> - idx = srcu_read_lock(&client->hwctx_srcu);
>> + guard(mutex)(&xdna->dev_lock);
>> hwctx = xa_load(&client->hwctx_xa, args->handle);
>> if (!hwctx) {
>> XDNA_DBG(xdna, "PID %d failed to get hwctx %d",
>> client->pid, args->handle);
>> ret = -EINVAL;
>> - goto unlock_srcu;
>> + goto free_buf;
>> }
>> ret = xdna->dev_info->ops->hwctx_config(hwctx,
>> args->param_type, val, buf, buf_size);
>> -unlock_srcu:
>> - srcu_read_unlock(&client->hwctx_srcu, idx);
>> - mutex_unlock(&xdna->dev_lock);
>> +free_buf:
>> kfree(buf);
>> return ret;
>> }
>> @@ -334,7 +331,7 @@ int amdxdna_hwctx_sync_debug_bo(struct
>> amdxdna_client *client, u32 debug_bo_hdl)
>> struct amdxdna_hwctx *hwctx;
>> struct amdxdna_gem_obj *abo;
>> struct drm_gem_object *gobj;
>> - int ret, idx;
>> + int ret;
>> if (!xdna->dev_info->ops->hwctx_sync_debug_bo)
>> return -EOPNOTSUPP;
>> @@ -345,17 +342,15 @@ int amdxdna_hwctx_sync_debug_bo(struct
>> amdxdna_client *client, u32 debug_bo_hdl)
>> abo = to_xdna_obj(gobj);
>> guard(mutex)(&xdna->dev_lock);
>> - idx = srcu_read_lock(&client->hwctx_srcu);
>> hwctx = xa_load(&client->hwctx_xa, abo->assigned_hwctx);
>> if (!hwctx) {
>> ret = -EINVAL;
>> - goto unlock_srcu;
>> + goto put_obj;
>> }
>> ret = xdna->dev_info->ops->hwctx_sync_debug_bo(hwctx,
>> debug_bo_hdl);
>> -unlock_srcu:
>> - srcu_read_unlock(&client->hwctx_srcu, idx);
>> +put_obj:
>> drm_gem_object_put(gobj);
>> return ret;
>> }
>> diff --git a/drivers/accel/amdxdna/amdxdna_pm.c
>> b/drivers/accel/amdxdna/amdxdna_pm.c
>> index d024d480521c..b1fafddd7ad5 100644
>> --- a/drivers/accel/amdxdna/amdxdna_pm.c
>> +++ b/drivers/accel/amdxdna/amdxdna_pm.c
>> @@ -16,6 +16,7 @@ int amdxdna_pm_suspend(struct device *dev)
>> struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
>> int ret = -EOPNOTSUPP;
>> + guard(mutex)(&xdna->dev_lock);
>> if (xdna->dev_info->ops->suspend)
>> ret = xdna->dev_info->ops->suspend(xdna);
>> @@ -28,6 +29,7 @@ int amdxdna_pm_resume(struct device *dev)
>> struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
>> int ret = -EOPNOTSUPP;
>> + guard(mutex)(&xdna->dev_lock);
>> if (xdna->dev_info->ops->resume)
>> ret = xdna->dev_info->ops->resume(xdna);
>> diff --git a/drivers/accel/amdxdna/amdxdna_pm.h
>> b/drivers/accel/amdxdna/amdxdna_pm.h
>> index 77b2d6e45570..3d26b973e0e3 100644
>> --- a/drivers/accel/amdxdna/amdxdna_pm.h
>> +++ b/drivers/accel/amdxdna/amdxdna_pm.h
>> @@ -15,4 +15,15 @@ void amdxdna_pm_suspend_put(struct amdxdna_dev
>> *xdna);
>> void amdxdna_pm_init(struct amdxdna_dev *xdna);
>> void amdxdna_pm_fini(struct amdxdna_dev *xdna);
>> +static inline int amdxdna_pm_resume_get_locked(struct amdxdna_dev
>> *xdna)
>> +{
>> + int ret;
>> +
>> + mutex_unlock(&xdna->dev_lock);
>> + ret = amdxdna_pm_resume_get(xdna);
>> + mutex_lock(&xdna->dev_lock);
>> +
>> + return ret;
>> +}
>> +
>> #endif /* _AMDXDNA_PM_H_ */
>
prev parent reply other threads:[~2026-02-11 19:30 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-10 19:16 [PATCH V1] accel/amdxdna: Fix dead lock for suspend and resume Lizhi Hou
2026-02-11 6:16 ` Claude review: " Claude Code Review Bot
2026-02-11 6:16 ` Claude Code Review Bot
2026-02-11 19:23 ` [PATCH V1] " Mario Limonciello
2026-02-11 19:29 ` Lizhi Hou [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=34ab9aee-30b4-d4b2-c23f-cd97da35f133@amd.com \
--to=lizhi.hou@amd.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=linux-kernel@vger.kernel.org \
--cc=maciej.falkowski@linux.intel.com \
--cc=mario.limonciello@amd.com \
--cc=max.zhen@amd.com \
--cc=ogabbay@kernel.org \
--cc=quic_jhugo@quicinc.com \
--cc=sonal.santan@amd.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox