* [PATCH V1] accel/amdxdna: Fix dead lock for suspend and resume
@ 2026-02-10 19:16 Lizhi Hou
2026-02-11 6:16 ` Claude review: " Claude Code Review Bot
` (2 more replies)
0 siblings, 3 replies; 5+ messages in thread
From: Lizhi Hou @ 2026-02-10 19:16 UTC (permalink / raw)
To: ogabbay, quic_jhugo, dri-devel, maciej.falkowski
Cc: Lizhi Hou, linux-kernel, max.zhen, sonal.santan,
mario.limonciello
When an application issues a query IOCTL while auto suspend is running,
a deadlock can occur. The query path holds dev_lock and then calls
pm_runtime_resume_and_get(), which waits for the ongoing suspend to
complete. Meanwhile, the suspend callback attempts to acquire dev_lock
and blocks, resulting in a deadlock.
Fix this by releasing dev_lock before calling pm_runtime_resume_and_get()
and reacquiring it after the call completes. Also acquire dev_lock in the
resume callback to keep the locking consistent.
Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
---
drivers/accel/amdxdna/aie2_ctx.c | 4 ++--
drivers/accel/amdxdna/aie2_pci.c | 7 +++----
drivers/accel/amdxdna/aie2_pm.c | 2 +-
drivers/accel/amdxdna/amdxdna_ctx.c | 19 +++++++------------
drivers/accel/amdxdna/amdxdna_pm.c | 2 ++
drivers/accel/amdxdna/amdxdna_pm.h | 11 +++++++++++
6 files changed, 26 insertions(+), 19 deletions(-)
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
index 37d05f2e986f..58e146172b61 100644
--- a/drivers/accel/amdxdna/aie2_ctx.c
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -629,7 +629,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
goto free_entity;
}
- ret = amdxdna_pm_resume_get(xdna);
+ ret = amdxdna_pm_resume_get_locked(xdna);
if (ret)
goto free_col_list;
@@ -760,7 +760,7 @@ static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size
if (!hwctx->cus)
return -ENOMEM;
- ret = amdxdna_pm_resume_get(xdna);
+ ret = amdxdna_pm_resume_get_locked(xdna);
if (ret)
goto free_cus;
diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
index f70ccf0f3c01..5b326e4610e6 100644
--- a/drivers/accel/amdxdna/aie2_pci.c
+++ b/drivers/accel/amdxdna/aie2_pci.c
@@ -451,7 +451,6 @@ static int aie2_hw_suspend(struct amdxdna_dev *xdna)
{
struct amdxdna_client *client;
- guard(mutex)(&xdna->dev_lock);
list_for_each_entry(client, &xdna->client_list, node)
aie2_hwctx_suspend(client);
@@ -951,7 +950,7 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i
if (!drm_dev_enter(&xdna->ddev, &idx))
return -ENODEV;
- ret = amdxdna_pm_resume_get(xdna);
+ ret = amdxdna_pm_resume_get_locked(xdna);
if (ret)
goto dev_exit;
@@ -1044,7 +1043,7 @@ static int aie2_get_array(struct amdxdna_client *client,
if (!drm_dev_enter(&xdna->ddev, &idx))
return -ENODEV;
- ret = amdxdna_pm_resume_get(xdna);
+ ret = amdxdna_pm_resume_get_locked(xdna);
if (ret)
goto dev_exit;
@@ -1134,7 +1133,7 @@ static int aie2_set_state(struct amdxdna_client *client,
if (!drm_dev_enter(&xdna->ddev, &idx))
return -ENODEV;
- ret = amdxdna_pm_resume_get(xdna);
+ ret = amdxdna_pm_resume_get_locked(xdna);
if (ret)
goto dev_exit;
diff --git a/drivers/accel/amdxdna/aie2_pm.c b/drivers/accel/amdxdna/aie2_pm.c
index 579b8be13b18..29bd4403a94d 100644
--- a/drivers/accel/amdxdna/aie2_pm.c
+++ b/drivers/accel/amdxdna/aie2_pm.c
@@ -31,7 +31,7 @@ int aie2_pm_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
{
int ret;
- ret = amdxdna_pm_resume_get(ndev->xdna);
+ ret = amdxdna_pm_resume_get_locked(ndev->xdna);
if (ret)
return ret;
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
index d17aef89a0ad..db3aa26fb55f 100644
--- a/drivers/accel/amdxdna/amdxdna_ctx.c
+++ b/drivers/accel/amdxdna/amdxdna_ctx.c
@@ -266,9 +266,9 @@ int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
struct amdxdna_drm_config_hwctx *args = data;
struct amdxdna_dev *xdna = to_xdna_dev(dev);
struct amdxdna_hwctx *hwctx;
- int ret, idx;
u32 buf_size;
void *buf;
+ int ret;
u64 val;
if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
@@ -310,20 +310,17 @@ int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
return -EINVAL;
}
- mutex_lock(&xdna->dev_lock);
- idx = srcu_read_lock(&client->hwctx_srcu);
+ guard(mutex)(&xdna->dev_lock);
hwctx = xa_load(&client->hwctx_xa, args->handle);
if (!hwctx) {
XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle);
ret = -EINVAL;
- goto unlock_srcu;
+ goto free_buf;
}
ret = xdna->dev_info->ops->hwctx_config(hwctx, args->param_type, val, buf, buf_size);
-unlock_srcu:
- srcu_read_unlock(&client->hwctx_srcu, idx);
- mutex_unlock(&xdna->dev_lock);
+free_buf:
kfree(buf);
return ret;
}
@@ -334,7 +331,7 @@ int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl)
struct amdxdna_hwctx *hwctx;
struct amdxdna_gem_obj *abo;
struct drm_gem_object *gobj;
- int ret, idx;
+ int ret;
if (!xdna->dev_info->ops->hwctx_sync_debug_bo)
return -EOPNOTSUPP;
@@ -345,17 +342,15 @@ int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl)
abo = to_xdna_obj(gobj);
guard(mutex)(&xdna->dev_lock);
- idx = srcu_read_lock(&client->hwctx_srcu);
hwctx = xa_load(&client->hwctx_xa, abo->assigned_hwctx);
if (!hwctx) {
ret = -EINVAL;
- goto unlock_srcu;
+ goto put_obj;
}
ret = xdna->dev_info->ops->hwctx_sync_debug_bo(hwctx, debug_bo_hdl);
-unlock_srcu:
- srcu_read_unlock(&client->hwctx_srcu, idx);
+put_obj:
drm_gem_object_put(gobj);
return ret;
}
diff --git a/drivers/accel/amdxdna/amdxdna_pm.c b/drivers/accel/amdxdna/amdxdna_pm.c
index d024d480521c..b1fafddd7ad5 100644
--- a/drivers/accel/amdxdna/amdxdna_pm.c
+++ b/drivers/accel/amdxdna/amdxdna_pm.c
@@ -16,6 +16,7 @@ int amdxdna_pm_suspend(struct device *dev)
struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
int ret = -EOPNOTSUPP;
+ guard(mutex)(&xdna->dev_lock);
if (xdna->dev_info->ops->suspend)
ret = xdna->dev_info->ops->suspend(xdna);
@@ -28,6 +29,7 @@ int amdxdna_pm_resume(struct device *dev)
struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
int ret = -EOPNOTSUPP;
+ guard(mutex)(&xdna->dev_lock);
if (xdna->dev_info->ops->resume)
ret = xdna->dev_info->ops->resume(xdna);
diff --git a/drivers/accel/amdxdna/amdxdna_pm.h b/drivers/accel/amdxdna/amdxdna_pm.h
index 77b2d6e45570..3d26b973e0e3 100644
--- a/drivers/accel/amdxdna/amdxdna_pm.h
+++ b/drivers/accel/amdxdna/amdxdna_pm.h
@@ -15,4 +15,15 @@ void amdxdna_pm_suspend_put(struct amdxdna_dev *xdna);
void amdxdna_pm_init(struct amdxdna_dev *xdna);
void amdxdna_pm_fini(struct amdxdna_dev *xdna);
+static inline int amdxdna_pm_resume_get_locked(struct amdxdna_dev *xdna)
+{
+ int ret;
+
+ mutex_unlock(&xdna->dev_lock);
+ ret = amdxdna_pm_resume_get(xdna);
+ mutex_lock(&xdna->dev_lock);
+
+ return ret;
+}
+
#endif /* _AMDXDNA_PM_H_ */
--
2.34.1
* Claude review: accel/amdxdna: Fix dead lock for suspend and resume
2026-02-10 19:16 [PATCH V1] accel/amdxdna: Fix dead lock for suspend and resume Lizhi Hou
@ 2026-02-11 6:16 ` Claude Code Review Bot
2026-02-11 6:16 ` Claude Code Review Bot
2026-02-11 19:23 ` [PATCH V1] " Mario Limonciello
2 siblings, 0 replies; 5+ messages in thread
From: Claude Code Review Bot @ 2026-02-11 6:16 UTC (permalink / raw)
To: dri-devel-reviews
Overall Series Review
Subject: accel/amdxdna: Fix dead lock for suspend and resume
Author: Lizhi Hou <lizhi.hou@amd.com>
Patches: 1
Reviewed: 2026-02-11T16:16:32.498103
---
This single-patch series addresses a **critical deadlock** in the amdxdna accelerator driver that occurs during power management operations. The deadlock scenario is:
1. IOCTL path: holds `dev_lock` → calls `pm_runtime_resume_and_get()` → waits for suspend to complete
2. Suspend path: tries to acquire `dev_lock` → blocks waiting for IOCTL to release it
**Deadlock: ABBA lock ordering problem** (dev_lock vs PM subsystem internal locks)
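As a rough illustration only (userspace C, not driver code): `dev_lock` keeps its name and a second mutex stands in for the runtime-PM wait; the interleaving below is the one described above.
```c
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t pm_lock  = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for the PM dependency */

static void *ioctl_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&dev_lock);	/* query IOCTL takes dev_lock first */
	usleep(10000);			/* widen the race window */
	pthread_mutex_lock(&pm_lock);	/* ~ pm_runtime_resume_and_get(): waits for the in-flight suspend */
	pthread_mutex_unlock(&pm_lock);
	pthread_mutex_unlock(&dev_lock);
	return NULL;
}

static void *suspend_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&pm_lock);	/* runtime suspend already in progress */
	usleep(10000);
	if (pthread_mutex_trylock(&dev_lock))
		/* suspend callback cannot take dev_lock; the real driver would deadlock here */
		printf("suspend needs dev_lock, IOCTL needs PM: deadlock\n");
	else
		pthread_mutex_unlock(&dev_lock);
	pthread_mutex_unlock(&pm_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, ioctl_path, NULL);
	pthread_create(&b, NULL, suspend_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
```
Built with `cc -pthread`, the suspend thread reports the blocked acquisition instead of hanging, but the lock ordering is the same one the driver hits.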
The fix introduces `amdxdna_pm_resume_get_locked()` helper that temporarily drops `dev_lock` before calling into PM runtime, then reacquires it. The suspend/resume callbacks now also explicitly take `dev_lock`.
**Concerns:**
- **Race conditions**: Dropping and reacquiring locks creates windows where device state could change
- **Inconsistent locking**: Not all PM callsites converted (only some use `_locked` variant)
- **Unprotected hardware access**: Code between unlock/lock may execute with device in inconsistent state
- **Missing validation**: After reacquiring lock, no checks if device state changed
- **SRCU removal**: Unrelated cleanup removing SRCU protection without justification
**Severity: HIGH** - This fixes a deadlock but introduces potential race conditions that could cause use-after-free or hardware corruption.
---
Generated by Claude Code Patch Reviewer
* Claude review: accel/amdxdna: Fix dead lock for suspend and resume
2026-02-10 19:16 [PATCH V1] accel/amdxdna: Fix dead lock for suspend and resume Lizhi Hou
2026-02-11 6:16 ` Claude review: " Claude Code Review Bot
@ 2026-02-11 6:16 ` Claude Code Review Bot
2026-02-11 19:23 ` [PATCH V1] " Mario Limonciello
2 siblings, 0 replies; 5+ messages in thread
From: Claude Code Review Bot @ 2026-02-11 6:16 UTC (permalink / raw)
To: dri-devel-reviews
Patch Review
**Commit Message Analysis:**
- Clearly describes the deadlock scenario
- Missing: Fixes tag, impact description, testing methodology
- Missing: Whether this is observed in production or theoretical
- Missing: Alternative approaches considered
**Code Review:**
#### 1. Core Lock Inversion Fix (amdxdna_pm.h:337-346)
```c
+static inline int amdxdna_pm_resume_get_locked(struct amdxdna_dev *xdna)
+{
+ int ret;
+
+ mutex_unlock(&xdna->dev_lock);
+ ret = amdxdna_pm_resume_get(xdna);
+ mutex_lock(&xdna->dev_lock);
+
+ return ret;
+}
```
**Issues:**
1. **No lockdep annotation** - Should use `lockdep_assert_held()` to verify caller holds lock
2. **No validation after relock** - Device state could change while lock was dropped:
- Device could be removed (hot-unplug)
- Suspend could complete and immediately start again
- Hardware could be reset
3. **Error path unclear** - If `amdxdna_pm_resume_get()` fails, we reacquire lock but device may be suspended
4. **Window of vulnerability** - Other threads can acquire `dev_lock` between unlock/lock and modify state
**Suggested improvements:**
```c
static inline int amdxdna_pm_resume_get_locked(struct amdxdna_dev *xdna)
{
int ret;
lockdep_assert_held(&xdna->dev_lock);
mutex_unlock(&xdna->dev_lock);
ret = amdxdna_pm_resume_get(xdna);
mutex_lock(&xdna->dev_lock);
/* TODO: Validate device state unchanged? */
return ret;
}
```
#### 2. Suspend Callback Changes (aie2_pci.c:193-199)
```c
static int aie2_hw_suspend(struct amdxdna_dev *xdna)
{
struct amdxdna_client *client;
- guard(mutex)(&xdna->dev_lock);
list_for_each_entry(client, &xdna->client_list, node)
aie2_hwctx_suspend(client);
```
**Critical Issue:**
- **Removes lock protection from client_list traversal**
- The `client_list` is now accessed **without any lock protection**
- This is a **use-after-free bug** - clients can be added/removed during iteration
- `list_for_each_entry()` is not safe without synchronization
**The commit message says:** "acquire dev_lock in the resume callback to keep the locking consistent"
But the code **removes** the lock from suspend! This is the **opposite** of what's described.
**Where is the lock now?** Looking at amdxdna_pm.c:313-319:
```c
int amdxdna_pm_suspend(struct device *dev)
{
struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
int ret = -EOPNOTSUPP;
+ guard(mutex)(&xdna->dev_lock);
if (xdna->dev_info->ops->suspend)
ret = xdna->dev_info->ops->suspend(xdna);
```
**Ah!** The lock moved **up** to the PM callback wrapper. This is actually correct, but:
- The lock is now held across the **entire** suspend/resume operation
- This is the **original deadlock** scenario - holding dev_lock while in PM callbacks
- This **does not fix the deadlock**; it makes it **worse**
**Deadlock scenario still exists:**
1. IOCTL: holds dev_lock → calls `pm_runtime_resume_and_get()`
2. PM runtime: waits for idle, tries to call `amdxdna_pm_suspend()`
3. `amdxdna_pm_suspend()`: tries to acquire dev_lock → **DEADLOCK**
#### 3. IOCTL Path Conversions (aie2_pci.c, aie2_ctx.c, aie2_pm.c)
Multiple callsites changed from `amdxdna_pm_resume_get()` to `amdxdna_pm_resume_get_locked()`:
```c
- ret = amdxdna_pm_resume_get(xdna);
+ ret = amdxdna_pm_resume_get_locked(xdna);
```
**Issue:**
- These callsites already hold `dev_lock` (verified by the need for `_locked` variant)
- After the unlock/lock cycle, **no validation** that:
- Hardware context still valid
- Device still present
- Resources not freed
**Example - aie2_ctx.c:629-678 (aie2_hwctx_init):**
```c
ret = amdxdna_pm_resume_get_locked(xdna); // Drops & reacquires dev_lock
if (ret)
goto free_col_list;
// Device could have been suspended/removed while lock was dropped
ret = xdna->dev_info->ops->hwctx_config(hwctx, ...); // Uses hardware!
```
**Race condition:** Hardware could be in suspend state or removed entirely.
#### 4. Unrelated SRCU Removal (amdxdna_ctx.c:260-279)
```c
- mutex_lock(&xdna->dev_lock);
- idx = srcu_read_lock(&client->hwctx_srcu);
+ guard(mutex)(&xdna->dev_lock);
hwctx = xa_load(&client->hwctx_xa, args->handle);
if (!hwctx) {
XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle);
ret = -EINVAL;
- goto unlock_srcu;
+ goto free_buf;
}
ret = xdna->dev_info->ops->hwctx_config(hwctx, args->param_type, val, buf, buf_size);
-unlock_srcu:
- srcu_read_unlock(&client->hwctx_srcu, idx);
```
**Critical Issue:**
- **Removes SRCU read-side critical section**
- SRCU protects against hwctx being freed during use
- Now relying **only** on `dev_lock` - but we just introduced code that **drops** dev_lock!
- This creates **use-after-free** vulnerability:
1. Thread A: loads hwctx from xa_load()
2. Thread A: calls `hwctx_config()` which calls `amdxdna_pm_resume_get_locked()`
3. Thread A: drops dev_lock in `_locked()` helper
4. Thread B: acquires dev_lock, destroys hwctx, frees memory
5. Thread A: reacquires dev_lock, returns from `_locked()`
6. Thread A: continues using freed hwctx → **use-after-free**
**This removal is WRONG and introduces memory safety bugs.**
The same issue exists in `amdxdna_hwctx_sync_debug_bo()` at amdxdna_ctx.c:331-308.
#### 5. Missing Error Handling
Throughout the patch, after `amdxdna_pm_resume_get_locked()` fails, the code continues to hold `dev_lock` while the device remains suspended. Should there be cleanup or state validation?
**Summary of Technical Issues:**
| Issue | Severity | Location |
|-------|----------|----------|
| client_list traversal without lock | **CRITICAL** | aie2_pci.c:198 |
| SRCU removal creates use-after-free | **CRITICAL** | amdxdna_ctx.c:261-274 |
| Deadlock still possible | **HIGH** | amdxdna_pm.c:313-327 |
| No state validation after relock | **HIGH** | All `_locked()` callsites |
| No lockdep annotations | **MEDIUM** | amdxdna_pm.h:337 |
| Missing Fixes tag | **LOW** | Commit message |
**Fundamental Design Flaw:**
The approach of dropping locks to avoid deadlock is **not correct**. The proper solutions are:
1. **Refcount-based approach**: Take PM reference before acquiring locks (see the sketch after this list)
2. **Separate locks**: Use different locks for PM and device state
3. **Lock ordering**: Define and enforce consistent lock ordering
4. **PM workqueues**: Handle PM transitions asynchronously
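A rough sketch of option 1 only: `amdxdna_query_example()` is a made-up name and the body is schematic, but the PM helpers and `dev_lock` are the driver's existing ones. The point is the ordering: the PM reference is taken while no driver lock is held, so the suspend callback could keep taking `dev_lock` without the inversion described above.
```c
/* Hypothetical shape of the refcount-first ordering; not a drop-in change. */
static int amdxdna_query_example(struct amdxdna_dev *xdna)
{
	int ret;

	/* No driver locks held yet: safe to wait for resume/suspend to settle. */
	ret = amdxdna_pm_resume_get(xdna);
	if (ret)
		return ret;

	scoped_guard(mutex, &xdna->dev_lock) {
		/* Device is awake and dev_lock is held; do the locked work here. */
	}

	amdxdna_pm_suspend_put(xdna);
	return 0;
}
```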
**Recommendation: NAK**
This patch introduces more bugs than it fixes:
- Use-after-free from SRCU removal
- Potential use-after-free from client_list access
- Race conditions from lock dropping
- May not even fix the original deadlock
The driver needs a **comprehensive locking redesign**, not a tactical patch.
---
Generated by Claude Code Patch Reviewer
* Re: [PATCH V1] accel/amdxdna: Fix dead lock for suspend and resume
2026-02-10 19:16 [PATCH V1] accel/amdxdna: Fix dead lock for suspend and resume Lizhi Hou
2026-02-11 6:16 ` Claude review: " Claude Code Review Bot
2026-02-11 6:16 ` Claude Code Review Bot
@ 2026-02-11 19:23 ` Mario Limonciello
2026-02-11 19:29 ` Lizhi Hou
2 siblings, 1 reply; 5+ messages in thread
From: Mario Limonciello @ 2026-02-11 19:23 UTC (permalink / raw)
To: Lizhi Hou, ogabbay, quic_jhugo, dri-devel, maciej.falkowski
Cc: linux-kernel, max.zhen, sonal.santan
On 2/10/26 1:16 PM, Lizhi Hou wrote:
> When an application issues a query IOCTL while auto suspend is running,
> a deadlock can occur. The query path holds dev_lock and then calls
> pm_runtime_resume_and_get(), which waits for the ongoing suspend to
> complete. Meanwhile, the suspend callback attempts to acquire dev_lock
> and blocks, resulting in a deadlock.
>
> Fix this by releasing dev_lock before calling pm_runtime_resume_and_get()
> and reacquiring it after the call completes. Also acquire dev_lock in the
> resume callback to keep the locking consistent.
>
> Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
No Fixes tag?
> ---
> drivers/accel/amdxdna/aie2_ctx.c | 4 ++--
> drivers/accel/amdxdna/aie2_pci.c | 7 +++----
> drivers/accel/amdxdna/aie2_pm.c | 2 +-
> drivers/accel/amdxdna/amdxdna_ctx.c | 19 +++++++------------
> drivers/accel/amdxdna/amdxdna_pm.c | 2 ++
> drivers/accel/amdxdna/amdxdna_pm.h | 11 +++++++++++
> 6 files changed, 26 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
> index 37d05f2e986f..58e146172b61 100644
> --- a/drivers/accel/amdxdna/aie2_ctx.c
> +++ b/drivers/accel/amdxdna/aie2_ctx.c
> @@ -629,7 +629,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
> goto free_entity;
> }
>
> - ret = amdxdna_pm_resume_get(xdna);
> + ret = amdxdna_pm_resume_get_locked(xdna);
> if (ret)
> goto free_col_list;
>
> @@ -760,7 +760,7 @@ static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size
> if (!hwctx->cus)
> return -ENOMEM;
>
> - ret = amdxdna_pm_resume_get(xdna);
> + ret = amdxdna_pm_resume_get_locked(xdna);
> if (ret)
> goto free_cus;
>
> diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
> index f70ccf0f3c01..5b326e4610e6 100644
> --- a/drivers/accel/amdxdna/aie2_pci.c
> +++ b/drivers/accel/amdxdna/aie2_pci.c
> @@ -451,7 +451,6 @@ static int aie2_hw_suspend(struct amdxdna_dev *xdna)
> {
> struct amdxdna_client *client;
>
> - guard(mutex)(&xdna->dev_lock);
> list_for_each_entry(client, &xdna->client_list, node)
> aie2_hwctx_suspend(client);
>
> @@ -951,7 +950,7 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i
> if (!drm_dev_enter(&xdna->ddev, &idx))
> return -ENODEV;
>
> - ret = amdxdna_pm_resume_get(xdna);
> + ret = amdxdna_pm_resume_get_locked(xdna);
> if (ret)
> goto dev_exit;
>
> @@ -1044,7 +1043,7 @@ static int aie2_get_array(struct amdxdna_client *client,
> if (!drm_dev_enter(&xdna->ddev, &idx))
> return -ENODEV;
>
> - ret = amdxdna_pm_resume_get(xdna);
> + ret = amdxdna_pm_resume_get_locked(xdna);
> if (ret)
> goto dev_exit;
>
> @@ -1134,7 +1133,7 @@ static int aie2_set_state(struct amdxdna_client *client,
> if (!drm_dev_enter(&xdna->ddev, &idx))
> return -ENODEV;
>
> - ret = amdxdna_pm_resume_get(xdna);
> + ret = amdxdna_pm_resume_get_locked(xdna);
> if (ret)
> goto dev_exit;
>
> diff --git a/drivers/accel/amdxdna/aie2_pm.c b/drivers/accel/amdxdna/aie2_pm.c
> index 579b8be13b18..29bd4403a94d 100644
> --- a/drivers/accel/amdxdna/aie2_pm.c
> +++ b/drivers/accel/amdxdna/aie2_pm.c
> @@ -31,7 +31,7 @@ int aie2_pm_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
> {
> int ret;
>
> - ret = amdxdna_pm_resume_get(ndev->xdna);
> + ret = amdxdna_pm_resume_get_locked(ndev->xdna);
> if (ret)
> return ret;
>
> diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
> index d17aef89a0ad..db3aa26fb55f 100644
> --- a/drivers/accel/amdxdna/amdxdna_ctx.c
> +++ b/drivers/accel/amdxdna/amdxdna_ctx.c
> @@ -266,9 +266,9 @@ int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
> struct amdxdna_drm_config_hwctx *args = data;
> struct amdxdna_dev *xdna = to_xdna_dev(dev);
> struct amdxdna_hwctx *hwctx;
> - int ret, idx;
> u32 buf_size;
> void *buf;
> + int ret;
> u64 val;
>
> if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
> @@ -310,20 +310,17 @@ int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
> return -EINVAL;
> }
>
> - mutex_lock(&xdna->dev_lock);
> - idx = srcu_read_lock(&client->hwctx_srcu);
> + guard(mutex)(&xdna->dev_lock);
> hwctx = xa_load(&client->hwctx_xa, args->handle);
> if (!hwctx) {
> XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle);
> ret = -EINVAL;
> - goto unlock_srcu;
> + goto free_buf;
> }
>
> ret = xdna->dev_info->ops->hwctx_config(hwctx, args->param_type, val, buf, buf_size);
>
> -unlock_srcu:
> - srcu_read_unlock(&client->hwctx_srcu, idx);
> - mutex_unlock(&xdna->dev_lock);
> +free_buf:
> kfree(buf);
> return ret;
> }
> @@ -334,7 +331,7 @@ int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl)
> struct amdxdna_hwctx *hwctx;
> struct amdxdna_gem_obj *abo;
> struct drm_gem_object *gobj;
> - int ret, idx;
> + int ret;
>
> if (!xdna->dev_info->ops->hwctx_sync_debug_bo)
> return -EOPNOTSUPP;
> @@ -345,17 +342,15 @@ int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl)
>
> abo = to_xdna_obj(gobj);
> guard(mutex)(&xdna->dev_lock);
> - idx = srcu_read_lock(&client->hwctx_srcu);
> hwctx = xa_load(&client->hwctx_xa, abo->assigned_hwctx);
> if (!hwctx) {
> ret = -EINVAL;
> - goto unlock_srcu;
> + goto put_obj;
> }
>
> ret = xdna->dev_info->ops->hwctx_sync_debug_bo(hwctx, debug_bo_hdl);
>
> -unlock_srcu:
> - srcu_read_unlock(&client->hwctx_srcu, idx);
> +put_obj:
> drm_gem_object_put(gobj);
> return ret;
> }
> diff --git a/drivers/accel/amdxdna/amdxdna_pm.c b/drivers/accel/amdxdna/amdxdna_pm.c
> index d024d480521c..b1fafddd7ad5 100644
> --- a/drivers/accel/amdxdna/amdxdna_pm.c
> +++ b/drivers/accel/amdxdna/amdxdna_pm.c
> @@ -16,6 +16,7 @@ int amdxdna_pm_suspend(struct device *dev)
> struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
> int ret = -EOPNOTSUPP;
>
> + guard(mutex)(&xdna->dev_lock);
> if (xdna->dev_info->ops->suspend)
> ret = xdna->dev_info->ops->suspend(xdna);
>
> @@ -28,6 +29,7 @@ int amdxdna_pm_resume(struct device *dev)
> struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
> int ret = -EOPNOTSUPP;
>
> + guard(mutex)(&xdna->dev_lock);
> if (xdna->dev_info->ops->resume)
> ret = xdna->dev_info->ops->resume(xdna);
>
> diff --git a/drivers/accel/amdxdna/amdxdna_pm.h b/drivers/accel/amdxdna/amdxdna_pm.h
> index 77b2d6e45570..3d26b973e0e3 100644
> --- a/drivers/accel/amdxdna/amdxdna_pm.h
> +++ b/drivers/accel/amdxdna/amdxdna_pm.h
> @@ -15,4 +15,15 @@ void amdxdna_pm_suspend_put(struct amdxdna_dev *xdna);
> void amdxdna_pm_init(struct amdxdna_dev *xdna);
> void amdxdna_pm_fini(struct amdxdna_dev *xdna);
>
> +static inline int amdxdna_pm_resume_get_locked(struct amdxdna_dev *xdna)
> +{
> + int ret;
> +
> + mutex_unlock(&xdna->dev_lock);
> + ret = amdxdna_pm_resume_get(xdna);
> + mutex_lock(&xdna->dev_lock);
> +
> + return ret;
> +}
> +
> #endif /* _AMDXDNA_PM_H_ */
* Re: [PATCH V1] accel/amdxdna: Fix dead lock for suspend and resume
2026-02-11 19:23 ` [PATCH V1] " Mario Limonciello
@ 2026-02-11 19:29 ` Lizhi Hou
0 siblings, 0 replies; 5+ messages in thread
From: Lizhi Hou @ 2026-02-11 19:29 UTC (permalink / raw)
To: Mario Limonciello, ogabbay, quic_jhugo, dri-devel,
maciej.falkowski
Cc: linux-kernel, max.zhen, sonal.santan
On 2/11/26 11:23, Mario Limonciello wrote:
> On 2/10/26 1:16 PM, Lizhi Hou wrote:
>> When an application issues a query IOCTL while auto suspend is running,
>> a deadlock can occur. The query path holds dev_lock and then calls
>> pm_runtime_resume_and_get(), which waits for the ongoing suspend to
>> complete. Meanwhile, the suspend callback attempts to acquire dev_lock
>> and blocks, resulting in a deadlock.
>>
>> Fix this by releasing dev_lock before calling
>> pm_runtime_resume_and_get()
>> and reacquiring it after the call completes. Also acquire dev_lock in
>> the
>> resume callback to keep the locking consistent.
>>
>> Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
> No Fixes tag?
Oh, forgot that. Will add and post V2 patch
Thanks,
Lizhi
>
>> ---
>> drivers/accel/amdxdna/aie2_ctx.c | 4 ++--
>> drivers/accel/amdxdna/aie2_pci.c | 7 +++----
>> drivers/accel/amdxdna/aie2_pm.c | 2 +-
>> drivers/accel/amdxdna/amdxdna_ctx.c | 19 +++++++------------
>> drivers/accel/amdxdna/amdxdna_pm.c | 2 ++
>> drivers/accel/amdxdna/amdxdna_pm.h | 11 +++++++++++
>> 6 files changed, 26 insertions(+), 19 deletions(-)
>>
>> diff --git a/drivers/accel/amdxdna/aie2_ctx.c
>> b/drivers/accel/amdxdna/aie2_ctx.c
>> index 37d05f2e986f..58e146172b61 100644
>> --- a/drivers/accel/amdxdna/aie2_ctx.c
>> +++ b/drivers/accel/amdxdna/aie2_ctx.c
>> @@ -629,7 +629,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
>> goto free_entity;
>> }
>> - ret = amdxdna_pm_resume_get(xdna);
>> + ret = amdxdna_pm_resume_get_locked(xdna);
>> if (ret)
>> goto free_col_list;
>> @@ -760,7 +760,7 @@ static int aie2_hwctx_cu_config(struct
>> amdxdna_hwctx *hwctx, void *buf, u32 size
>> if (!hwctx->cus)
>> return -ENOMEM;
>> - ret = amdxdna_pm_resume_get(xdna);
>> + ret = amdxdna_pm_resume_get_locked(xdna);
>> if (ret)
>> goto free_cus;
>> diff --git a/drivers/accel/amdxdna/aie2_pci.c
>> b/drivers/accel/amdxdna/aie2_pci.c
>> index f70ccf0f3c01..5b326e4610e6 100644
>> --- a/drivers/accel/amdxdna/aie2_pci.c
>> +++ b/drivers/accel/amdxdna/aie2_pci.c
>> @@ -451,7 +451,6 @@ static int aie2_hw_suspend(struct amdxdna_dev *xdna)
>> {
>> struct amdxdna_client *client;
>> - guard(mutex)(&xdna->dev_lock);
>> list_for_each_entry(client, &xdna->client_list, node)
>> aie2_hwctx_suspend(client);
>> @@ -951,7 +950,7 @@ static int aie2_get_info(struct amdxdna_client
>> *client, struct amdxdna_drm_get_i
>> if (!drm_dev_enter(&xdna->ddev, &idx))
>> return -ENODEV;
>> - ret = amdxdna_pm_resume_get(xdna);
>> + ret = amdxdna_pm_resume_get_locked(xdna);
>> if (ret)
>> goto dev_exit;
>> @@ -1044,7 +1043,7 @@ static int aie2_get_array(struct
>> amdxdna_client *client,
>> if (!drm_dev_enter(&xdna->ddev, &idx))
>> return -ENODEV;
>> - ret = amdxdna_pm_resume_get(xdna);
>> + ret = amdxdna_pm_resume_get_locked(xdna);
>> if (ret)
>> goto dev_exit;
>> @@ -1134,7 +1133,7 @@ static int aie2_set_state(struct
>> amdxdna_client *client,
>> if (!drm_dev_enter(&xdna->ddev, &idx))
>> return -ENODEV;
>> - ret = amdxdna_pm_resume_get(xdna);
>> + ret = amdxdna_pm_resume_get_locked(xdna);
>> if (ret)
>> goto dev_exit;
>> diff --git a/drivers/accel/amdxdna/aie2_pm.c
>> b/drivers/accel/amdxdna/aie2_pm.c
>> index 579b8be13b18..29bd4403a94d 100644
>> --- a/drivers/accel/amdxdna/aie2_pm.c
>> +++ b/drivers/accel/amdxdna/aie2_pm.c
>> @@ -31,7 +31,7 @@ int aie2_pm_set_dpm(struct amdxdna_dev_hdl *ndev,
>> u32 dpm_level)
>> {
>> int ret;
>> - ret = amdxdna_pm_resume_get(ndev->xdna);
>> + ret = amdxdna_pm_resume_get_locked(ndev->xdna);
>> if (ret)
>> return ret;
>> diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c
>> b/drivers/accel/amdxdna/amdxdna_ctx.c
>> index d17aef89a0ad..db3aa26fb55f 100644
>> --- a/drivers/accel/amdxdna/amdxdna_ctx.c
>> +++ b/drivers/accel/amdxdna/amdxdna_ctx.c
>> @@ -266,9 +266,9 @@ int amdxdna_drm_config_hwctx_ioctl(struct
>> drm_device *dev, void *data, struct dr
>> struct amdxdna_drm_config_hwctx *args = data;
>> struct amdxdna_dev *xdna = to_xdna_dev(dev);
>> struct amdxdna_hwctx *hwctx;
>> - int ret, idx;
>> u32 buf_size;
>> void *buf;
>> + int ret;
>> u64 val;
>> if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
>> @@ -310,20 +310,17 @@ int amdxdna_drm_config_hwctx_ioctl(struct
>> drm_device *dev, void *data, struct dr
>> return -EINVAL;
>> }
>> - mutex_lock(&xdna->dev_lock);
>> - idx = srcu_read_lock(&client->hwctx_srcu);
>> + guard(mutex)(&xdna->dev_lock);
>> hwctx = xa_load(&client->hwctx_xa, args->handle);
>> if (!hwctx) {
>> XDNA_DBG(xdna, "PID %d failed to get hwctx %d",
>> client->pid, args->handle);
>> ret = -EINVAL;
>> - goto unlock_srcu;
>> + goto free_buf;
>> }
>> ret = xdna->dev_info->ops->hwctx_config(hwctx,
>> args->param_type, val, buf, buf_size);
>> -unlock_srcu:
>> - srcu_read_unlock(&client->hwctx_srcu, idx);
>> - mutex_unlock(&xdna->dev_lock);
>> +free_buf:
>> kfree(buf);
>> return ret;
>> }
>> @@ -334,7 +331,7 @@ int amdxdna_hwctx_sync_debug_bo(struct
>> amdxdna_client *client, u32 debug_bo_hdl)
>> struct amdxdna_hwctx *hwctx;
>> struct amdxdna_gem_obj *abo;
>> struct drm_gem_object *gobj;
>> - int ret, idx;
>> + int ret;
>> if (!xdna->dev_info->ops->hwctx_sync_debug_bo)
>> return -EOPNOTSUPP;
>> @@ -345,17 +342,15 @@ int amdxdna_hwctx_sync_debug_bo(struct
>> amdxdna_client *client, u32 debug_bo_hdl)
>> abo = to_xdna_obj(gobj);
>> guard(mutex)(&xdna->dev_lock);
>> - idx = srcu_read_lock(&client->hwctx_srcu);
>> hwctx = xa_load(&client->hwctx_xa, abo->assigned_hwctx);
>> if (!hwctx) {
>> ret = -EINVAL;
>> - goto unlock_srcu;
>> + goto put_obj;
>> }
>> ret = xdna->dev_info->ops->hwctx_sync_debug_bo(hwctx,
>> debug_bo_hdl);
>> -unlock_srcu:
>> - srcu_read_unlock(&client->hwctx_srcu, idx);
>> +put_obj:
>> drm_gem_object_put(gobj);
>> return ret;
>> }
>> diff --git a/drivers/accel/amdxdna/amdxdna_pm.c
>> b/drivers/accel/amdxdna/amdxdna_pm.c
>> index d024d480521c..b1fafddd7ad5 100644
>> --- a/drivers/accel/amdxdna/amdxdna_pm.c
>> +++ b/drivers/accel/amdxdna/amdxdna_pm.c
>> @@ -16,6 +16,7 @@ int amdxdna_pm_suspend(struct device *dev)
>> struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
>> int ret = -EOPNOTSUPP;
>> + guard(mutex)(&xdna->dev_lock);
>> if (xdna->dev_info->ops->suspend)
>> ret = xdna->dev_info->ops->suspend(xdna);
>> @@ -28,6 +29,7 @@ int amdxdna_pm_resume(struct device *dev)
>> struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
>> int ret = -EOPNOTSUPP;
>> + guard(mutex)(&xdna->dev_lock);
>> if (xdna->dev_info->ops->resume)
>> ret = xdna->dev_info->ops->resume(xdna);
>> diff --git a/drivers/accel/amdxdna/amdxdna_pm.h
>> b/drivers/accel/amdxdna/amdxdna_pm.h
>> index 77b2d6e45570..3d26b973e0e3 100644
>> --- a/drivers/accel/amdxdna/amdxdna_pm.h
>> +++ b/drivers/accel/amdxdna/amdxdna_pm.h
>> @@ -15,4 +15,15 @@ void amdxdna_pm_suspend_put(struct amdxdna_dev
>> *xdna);
>> void amdxdna_pm_init(struct amdxdna_dev *xdna);
>> void amdxdna_pm_fini(struct amdxdna_dev *xdna);
>> +static inline int amdxdna_pm_resume_get_locked(struct amdxdna_dev
>> *xdna)
>> +{
>> + int ret;
>> +
>> + mutex_unlock(&xdna->dev_lock);
>> + ret = amdxdna_pm_resume_get(xdna);
>> + mutex_lock(&xdna->dev_lock);
>> +
>> + return ret;
>> +}
>> +
>> #endif /* _AMDXDNA_PM_H_ */
>