* [PATCH v3] drm/qxl: Convert qxl release idr to xarray
@ 2026-04-22 3:17 liuqiangneo
2026-04-22 22:03 ` Claude review: " Claude Code Review Bot
2026-04-22 22:03 ` Claude Code Review Bot
0 siblings, 2 replies; 7+ messages in thread
From: liuqiangneo @ 2026-04-22 3:17 UTC (permalink / raw)
To: airlied, kraxel
Cc: maarten.lankhorst, mripard, tzimmermann, airlied, simona,
virtualization, spice-devel, dri-devel, linux-kernel, Qiang Liu
From: Qiang Liu <liuqiang@kylinos.cn>
Replace the release_idr + release_idr_lock with an XArray.
IDR internally uses an XArray, so we can use the XArray directly, which
simplifies our code by removing the IDR wrapper.
Signed-off-by: Qiang Liu <liuqiang@kylinos.cn>
---
v3:
- Change the type of 'handle' to u32.
'xa_limit_31b' restricts the range of 'handle',
so an implicit cast to 'int' is safe.
- Keep xa_load inside xa_lock due to concurrent access.
v2:
- Use xa_limit_31b instead of xa_limit_32b to keep IDs within INT_MAX.
- Use GFP_KERNEL instead of GFP_NOWAIT because the context is sleepable.
- Cast to u32 to avoid sign extension when atomic counter wraps above INT_MAX.
---
drivers/gpu/drm/qxl/qxl_drv.h | 6 ++--
drivers/gpu/drm/qxl/qxl_kms.c | 4 +--
drivers/gpu/drm/qxl/qxl_release.c | 49 ++++++++++++++-----------------
3 files changed, 27 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index cc02b5f10ad9..cf9decf39022 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -35,6 +35,7 @@
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
+#include <linux/xarray.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
@@ -208,11 +209,10 @@ struct qxl_device {
struct qxl_memslot surfaces_slot;
spinlock_t release_lock;
- struct idr release_idr;
- uint32_t release_seqno;
+ struct xarray release_xa;
+ atomic_t release_seqno;
atomic_t release_count;
wait_queue_head_t release_event;
- spinlock_t release_idr_lock;
struct mutex async_io_mutex;
unsigned int last_sent_io_cmd;
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 461b7ab9ad5c..0cebaf88f407 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -227,8 +227,8 @@ int qxl_device_init(struct qxl_device *qdev,
goto cursor_ring_free;
}
- idr_init_base(&qdev->release_idr, 1);
- spin_lock_init(&qdev->release_idr_lock);
+ xa_init_flags(&qdev->release_xa, XA_FLAGS_ALLOC1);
+ atomic_set(&qdev->release_seqno, 0);
spin_lock_init(&qdev->release_lock);
idr_init_base(&qdev->surf_id_idr, 1);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 06979d0e8a9f..afed1096cf7b 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -88,8 +88,9 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
struct qxl_release **ret)
{
struct qxl_release *release;
- int handle;
+ u32 handle;
size_t size = sizeof(*release);
+ int r;
release = kmalloc(size, GFP_KERNEL);
if (!release) {
@@ -102,19 +103,15 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
release->surface_release_id = 0;
INIT_LIST_HEAD(&release->bos);
- idr_preload(GFP_KERNEL);
- spin_lock(&qdev->release_idr_lock);
- handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
- release->base.seqno = ++qdev->release_seqno;
- spin_unlock(&qdev->release_idr_lock);
- idr_preload_end();
- if (handle < 0) {
+ r = xa_alloc(&qdev->release_xa, &handle, release, xa_limit_31b, GFP_KERNEL);
+ release->base.seqno = (u32)atomic_inc_return(&qdev->release_seqno);
+ if (r < 0) {
kfree(release);
*ret = NULL;
- return handle;
+ return r;
}
*ret = release;
- DRM_DEBUG_DRIVER("allocated release %d\n", handle);
+ DRM_DEBUG_DRIVER("allocated release %u\n", handle);
release->id = handle;
return handle;
}
@@ -143,9 +140,7 @@ qxl_release_free(struct qxl_device *qdev,
if (release->surface_release_id)
qxl_surface_id_dealloc(qdev, release->surface_release_id);
- spin_lock(&qdev->release_idr_lock);
- idr_remove(&qdev->release_idr, release->id);
- spin_unlock(&qdev->release_idr_lock);
+ xa_erase(&qdev->release_xa, release->id);
if (dma_fence_was_initialized(&release->base)) {
WARN_ON(list_empty(&release->bos));
@@ -261,14 +256,14 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
struct qxl_release **release)
{
if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
- int idr_ret;
+ int xa_ret;
struct qxl_bo *bo;
union qxl_release_info *info;
/* stash the release after the create command */
- idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
- if (idr_ret < 0)
- return idr_ret;
+ xa_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
+ if (xa_ret < 0)
+ return xa_ret;
bo = create_rel->release_bo;
(*release)->release_bo = bo;
@@ -277,7 +272,7 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
qxl_release_list_add(*release, bo);
info = qxl_release_map(qdev, *release);
- info->id = idr_ret;
+ info->id = xa_ret;
qxl_release_unmap(qdev, *release, info);
return 0;
}
@@ -291,7 +286,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
struct qxl_bo **rbo)
{
struct qxl_bo *bo, *free_bo = NULL;
- int idr_ret;
+ int xa_ret;
int ret = 0;
union qxl_release_info *info;
int cur_idx;
@@ -312,11 +307,11 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
return -EINVAL;
}
- idr_ret = qxl_release_alloc(qdev, type, release);
- if (idr_ret < 0) {
+ xa_ret = qxl_release_alloc(qdev, type, release);
+ if (xa_ret < 0) {
if (rbo)
*rbo = NULL;
- return idr_ret;
+ return xa_ret;
}
atomic_inc(&qdev->release_count);
@@ -362,7 +357,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
}
info = qxl_release_map(qdev, *release);
- info->id = idr_ret;
+ info->id = xa_ret;
qxl_release_unmap(qdev, *release, info);
return ret;
@@ -373,11 +368,11 @@ struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
{
struct qxl_release *release;
- spin_lock(&qdev->release_idr_lock);
- release = idr_find(&qdev->release_idr, id);
- spin_unlock(&qdev->release_idr_lock);
+ xa_lock(&qdev->release_xa);
+ release = xa_load(&qdev->release_xa, id);
+ xa_unlock(&qdev->release_xa);
if (!release) {
- DRM_ERROR("failed to find id in release_idr\n");
+ DRM_ERROR("failed to find id in release_xa\n");
return NULL;
}
--
2.43.0
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Claude review: drm/qxl: Convert qxl release idr to xarray
2026-04-22 3:17 [PATCH v3] drm/qxl: Convert qxl release idr to xarray liuqiangneo
@ 2026-04-22 22:03 ` Claude Code Review Bot
2026-04-22 22:03 ` Claude Code Review Bot
1 sibling, 0 replies; 7+ messages in thread
From: Claude Code Review Bot @ 2026-04-22 22:03 UTC (permalink / raw)
To: dri-devel-reviews
Overall Series Review
Subject: drm/qxl: Convert qxl release idr to xarray
Author: liuqiangneo@163.com
Patches: 1
Reviewed: 2026-04-23T08:03:17.782075
---
This is a single-patch series (v3) converting the QXL driver's release tracking from IDR to XArray. The conversion is well-motivated — IDR is an XArray wrapper internally, so using XArray directly simplifies the code and removes the need for a separate spinlock and `idr_preload`/`idr_preload_end` boilerplate.
The conversion is mostly correct and clean. There is one issue worth addressing: unnecessary locking around `xa_load()` in the lookup function. The rest of the changes are straightforward and well-reasoned. The v3 changelog shows the author has been responsive to review feedback across iterations.
**Verdict: Acceptable with minor comments.**
---
---
Generated by Claude Code Patch Reviewer
^ permalink raw reply [flat|nested] 7+ messages in thread
* Claude review: drm/qxl: Convert qxl release idr to xarray
2026-04-22 3:17 [PATCH v3] drm/qxl: Convert qxl release idr to xarray liuqiangneo
2026-04-22 22:03 ` Claude review: " Claude Code Review Bot
@ 2026-04-22 22:03 ` Claude Code Review Bot
1 sibling, 0 replies; 7+ messages in thread
From: Claude Code Review Bot @ 2026-04-22 22:03 UTC (permalink / raw)
To: dri-devel-reviews
Patch Review
**Unnecessary locking around `xa_load()` in `qxl_release_from_id_locked`:**
```c
xa_lock(&qdev->release_xa);
release = xa_load(&qdev->release_xa, id);
xa_unlock(&qdev->release_xa);
```
`xa_load()` is RCU-safe internally and does not require external locking for read access. The `xa_lock`/`xa_unlock` pair here is harmless but unnecessary overhead. A bare `xa_load()` call would be sufficient and more idiomatic. The v3 changelog states "Keep xa_lock in xa_lock due to concurrent access", but this reflects a misunderstanding — `xa_load()` already handles concurrent access correctly via RCU. `xa_lock` is only needed for modification operations (`xa_store`, `xa_erase`, `xa_alloc`), and the modifications in this patch (`xa_alloc`, `xa_erase`) already handle their own locking internally. The simplification should be:
```c
release = xa_load(&qdev->release_xa, id);
```
**Seqno ordering subtlety (observation, not a blocker):**
In the original code, IDR allocation and seqno increment were both inside `release_idr_lock`:
```c
spin_lock(&qdev->release_idr_lock);
handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
release->base.seqno = ++qdev->release_seqno;
spin_unlock(&qdev->release_idr_lock);
```
In the new code these are separate atomic operations:
```c
r = xa_alloc(&qdev->release_xa, &handle, release, xa_limit_31b, GFP_KERNEL);
release->base.seqno = (u32)atomic_inc_return(&qdev->release_seqno);
```
Two concurrent allocations could now observe ID ordering that doesn't match seqno ordering (thread A gets ID 5/seqno 7, thread B gets ID 6/seqno 6). In practice this is fine — the DMA fence seqno just needs uniqueness within a context, not strict correlation with the xarray index — but it's a behavioral change worth noting.
**Missing `xa_destroy()` in teardown (pre-existing):**
`qxl_device_fini()` never called `idr_destroy()` on `release_idr`, and the patch doesn't add `xa_destroy()` for `release_xa` either. While by teardown time the xarray should be empty (after the release event wait + GC flush), adding `xa_destroy(&qdev->release_xa)` in `qxl_device_fini()` would be good hygiene. This is a pre-existing issue rather than a regression, but this patch would be a natural place to fix it.
**Positive aspects:**
- `xa_limit_31b` correctly keeps IDs within `INT_MAX`, matching `release->id` being `int` and the function returning `int`.
- Using `GFP_KERNEL` instead of the `idr_preload(GFP_KERNEL)` + `idr_alloc(..., GFP_NOWAIT)` dance is a good simplification enabled by `xa_alloc` supporting sleeping allocation directly.
- `XA_FLAGS_ALLOC1` correctly mirrors the old `idr_init_base(&qdev->release_idr, 1)` behavior.
- The `(u32)` cast on `atomic_inc_return` correctly handles potential wrapping above `INT_MAX`.
- Format string change `%d` → `%u` for `handle` is correct.
---
Generated by Claude Code Patch Reviewer
^ permalink raw reply [flat|nested] 7+ messages in thread
* Claude review: drm/qxl: Convert qxl release idr to xarray
2026-04-21 6:00 [PATCH v2] " liuqiangneo
@ 2026-04-22 22:54 ` Claude Code Review Bot
2026-04-22 22:54 ` Claude Code Review Bot
1 sibling, 0 replies; 7+ messages in thread
From: Claude Code Review Bot @ 2026-04-22 22:54 UTC (permalink / raw)
To: dri-devel-reviews
Overall Series Review
Subject: drm/qxl: Convert qxl release idr to xarray
Author: liuqiangneo@163.com
Patches: 1
Reviewed: 2026-04-23T08:54:43.877264
---
This is a single-patch series converting the QXL release IDR to xarray. The conversion is mechanically correct and the v2 changelog shows the author addressed prior review feedback (xa_limit_31b, GFP_KERNEL, u32 cast). The patch is straightforward and reasonable — xarray provides built-in locking that simplifies the code by removing the external `release_idr_lock` spinlock.
There are two issues worth raising: one correctness bug with the seqno assignment ordering, and one minor concern about the `qxl_release_from_id_locked` function doing unnecessary explicit locking.
---
Generated by Claude Code Patch Reviewer
^ permalink raw reply [flat|nested] 7+ messages in thread
* Claude review: drm/qxl: Convert qxl release idr to xarray
2026-04-21 6:00 [PATCH v2] " liuqiangneo
2026-04-22 22:54 ` Claude review: " Claude Code Review Bot
@ 2026-04-22 22:54 ` Claude Code Review Bot
1 sibling, 0 replies; 7+ messages in thread
From: Claude Code Review Bot @ 2026-04-22 22:54 UTC (permalink / raw)
To: dri-devel-reviews
Patch Review
**Bug: seqno assigned after xa_alloc but before error check**
In `qxl_release_alloc()`, the seqno is assigned *between* `xa_alloc` and the error check:
```c
r = xa_alloc(&qdev->release_xa, &handle, release, xa_limit_31b, GFP_KERNEL);
release->base.seqno = (u32)atomic_inc_return(&qdev->release_seqno);
if (r < 0) {
kfree(release);
*ret = NULL;
return r;
}
```
If `xa_alloc` fails (`r < 0`), the code still increments `release_seqno` before taking the error path and freeing `release`. This wastes a sequence number on every allocation failure. The original IDR code had the same logical issue (seqno incremented inside the spinlock before the error check), so this is a pre-existing problem being preserved, but the conversion to `atomic_inc_return` makes it slightly more visible. Moving the seqno assignment after the error check would be cleaner:
```c
r = xa_alloc(&qdev->release_xa, &handle, release, xa_limit_31b, GFP_KERNEL);
if (r < 0) {
kfree(release);
*ret = NULL;
return r;
}
release->base.seqno = (u32)atomic_inc_return(&qdev->release_seqno);
```
This is not a regression since the original code had the same behavior, but since you're already refactoring, it would be a nice cleanup.
**Unnecessary explicit locking in `qxl_release_from_id_locked`**
```c
xa_lock(&qdev->release_xa);
release = xa_load(&qdev->release_xa, id);
xa_unlock(&qdev->release_xa);
```
`xa_load()` is already RCU-safe and internally handles its own read-side locking via `rcu_read_lock()`. The explicit `xa_lock`/`xa_unlock` around `xa_load` is unnecessary — `xa_load` does not require the xa_lock to be held. This could simply be:
```c
release = xa_load(&qdev->release_xa, id);
```
The returned pointer remains valid because the caller holds the release via reference or the garbage collector is serialized. Using `xa_load` alone is both correct and more performant (RCU read-side vs. spinlock).
**Missing `xa_destroy` in teardown path**
Looking at `qxl_device_fini()`, there is no `idr_destroy()` call for `release_idr` in the existing code, and likewise the patch does not add an `xa_destroy()` call for `release_xa`. While xarray (like IDR) will not leak the entries themselves (those are freed via `qxl_release_free`), calling `xa_destroy(&qdev->release_xa)` in `qxl_device_fini()` would free any internal xarray nodes and is good practice. This is a pre-existing issue but worth fixing while converting.
**Minor: variable rename churn**
The `idr_ret` → `xa_ret` renames in `qxl_alloc_surface_release_reserved` and `qxl_alloc_release_reserved` are fine but purely cosmetic. The variable could equally be just `ret` or `r` to be more generic, but this is a style nit.
**Good aspects:**
- `xa_limit_31b` correctly keeps IDs within `INT_MAX`, matching the old `idr_alloc(..., 1, 0, ...)` range (v2 fix).
- `XA_FLAGS_ALLOC1` correctly mirrors `idr_init_base(&..., 1)`, starting allocation from 1.
- `GFP_KERNEL` is appropriate since the callers are in process context (v2 fix).
- The `(u32)` cast on `atomic_inc_return` prevents sign-extension issues (v2 fix).
- `xa_erase` in `qxl_release_free` is a clean 1:1 replacement for the locked `idr_remove`.
---
Generated by Claude Code Patch Reviewer
^ permalink raw reply [flat|nested] 7+ messages in thread
* Claude review: drm/qxl: Convert qxl release idr to xarray
2026-04-20 8:36 [PATCH] " liuqiangneo
2026-04-23 0:09 ` Claude review: " Claude Code Review Bot
@ 2026-04-23 0:09 ` Claude Code Review Bot
1 sibling, 0 replies; 7+ messages in thread
From: Claude Code Review Bot @ 2026-04-23 0:09 UTC (permalink / raw)
To: dri-devel-reviews
Overall Series Review
Subject: drm/qxl: Convert qxl release idr to xarray
Author: liuqiangneo@163.com
Patches: 1
Reviewed: 2026-04-23T10:09:16.073690
---
This is a single-patch series converting the qxl driver's `release_idr` (IDR + spinlock) to an xarray. The motivation is sound — IDR is built on top of xarray, so using xarray directly removes the need for external locking around allocations and removals. The conversion is mostly correct but has one real bug (`GFP_NOWAIT` without preloading) and one unnecessary overhead issue (locking around `xa_load`).
**Verdict: Needs revision** — the GFP flag issue could cause allocation failures under memory pressure that the original code would not have experienced.
---
---
Generated by Claude Code Patch Reviewer
^ permalink raw reply [flat|nested] 7+ messages in thread
* Claude review: drm/qxl: Convert qxl release idr to xarray
2026-04-20 8:36 [PATCH] " liuqiangneo
@ 2026-04-23 0:09 ` Claude Code Review Bot
2026-04-23 0:09 ` Claude Code Review Bot
1 sibling, 0 replies; 7+ messages in thread
From: Claude Code Review Bot @ 2026-04-23 0:09 UTC (permalink / raw)
To: dri-devel-reviews
Patch Review
**Bug: `GFP_NOWAIT` should be `GFP_KERNEL`**
The original code used the IDR preload pattern to ensure allocation success:
```c
idr_preload(GFP_KERNEL);
spin_lock(&qdev->release_idr_lock);
handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
...
spin_unlock(&qdev->release_idr_lock);
idr_preload_end();
```
`idr_preload(GFP_KERNEL)` pre-allocates memory (sleeping if necessary) so that the subsequent `GFP_NOWAIT` allocation under the spinlock is guaranteed to succeed. The replacement:
```c
r = xa_alloc(&qdev->release_xa, &handle, release, xa_limit_32b, GFP_NOWAIT);
```
uses `GFP_NOWAIT` without any preloading. Since `xa_alloc` may need to allocate internal xarray nodes, and `GFP_NOWAIT` prevents sleeping, this will fail more readily under memory pressure than the original code. Now that no external spinlock is held, `GFP_KERNEL` should be used instead.
**Unnecessary locking around `xa_load`**
In `qxl_release_from_id_locked`:
```c
xa_lock(&qdev->release_xa);
release = xa_load(&qdev->release_xa, id);
xa_unlock(&qdev->release_xa);
```
`xa_load` uses `rcu_read_lock()` internally and is safe to call without holding the xa_lock. The xa_lock is only needed for store operations. This adds unnecessary spinlock contention. A plain `xa_load()` call without the surrounding lock/unlock is sufficient and correct.
**Seqno atomicity — correct but worth noting**
The change from `++qdev->release_seqno` (under spinlock) to `atomic_inc_return(&qdev->release_seqno)` (after xa_alloc returns) introduces a theoretical window where two concurrent allocations could get handles in one order but seqnos in the reverse order. However, since `dma_fence_init` uses `release->id | 0xf0000000` as the fence context (unique per release), cross-release seqno ordering doesn't matter. This is fine.
**Minor: seqno incremented on allocation failure**
```c
r = xa_alloc(&qdev->release_xa, &handle, release, xa_limit_32b, GFP_NOWAIT);
release->base.seqno = atomic_inc_return(&qdev->release_seqno);
if (r < 0) {
```
The seqno is bumped before the error check, wasting a sequence number on failure. The original code had the same behavior, so this isn't a regression, but while touching this code it would be easy to move the seqno assignment after the error check.
**Missing `xa_destroy` in teardown**
`qxl_device_fini()` doesn't call `xa_destroy()` for the new xarray. The original code also omitted `idr_destroy()`, so this isn't a regression, but it would be good to clean up here while converting.
**Variable renames — fine**
The `idr_ret` → `xa_ret` renames in `qxl_alloc_surface_release_reserved` and `qxl_alloc_release_reserved` are straightforward and correct. The error message update from `release_idr` to `release_xa` is appropriate.
---
Generated by Claude Code Patch Reviewer
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2026-04-23 0:09 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-04-22 3:17 [PATCH v3] drm/qxl: Convert qxl release idr to xarray liuqiangneo
2026-04-22 22:03 ` Claude review: " Claude Code Review Bot
2026-04-22 22:03 ` Claude Code Review Bot
-- strict thread matches above, loose matches on Subject: below --
2026-04-21 6:00 [PATCH v2] " liuqiangneo
2026-04-22 22:54 ` Claude review: " Claude Code Review Bot
2026-04-22 22:54 ` Claude Code Review Bot
2026-04-20 8:36 [PATCH] " liuqiangneo
2026-04-23 0:09 ` Claude review: " Claude Code Review Bot
2026-04-23 0:09 ` Claude Code Review Bot
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox