public inbox for drm-ai-reviews@public-inbox.freedesktop.org
 help / color / mirror / Atom feed
* [PATCH v3] drm/qxl: Convert qxl release idr to xarray
@ 2026-04-22  3:17 liuqiangneo
  2026-04-22 22:03 ` Claude review: " Claude Code Review Bot
  2026-04-22 22:03 ` Claude Code Review Bot
  0 siblings, 2 replies; 3+ messages in thread
From: liuqiangneo @ 2026-04-22  3:17 UTC (permalink / raw)
  To: airlied, kraxel
  Cc: maarten.lankhorst, mripard, tzimmermann, airlied, simona,
	virtualization, spice-devel, dri-devel, linux-kernel, Qiang Liu

From: Qiang Liu <liuqiang@kylinos.cn>

Replace the release_idr + release_idr_lock with an XArray.
IDR internally uses an XArray, so we can use the XArray directly, which
simplifies our code by removing the IDR wrapper.

Signed-off-by: Qiang Liu <liuqiang@kylinos.cn>
---
v3:
- Change the type of 'handle' to u32.
  'xa_limit_31b' restricts the range of 'handle',
  so an implicit cast to 'int' is safe.
- Keep the xa_lock around xa_load() due to concurrent access.

v2:
- Use xa_limit_31b instead of xa_limit_32b to keep IDs within INT_MAX.
- Use GFP_KERNEL instead of GFP_NOWAIT because the context is sleepable.
- Cast to u32 to avoid sign extension when atomic counter wraps above INT_MAX.
---
 drivers/gpu/drm/qxl/qxl_drv.h     |  6 ++--
 drivers/gpu/drm/qxl/qxl_kms.c     |  4 +--
 drivers/gpu/drm/qxl/qxl_release.c | 49 ++++++++++++++-----------------
 3 files changed, 27 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index cc02b5f10ad9..cf9decf39022 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -35,6 +35,7 @@
 #include <linux/firmware.h>
 #include <linux/platform_device.h>
 #include <linux/workqueue.h>
+#include <linux/xarray.h>
 
 #include <drm/drm_crtc.h>
 #include <drm/drm_encoder.h>
@@ -208,11 +209,10 @@ struct qxl_device {
 	struct qxl_memslot surfaces_slot;
 
 	spinlock_t	release_lock;
-	struct idr	release_idr;
-	uint32_t	release_seqno;
+	struct xarray	release_xa;
+	atomic_t	release_seqno;
 	atomic_t	release_count;
 	wait_queue_head_t release_event;
-	spinlock_t release_idr_lock;
 	struct mutex	async_io_mutex;
 	unsigned int last_sent_io_cmd;
 
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 461b7ab9ad5c..0cebaf88f407 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -227,8 +227,8 @@ int qxl_device_init(struct qxl_device *qdev,
 		goto cursor_ring_free;
 	}
 
-	idr_init_base(&qdev->release_idr, 1);
-	spin_lock_init(&qdev->release_idr_lock);
+	xa_init_flags(&qdev->release_xa, XA_FLAGS_ALLOC1);
+	atomic_set(&qdev->release_seqno, 0);
 	spin_lock_init(&qdev->release_lock);
 
 	idr_init_base(&qdev->surf_id_idr, 1);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 06979d0e8a9f..afed1096cf7b 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -88,8 +88,9 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
 		  struct qxl_release **ret)
 {
 	struct qxl_release *release;
-	int handle;
+	u32 handle;
 	size_t size = sizeof(*release);
+	int r;
 
 	release = kmalloc(size, GFP_KERNEL);
 	if (!release) {
@@ -102,19 +103,15 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
 	release->surface_release_id = 0;
 	INIT_LIST_HEAD(&release->bos);
 
-	idr_preload(GFP_KERNEL);
-	spin_lock(&qdev->release_idr_lock);
-	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
-	release->base.seqno = ++qdev->release_seqno;
-	spin_unlock(&qdev->release_idr_lock);
-	idr_preload_end();
-	if (handle < 0) {
+	r = xa_alloc(&qdev->release_xa, &handle, release, xa_limit_31b, GFP_KERNEL);
+	release->base.seqno = (u32)atomic_inc_return(&qdev->release_seqno);
+	if (r < 0) {
 		kfree(release);
 		*ret = NULL;
-		return handle;
+		return r;
 	}
 	*ret = release;
-	DRM_DEBUG_DRIVER("allocated release %d\n", handle);
+	DRM_DEBUG_DRIVER("allocated release %u\n", handle);
 	release->id = handle;
 	return handle;
 }
@@ -143,9 +140,7 @@ qxl_release_free(struct qxl_device *qdev,
 	if (release->surface_release_id)
 		qxl_surface_id_dealloc(qdev, release->surface_release_id);
 
-	spin_lock(&qdev->release_idr_lock);
-	idr_remove(&qdev->release_idr, release->id);
-	spin_unlock(&qdev->release_idr_lock);
+	xa_erase(&qdev->release_xa, release->id);
 
 	if (dma_fence_was_initialized(&release->base)) {
 		WARN_ON(list_empty(&release->bos));
@@ -261,14 +256,14 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
 				       struct qxl_release **release)
 {
 	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
-		int idr_ret;
+		int xa_ret;
 		struct qxl_bo *bo;
 		union qxl_release_info *info;
 
 		/* stash the release after the create command */
-		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
-		if (idr_ret < 0)
-			return idr_ret;
+		xa_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
+		if (xa_ret < 0)
+			return xa_ret;
 		bo = create_rel->release_bo;
 
 		(*release)->release_bo = bo;
@@ -277,7 +272,7 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
 		qxl_release_list_add(*release, bo);
 
 		info = qxl_release_map(qdev, *release);
-		info->id = idr_ret;
+		info->id = xa_ret;
 		qxl_release_unmap(qdev, *release, info);
 		return 0;
 	}
@@ -291,7 +286,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
 				       struct qxl_bo **rbo)
 {
 	struct qxl_bo *bo, *free_bo = NULL;
-	int idr_ret;
+	int xa_ret;
 	int ret = 0;
 	union qxl_release_info *info;
 	int cur_idx;
@@ -312,11 +307,11 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
 		return -EINVAL;
 	}
 
-	idr_ret = qxl_release_alloc(qdev, type, release);
-	if (idr_ret < 0) {
+	xa_ret = qxl_release_alloc(qdev, type, release);
+	if (xa_ret < 0) {
 		if (rbo)
 			*rbo = NULL;
-		return idr_ret;
+		return xa_ret;
 	}
 	atomic_inc(&qdev->release_count);
 
@@ -362,7 +357,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
 	}
 
 	info = qxl_release_map(qdev, *release);
-	info->id = idr_ret;
+	info->id = xa_ret;
 	qxl_release_unmap(qdev, *release, info);
 
 	return ret;
@@ -373,11 +368,11 @@ struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
 {
 	struct qxl_release *release;
 
-	spin_lock(&qdev->release_idr_lock);
-	release = idr_find(&qdev->release_idr, id);
-	spin_unlock(&qdev->release_idr_lock);
+	xa_lock(&qdev->release_xa);
+	release = xa_load(&qdev->release_xa, id);
+	xa_unlock(&qdev->release_xa);
 	if (!release) {
-		DRM_ERROR("failed to find id in release_idr\n");
+		DRM_ERROR("failed to find id in release_xa\n");
 		return NULL;
 	}
 
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Claude review: drm/qxl: Convert qxl release idr to xarray
  2026-04-22  3:17 [PATCH v3] drm/qxl: Convert qxl release idr to xarray liuqiangneo
@ 2026-04-22 22:03 ` Claude Code Review Bot
  2026-04-22 22:03 ` Claude Code Review Bot
  1 sibling, 0 replies; 3+ messages in thread
From: Claude Code Review Bot @ 2026-04-22 22:03 UTC (permalink / raw)
  To: dri-devel-reviews

Overall Series Review

Subject: drm/qxl: Convert qxl release idr to xarray
Author: liuqiangneo@163.com
Patches: 1
Reviewed: 2026-04-23T08:03:17.782075

---

This is a single-patch series (v3) converting the QXL driver's release tracking from IDR to XArray. The conversion is well-motivated — IDR is an XArray wrapper internally, so using XArray directly simplifies the code and removes the need for a separate spinlock and `idr_preload`/`idr_preload_end` boilerplate.

The conversion is mostly correct and clean. There is one issue worth addressing: unnecessary locking around `xa_load()` in the lookup function. The rest of the changes are straightforward and well-reasoned. The v3 changelog shows the author has been responsive to review feedback across iterations.

**Verdict: Acceptable with minor comments.**

---

---
Generated by Claude Code Patch Reviewer

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Claude review: drm/qxl: Convert qxl release idr to xarray
  2026-04-22  3:17 [PATCH v3] drm/qxl: Convert qxl release idr to xarray liuqiangneo
  2026-04-22 22:03 ` Claude review: " Claude Code Review Bot
@ 2026-04-22 22:03 ` Claude Code Review Bot
  1 sibling, 0 replies; 3+ messages in thread
From: Claude Code Review Bot @ 2026-04-22 22:03 UTC (permalink / raw)
  To: dri-devel-reviews

Patch Review

**Unnecessary locking around `xa_load()` in `qxl_release_from_id_locked`:**

```c
xa_lock(&qdev->release_xa);
release = xa_load(&qdev->release_xa, id);
xa_unlock(&qdev->release_xa);
```

`xa_load()` is RCU-safe internally and does not require external locking for read access. The `xa_lock`/`xa_unlock` pair here is harmless but unnecessary overhead. A bare `xa_load()` call would be sufficient and more idiomatic. The v3 changelog states "Keep xa_lock in xa_lock due to concurrent access", but this reflects a misunderstanding — `xa_load()` already handles concurrent access correctly via RCU. `xa_lock` is only needed for modification operations (`xa_store`, `xa_erase`, `xa_alloc`), and the modifications in this patch (`xa_alloc`, `xa_erase`) already handle their own locking internally. The simplification should be:

```c
release = xa_load(&qdev->release_xa, id);
```

**Seqno ordering subtlety (observation, not a blocker):**

In the original code, IDR allocation and seqno increment were both inside `release_idr_lock`:

```c
spin_lock(&qdev->release_idr_lock);
handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
release->base.seqno = ++qdev->release_seqno;
spin_unlock(&qdev->release_idr_lock);
```

In the new code these are separate atomic operations:

```c
r = xa_alloc(&qdev->release_xa, &handle, release, xa_limit_31b, GFP_KERNEL);
release->base.seqno = (u32)atomic_inc_return(&qdev->release_seqno);
```

Two concurrent allocations could now observe ID ordering that doesn't match seqno ordering (thread A gets ID 5/seqno 7, thread B gets ID 6/seqno 6). In practice this is fine — the DMA fence seqno just needs uniqueness within a context, not strict correlation with the xarray index — but it's a behavioral change worth noting.

**Missing `xa_destroy()` in teardown (pre-existing):**

`qxl_device_fini()` never called `idr_destroy()` on `release_idr`, and the patch doesn't add `xa_destroy()` for `release_xa` either. While by teardown time the xarray should be empty (after the release event wait + GC flush), adding `xa_destroy(&qdev->release_xa)` in `qxl_device_fini()` would be good hygiene. This is a pre-existing issue rather than a regression, but this patch would be a natural place to fix it.

**Positive aspects:**

- `xa_limit_31b` correctly keeps IDs within `INT_MAX`, matching `release->id` being `int` and the function returning `int`.
- Using `GFP_KERNEL` instead of the `idr_preload(GFP_KERNEL)` + `idr_alloc(..., GFP_NOWAIT)` dance is a good simplification enabled by `xa_alloc` supporting sleeping allocation directly.
- `XA_FLAGS_ALLOC1` correctly mirrors the old `idr_init_base(&qdev->release_idr, 1)` behavior.
- The `(u32)` cast on `atomic_inc_return` correctly handles potential wrapping above `INT_MAX`.
- Format string change `%d` → `%u` for `handle` is correct.

---
Generated by Claude Code Patch Reviewer

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2026-04-22 22:03 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-04-22  3:17 [PATCH v3] drm/qxl: Convert qxl release idr to xarray liuqiangneo
2026-04-22 22:03 ` Claude review: " Claude Code Review Bot
2026-04-22 22:03 ` Claude Code Review Bot

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox