public inbox for drm-ai-reviews@public-inbox.freedesktop.org
 help / color / mirror / Atom feed
* [PATCH v2] drm/qxl: Convert qxl release idr to xarray
@ 2026-04-21  6:00 liuqiangneo
  2026-04-22 22:54 ` Claude review: " Claude Code Review Bot
  2026-04-22 22:54 ` Claude Code Review Bot
  0 siblings, 2 replies; 3+ messages in thread
From: liuqiangneo @ 2026-04-21  6:00 UTC (permalink / raw)
  To: airlied, kraxel
  Cc: maarten.lankhorst, mripard, tzimmermann, airlied, simona,
	virtualization, spice-devel, dri-devel, linux-kernel, Qiang Liu

From: Qiang Liu <liuqiang@kylinos.cn>

Replace the release_idr + release_idr_lock with an XArray.
IDR internally uses an xarray, so we can use it directly, which simplifies
our code by removing the need to do external locking.

Signed-off-by: Qiang Liu <liuqiang@kylinos.cn>
---
v2:
- Use xa_limit_31b instead of xa_limit_32b to keep IDs within INT_MAX.
- Use GFP_KERNEL instead of GFP_NOWAIT because the context is sleepable.
- Cast to u32 to avoid sign extension when atomic counter wraps above INT_MAX.
---
 drivers/gpu/drm/qxl/qxl_drv.h     |  6 ++---
 drivers/gpu/drm/qxl/qxl_kms.c     |  4 +--
 drivers/gpu/drm/qxl/qxl_release.c | 45 ++++++++++++++-----------------
 3 files changed, 25 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index cc02b5f10ad9..cf9decf39022 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -35,6 +35,7 @@
 #include <linux/firmware.h>
 #include <linux/platform_device.h>
 #include <linux/workqueue.h>
+#include <linux/xarray.h>
 
 #include <drm/drm_crtc.h>
 #include <drm/drm_encoder.h>
@@ -208,11 +209,10 @@ struct qxl_device {
 	struct qxl_memslot surfaces_slot;
 
 	spinlock_t	release_lock;
-	struct idr	release_idr;
-	uint32_t	release_seqno;
+	struct xarray	release_xa;
+	atomic_t	release_seqno;
 	atomic_t	release_count;
 	wait_queue_head_t release_event;
-	spinlock_t release_idr_lock;
 	struct mutex	async_io_mutex;
 	unsigned int last_sent_io_cmd;
 
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 461b7ab9ad5c..0cebaf88f407 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -227,8 +227,8 @@ int qxl_device_init(struct qxl_device *qdev,
 		goto cursor_ring_free;
 	}
 
-	idr_init_base(&qdev->release_idr, 1);
-	spin_lock_init(&qdev->release_idr_lock);
+	xa_init_flags(&qdev->release_xa, XA_FLAGS_ALLOC1);
+	atomic_set(&qdev->release_seqno, 0);
 	spin_lock_init(&qdev->release_lock);
 
 	idr_init_base(&qdev->surf_id_idr, 1);
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 06979d0e8a9f..929df641b62d 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -90,6 +90,7 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
 	struct qxl_release *release;
 	int handle;
 	size_t size = sizeof(*release);
+	int r;
 
 	release = kmalloc(size, GFP_KERNEL);
 	if (!release) {
@@ -102,16 +103,12 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
 	release->surface_release_id = 0;
 	INIT_LIST_HEAD(&release->bos);
 
-	idr_preload(GFP_KERNEL);
-	spin_lock(&qdev->release_idr_lock);
-	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
-	release->base.seqno = ++qdev->release_seqno;
-	spin_unlock(&qdev->release_idr_lock);
-	idr_preload_end();
-	if (handle < 0) {
+	r = xa_alloc(&qdev->release_xa, &handle, release, xa_limit_31b, GFP_KERNEL);
+	release->base.seqno = (u32)atomic_inc_return(&qdev->release_seqno);
+	if (r < 0) {
 		kfree(release);
 		*ret = NULL;
-		return handle;
+		return r;
 	}
 	*ret = release;
 	DRM_DEBUG_DRIVER("allocated release %d\n", handle);
@@ -143,9 +140,7 @@ qxl_release_free(struct qxl_device *qdev,
 	if (release->surface_release_id)
 		qxl_surface_id_dealloc(qdev, release->surface_release_id);
 
-	spin_lock(&qdev->release_idr_lock);
-	idr_remove(&qdev->release_idr, release->id);
-	spin_unlock(&qdev->release_idr_lock);
+	xa_erase(&qdev->release_xa, release->id);
 
 	if (dma_fence_was_initialized(&release->base)) {
 		WARN_ON(list_empty(&release->bos));
@@ -261,14 +256,14 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
 				       struct qxl_release **release)
 {
 	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
-		int idr_ret;
+		int xa_ret;
 		struct qxl_bo *bo;
 		union qxl_release_info *info;
 
 		/* stash the release after the create command */
-		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
-		if (idr_ret < 0)
-			return idr_ret;
+		xa_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
+		if (xa_ret < 0)
+			return xa_ret;
 		bo = create_rel->release_bo;
 
 		(*release)->release_bo = bo;
@@ -277,7 +272,7 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
 		qxl_release_list_add(*release, bo);
 
 		info = qxl_release_map(qdev, *release);
-		info->id = idr_ret;
+		info->id = xa_ret;
 		qxl_release_unmap(qdev, *release, info);
 		return 0;
 	}
@@ -291,7 +286,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
 				       struct qxl_bo **rbo)
 {
 	struct qxl_bo *bo, *free_bo = NULL;
-	int idr_ret;
+	int xa_ret;
 	int ret = 0;
 	union qxl_release_info *info;
 	int cur_idx;
@@ -312,11 +307,11 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
 		return -EINVAL;
 	}
 
-	idr_ret = qxl_release_alloc(qdev, type, release);
-	if (idr_ret < 0) {
+	xa_ret = qxl_release_alloc(qdev, type, release);
+	if (xa_ret < 0) {
 		if (rbo)
 			*rbo = NULL;
-		return idr_ret;
+		return xa_ret;
 	}
 	atomic_inc(&qdev->release_count);
 
@@ -362,7 +357,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
 	}
 
 	info = qxl_release_map(qdev, *release);
-	info->id = idr_ret;
+	info->id = xa_ret;
 	qxl_release_unmap(qdev, *release, info);
 
 	return ret;
@@ -373,11 +368,11 @@ struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
 {
 	struct qxl_release *release;
 
-	spin_lock(&qdev->release_idr_lock);
-	release = idr_find(&qdev->release_idr, id);
-	spin_unlock(&qdev->release_idr_lock);
+	xa_lock(&qdev->release_xa);
+	release = xa_load(&qdev->release_xa, id);
+	xa_unlock(&qdev->release_xa);
 	if (!release) {
-		DRM_ERROR("failed to find id in release_idr\n");
+		DRM_ERROR("failed to find id in release_xa\n");
 		return NULL;
 	}
 
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Claude review: drm/qxl: Convert qxl release idr to xarray
  2026-04-21  6:00 [PATCH v2] drm/qxl: Convert qxl release idr to xarray liuqiangneo
@ 2026-04-22 22:54 ` Claude Code Review Bot
  2026-04-22 22:54 ` Claude Code Review Bot
  1 sibling, 0 replies; 3+ messages in thread
From: Claude Code Review Bot @ 2026-04-22 22:54 UTC (permalink / raw)
  To: dri-devel-reviews

Overall Series Review

Subject: drm/qxl: Convert qxl release idr to xarray
Author: liuqiangneo@163.com
Patches: 1
Reviewed: 2026-04-23T08:54:43.877264

---

This is a single-patch series converting the QXL release IDR to xarray. The conversion is mechanically correct and the v2 changelog shows the author addressed prior review feedback (xa_limit_31b, GFP_KERNEL, u32 cast). The patch is straightforward and reasonable — xarray provides built-in locking that simplifies the code by removing the external `release_idr_lock` spinlock.

There are two issues worth raising: one correctness bug with the seqno assignment ordering, and one minor concern about the `qxl_release_from_id_locked` function doing unnecessary explicit locking.

---
Generated by Claude Code Patch Reviewer

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Claude review: drm/qxl: Convert qxl release idr to xarray
  2026-04-21  6:00 [PATCH v2] drm/qxl: Convert qxl release idr to xarray liuqiangneo
  2026-04-22 22:54 ` Claude review: " Claude Code Review Bot
@ 2026-04-22 22:54 ` Claude Code Review Bot
  1 sibling, 0 replies; 3+ messages in thread
From: Claude Code Review Bot @ 2026-04-22 22:54 UTC (permalink / raw)
  To: dri-devel-reviews

Patch Review

**Bug: seqno assigned after xa_alloc but before error check**

In `qxl_release_alloc()`, the seqno is assigned *between* `xa_alloc` and the error check:

```c
r = xa_alloc(&qdev->release_xa, &handle, release, xa_limit_31b, GFP_KERNEL);
release->base.seqno = (u32)atomic_inc_return(&qdev->release_seqno);
if (r < 0) {
    kfree(release);
    *ret = NULL;
    return r;
}
```

If `xa_alloc` fails (`r < 0`), the code still increments `release_seqno` before taking the error path and freeing `release`. This wastes a sequence number on every allocation failure. The original IDR code had the same logical issue (seqno incremented inside the spinlock before the error check), so this is a pre-existing problem being preserved, but the conversion to `atomic_inc_return` makes it slightly more visible. Moving the seqno assignment after the error check would be cleaner:

```c
r = xa_alloc(&qdev->release_xa, &handle, release, xa_limit_31b, GFP_KERNEL);
if (r < 0) {
    kfree(release);
    *ret = NULL;
    return r;
}
release->base.seqno = (u32)atomic_inc_return(&qdev->release_seqno);
```

This is not a regression since the original code had the same behavior, but since you're already refactoring, it would be a nice cleanup.

**Unnecessary explicit locking in `qxl_release_from_id_locked`**

```c
xa_lock(&qdev->release_xa);
release = xa_load(&qdev->release_xa, id);
xa_unlock(&qdev->release_xa);
```

`xa_load()` is already RCU-safe and internally handles its own read-side locking via `rcu_read_lock()`. The explicit `xa_lock`/`xa_unlock` around `xa_load` is unnecessary — `xa_load` does not require the xa_lock to be held. This could simply be:

```c
release = xa_load(&qdev->release_xa, id);
```

The returned pointer remains valid because the caller holds the release via reference or the garbage collector is serialized. Using `xa_load` alone is both correct and more performant (RCU read-side vs. spinlock).

**Missing `xa_destroy` in teardown path**

Looking at `qxl_device_fini()`, there is no `idr_destroy()` call for `release_idr` in the existing code, and likewise the patch does not add an `xa_destroy()` call for `release_xa`. While xarray (like IDR) will not leak the entries themselves (those are freed via `qxl_release_free`), calling `xa_destroy(&qdev->release_xa)` in `qxl_device_fini()` would free any internal xarray nodes and is good practice. This is a pre-existing issue but worth fixing while converting.

**Minor: variable rename churn**

The `idr_ret` → `xa_ret` renames in `qxl_alloc_surface_release_reserved` and `qxl_alloc_release_reserved` are fine but purely cosmetic. The variable could equally be just `ret` or `r` to be more generic, but this is a style nit.

**Good aspects:**
- `xa_limit_31b` correctly keeps IDs within `INT_MAX`, matching the old `idr_alloc(..., 1, 0, ...)` range (v2 fix).
- `XA_FLAGS_ALLOC1` correctly mirrors `idr_init_base(&..., 1)`, starting allocation from 1.
- `GFP_KERNEL` is appropriate since the callers are in process context (v2 fix).
- The `(u32)` cast on `atomic_inc_return` prevents sign-extension issues (v2 fix).
- `xa_erase` in `qxl_release_free` is a clean 1:1 replacement for the locked `idr_remove`.

---
Generated by Claude Code Patch Reviewer

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2026-04-22 22:54 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-04-21  6:00 [PATCH v2] drm/qxl: Convert qxl release idr to xarray liuqiangneo
2026-04-22 22:54 ` Claude review: " Claude Code Review Bot
2026-04-22 22:54 ` Claude Code Review Bot

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox