public inbox for drm-ai-reviews@public-inbox.freedesktop.org
 help / color / mirror / Atom feed
* [PATCH] accel/ivpu: Limit number of maximum contexts and doorbells per user
@ 2026-02-20 16:00 Maciej Falkowski
  2026-02-20 19:58 ` Claude review: " Claude Code Review Bot
  2026-02-20 19:58 ` Claude Code Review Bot
  0 siblings, 2 replies; 3+ messages in thread
From: Maciej Falkowski @ 2026-02-20 16:00 UTC (permalink / raw)
  To: dri-devel
  Cc: oded.gabbay, jeff.hugo, karol.wachowski, lizhi.hou,
	Maciej Falkowski

From: Karol Wachowski <karol.wachowski@linux.intel.com>

Implement per-user resource limits to prevent resource exhaustion.

Root users can allocate all available contexts (128) and doorbells
(255), while non-root users are limited to half of the available
resources (64 contexts and 127 doorbells respectively).

This prevents scenarios where a single user could monopolize NPU
resources and starve other users on multi-user systems.

Change doorbell ID and command queue ID allocation errors to debug
messages as those are user triggered.

Signed-off-by: Karol Wachowski <karol.wachowski@linux.intel.com>
Signed-off-by: Maciej Falkowski <maciej.falkowski@linux.intel.com>
---
 drivers/accel/ivpu/ivpu_drv.c | 94 ++++++++++++++++++++++++++++++++---
 drivers/accel/ivpu/ivpu_drv.h | 26 ++++++++--
 drivers/accel/ivpu/ivpu_job.c | 36 ++++++++++----
 3 files changed, 136 insertions(+), 20 deletions(-)

diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 3b6ec8eecf2f..1119e3fea29e 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -68,6 +68,73 @@ bool ivpu_force_snoop;
 module_param_named(force_snoop, ivpu_force_snoop, bool, 0444);
 MODULE_PARM_DESC(force_snoop, "Force snooping for NPU host memory access");
 
+static struct ivpu_user_limits *ivpu_user_limits_alloc(struct ivpu_device *vdev, uid_t uid)
+{
+	struct ivpu_user_limits *limits;
+
+	limits = kzalloc(sizeof(*limits), GFP_KERNEL);
+	if (!limits)
+		return ERR_PTR(-ENOMEM);
+
+	kref_init(&limits->ref);
+	atomic_set(&limits->db_count, 0);
+	limits->vdev = vdev;
+	limits->uid = uid;
+
+	/* Allow root user to allocate all contexts */
+	if (uid == 0) {
+		limits->max_ctx_count = ivpu_get_context_count(vdev);
+		limits->max_db_count = ivpu_get_doorbell_count(vdev);
+	} else {
+		limits->max_ctx_count = ivpu_get_context_count(vdev) / 2;
+		limits->max_db_count = ivpu_get_doorbell_count(vdev) / 2;
+	}
+
+	hash_add(vdev->user_limits, &limits->hash_node, uid);
+
+	return limits;
+}
+
+static struct ivpu_user_limits *ivpu_user_limits_get(struct ivpu_device *vdev)
+{
+	struct ivpu_user_limits *limits;
+	uid_t uid = current_uid().val;
+
+	guard(mutex)(&vdev->user_limits_lock);
+
+	hash_for_each_possible(vdev->user_limits, limits, hash_node, uid) {
+		if (limits->uid == uid) {
+			if (kref_read(&limits->ref) > limits->max_ctx_count) {
+				ivpu_dbg(vdev, IOCTL, "User %u exceeded max ctx count %u\n", uid,
+					 limits->max_ctx_count);
+				return ERR_PTR(-EMFILE);
+			}
+
+			kref_get(&limits->ref);
+			return limits;
+		}
+	}
+
+	return ivpu_user_limits_alloc(vdev, uid);
+}
+
+static void ivpu_user_limits_release(struct kref *ref)
+{
+	struct ivpu_user_limits *limits = container_of(ref, struct ivpu_user_limits, ref);
+	struct ivpu_device *vdev = limits->vdev;
+
+	lockdep_assert_held(&vdev->user_limits_lock);
+	drm_WARN_ON(&vdev->drm, atomic_read(&limits->db_count));
+	hash_del(&limits->hash_node);
+	kfree(limits);
+}
+
+static void ivpu_user_limits_put(struct ivpu_device *vdev, struct ivpu_user_limits *limits)
+{
+	guard(mutex)(&vdev->user_limits_lock);
+	kref_put(&limits->ref, ivpu_user_limits_release);
+}
+
 struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
 {
 	struct ivpu_device *vdev = file_priv->vdev;
@@ -111,6 +178,7 @@ static void file_priv_release(struct kref *ref)
 	mutex_unlock(&vdev->context_list_lock);
 	pm_runtime_put_autosuspend(vdev->drm.dev);
 
+	ivpu_user_limits_put(vdev, file_priv->user_limits);
 	mutex_destroy(&file_priv->ms_lock);
 	mutex_destroy(&file_priv->lock);
 	kfree(file_priv);
@@ -170,7 +238,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
 		args->value = ivpu_hw_dpu_max_freq_get(vdev);
 		break;
 	case DRM_IVPU_PARAM_NUM_CONTEXTS:
-		args->value = ivpu_get_context_count(vdev);
+		args->value = file_priv->user_limits->max_ctx_count;
 		break;
 	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
 		args->value = vdev->hw->ranges.user.start;
@@ -232,22 +300,30 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct ivpu_device *vdev = to_ivpu_device(dev);
 	struct ivpu_file_priv *file_priv;
+	struct ivpu_user_limits *limits;
 	u32 ctx_id;
 	int idx, ret;
 
 	if (!drm_dev_enter(dev, &idx))
 		return -ENODEV;
 
+	limits = ivpu_user_limits_get(vdev);
+	if (IS_ERR(limits)) {
+		ret = PTR_ERR(limits);
+		goto err_dev_exit;
+	}
+
 	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
 	if (!file_priv) {
 		ret = -ENOMEM;
-		goto err_dev_exit;
+		goto err_user_limits_put;
 	}
 
 	INIT_LIST_HEAD(&file_priv->ms_instance_list);
 
 	file_priv->vdev = vdev;
 	file_priv->bound = true;
+	file_priv->user_limits = limits;
 	kref_init(&file_priv->ref);
 	mutex_init(&file_priv->lock);
 	mutex_init(&file_priv->ms_lock);
@@ -285,6 +361,8 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
 	mutex_destroy(&file_priv->ms_lock);
 	mutex_destroy(&file_priv->lock);
 	kfree(file_priv);
+err_user_limits_put:
+	ivpu_user_limits_put(vdev, limits);
 err_dev_exit:
 	drm_dev_exit(idx);
 	return ret;
@@ -344,8 +422,7 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
 	ivpu_ipc_consumer_del(vdev, &cons);
 
 	if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
-		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n",
-			 ipc_hdr.data_addr);
+		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n", ipc_hdr.data_addr);
 		return -EIO;
 	}
 
@@ -454,7 +531,7 @@ int ivpu_shutdown(struct ivpu_device *vdev)
 }
 
 static const struct file_operations ivpu_fops = {
-	.owner		= THIS_MODULE,
+	.owner = THIS_MODULE,
 	DRM_ACCEL_FOPS,
 #ifdef CONFIG_PROC_FS
 	.show_fdinfo = drm_show_fdinfo,
@@ -593,6 +670,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
 	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
 	xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
 	INIT_LIST_HEAD(&vdev->bo_list);
+	hash_init(vdev->user_limits);
 
 	vdev->db_limit.min = IVPU_MIN_DB;
 	vdev->db_limit.max = IVPU_MAX_DB;
@@ -601,6 +679,10 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
 	if (ret)
 		goto err_xa_destroy;
 
+	ret = drmm_mutex_init(&vdev->drm, &vdev->user_limits_lock);
+	if (ret)
+		goto err_xa_destroy;
+
 	ret = drmm_mutex_init(&vdev->drm, &vdev->submitted_jobs_lock);
 	if (ret)
 		goto err_xa_destroy;
@@ -718,7 +800,7 @@ static struct pci_device_id ivpu_pci_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_WCL) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_NVL) },
-	{ }
+	{}
 };
 MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
 
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 78ecddf2831d..0b5aa41151c6 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -12,6 +12,7 @@
 #include <drm/drm_mm.h>
 #include <drm/drm_print.h>
 
+#include <linux/hashtable.h>
 #include <linux/pci.h>
 #include <linux/xarray.h>
 #include <uapi/drm/ivpu_accel.h>
@@ -43,7 +44,7 @@
 /* SSID 1 is used by the VPU to represent reserved context */
 #define IVPU_RESERVED_CONTEXT_MMU_SSID 1
 #define IVPU_USER_CONTEXT_MIN_SSID     2
-#define IVPU_USER_CONTEXT_MAX_SSID     (IVPU_USER_CONTEXT_MIN_SSID + 63)
+#define IVPU_USER_CONTEXT_MAX_SSID     (IVPU_USER_CONTEXT_MIN_SSID + 128)
 
 #define IVPU_MIN_DB 1
 #define IVPU_MAX_DB 255
@@ -51,9 +52,6 @@
 #define IVPU_JOB_ID_JOB_MASK		GENMASK(7, 0)
 #define IVPU_JOB_ID_CONTEXT_MASK	GENMASK(31, 8)
 
-#define IVPU_NUM_PRIORITIES    4
-#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_PRIORITIES)
-
 #define IVPU_CMDQ_MIN_ID 1
 #define IVPU_CMDQ_MAX_ID 255
 
@@ -124,6 +122,16 @@ struct ivpu_fw_info;
 struct ivpu_ipc_info;
 struct ivpu_pm_info;
 
+struct ivpu_user_limits {
+	struct hlist_node hash_node;
+	struct ivpu_device *vdev;
+	struct kref ref;
+	u32 max_ctx_count;
+	u32 max_db_count;
+	u32 uid;
+	atomic_t db_count;
+};
+
 struct ivpu_device {
 	struct drm_device drm;
 	void __iomem *regb;
@@ -143,6 +151,8 @@ struct ivpu_device {
 	struct mutex context_list_lock; /* Protects user context addition/removal */
 	struct xarray context_xa;
 	struct xa_limit context_xa_limit;
+	DECLARE_HASHTABLE(user_limits, 8);
+	struct mutex user_limits_lock; /* Protects user_limits */
 
 	struct xarray db_xa;
 	struct xa_limit db_limit;
@@ -190,6 +200,7 @@ struct ivpu_file_priv {
 	struct list_head ms_instance_list;
 	struct ivpu_bo *ms_info_bo;
 	struct xa_limit job_limit;
+	struct ivpu_user_limits *user_limits;
 	u32 job_id_next;
 	struct xa_limit cmdq_limit;
 	u32 cmdq_id_next;
@@ -287,6 +298,13 @@ static inline u32 ivpu_get_context_count(struct ivpu_device *vdev)
 	return (ctx_limit.max - ctx_limit.min + 1);
 }
 
+static inline u32 ivpu_get_doorbell_count(struct ivpu_device *vdev)
+{
+	struct xa_limit db_limit = vdev->db_limit;
+
+	return (db_limit.max - db_limit.min + 1);
+}
+
 static inline u32 ivpu_get_platform(struct ivpu_device *vdev)
 {
 	WARN_ON_ONCE(vdev->platform == IVPU_PLATFORM_INVALID);
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index 4f8564e2878a..337ed269fd3e 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -173,7 +173,7 @@ static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 p
 	ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
 			      &file_priv->cmdq_id_next, GFP_KERNEL);
 	if (ret < 0) {
-		ivpu_err(vdev, "Failed to allocate command queue ID: %d\n", ret);
+		ivpu_dbg(vdev, IOCTL, "Failed to allocate command queue ID: %d\n", ret);
 		goto err_free_cmdq;
 	}
 
@@ -215,14 +215,22 @@ static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq
 
 static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
 {
+	struct ivpu_user_limits *limits = file_priv->user_limits;
 	struct ivpu_device *vdev = file_priv->vdev;
 	int ret;
 
+	if (atomic_inc_return(&limits->db_count) > limits->max_db_count) {
+		ivpu_dbg(vdev, IOCTL, "Maximum number of %u doorbells for uid %u reached\n",
+			 limits->max_db_count, limits->uid);
+		ret = -EBUSY;
+		goto err_dec_db_count;
+	}
+
 	ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
 			      GFP_KERNEL);
 	if (ret < 0) {
-		ivpu_err(vdev, "Failed to allocate doorbell ID: %d\n", ret);
-		return ret;
+		ivpu_dbg(vdev, IOCTL, "Failed to allocate doorbell ID: %d\n", ret);
+		goto err_dec_db_count;
 	}
 
 	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
@@ -231,15 +239,18 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
 	else
 		ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
 					   cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
-
-	if (!ret) {
-		ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n",
-			 cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority);
-	} else {
+	if (ret) {
 		xa_erase(&vdev->db_xa, cmdq->db_id);
 		cmdq->db_id = 0;
+		goto err_dec_db_count;
 	}
 
+	ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n",
+		 cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority);
+	return 0;
+
+err_dec_db_count:
+	atomic_dec(&limits->db_count);
 	return ret;
 }
 
@@ -298,6 +309,7 @@ static int ivpu_cmdq_unregister(struct ivpu_file_priv *file_priv, struct ivpu_cm
 	}
 
 	xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
+	atomic_dec(&file_priv->user_limits->db_count);
 	cmdq->db_id = 0;
 
 	return 0;
@@ -313,6 +325,7 @@ static inline u8 ivpu_job_to_jsm_priority(u8 priority)
 
 static void ivpu_cmdq_destroy(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
 {
+	lockdep_assert_held(&file_priv->lock);
 	ivpu_cmdq_unregister(file_priv, cmdq);
 	xa_erase(&file_priv->cmdq_xa, cmdq->id);
 	ivpu_cmdq_free(file_priv, cmdq);
@@ -380,8 +393,11 @@ static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
 	mutex_lock(&file_priv->lock);
 
 	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) {
-		xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
-		cmdq->db_id = 0;
+		if (cmdq->db_id) {
+			xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
+			atomic_dec(&file_priv->user_limits->db_count);
+			cmdq->db_id = 0;
+		}
 	}
 
 	mutex_unlock(&file_priv->lock);
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Claude review: accel/ivpu: Limit number of maximum contexts and doorbells per user
  2026-02-20 16:00 [PATCH] accel/ivpu: Limit number of maximum contexts and doorbells per user Maciej Falkowski
@ 2026-02-20 19:58 ` Claude Code Review Bot
  2026-02-20 19:58 ` Claude Code Review Bot
  1 sibling, 0 replies; 3+ messages in thread
From: Claude Code Review Bot @ 2026-02-20 19:58 UTC (permalink / raw)
  To: dri-devel-reviews

Overall Series Review

Subject: accel/ivpu: Limit number of maximum contexts and doorbells per user
Author: Maciej Falkowski <maciej.falkowski@linux.intel.com>
Patches: 1
Reviewed: 2026-02-21T05:58:02.994714

---

This is a single patch that introduces per-user resource limits in the Intel IVPU (NPU) accelerator driver. The goal is to prevent a single user from monopolizing all contexts and doorbells on multi-user systems, by allowing root to use all resources and non-root users to use at most half.

The general architecture is reasonable: a `struct ivpu_user_limits` is allocated per-uid, reference-counted via kref, and tracked in a hashtable on the device. Each `ivpu_file_priv` holds a reference to the limits structure for its uid. Doorbell counts are tracked with an atomic counter. The implementation correctly handles error paths in `ivpu_open()` and `ivpu_register_db()`, and adds proper cleanup in `ivpu_cmdq_reset()` and `ivpu_cmdq_unregister()`.

There are several issues, most notably an off-by-one error in the context limit enforcement that allows one extra context beyond the intended maximum, a change to `IVPU_USER_CONTEXT_MAX_SSID` that expands the total context count to 129 (an odd number that doesn't divide neatly), and a raw uid comparison for privilege checking instead of using standard kernel credential helpers.

---
Generated by Claude Code Patch Reviewer

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Claude review: accel/ivpu: Limit number of maximum contexts and doorbells per user
  2026-02-20 16:00 [PATCH] accel/ivpu: Limit number of maximum contexts and doorbells per user Maciej Falkowski
  2026-02-20 19:58 ` Claude review: " Claude Code Review Bot
@ 2026-02-20 19:58 ` Claude Code Review Bot
  1 sibling, 0 replies; 3+ messages in thread
From: Claude Code Review Bot @ 2026-02-20 19:58 UTC (permalink / raw)
  To: dri-devel-reviews

Patch Review

**Off-by-one in context limit check**

> +	hash_for_each_possible(vdev->user_limits, limits, hash_node, uid) {
> +		if (limits->uid == uid) {
> +			if (kref_read(&limits->ref) > limits->max_ctx_count) {
> +				ivpu_dbg(vdev, IOCTL, "User %u exceeded max ctx count %u\n", uid,
> +					 limits->max_ctx_count);
> +				return ERR_PTR(-EMFILE);
> +			}
> +
> +			kref_get(&limits->ref);
> +			return limits;
> +		}
> +	}
> +
> +	return ivpu_user_limits_alloc(vdev, uid);

The first context for a user goes through `ivpu_user_limits_alloc()`, which calls `kref_init()` (setting the refcount to 1) and returns without hitting the limit check. Every subsequent open hits the `hash_for_each_possible` path and checks `kref_read() > max_ctx_count` *before* calling `kref_get()`. So when `max_ctx_count` contexts have already been allocated, `kref_read()` returns exactly `max_ctx_count`, the condition `> max_ctx_count` is false, and one more context is permitted. This allows `max_ctx_count + 1` contexts in total. The check should use `>=` instead of `>`.

**SSID range produces 129 contexts, not 128**

> -#define IVPU_USER_CONTEXT_MAX_SSID     (IVPU_USER_CONTEXT_MIN_SSID + 63)
> +#define IVPU_USER_CONTEXT_MAX_SSID     (IVPU_USER_CONTEXT_MIN_SSID + 128)

With `IVPU_USER_CONTEXT_MIN_SSID` being 2, this gives `MAX_SSID = 130`. The `ivpu_get_context_count()` function returns `max - min + 1 = 130 - 2 + 1 = 129`. The commit message says root gets 128 contexts and non-root gets 64, but root actually gets 129 and non-root gets `129 / 2 = 64` (integer truncation). Combined with the off-by-one above, root can actually get 130 contexts. Was the intent to use `+ 127` here to get exactly 128 contexts?

**Raw uid comparison instead of proper credential check**

> +	if (uid == 0) {
> +		limits->max_ctx_count = ivpu_get_context_count(vdev);
> +		limits->max_db_count = ivpu_get_doorbell_count(vdev);

Comparing `uid == 0` directly ignores user namespaces. In a container where a process has uid 0 within its namespace but maps to a non-zero uid in the init namespace, `current_uid().val` will return the namespaced uid. Depending on intent, this could be the wrong check. The more standard approach for privilege checks in kernel code is `capable(CAP_SYS_ADMIN)` or `uid_eq(current_uid(), GLOBAL_ROOT_UID)`. If the intent is specifically "init namespace root", `uid_eq` with `GLOBAL_ROOT_UID` is the proper idiom.

**Atomic increment-then-check pattern in doorbell limit**

> +	if (atomic_inc_return(&limits->db_count) > limits->max_db_count) {
> +		ivpu_dbg(vdev, IOCTL, "Maximum number of %u doorbells for uid %u reached\n",
> +			 limits->max_db_count, limits->uid);
> +		ret = -EBUSY;
> +		goto err_dec_db_count;
> +	}

Unlike the context check, this increment-then-check pattern has no off-by-one. If `max_db_count` is 127, the 128th call to `atomic_inc_return` returns 128, the check `128 > 127` fires, and exactly 127 doorbells are permitted — which is the stated limit. For `max_db_count = 255` (root), the 255th increment returns 255, which is not > 255, so all 255 are allowed; doorbell IDs only range from 1 to 255 (255 entries), so `xa_alloc_cyclic` would fail at 256 anyway. No change needed here.

**Removal of IVPU_NUM_PRIORITIES and IVPU_NUM_CMDQS_PER_CTX**

> -#define IVPU_NUM_PRIORITIES    4
> -#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_PRIORITIES)

These macros are removed. The grep confirms they have no remaining users in the tree, so this is fine. Though the removal is unrelated to the stated purpose of this patch and isn't mentioned in the commit message.

**UAPI change for DRM_IVPU_PARAM_NUM_CONTEXTS**

> -		args->value = ivpu_get_context_count(vdev);
> +		args->value = file_priv->user_limits->max_ctx_count;

This changes the semantics of the `DRM_IVPU_PARAM_NUM_CONTEXTS` ioctl from returning the total system context count to returning the per-user maximum. This is a UAPI behavioral change -- existing userspace that queries this value may get a different (smaller) number than before. This may or may not be the right thing to do, but it's worth calling out that this is a change in the user-visible API contract. Userspace that previously saw 64 will now see 64 (for non-root, coincidentally the same here), but for root it will see 129 instead of the old 64. If the SSID range is corrected to give 128 total, non-root would see 64 and root would see 128.

**Unrelated whitespace changes**

> -		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n",
> -			 ipc_hdr.data_addr);
> +		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n", ipc_hdr.data_addr);

> -	.owner		= THIS_MODULE,
> +	.owner = THIS_MODULE,

> -	{ }
> +	{}

These are cosmetic whitespace/style changes unrelated to the feature. They add noise to the patch and make it harder to review and bisect.

**lockdep_assert_held addition in ivpu_cmdq_destroy**

> +	lockdep_assert_held(&file_priv->lock);

This is a good addition. All callers (`ivpu_cmdq_release_all_locked` and the `cmdq_manage_ioctl` destroy path) do hold `file_priv->lock`, confirmed by the existing `lockdep_assert_held` in `ivpu_cmdq_release_all_locked` and the `guard(mutex)` in the ioctl path.

**ivpu_cmdq_reset correctly guards against double-decrement**

> +		if (cmdq->db_id) {
> +			xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
> +			atomic_dec(&file_priv->user_limits->db_count);
> +			cmdq->db_id = 0;
> +		}

This is a good change. The original code would call `xa_erase` with `db_id = 0` which is harmless for xarray but sloppy. The new code properly skips already-unregistered queues and avoids decrementing `db_count` for queues that never registered a doorbell.

**No cleanup of user_limits hashtable on device teardown**

There is no code to walk and free remaining `ivpu_user_limits` entries from the hashtable during device removal. If all file_priv instances are properly closed before device removal, the kref would drop to zero and entries would be freed. But if any path leaks a file_priv reference, the hash entries would leak. The `drmm_mutex_init` for the lock will destroy the mutex on device finalization, but the hash entries themselves are not freed as part of any managed cleanup. This is likely acceptable if the driver guarantees all file_priv references are dropped, but worth considering whether a `hash_for_each_safe` cleanup pass is warranted.

---
Generated by Claude Code Patch Reviewer

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2026-02-20 19:58 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-02-20 16:00 [PATCH] accel/ivpu: Limit number of maximum contexts and doorbells per user Maciej Falkowski
2026-02-20 19:58 ` Claude review: " Claude Code Review Bot
2026-02-20 19:58 ` Claude Code Review Bot

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox