public inbox for drm-ai-reviews@public-inbox.freedesktop.org
 help / color / mirror / Atom feed
* [PATCH v2] accel/ivpu: Limit number of maximum contexts and doorbells per user
@ 2026-02-25 18:06 Maciej Falkowski
  2026-02-25 20:50 ` Lizhi Hou
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Maciej Falkowski @ 2026-02-25 18:06 UTC (permalink / raw)
  To: dri-devel
  Cc: oded.gabbay, jeff.hugo, karol.wachowski, lizhi.hou,
	Maciej Falkowski

From: Karol Wachowski <karol.wachowski@linux.intel.com>

Implement per-user resource limits to prevent resource exhaustion.

Root users can allocate all available contexts (128) and doorbells
(255), while non-root users are limited to half of the available
resources (64 contexts and 127 doorbells respectively).

This prevents scenarios where a single user could monopolize NPU
resources and starve other users on multi-user systems.

Change doorbell ID and command queue ID allocation errors to debug
messages as those are user triggered.

Signed-off-by: Karol Wachowski <karol.wachowski@linux.intel.com>
Signed-off-by: Maciej Falkowski <maciej.falkowski@linux.intel.com>
---
v1 -> v2:
  - Fixed off-by-one error (Lizhi)
---
 drivers/accel/ivpu/ivpu_drv.c | 94 ++++++++++++++++++++++++++++++++---
 drivers/accel/ivpu/ivpu_drv.h | 26 ++++++++--
 drivers/accel/ivpu/ivpu_job.c | 36 ++++++++++----
 3 files changed, 136 insertions(+), 20 deletions(-)

diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 3b6ec8eecf2f..1d9f7d2f71a2 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -68,6 +68,73 @@ bool ivpu_force_snoop;
 module_param_named(force_snoop, ivpu_force_snoop, bool, 0444);
 MODULE_PARM_DESC(force_snoop, "Force snooping for NPU host memory access");
 
+static struct ivpu_user_limits *ivpu_user_limits_alloc(struct ivpu_device *vdev, uid_t uid)
+{
+	struct ivpu_user_limits *limits;
+
+	limits = kzalloc(sizeof(*limits), GFP_KERNEL);
+	if (!limits)
+		return ERR_PTR(-ENOMEM);
+
+	kref_init(&limits->ref);
+	atomic_set(&limits->db_count, 0);
+	limits->vdev = vdev;
+	limits->uid = uid;
+
+	/* Allow root user to allocate all contexts */
+	if (uid == 0) {
+		limits->max_ctx_count = ivpu_get_context_count(vdev);
+		limits->max_db_count = ivpu_get_doorbell_count(vdev);
+	} else {
+		limits->max_ctx_count = ivpu_get_context_count(vdev) / 2;
+		limits->max_db_count = ivpu_get_doorbell_count(vdev) / 2;
+	}
+
+	hash_add(vdev->user_limits, &limits->hash_node, uid);
+
+	return limits;
+}
+
+static struct ivpu_user_limits *ivpu_user_limits_get(struct ivpu_device *vdev)
+{
+	struct ivpu_user_limits *limits;
+	uid_t uid = current_uid().val;
+
+	guard(mutex)(&vdev->user_limits_lock);
+
+	hash_for_each_possible(vdev->user_limits, limits, hash_node, uid) {
+		if (limits->uid == uid) {
+			if (kref_read(&limits->ref) >= limits->max_ctx_count) {
+				ivpu_dbg(vdev, IOCTL, "User %u exceeded max ctx count %u\n", uid,
+					 limits->max_ctx_count);
+				return ERR_PTR(-EMFILE);
+			}
+
+			kref_get(&limits->ref);
+			return limits;
+		}
+	}
+
+	return ivpu_user_limits_alloc(vdev, uid);
+}
+
+static void ivpu_user_limits_release(struct kref *ref)
+{
+	struct ivpu_user_limits *limits = container_of(ref, struct ivpu_user_limits, ref);
+	struct ivpu_device *vdev = limits->vdev;
+
+	lockdep_assert_held(&vdev->user_limits_lock);
+	drm_WARN_ON(&vdev->drm, atomic_read(&limits->db_count));
+	hash_del(&limits->hash_node);
+	kfree(limits);
+}
+
+static void ivpu_user_limits_put(struct ivpu_device *vdev, struct ivpu_user_limits *limits)
+{
+	guard(mutex)(&vdev->user_limits_lock);
+	kref_put(&limits->ref, ivpu_user_limits_release);
+}
+
 struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
 {
 	struct ivpu_device *vdev = file_priv->vdev;
@@ -111,6 +178,7 @@ static void file_priv_release(struct kref *ref)
 	mutex_unlock(&vdev->context_list_lock);
 	pm_runtime_put_autosuspend(vdev->drm.dev);
 
+	ivpu_user_limits_put(vdev, file_priv->user_limits);
 	mutex_destroy(&file_priv->ms_lock);
 	mutex_destroy(&file_priv->lock);
 	kfree(file_priv);
@@ -170,7 +238,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
 		args->value = ivpu_hw_dpu_max_freq_get(vdev);
 		break;
 	case DRM_IVPU_PARAM_NUM_CONTEXTS:
-		args->value = ivpu_get_context_count(vdev);
+		args->value = file_priv->user_limits->max_ctx_count;
 		break;
 	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
 		args->value = vdev->hw->ranges.user.start;
@@ -232,22 +300,30 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct ivpu_device *vdev = to_ivpu_device(dev);
 	struct ivpu_file_priv *file_priv;
+	struct ivpu_user_limits *limits;
 	u32 ctx_id;
 	int idx, ret;
 
 	if (!drm_dev_enter(dev, &idx))
 		return -ENODEV;
 
+	limits = ivpu_user_limits_get(vdev);
+	if (IS_ERR(limits)) {
+		ret = PTR_ERR(limits);
+		goto err_dev_exit;
+	}
+
 	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
 	if (!file_priv) {
 		ret = -ENOMEM;
-		goto err_dev_exit;
+		goto err_user_limits_put;
 	}
 
 	INIT_LIST_HEAD(&file_priv->ms_instance_list);
 
 	file_priv->vdev = vdev;
 	file_priv->bound = true;
+	file_priv->user_limits = limits;
 	kref_init(&file_priv->ref);
 	mutex_init(&file_priv->lock);
 	mutex_init(&file_priv->ms_lock);
@@ -285,6 +361,8 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
 	mutex_destroy(&file_priv->ms_lock);
 	mutex_destroy(&file_priv->lock);
 	kfree(file_priv);
+err_user_limits_put:
+	ivpu_user_limits_put(vdev, limits);
 err_dev_exit:
 	drm_dev_exit(idx);
 	return ret;
@@ -344,8 +422,7 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
 	ivpu_ipc_consumer_del(vdev, &cons);
 
 	if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
-		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n",
-			 ipc_hdr.data_addr);
+		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n", ipc_hdr.data_addr);
 		return -EIO;
 	}
 
@@ -454,7 +531,7 @@ int ivpu_shutdown(struct ivpu_device *vdev)
 }
 
 static const struct file_operations ivpu_fops = {
-	.owner		= THIS_MODULE,
+	.owner = THIS_MODULE,
 	DRM_ACCEL_FOPS,
 #ifdef CONFIG_PROC_FS
 	.show_fdinfo = drm_show_fdinfo,
@@ -593,6 +670,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
 	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
 	xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
 	INIT_LIST_HEAD(&vdev->bo_list);
+	hash_init(vdev->user_limits);
 
 	vdev->db_limit.min = IVPU_MIN_DB;
 	vdev->db_limit.max = IVPU_MAX_DB;
@@ -601,6 +679,10 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
 	if (ret)
 		goto err_xa_destroy;
 
+	ret = drmm_mutex_init(&vdev->drm, &vdev->user_limits_lock);
+	if (ret)
+		goto err_xa_destroy;
+
 	ret = drmm_mutex_init(&vdev->drm, &vdev->submitted_jobs_lock);
 	if (ret)
 		goto err_xa_destroy;
@@ -718,7 +800,7 @@ static struct pci_device_id ivpu_pci_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_WCL) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_NVL) },
-	{ }
+	{}
 };
 MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
 
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 78ecddf2831d..0b5aa41151c6 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -12,6 +12,7 @@
 #include <drm/drm_mm.h>
 #include <drm/drm_print.h>
 
+#include <linux/hashtable.h>
 #include <linux/pci.h>
 #include <linux/xarray.h>
 #include <uapi/drm/ivpu_accel.h>
@@ -43,7 +44,7 @@
 /* SSID 1 is used by the VPU to represent reserved context */
 #define IVPU_RESERVED_CONTEXT_MMU_SSID 1
 #define IVPU_USER_CONTEXT_MIN_SSID     2
-#define IVPU_USER_CONTEXT_MAX_SSID     (IVPU_USER_CONTEXT_MIN_SSID + 63)
+#define IVPU_USER_CONTEXT_MAX_SSID     (IVPU_USER_CONTEXT_MIN_SSID + 128)
 
 #define IVPU_MIN_DB 1
 #define IVPU_MAX_DB 255
@@ -51,9 +52,6 @@
 #define IVPU_JOB_ID_JOB_MASK		GENMASK(7, 0)
 #define IVPU_JOB_ID_CONTEXT_MASK	GENMASK(31, 8)
 
-#define IVPU_NUM_PRIORITIES    4
-#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_PRIORITIES)
-
 #define IVPU_CMDQ_MIN_ID 1
 #define IVPU_CMDQ_MAX_ID 255
 
@@ -124,6 +122,16 @@ struct ivpu_fw_info;
 struct ivpu_ipc_info;
 struct ivpu_pm_info;
 
+struct ivpu_user_limits {
+	struct hlist_node hash_node;
+	struct ivpu_device *vdev;
+	struct kref ref;
+	u32 max_ctx_count;
+	u32 max_db_count;
+	u32 uid;
+	atomic_t db_count;
+};
+
 struct ivpu_device {
 	struct drm_device drm;
 	void __iomem *regb;
@@ -143,6 +151,8 @@ struct ivpu_device {
 	struct mutex context_list_lock; /* Protects user context addition/removal */
 	struct xarray context_xa;
 	struct xa_limit context_xa_limit;
+	DECLARE_HASHTABLE(user_limits, 8);
+	struct mutex user_limits_lock; /* Protects user_limits */
 
 	struct xarray db_xa;
 	struct xa_limit db_limit;
@@ -190,6 +200,7 @@ struct ivpu_file_priv {
 	struct list_head ms_instance_list;
 	struct ivpu_bo *ms_info_bo;
 	struct xa_limit job_limit;
+	struct ivpu_user_limits *user_limits;
 	u32 job_id_next;
 	struct xa_limit cmdq_limit;
 	u32 cmdq_id_next;
@@ -287,6 +298,13 @@ static inline u32 ivpu_get_context_count(struct ivpu_device *vdev)
 	return (ctx_limit.max - ctx_limit.min + 1);
 }
 
+static inline u32 ivpu_get_doorbell_count(struct ivpu_device *vdev)
+{
+	struct xa_limit db_limit = vdev->db_limit;
+
+	return (db_limit.max - db_limit.min + 1);
+}
+
 static inline u32 ivpu_get_platform(struct ivpu_device *vdev)
 {
 	WARN_ON_ONCE(vdev->platform == IVPU_PLATFORM_INVALID);
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index 4f8564e2878a..337ed269fd3e 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -173,7 +173,7 @@ static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 p
 	ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
 			      &file_priv->cmdq_id_next, GFP_KERNEL);
 	if (ret < 0) {
-		ivpu_err(vdev, "Failed to allocate command queue ID: %d\n", ret);
+		ivpu_dbg(vdev, IOCTL, "Failed to allocate command queue ID: %d\n", ret);
 		goto err_free_cmdq;
 	}
 
@@ -215,14 +215,22 @@ static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq
 
 static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
 {
+	struct ivpu_user_limits *limits = file_priv->user_limits;
 	struct ivpu_device *vdev = file_priv->vdev;
 	int ret;
 
+	if (atomic_inc_return(&limits->db_count) > limits->max_db_count) {
+		ivpu_dbg(vdev, IOCTL, "Maximum number of %u doorbells for uid %u reached\n",
+			 limits->max_db_count, limits->uid);
+		ret = -EBUSY;
+		goto err_dec_db_count;
+	}
+
 	ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
 			      GFP_KERNEL);
 	if (ret < 0) {
-		ivpu_err(vdev, "Failed to allocate doorbell ID: %d\n", ret);
-		return ret;
+		ivpu_dbg(vdev, IOCTL, "Failed to allocate doorbell ID: %d\n", ret);
+		goto err_dec_db_count;
 	}
 
 	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
@@ -231,15 +239,18 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
 	else
 		ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
 					   cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
-
-	if (!ret) {
-		ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n",
-			 cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority);
-	} else {
+	if (ret) {
 		xa_erase(&vdev->db_xa, cmdq->db_id);
 		cmdq->db_id = 0;
+		goto err_dec_db_count;
 	}
 
+	ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n",
+		 cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority);
+	return 0;
+
+err_dec_db_count:
+	atomic_dec(&limits->db_count);
 	return ret;
 }
 
@@ -298,6 +309,7 @@ static int ivpu_cmdq_unregister(struct ivpu_file_priv *file_priv, struct ivpu_cm
 	}
 
 	xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
+	atomic_dec(&file_priv->user_limits->db_count);
 	cmdq->db_id = 0;
 
 	return 0;
@@ -313,6 +325,7 @@ static inline u8 ivpu_job_to_jsm_priority(u8 priority)
 
 static void ivpu_cmdq_destroy(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
 {
+	lockdep_assert_held(&file_priv->lock);
 	ivpu_cmdq_unregister(file_priv, cmdq);
 	xa_erase(&file_priv->cmdq_xa, cmdq->id);
 	ivpu_cmdq_free(file_priv, cmdq);
@@ -380,8 +393,11 @@ static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
 	mutex_lock(&file_priv->lock);
 
 	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) {
-		xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
-		cmdq->db_id = 0;
+		if (cmdq->db_id) {
+			xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
+			atomic_dec(&file_priv->user_limits->db_count);
+			cmdq->db_id = 0;
+		}
 	}
 
 	mutex_unlock(&file_priv->lock);
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH v2] accel/ivpu: Limit number of maximum contexts and doorbells per user
  2026-02-25 18:06 [PATCH v2] accel/ivpu: Limit number of maximum contexts and doorbells per user Maciej Falkowski
@ 2026-02-25 20:50 ` Lizhi Hou
  2026-02-27  3:00 ` Claude review: " Claude Code Review Bot
  2026-02-27  3:00 ` Claude Code Review Bot
  2 siblings, 0 replies; 4+ messages in thread
From: Lizhi Hou @ 2026-02-25 20:50 UTC (permalink / raw)
  To: Maciej Falkowski, dri-devel; +Cc: oded.gabbay, jeff.hugo, karol.wachowski

Reviewed-by: Lizhi Hou <lizhi.hou@amd.com>

On 2/25/26 10:06, Maciej Falkowski wrote:
> From: Karol Wachowski <karol.wachowski@linux.intel.com>
>
> Implement per-user resource limits to prevent resource exhaustion.
>
> Root users can allocate all available contexts (128) and doorbells
> (255), while non-root users are limited to half of the available
> resources (64 contexts and 127 doorbells respectively).
>
> This prevents scenarios where a single user could monopolize NPU
> resources and starve other users on multi-user systems.
>
> Change doorbell ID and command queue ID allocation errors to debug
> messages as those are user triggered.
>
> Signed-off-by: Karol Wachowski <karol.wachowski@linux.intel.com>
> Signed-off-by: Maciej Falkowski <maciej.falkowski@linux.intel.com>
> ---
> v1 -> v2:
>    - Fixed off-by-one error (Lizhi)
> ---
>   drivers/accel/ivpu/ivpu_drv.c | 94 ++++++++++++++++++++++++++++++++---
>   drivers/accel/ivpu/ivpu_drv.h | 26 ++++++++--
>   drivers/accel/ivpu/ivpu_job.c | 36 ++++++++++----
>   3 files changed, 136 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
> index 3b6ec8eecf2f..1d9f7d2f71a2 100644
> --- a/drivers/accel/ivpu/ivpu_drv.c
> +++ b/drivers/accel/ivpu/ivpu_drv.c
> @@ -68,6 +68,73 @@ bool ivpu_force_snoop;
>   module_param_named(force_snoop, ivpu_force_snoop, bool, 0444);
>   MODULE_PARM_DESC(force_snoop, "Force snooping for NPU host memory access");
>   
> +static struct ivpu_user_limits *ivpu_user_limits_alloc(struct ivpu_device *vdev, uid_t uid)
> +{
> +	struct ivpu_user_limits *limits;
> +
> +	limits = kzalloc(sizeof(*limits), GFP_KERNEL);
> +	if (!limits)
> +		return ERR_PTR(-ENOMEM);
> +
> +	kref_init(&limits->ref);
> +	atomic_set(&limits->db_count, 0);
> +	limits->vdev = vdev;
> +	limits->uid = uid;
> +
> +	/* Allow root user to allocate all contexts */
> +	if (uid == 0) {
> +		limits->max_ctx_count = ivpu_get_context_count(vdev);
> +		limits->max_db_count = ivpu_get_doorbell_count(vdev);
> +	} else {
> +		limits->max_ctx_count = ivpu_get_context_count(vdev) / 2;
> +		limits->max_db_count = ivpu_get_doorbell_count(vdev) / 2;
> +	}
> +
> +	hash_add(vdev->user_limits, &limits->hash_node, uid);
> +
> +	return limits;
> +}
> +
> +static struct ivpu_user_limits *ivpu_user_limits_get(struct ivpu_device *vdev)
> +{
> +	struct ivpu_user_limits *limits;
> +	uid_t uid = current_uid().val;
> +
> +	guard(mutex)(&vdev->user_limits_lock);
> +
> +	hash_for_each_possible(vdev->user_limits, limits, hash_node, uid) {
> +		if (limits->uid == uid) {
> +			if (kref_read(&limits->ref) >= limits->max_ctx_count) {
> +				ivpu_dbg(vdev, IOCTL, "User %u exceeded max ctx count %u\n", uid,
> +					 limits->max_ctx_count);
> +				return ERR_PTR(-EMFILE);
> +			}
> +
> +			kref_get(&limits->ref);
> +			return limits;
> +		}
> +	}
> +
> +	return ivpu_user_limits_alloc(vdev, uid);
> +}
> +
> +static void ivpu_user_limits_release(struct kref *ref)
> +{
> +	struct ivpu_user_limits *limits = container_of(ref, struct ivpu_user_limits, ref);
> +	struct ivpu_device *vdev = limits->vdev;
> +
> +	lockdep_assert_held(&vdev->user_limits_lock);
> +	drm_WARN_ON(&vdev->drm, atomic_read(&limits->db_count));
> +	hash_del(&limits->hash_node);
> +	kfree(limits);
> +}
> +
> +static void ivpu_user_limits_put(struct ivpu_device *vdev, struct ivpu_user_limits *limits)
> +{
> +	guard(mutex)(&vdev->user_limits_lock);
> +	kref_put(&limits->ref, ivpu_user_limits_release);
> +}
> +
>   struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
>   {
>   	struct ivpu_device *vdev = file_priv->vdev;
> @@ -111,6 +178,7 @@ static void file_priv_release(struct kref *ref)
>   	mutex_unlock(&vdev->context_list_lock);
>   	pm_runtime_put_autosuspend(vdev->drm.dev);
>   
> +	ivpu_user_limits_put(vdev, file_priv->user_limits);
>   	mutex_destroy(&file_priv->ms_lock);
>   	mutex_destroy(&file_priv->lock);
>   	kfree(file_priv);
> @@ -170,7 +238,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
>   		args->value = ivpu_hw_dpu_max_freq_get(vdev);
>   		break;
>   	case DRM_IVPU_PARAM_NUM_CONTEXTS:
> -		args->value = ivpu_get_context_count(vdev);
> +		args->value = file_priv->user_limits->max_ctx_count;
>   		break;
>   	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
>   		args->value = vdev->hw->ranges.user.start;
> @@ -232,22 +300,30 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
>   {
>   	struct ivpu_device *vdev = to_ivpu_device(dev);
>   	struct ivpu_file_priv *file_priv;
> +	struct ivpu_user_limits *limits;
>   	u32 ctx_id;
>   	int idx, ret;
>   
>   	if (!drm_dev_enter(dev, &idx))
>   		return -ENODEV;
>   
> +	limits = ivpu_user_limits_get(vdev);
> +	if (IS_ERR(limits)) {
> +		ret = PTR_ERR(limits);
> +		goto err_dev_exit;
> +	}
> +
>   	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
>   	if (!file_priv) {
>   		ret = -ENOMEM;
> -		goto err_dev_exit;
> +		goto err_user_limits_put;
>   	}
>   
>   	INIT_LIST_HEAD(&file_priv->ms_instance_list);
>   
>   	file_priv->vdev = vdev;
>   	file_priv->bound = true;
> +	file_priv->user_limits = limits;
>   	kref_init(&file_priv->ref);
>   	mutex_init(&file_priv->lock);
>   	mutex_init(&file_priv->ms_lock);
> @@ -285,6 +361,8 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
>   	mutex_destroy(&file_priv->ms_lock);
>   	mutex_destroy(&file_priv->lock);
>   	kfree(file_priv);
> +err_user_limits_put:
> +	ivpu_user_limits_put(vdev, limits);
>   err_dev_exit:
>   	drm_dev_exit(idx);
>   	return ret;
> @@ -344,8 +422,7 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
>   	ivpu_ipc_consumer_del(vdev, &cons);
>   
>   	if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
> -		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n",
> -			 ipc_hdr.data_addr);
> +		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n", ipc_hdr.data_addr);
>   		return -EIO;
>   	}
>   
> @@ -454,7 +531,7 @@ int ivpu_shutdown(struct ivpu_device *vdev)
>   }
>   
>   static const struct file_operations ivpu_fops = {
> -	.owner		= THIS_MODULE,
> +	.owner = THIS_MODULE,
>   	DRM_ACCEL_FOPS,
>   #ifdef CONFIG_PROC_FS
>   	.show_fdinfo = drm_show_fdinfo,
> @@ -593,6 +670,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
>   	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
>   	xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
>   	INIT_LIST_HEAD(&vdev->bo_list);
> +	hash_init(vdev->user_limits);
>   
>   	vdev->db_limit.min = IVPU_MIN_DB;
>   	vdev->db_limit.max = IVPU_MAX_DB;
> @@ -601,6 +679,10 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
>   	if (ret)
>   		goto err_xa_destroy;
>   
> +	ret = drmm_mutex_init(&vdev->drm, &vdev->user_limits_lock);
> +	if (ret)
> +		goto err_xa_destroy;
> +
>   	ret = drmm_mutex_init(&vdev->drm, &vdev->submitted_jobs_lock);
>   	if (ret)
>   		goto err_xa_destroy;
> @@ -718,7 +800,7 @@ static struct pci_device_id ivpu_pci_ids[] = {
>   	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
>   	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_WCL) },
>   	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_NVL) },
> -	{ }
> +	{}
>   };
>   MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
>   
> diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
> index 78ecddf2831d..0b5aa41151c6 100644
> --- a/drivers/accel/ivpu/ivpu_drv.h
> +++ b/drivers/accel/ivpu/ivpu_drv.h
> @@ -12,6 +12,7 @@
>   #include <drm/drm_mm.h>
>   #include <drm/drm_print.h>
>   
> +#include <linux/hashtable.h>
>   #include <linux/pci.h>
>   #include <linux/xarray.h>
>   #include <uapi/drm/ivpu_accel.h>
> @@ -43,7 +44,7 @@
>   /* SSID 1 is used by the VPU to represent reserved context */
>   #define IVPU_RESERVED_CONTEXT_MMU_SSID 1
>   #define IVPU_USER_CONTEXT_MIN_SSID     2
> -#define IVPU_USER_CONTEXT_MAX_SSID     (IVPU_USER_CONTEXT_MIN_SSID + 63)
> +#define IVPU_USER_CONTEXT_MAX_SSID     (IVPU_USER_CONTEXT_MIN_SSID + 128)
>   
>   #define IVPU_MIN_DB 1
>   #define IVPU_MAX_DB 255
> @@ -51,9 +52,6 @@
>   #define IVPU_JOB_ID_JOB_MASK		GENMASK(7, 0)
>   #define IVPU_JOB_ID_CONTEXT_MASK	GENMASK(31, 8)
>   
> -#define IVPU_NUM_PRIORITIES    4
> -#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_PRIORITIES)
> -
>   #define IVPU_CMDQ_MIN_ID 1
>   #define IVPU_CMDQ_MAX_ID 255
>   
> @@ -124,6 +122,16 @@ struct ivpu_fw_info;
>   struct ivpu_ipc_info;
>   struct ivpu_pm_info;
>   
> +struct ivpu_user_limits {
> +	struct hlist_node hash_node;
> +	struct ivpu_device *vdev;
> +	struct kref ref;
> +	u32 max_ctx_count;
> +	u32 max_db_count;
> +	u32 uid;
> +	atomic_t db_count;
> +};
> +
>   struct ivpu_device {
>   	struct drm_device drm;
>   	void __iomem *regb;
> @@ -143,6 +151,8 @@ struct ivpu_device {
>   	struct mutex context_list_lock; /* Protects user context addition/removal */
>   	struct xarray context_xa;
>   	struct xa_limit context_xa_limit;
> +	DECLARE_HASHTABLE(user_limits, 8);
> +	struct mutex user_limits_lock; /* Protects user_limits */
>   
>   	struct xarray db_xa;
>   	struct xa_limit db_limit;
> @@ -190,6 +200,7 @@ struct ivpu_file_priv {
>   	struct list_head ms_instance_list;
>   	struct ivpu_bo *ms_info_bo;
>   	struct xa_limit job_limit;
> +	struct ivpu_user_limits *user_limits;
>   	u32 job_id_next;
>   	struct xa_limit cmdq_limit;
>   	u32 cmdq_id_next;
> @@ -287,6 +298,13 @@ static inline u32 ivpu_get_context_count(struct ivpu_device *vdev)
>   	return (ctx_limit.max - ctx_limit.min + 1);
>   }
>   
> +static inline u32 ivpu_get_doorbell_count(struct ivpu_device *vdev)
> +{
> +	struct xa_limit db_limit = vdev->db_limit;
> +
> +	return (db_limit.max - db_limit.min + 1);
> +}
> +
>   static inline u32 ivpu_get_platform(struct ivpu_device *vdev)
>   {
>   	WARN_ON_ONCE(vdev->platform == IVPU_PLATFORM_INVALID);
> diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
> index 4f8564e2878a..337ed269fd3e 100644
> --- a/drivers/accel/ivpu/ivpu_job.c
> +++ b/drivers/accel/ivpu/ivpu_job.c
> @@ -173,7 +173,7 @@ static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 p
>   	ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
>   			      &file_priv->cmdq_id_next, GFP_KERNEL);
>   	if (ret < 0) {
> -		ivpu_err(vdev, "Failed to allocate command queue ID: %d\n", ret);
> +		ivpu_dbg(vdev, IOCTL, "Failed to allocate command queue ID: %d\n", ret);
>   		goto err_free_cmdq;
>   	}
>   
> @@ -215,14 +215,22 @@ static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq
>   
>   static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
>   {
> +	struct ivpu_user_limits *limits = file_priv->user_limits;
>   	struct ivpu_device *vdev = file_priv->vdev;
>   	int ret;
>   
> +	if (atomic_inc_return(&limits->db_count) > limits->max_db_count) {
> +		ivpu_dbg(vdev, IOCTL, "Maximum number of %u doorbells for uid %u reached\n",
> +			 limits->max_db_count, limits->uid);
> +		ret = -EBUSY;
> +		goto err_dec_db_count;
> +	}
> +
>   	ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
>   			      GFP_KERNEL);
>   	if (ret < 0) {
> -		ivpu_err(vdev, "Failed to allocate doorbell ID: %d\n", ret);
> -		return ret;
> +		ivpu_dbg(vdev, IOCTL, "Failed to allocate doorbell ID: %d\n", ret);
> +		goto err_dec_db_count;
>   	}
>   
>   	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
> @@ -231,15 +239,18 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
>   	else
>   		ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
>   					   cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
> -
> -	if (!ret) {
> -		ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n",
> -			 cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority);
> -	} else {
> +	if (ret) {
>   		xa_erase(&vdev->db_xa, cmdq->db_id);
>   		cmdq->db_id = 0;
> +		goto err_dec_db_count;
>   	}
>   
> +	ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n",
> +		 cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority);
> +	return 0;
> +
> +err_dec_db_count:
> +	atomic_dec(&limits->db_count);
>   	return ret;
>   }
>   
> @@ -298,6 +309,7 @@ static int ivpu_cmdq_unregister(struct ivpu_file_priv *file_priv, struct ivpu_cm
>   	}
>   
>   	xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
> +	atomic_dec(&file_priv->user_limits->db_count);
>   	cmdq->db_id = 0;
>   
>   	return 0;
> @@ -313,6 +325,7 @@ static inline u8 ivpu_job_to_jsm_priority(u8 priority)
>   
>   static void ivpu_cmdq_destroy(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
>   {
> +	lockdep_assert_held(&file_priv->lock);
>   	ivpu_cmdq_unregister(file_priv, cmdq);
>   	xa_erase(&file_priv->cmdq_xa, cmdq->id);
>   	ivpu_cmdq_free(file_priv, cmdq);
> @@ -380,8 +393,11 @@ static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
>   	mutex_lock(&file_priv->lock);
>   
>   	xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) {
> -		xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
> -		cmdq->db_id = 0;
> +		if (cmdq->db_id) {
> +			xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
> +			atomic_dec(&file_priv->user_limits->db_count);
> +			cmdq->db_id = 0;
> +		}
>   	}
>   
>   	mutex_unlock(&file_priv->lock);

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Claude review: accel/ivpu: Limit number of maximum contexts and doorbells per user
  2026-02-25 18:06 [PATCH v2] accel/ivpu: Limit number of maximum contexts and doorbells per user Maciej Falkowski
  2026-02-25 20:50 ` Lizhi Hou
@ 2026-02-27  3:00 ` Claude Code Review Bot
  2026-02-27  3:00 ` Claude Code Review Bot
  2 siblings, 0 replies; 4+ messages in thread
From: Claude Code Review Bot @ 2026-02-27  3:00 UTC (permalink / raw)
  To: dri-devel-reviews

Overall Series Review

Subject: accel/ivpu: Limit number of maximum contexts and doorbells per user
Author: Maciej Falkowski <maciej.falkowski@linux.intel.com>
Patches: 2
Reviewed: 2026-02-27T13:00:26.146297

---

This single-patch series adds per-user resource limits to the Intel VPU (NPU) accelerator driver, preventing a single user from monopolizing all contexts and doorbells. The general approach is sound: a hash table keyed by UID tracks per-user allocations, with root getting the full pool and non-root users getting half. However, there are several issues ranging from a likely off-by-one in the SSID range expansion, questionable use of `kref_read()` for flow control, unrelated whitespace churn, and a missing cleanup path for the hash table on device teardown.

---
Generated by Claude Code Patch Reviewer

^ permalink raw reply	[flat|nested] 4+ messages in thread

* Claude review: accel/ivpu: Limit number of maximum contexts and doorbells per user
  2026-02-25 18:06 [PATCH v2] accel/ivpu: Limit number of maximum contexts and doorbells per user Maciej Falkowski
  2026-02-25 20:50 ` Lizhi Hou
  2026-02-27  3:00 ` Claude review: " Claude Code Review Bot
@ 2026-02-27  3:00 ` Claude Code Review Bot
  2 siblings, 0 replies; 4+ messages in thread
From: Claude Code Review Bot @ 2026-02-27  3:00 UTC (permalink / raw)
  To: dri-devel-reviews

Patch Review

**Off-by-one in SSID range (likely bug)**

The SSID range is changed from `+ 63` to `+ 128`:

```c
-#define IVPU_USER_CONTEXT_MAX_SSID     (IVPU_USER_CONTEXT_MIN_SSID + 63)
+#define IVPU_USER_CONTEXT_MAX_SSID     (IVPU_USER_CONTEXT_MIN_SSID + 128)
```

With `IVPU_USER_CONTEXT_MIN_SSID = 2`, the new MAX is 130, and `ivpu_get_context_count()` returns `130 - 2 + 1 = 129`. But the commit message states root users get 128 contexts and non-root users get 64. Non-root gets `129 / 2 = 64` (integer division), which happens to match, but root gets 129, not 128. If the intent is 128 total contexts, this should be `+ 127`. If 129 is intentional, the commit message should be updated. Given the v1→v2 changelog says "Fixed off-by-one error", this warrants clarification — it's not clear the off-by-one was actually fixed rather than introduced.

**Misuse of `kref_read()` for context counting**

```c
+		if (kref_read(&limits->ref) >= limits->max_ctx_count) {
+			ivpu_dbg(vdev, IOCTL, "User %u exceeded max ctx count %u\n", uid,
+				 limits->max_ctx_count);
+			return ERR_PTR(-EMFILE);
+		}
+
+		kref_get(&limits->ref);
```

Using `kref_read()` for application-level flow control is a kernel anti-pattern. `kref` is meant for object lifetime management, not as a general-purpose counter. The doorbell tracking already uses `atomic_t db_count` for this exact purpose — a parallel `atomic_t ctx_count` would be more consistent and idiomatic. `kref_read()` documentation explicitly warns it should only be used for debugging/logging, not for flow control decisions.

**UID stored as `u32` instead of `kuid_t`**

```c
+struct ivpu_user_limits {
+	...
+	u32 uid;
+	...
+};
```

The UID is obtained from `current_uid().val` and stored as a bare `u32`. In the kernel, UIDs should be stored as `kuid_t` to maintain proper user namespace awareness. While `current_uid().val` gives the initial-namespace value (so the root check `uid == 0` is technically safe against namespace escapes), using `kuid_t` with helpers like `uid_eq(limits->uid, GLOBAL_ROOT_UID)` would be the proper kernel idiom and more future-proof.

**No hash table cleanup on device teardown**

The `user_limits` hash table entries are allocated with `kzalloc` and freed when their kref drops to zero. However, there is no explicit cleanup during device removal (`ivpu_dev_fini` or equivalent). If any user_limits entries are leaked (e.g., due to a bug in the teardown path), they become permanent memory leaks. Consider adding a hash table walk in the device fini path that warns on and frees remaining entries.

**Unrelated whitespace/style changes**

Several cosmetic changes are mixed in with the functional change:

```c
-	.owner		= THIS_MODULE,
+	.owner = THIS_MODULE,
```

```c
-	{ }
+	{}
```

```c
-		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n",
-			 ipc_hdr.data_addr);
+		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n", ipc_hdr.data_addr);
```

These should either be in a separate preparatory patch or dropped entirely. Mixing cosmetic changes with functional changes makes the patch harder to review and bisect.

**`ivpu_cmdq_reset` doorbell tracking fix is good**

```c
+		if (cmdq->db_id) {
 			xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
+			atomic_dec(&file_priv->user_limits->db_count);
 			cmdq->db_id = 0;
+		}
```

This correctly guards against decrementing `db_count` for doorbells that weren't actually registered (db_id == 0). Without this guard, the atomic counter would underflow. Good catch.

**Doorbell increment-then-check pattern**

```c
+	if (atomic_inc_return(&limits->db_count) > limits->max_db_count) {
+		...
+		ret = -EBUSY;
+		goto err_dec_db_count;
+	}
```

This is correct but worth noting: between `atomic_inc_return` and the `atomic_dec` in the error path, the counter temporarily exceeds the limit. This is a well-known pattern and acceptable here since other threads checking this value will also be doing their own increment-then-check.

**`lockdep_assert_held` addition in `ivpu_cmdq_destroy` is good**

```c
+	lockdep_assert_held(&file_priv->lock);
```

This documents and enforces an existing locking requirement. Good improvement.

**Error message demotion to debug is appropriate**

Changing `ivpu_err` to `ivpu_dbg` for doorbell and command queue allocation failures is correct — these are user-triggered conditions, and using `ivpu_err` could allow unprivileged users to flood `dmesg`.

**`IVPU_NUM_PRIORITIES` / `IVPU_NUM_CMDQS_PER_CTX` removal**

These defines are removed but appear to be unused (only defined in the header, never referenced). This is fine dead-code cleanup, though it's unrelated to the per-user limits feature and could be a separate patch.

**Summary**: The approach is reasonable and addresses a real resource exhaustion concern. The main issues to address are: (1) the off-by-one discrepancy between code and commit message in the SSID range, (2) replacing `kref_read()` flow control with a proper atomic counter, (3) using `kuid_t` instead of bare `u32` for the UID, and (4) splitting out unrelated whitespace changes.

---
Generated by Claude Code Patch Reviewer

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2026-02-27  3:00 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-02-25 18:06 [PATCH v2] accel/ivpu: Limit number of maximum contexts and doorbells per user Maciej Falkowski
2026-02-25 20:50 ` Lizhi Hou
2026-02-27  3:00 ` Claude review: " Claude Code Review Bot
2026-02-27  3:00 ` Claude Code Review Bot

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox