From: Adrián Larumbe <adrian.larumbe@collabora.com>
To: linux-kernel@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org,
Steven Price <steven.price@arm.com>,
Boris Brezillon <boris.brezillon@collabora.com>,
Janne Grunau <j@jannau.net>,
kernel@collabora.com,
Adrián Larumbe <adrian.larumbe@collabora.com>,
Caterina Shablia <caterina.shablia@collabora.com>,
Liviu Dudau <liviu.dudau@arm.com>,
Maarten Lankhorst <maarten.lankhorst@linux.intel.com>,
Maxime Ripard <mripard@kernel.org>,
Thomas Zimmermann <tzimmermann@suse.de>,
David Airlie <airlied@gmail.com>, Simona Vetter <simona@ffwll.ch>,
Daniel Almeida <daniel.almeida@collabora.com>,
Alice Ryhl <aliceryhl@google.com>
Subject: [PATCH v5 08/11] drm/panthor: Add support for repeated mappings
Date: Fri, 13 Mar 2026 15:09:45 +0000 [thread overview]
Message-ID: <20260313150956.1618635-9-adrian.larumbe@collabora.com> (raw)
In-Reply-To: <20260313150956.1618635-1-adrian.larumbe@collabora.com>
From: Boris Brezillon <boris.brezillon@collabora.com>
This allows us to optimize mapping of a relatively small
portion of a BO over and over in a large VA range, which
is useful to support Vulkan sparse bindings in an efficient
way.
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Co-developed-by: Caterina Shablia <caterina.shablia@collabora.com>
Signed-off-by: Caterina Shablia <caterina.shablia@collabora.com>
---
drivers/gpu/drm/panthor/panthor_mmu.c | 109 +++++++++++++++++++++++---
include/uapi/drm/panthor_drm.h | 20 +++++
2 files changed, 120 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 07c520475f14..a357063bb9f6 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -190,6 +190,9 @@ struct panthor_vm_op_ctx {
/** @map.bo_offset: Offset in the buffer object. */
u64 bo_offset;
+ /** @map.bo_repeat_range: Repeated BO range. */
+ u32 bo_repeat_range;
+
/**
* @map.sgt: sg-table pointing to pages backing the GEM object.
*
@@ -1003,6 +1006,29 @@ panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot,
return 0;
}
+static int
+panthor_vm_repeated_map_pages(struct panthor_vm *vm, u64 iova, int prot,
+			      struct sg_table *sgt, u64 offset, u64 size,
+			      u64 count)
+{
+	int ret;
+	u64 i;
+
+	/* FIXME: we really need to optimize this at the io_pgtable level. */
+	for (i = 0; i < count; i++) {
+		ret = panthor_vm_map_pages(vm, iova + (size * i), prot,
+					   sgt, offset, size);
+		if (ret)
+			goto err_unmap;
+	}
+
+	return 0;
+
+err_unmap:
+	/* Exactly i ranges were mapped before the failure; unmap them all. */
+	panthor_vm_unmap_pages(vm, iova, size * i);
+	return ret;
+}
+
static int flags_to_prot(u32 flags)
{
int prot = 0;
@@ -1184,12 +1210,14 @@ panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx)
(DRM_PANTHOR_VM_BIND_OP_MAP_READONLY | \
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | \
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED | \
+ DRM_PANTHOR_VM_BIND_OP_MAP_REPEAT | \
DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
struct panthor_vm *vm,
struct panthor_gem_object *bo,
u64 offset,
+ u64 repeat_range,
u64 size, u64 va,
u32 flags)
{
@@ -1205,9 +1233,28 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
(flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) != DRM_PANTHOR_VM_BIND_OP_TYPE_MAP)
return -EINVAL;
- /* Make sure the VA and size are in-bounds. */
- if (size > bo->base.base.size || offset > bo->base.base.size - size)
- return -EINVAL;
+ if (!(flags & DRM_PANTHOR_VM_BIND_OP_MAP_REPEAT)) {
+ /* Make sure the VA and size are in-bounds. */
+ if (size > bo->base.base.size || offset > bo->base.base.size - size)
+ return -EINVAL;
+ } else {
+		/* The current DRM uAPI uses 32 bits for the repeat range. */
+		if (repeat_range > U32_MAX)
+ return -EINVAL;
+
+ /* Make sure the repeat_range is in-bounds. */
+ if (repeat_range > bo->base.base.size || offset > bo->base.base.size - repeat_range)
+ return -EINVAL;
+
+		/* The repeat range must be a multiple of the minimum GPU page size. */
+ if (repeat_range & ((1u << (ffs(vm->ptdev->mmu_info.page_size_bitmap) - 1)) - 1))
+ return -EINVAL;
+
+ u64 repeat_count = size;
+
+ if (do_div(repeat_count, repeat_range))
+ return -EINVAL;
+ }
/* If the BO has an exclusive VM attached, it can't be mapped to other VMs. */
if (bo->exclusive_vm_root_gem &&
@@ -1257,6 +1304,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
op_ctx->map.bo_offset = offset;
+ op_ctx->map.bo_repeat_range = repeat_range;
/* L1, L2 and L3 page tables.
* We could optimize L3 allocation by iterating over the sgt and merging
@@ -2088,9 +2136,29 @@ static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv)
panthor_vma_init(vma, op_ctx->flags & PANTHOR_VM_MAP_FLAGS);
- ret = panthor_vm_map_pages(vm, op->map.va.addr, flags_to_prot(vma->flags),
- op_ctx->map.sgt, op->map.gem.offset,
- op->map.va.range);
+ if (op_ctx->flags & DRM_PANTHOR_VM_BIND_OP_MAP_REPEAT) {
+ u64 repeat_count = op->map.va.range;
+
+ do_div(repeat_count, op->map.gem.repeat_range);
+
+ if (drm_WARN_ON(&vm->ptdev->base, !repeat_count))
+ return -EINVAL;
+
+ ret = panthor_vm_repeated_map_pages(vm, op->map.va.addr,
+ flags_to_prot(vma->flags),
+ op_ctx->map.sgt,
+ op->map.gem.offset,
+ op->map.gem.repeat_range,
+ repeat_count);
+ if (!ret)
+ vm->base.flags |= DRM_GPUVM_HAS_REPEAT_MAPS;
+ } else {
+ ret = panthor_vm_map_pages(vm, op->map.va.addr,
+ flags_to_prot(vma->flags),
+ op_ctx->map.sgt, op->map.gem.offset,
+ op->map.va.range);
+ }
+
if (ret) {
panthor_vm_op_ctx_return_vma(op_ctx, vma);
return ret;
@@ -2165,8 +2233,22 @@ static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
* page and then remap the difference between the huge page minus the requested
* unmap region. Calculating the right start address and range for the expanded
* unmap operation is the responsibility of the following function.
+ * However, we never allow partial unmaps of repeated regions.
*/
- unmap_hugepage_align(&op->remap, &unmap_start, &unmap_range);
+ if (op->remap.next && op->remap.prev) {
+ if (drm_WARN_ON(&vm->ptdev->base,
+ (op->remap.next->flags & DRM_GPUVA_REPEAT) !=
+ (op->remap.prev->flags & DRM_GPUVA_REPEAT)))
+ return -EINVAL;
+ if (drm_WARN_ON(&vm->ptdev->base,
+ op->remap.next->gem.repeat_range !=
+ op->remap.prev->gem.repeat_range))
+ return -EINVAL;
+ }
+
+ if (!(op->remap.next && (op->remap.next->flags & DRM_GPUVA_REPEAT)) &&
+ !(op->remap.prev && (op->remap.prev->flags & DRM_GPUVA_REPEAT)))
+ unmap_hugepage_align(&op->remap, &unmap_start, &unmap_range);
/* If the range changed, we might have to lock a wider region to guarantee
* atomicity. panthor_vm_lock_region() bails out early if the new region
@@ -2283,7 +2365,7 @@ panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
switch (op_type) {
case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: {
- const struct drm_gpuvm_map_req map_req = {
+ struct drm_gpuvm_map_req map_req = {
.map.va.addr = op->va.addr,
.map.va.range = op->va.range,
.map.gem.obj = op->map.vm_bo->obj,
@@ -2295,6 +2377,11 @@ panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
break;
}
+ if (op->flags & DRM_PANTHOR_VM_BIND_OP_MAP_REPEAT) {
+ map_req.map.flags |= DRM_GPUVA_REPEAT;
+ map_req.map.gem.repeat_range = op->map.bo_repeat_range;
+ }
+
ret = drm_gpuvm_sm_map(&vm->base, vm, &map_req);
break;
}
@@ -2544,6 +2631,7 @@ panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
ret = panthor_vm_prepare_map_op_ctx(op_ctx, vm,
gem ? to_panthor_bo(gem) : NULL,
op->bo_offset,
+ op->bo_repeat_range,
op->size,
op->va,
op->flags);
@@ -2745,7 +2833,10 @@ int panthor_vm_map_bo_range(struct panthor_vm *vm, struct panthor_gem_object *bo
struct panthor_vm_op_ctx op_ctx;
int ret;
- ret = panthor_vm_prepare_map_op_ctx(&op_ctx, vm, bo, offset, size, va, flags);
+ if (drm_WARN_ON(&vm->ptdev->base, flags & DRM_PANTHOR_VM_BIND_OP_MAP_REPEAT))
+ return -EINVAL;
+
+ ret = panthor_vm_prepare_map_op_ctx(&op_ctx, vm, bo, offset, 0, size, va, flags);
if (ret)
return ret;
diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h
index 4089271f3d36..46217ce2c0f5 100644
--- a/include/uapi/drm/panthor_drm.h
+++ b/include/uapi/drm/panthor_drm.h
@@ -555,6 +555,17 @@ enum drm_panthor_vm_bind_op_flags {
*/
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED = 1 << 2,
+ /**
+ * @DRM_PANTHOR_VM_BIND_OP_MAP_REPEAT: Repeat a BO range
+ *
+ * Only valid with DRM_PANTHOR_VM_BIND_OP_TYPE_MAP.
+ *
+ * When this is set, a BO range is repeated over the VA range.
+ * drm_panthor_vm_bind_op::bo_repeat_range defines the size of the
+ * BO range to repeat.
+ */
+ DRM_PANTHOR_VM_BIND_OP_MAP_REPEAT = 1 << 3,
+
/**
* @DRM_PANTHOR_VM_BIND_OP_TYPE_MASK: Mask used to determine the type of operation.
*/
@@ -619,6 +630,15 @@ struct drm_panthor_vm_bind_op {
*/
struct drm_panthor_obj_array syncs;
+ /**
+ * @bo_repeat_range: The size of the range to be repeated.
+ *
+ * Must be zero if DRM_PANTHOR_VM_BIND_OP_MAP_REPEAT is not set in
+ * flags.
+ *
+ * Size must be a multiple of bo_repeat_range.
+ */
+ __u64 bo_repeat_range;
};
/**
--
2.53.0
next prev parent reply other threads:[~2026-03-13 15:11 UTC|newest]
Thread overview: 24+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-13 15:09 [PATCH v5 00/11] Support repeated mappings in GPUVM and Panthor Adrián Larumbe
2026-03-13 15:09 ` [PATCH v5 01/11] drm/panthor: Expose GPU page sizes to UM Adrián Larumbe
2026-03-13 20:48 ` Claude review: " Claude Code Review Bot
2026-03-13 15:09 ` [PATCH v5 02/11] drm/gpuvm: Remove dead code Adrián Larumbe
2026-03-13 20:48 ` Claude review: " Claude Code Review Bot
2026-03-13 15:09 ` [PATCH v5 03/11] drm/gpuvm: Fix comment to reflect remap operation operand status Adrián Larumbe
2026-03-13 20:48 ` Claude review: " Claude Code Review Bot
2026-03-13 15:09 ` [PATCH v5 04/11] drm/gpuvm: Add a helper to check if two VA can be merged Adrián Larumbe
2026-03-13 20:48 ` Claude review: " Claude Code Review Bot
2026-03-13 15:09 ` [PATCH v5 05/11] drm/gpuvm: Add a flags field to drm_gpuva_op_map Adrián Larumbe
2026-03-13 20:48 ` Claude review: " Claude Code Review Bot
2026-03-13 15:09 ` [PATCH v5 06/11] drm/gpuvm: Add DRM_GPUVA_REPEAT flag and logic Adrián Larumbe
2026-03-13 20:48 ` Claude review: " Claude Code Review Bot
2026-03-13 15:09 ` [PATCH v5 07/11] drm/gpuvm: Ensure correctness of unmap/remaps of repeated regions Adrián Larumbe
2026-03-13 20:48 ` Claude review: " Claude Code Review Bot
2026-03-13 15:09 ` Adrián Larumbe [this message]
2026-03-13 20:48 ` Claude review: drm/panthor: Add support for repeated mappings Claude Code Review Bot
2026-03-13 15:09 ` [PATCH v5 09/11] drm/panthor: Handle remap case " Adrián Larumbe
2026-03-13 20:48 ` Claude review: " Claude Code Review Bot
2026-03-13 15:09 ` [PATCH v5 10/11] drm/panthor: Pass vm_bind_op to vm_prepare_map_op_ctx Adrián Larumbe
2026-03-13 20:48 ` Claude review: " Claude Code Review Bot
2026-03-13 15:09 ` [PATCH v5 11/11] drm/panthor: Bump the driver version to 1.8 Adrián Larumbe
2026-03-13 20:48 ` Claude review: " Claude Code Review Bot
2026-03-13 20:48 ` Claude review: Support repeated mappings in GPUVM and Panthor Claude Code Review Bot
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260313150956.1618635-9-adrian.larumbe@collabora.com \
--to=adrian.larumbe@collabora.com \
--cc=airlied@gmail.com \
--cc=aliceryhl@google.com \
--cc=boris.brezillon@collabora.com \
--cc=caterina.shablia@collabora.com \
--cc=daniel.almeida@collabora.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=j@jannau.net \
--cc=kernel@collabora.com \
--cc=linux-kernel@vger.kernel.org \
--cc=liviu.dudau@arm.com \
--cc=maarten.lankhorst@linux.intel.com \
--cc=mripard@kernel.org \
--cc=simona@ffwll.ch \
--cc=steven.price@arm.com \
--cc=tzimmermann@suse.de \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox