From: Boris Brezillon <boris.brezillon@collabora.com>
To: Boris Brezillon <boris.brezillon@collabora.com>,
Steven Price <steven.price@arm.com>,
Liviu Dudau <liviu.dudau@arm.com>,
Adrián Larumbe <adrian.larumbe@collabora.com>
Cc: dri-devel@lists.freedesktop.org, David Airlie <airlied@gmail.com>,
Simona Vetter <simona@ffwll.ch>, Akash Goel <akash.goel@arm.com>,
Rob Clark <robin.clark@oss.qualcomm.com>,
Sean Paul <sean@poorly.run>,
Konrad Dybcio <konradybcio@kernel.org>,
Akhil P Oommen <akhilpo@oss.qualcomm.com>,
Maarten Lankhorst <maarten.lankhorst@linux.intel.com>,
Maxime Ripard <mripard@kernel.org>,
Thomas Zimmermann <tzimmermann@suse.de>,
Dmitry Osipenko <dmitry.osipenko@collabora.com>,
Chris Diamand <chris.diamand@arm.com>,
Danilo Krummrich <dakr@kernel.org>,
Matthew Brost <matthew.brost@intel.com>,
Thomas Hellström <thomas.hellstrom@linux.intel.com>,
Alice Ryhl <aliceryhl@google.com>, Chia-I Wu <olvaffe@gmail.com>,
kernel@collabora.com
Subject: [PATCH v5 6/9] drm/panthor: Lazily allocate pages on mmap()
Date: Mon, 9 Mar 2026 16:11:16 +0100 [thread overview]
Message-ID: <20260309151119.290217-7-boris.brezillon@collabora.com> (raw)
In-Reply-To: <20260309151119.290217-1-boris.brezillon@collabora.com>
Defer page allocation until first access.
v2:
- Don't deal with FAULT_FLAG_INTERRUPTIBLE
- Make sure bo->backing.pages is never an ERR_PTR()
- Drop a useless vm_fault_t local var
- Fix comment in panthor_gem_fault()
v3:
- Collect R-bs
v4:
- No changes
v5:
- No changes
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
---
drivers/gpu/drm/panthor/panthor_gem.c | 115 ++++++++++++++++----------
1 file changed, 70 insertions(+), 45 deletions(-)
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
index 86749e2dc993..a4ccf8d5cb61 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -604,15 +604,6 @@ static int panthor_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *v
if (is_cow_mapping(vma->vm_flags))
return -EINVAL;
- dma_resv_lock(obj->resv, NULL);
- ret = panthor_gem_backing_get_pages_locked(bo);
- if (!ret)
- ret = panthor_gem_prep_for_cpu_map_locked(bo);
- dma_resv_unlock(obj->resv);
-
- if (ret)
- return ret;
-
vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
if (should_map_wc(bo))
@@ -632,82 +623,116 @@ static enum drm_gem_object_status panthor_gem_status(struct drm_gem_object *obj)
return res;
}
-static bool try_map_pmd(struct vm_fault *vmf, unsigned long addr, struct page *page)
+static vm_fault_t insert_page(struct vm_fault *vmf, struct page *page)
{
+ struct vm_area_struct *vma = vmf->vma;
+
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
unsigned long pfn = page_to_pfn(page);
unsigned long paddr = pfn << PAGE_SHIFT;
- bool aligned = (addr & ~PMD_MASK) == (paddr & ~PMD_MASK);
+ bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);
if (aligned &&
pmd_none(*vmf->pmd) &&
folio_test_pmd_mappable(page_folio(page))) {
pfn &= PMD_MASK >> PAGE_SHIFT;
if (vmf_insert_pfn_pmd(vmf, pfn, false) == VM_FAULT_NOPAGE)
- return true;
+ return VM_FAULT_NOPAGE;
}
#endif
- return false;
+ return vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
}
-static vm_fault_t panthor_gem_fault(struct vm_fault *vmf)
+static vm_fault_t nonblocking_page_setup(struct vm_fault *vmf, pgoff_t page_offset)
{
struct vm_area_struct *vma = vmf->vma;
- struct drm_gem_object *obj = vma->vm_private_data;
struct panthor_gem_object *bo = to_panthor_bo(vma->vm_private_data);
- loff_t num_pages = obj->size >> PAGE_SHIFT;
vm_fault_t ret;
- pgoff_t page_offset;
- unsigned long pfn;
- /* Offset to faulty address in the VMA. */
- page_offset = vmf->pgoff - vma->vm_pgoff;
+ if (!dma_resv_trylock(bo->base.resv))
+ return VM_FAULT_RETRY;
- dma_resv_lock(bo->base.resv, NULL);
+ if (bo->backing.pages)
+ ret = insert_page(vmf, bo->backing.pages[page_offset]);
+ else
+ ret = VM_FAULT_RETRY;
- if (page_offset >= num_pages ||
- drm_WARN_ON_ONCE(obj->dev, !bo->backing.pages)) {
- ret = VM_FAULT_SIGBUS;
- goto out;
+ dma_resv_unlock(bo->base.resv);
+ return ret;
+}
+
+static vm_fault_t blocking_page_setup(struct vm_fault *vmf,
+ struct panthor_gem_object *bo,
+ pgoff_t page_offset, bool mmap_lock_held)
+{
+ vm_fault_t ret;
+ int err;
+
+ err = dma_resv_lock_interruptible(bo->base.resv, NULL);
+ if (err)
+ return mmap_lock_held ? VM_FAULT_NOPAGE : VM_FAULT_RETRY;
+
+ err = panthor_gem_backing_get_pages_locked(bo);
+ if (!err)
+ err = panthor_gem_prep_for_cpu_map_locked(bo);
+
+ if (err) {
+ ret = mmap_lock_held ? VM_FAULT_SIGBUS : VM_FAULT_RETRY;
+ } else {
+ struct page *page = bo->backing.pages[page_offset];
+
+ if (mmap_lock_held)
+ ret = insert_page(vmf, page);
+ else
+ ret = VM_FAULT_RETRY;
}
- if (try_map_pmd(vmf, vmf->address, bo->backing.pages[page_offset])) {
- ret = VM_FAULT_NOPAGE;
- goto out;
- }
-
- pfn = page_to_pfn(bo->backing.pages[page_offset]);
- ret = vmf_insert_pfn(vma, vmf->address, pfn);
-
- out:
dma_resv_unlock(bo->base.resv);
return ret;
}
-static void panthor_gem_vm_open(struct vm_area_struct *vma)
+static vm_fault_t panthor_gem_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
struct panthor_gem_object *bo = to_panthor_bo(vma->vm_private_data);
+ loff_t num_pages = bo->base.size >> PAGE_SHIFT;
+ pgoff_t page_offset;
+ vm_fault_t ret;
- drm_WARN_ON(bo->base.dev, drm_gem_is_imported(&bo->base));
+ /* Offset to faulty address in the VMA. */
+ page_offset = vmf->pgoff - vma->vm_pgoff;
+ if (page_offset >= num_pages)
+ return VM_FAULT_SIGBUS;
- dma_resv_lock(bo->base.resv, NULL);
+ ret = nonblocking_page_setup(vmf, page_offset);
+ if (ret != VM_FAULT_RETRY)
+ return ret;
- /* We should have already pinned the pages when the buffer was first
- * mmap'd, vm_open() just grabs an additional reference for the new
- * mm the vma is getting copied into (ie. on fork()).
- */
- drm_WARN_ON_ONCE(bo->base.dev, !bo->backing.pages);
+ /* Check if we're allowed to retry. */
+ if (fault_flag_allow_retry_first(vmf->flags)) {
+ /* If we're allowed to retry but not wait here, return
+ * immediately, the wait will be done when the fault
+ * handler is called again, with the mmap_lock held.
+ */
+ if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
+ return VM_FAULT_RETRY;
- dma_resv_unlock(bo->base.resv);
+ /* Wait with the mmap lock released, if we're allowed to. */
+ drm_gem_object_get(&bo->base);
+ mmap_read_unlock(vmf->vma->vm_mm);
+ ret = blocking_page_setup(vmf, bo, page_offset, false);
+ drm_gem_object_put(&bo->base);
+ return ret;
+ }
- drm_gem_vm_open(vma);
+ return blocking_page_setup(vmf, bo, page_offset, true);
}
static const struct vm_operations_struct panthor_gem_vm_ops = {
.fault = panthor_gem_fault,
- .open = panthor_gem_vm_open,
+ .open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
--
2.53.0
next prev parent reply other threads:[~2026-03-09 15:11 UTC|newest]
Thread overview: 21+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-09 15:11 [PATCH v5 0/9] drm/panthor: Add a GEM shrinker Boris Brezillon
2026-03-09 15:11 ` [PATCH v5 1/9] drm/gem: Consider GEM object reclaimable if shrinking fails Boris Brezillon
2026-03-10 2:15 ` Claude review: " Claude Code Review Bot
2026-03-09 15:11 ` [PATCH v5 2/9] drm/panthor: Move panthor_gems_debugfs_init() to panthor_gem.c Boris Brezillon
2026-03-10 2:15 ` Claude review: " Claude Code Review Bot
2026-03-09 15:11 ` [PATCH v5 3/9] drm/panthor: Group panthor_kernel_bo_xxx() helpers Boris Brezillon
2026-03-10 2:15 ` Claude review: " Claude Code Review Bot
2026-03-09 15:11 ` [PATCH v5 4/9] drm/panthor: Don't call drm_gpuvm_bo_extobj_add() if the object is private Boris Brezillon
2026-03-10 2:15 ` Claude review: " Claude Code Review Bot
2026-03-09 15:11 ` [PATCH v5 5/9] drm/panthor: Part ways with drm_gem_shmem_object Boris Brezillon
2026-03-09 15:34 ` Steven Price
2026-03-10 2:15 ` Claude review: " Claude Code Review Bot
2026-03-09 15:11 ` Boris Brezillon [this message]
2026-03-10 2:15 ` Claude review: drm/panthor: Lazily allocate pages on mmap() Claude Code Review Bot
2026-03-09 15:11 ` [PATCH v5 7/9] drm/panthor: Split panthor_vm_prepare_map_op_ctx() to prepare for reclaim Boris Brezillon
2026-03-10 2:15 ` Claude review: " Claude Code Review Bot
2026-03-09 15:11 ` [PATCH v5 8/9] drm/panthor: Track the number of mmap on a BO Boris Brezillon
2026-03-10 2:15 ` Claude review: " Claude Code Review Bot
2026-03-09 15:11 ` [PATCH v5 9/9] drm/panthor: Add a GEM shrinker Boris Brezillon
2026-03-10 2:15 ` Claude review: " Claude Code Review Bot
2026-03-10 2:15 ` Claude Code Review Bot
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260309151119.290217-7-boris.brezillon@collabora.com \
--to=boris.brezillon@collabora.com \
--cc=adrian.larumbe@collabora.com \
--cc=airlied@gmail.com \
--cc=akash.goel@arm.com \
--cc=akhilpo@oss.qualcomm.com \
--cc=aliceryhl@google.com \
--cc=chris.diamand@arm.com \
--cc=dakr@kernel.org \
--cc=dmitry.osipenko@collabora.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=kernel@collabora.com \
--cc=konradybcio@kernel.org \
--cc=liviu.dudau@arm.com \
--cc=maarten.lankhorst@linux.intel.com \
--cc=matthew.brost@intel.com \
--cc=mripard@kernel.org \
--cc=olvaffe@gmail.com \
--cc=robin.clark@oss.qualcomm.com \
--cc=sean@poorly.run \
--cc=simona@ffwll.ch \
--cc=steven.price@arm.com \
--cc=thomas.hellstrom@linux.intel.com \
--cc=tzimmermann@suse.de \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox