From: Jason Gunthorpe <jgg@nvidia.com>
To: Christian König <christian.koenig@amd.com>,
dri-devel@lists.freedesktop.org, linaro-mm-sig@lists.linaro.org,
linux-media@vger.kernel.org,
Sumit Semwal <sumit.semwal@linaro.org>
Cc: Dave Airlie <airlied@redhat.com>,
Andrew Morton <akpm@linux-foundation.org>,
Arnd Bergmann <arnd@arndb.de>,
Daniel Vetter <daniel.vetter@ffwll.ch>,
David Hildenbrand <david@redhat.com>,
Dongwon Kim <dongwon.kim@intel.com>,
Christoph Hellwig <hch@infradead.org>,
Christoph Hellwig <hch@lst.de>, Hugh Dickins <hughd@google.com>,
Julian Orth <ju.orth@gmail.com>,
Junxiao Chang <junxiao.chang@intel.com>,
Gerd Hoffmann <kraxel@redhat.com>,
Mike Kravetz <mike.kravetz@oracle.com>,
Oscar Salvador <osalvador@suse.de>,
patches@lists.linux.dev, Peter Xu <peterx@redhat.com>,
Shuah Khan <shuah@kernel.org>,
Vivek Kasireddy <vivek.kasireddy@intel.com>,
"Matthew Wilcox (Oracle)" <willy@infradead.org>
Subject: [PATCH] udmabuf: Do not create malformed scatterlists
Date: Fri, 13 Mar 2026 14:41:05 -0300 [thread overview]
Message-ID: <0-v1-42779f29381a+4b9-udmabuf_sg_jgg@nvidia.com> (raw)
Using an sg_set_folio() loop for every 4K page results in a malformed
scatterlist because sg_set_folio() has an issue with offsets > PAGE_SIZE and
because scatterlist expects the creator to build a list which consolidates
any physical contiguity.
sg_alloc_table_from_pages() creates a valid scatterlist directly from a
struct page array, so go back to that.
Remove the offsets allocation and just store an array of tail pages, as the
code did before the commit referenced in the Fixes: tag below. Everything
wants that anyhow.
Fixes: 0c8b91ef5100 ("udmabuf: add back support for mapping hugetlb pages")
Reported-by: Julian Orth <ju.orth@gmail.com>
Closes: https://lore.kernel.org/all/20260308-scatterlist-v1-1-39c4566b0bba@gmail.com/
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
drivers/dma-buf/udmabuf.c | 49 +++++++++++----------------------------
1 file changed, 13 insertions(+), 36 deletions(-)
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 94b8ecb892bb17..5d687860445137 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -26,10 +26,10 @@ MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is
struct udmabuf {
pgoff_t pagecount;
- struct folio **folios;
+ struct page **pages;
/**
- * Unlike folios, pinned_folios is only used for unpin.
+ * Unlike pages, pinned_folios is only used for unpin.
* So, nr_pinned is not the same to pagecount, the pinned_folios
* only set each folio which already pinned when udmabuf_create.
* Note that, since a folio may be pinned multiple times, each folio
@@ -41,7 +41,6 @@ struct udmabuf {
struct sg_table *sg;
struct miscdevice *device;
- pgoff_t *offsets;
};
static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
@@ -55,8 +54,7 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
if (pgoff >= ubuf->pagecount)
return VM_FAULT_SIGBUS;
- pfn = folio_pfn(ubuf->folios[pgoff]);
- pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;
+ pfn = page_to_pfn(ubuf->pages[pgoff]);
ret = vmf_insert_pfn(vma, vmf->address, pfn);
if (ret & VM_FAULT_ERROR)
@@ -73,8 +71,7 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
if (WARN_ON(pgoff >= ubuf->pagecount))
break;
- pfn = folio_pfn(ubuf->folios[pgoff]);
- pfn += ubuf->offsets[pgoff] >> PAGE_SHIFT;
+ pfn = page_to_pfn(ubuf->pages[pgoff]);
/**
* If the below vmf_insert_pfn() fails, we do not return an
@@ -109,22 +106,11 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
struct udmabuf *ubuf = buf->priv;
- struct page **pages;
void *vaddr;
- pgoff_t pg;
dma_resv_assert_held(buf->resv);
- pages = kvmalloc_objs(*pages, ubuf->pagecount);
- if (!pages)
- return -ENOMEM;
-
- for (pg = 0; pg < ubuf->pagecount; pg++)
- pages[pg] = folio_page(ubuf->folios[pg],
- ubuf->offsets[pg] >> PAGE_SHIFT);
-
- vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
- kvfree(pages);
+ vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
if (!vaddr)
return -EINVAL;
@@ -146,22 +132,18 @@ static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
{
struct udmabuf *ubuf = buf->priv;
struct sg_table *sg;
- struct scatterlist *sgl;
- unsigned int i = 0;
int ret;
sg = kzalloc_obj(*sg);
if (!sg)
return ERR_PTR(-ENOMEM);
- ret = sg_alloc_table(sg, ubuf->pagecount, GFP_KERNEL);
+ ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount, 0,
+ ubuf->pagecount << PAGE_SHIFT,
+ GFP_KERNEL);
if (ret < 0)
goto err_alloc;
- for_each_sg(sg->sgl, sgl, ubuf->pagecount, i)
- sg_set_folio(sgl, ubuf->folios[i], PAGE_SIZE,
- ubuf->offsets[i]);
-
ret = dma_map_sgtable(dev, sg, direction, 0);
if (ret < 0)
goto err_map;
@@ -207,12 +189,8 @@ static void unpin_all_folios(struct udmabuf *ubuf)
static __always_inline int init_udmabuf(struct udmabuf *ubuf, pgoff_t pgcnt)
{
- ubuf->folios = kvmalloc_objs(*ubuf->folios, pgcnt);
- if (!ubuf->folios)
- return -ENOMEM;
-
- ubuf->offsets = kvzalloc_objs(*ubuf->offsets, pgcnt);
- if (!ubuf->offsets)
+ ubuf->pages = kvmalloc_objs(*ubuf->pages, pgcnt);
+ if (!ubuf->pages)
return -ENOMEM;
ubuf->pinned_folios = kvmalloc_objs(*ubuf->pinned_folios, pgcnt);
@@ -225,8 +203,7 @@ static __always_inline int init_udmabuf(struct udmabuf *ubuf, pgoff_t pgcnt)
static __always_inline void deinit_udmabuf(struct udmabuf *ubuf)
{
unpin_all_folios(ubuf);
- kvfree(ubuf->offsets);
- kvfree(ubuf->folios);
+ kvfree(ubuf->pages);
}
static void release_udmabuf(struct dma_buf *buf)
@@ -344,8 +321,8 @@ static long udmabuf_pin_folios(struct udmabuf *ubuf, struct file *memfd,
ubuf->pinned_folios[nr_pinned++] = folios[cur_folio];
for (; subpgoff < fsize; subpgoff += PAGE_SIZE) {
- ubuf->folios[upgcnt] = folios[cur_folio];
- ubuf->offsets[upgcnt] = subpgoff;
+ ubuf->pages[upgcnt] = folio_page(folios[cur_folio],
+ subpgoff >> PAGE_SHIFT);
++upgcnt;
if (++cur_pgcnt >= pgcnt)
base-commit: 1f318b96cc84d7c2ab792fcc0bfd42a7ca890681
--
2.43.0
next reply other threads:[~2026-03-13 17:41 UTC|newest]
Thread overview: 3+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-13 17:41 Jason Gunthorpe [this message]
2026-03-13 20:43 ` Claude review: udmabuf: Do not create malformed scatterlists Claude Code Review Bot
2026-03-13 20:43 ` Claude Code Review Bot
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=0-v1-42779f29381a+4b9-udmabuf_sg_jgg@nvidia.com \
--to=jgg@nvidia.com \
--cc=airlied@redhat.com \
--cc=akpm@linux-foundation.org \
--cc=arnd@arndb.de \
--cc=christian.koenig@amd.com \
--cc=daniel.vetter@ffwll.ch \
--cc=david@redhat.com \
--cc=dongwon.kim@intel.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=hch@infradead.org \
--cc=hch@lst.de \
--cc=hughd@google.com \
--cc=ju.orth@gmail.com \
--cc=junxiao.chang@intel.com \
--cc=kraxel@redhat.com \
--cc=linaro-mm-sig@lists.linaro.org \
--cc=linux-media@vger.kernel.org \
--cc=mike.kravetz@oracle.com \
--cc=osalvador@suse.de \
--cc=patches@lists.linux.dev \
--cc=peterx@redhat.com \
--cc=shuah@kernel.org \
--cc=sumit.semwal@linaro.org \
--cc=vivek.kasireddy@intel.com \
--cc=willy@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox