From: Jason Gunthorpe <jgg@nvidia.com>
Cc: Christian Koenig <christian.koenig@amd.com>,
Dongwon Kim <dongwon.kim@intel.com>,
dri-devel@lists.freedesktop.org, intel-xe@lists.freedesktop.org,
iommu@lists.linux.dev, Kevin Tian <kevin.tian@intel.com>,
Leon Romanovsky <leonro@nvidia.com>,
linaro-mm-sig@lists.linaro.org, linux-media@vger.kernel.org,
Matthew Brost <matthew.brost@intel.com>,
Simona Vetter <simona.vetter@ffwll.ch>,
Sumit Semwal <sumit.semwal@linaro.org>,
Thomas Hellstrom <thomas.hellstrom@linux.intel.com>,
Vivek Kasireddy <vivek.kasireddy@intel.com>
Subject: [PATCH RFC 23/26] iommufd: Use the PAL mapping type instead of a vfio function
Date: Tue, 17 Feb 2026 20:11:54 -0400 [thread overview]
Message-ID: <23-v1-b5cab63049c0+191af-dmabuf_map_type_jgg@nvidia.com> (raw)
In-Reply-To: <0-v1-b5cab63049c0+191af-dmabuf_map_type_jgg@nvidia.com>
Switch iommufd over to use the PAL mapping type. iommufd is the only
importer permitted to use this, and this is enforced by module name
restrictions.
If the exporter does not support PAL then the import will fail, same as
today.
If the exporter does offer PAL then the PAL functions are used to get a
phys_addr_t array for use in iommufd. For now, the exporter must provide a
single-entry list.
Remove everything related to vfio_pci_dma_buf_iommufd_map() and call the new
PAL unmap function instead.
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
drivers/iommu/iommufd/io_pagetable.h | 1 +
drivers/iommu/iommufd/iommufd_private.h | 8 ----
drivers/iommu/iommufd/pages.c | 58 +++++++++++-----------
drivers/iommu/iommufd/selftest.c | 64 ++++++++++++++-----------
drivers/vfio/pci/vfio_pci_dmabuf.c | 34 -------------
5 files changed, 64 insertions(+), 101 deletions(-)
diff --git a/drivers/iommu/iommufd/io_pagetable.h b/drivers/iommu/iommufd/io_pagetable.h
index 14cd052fd3204e..fcd1a2c75dfa3d 100644
--- a/drivers/iommu/iommufd/io_pagetable.h
+++ b/drivers/iommu/iommufd/io_pagetable.h
@@ -202,6 +202,7 @@ struct iopt_pages_dmabuf_track {
struct iopt_pages_dmabuf {
struct dma_buf_attachment *attach;
+ struct dma_buf_phys_list *exp_phys;
struct dma_buf_phys_vec phys;
/* Always PAGE_SIZE aligned */
unsigned long start;
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index eb6d1a70f6732c..cfb8637cb143ac 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -717,8 +717,6 @@ bool iommufd_should_fail(void);
int __init iommufd_test_init(void);
void iommufd_test_exit(void);
bool iommufd_selftest_is_mock_dev(struct device *dev);
-int iommufd_test_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
- struct dma_buf_phys_vec *phys);
#else
static inline void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
unsigned int ioas_id,
@@ -740,11 +738,5 @@ static inline bool iommufd_selftest_is_mock_dev(struct device *dev)
{
return false;
}
-static inline int
-iommufd_test_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
- struct dma_buf_phys_vec *phys)
-{
- return -EOPNOTSUPP;
-}
#endif
#endif
diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
index a487d93dacadab..9a23c3e30959a9 100644
--- a/drivers/iommu/iommufd/pages.c
+++ b/drivers/iommu/iommufd/pages.c
@@ -46,6 +46,7 @@
* ULONG_MAX so last_index + 1 cannot overflow.
*/
#include <linux/dma-buf.h>
+#include <linux/dma-buf-mapping.h>
#include <linux/dma-resv.h>
#include <linux/file.h>
#include <linux/highmem.h>
@@ -1447,6 +1448,8 @@ static void iopt_revoke_notify(struct dma_buf_attachment *attach)
iopt_area_last_index(area));
}
pages->dmabuf.phys.len = 0;
+ dma_buf_pal_unmap_phys(pages->dmabuf.attach, pages->dmabuf.exp_phys);
+ pages->dmabuf.exp_phys = NULL;
}
static struct dma_buf_attach_ops iopt_dmabuf_attach_revoke_ops = {
@@ -1454,41 +1457,16 @@ static struct dma_buf_attach_ops iopt_dmabuf_attach_revoke_ops = {
.move_notify = iopt_revoke_notify,
};
-/*
- * iommufd and vfio have a circular dependency. Future work for a phys
- * based private interconnect will remove this.
- */
-static int
-sym_vfio_pci_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
- struct dma_buf_phys_vec *phys)
-{
- typeof(&vfio_pci_dma_buf_iommufd_map) fn;
- int rc;
-
- rc = iommufd_test_dma_buf_iommufd_map(attachment, phys);
- if (rc != -EOPNOTSUPP)
- return rc;
-
- if (!IS_ENABLED(CONFIG_VFIO_PCI_DMABUF))
- return -EOPNOTSUPP;
-
- fn = symbol_get(vfio_pci_dma_buf_iommufd_map);
- if (!fn)
- return -EOPNOTSUPP;
- rc = fn(attachment, phys);
- symbol_put(vfio_pci_dma_buf_iommufd_map);
- return rc;
-}
-
static int iopt_map_dmabuf(struct iommufd_ctx *ictx, struct iopt_pages *pages,
struct dma_buf *dmabuf)
{
+ struct dma_buf_mapping_match pal_match[] = { DMA_BUF_IMAPPING_PAL() };
struct dma_buf_attachment *attach;
int rc;
- attach = dma_buf_sgt_dynamic_attach(dmabuf, iommufd_global_device(),
- &iopt_dmabuf_attach_revoke_ops,
- pages);
+ attach = dma_buf_mapping_attach(dmabuf, pal_match,
+ ARRAY_SIZE(pal_match),
+ &iopt_dmabuf_attach_revoke_ops, pages);
if (IS_ERR(attach))
return PTR_ERR(attach);
@@ -1502,9 +1480,19 @@ static int iopt_map_dmabuf(struct iommufd_ctx *ictx, struct iopt_pages *pages,
mutex_unlock(&pages->mutex);
}
- rc = sym_vfio_pci_dma_buf_iommufd_map(attach, &pages->dmabuf.phys);
- if (rc)
+
+ pages->dmabuf.exp_phys = dma_buf_pal_map_phys(attach);
+ if (IS_ERR(pages->dmabuf.exp_phys)) {
+ rc = PTR_ERR(pages->dmabuf.exp_phys);
goto err_detach;
+ }
+
+ /* For now only works with single range exporters */
+ if (pages->dmabuf.exp_phys->length != 1) {
+ rc = -EINVAL;
+ goto err_unmap;
+ }
+ pages->dmabuf.phys = pages->dmabuf.exp_phys->phys[0];
dma_resv_unlock(dmabuf->resv);
@@ -1512,6 +1500,8 @@ static int iopt_map_dmabuf(struct iommufd_ctx *ictx, struct iopt_pages *pages,
pages->dmabuf.attach = attach;
return 0;
+err_unmap:
+ dma_buf_pal_unmap_phys(attach, pages->dmabuf.exp_phys);
err_detach:
dma_resv_unlock(dmabuf->resv);
dma_buf_detach(dmabuf, attach);
@@ -1657,6 +1647,12 @@ void iopt_release_pages(struct kref *kref)
if (iopt_is_dmabuf(pages) && pages->dmabuf.attach) {
struct dma_buf *dmabuf = pages->dmabuf.attach->dmabuf;
+ dma_resv_lock(dmabuf->resv, NULL);
+ if (pages->dmabuf.exp_phys)
+ dma_buf_pal_unmap_phys(pages->dmabuf.attach,
+ pages->dmabuf.exp_phys);
+ dma_resv_unlock(dmabuf->resv);
+
dma_buf_detach(dmabuf, pages->dmabuf.attach);
dma_buf_put(dmabuf);
WARN_ON(!list_empty(&pages->dmabuf.tracker));
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index 7aa6a58a5705f7..06820a50d5d24c 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -1962,19 +1962,6 @@ struct iommufd_test_dma_buf {
bool revoked;
};
-static struct sg_table *
-iommufd_test_dma_buf_map(struct dma_buf_attachment *attachment,
- enum dma_data_direction dir)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static void iommufd_test_dma_buf_unmap(struct dma_buf_attachment *attachment,
- struct sg_table *sgt,
- enum dma_data_direction dir)
-{
-}
-
static void iommufd_test_dma_buf_release(struct dma_buf *dmabuf)
{
struct iommufd_test_dma_buf *priv = dmabuf->priv;
@@ -1983,30 +1970,51 @@ static void iommufd_test_dma_buf_release(struct dma_buf *dmabuf)
kfree(priv);
}
-static const struct dma_buf_ops iommufd_test_dmabuf_ops = {
- .release = iommufd_test_dma_buf_release,
- DMA_BUF_SIMPLE_SGT_EXP_MATCH(iommufd_test_dma_buf_map,
- iommufd_test_dma_buf_unmap),
-};
-
-int iommufd_test_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
- struct dma_buf_phys_vec *phys)
+static struct dma_buf_phys_list *
+iommufd_dma_pal_map_phys(struct dma_buf_attachment *attachment)
{
struct iommufd_test_dma_buf *priv = attachment->dmabuf->priv;
+ struct dma_buf_phys_list *phys;
dma_resv_assert_held(attachment->dmabuf->resv);
- if (attachment->dmabuf->ops != &iommufd_test_dmabuf_ops)
- return -EOPNOTSUPP;
-
if (priv->revoked)
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
- phys->paddr = virt_to_phys(priv->memory);
- phys->len = priv->length;
- return 0;
+ phys = kvmalloc(struct_size(phys, phys, 1), GFP_KERNEL);
+ if (!phys)
+ return ERR_PTR(-ENOMEM);
+
+ phys->length = 1;
+ phys->phys[0].paddr = virt_to_phys(priv->memory);
+ phys->phys[0].len = priv->length;
+ return phys;
}
+static void iommufd_dma_pal_unmap_phys(struct dma_buf_attachment *attach,
+ struct dma_buf_phys_list *phys)
+{
+}
+
+static const struct dma_buf_mapping_pal_exp_ops iommufd_test_dma_buf_pal_ops = {
+ .map_phys = iommufd_dma_pal_map_phys,
+ .unmap_phys = iommufd_dma_pal_unmap_phys,
+};
+
+static int iommufd_dma_buf_match_mapping(struct dma_buf_match_args *args)
+{
+ struct dma_buf_mapping_match pal_match[] = {
+ DMA_BUF_EMAPPING_PAL(&iommufd_test_dma_buf_pal_ops),
+ };
+
+ return dma_buf_match_mapping(args, pal_match, ARRAY_SIZE(pal_match));
+}
+
+static const struct dma_buf_ops iommufd_test_dmabuf_ops = {
+ .release = iommufd_test_dma_buf_release,
+ .match_mapping = iommufd_dma_buf_match_mapping,
+};
+
static int iommufd_test_dmabuf_get(struct iommufd_ucmd *ucmd,
unsigned int open_flags,
size_t len)
diff --git a/drivers/vfio/pci/vfio_pci_dmabuf.c b/drivers/vfio/pci/vfio_pci_dmabuf.c
index f8d5848a47ff55..247c709541a937 100644
--- a/drivers/vfio/pci/vfio_pci_dmabuf.c
+++ b/drivers/vfio/pci/vfio_pci_dmabuf.c
@@ -133,40 +133,6 @@ static const struct dma_buf_ops vfio_pci_dmabuf_ops = {
.match_mapping = vfio_pci_dma_buf_match_mapping,
};
-/*
- * This is a temporary "private interconnect" between VFIO DMABUF and iommufd.
- * It allows the two co-operating drivers to exchange the physical address of
- * the BAR. This is to be replaced with a formal DMABUF system for negotiated
- * interconnect types.
- *
- * If this function succeeds the following are true:
- * - There is one physical range and it is pointing to MMIO
- * - When move_notify is called it means revoke, not move, vfio_dma_buf_map
- * will fail if it is currently revoked
- */
-int vfio_pci_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
- struct dma_buf_phys_vec *phys)
-{
- struct vfio_pci_dma_buf *priv;
-
- dma_resv_assert_held(attachment->dmabuf->resv);
-
- if (attachment->dmabuf->ops != &vfio_pci_dmabuf_ops)
- return -EOPNOTSUPP;
-
- priv = attachment->dmabuf->priv;
- if (priv->revoked)
- return -ENODEV;
-
- /* More than one range to iommufd will require proper DMABUF support */
- if (priv->nr_ranges != 1)
- return -EOPNOTSUPP;
-
- *phys = priv->phys_vec[0];
- return 0;
-}
-EXPORT_SYMBOL_FOR_MODULES(vfio_pci_dma_buf_iommufd_map, "iommufd");
-
int vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec,
struct vfio_region_dma_range *dma_ranges,
size_t nr_ranges, phys_addr_t start,
--
2.43.0
next prev parent reply other threads:[~2026-02-18 0:12 UTC|newest]
Thread overview: 54+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-18 0:11 [PATCH RFC 00/26] Add DMA-buf mapping types and convert vfio/iommufd to use them Jason Gunthorpe
2026-02-18 0:11 ` [PATCH RFC 01/26] dma-buf: Introduce DMA-buf mapping types Jason Gunthorpe
2026-02-18 1:37 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 02/26] dma-buf: Add the SGT DMA mapping type Jason Gunthorpe
2026-02-18 1:37 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 03/26] dma-buf: Add dma_buf_mapping_attach() Jason Gunthorpe
2026-02-18 1:37 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 04/26] dma-buf: Route SGT related actions through attach->map_type Jason Gunthorpe
2026-02-18 1:37 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 05/26] dma-buf: Allow single exporter drivers to avoid the match_mapping function Jason Gunthorpe
2026-02-18 1:37 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 06/26] drm: Check the SGT ops for drm_gem_map_dma_buf() Jason Gunthorpe
2026-02-18 1:37 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 07/26] dma-buf: Convert all the simple exporters to use SGT mapping type Jason Gunthorpe
2026-02-18 1:37 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 08/26] drm/vmwgfx: Use match_mapping instead of dummy calls Jason Gunthorpe
2026-02-18 1:37 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 09/26] accel/habanalabs: Use the SGT mapping type Jason Gunthorpe
2026-02-18 1:37 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 10/26] drm/xe/dma-buf: " Jason Gunthorpe
2026-02-18 1:37 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 11/26] drm/amdgpu: " Jason Gunthorpe
2026-02-18 1:37 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 12/26] vfio/pci: Change the DMA-buf exporter to use mapping_type Jason Gunthorpe
2026-02-18 1:37 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 13/26] dma-buf: Update dma_buf_phys_vec_to_sgt() to use the SGT mapping type Jason Gunthorpe
2026-02-18 1:37 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 14/26] iio: buffer: convert " Jason Gunthorpe
2026-02-18 1:37 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 15/26] functionfs: " Jason Gunthorpe
2026-02-18 1:37 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 16/26] dma-buf: Remove unused SGT stuff from the common structures Jason Gunthorpe
2026-02-18 1:37 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 17/26] treewide: Rename dma_buf_map_attachment(_unlocked) to dma_buf_sgt_ Jason Gunthorpe
2026-02-18 1:38 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 18/26] treewide: Rename dma_buf_unmap_attachment(_unlocked) to dma_buf_sgt_* Jason Gunthorpe
2026-02-18 1:38 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 19/26] treewide: Rename dma_buf_attach() to dma_buf_sgt_attach() Jason Gunthorpe
2026-02-18 1:38 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 20/26] treewide: Rename dma_buf_dynamic_attach() to dma_buf_sgt_dynamic_attach() Jason Gunthorpe
2026-02-18 1:38 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 21/26] dma-buf: Add the Physical Address List DMA mapping type Jason Gunthorpe
2026-02-18 1:38 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 22/26] vfio/pci: Add physical address list support to DMABUF Jason Gunthorpe
2026-02-18 1:38 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` Jason Gunthorpe [this message]
2026-02-18 1:38 ` Claude review: iommufd: Use the PAL mapping type instead of a vfio function Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 24/26] iommufd: Support DMA-bufs with multiple physical ranges Jason Gunthorpe
2026-02-18 1:38 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 25/26] iommufd/selftest: Check multi-phys DMA-buf scenarios Jason Gunthorpe
2026-02-18 1:38 ` Claude review: " Claude Code Review Bot
2026-02-18 0:11 ` [PATCH RFC 26/26] dma-buf: Add kunit tests for mapping type Jason Gunthorpe
2026-02-18 1:38 ` Claude review: " Claude Code Review Bot
2026-02-18 1:37 ` Claude review: Add DMA-buf mapping types and convert vfio/iommufd to use them Claude Code Review Bot
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=23-v1-b5cab63049c0+191af-dmabuf_map_type_jgg@nvidia.com \
--to=jgg@nvidia.com \
--cc=christian.koenig@amd.com \
--cc=dongwon.kim@intel.com \
--cc=dri-devel@lists.freedesktop.org \
--cc=intel-xe@lists.freedesktop.org \
--cc=iommu@lists.linux.dev \
--cc=kevin.tian@intel.com \
--cc=leonro@nvidia.com \
--cc=linaro-mm-sig@lists.linaro.org \
--cc=linux-media@vger.kernel.org \
--cc=matthew.brost@intel.com \
--cc=simona.vetter@ffwll.ch \
--cc=sumit.semwal@linaro.org \
--cc=thomas.hellstrom@linux.intel.com \
--cc=vivek.kasireddy@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox