public inbox for drm-ai-reviews@public-inbox.freedesktop.org
 help / color / mirror / Atom feed
From: Jason Gunthorpe <jgg@nvidia.com>
Cc: Christian Koenig <christian.koenig@amd.com>,
	Dongwon Kim <dongwon.kim@intel.com>,
	dri-devel@lists.freedesktop.org, intel-xe@lists.freedesktop.org,
	iommu@lists.linux.dev, Kevin Tian <kevin.tian@intel.com>,
	Leon Romanovsky <leonro@nvidia.com>,
	linaro-mm-sig@lists.linaro.org, linux-media@vger.kernel.org,
	Matthew Brost <matthew.brost@intel.com>,
	Simona Vetter <simona.vetter@ffwll.ch>,
	Sumit Semwal <sumit.semwal@linaro.org>,
	Thomas Hellstrom <thomas.hellstrom@linux.intel.com>,
	Vivek Kasireddy <vivek.kasireddy@intel.com>
Subject: [PATCH RFC 10/26] drm/xe/dma-buf: Use the SGT mapping type
Date: Tue, 17 Feb 2026 20:11:41 -0400	[thread overview]
Message-ID: <10-v1-b5cab63049c0+191af-dmabuf_map_type_jgg@nvidia.com> (raw)
In-Reply-To: <0-v1-b5cab63049c0+191af-dmabuf_map_type_jgg@nvidia.com>

Like habana, xe wants to check pci_p2pdma_distance(), but unlike
habana, it can migrate to system memory and support non-p2p DMAs as well.

Add two exporter SGT mapping types, one that matches P2P and one that
matches all of the non-p2p cases. The pin and map code will force a
migration to system memory if the non-p2p one is matched.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/gpu/drm/xe/xe_dma_buf.c | 58 +++++++++++++++++++++------------
 1 file changed, 37 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index 7c74a31d448602..9968f37657d57d 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -7,7 +7,7 @@
 
 #include <kunit/test.h>
 #include <linux/dma-buf.h>
-#include <linux/pci-p2pdma.h>
+#include <linux/dma-buf-mapping.h>
 
 #include <drm/drm_device.h>
 #include <drm/drm_prime.h>
@@ -27,13 +27,6 @@ static int xe_dma_buf_attach(struct dma_buf *dmabuf,
 {
 	struct drm_gem_object *obj = attach->dmabuf->priv;
 
-	if (attach->peer2peer &&
-	    pci_p2pdma_distance(to_pci_dev(obj->dev->dev), attach->dev, false) < 0)
-		attach->peer2peer = false;
-
-	if (!attach->peer2peer && !xe_bo_can_migrate(gem_to_xe_bo(obj), XE_PL_TT))
-		return -EOPNOTSUPP;
-
 	xe_pm_runtime_get(to_xe_device(obj->dev));
 	return 0;
 }
@@ -53,14 +46,12 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
 	struct xe_bo *bo = gem_to_xe_bo(obj);
 	struct xe_device *xe = xe_bo_device(bo);
 	struct drm_exec *exec = XE_VALIDATION_UNSUPPORTED;
-	bool allow_vram = true;
+	bool allow_vram = dma_buf_sgt_p2p_allowed(attach);
 	int ret;
 
-	if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
-		allow_vram = false;
-	} else {
+	if (allow_vram) {
 		list_for_each_entry(attach, &dmabuf->attachments, node) {
-			if (!attach->peer2peer) {
+			if (!dma_buf_sgt_p2p_allowed(attach)) {
 				allow_vram = false;
 				break;
 			}
@@ -101,6 +92,8 @@ static void xe_dma_buf_unpin(struct dma_buf_attachment *attach)
 static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
 				       enum dma_data_direction dir)
 {
+	struct device *dma_dev = dma_buf_sgt_dma_device(attach);
+	bool peer2peer = dma_buf_sgt_p2p_allowed(attach);
 	struct dma_buf *dma_buf = attach->dmabuf;
 	struct drm_gem_object *obj = dma_buf->priv;
 	struct xe_bo *bo = gem_to_xe_bo(obj);
@@ -108,11 +101,11 @@ static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
 	struct sg_table *sgt;
 	int r = 0;
 
-	if (!attach->peer2peer && !xe_bo_can_migrate(bo, XE_PL_TT))
+	if (!peer2peer && !xe_bo_can_migrate(bo, XE_PL_TT))
 		return ERR_PTR(-EOPNOTSUPP);
 
 	if (!xe_bo_is_pinned(bo)) {
-		if (!attach->peer2peer)
+		if (!peer2peer)
 			r = xe_bo_migrate(bo, XE_PL_TT, NULL, exec);
 		else
 			r = xe_bo_validate(bo, NULL, false, exec);
@@ -128,7 +121,7 @@ static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
 		if (IS_ERR(sgt))
 			return sgt;
 
-		if (dma_map_sgtable(attach->dev, sgt, dir,
+		if (dma_map_sgtable(dma_dev, sgt, dir,
 				    DMA_ATTR_SKIP_CPU_SYNC))
 			goto error_free;
 		break;
@@ -137,7 +130,7 @@ static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
 	case XE_PL_VRAM1:
 		r = xe_ttm_vram_mgr_alloc_sgt(xe_bo_device(bo),
 					      bo->ttm.resource, 0,
-					      bo->ttm.base.size, attach->dev,
+					      bo->ttm.base.size, dma_dev,
 					      dir, &sgt);
 		if (r)
 			return ERR_PTR(r);
@@ -158,12 +151,14 @@ static void xe_dma_buf_unmap(struct dma_buf_attachment *attach,
 			     struct sg_table *sgt,
 			     enum dma_data_direction dir)
 {
+	struct device *dma_dev = dma_buf_sgt_dma_device(attach);
+
 	if (sg_page(sgt->sgl)) {
-		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
+		dma_unmap_sgtable(dma_dev, sgt, dir, 0);
 		sg_free_table(sgt);
 		kfree(sgt);
 	} else {
-		xe_ttm_vram_mgr_free_sgt(attach->dev, dir, sgt);
+		xe_ttm_vram_mgr_free_sgt(dma_dev, dir, sgt);
 	}
 }
 
@@ -197,18 +192,39 @@ static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 	return 0;
 }
 
+static const struct dma_buf_mapping_sgt_exp_ops xe_dma_buf_sgt_ops = {
+	.map_dma_buf = xe_dma_buf_map,
+	.unmap_dma_buf = xe_dma_buf_unmap,
+};
+
+static int xe_dma_buf_match_mapping(struct dma_buf_match_args *args)
+{
+	struct drm_gem_object *obj = args->dmabuf->priv;
+	struct dma_buf_mapping_match sgt_match[2];
+	unsigned int num_match = 0;
+
+	if (IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
+		sgt_match[num_match++] = DMA_BUF_EMAPPING_SGT_P2P(
+			&xe_dma_buf_sgt_ops, to_pci_dev(obj->dev->dev));
+
+	if (xe_bo_can_migrate(gem_to_xe_bo(obj), XE_PL_TT))
+		sgt_match[num_match++] =
+			DMA_BUF_EMAPPING_SGT(&xe_dma_buf_sgt_ops);
+
+	return dma_buf_match_mapping(args, sgt_match, num_match);
+}
+
 static const struct dma_buf_ops xe_dmabuf_ops = {
 	.attach = xe_dma_buf_attach,
 	.detach = xe_dma_buf_detach,
 	.pin = xe_dma_buf_pin,
 	.unpin = xe_dma_buf_unpin,
-	.map_dma_buf = xe_dma_buf_map,
-	.unmap_dma_buf = xe_dma_buf_unmap,
 	.release = drm_gem_dmabuf_release,
 	.begin_cpu_access = xe_dma_buf_begin_cpu_access,
 	.mmap = drm_gem_dmabuf_mmap,
 	.vmap = drm_gem_dmabuf_vmap,
 	.vunmap = drm_gem_dmabuf_vunmap,
+	.match_mapping = xe_dma_buf_match_mapping,
 };
 
 struct dma_buf *xe_gem_prime_export(struct drm_gem_object *obj, int flags)
-- 
2.43.0


  parent reply	other threads:[~2026-02-18  0:12 UTC|newest]

Thread overview: 54+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-02-18  0:11 [PATCH RFC 00/26] Add DMA-buf mapping types and convert vfio/iommufd to use them Jason Gunthorpe
2026-02-18  0:11 ` [PATCH RFC 01/26] dma-buf: Introduce DMA-buf mapping types Jason Gunthorpe
2026-02-18  1:37   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 02/26] dma-buf: Add the SGT DMA mapping type Jason Gunthorpe
2026-02-18  1:37   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 03/26] dma-buf: Add dma_buf_mapping_attach() Jason Gunthorpe
2026-02-18  1:37   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 04/26] dma-buf: Route SGT related actions through attach->map_type Jason Gunthorpe
2026-02-18  1:37   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 05/26] dma-buf: Allow single exporter drivers to avoid the match_mapping function Jason Gunthorpe
2026-02-18  1:37   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 06/26] drm: Check the SGT ops for drm_gem_map_dma_buf() Jason Gunthorpe
2026-02-18  1:37   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 07/26] dma-buf: Convert all the simple exporters to use SGT mapping type Jason Gunthorpe
2026-02-18  1:37   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 08/26] drm/vmwgfx: Use match_mapping instead of dummy calls Jason Gunthorpe
2026-02-18  1:37   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 09/26] accel/habanalabs: Use the SGT mapping type Jason Gunthorpe
2026-02-18  1:37   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` Jason Gunthorpe [this message]
2026-02-18  1:37   ` Claude review: drm/xe/dma-buf: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 11/26] drm/amdgpu: " Jason Gunthorpe
2026-02-18  1:37   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 12/26] vfio/pci: Change the DMA-buf exporter to use mapping_type Jason Gunthorpe
2026-02-18  1:37   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 13/26] dma-buf: Update dma_buf_phys_vec_to_sgt() to use the SGT mapping type Jason Gunthorpe
2026-02-18  1:37   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 14/26] iio: buffer: convert " Jason Gunthorpe
2026-02-18  1:37   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 15/26] functionfs: " Jason Gunthorpe
2026-02-18  1:37   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 16/26] dma-buf: Remove unused SGT stuff from the common structures Jason Gunthorpe
2026-02-18  1:37   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 17/26] treewide: Rename dma_buf_map_attachment(_unlocked) to dma_buf_sgt_ Jason Gunthorpe
2026-02-18  1:38   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 18/26] treewide: Rename dma_buf_unmap_attachment(_unlocked) to dma_buf_sgt_* Jason Gunthorpe
2026-02-18  1:38   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 19/26] treewide: Rename dma_buf_attach() to dma_buf_sgt_attach() Jason Gunthorpe
2026-02-18  1:38   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 20/26] treewide: Rename dma_buf_dynamic_attach() to dma_buf_sgt_dynamic_attach() Jason Gunthorpe
2026-02-18  1:38   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 21/26] dma-buf: Add the Physical Address List DMA mapping type Jason Gunthorpe
2026-02-18  1:38   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 22/26] vfio/pci: Add physical address list support to DMABUF Jason Gunthorpe
2026-02-18  1:38   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 23/26] iommufd: Use the PAL mapping type instead of a vfio function Jason Gunthorpe
2026-02-18  1:38   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 24/26] iommufd: Support DMA-bufs with multiple physical ranges Jason Gunthorpe
2026-02-18  1:38   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 25/26] iommufd/selftest: Check multi-phys DMA-buf scenarios Jason Gunthorpe
2026-02-18  1:38   ` Claude review: " Claude Code Review Bot
2026-02-18  0:11 ` [PATCH RFC 26/26] dma-buf: Add kunit tests for mapping type Jason Gunthorpe
2026-02-18  1:38   ` Claude review: " Claude Code Review Bot
2026-02-18  1:37 ` Claude review: Add DMA-buf mapping types and convert vfio/iommufd to use them Claude Code Review Bot

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=10-v1-b5cab63049c0+191af-dmabuf_map_type_jgg@nvidia.com \
    --to=jgg@nvidia.com \
    --cc=christian.koenig@amd.com \
    --cc=dongwon.kim@intel.com \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=iommu@lists.linux.dev \
    --cc=kevin.tian@intel.com \
    --cc=leonro@nvidia.com \
    --cc=linaro-mm-sig@lists.linaro.org \
    --cc=linux-media@vger.kernel.org \
    --cc=matthew.brost@intel.com \
    --cc=simona.vetter@ffwll.ch \
    --cc=sumit.semwal@linaro.org \
    --cc=thomas.hellstrom@linux.intel.com \
    --cc=vivek.kasireddy@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox