| author | Jiri Pirko <jiri@nvidia.com> | 2026-03-25 22:23:51 +0300 |
|---|---|---|
| committer | Marek Szyprowski <m.szyprowski@samsung.com> | 2026-04-02 08:29:33 +0300 |
| commit | f0548044a02630402d374df195ed3af4cc5e4711 (patch) | |
| tree | 98713be309d9a76bf648538dff3aa1548f258a8b | |
| parent | 27e2e9b9b49c5d5260969168b86cd238254b9105 (diff) | |
| download | linux-f0548044a02630402d374df195ed3af4cc5e4711.tar.xz | |
dma-mapping: introduce DMA_ATTR_CC_SHARED for shared memory
Current confidential computing (CC) designs don't place a vIOMMU in front of untrusted devices.
Instead, the DMA API forces all untrusted-device DMA through swiotlb
bounce buffers (is_swiotlb_force_bounce()), which copy data into
shared memory on behalf of the device.
When a caller has already arranged for the memory to be shared
via set_memory_decrypted(), the DMA API needs to know, so that it can map
the memory directly using its unencrypted physical address rather than
bounce buffering. Following the pattern of DMA_ATTR_MMIO, add
DMA_ATTR_CC_SHARED for this purpose. As in the MMIO case, only the
caller knows what kind of memory it has, and it must inform the DMA API
for the mapping to work correctly.
Signed-off-by: Jiri Pirko <jiri@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20260325192352.437608-2-jiri@resnulli.us
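For illustration, a minimal, hypothetical caller-side sketch of the new attribute (the helper name, DMA direction, and error handling are invented for this example, and a page-aligned buffer in the kernel linear map is assumed; nothing below is part of the patch):

```c
#include <linux/dma-mapping.h>
#include <linux/set_memory.h>

/*
 * Hypothetical caller (not part of this patch): the helper name and
 * error handling are illustrative only.
 */
static dma_addr_t map_cc_shared_buf(struct device *dev, struct page *page,
				    size_t size)
{
	unsigned long vaddr = (unsigned long)page_address(page);

	/* The caller, not the DMA API, transitions the pages to shared. */
	if (set_memory_decrypted(vaddr, size >> PAGE_SHIFT))
		return DMA_MAPPING_ERROR;

	/*
	 * DMA_ATTR_CC_SHARED: the memory is already shared, so the DMA API
	 * maps the unencrypted address instead of bouncing through swiotlb.
	 */
	return dma_map_phys(dev, page_to_phys(page), size, DMA_BIDIRECTIONAL,
			    DMA_ATTR_CC_SHARED);
}
```

Teardown would presumably mirror this: pass the same attrs to dma_unmap_phys(), then return the pages to private with set_memory_encrypted() once the device can no longer touch them.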
| -rw-r--r-- | include/linux/dma-mapping.h | 10 |
| -rw-r--r-- | include/trace/events/dma.h | 3 |
| -rw-r--r-- | kernel/dma/direct.h | 14 |
| -rw-r--r-- | kernel/dma/mapping.c | 13 |

4 files changed, 34 insertions(+), 6 deletions(-)
```diff
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 677c51ab7510..db8ab24a54f4 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -92,6 +92,16 @@
  * flushing.
  */
 #define DMA_ATTR_REQUIRE_COHERENT (1UL << 12)
+/*
+ * DMA_ATTR_CC_SHARED: Indicates the DMA mapping is shared (decrypted) for
+ * confidential computing guests. For normal system memory the caller must have
+ * called set_memory_decrypted(), and pgprot_decrypted must be used when
+ * creating CPU PTEs for the mapping. The same shared semantic may be passed
+ * to the vIOMMU when it sets up the IOPTE. For MMIO use together with
+ * DMA_ATTR_MMIO to indicate shared MMIO. Unless DMA_ATTR_MMIO is provided
+ * a struct page is required.
+ */
+#define DMA_ATTR_CC_SHARED (1UL << 13)
 
 /*
  * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h
index 63597b004424..31c9ddf72c9d 100644
--- a/include/trace/events/dma.h
+++ b/include/trace/events/dma.h
@@ -34,7 +34,8 @@ TRACE_DEFINE_ENUM(DMA_NONE);
 	{ DMA_ATTR_PRIVILEGED, "PRIVILEGED" }, \
 	{ DMA_ATTR_MMIO, "MMIO" }, \
 	{ DMA_ATTR_DEBUGGING_IGNORE_CACHELINES, "CACHELINES_OVERLAP" }, \
-	{ DMA_ATTR_REQUIRE_COHERENT, "REQUIRE_COHERENT" })
+	{ DMA_ATTR_REQUIRE_COHERENT, "REQUIRE_COHERENT" }, \
+	{ DMA_ATTR_CC_SHARED, "CC_SHARED" })
 
 DECLARE_EVENT_CLASS(dma_map,
 	TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
diff --git a/kernel/dma/direct.h b/kernel/dma/direct.h
index b86ff65496fc..7140c208c123 100644
--- a/kernel/dma/direct.h
+++ b/kernel/dma/direct.h
@@ -89,16 +89,24 @@ static inline dma_addr_t dma_direct_map_phys(struct device *dev,
 	dma_addr_t dma_addr;
 
 	if (is_swiotlb_force_bounce(dev)) {
-		if (attrs & (DMA_ATTR_MMIO | DMA_ATTR_REQUIRE_COHERENT))
-			return DMA_MAPPING_ERROR;
+		if (!(attrs & DMA_ATTR_CC_SHARED)) {
+			if (attrs & (DMA_ATTR_MMIO | DMA_ATTR_REQUIRE_COHERENT))
+				return DMA_MAPPING_ERROR;
 
-		return swiotlb_map(dev, phys, size, dir, attrs);
+			return swiotlb_map(dev, phys, size, dir, attrs);
+		}
+	} else if (attrs & DMA_ATTR_CC_SHARED) {
+		return DMA_MAPPING_ERROR;
 	}
 
 	if (attrs & DMA_ATTR_MMIO) {
 		dma_addr = phys;
 		if (unlikely(!dma_capable(dev, dma_addr, size, false)))
 			goto err_overflow;
+	} else if (attrs & DMA_ATTR_CC_SHARED) {
+		dma_addr = phys_to_dma_unencrypted(dev, phys);
+		if (unlikely(!dma_capable(dev, dma_addr, size, false)))
+			goto err_overflow;
 	} else {
 		dma_addr = phys_to_dma(dev, phys);
 		if (unlikely(!dma_capable(dev, dma_addr, size, true)) ||
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index df3eccc7d4ca..23ed8eb9233e 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -157,6 +157,7 @@ dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	bool is_mmio = attrs & DMA_ATTR_MMIO;
+	bool is_cc_shared = attrs & DMA_ATTR_CC_SHARED;
 	dma_addr_t addr = DMA_MAPPING_ERROR;
 
 	BUG_ON(!valid_dma_direction(dir));
@@ -168,8 +169,11 @@ dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
 		return DMA_MAPPING_ERROR;
 
 	if (dma_map_direct(dev, ops) ||
-	    (!is_mmio && arch_dma_map_phys_direct(dev, phys + size)))
+	    (!is_mmio && !is_cc_shared &&
+	     arch_dma_map_phys_direct(dev, phys + size)))
 		addr = dma_direct_map_phys(dev, phys, size, dir, attrs, true);
+	else if (is_cc_shared)
+		return DMA_MAPPING_ERROR;
 	else if (use_dma_iommu(dev))
 		addr = iommu_dma_map_phys(dev, phys, size, dir, attrs);
 	else if (ops->map_phys)
@@ -206,11 +210,16 @@ void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	bool is_mmio = attrs & DMA_ATTR_MMIO;
+	bool is_cc_shared = attrs & DMA_ATTR_CC_SHARED;
 
 	BUG_ON(!valid_dma_direction(dir));
+
 	if (dma_map_direct(dev, ops) ||
-	    (!is_mmio && arch_dma_unmap_phys_direct(dev, addr + size)))
+	    (!is_mmio && !is_cc_shared &&
+	     arch_dma_unmap_phys_direct(dev, addr + size)))
 		dma_direct_unmap_phys(dev, addr, size, dir, attrs, true);
+	else if (is_cc_shared)
+		return;
 	else if (use_dma_iommu(dev))
 		iommu_dma_unmap_phys(dev, addr, size, dir, attrs);
 	else if (ops->unmap_phys)
```
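As background for the new dma_direct_map_phys() branch above: phys_to_dma_unencrypted() produces the bus address without the platform's memory-encryption bit, whereas phys_to_dma() sets it. Roughly (paraphrased from include/linux/dma-direct.h in recent kernels; verify against your tree):

```c
/*
 * Paraphrased sketch, not part of this patch: the "encrypted" DMA address
 * is the unencrypted one with the platform's encryption bit (e.g. the AMD
 * SME C-bit) set via __sme_set().  DMA_ATTR_CC_SHARED therefore selects
 * the variant without the bit, matching memory the caller has shared with
 * set_memory_decrypted().
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return __sme_set(phys_to_dma_unencrypted(dev, paddr));
}
```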
