author     Linus Torvalds <torvalds@linux-foundation.org>  2019-07-20 22:09:52 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-07-20 22:09:52 +0300
commit     ac60602a6d8f6830dee89f4b87ee005f62eb7171 (patch)
tree       ea8810e0d7abc82755c8db00904015ecbf99a8b4 /arch/x86/mm/mem_encrypt.c
parent     c6dd78fcb8eefa15dd861889e0f59d301cb5230c (diff)
parent     449fa54d6815be8c2c1f68fa9dbbae9384a7c03e (diff)
download   linux-ac60602a6d8f6830dee89f4b87ee005f62eb7171.tar.xz
Merge tag 'dma-mapping-5.3-1' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping fixes from Christoph Hellwig:
"Fix various regressions:
- force unencrypted dma-coherent buffers if encryption bit can't fit
into the dma coherent mask (Tom Lendacky)
- avoid limiting request size if swiotlb is not used (me)
- fix swiotlb handling in dma_direct_sync_sg_for_cpu/device (Fugang
Duan)"
* tag 'dma-mapping-5.3-1' of git://git.infradead.org/users/hch/dma-mapping:
dma-direct: correct the physical addr in dma_direct_sync_sg_for_cpu/device
dma-direct: only limit the mapping size if swiotlb could be used
dma-mapping: add a dma_addressing_limited helper
dma-direct: Force unencrypted DMA under SME for certain DMA masks
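The dma_addressing_limited helper named in the shortlog gives dma-direct one place to ask whether a device's mask falls short of what is needed to reach all of system memory. A minimal sketch of such a helper, assuming the v5.3 dma-mapping API (dma_get_mask(), dma_get_required_mask(), the bus_dma_mask field of struct device):

/*
 * Sketch: a device is addressing-limited when the smaller of its DMA
 * mask and bus mask cannot cover the mask required to reach all memory.
 */
static inline bool dma_addressing_limited(struct device *dev)
{
	return min_not_zero(dma_get_mask(dev), dev->bus_dma_mask) <
		dma_get_required_mask(dev);
}

With a predicate like this, dma-direct can report a swiotlb-derived maximum mapping size only for devices that are actually addressing-limited, which is the thrust of the second fix.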
Diffstat (limited to 'arch/x86/mm/mem_encrypt.c')
-rw-r--r--  arch/x86/mm/mem_encrypt.c  30
1 file changed, 30 insertions, 0 deletions
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index e94e0a62ba92..fece30ca8b0c 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -15,6 +15,10 @@
 #include <linux/dma-direct.h>
 #include <linux/swiotlb.h>
 #include <linux/mem_encrypt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
@@ -348,6 +352,32 @@ bool sev_active(void)
 }
 EXPORT_SYMBOL(sev_active);
 
+/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
+bool force_dma_unencrypted(struct device *dev)
+{
+	/*
+	 * For SEV, all DMA must be to unencrypted addresses.
+	 */
+	if (sev_active())
+		return true;
+
+	/*
+	 * For SME, all DMA must be to unencrypted addresses if the
+	 * device does not support DMA to addresses that include the
+	 * encryption mask.
+	 */
+	if (sme_active()) {
+		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
+		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
+						dev->bus_dma_mask);
+
+		if (dma_dev_mask <= dma_enc_mask)
+			return true;
+	}
+
+	return false;
+}
+
 /* Architecture __weak replacement functions */
 void __init mem_encrypt_free_decrypted_mem(void)
 {
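To see why the dma_dev_mask <= dma_enc_mask comparison does the right thing, here is a standalone user-space sketch. It re-derives DMA_BIT_MASK() and __ffs64() from their kernel definitions and assumes a hypothetical encryption bit at position 47 (the real position is reported by the CPU). A device mask that fits entirely below the encryption bit can never form an address with the C-bit set, so its DMA must be forced unencrypted:

#include <stdint.h>
#include <stdio.h>

/* Kernel-style helpers, re-derived so the sketch builds in user space. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static unsigned int ffs64(uint64_t v)
{
	return (unsigned int)__builtin_ctzll(v);	/* index of lowest set bit */
}

int main(void)
{
	uint64_t sme_me_mask = 1ULL << 47;	/* hypothetical C-bit position */
	uint64_t dma_enc_mask = DMA_BIT_MASK(ffs64(sme_me_mask));
	uint64_t dev_masks[] = { DMA_BIT_MASK(32), DMA_BIT_MASK(48), DMA_BIT_MASK(64) };

	for (unsigned int i = 0; i < 3; i++)
		printf("device mask %#018llx -> %s\n",
		       (unsigned long long)dev_masks[i],
		       dev_masks[i] <= dma_enc_mask ?
		       "force unencrypted DMA" : "encrypted DMA possible");
	return 0;
}

With these values the 32-bit mask forces unencrypted DMA, while the 48- and 64-bit masks can carry the encryption bit and are left alone.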