author | Linus Torvalds <torvalds@linux-foundation.org> | 2023-08-30 06:32:10 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-08-30 06:32:10 +0300
commit | 6c1b980a7e79e55e951b4b2c47eefebc75071209 (patch)
tree | 75fadaa5bc0b8bb1b488bd107926cdb6373c3946 /arch
parent | 3d3dfeb3aec7b612d266d500c82054f1fded4980 (diff)
parent | d069ed288ac74c24e2b1c294aa9445c80ed6c518 (diff)
download | linux-6c1b980a7e79e55e951b4b2c47eefebc75071209.tar.xz
Merge tag 'dma-mapping-6.6-2023-08-29' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:
- allow dynamic sizing of the swiotlb buffer, to cater for secure
virtualization workloads that require all I/O to be bounce buffered
(Petr Tesarik)
- move a declaration to a header (Arnd Bergmann)
- check for memory region overlap in dma-contiguous (Binglei Wang)
- remove the somewhat dangerous runtime swiotlb-xen enablement and
unexport is_swiotlb_active (Christoph Hellwig, Juergen Gross)
- per-node CMA improvements (Yajun Deng)
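
For context, a minimal sketch (assumed driver code, not part of this pull request) of the streaming-DMA path that swiotlb bounce-buffers when a device cannot reach a buffer directly, e.g. in the secure-virtualization case above. The function, device and buffer here are hypothetical; the dma_map_single()/dma_mapping_error()/dma_unmap_single() calls are the standard kernel DMA API:

#include <linux/dma-mapping.h>

/* Hypothetical example: map a buffer for a device write, then unmap it. */
static int example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	/* May transparently bounce the data through the swiotlb. */
	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;	/* e.g. the bounce buffer was exhausted */

	/* ... hand 'dma' to the hardware and wait for completion ... */

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}

With this series, a full swiotlb no longer necessarily fails such a mapping: as the shortlog below notes, a new pool can be allocated at runtime (within the determined physical address limit) or a transient pool used as a fallback.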
* tag 'dma-mapping-6.6-2023-08-29' of git://git.infradead.org/users/hch/dma-mapping:
swiotlb: optimize get_max_slots()
swiotlb: move slot allocation explanation comment where it belongs
swiotlb: search the software IO TLB only if the device makes use of it
swiotlb: allocate a new memory pool when existing pools are full
swiotlb: determine potential physical address limit
swiotlb: if swiotlb is full, fall back to a transient memory pool
swiotlb: add a flag whether SWIOTLB is allowed to grow
swiotlb: separate memory pool data from other allocator data
swiotlb: add documentation and rename swiotlb_do_find_slots()
swiotlb: make io_tlb_default_mem local to swiotlb.c
swiotlb: bail out of swiotlb_init_late() if swiotlb is already allocated
dma-contiguous: check for memory region overlap
dma-contiguous: support numa CMA for specified node
dma-contiguous: support per-numa CMA for all architectures
dma-mapping: move arch_dma_set_mask() declaration to header
swiotlb: unexport is_swiotlb_active
x86: always initialize xen-swiotlb when xen-pcifront is enabling
xen/pci: add flag for PCI passthrough being possible
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm/xen/mm.c | 10
-rw-r--r-- | arch/arm64/mm/init.c | 2
-rw-r--r-- | arch/mips/pci/pci-octeon.c | 2
-rw-r--r-- | arch/powerpc/kernel/dma-mask.c | 1
-rw-r--r-- | arch/x86/include/asm/xen/swiotlb-xen.h | 6
-rw-r--r-- | arch/x86/kernel/pci-dma.c | 29
-rw-r--r-- | arch/x86/xen/setup.c | 6
7 files changed, 19 insertions, 37 deletions
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 3d826c0b5fee..882cd70c7a2f 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -125,12 +125,10 @@ static int __init xen_mm_init(void)
 		return 0;
 
 	/* we can work with the default swiotlb */
-	if (!io_tlb_default_mem.nslabs) {
-		rc = swiotlb_init_late(swiotlb_size_or_default(),
-				       xen_swiotlb_gfp(), NULL);
-		if (rc < 0)
-			return rc;
-	}
+	rc = swiotlb_init_late(swiotlb_size_or_default(),
+			       xen_swiotlb_gfp(), NULL);
+	if (rc < 0)
+		return rc;
 
 	cflush.op = 0;
 	cflush.a.dev_bus_addr = 0;
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 4fcb88a445ef..8a0f8604348b 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -461,8 +461,6 @@ void __init bootmem_init(void)
 	arm64_hugetlb_cma_reserve();
 #endif
 
-	dma_pernuma_cma_reserve();
-
 	kvm_hyp_reserve();
 
 	/*
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index e457a18cbdc5..d19d9d456309 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -664,7 +664,7 @@ static int __init octeon_pci_setup(void)
 
 		/* BAR1 movable regions contiguous to cover the swiotlb */
 		octeon_bar1_pci_phys =
-			io_tlb_default_mem.start & ~((1ull << 22) - 1);
+			default_swiotlb_base() & ~((1ull << 22) - 1);
 
 		for (index = 0; index < 32; index++) {
 			union cvmx_pci_bar1_indexx bar1_index;
diff --git a/arch/powerpc/kernel/dma-mask.c b/arch/powerpc/kernel/dma-mask.c
index ffbbbc432612..5b07ca7b73aa 100644
--- a/arch/powerpc/kernel/dma-mask.c
+++ b/arch/powerpc/kernel/dma-mask.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
 #include <linux/export.h>
 #include <asm/machdep.h>
 
diff --git a/arch/x86/include/asm/xen/swiotlb-xen.h b/arch/x86/include/asm/xen/swiotlb-xen.h
index 77a2d19cc990..abde0f44df57 100644
--- a/arch/x86/include/asm/xen/swiotlb-xen.h
+++ b/arch/x86/include/asm/xen/swiotlb-xen.h
@@ -2,12 +2,6 @@
 #ifndef _ASM_X86_SWIOTLB_XEN_H
 #define _ASM_X86_SWIOTLB_XEN_H
 
-#ifdef CONFIG_SWIOTLB_XEN
-extern int pci_xen_swiotlb_init_late(void);
-#else
-static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; }
-#endif
-
 int xen_swiotlb_fixup(void *buf, unsigned long nslabs);
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
 				 unsigned int address_bits,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index de6be0a3965e..f323d83e40a7 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -72,9 +72,15 @@ static inline void __init pci_swiotlb_detect(void)
 #endif /* CONFIG_SWIOTLB */
 
 #ifdef CONFIG_SWIOTLB_XEN
+static bool xen_swiotlb_enabled(void)
+{
+	return xen_initial_domain() || x86_swiotlb_enable ||
+		(IS_ENABLED(CONFIG_XEN_PCIDEV_FRONTEND) && xen_pv_pci_possible);
+}
+
 static void __init pci_xen_swiotlb_init(void)
 {
-	if (!xen_initial_domain() && !x86_swiotlb_enable)
+	if (!xen_swiotlb_enabled())
 		return;
 	x86_swiotlb_enable = true;
 	x86_swiotlb_flags |= SWIOTLB_ANY;
@@ -83,27 +89,6 @@ static void __init pci_xen_swiotlb_init(void)
 	if (IS_ENABLED(CONFIG_PCI))
 		pci_request_acs();
 }
-
-int pci_xen_swiotlb_init_late(void)
-{
-	if (dma_ops == &xen_swiotlb_dma_ops)
-		return 0;
-
-	/* we can work with the default swiotlb */
-	if (!io_tlb_default_mem.nslabs) {
-		int rc = swiotlb_init_late(swiotlb_size_or_default(),
-					   GFP_KERNEL, xen_swiotlb_fixup);
-		if (rc < 0)
-			return rc;
-	}
-
-	/* XXX: this switches the dma ops under live devices! */
-	dma_ops = &xen_swiotlb_dma_ops;
-	if (IS_ENABLED(CONFIG_PCI))
-		pci_request_acs();
-	return 0;
-}
-EXPORT_SYMBOL_GPL(pci_xen_swiotlb_init_late);
 #else
 static inline void __init pci_xen_swiotlb_init(void)
 {
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 50c998b844fb..b3e37961065a 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -44,6 +44,9 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
 /* Number of pages released from the initial allocation. */
 unsigned long xen_released_pages;
 
+/* Memory map would allow PCI passthrough. */
+bool xen_pv_pci_possible;
+
 /* E820 map used during setting up memory. */
 static struct e820_table xen_e820_table __initdata;
 
@@ -814,6 +817,9 @@ char * __init xen_memory_setup(void)
 		chunk_size = size;
 		type = xen_e820_table.entries[i].type;
 
+		if (type == E820_TYPE_RESERVED)
+			xen_pv_pci_possible = true;
+
 		if (type == E820_TYPE_RAM) {
 			if (addr < mem_end) {
 				chunk_size = min(size, mem_end - addr);