author     David S. Miller <davem@davemloft.net>  2008-03-18 09:44:31 +0300
committer  David S. Miller <davem@davemloft.net>  2008-03-18 09:44:31 +0300
commit     2f633928cbba8a5858bb39b11e7219a41b0fbef5 (patch)
tree       9a82f4b7f2c3afe4b0208d8e44ea61bae90a7d22 /arch/alpha
parent     5e226e4d9016daee170699f8a4188a5505021756 (diff)
parent     bde4f8fa8db2abd5ac9c542d76012d0fedab050f (diff)
download   linux-2f633928cbba8a5858bb39b11e7219a41b0fbef5.tar.xz
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/alpha')
-rw-r--r--   arch/alpha/Kconfig              3
-rw-r--r--   arch/alpha/kernel/pci_iommu.c  64
2 files changed, 45 insertions, 22 deletions
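One mechanical pattern recurs throughout the diff below: the open-coded round-up "(p + mask) & ~mask" is rewritten with the kernel's ALIGN() macro. Because the alignment (mask + 1) is always a power of two here, the two forms compute the same value. A minimal userspace sketch of that equivalence (the macro body mirrors the kernel's definition; the test harness is illustrative only, not part of the patch):

	#include <assert.h>

	/* Round x up to the next multiple of a; a must be a power of two.
	   Same arithmetic as the kernel's ALIGN() macro. */
	#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned long mask = 7;	/* e.g. arena->align_entry - 1 */

		for (unsigned long p = 0; p < 1024; p++)
			assert(ALIGN(p, mask + 1) == ((p + mask) & ~mask));
		return 0;
	}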
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 002703b8c0b0..729cdbdf8036 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -330,6 +330,9 @@ config PCI_DOMAINS
 config PCI_SYSCALL
 	def_bool PCI
 
+config IOMMU_HELPER
+	def_bool PCI
+
 config ALPHA_CORE_AGP
 	bool
 	depends on ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 26d3789dfdd0..4e1c08636edd 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -10,6 +10,7 @@
 #include <linux/scatterlist.h>
 #include <linux/log2.h>
 #include <linux/dma-mapping.h>
+#include <linux/iommu-helper.h>
 
 #include <asm/io.h>
 #include <asm/hwrpb.h>
@@ -31,7 +32,6 @@
 #endif
 
 #define DEBUG_NODIRECT 0
-#define DEBUG_FORCEDAC 0
 
 #define ISA_DMA_MASK 0x00ffffff
 
@@ -128,37 +128,55 @@ iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
 
 /* Must be called with the arena lock held */
 static long
-iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
+iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
+		       long n, long mask)
 {
 	unsigned long *ptes;
 	long i, p, nent;
+	int pass = 0;
+	unsigned long base;
+	unsigned long boundary_size;
+
+	base = arena->dma_base >> PAGE_SHIFT;
+	if (dev) {
+		boundary_size = dma_get_seg_boundary(dev) + 1;
+		boundary_size >>= PAGE_SHIFT;
+	} else {
+		boundary_size = 1UL << (32 - PAGE_SHIFT);
+	}
 
 	/* Search forward for the first mask-aligned sequence of N free ptes */
 	ptes = arena->ptes;
 	nent = arena->size >> PAGE_SHIFT;
-	p = (arena->next_entry + mask) & ~mask;
+	p = ALIGN(arena->next_entry, mask + 1);
 	i = 0;
+
+again:
 	while (i < n && p+i < nent) {
+		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
+			p = ALIGN(p + 1, mask + 1);
+			goto again;
+		}
+
 		if (ptes[p+i])
-			p = (p + i + 1 + mask) & ~mask, i = 0;
+			p = ALIGN(p + i + 1, mask + 1), i = 0;
 		else
 			i = i + 1;
 	}
 
 	if (i < n) {
-		/* Reached the end.  Flush the TLB and restart the
-		   search from the beginning.  */
-		alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
-
-		p = 0, i = 0;
-		while (i < n && p+i < nent) {
-			if (ptes[p+i])
-				p = (p + i + 1 + mask) & ~mask, i = 0;
-			else
-				i = i + 1;
-		}
-
-		if (i < n)
+		if (pass < 1) {
+			/*
+			 * Reached the end.  Flush the TLB and restart
+			 * the search from the beginning.
+			 */
+			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
+
+			pass++;
+			p = 0;
+			i = 0;
+			goto again;
+		} else
 			return -1;
 	}
 
@@ -168,7 +186,8 @@ iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
 }
 
 static long
-iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
+iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
+		  unsigned int align)
 {
 	unsigned long flags;
 	unsigned long *ptes;
@@ -179,7 +198,7 @@ iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
 	/* Search for N empty ptes */
 	ptes = arena->ptes;
 	mask = max(align, arena->align_entry) - 1;
-	p = iommu_arena_find_pages(arena, n, mask);
+	p = iommu_arena_find_pages(dev, arena, n, mask);
 	if (p < 0) {
 		spin_unlock_irqrestore(&arena->lock, flags);
 		return -1;
@@ -229,6 +248,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 	unsigned long paddr;
 	dma_addr_t ret;
 	unsigned int align = 0;
+	struct device *dev = pdev ? &pdev->dev : NULL;
 
 	paddr = __pa(cpu_addr);
 
@@ -276,7 +296,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 	/* Force allocation to 64KB boundary for ISA bridges. */
 	if (pdev && pdev == isa_bridge)
 		align = 8;
-	dma_ofs = iommu_arena_alloc(arena, npages, align);
+	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
 	if (dma_ofs < 0) {
 		printk(KERN_WARNING "pci_map_single failed: "
 		       "could not allocate dma page tables\n");
@@ -563,7 +583,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
 
 	paddr &= ~PAGE_MASK;
 	npages = calc_npages(paddr + size);
-	dma_ofs = iommu_arena_alloc(arena, npages, 0);
+	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
 	if (dma_ofs < 0) {
 		/* If we attempted a direct map above but failed, die.  */
 		if (leader->dma_address == 0)
@@ -830,7 +850,7 @@ iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
 
 	/* Search for N empty ptes.  */
 	ptes = arena->ptes;
-	p = iommu_arena_find_pages(arena, pg_count, align_mask);
+	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
 	if (p < 0) {
 		spin_unlock_irqrestore(&arena->lock, flags);
 		return -1;
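The substantive change above is that iommu_arena_find_pages() now rejects candidate placements that would cross the device's DMA segment boundary: dma_get_seg_boundary(dev) returns a boundary mask, so (mask + 1) converted to pages is the span no single mapping may straddle, with a 4GB default when no device is known (as in the iommu_reserve() caller). The check comes from the generic IOMMU_HELPER library the Kconfig hunk enables. A standalone sketch of the check, modeled on lib/iommu-helper.c of this era (the in-tree version additionally BUG()s if boundary_size is not a power of two; the main() harness is illustrative only):

	#include <stdio.h>

	/* Nonzero if an nr-page allocation at arena-relative page 'index'
	   (with 'shift' = the arena base in pages) would cross a boundary
	   of 'boundary_size' pages.  boundary_size must be a power of two. */
	static int iommu_is_span_boundary(unsigned int index, unsigned int nr,
					  unsigned long shift,
					  unsigned long boundary_size)
	{
		shift = (shift + index) & (boundary_size - 1);
		return shift + nr > boundary_size;
	}

	int main(void)
	{
		/* 16-page boundary: pages 14..17 straddle it, 12..15 do not. */
		printf("%d\n", iommu_is_span_boundary(14, 4, 0, 16));	/* 1 */
		printf("%d\n", iommu_is_span_boundary(12, 4, 0, 16));	/* 0 */
		return 0;
	}

When the check fires, the allocator advances p to the next mask-aligned candidate via ALIGN(p + 1, mask + 1) and rescans from there, so the boundary is enforced before any ptes are claimed.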