Diffstat (limited to 'arch/ia64/hp/common/sba_iommu.c')
-rw-r--r--	arch/ia64/hp/common/sba_iommu.c	92
1 file changed, 56 insertions(+), 36 deletions(-)
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index a94445422cc6..9409de5c9441 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -35,6 +35,7 @@
 #include <linux/nodemask.h>
 #include <linux/bitops.h> /* hweight64() */
 #include <linux/crash_dump.h>
+#include <linux/iommu-helper.h>
 
 #include <asm/delay.h> /* ia64_get_itc() */
 #include <asm/io.h>
@@ -460,6 +461,13 @@ get_iovp_order (unsigned long size)
 	return order;
 }
 
+static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
+				 unsigned int bitshiftcnt)
+{
+	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
+		+ bitshiftcnt;
+}
+
 /**
  * sba_search_bitmap - find free space in IO PDIR resource bitmap
  * @ioc: IO MMU structure which owns the pdir we are interested in.
@@ -471,15 +479,25 @@ get_iovp_order (unsigned long size)
  * Cool perf optimization: search for log2(size) bits at a time.
  */
 static SBA_INLINE unsigned long
-sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
+sba_search_bitmap(struct ioc *ioc, struct device *dev,
+		  unsigned long bits_wanted, int use_hint)
 {
 	unsigned long *res_ptr;
 	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
-	unsigned long flags, pide = ~0UL;
+	unsigned long flags, pide = ~0UL, tpide;
+	unsigned long boundary_size;
+	unsigned long shift;
+	int ret;
 
 	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
 	ASSERT(res_ptr < res_end);
 
+	boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
+	boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;
+
+	BUG_ON(ioc->ibase & ~iovp_mask);
+	shift = ioc->ibase >> iovp_shift;
+
 	spin_lock_irqsave(&ioc->res_lock, flags);
 
 	/* Allow caller to force a search through the entire resource space */
@@ -504,9 +522,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 			if (likely(*res_ptr != ~0UL)) {
 				bitshiftcnt = ffz(*res_ptr);
 				*res_ptr |= (1UL << bitshiftcnt);
-				pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-				pide <<= 3;	/* convert to bit address */
-				pide += bitshiftcnt;
+				pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
 				ioc->res_bitshift = bitshiftcnt + bits_wanted;
 				goto found_it;
 			}
@@ -529,17 +545,19 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 			base_mask = RESMAP_MASK(bits_wanted);
 			mask = base_mask << bitshiftcnt;
 
-			DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr);
+			DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
 			for(; res_ptr < res_end ; res_ptr++)
 			{
 				DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
 				ASSERT(0 != mask);
 				for (; mask ; mask <<= o, bitshiftcnt += o) {
-					if(0 == ((*res_ptr) & mask)) {
+					tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
+					ret = iommu_is_span_boundary(tpide, bits_wanted,
+								     shift,
+								     boundary_size);
+					if ((0 == ((*res_ptr) & mask)) && !ret) {
 						*res_ptr |= mask;     /* mark resources busy! */
-						pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-						pide <<= 3;	/* convert to bit address */
-						pide += bitshiftcnt;
+						pide = tpide;
 						ioc->res_bitshift = bitshiftcnt + bits_wanted;
 						goto found_it;
 					}
@@ -560,6 +578,11 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 			end = res_end - qwords;
 
 			for (; res_ptr < end; res_ptr++) {
+				tpide = ptr_to_pide(ioc, res_ptr, 0);
+				ret = iommu_is_span_boundary(tpide, bits_wanted,
+							     shift, boundary_size);
+				if (ret)
+					goto next_ptr;
 				for (i = 0 ; i < qwords ; i++) {
 					if (res_ptr[i] != 0)
 						goto next_ptr;
@@ -572,8 +595,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 					res_ptr[i] = ~0UL;
 				res_ptr[i] |= RESMAP_MASK(bits);
 
-				pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
-				pide <<= 3;	/* convert to bit address */
+				pide = tpide;
 				res_ptr += qwords;
 				ioc->res_bitshift = bits;
 				goto found_it;
@@ -605,7 +627,7 @@ found_it:
  * resource bit map.
  */
 static int
-sba_alloc_range(struct ioc *ioc, size_t size)
+sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
 {
 	unsigned int pages_needed = size >> iovp_shift;
 #ifdef PDIR_SEARCH_TIMING
@@ -622,9 +644,9 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 	/*
 	** "seek and ye shall find"...praying never hurts either...
 	*/
-	pide = sba_search_bitmap(ioc, pages_needed, 1);
+	pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
 	if (unlikely(pide >= (ioc->res_size << 3))) {
-		pide = sba_search_bitmap(ioc, pages_needed, 0);
+		pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
 		if (unlikely(pide >= (ioc->res_size << 3))) {
 #if DELAYED_RESOURCE_CNT > 0
 			unsigned long flags;
@@ -653,7 +675,7 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 			}
 			spin_unlock_irqrestore(&ioc->saved_lock, flags);
 
-			pide = sba_search_bitmap(ioc, pages_needed, 0);
+			pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
 			if (unlikely(pide >= (ioc->res_size << 3)))
 				panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
 				      ioc->ioc_hpa);
@@ -679,7 +701,7 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 #endif
 
 	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
-		__FUNCTION__, size, pages_needed, pide,
+		__func__, size, pages_needed, pide,
 		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
 		ioc->res_bitshift );
 
@@ -722,8 +744,8 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
 	m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
 	bits_not_wanted = 0;
 
-	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __FUNCTION__, (uint) iova, size,
-		bits_not_wanted, m, pide, res_ptr, *res_ptr);
+	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size,
+		bits_not_wanted, m, pide, res_ptr, *res_ptr);
 
 	ASSERT(m != 0);
 	ASSERT(bits_not_wanted);
@@ -936,12 +958,11 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
 
-	pide = sba_alloc_range(ioc, size);
+	pide = sba_alloc_range(ioc, dev, size);
 
 	iovp = (dma_addr_t) pide << iovp_shift;
 
-	DBG_RUN("%s() 0x%p -> 0x%lx\n",
-		__FUNCTION__, addr, (long) iovp | offset);
+	DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset);
 
 	pdir_start = &(ioc->pdir_base[pide]);
 
@@ -1029,8 +1050,7 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 #endif
 	offset = iova & ~iovp_mask;
 
-	DBG_RUN("%s() iovp 0x%lx/%x\n",
-		__FUNCTION__, (long) iova, size);
+	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
 
 	iova ^= offset;        /* clear offset bits */
 	size += offset;
@@ -1375,7 +1395,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
 		dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
 		ASSERT(dma_len <= DMA_CHUNK_SIZE);
 		dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
-			| (sba_alloc_range(ioc, dma_len) << iovp_shift)
+			| (sba_alloc_range(ioc, dev, dma_len) << iovp_shift)
 			| dma_offset);
 		n_mappings++;
 	}
@@ -1404,7 +1424,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 	struct scatterlist *sg;
 #endif
 
-	DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
+	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
 	ioc = GET_IOC(dev);
 	ASSERT(ioc);
 
@@ -1468,7 +1488,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 #endif
 
 	ASSERT(coalesced == filled);
-	DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);
+	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
 
 	return filled;
 }
@@ -1491,7 +1511,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
 #endif
 
 	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
-		   __FUNCTION__, nents, sba_sg_address(sglist), sglist->length);
+		   __func__, nents, sba_sg_address(sglist), sglist->length);
 
 #ifdef ASSERT_PDIR_SANITY
 	ioc = GET_IOC(dev);
@@ -1509,7 +1529,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
 		nents--;
 	}
 
-	DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
+	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
 
 #ifdef ASSERT_PDIR_SANITY
 	spin_lock_irqsave(&ioc->res_lock, flags);
@@ -1546,7 +1566,7 @@ ioc_iova_init(struct ioc *ioc)
 	ioc->iov_size = ~ioc->imask + 1;
 
 	DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
-		__FUNCTION__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
+		__func__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
 		ioc->iov_size >> 20);
 
 	switch (iovp_size) {
@@ -1569,7 +1589,7 @@ ioc_iova_init(struct ioc *ioc)
 
 	memset(ioc->pdir_base, 0, ioc->pdir_size);
 
-	DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __FUNCTION__,
+	DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__,
 		iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
 
 	ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
@@ -1612,7 +1632,7 @@ ioc_iova_init(struct ioc *ioc)
 
 		prefetch_spill_page = virt_to_phys(addr);
 
-		DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page);
+		DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page);
 	}
 	/*
 	** Set all the PDIR entries valid w/ the spill page as the target
@@ -1641,7 +1661,7 @@ ioc_resource_init(struct ioc *ioc)
 	/* resource map size dictated by pdir_size */
 	ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
 	ioc->res_size >>= 3;  /* convert bit count to byte count */
-	DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);
+	DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);
 
 	ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
 						 get_order(ioc->res_size));
@@ -1664,7 +1684,7 @@ ioc_resource_init(struct ioc *ioc)
 							      | prefetch_spill_page);
 #endif
 
-	DBG_INIT("%s() res_map %x %p\n", __FUNCTION__,
+	DBG_INIT("%s() res_map %x %p\n", __func__,
 		 ioc->res_size, (void *) ioc->res_map);
 }
 
@@ -1767,7 +1787,7 @@ ioc_init(u64 hpa, void *handle)
 	iovp_size = (1 << iovp_shift);
 	iovp_mask = ~(iovp_size - 1);
 
-	DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __FUNCTION__,
+	DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__,
 		PAGE_SIZE >> 10, iovp_size >> 10);
 
 	if (!ioc->name) {
@@ -2137,7 +2157,7 @@ sba_page_override(char *str)
 		break;
 	default:
 		printk("%s: unknown/unsupported iommu page size %ld\n",
-		       __FUNCTION__, page_size);
+		       __func__, page_size);
 	}
 
 	return 1;
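For reference, the boundary test added above comes from <linux/iommu-helper.h>. The snippet below is only an illustrative sketch of its semantics, not the kernel's verbatim implementation, and the function name would_cross_segment is hypothetical: given a starting bitmap index, an allocation length in IOVA pages, the IOVA base expressed in pages (shift), and a power-of-two segment size in pages, it reports whether the candidate range would straddle a segment boundary.

	/* Sketch only: mirrors the check used by sba_search_bitmap() above. */
	static int would_cross_segment(unsigned long index, unsigned long nr,
				       unsigned long shift, unsigned long boundary_size)
	{
		/* Position of the candidate range within its boundary_size-aligned
		 * segment; boundary_size is assumed to be a power of two. */
		unsigned long offset = (shift + index) & (boundary_size - 1);

		/* The range crosses a boundary if it does not fit in the
		 * remainder of the current segment. */
		return offset + nr > boundary_size;
	}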