author | Sebastian Ott <sebott@linux.vnet.ibm.com> | 2016-11-07 17:06:03 +0300
---|---|---
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2016-11-17 09:09:53 +0300
commit | 6b7df3ce92ac82ec3f4a2953b6fed77da7b38aaa (patch) |
tree | 3ca9763fe01871318555da05b988e81271a09cb3 /arch/s390/pci |
parent | 191ce9d1fde84190f18cd3faf0ef8594f442b313 (diff) |
download | linux-6b7df3ce92ac82ec3f4a2953b6fed77da7b38aaa.tar.xz |
s390/pci: fix dma address calculation in map_sg
__s390_dma_map_sg maps a dma-contiguous area. Although we only map
whole pages, we have to take into account that the area doesn't
necessarily start or end at a page boundary, because we use the dma
address to loop over the individual sg entries. Failing to do so can
lead to accessing the wrong sg entry.
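
To make the failure mode concrete, here is a minimal user-space sketch (not kernel code; the struct, addresses, and sizes are hypothetical) of the dma address accumulation before and after the patch. It shows how ignoring s->offset lets the per-entry dma address drift away from the page that actually holds the entry's data:

```c
/*
 * Minimal user-space sketch (not kernel code) of the dma address
 * accumulation in __s390_dma_map_sg. The struct and values below are
 * hypothetical; they only illustrate why s->offset must be included
 * when advancing dma_addr.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct sg_entry {		/* stand-in for struct scatterlist */
	unsigned long offset;	/* byte offset of the data within its page */
	unsigned long length;	/* number of bytes in this entry */
};

int main(void)
{
	/* Two entries of a dma-contiguous area: the first starts mid-page. */
	struct sg_entry sg[] = { { 512, PAGE_SIZE - 512 }, { 0, PAGE_SIZE } };
	unsigned long dma_addr_base = 0x100000;	/* hypothetical iova */
	unsigned long buggy = dma_addr_base, fixed = dma_addr_base;

	for (int i = 0; i < 2; i++) {
		printf("entry %d: buggy dma_addr 0x%lx, fixed dma_addr 0x%lx\n",
		       i, buggy, fixed);
		buggy += sg[i].length;			/* old code: offset ignored */
		fixed += sg[i].offset + sg[i].length;	/* patched code */
	}
	/*
	 * For entry 1 the buggy variant yields 0x100e00, still inside the
	 * first page of the mapping, so its translation entry would be
	 * established at the wrong position and the device could access
	 * the wrong sg entry. The fixed variant yields 0x101000, the next
	 * page boundary, which is where entry 1's data actually belongs.
	 */
	return 0;
}
```

Note that the patch (in the diff below) also drops the `+ s->offset` from pa and maps `s->offset + s->length` bytes instead, so each translation entry starts at the page boundary of the entry's page and still covers all of its data.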
Fixes: ee877b81c6b9 ("s390/pci_dma: improve map_sg")
Reported-and-tested-by: Christoph Raisch <raisch@de.ibm.com>
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/pci')
-rw-r--r-- | arch/s390/pci/pci_dma.c | 15 |
1 file changed, 8 insertions(+), 7 deletions(-)
```diff
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 7350c8bc13a2..47f4afbff0a5 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -419,6 +419,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 			     size_t size, dma_addr_t *handle,
 			     enum dma_data_direction dir)
 {
+	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 	dma_addr_t dma_addr_base, dma_addr;
 	int flags = ZPCI_PTE_VALID;
@@ -426,8 +427,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	unsigned long pa;
 	int ret;
 
-	size = PAGE_ALIGN(size);
-	dma_addr_base = dma_alloc_address(dev, size >> PAGE_SHIFT);
+	dma_addr_base = dma_alloc_address(dev, nr_pages);
 	if (dma_addr_base == DMA_ERROR_CODE)
 		return -ENOMEM;
 
@@ -436,26 +436,27 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		flags |= ZPCI_TABLE_PROTECTED;
 
 	for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
-		pa = page_to_phys(sg_page(s)) + s->offset;
-		ret = __dma_update_trans(zdev, pa, dma_addr, s->length, flags);
+		pa = page_to_phys(sg_page(s));
+		ret = __dma_update_trans(zdev, pa, dma_addr,
+					 s->offset + s->length, flags);
 		if (ret)
 			goto unmap;
 
-		dma_addr += s->length;
+		dma_addr += s->offset + s->length;
 	}
 	ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
 	if (ret)
 		goto unmap;
 
 	*handle = dma_addr_base;
-	atomic64_add(size >> PAGE_SHIFT, &zdev->mapped_pages);
+	atomic64_add(nr_pages, &zdev->mapped_pages);
 
 	return ret;
 
 unmap:
 	dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
 			 ZPCI_PTE_INVALID);
-	dma_free_address(dev, dma_addr_base, size >> PAGE_SHIFT);
+	dma_free_address(dev, dma_addr_base, nr_pages);
 	zpci_err("map error:\n");
 	zpci_err_dma(ret, pa);
 	return ret;
```
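
The other change in the patch is that the page count is now computed once up front as nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT, while size itself is left unaligned. A minimal sketch of that page-count arithmetic, assuming the usual 4K-page definitions of PAGE_SHIFT/PAGE_SIZE/PAGE_ALIGN (the sizes below are made-up examples):

```c
/*
 * Sketch of the nr_pages computation introduced by the patch, using the
 * standard definitions for 4K pages. Not kernel code; the sizes below
 * are made-up examples.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long sizes[] = { 4096, 5000, 8192 };

	for (int i = 0; i < 3; i++)
		printf("size %5lu -> nr_pages %lu\n",
		       sizes[i], PAGE_ALIGN(sizes[i]) >> PAGE_SHIFT);
	/* prints: 4096 -> 1, 5000 -> 2, 8192 -> 2 */
	return 0;
}
```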