author     Henry Lin <henryl@nvidia.com>                    2019-05-22 14:33:57 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2019-06-09 10:18:13 +0300
commit     418d0e4657949e62cff69b1b33730f6365591e73 (patch)
tree       0ab9eac55eb10e9e33c483b51182a84f01e0353b
parent     03f6cbbde2a760bdd73477c4463ad66e271e8258 (diff)
download   linux-418d0e4657949e62cff69b1b33730f6365591e73.tar.xz
xhci: update bounce buffer with correct sg num
commit 597c56e372dab2c7f79b8d700aad3a5deebf9d1b upstream.

This change fixes a data corruption issue that occurred on USB hard disks
when a bounce buffer is used while transferring data.

While copying data between the sg list and the bounce buffer, the current
implementation passes the mapped sg count (urb->num_mapped_sgs) to
sg_pcopy_from_buffer() and sg_pcopy_to_buffer(). As a result, data is not
copied when the target buffer lies in elements beyond the mapped sg
elements. This change passes the sg count for the full list to fix the
issue.

In addition, when copying data out of the bounce buffer, calling
dma_unmap_single() on the bounce buffer before copying the data to the sg
list avoids cache issues.

Fixes: f9c589e142d0 ("xhci: TD-fragment, align the unsplittable case with a bounce buffer")
Cc: <stable@vger.kernel.org> # v4.8+
Signed-off-by: Henry Lin <henryl@nvidia.com>
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
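The failure mode is easiest to see from the semantics of sg_pcopy_from_buffer()
and sg_pcopy_to_buffer(): they walk only the first nents elements they are
given, so if the bounce data belongs at an offset beyond the elements counted
by urb->num_mapped_sgs, nothing is copied and the returned length falls short
of what was requested. The following is a minimal userspace sketch of that
behaviour, not kernel code; struct chunk, pcopy_from_buffer() and the buffer
sizes are illustrative stand-ins for scatterlist elements and the real
sg_pcopy_from_buffer().

/*
 * Userspace model of the bug: chunk[] stands in for urb->sg, and
 * pcopy_from_buffer() mimics sg_pcopy_from_buffer(sgl, nents, buf, buflen,
 * skip) -- skip "skip" bytes of list space, then copy buflen bytes from buf.
 * All names and sizes here are illustrative only.
 */
#include <stdio.h>
#include <string.h>

struct chunk {                      /* stand-in for one scatterlist element */
	unsigned char data[512];
	size_t len;
};

/* Copy buflen bytes from buf into the chunk list after skipping "skip" bytes.
 * Only the first nents chunks are considered, as in sg_pcopy_from_buffer(). */
static size_t pcopy_from_buffer(struct chunk *chunks, unsigned int nents,
				const unsigned char *buf, size_t buflen,
				size_t skip)
{
	size_t copied = 0;

	for (unsigned int i = 0; i < nents && copied < buflen; i++) {
		size_t avail = chunks[i].len;

		if (skip >= avail) {        /* target offset is past this chunk */
			skip -= avail;
			continue;
		}
		size_t n = avail - skip;
		if (n > buflen - copied)
			n = buflen - copied;
		memcpy(chunks[i].data + skip, buf + copied, n);
		skip = 0;
		copied += n;
	}
	return copied;                      /* < buflen if we ran out of chunks */
}

int main(void)
{
	struct chunk list[4] = {
		{ .len = 512 }, { .len = 512 }, { .len = 512 }, { .len = 512 },
	};
	unsigned char bounce[256];
	memset(bounce, 0xAB, sizeof(bounce));

	/* Bounce data belongs at offset 1536, i.e. entirely inside chunk 3. */
	size_t off = 1536;

	/* Counting only the 2 "mapped" chunks misses the target entirely... */
	size_t n1 = pcopy_from_buffer(list, 2, bounce, sizeof(bounce), off);
	/* ...while counting the full list copies everything. */
	size_t n2 = pcopy_from_buffer(list, 4, bounce, sizeof(bounce), off);

	printf("nents=2 copied %zu bytes, nents=4 copied %zu bytes\n", n1, n2);
	return 0;
}

Built with any C99 compiler, the sketch reports 0 bytes copied for the
mapped-only count and 256 bytes for the full list, which mirrors why the
patch below switches to urb->num_sgs and warns when the copied length does
not match the expected length.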
-rw-r--r--   drivers/usb/host/xhci-ring.c   17
1 file changed, 13 insertions, 4 deletions
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 9a7e77a09080..9d6c78cedd56 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -667,6 +667,7 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
 	struct device *dev = xhci_to_hcd(xhci)->self.controller;
 	struct xhci_segment *seg = td->bounce_seg;
 	struct urb *urb = td->urb;
+	size_t len;
 
 	if (!ring || !seg || !urb)
 		return;
@@ -677,11 +678,14 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
 		return;
 	}
 
-	/* for in tranfers we need to copy the data from bounce to sg */
-	sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf,
-			     seg->bounce_len, seg->bounce_offs);
 	dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
 			 DMA_FROM_DEVICE);
+	/* for in tranfers we need to copy the data from bounce to sg */
+	len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
+			     seg->bounce_len, seg->bounce_offs);
+	if (len != seg->bounce_len)
+		xhci_warn(xhci, "WARN Wrong bounce buffer read length: %ld != %d\n",
+				len, seg->bounce_len);
 	seg->bounce_len = 0;
 	seg->bounce_offs = 0;
 }
@@ -3186,6 +3190,7 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
 	unsigned int unalign;
 	unsigned int max_pkt;
 	u32 new_buff_len;
+	size_t len;
 
 	max_pkt = usb_endpoint_maxp(&urb->ep->desc);
 	unalign = (enqd_len + *trb_buff_len) % max_pkt;
@@ -3216,8 +3221,12 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
 
 	/* create a max max_pkt sized bounce buffer pointed to by last trb */
 	if (usb_urb_dir_out(urb)) {
-		sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs,
+		len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
 				   seg->bounce_buf, new_buff_len, enqd_len);
+		if (len != seg->bounce_len)
+			xhci_warn(xhci,
+				"WARN Wrong bounce buffer write length: %ld != %d\n",
+				len, seg->bounce_len);
 		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
 						 max_pkt, DMA_TO_DEVICE);
 	} else {