Diffstat (limited to 'drivers/usb/host')
 drivers/usb/host/Kconfig         |   2
 drivers/usb/host/ehci-hcd.c      |   9
 drivers/usb/host/ehci-hub.c      |  14
 drivers/usb/host/ehci-msm.c      |  14
 drivers/usb/host/ehci-platform.c |  37
 drivers/usb/host/ehci-st.c       |   6
 drivers/usb/host/ehci-tegra.c    |  16
 drivers/usb/host/ohci-hcd.c      |   1
 drivers/usb/host/ohci-platform.c |  43
 drivers/usb/host/ohci-q.c        |   3
 drivers/usb/host/ohci-st.c       |   6
 drivers/usb/host/xhci-mem.c      |  74
 drivers/usb/host/xhci-pci.c      |   5
 drivers/usb/host/xhci-plat.c     |  11
 drivers/usb/host/xhci-ring.c     | 488
 drivers/usb/host/xhci.c          |  36
 drivers/usb/host/xhci.h          |  10
17 files changed, 466 insertions(+), 309 deletions(-)
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index d8f5674809e8..2e710a4cca52 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -180,7 +180,7 @@ config USB_EHCI_MXC
 config USB_EHCI_HCD_OMAP
 	tristate "EHCI support for OMAP3 and later chips"
 	depends on ARCH_OMAP
-	select NOP_USB_XCEIV
+	depends on NOP_USB_XCEIV
 	default y
 	---help---
 	  Enables support for the on-chip EHCI controller on
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index ae1b6e69eb96..a962b89b65a6 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -368,6 +368,15 @@ static void ehci_shutdown(struct usb_hcd *hcd)
 {
 	struct ehci_hcd	*ehci = hcd_to_ehci(hcd);
 
+	/*
+	 * Protect the system from crashing at system shutdown in cases where
+	 * the USB host is not added yet from the OTG controller driver.
+	 * As ehci_setup() has not been done yet, don't touch registers or
+	 * variables initialized in ehci_setup().
+	 */
+	if (!ehci->sbrn)
+		return;
+
 	spin_lock_irq(&ehci->lock);
 	ehci->shutdown = true;
 	ehci->rh_state = EHCI_RH_STOPPING;
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index ffc90295a95f..74f62d68f013 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -872,15 +872,23 @@ int ehci_hub_control(
 ) {
 	struct ehci_hcd	*ehci = hcd_to_ehci (hcd);
 	int		ports = HCS_N_PORTS (ehci->hcs_params);
-	u32 __iomem	*status_reg = &ehci->regs->port_status[
-				(wIndex & 0xff) - 1];
-	u32 __iomem	*hostpc_reg = &ehci->regs->hostpc[(wIndex & 0xff) - 1];
+	u32 __iomem	*status_reg, *hostpc_reg;
 	u32		temp, temp1, status;
 	unsigned long	flags;
 	int		retval = 0;
 	unsigned	selector;
 
 	/*
+	 * Avoid underflow while calculating (wIndex & 0xff) - 1.
+	 * The compiler might deduce that wIndex can never be 0 and then
+	 * optimize away the tests for !wIndex below.
+	 */
+	temp = wIndex & 0xff;
+	temp -= (temp > 0);
+	status_reg = &ehci->regs->port_status[temp];
+	hostpc_reg = &ehci->regs->hostpc[temp];
+
+	/*
 	 * FIXME:  support SetPortFeatures USB_PORT_FEAT_INDICATOR.
 	 * HCS_INDICATOR may say we can change LEDs to off/amber/green.
 	 * (track current state ourselves) ... blink for diagnostics,
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
index d3afc89d00f5..2f8d3af811ce 100644
--- a/drivers/usb/host/ehci-msm.c
+++ b/drivers/usb/host/ehci-msm.c
@@ -179,22 +179,32 @@ static int ehci_msm_remove(struct platform_device *pdev)
 static int ehci_msm_pm_suspend(struct device *dev)
 {
 	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
 	bool do_wakeup = device_may_wakeup(dev);
 
 	dev_dbg(dev, "ehci-msm PM suspend\n");
 
-	return ehci_suspend(hcd, do_wakeup);
+	/* Only call ehci_suspend() if ehci_setup() has been done */
+	if (ehci->sbrn)
+		return ehci_suspend(hcd, do_wakeup);
+
+	return 0;
 }
 
 static int ehci_msm_pm_resume(struct device *dev)
 {
 	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
 
 	dev_dbg(dev, "ehci-msm PM resume\n");
-	ehci_resume(hcd, false);
+
+	/* Only call ehci_resume() if ehci_setup() has been done */
+	if (ehci->sbrn)
+		ehci_resume(hcd, false);
 
 	return 0;
 }
+
 #else
 #define ehci_msm_pm_suspend	NULL
 #define ehci_msm_pm_resume	NULL
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 1757ebb471b6..6816b8c371d0 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -39,11 +39,12 @@
 
 #define DRIVER_DESC "EHCI generic platform driver"
 #define EHCI_MAX_CLKS 3
+#define EHCI_MAX_RSTS 3
 #define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv)
 
 struct ehci_platform_priv {
 	struct clk *clks[EHCI_MAX_CLKS];
-	struct reset_control *rst;
+	struct reset_control *rsts[EHCI_MAX_RSTS];
 	struct phy **phys;
 	int num_phys;
 	bool reset_on_resume;
@@ -149,7 +150,7 @@ static int ehci_platform_probe(struct platform_device *dev)
 	struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
 	struct ehci_platform_priv *priv;
 	struct ehci_hcd *ehci;
-	int err, irq, phy_num, clk = 0;
+	int err, irq, phy_num, clk = 0, rst;
 
 	if (usb_disabled())
 		return -ENODEV;
@@ -234,16 +235,20 @@ static int ehci_platform_probe(struct platform_device *dev)
 		}
 	}
 
-	priv->rst = devm_reset_control_get_optional(&dev->dev, NULL);
-	if (IS_ERR(priv->rst)) {
-		err = PTR_ERR(priv->rst);
-		if (err == -EPROBE_DEFER)
-			goto err_put_clks;
-		priv->rst = NULL;
-	} else {
-		err = reset_control_deassert(priv->rst);
+	for (rst = 0; rst < EHCI_MAX_RSTS; rst++) {
+		priv->rsts[rst] = devm_reset_control_get_shared_by_index(
+					&dev->dev, rst);
+		if (IS_ERR(priv->rsts[rst])) {
+			err = PTR_ERR(priv->rsts[rst]);
+			if (err == -EPROBE_DEFER)
+				goto err_reset;
+			priv->rsts[rst] = NULL;
+			break;
+		}
+
+		err = reset_control_deassert(priv->rsts[rst]);
 		if (err)
-			goto err_put_clks;
+			goto err_reset;
 	}
 
 	if (pdata->big_endian_desc)
@@ -300,8 +305,8 @@ err_power:
 	if (pdata->power_off)
 		pdata->power_off(dev);
 err_reset:
-	if (priv->rst)
-		reset_control_assert(priv->rst);
+	while (--rst >= 0)
+		reset_control_assert(priv->rsts[rst]);
 err_put_clks:
 	while (--clk >= 0)
 		clk_put(priv->clks[clk]);
@@ -319,15 +324,15 @@ static int ehci_platform_remove(struct platform_device *dev)
 	struct usb_hcd *hcd = platform_get_drvdata(dev);
 	struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
 	struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
-	int clk;
+	int clk, rst;
 
 	usb_remove_hcd(hcd);
 
 	if (pdata->power_off)
 		pdata->power_off(dev);
 
-	if (priv->rst)
-		reset_control_assert(priv->rst);
+	for (rst = 0; rst < EHCI_MAX_RSTS && priv->rsts[rst]; rst++)
+		reset_control_assert(priv->rsts[rst]);
 
 	for (clk = 0; clk < EHCI_MAX_CLKS && priv->clks[clk]; clk++)
 		clk_put(priv->clks[clk]);
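The ehci-hub.c hunk above exists because `(wIndex & 0xff) - 1` is computed on an unsigned value: for the invalid port number 0 it wraps around instead of going negative, and the compiler may also use the expression to "prove" that wIndex is never 0. A minimal standalone sketch of the wraparound and of the branchless clamp the patch uses (plain C, illustrative values only, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int wIndex = 0;			/* invalid port number */
	unsigned int naive = (wIndex & 0xff) - 1;	/* wraps to 0xffffffff */
	unsigned int temp = wIndex & 0xff;

	temp -= (temp > 0);	/* subtract 1 only when temp is non-zero */
	printf("naive index: %u, clamped index: %u\n", naive, temp);
	return 0;
}

With the clamp, port 0 indexes entry 0 harmlessly and the later `!wIndex` validity checks are still reachable.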
diff --git a/drivers/usb/host/ehci-st.c b/drivers/usb/host/ehci-st.c
index a94ed677d937..be4a2788fc58 100644
--- a/drivers/usb/host/ehci-st.c
+++ b/drivers/usb/host/ehci-st.c
@@ -206,7 +206,8 @@ static int st_ehci_platform_probe(struct platform_device *dev)
 		priv->clk48 = NULL;
 	}
 
-	priv->pwr = devm_reset_control_get_optional(&dev->dev, "power");
+	priv->pwr =
+		devm_reset_control_get_optional_shared(&dev->dev, "power");
 	if (IS_ERR(priv->pwr)) {
 		err = PTR_ERR(priv->pwr);
 		if (err == -EPROBE_DEFER)
@@ -214,7 +215,8 @@ static int st_ehci_platform_probe(struct platform_device *dev)
 		priv->pwr = NULL;
 	}
 
-	priv->rst = devm_reset_control_get_optional(&dev->dev, "softreset");
+	priv->rst =
+		devm_reset_control_get_optional_shared(&dev->dev, "softreset");
 	if (IS_ERR(priv->rst)) {
 		err = PTR_ERR(priv->rst);
 		if (err == -EPROBE_DEFER)
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 4031b372008e..9a3d7db5be57 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -81,15 +81,23 @@ static int tegra_reset_usb_controller(struct platform_device *pdev)
 	struct usb_hcd *hcd = platform_get_drvdata(pdev);
 	struct tegra_ehci_hcd *tegra =
 		(struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
+	bool has_utmi_pad_registers = false;
 
 	phy_np = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0);
 	if (!phy_np)
 		return -ENOENT;
 
+	if (of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers"))
+		has_utmi_pad_registers = true;
+
 	if (!usb1_reset_attempted) {
 		struct reset_control *usb1_reset;
 
-		usb1_reset = of_reset_control_get(phy_np, "usb");
+		if (!has_utmi_pad_registers)
+			usb1_reset = of_reset_control_get(phy_np, "utmi-pads");
+		else
+			usb1_reset = tegra->rst;
+
 		if (IS_ERR(usb1_reset)) {
 			dev_warn(&pdev->dev,
 				 "can't get utmi-pads reset from the PHY\n");
@@ -99,13 +107,15 @@ static int tegra_reset_usb_controller(struct platform_device *pdev)
 			reset_control_assert(usb1_reset);
 			udelay(1);
 			reset_control_deassert(usb1_reset);
+
+			if (!has_utmi_pad_registers)
+				reset_control_put(usb1_reset);
 		}
 
-		reset_control_put(usb1_reset);
 		usb1_reset_attempted = true;
 	}
 
-	if (!of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers")) {
+	if (!has_utmi_pad_registers) {
 		reset_control_assert(tegra->rst);
 		udelay(1);
 		reset_control_deassert(tegra->rst);
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 0449235d4f22..1700908b84ef 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -500,7 +500,6 @@ static int ohci_init (struct ohci_hcd *ohci)
 
 	setup_timer(&ohci->io_watchdog, io_watchdog_func,
 			(unsigned long) ohci);
-	set_timer_slack(&ohci->io_watchdog, msecs_to_jiffies(20));
 
 	ohci->hcca = dma_alloc_coherent (hcd->self.controller,
 			sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index ae1c988da146..898b74086c12 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -33,11 +33,12 @@
 
 #define DRIVER_DESC "OHCI generic platform driver"
 #define OHCI_MAX_CLKS 3
+#define OHCI_MAX_RESETS 2
 #define hcd_to_ohci_priv(h) ((struct ohci_platform_priv *)hcd_to_ohci(h)->priv)
 
 struct ohci_platform_priv {
 	struct clk *clks[OHCI_MAX_CLKS];
-	struct reset_control *rst;
+	struct reset_control *resets[OHCI_MAX_RESETS];
 	struct phy **phys;
 	int num_phys;
 };
@@ -117,7 +118,7 @@ static int ohci_platform_probe(struct platform_device *dev)
 	struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
 	struct ohci_platform_priv *priv;
 	struct ohci_hcd *ohci;
-	int err, irq, phy_num, clk = 0;
+	int err, irq, phy_num, clk = 0, rst = 0;
 
 	if (usb_disabled())
 		return -ENODEV;
@@ -195,19 +196,21 @@ static int ohci_platform_probe(struct platform_device *dev)
 				break;
 			}
 		}
-
-	}
-
-	priv->rst = devm_reset_control_get_optional(&dev->dev, NULL);
-	if (IS_ERR(priv->rst)) {
-		err = PTR_ERR(priv->rst);
-		if (err == -EPROBE_DEFER)
-			goto err_put_clks;
-		priv->rst = NULL;
-	} else {
-		err = reset_control_deassert(priv->rst);
-		if (err)
-			goto err_put_clks;
+		for (rst = 0; rst < OHCI_MAX_RESETS; rst++) {
+			priv->resets[rst] =
+				devm_reset_control_get_shared_by_index(
+						&dev->dev, rst);
+			if (IS_ERR(priv->resets[rst])) {
+				err = PTR_ERR(priv->resets[rst]);
+				if (err == -EPROBE_DEFER)
+					goto err_reset;
+				priv->resets[rst] = NULL;
+				break;
+			}
+			err = reset_control_deassert(priv->resets[rst]);
+			if (err)
+				goto err_reset;
+		}
 	}
 
 	if (pdata->big_endian_desc)
@@ -265,8 +268,8 @@ err_power:
 	if (pdata->power_off)
 		pdata->power_off(dev);
 err_reset:
-	if (priv->rst)
-		reset_control_assert(priv->rst);
+	while (--rst >= 0)
+		reset_control_assert(priv->resets[rst]);
 err_put_clks:
 	while (--clk >= 0)
 		clk_put(priv->clks[clk]);
@@ -284,15 +287,15 @@ static int ohci_platform_remove(struct platform_device *dev)
 	struct usb_hcd *hcd = platform_get_drvdata(dev);
 	struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
 	struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
-	int clk;
+	int clk, rst;
 
 	usb_remove_hcd(hcd);
 
 	if (pdata->power_off)
 		pdata->power_off(dev);
 
-	if (priv->rst)
-		reset_control_assert(priv->rst);
+	for (rst = 0; rst < OHCI_MAX_RESETS && priv->resets[rst]; rst++)
+		reset_control_assert(priv->resets[rst]);
 
 	for (clk = 0; clk < OHCI_MAX_CLKS && priv->clks[clk]; clk++)
 		clk_put(priv->clks[clk]);
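Both the ehci-platform and ohci-platform hunks rely on the same error-unwinding idiom: the loop counter doubles as a record of how many reset lines were successfully deasserted, and `while (--rst >= 0)` walks back over exactly those. A standalone sketch of the idiom, with made-up stand-ins for the reset_control calls so it runs outside the kernel:

#include <stdio.h>

#define MAX_RSTS 3

/* Hypothetical stand-ins for reset_control_deassert()/assert();
 * index 2 is made to fail so the unwind path is exercised. */
static int deassert(int i) { printf("deassert %d\n", i); return i == 2 ? -1 : 0; }
static void reassert(int i) { printf("assert %d\n", i); }

int main(void)
{
	int rst, err = 0;

	for (rst = 0; rst < MAX_RSTS; rst++) {
		err = deassert(rst);
		if (err)
			goto err_reset;
	}
	return 0;

err_reset:
	/* rst holds the failing index; --rst steps back over only the
	 * lines that were actually deasserted (here: 1, then 0). */
	while (--rst >= 0)
		reassert(rst);
	return err;
}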
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index d029bbe9eb36..641fed609911 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -183,7 +183,6 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
 {
 	int	branch;
 
-	ed->state = ED_OPER;
 	ed->ed_prev = NULL;
 	ed->ed_next = NULL;
 	ed->hwNextED = 0;
@@ -259,6 +258,8 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
 	/* the HC may not see the schedule updates yet, but if it does
 	 * then they'll be properly ordered.
 	 */
+
+	ed->state = ED_OPER;
 	return 0;
 }
diff --git a/drivers/usb/host/ohci-st.c b/drivers/usb/host/ohci-st.c
index acf2eb2a5676..02816a1515a1 100644
--- a/drivers/usb/host/ohci-st.c
+++ b/drivers/usb/host/ohci-st.c
@@ -188,13 +188,15 @@ static int st_ohci_platform_probe(struct platform_device *dev)
 		priv->clk48 = NULL;
 	}
 
-	priv->pwr = devm_reset_control_get_optional(&dev->dev, "power");
+	priv->pwr =
+		devm_reset_control_get_optional_shared(&dev->dev, "power");
 	if (IS_ERR(priv->pwr)) {
 		err = PTR_ERR(priv->pwr);
 		goto err_put_clks;
 	}
 
-	priv->rst = devm_reset_control_get_optional(&dev->dev, "softreset");
+	priv->rst =
+		devm_reset_control_get_optional_shared(&dev->dev, "softreset");
 	if (IS_ERR(priv->rst)) {
 		err = PTR_ERR(priv->rst);
 		goto err_put_clks;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index bad0d1f9a41d..6afe32381209 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -37,7 +37,9 @@
  * "All components of all Command and Transfer TRBs shall be initialized to '0'"
  */
 static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
-					unsigned int cycle_state, gfp_t flags)
+					unsigned int cycle_state,
+					unsigned int max_packet,
+					gfp_t flags)
 {
 	struct xhci_segment *seg;
 	dma_addr_t	dma;
@@ -53,6 +55,14 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
 		return NULL;
 	}
 
+	if (max_packet) {
+		seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);
+		if (!seg->bounce_buf) {
+			dma_pool_free(xhci->segment_pool, seg->trbs, dma);
+			kfree(seg);
+			return NULL;
+		}
+	}
 	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
 	if (cycle_state == 0) {
 		for (i = 0; i < TRBS_PER_SEGMENT; i++)
@@ -70,6 +80,7 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
 		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
 		seg->trbs = NULL;
 	}
+	kfree(seg->bounce_buf);
 	kfree(seg);
 }
 
@@ -317,11 +328,11 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring,
 static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
 		struct xhci_segment **first, struct xhci_segment **last,
 		unsigned int num_segs, unsigned int cycle_state,
-		enum xhci_ring_type type, gfp_t flags)
+		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
 {
 	struct xhci_segment *prev;
 
-	prev = xhci_segment_alloc(xhci, cycle_state, flags);
+	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
 	if (!prev)
 		return -ENOMEM;
 	num_segs--;
@@ -330,7 +341,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
 	while (num_segs > 0) {
 		struct xhci_segment	*next;
 
-		next = xhci_segment_alloc(xhci, cycle_state, flags);
+		next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
 		if (!next) {
 			prev = *first;
 			while (prev) {
@@ -360,7 +371,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
  */
 static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		unsigned int num_segs, unsigned int cycle_state,
-		enum xhci_ring_type type, gfp_t flags)
+		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
 {
 	struct xhci_ring	*ring;
 	int ret;
@@ -370,13 +381,15 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		return NULL;
 
 	ring->num_segs = num_segs;
+	ring->bounce_buf_len = max_packet;
 	INIT_LIST_HEAD(&ring->td_list);
 	ring->type = type;
 	if (num_segs == 0)
 		return ring;
 
 	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
-			&ring->last_seg, num_segs, cycle_state, type, flags);
+			&ring->last_seg, num_segs, cycle_state, type,
+			max_packet, flags);
 	if (ret)
 		goto fail;
 
@@ -470,7 +483,8 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
 			ring->num_segs : num_segs_needed;
 
 	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
-			num_segs, ring->cycle_state, ring->type, flags);
+			num_segs, ring->cycle_state, ring->type,
+			ring->bounce_buf_len, flags);
 	if (ret)
 		return -ENOMEM;
 
@@ -652,7 +666,8 @@ struct xhci_ring *xhci_stream_id_to_ring(
  */
 struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 		unsigned int num_stream_ctxs,
-		unsigned int num_streams, gfp_t mem_flags)
+		unsigned int num_streams,
+		unsigned int max_packet, gfp_t mem_flags)
 {
 	struct xhci_stream_info *stream_info;
 	u32 cur_stream;
@@ -704,9 +719,11 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 	 * and add their segment DMA addresses to the radix tree.
 	 * Stream 0 is reserved.
 	 */
+
 	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
 		stream_info->stream_rings[cur_stream] =
-			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
+			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
+					mem_flags);
 		cur_ring = stream_info->stream_rings[cur_stream];
 		if (!cur_ring)
 			goto cleanup_rings;
@@ -1003,7 +1020,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	}
 
 	/* Allocate endpoint 0 ring */
-	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
+	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
 	if (!dev->eps[0].ring)
 		goto fail;
 
@@ -1434,22 +1451,6 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 		return -EINVAL;
 
 	ring_type = usb_endpoint_type(&ep->desc);
-	/* Set up the endpoint ring */
-	virt_dev->eps[ep_index].new_ring =
-		xhci_ring_alloc(xhci, 2, 1, ring_type, mem_flags);
-	if (!virt_dev->eps[ep_index].new_ring) {
-		/* Attempt to use the ring cache */
-		if (virt_dev->num_rings_cached == 0)
-			return -ENOMEM;
-		virt_dev->num_rings_cached--;
-		virt_dev->eps[ep_index].new_ring =
-			virt_dev->ring_cache[virt_dev->num_rings_cached];
-		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
-		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
-					1, ring_type);
-	}
-	virt_dev->eps[ep_index].skip = false;
-	ep_ring = virt_dev->eps[ep_index].new_ring;
 
 	/*
 	 * Get values to fill the endpoint context, mostly from ep descriptor.
@@ -1479,6 +1480,23 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
 		mult = 0;
 
+	/* Set up the endpoint ring */
+	virt_dev->eps[ep_index].new_ring =
+		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
+	if (!virt_dev->eps[ep_index].new_ring) {
+		/* Attempt to use the ring cache */
+		if (virt_dev->num_rings_cached == 0)
+			return -ENOMEM;
+		virt_dev->num_rings_cached--;
+		virt_dev->eps[ep_index].new_ring =
+			virt_dev->ring_cache[virt_dev->num_rings_cached];
+		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
+		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
+					1, ring_type);
+	}
+	virt_dev->eps[ep_index].skip = false;
+	ep_ring = virt_dev->eps[ep_index].new_ring;
+
 	/* Fill the endpoint context */
 	ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
 				      EP_INTERVAL(interval) |
@@ -2409,7 +2427,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 		goto fail;
 
 	/* Set up the command ring to have one segments for now. */
-	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
+	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
 	if (!xhci->cmd_ring)
 		goto fail;
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
@@ -2454,7 +2472,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 */
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
 	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
-					   flags);
+					   0, flags);
 	if (!xhci->event_ring)
 		goto fail;
 	if (xhci_check_trb_in_td_math(xhci) < 0)
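The xhci-mem.c changes above tie an optional bounce buffer to each ring segment: transfer rings pass the endpoint's max packet size, command and event rings pass 0 and get no buffer. A standalone sketch of that allocate-together/free-together ownership, with simplified types that only stand in for the kernel's (no dma_pool, no gfp flags):

#include <stdlib.h>

struct segment {
	void *trbs;		/* stands in for the TRB dma-pool block */
	void *bounce_buf;	/* NULL unless max_packet was requested */
};

static struct segment *segment_alloc(unsigned int max_packet)
{
	struct segment *seg = calloc(1, sizeof(*seg));

	if (!seg)
		return NULL;
	seg->trbs = calloc(1, 1024);
	if (!seg->trbs)
		goto err;
	if (max_packet) {
		seg->bounce_buf = calloc(1, max_packet);
		if (!seg->bounce_buf)
			goto err;	/* undo everything done so far */
	}
	return seg;
err:
	free(seg->trbs);
	free(seg);
	return NULL;
}

static void segment_free(struct segment *seg)
{
	free(seg->trbs);
	free(seg->bounce_buf);	/* free(NULL) is a no-op, as is kfree(NULL) */
	free(seg);
}

int main(void)
{
	struct segment *seg = segment_alloc(1024);	/* e.g. SS bulk maxp */

	if (seg)
		segment_free(seg);
	return 0;
}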
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 48672fac7ff3..c10972fcc8e4 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -37,6 +37,7 @@
 /* Device for a quirk */
 #define PCI_VENDOR_ID_FRESCO_LOGIC	0x1b73
 #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK	0x1000
+#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009	0x1009
 #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400	0x1400
 
 #define PCI_VENDOR_ID_ETRON		0x1b6f
@@ -114,6 +115,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 	}
 
+	if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
+			pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
+		xhci->quirks |= XHCI_BROKEN_STREAMS;
+
 	if (pdev->vendor == PCI_VENDOR_ID_NEC)
 		xhci->quirks |= XHCI_NEC_HOST;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 676ea458148b..ed56bf9ed885 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -18,7 +18,6 @@
 #include <linux/platform_device.h>
 #include <linux/usb/phy.h>
 #include <linux/slab.h>
-#include <linux/usb/xhci_pdriver.h>
 #include <linux/acpi.h>
 
 #include "xhci.h"
@@ -138,8 +137,6 @@ MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
 
 static int xhci_plat_probe(struct platform_device *pdev)
 {
-	struct device_node	*node = pdev->dev.of_node;
-	struct usb_xhci_pdata	*pdata = dev_get_platdata(&pdev->dev);
 	const struct of_device_id *match;
 	const struct hc_driver	*driver;
 	struct xhci_hcd		*xhci;
@@ -196,10 +193,13 @@ static int xhci_plat_probe(struct platform_device *pdev)
 		ret = clk_prepare_enable(clk);
 		if (ret)
 			goto put_hcd;
+	} else if (PTR_ERR(clk) == -EPROBE_DEFER) {
+		ret = -EPROBE_DEFER;
+		goto put_hcd;
 	}
 
 	xhci = hcd_to_xhci(hcd);
-	match = of_match_node(usb_xhci_of_match, node);
+	match = of_match_node(usb_xhci_of_match, pdev->dev.of_node);
 	if (match) {
 		const struct xhci_plat_priv *priv_match = match->data;
 		struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
@@ -220,8 +220,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
 		goto disable_clk;
 	}
 
-	if ((node && of_property_read_bool(node, "usb3-lpm-capable")) ||
-			(pdata && pdata->usb3_lpm_capable))
+	if (device_property_read_bool(&pdev->dev, "usb3-lpm-capable"))
 		xhci->quirks |= XHCI_LPM_SUPPORT;
 
 	if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 52deae4b7eac..918e0c739b79 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -66,6 +66,7 @@
 
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
+#include <linux/dma-mapping.h>
 #include "xhci.h"
 #include "xhci-trace.h"
 #include "xhci-mtk.h"
@@ -88,36 +89,25 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
 	return seg->dma + (segment_offset * sizeof(*trb));
 }
 
-/* Does this link TRB point to the first segment in a ring,
- * or was the previous TRB the last TRB on the last segment in the ERST?
- */
-static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
-		struct xhci_segment *seg, union xhci_trb *trb)
+static bool trb_is_link(union xhci_trb *trb)
 {
-	if (ring == xhci->event_ring)
-		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
-			(seg->next == xhci->event_ring->first_seg);
-	else
-		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
+	return TRB_TYPE_LINK_LE32(trb->link.control);
 }
 
-/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
- * segment?  I.e. would the updated event TRB pointer step off the end of the
- * event seg?
- */
-static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
-		struct xhci_segment *seg, union xhci_trb *trb)
+static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
 {
-	if (ring == xhci->event_ring)
-		return trb == &seg->trbs[TRBS_PER_SEGMENT];
-	else
-		return TRB_TYPE_LINK_LE32(trb->link.control);
+	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
 }
 
-static int enqueue_is_link_trb(struct xhci_ring *ring)
+static bool last_trb_on_ring(struct xhci_ring *ring,
+			struct xhci_segment *seg, union xhci_trb *trb)
 {
-	struct xhci_link_trb *link = &ring->enqueue->link;
-	return TRB_TYPE_LINK_LE32(link->control);
+	return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
+}
+
+static bool link_trb_toggles_cycle(union xhci_trb *trb)
+{
+	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
 }
 
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
@@ -129,7 +119,7 @@ static void next_trb(struct xhci_hcd *xhci,
 		struct xhci_ring *ring,
 		struct xhci_segment **seg,
 		union xhci_trb **trb)
 {
-	if (last_trb(xhci, ring, *seg, *trb)) {
+	if (trb_is_link(*trb)) {
 		*seg = (*seg)->next;
 		*trb = ((*seg)->trbs);
 	} else {
@@ -145,32 +135,29 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
 {
 	ring->deq_updates++;
 
-	/*
-	 * If this is not event ring, and the dequeue pointer
-	 * is not on a link TRB, there is one more usable TRB
-	 */
-	if (ring->type != TYPE_EVENT &&
-			!last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
-		ring->num_trbs_free++;
-
-	do {
-		/*
-		 * Update the dequeue pointer further if that was a link TRB or
-		 * we're at the end of an event ring segment (which doesn't have
-		 * link TRBS)
-		 */
-		if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
-			if (ring->type == TYPE_EVENT &&
-					last_trb_on_last_seg(xhci, ring,
-						ring->deq_seg, ring->dequeue)) {
-				ring->cycle_state ^= 1;
-			}
-			ring->deq_seg = ring->deq_seg->next;
-			ring->dequeue = ring->deq_seg->trbs;
-		} else {
+	/* event ring doesn't have link trbs, check for last trb */
+	if (ring->type == TYPE_EVENT) {
+		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
 			ring->dequeue++;
+			return;
 		}
-	} while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
+		if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
+			ring->cycle_state ^= 1;
+		ring->deq_seg = ring->deq_seg->next;
+		ring->dequeue = ring->deq_seg->trbs;
+		return;
+	}
+
+	/* All other rings have link trbs */
+	if (!trb_is_link(ring->dequeue)) {
+		ring->dequeue++;
+		ring->num_trbs_free++;
+	}
+	while (trb_is_link(ring->dequeue)) {
+		ring->deq_seg = ring->deq_seg->next;
+		ring->dequeue = ring->deq_seg->trbs;
+	}
+	return;
 }
 
 /*
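The hunk above replaces the dual-purpose last_trb()/last_trb_on_last_seg() pair with four single-purpose predicates, so the event-ring case (no link TRBs, segment end detected positionally) and the transfer-ring case (explicit link TRBs) are no longer folded into one function. A standalone sketch of the same predicate shapes over a toy two-segment ring (simplified field layout, not the kernel's TRB encoding):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TRBS_PER_SEGMENT 4		/* the kernel uses 256; small demo */
#define TRB_LINK_FLAG (1u << 0)		/* stand-in for the link TRB type */

struct trb { uint32_t control; };
struct seg { struct trb trbs[TRBS_PER_SEGMENT]; struct seg *next; };

static bool trb_is_link(const struct trb *trb)
{
	return trb->control & TRB_LINK_FLAG;
}

static bool last_trb_on_seg(const struct seg *seg, const struct trb *trb)
{
	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

static bool last_trb_on_ring(const struct seg *first, const struct seg *seg,
			     const struct trb *trb)
{
	/* last TRB of the segment that links back to the first segment */
	return last_trb_on_seg(seg, trb) && seg->next == first;
}

int main(void)
{
	static struct seg a, b;			/* ring: a -> b -> a */
	const struct trb *last = &b.trbs[TRBS_PER_SEGMENT - 1];

	a.next = &b;
	b.next = &a;
	b.trbs[TRBS_PER_SEGMENT - 1].control = TRB_LINK_FLAG;

	printf("link=%d last_on_ring=%d\n",
	       trb_is_link(last), last_trb_on_ring(&a, &b, last));
	return 0;
}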
@@ -198,50 +185,42 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 
 	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
 	/* If this is not event ring, there is one less usable TRB */
-	if (ring->type != TYPE_EVENT &&
-			!last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
+	if (!trb_is_link(ring->enqueue))
 		ring->num_trbs_free--;
 	next = ++(ring->enqueue);
 
 	ring->enq_updates++;
-	/* Update the dequeue pointer further if that was a link TRB or we're at
-	 * the end of an event ring segment (which doesn't have link TRBS)
-	 */
-	while (last_trb(xhci, ring, ring->enq_seg, next)) {
-		if (ring->type != TYPE_EVENT) {
-			/*
-			 * If the caller doesn't plan on enqueueing more
-			 * TDs before ringing the doorbell, then we
-			 * don't want to give the link TRB to the
-			 * hardware just yet.  We'll give the link TRB
-			 * back in prepare_ring() just before we enqueue
-			 * the TD at the top of the ring.
-			 */
-			if (!chain && !more_trbs_coming)
-				break;
+	/* Update the dequeue pointer further if that was a link TRB */
+	while (trb_is_link(next)) {
 
-			/* If we're not dealing with 0.95 hardware or
-			 * isoc rings on AMD 0.96 host,
-			 * carry over the chain bit of the previous TRB
-			 * (which may mean the chain bit is cleared).
-			 */
-			if (!(ring->type == TYPE_ISOC &&
-					(xhci->quirks & XHCI_AMD_0x96_HOST))
-						&& !xhci_link_trb_quirk(xhci)) {
-				next->link.control &=
-					cpu_to_le32(~TRB_CHAIN);
-				next->link.control |=
-					cpu_to_le32(chain);
-			}
-			/* Give this link TRB to the hardware */
-			wmb();
-			next->link.control ^= cpu_to_le32(TRB_CYCLE);
+		/*
+		 * If the caller doesn't plan on enqueueing more TDs before
+		 * ringing the doorbell, then we don't want to give the link TRB
+		 * to the hardware just yet. We'll give the link TRB back in
+		 * prepare_ring() just before we enqueue the TD at the top of
+		 * the ring.
+		 */
+		if (!chain && !more_trbs_coming)
+			break;
 
-			/* Toggle the cycle bit after the last ring segment. */
-			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
-				ring->cycle_state ^= 1;
-			}
+		/* If we're not dealing with 0.95 hardware or isoc rings on
+		 * AMD 0.96 host, carry over the chain bit of the previous TRB
+		 * (which may mean the chain bit is cleared).
+		 */
+		if (!(ring->type == TYPE_ISOC &&
+		      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
+		    !xhci_link_trb_quirk(xhci)) {
+			next->link.control &= cpu_to_le32(~TRB_CHAIN);
+			next->link.control |= cpu_to_le32(chain);
 		}
+		/* Give this link TRB to the hardware */
+		wmb();
+		next->link.control ^= cpu_to_le32(TRB_CYCLE);
+
+		/* Toggle the cycle bit after the last ring segment. */
+		if (link_trb_toggles_cycle(next))
+			ring->cycle_state ^= 1;
+
 		ring->enq_seg = ring->enq_seg->next;
 		ring->enqueue = ring->enq_seg->trbs;
 		next = ring->enqueue;
@@ -290,6 +269,14 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
 
 	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
 	xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
+
+	/*
+	 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
+	 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
+	 * but the completion event is never sent. Use the cmd timeout timer to
+	 * handle those cases. Use twice the time to cover the bit polling retry.
+	 */
+	mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
 	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
 			&xhci->op_regs->cmd_ring);
@@ -314,6 +301,7 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
 
 		xhci_err(xhci, "Stopped the command ring failed, "
 				"maybe the host is dead\n");
+		del_timer(&xhci->cmd_timer);
 		xhci->xhc_state |= XHCI_STATE_DYING;
 		xhci_quiesce(xhci);
 		xhci_halt(xhci);
@@ -617,6 +605,31 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
 	}
 }
 
+void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring,
+				 struct xhci_td *td)
+{
+	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+	struct xhci_segment *seg = td->bounce_seg;
+	struct urb *urb = td->urb;
+
+	if (!seg || !urb)
+		return;
+
+	if (usb_urb_dir_out(urb)) {
+		dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
+				 DMA_TO_DEVICE);
+		return;
+	}
+
+	/* for in transfers we need to copy the data from bounce to sg */
+	sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf,
+			     seg->bounce_len, seg->bounce_offs);
+	dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
+			 DMA_FROM_DEVICE);
+	seg->bounce_len = 0;
+	seg->bounce_offs = 0;
+}
+
 /*
  * When we get a command completion for a Stop Endpoint Command, we need to
  * unlink any cancelled TDs from the ring.  There are two ways to do that:
@@ -736,6 +749,9 @@ remove_finished_td:
 		/* Doesn't matter what we pass for status, since the core will
 		 * just overwrite it (because the URB has been unlinked).
 		 */
+		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
+		if (ep_ring && cur_td->bounce_seg)
+			xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
 		xhci_giveback_urb_in_irq(xhci, cur_td, 0);
 
 		/* Stop processing the cancelled list if the watchdog timer is
@@ -758,6 +774,9 @@ static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
 		list_del_init(&cur_td->td_list);
 		if (!list_empty(&cur_td->cancelled_td_list))
 			list_del_init(&cur_td->cancelled_td_list);
+
+		if (cur_td->bounce_seg)
+			xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
 		xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
 	}
 }
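xhci_unmap_td_bounce_buffer() above shows the IN-transfer half of the bounce scheme: the controller wrote into one contiguous bounce buffer, and that data has to be scattered back into the URB's sg list at the TD offset where bouncing started — sg_pcopy_from_buffer() does the heavy lifting in the kernel. A userspace sketch of that partial scatter copy-back, faking a two-entry scatterlist with plain arrays (names and sizes are illustrative):

#include <stdio.h>
#include <string.h>

struct fake_sg { char *buf; size_t len; };

/* Copy bounce_len bytes from the contiguous bounce buffer back into
 * the scattered destination, starting at byte offset offs of the TD. */
static void copy_back(struct fake_sg *sg, int nents, const char *bounce,
		      size_t bounce_len, size_t offs)
{
	for (int i = 0; i < nents && bounce_len; i++) {
		if (offs >= sg[i].len) {	/* entry lies before offset */
			offs -= sg[i].len;
			continue;
		}
		size_t n = sg[i].len - offs;
		if (n > bounce_len)
			n = bounce_len;
		memcpy(sg[i].buf + offs, bounce, n);
		bounce += n;
		bounce_len -= n;
		offs = 0;
	}
}

int main(void)
{
	char a[4] = "....", b[4] = "....";
	struct fake_sg sg[] = { { a, 4 }, { b, 4 } };

	copy_back(sg, 2, "XYZ", 3, 2);	/* 3 bounced bytes at TD offset 2 */
	printf("%.4s %.4s\n", a, b);	/* -> ..XY Z... */
	return 0;
}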
@@ -908,7 +927,7 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
 	 * the dequeue pointer one segment further, or we'll jump off
 	 * the segment into la-la-land.
 	 */
-	if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
+	if (trb_is_link(ep_ring->dequeue)) {
 		ep_ring->deq_seg = ep_ring->deq_seg->next;
 		ep_ring->dequeue = ep_ring->deq_seg->trbs;
 	}
@@ -917,8 +936,7 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
 		/* We have more usable TRBs */
 		ep_ring->num_trbs_free++;
 		ep_ring->dequeue++;
-		if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
-				ep_ring->dequeue)) {
+		if (trb_is_link(ep_ring->dequeue)) {
 			if (ep_ring->dequeue ==
 					dev->eps[ep_index].queued_deq_ptr)
 				break;
@@ -1246,22 +1264,21 @@ void xhci_handle_command_timeout(unsigned long data)
 	int ret;
 	unsigned long flags;
 	u64 hw_ring_state;
-	struct xhci_command *cur_cmd = NULL;
+	bool second_timeout = false;
 	xhci = (struct xhci_hcd *) data;
 
 	/* mark this command to be cancelled */
 	spin_lock_irqsave(&xhci->lock, flags);
 	if (xhci->current_cmd) {
-		cur_cmd = xhci->current_cmd;
-		cur_cmd->status = COMP_CMD_ABORT;
+		if (xhci->current_cmd->status == COMP_CMD_ABORT)
+			second_timeout = true;
+		xhci->current_cmd->status = COMP_CMD_ABORT;
 	}
 
-
 	/* Make sure command ring is running before aborting it */
 	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
 	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
 	    (hw_ring_state & CMD_RING_RUNNING))  {
-
 		spin_unlock_irqrestore(&xhci->lock, flags);
 		xhci_dbg(xhci, "Command timeout\n");
 		ret = xhci_abort_cmd_ring(xhci);
@@ -1273,6 +1290,15 @@ void xhci_handle_command_timeout(unsigned long data)
 		}
 		return;
 	}
+
+	/* command ring failed to restart, or host removed. Bail out */
+	if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
+		xhci_cleanup_command_queue(xhci);
+		return;
+	}
+
 	/* command timeout on stopped ring, ring can't be aborted */
 	xhci_dbg(xhci, "Command timeout on stopped ring\n");
 	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
@@ -1848,6 +1874,10 @@ td_cleanup:
 	urb = td->urb;
 	urb_priv = urb->hcpriv;
 
+	/* if a bounce buffer was used to align this td then unmap it */
+	if (td->bounce_seg)
+		xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
+
 	/* Do one last check of the actual transfer length.
 	 * If the host controller said we transferred more data than the buffer
 	 * length, urb->actual_length will be a very big number (since it's
@@ -2721,7 +2751,8 @@ hw_died:
 		writel(irq_pending, &xhci->ir_set->irq_pending);
 	}
 
-	if (xhci->xhc_state & XHCI_STATE_DYING) {
+	if (xhci->xhc_state & XHCI_STATE_DYING ||
+	    xhci->xhc_state & XHCI_STATE_HALTED) {
 		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
 				"Shouldn't IRQs be disabled?\n");
 		/* Clear the event handler busy flag (RW1C);
@@ -2847,36 +2878,29 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		}
 	}
 
-	if (enqueue_is_link_trb(ep_ring)) {
-		struct xhci_ring *ring = ep_ring;
-		union xhci_trb *next;
-
-		next = ring->enqueue;
+	while (trb_is_link(ep_ring->enqueue)) {
+		/* If we're not dealing with 0.95 hardware or isoc rings
+		 * on AMD 0.96 host, clear the chain bit.
+		 */
+		if (!xhci_link_trb_quirk(xhci) &&
+		    !(ep_ring->type == TYPE_ISOC &&
+		      (xhci->quirks & XHCI_AMD_0x96_HOST)))
+			ep_ring->enqueue->link.control &=
+				cpu_to_le32(~TRB_CHAIN);
+		else
+			ep_ring->enqueue->link.control |=
+				cpu_to_le32(TRB_CHAIN);
 
-		while (last_trb(xhci, ring, ring->enq_seg, next)) {
-			/* If we're not dealing with 0.95 hardware or isoc rings
-			 * on AMD 0.96 host, clear the chain bit.
-			 */
-			if (!xhci_link_trb_quirk(xhci) &&
-			    !(ring->type == TYPE_ISOC &&
-			      (xhci->quirks & XHCI_AMD_0x96_HOST)))
-				next->link.control &= cpu_to_le32(~TRB_CHAIN);
-			else
-				next->link.control |= cpu_to_le32(TRB_CHAIN);
+		wmb();
+		ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
 
-			wmb();
-			next->link.control ^= cpu_to_le32(TRB_CYCLE);
+		/* Toggle the cycle bit after the last ring segment. */
+		if (link_trb_toggles_cycle(ep_ring->enqueue))
+			ep_ring->cycle_state ^= 1;
 
-			/* Toggle the cycle bit after the last ring segment. */
-			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
-				ring->cycle_state ^= 1;
-			}
-			ring->enq_seg = ring->enq_seg->next;
-			ring->enqueue = ring->enq_seg->trbs;
-			next = ring->enqueue;
-		}
+		ep_ring->enq_seg = ep_ring->enq_seg->next;
+		ep_ring->enqueue = ep_ring->enq_seg->trbs;
 	}
-
 	return 0;
 }
@@ -3074,7 +3098,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
  */
 static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
 			      int trb_buff_len, unsigned int td_total_len,
-			      struct urb *urb, unsigned int num_trbs_left)
+			      struct urb *urb, bool more_trbs_coming)
 {
 	u32 maxp, total_packet_count;
 
@@ -3083,7 +3107,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
 		return ((td_total_len - transferred) >> 10);
 
 	/* One TRB with a zero-length data packet. */
-	if (num_trbs_left == 0 || (transferred == 0 && trb_buff_len == 0) ||
+	if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
 	    trb_buff_len == td_total_len)
 		return 0;
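The xhci_td_remainder() change above switches the "last TRB?" signal from a TRB count to a boolean, but the underlying math is unchanged: the TD size field tells the controller how many packets (not bytes) remain after the current TRB. A standalone sketch of that math with illustrative values (the real function also has a pre-1.0 controller path and the field is capped at 31 by TRB_TD_SIZE, both omitted here):

#include <stdio.h>

static unsigned int td_remainder(unsigned int transferred,
				 unsigned int trb_len,
				 unsigned int td_total,
				 unsigned int maxp,
				 int more_trbs_coming)
{
	unsigned int total_packets;

	/* last TRB, or the whole TD fits in one TRB: nothing remains */
	if (!more_trbs_coming || trb_len == td_total)
		return 0;

	total_packets = (td_total + maxp - 1) / maxp;	/* DIV_ROUND_UP */
	return total_packets - (transferred + trb_len) / maxp;
}

int main(void)
{
	/* 3000-byte TD, 512-byte packets, split across two TRBs */
	printf("%u\n", td_remainder(0, 1024, 3000, 512, 1));	/* -> 4 */
	printf("%u\n", td_remainder(1024, 1976, 3000, 512, 0));	/* -> 0 */
	return 0;
}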
@@ -3098,37 +3122,103 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
 	return (total_packet_count - ((transferred + trb_buff_len) / maxp));
 }
 
+
+static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
+			 u32 *trb_buff_len, struct xhci_segment *seg)
+{
+	struct device *dev = xhci_to_hcd(xhci)->self.controller;
+	unsigned int unalign;
+	unsigned int max_pkt;
+	u32 new_buff_len;
+
+	max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+	unalign = (enqd_len + *trb_buff_len) % max_pkt;
+
+	/* we got lucky, last normal TRB data on segment is packet aligned */
+	if (unalign == 0)
+		return 0;
+
+	xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
+		 unalign, *trb_buff_len);
+
+	/* is the last normal TRB alignable by splitting it */
+	if (*trb_buff_len > unalign) {
+		*trb_buff_len -= unalign;
+		xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
+		return 0;
+	}
+
+	/*
+	 * We want enqd_len + trb_buff_len to sum up to a number which is
+	 * divisible by the endpoint's wMaxPacketSize. IOW:
+	 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
+	 */
+	new_buff_len = max_pkt - (enqd_len % max_pkt);
+
+	if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
+		new_buff_len = (urb->transfer_buffer_length - enqd_len);
+
+	/* create a max max_pkt sized bounce buffer pointed to by last trb */
+	if (usb_urb_dir_out(urb)) {
+		sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs,
+				   seg->bounce_buf, new_buff_len, enqd_len);
+		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+						 max_pkt, DMA_TO_DEVICE);
+	} else {
+		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+						 max_pkt, DMA_FROM_DEVICE);
+	}
+
+	if (dma_mapping_error(dev, seg->bounce_dma)) {
+		/* try without aligning. Some host controllers survive */
+		xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
+		return 0;
+	}
+	*trb_buff_len = new_buff_len;
+	seg->bounce_len = new_buff_len;
+	seg->bounce_offs = enqd_len;
+
+	xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);
+
+	return 1;
+}
+
 /* This is very similar to what ehci-q.c qtd_fill() does */
 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
 {
-	struct xhci_ring *ep_ring;
+	struct xhci_ring *ring;
 	struct urb_priv *urb_priv;
 	struct xhci_td *td;
 	struct xhci_generic_trb *start_trb;
 	struct scatterlist *sg = NULL;
-	bool more_trbs_coming;
-	bool zero_length_needed;
-	unsigned int num_trbs, last_trb_num, i;
+	bool more_trbs_coming = true;
+	bool need_zero_pkt = false;
+	bool first_trb = true;
+	unsigned int num_trbs;
 	unsigned int start_cycle, num_sgs = 0;
-	unsigned int running_total, block_len, trb_buff_len;
-	unsigned int full_len;
-	int ret;
+	unsigned int enqd_len, block_len, trb_buff_len, full_len;
+	int sent_len, ret;
 	u32 field, length_field, remainder;
-	u64 addr;
+	u64 addr, send_addr;
 
-	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
-	if (!ep_ring)
+	ring = xhci_urb_to_transfer_ring(xhci, urb);
+	if (!ring)
 		return -EINVAL;
 
+	full_len = urb->transfer_buffer_length;
 	/* If we have scatter/gather list, we use it. */
 	if (urb->num_sgs) {
 		num_sgs = urb->num_mapped_sgs;
 		sg = urb->sg;
+		addr = (u64) sg_dma_address(sg);
+		block_len = sg_dma_len(sg);
 		num_trbs = count_sg_trbs_needed(urb);
-	} else
+	} else {
 		num_trbs = count_trbs_needed(urb);
-
+		addr = (u64) urb->transfer_dma;
+		block_len = full_len;
+	}
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
 			num_trbs, urb, 0, mem_flags);
@@ -3137,20 +3227,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	urb_priv = urb->hcpriv;
 
-	last_trb_num = num_trbs - 1;
-
 	/* Deal with URB_ZERO_PACKET - need one more td/trb */
-	zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
-		urb_priv->length == 2;
-	if (zero_length_needed) {
-		num_trbs++;
-		xhci_dbg(xhci, "Creating zero length td.\n");
-		ret = prepare_transfer(xhci, xhci->devs[slot_id],
-				ep_index, urb->stream_id,
-				1, urb, 1, mem_flags);
-		if (unlikely(ret < 0))
-			return ret;
-	}
+	if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->length > 1)
+		need_zero_pkt = true;
 
 	td = urb_priv->td[0];
@@ -3159,102 +3238,97 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	 * until we've finished creating all the other TRBs.  The ring's cycle
 	 * state may change as we enqueue the other TRBs, so save it too.
 	 */
-	start_trb = &ep_ring->enqueue->generic;
-	start_cycle = ep_ring->cycle_state;
-
-	full_len = urb->transfer_buffer_length;
-	running_total = 0;
-	block_len = 0;
+	start_trb = &ring->enqueue->generic;
+	start_cycle = ring->cycle_state;
+	send_addr = addr;
 
 	/* Queue the TRBs, even if they are zero-length */
-	for (i = 0; i < num_trbs; i++) {
+	for (enqd_len = 0; enqd_len < full_len; enqd_len += trb_buff_len) {
 		field = TRB_TYPE(TRB_NORMAL);
 
-		if (block_len == 0) {
-			/* A new contiguous block. */
-			if (sg) {
-				addr = (u64) sg_dma_address(sg);
-				block_len = sg_dma_len(sg);
-			} else {
-				addr = (u64) urb->transfer_dma;
-				block_len = full_len;
-			}
-			/* TRB buffer should not cross 64KB boundaries */
-			trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
-			trb_buff_len = min_t(unsigned int,
-								trb_buff_len,
-								block_len);
-		} else {
-			/* Further through the contiguous block. */
-			trb_buff_len = block_len;
-			if (trb_buff_len > TRB_MAX_BUFF_SIZE)
-				trb_buff_len = TRB_MAX_BUFF_SIZE;
-		}
+		/* TRB buffer should not cross 64KB boundaries */
+		trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+		trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
 
-		if (running_total + trb_buff_len > full_len)
-			trb_buff_len = full_len - running_total;
+		if (enqd_len + trb_buff_len > full_len)
+			trb_buff_len = full_len - enqd_len;
 
 		/* Don't change the cycle bit of the first TRB until later */
-		if (i == 0) {
+		if (first_trb) {
+			first_trb = false;
 			if (start_cycle == 0)
 				field |= TRB_CYCLE;
 		} else
-			field |= ep_ring->cycle_state;
+			field |= ring->cycle_state;
 
 		/* Chain all the TRBs together; clear the chain bit in the last
 		 * TRB to indicate it's the last TRB in the chain.
 		 */
-		if (i < last_trb_num) {
+		if (enqd_len + trb_buff_len < full_len) {
 			field |= TRB_CHAIN;
-		} else {
-			field |= TRB_IOC;
-			if (i == last_trb_num)
-				td->last_trb = ep_ring->enqueue;
-			else if (zero_length_needed) {
-				trb_buff_len = 0;
-				urb_priv->td[1]->last_trb = ep_ring->enqueue;
+			if (trb_is_link(ring->enqueue + 1)) {
+				if (xhci_align_td(xhci, urb, enqd_len,
+						  &trb_buff_len,
+						  ring->enq_seg)) {
+					send_addr = ring->enq_seg->bounce_dma;
+					/* assuming TD won't span 2 segs */
+					td->bounce_seg = ring->enq_seg;
+				}
 			}
 		}
+		if (enqd_len + trb_buff_len >= full_len) {
+			field &= ~TRB_CHAIN;
+			field |= TRB_IOC;
+			more_trbs_coming = false;
+			td->last_trb = ring->enqueue;
+		}
 
 		/* Only set interrupt on short packet for IN endpoints */
 		if (usb_urb_dir_in(urb))
 			field |= TRB_ISP;
 
 		/* Set the TRB length, TD size, and interrupter fields. */
-		remainder = xhci_td_remainder(xhci, running_total,
-							trb_buff_len, full_len,
-							urb, num_trbs - i - 1);
+		remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
+					      full_len, urb, more_trbs_coming);
 
 		length_field = TRB_LEN(trb_buff_len) |
 			TRB_TD_SIZE(remainder) |
 			TRB_INTR_TARGET(0);
 
-		if (i < num_trbs - 1)
-			more_trbs_coming = true;
-		else
-			more_trbs_coming = false;
-		queue_trb(xhci, ep_ring, more_trbs_coming,
-				lower_32_bits(addr),
-				upper_32_bits(addr),
+		queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
+				lower_32_bits(send_addr),
+				upper_32_bits(send_addr),
 				length_field,
 				field);
 
-		running_total += trb_buff_len;
 		addr += trb_buff_len;
-		block_len -= trb_buff_len;
-
-		if (sg) {
-			if (block_len == 0) {
-				/* New sg entry */
-				--num_sgs;
-				if (num_sgs == 0)
-					break;
+		sent_len = trb_buff_len;
+
+		while (sg && sent_len >= block_len) {
+			/* New sg entry */
+			--num_sgs;
+			sent_len -= block_len;
+			if (num_sgs != 0) {
 				sg = sg_next(sg);
+				block_len = sg_dma_len(sg);
+				addr = (u64) sg_dma_address(sg);
+				addr += sent_len;
 			}
 		}
+		block_len -= sent_len;
+		send_addr = addr;
+	}
+
+	if (need_zero_pkt) {
+		ret = prepare_transfer(xhci, xhci->devs[slot_id],
+				       ep_index, urb->stream_id,
+				       1, urb, 1, mem_flags);
+		urb_priv->td[1]->last_trb = ring->enqueue;
+		field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
+		queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
 	}
 
-	check_trb_math(urb, running_total);
+	check_trb_math(urb, enqd_len);
 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
 			start_cycle, start_trb);
 	return 0;
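xhci_align_td() above has two strategies for ending the enqueued data on a wMaxPacketSize boundary at a segment break: shorten the last TRB if it is longer than the misalignment, otherwise redirect the tail into the segment's bounce buffer. A standalone sketch of just the length math, with illustrative values (DMA mapping and the sg copy are omitted):

#include <stdio.h>

static void align(unsigned int enqd_len, unsigned int trb_buff_len,
		  unsigned int max_pkt)
{
	unsigned int unalign = (enqd_len + trb_buff_len) % max_pkt;

	if (unalign == 0) {
		printf("already aligned, len %u\n", trb_buff_len);
	} else if (trb_buff_len > unalign) {
		/* split: shorten the last TRB so the enqueued data ends
		 * on a wMaxPacketSize boundary */
		printf("split align, new len %u\n", trb_buff_len - unalign);
	} else {
		/* bounce: point the last TRB at a private buffer sized so
		 * that enqd_len + new length is packet aligned */
		printf("bounce align, new len %u\n",
		       max_pkt - (enqd_len % max_pkt));
	}
}

int main(void)
{
	align(1000, 600, 512);	/* -> split align, new len 536 (1536 = 3*512) */
	align(1000, 10, 512);	/* -> bounce align, new len 24 (1024 = 2*512) */
	return 0;
}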
@@ -3648,7 +3722,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		/* Set the TRB length, TD size, & interrupter fields. */
 		remainder = xhci_td_remainder(xhci, running_total,
 						trb_buff_len, td_len,
-						urb, trbs_per_td - j - 1);
+						urb, more_trbs_coming);
 
 		length_field = TRB_LEN(trb_buff_len) |
 			TRB_INTR_TARGET(0);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index fa7e1ef36cd9..01d96c9b3a75 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -490,8 +490,6 @@ static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
 	xhci->comp_mode_recovery_timer.expires = jiffies +
 			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
 
-	set_timer_slack(&xhci->comp_mode_recovery_timer,
-			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
 	add_timer(&xhci->comp_mode_recovery_timer);
 	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
 			"Compliance mode recovery timer initialized");
@@ -685,20 +683,23 @@ void xhci_stop(struct usb_hcd *hcd)
 	u32 temp;
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
-	if (xhci->xhc_state & XHCI_STATE_HALTED)
-		return;
-
 	mutex_lock(&xhci->mutex);
-	spin_lock_irq(&xhci->lock);
-	xhci->xhc_state |= XHCI_STATE_HALTED;
-	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
 
-	/* Make sure the xHC is halted for a USB3 roothub
-	 * (xhci_stop() could be called as part of failed init).
-	 */
-	xhci_halt(xhci);
-	xhci_reset(xhci);
-	spin_unlock_irq(&xhci->lock);
+	if (!(xhci->xhc_state & XHCI_STATE_HALTED)) {
+		spin_lock_irq(&xhci->lock);
+
+		xhci->xhc_state |= XHCI_STATE_HALTED;
+		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+		xhci_halt(xhci);
+		xhci_reset(xhci);
+
+		spin_unlock_irq(&xhci->lock);
+	}
+
+	if (!usb_hcd_is_primary_hcd(hcd)) {
+		mutex_unlock(&xhci->mutex);
+		return;
+	}
 
 	xhci_cleanup_msix(xhci);
@@ -3136,6 +3137,7 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
 	struct xhci_input_control_ctx *ctrl_ctx;
 	unsigned int ep_index;
 	unsigned int num_stream_ctxs;
+	unsigned int max_packet;
 	unsigned long flags;
 	u32 changed_ep_bitmask = 0;
 
@@ -3209,9 +3211,11 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
 
 	for (i = 0; i < num_eps; i++) {
 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+		max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&eps[i]->desc));
 		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
 				num_stream_ctxs,
-				num_streams, mem_flags);
+				num_streams,
+				max_packet, mem_flags);
 		if (!vdev->eps[ep_index].stream_info)
 			goto cleanup;
 		/* Set maxPstreams in endpoint context and update deq ptr to
@@ -4886,7 +4890,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
 	xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
 	xhci_print_registers(xhci);
 
-	xhci->quirks = quirks;
+	xhci->quirks |= quirks;
 
 	get_quirks(dev, xhci);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index b0b8d0f8791a..b2c1dc5dc0f3 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1347,6 +1347,11 @@ struct xhci_segment {
 	/* private to HCD */
 	struct xhci_segment	*next;
 	dma_addr_t		dma;
+	/* Max packet sized bounce buffer for td-fragment alignment */
+	dma_addr_t		bounce_dma;
+	void			*bounce_buf;
+	unsigned int		bounce_offs;
+	unsigned int		bounce_len;
 };
 
 struct xhci_td {
@@ -1356,6 +1361,7 @@ struct xhci_td {
 	struct xhci_segment	*start_seg;
 	union xhci_trb		*first_trb;
 	union xhci_trb		*last_trb;
+	struct xhci_segment	*bounce_seg;
 	/* actual_length of the URB has already been set */
 	bool			urb_length_set;
 };
@@ -1405,6 +1411,7 @@ struct xhci_ring {
 	unsigned int		num_segs;
 	unsigned int		num_trbs_free;
 	unsigned int		num_trbs_free_temp;
+	unsigned int		bounce_buf_len;
 	enum xhci_ring_type	type;
 	bool			last_td_was_short;
 	struct radix_tree_root	*trb_address_map;
@@ -1807,7 +1814,8 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
 		unsigned int ep_index);
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 		unsigned int num_stream_ctxs,
-		unsigned int num_streams, gfp_t flags);
+		unsigned int num_streams,
+		unsigned int max_packet, gfp_t flags);
 void xhci_free_stream_info(struct xhci_hcd *xhci,
 		struct xhci_stream_info *stream_info);
 void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
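One small xhci.c change above is easy to miss: xhci_gen_setup() now ORs the caller-supplied quirks into xhci->quirks instead of assigning them, so quirk bits set earlier (for example by platform glue before generic setup runs) are no longer silently discarded. A minimal illustration of the bug class, with made-up flag values:

#include <stdio.h>

#define QUIRK_FROM_PLATFORM	(1u << 0)	/* set by glue code first */
#define QUIRK_FROM_GENERIC	(1u << 1)	/* set during generic setup */

int main(void)
{
	unsigned int quirks = QUIRK_FROM_PLATFORM;

	quirks = QUIRK_FROM_GENERIC;	/* old code: platform bit lost */
	printf("assign: %#x\n", quirks);

	quirks = QUIRK_FROM_PLATFORM;
	quirks |= QUIRK_FROM_GENERIC;	/* new code: bits accumulate */
	printf("or-in:  %#x\n", quirks);
	return 0;
}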