Diffstat (limited to 'drivers/usb/host/xhci-ring.c'):
 drivers/usb/host/xhci-ring.c | 235 +++++++++++++++++++++++++----------
 1 file changed, 168 insertions(+), 67 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 3e8211c1ce5a..cfc1ad92473f 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -380,10 +380,8 @@ static struct xhci_segment *find_trb_seg(
while (cur_seg->trbs > trb ||
&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
- if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
- TRB_TYPE(TRB_LINK) &&
- (generic_trb->field[3] & LINK_TOGGLE))
- *cycle_state = ~(*cycle_state) & 0x1;
+ if (generic_trb->field[3] & LINK_TOGGLE)
+ *cycle_state ^= 0x1;
cur_seg = cur_seg->next;
if (cur_seg == start_seg)
/* Looped over the entire list. Oops! */
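
The simplification above is safe because on the transfer rings this helper
walks, the final TRB of every segment is always a link TRB, so testing
TRB_TYPE_BITMASK there was redundant; only the toggle flag matters. The XOR
form is also exactly equivalent to the old ~(*cycle_state) & 0x1. A minimal
standalone check of that equivalence (plain C, not driver code):

	#include <assert.h>

	/* Both forms flip bit 0 and keep the result in {0, 1}. */
	static unsigned int toggle_old(unsigned int cycle) { return ~cycle & 0x1; }
	static unsigned int toggle_new(unsigned int cycle) { return cycle ^ 0x1; }

	int main(void)
	{
		assert(toggle_old(0) == 1 && toggle_new(0) == 1);
		assert(toggle_old(1) == 0 && toggle_new(1) == 0);
		return 0;
	}
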
@@ -474,8 +472,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
state->new_deq_seg = find_trb_seg(cur_td->start_seg,
dev->eps[ep_index].stopped_trb,
&state->new_cycle_state);
- if (!state->new_deq_seg)
- BUG();
+ if (!state->new_deq_seg) {
+ WARN_ON(1);
+ return;
+ }
+
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg(xhci, "Finding endpoint context\n");
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
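
Replacing BUG() with WARN_ON() plus an early return matters because this
function is reached from the command-completion path, i.e. interrupt context:
BUG() would take the whole machine down over a bookkeeping failure, while the
warning leaves the endpoint halted but the host alive. The shape of the
change, as a hypothetical illustration (handle_lookup is invented):

	/* WARN_ON() logs a stack trace but lets the system keep running --
	 * the right trade-off when callers can tolerate an early return. */
	static void handle_lookup(struct xhci_segment *seg)
	{
		if (!seg) {
			WARN_ON(1);
			return;
		}
		/* ... use seg ... */
	}
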
@@ -486,24 +487,37 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
state->new_deq_seg = find_trb_seg(state->new_deq_seg,
state->new_deq_ptr,
&state->new_cycle_state);
- if (!state->new_deq_seg)
- BUG();
+ if (!state->new_deq_seg) {
+ WARN_ON(1);
+ return;
+ }
trb = &state->new_deq_ptr->generic;
if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
(trb->field[3] & LINK_TOGGLE))
- state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
+ state->new_cycle_state ^= 0x1;
next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
+ /*
+ * If there is only one segment in a ring, find_trb_seg()'s while loop
+ * will not run, and it will return before it has a chance to see if it
+ * needs to toggle the cycle bit. It can't tell if the stalled transfer
+ * ended just before the link TRB on a one-segment ring, or if the TD
+ * wrapped around the top of the ring, because it doesn't have the TD in
+ * question. Look for the one-segment case where stalled TRB's address
+ * is greater than the new dequeue pointer address.
+ */
+ if (ep_ring->first_seg == ep_ring->first_seg->next &&
+ state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
+ state->new_cycle_state ^= 0x1;
+ xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);
+
/* Don't update the ring cycle state for the producer (us). */
xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
state->new_deq_seg);
addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
(unsigned long long) addr);
- xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n");
- ep_ring->dequeue = state->new_deq_ptr;
- ep_ring->deq_seg = state->new_deq_seg;
}
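
The one-segment test added above can be read as a standalone predicate. A
kernel-style sketch, assuming the xhci_ring and xhci_trb definitions from
xhci.h (the helper name is invented):

	/* True when the ring has a single segment (it links back to itself)
	 * and the new dequeue pointer landed before the stalled TRB -- the TD
	 * wrapped past the link TRB, so the cycle bit must toggle once more. */
	static bool td_wrapped_on_one_seg_ring(struct xhci_ring *ring,
			union xhci_trb *new_deq, union xhci_trb *stopped_trb)
	{
		return ring->first_seg == ring->first_seg->next &&
			new_deq < stopped_trb;
	}
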
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
@@ -594,13 +608,14 @@ static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
struct xhci_td *cur_td, int status, char *adjective)
{
- struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct usb_hcd *hcd;
struct urb *urb;
struct urb_priv *urb_priv;
urb = cur_td->urb;
urb_priv = urb->hcpriv;
urb_priv->td_cnt++;
+ hcd = bus_to_hcd(urb->dev->bus);
/* Only giveback urb when this is the last td in urb */
if (urb_priv->td_cnt == urb_priv->length) {
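
With split roothubs, a single xhci_hcd now backs two usb_hcd instances (one
for USB 2.0, one for USB 3.0), so xhci_to_hcd() no longer necessarily names
the bus an URB completed on; the hcd must be recovered from the URB's own
device, as the hunk does. A one-line sketch of the lookup (the wrapper name
is hypothetical):

	/* The URB's device knows which roothub bus it sits on; bus_to_hcd()
	 * maps that bus back to the usb_hcd that must receive the giveback. */
	static struct usb_hcd *urb_hcd(struct urb *urb)
	{
		return bus_to_hcd(urb->dev->bus);
	}
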
@@ -819,8 +834,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
if (ret < 0) {
/* This is bad; the host is not responding to commands and it's
* not allowing itself to be halted. At least interrupts are
- * disabled, so we can set HC_STATE_HALT and notify the
- * USB core. But if we call usb_hc_died(), it will attempt to
+ * disabled. If we call usb_hc_died(), it will attempt to
* disconnect all device drivers under this host. Those
* disconnect() methods will wait for all URBs to be unlinked,
* so we must complete them.
@@ -865,9 +879,8 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
}
}
spin_unlock(&xhci->lock);
- xhci_to_hcd(xhci)->state = HC_STATE_HALT;
xhci_dbg(xhci, "Calling usb_hc_died()\n");
- usb_hc_died(xhci_to_hcd(xhci));
+ usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
xhci_dbg(xhci, "xHCI host controller is dead.\n");
}
@@ -946,9 +959,26 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
} else {
xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
ep_ctx->deq);
+ if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
+ dev->eps[ep_index].queued_deq_ptr) ==
+ (ep_ctx->deq & ~(EP_CTX_CYCLE_MASK))) {
+ /* Update the ring's dequeue segment and dequeue pointer
+ * to reflect the new position.
+ */
+ ep_ring->deq_seg = dev->eps[ep_index].queued_deq_seg;
+ ep_ring->dequeue = dev->eps[ep_index].queued_deq_ptr;
+ } else {
+ xhci_warn(xhci, "Mismatch between completed Set TR Deq "
+ "Ptr command & xHCI internal state.\n");
+ xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
+ dev->eps[ep_index].queued_deq_seg,
+ dev->eps[ep_index].queued_deq_ptr);
+ }
}
dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
+ dev->eps[ep_index].queued_deq_seg = NULL;
+ dev->eps[ep_index].queued_deq_ptr = NULL;
/* Restart any rings with pending URBs */
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
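
The xHC reports its current dequeue pointer with the cycle state stored in
bit 0, which is why the comparison above masks ep_ctx->deq with
~EP_CTX_CYCLE_MASK before matching it against the driver's virtual-to-DMA
translation. Condensed into a sketch (the helper is hypothetical; the field
names are those used in the hunk):

	/* Compare what the driver queued against where the controller says it
	 * is dequeuing, ignoring the cycle bit the xHC keeps in bit 0. */
	static bool set_deq_took_effect(struct xhci_virt_ep *ep, u64 hw_deq)
	{
		dma_addr_t queued = xhci_trb_virt_to_dma(ep->queued_deq_seg,
							 ep->queued_deq_ptr);

		return queued == (hw_deq & ~(u64)EP_CTX_CYCLE_MASK);
	}
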
@@ -1113,7 +1143,6 @@ bandwidth_change:
handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
break;
case TRB_TYPE(TRB_CMD_NOOP):
- ++xhci->noops_handled;
break;
case TRB_TYPE(TRB_RESET_EP):
handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
@@ -1157,15 +1186,55 @@ static void handle_vendor_event(struct xhci_hcd *xhci,
handle_cmd_completion(xhci, &event->event_cmd);
}
+/* @port_id: the one-based port ID from the hardware (indexed from array of all
+ * port registers -- USB 3.0 and USB 2.0).
+ *
+ * Returns a zero-based port number, which is suitable for indexing into each of
+ * the split roothubs' port arrays and bus state arrays.
+ */
+static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
+ struct xhci_hcd *xhci, u32 port_id)
+{
+ unsigned int i;
+ unsigned int num_similar_speed_ports = 0;
+
+ /* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
+ * and usb2_ports are 0-based indexes. Count the number of similar
+ * speed ports, up to 1 port before this port.
+ */
+ for (i = 0; i < (port_id - 1); i++) {
+ u8 port_speed = xhci->port_array[i];
+
+ /*
+ * Skip ports that don't have known speeds, or have duplicate
+ * Extended Capabilities port speed entries.
+ */
+ if (port_speed == 0 || port_speed == (u8) -1)
+ continue;
+
+ /*
+ * USB 3.0 ports are always under a USB 3.0 hub. USB 2.0 and
+ * 1.1 ports are under the USB 2.0 hub. If the port speed
+ * matches the device speed, it's a similar speed port.
+ */
+ if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
+ num_similar_speed_ports++;
+ }
+ return num_similar_speed_ports;
+}
+
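
A worked example of the counting, as a self-contained model (the port layout
is hypothetical; 0x02/0x03 are Extended Capabilities major revisions, 0 is
unknown, and 0xff stands in for a duplicate entry):

	#include <assert.h>

	static unsigned int fake_index(const unsigned char *speeds,
				       unsigned int port_id, int is_usb3_hcd)
	{
		unsigned int i, n = 0;

		for (i = 0; i < port_id - 1; i++) {
			if (speeds[i] == 0 || speeds[i] == 0xff)
				continue;
			if ((speeds[i] == 0x03) == is_usb3_hcd)
				n++;
		}
		return n;
	}

	int main(void)
	{
		const unsigned char speeds[] = { 0x02, 0x02, 0x03, 0x03 };

		assert(fake_index(speeds, 4, 1) == 1);	/* 2nd USB 3.0 port */
		assert(fake_index(speeds, 2, 0) == 1);	/* 2nd USB 2.0 port */
		return 0;
	}
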
static void handle_port_status(struct xhci_hcd *xhci,
union xhci_trb *event)
{
- struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct usb_hcd *hcd;
u32 port_id;
u32 temp, temp1;
- u32 __iomem *addr;
- int ports;
+ int max_ports;
int slot_id;
+ unsigned int faked_port_index;
+ u8 major_revision;
+ struct xhci_bus_state *bus_state;
+ u32 __iomem **port_array;
/* Port status change events always have a successful completion code */
if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
@@ -1175,14 +1244,50 @@ static void handle_port_status(struct xhci_hcd *xhci,
port_id = GET_PORT_ID(event->generic.field[0]);
xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
- ports = HCS_MAX_PORTS(xhci->hcs_params1);
- if ((port_id <= 0) || (port_id > ports)) {
+ max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+ if ((port_id <= 0) || (port_id > max_ports)) {
xhci_warn(xhci, "Invalid port id %d\n", port_id);
goto cleanup;
}
- addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS * (port_id - 1);
- temp = xhci_readl(xhci, addr);
+ /* Figure out which usb_hcd this port is attached to:
+ * is it a USB 3.0 port or a USB 2.0/1.1 port?
+ */
+ major_revision = xhci->port_array[port_id - 1];
+ if (major_revision == 0) {
+ xhci_warn(xhci, "Event for port %u not in "
+ "Extended Capabilities, ignoring.\n",
+ port_id);
+ goto cleanup;
+ }
+ if (major_revision == (u8) -1) {
+ xhci_warn(xhci, "Event for port %u duplicated in"
+ "Extended Capabilities, ignoring.\n",
+ port_id);
+ goto cleanup;
+ }
+
+ /*
+ * Hardware port IDs reported by a Port Status Change Event include USB
+ * 3.0 and USB 2.0 ports. We want to check if the port has reported a
+ * resume event, but we first need to translate the hardware port ID
+ * into the index into the ports on the correct split roothub, and the
+ * correct bus_state structure.
+ */
+ /* Find the right roothub. */
+ hcd = xhci_to_hcd(xhci);
+ if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
+ hcd = xhci->shared_hcd;
+ bus_state = &xhci->bus_state[hcd_index(hcd)];
+ if (hcd->speed == HCD_USB3)
+ port_array = xhci->usb3_ports;
+ else
+ port_array = xhci->usb2_ports;
+ /* Find the faked port hub number */
+ faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
+ port_id);
+
+ temp = xhci_readl(xhci, port_array[faked_port_index]);
if (hcd->state == HC_STATE_SUSPENDED) {
xhci_dbg(xhci, "resume root hub\n");
usb_hcd_resume_root_hub(hcd);
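
The roothub selection above hinges on one invariant: a port belongs to the
USB 3.0 hcd exactly when its Extended Capabilities major revision is 0x03.
Pulled out into a sketch (the helper is hypothetical; the real code keeps
this inline):

	static struct usb_hcd *hcd_for_port(struct xhci_hcd *xhci,
			u8 major_revision)
	{
		struct usb_hcd *hcd = xhci_to_hcd(xhci);	/* primary hcd */

		if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
			hcd = xhci->shared_hcd;	/* the other roothub */
		return hcd;
	}
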
@@ -1202,8 +1307,9 @@ static void handle_port_status(struct xhci_hcd *xhci,
temp = xhci_port_state_to_neutral(temp);
temp &= ~PORT_PLS_MASK;
temp |= PORT_LINK_STROBE | XDEV_U0;
- xhci_writel(xhci, temp, addr);
- slot_id = xhci_find_slot_id_by_port(xhci, port_id);
+ xhci_writel(xhci, temp, port_array[faked_port_index]);
+ slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+ faked_port_index);
if (!slot_id) {
xhci_dbg(xhci, "slot_id is zero\n");
goto cleanup;
@@ -1211,16 +1317,16 @@ static void handle_port_status(struct xhci_hcd *xhci,
xhci_ring_device(xhci, slot_id);
xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
/* Clear PORT_PLC */
- temp = xhci_readl(xhci, addr);
+ temp = xhci_readl(xhci, port_array[faked_port_index]);
temp = xhci_port_state_to_neutral(temp);
temp |= PORT_PLC;
- xhci_writel(xhci, temp, addr);
+ xhci_writel(xhci, temp, port_array[faked_port_index]);
} else {
xhci_dbg(xhci, "resume HS port %d\n", port_id);
- xhci->resume_done[port_id - 1] = jiffies +
+ bus_state->resume_done[faked_port_index] = jiffies +
msecs_to_jiffies(20);
mod_timer(&hcd->rh_timer,
- xhci->resume_done[port_id - 1]);
+ bus_state->resume_done[faked_port_index]);
/* Do the rest in GetPortStatus */
}
}
@@ -1231,7 +1337,7 @@ cleanup:
spin_unlock(&xhci->lock);
/* Pass this up to the core */
- usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
+ usb_hcd_poll_rh_status(hcd);
spin_lock(&xhci->lock);
}
@@ -1985,12 +2091,12 @@ cleanup:
trb_comp_code != COMP_BABBLE))
xhci_urb_free_priv(xhci, urb_priv);
- usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
+ usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
xhci_dbg(xhci, "Giveback URB %p, len = %d, "
"status = %d\n",
urb, urb->actual_length, status);
spin_unlock(&xhci->lock);
- usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
+ usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
spin_lock(&xhci->lock);
}
@@ -2114,7 +2220,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
xhci_warn(xhci, "WARNING: Host System Error\n");
xhci_halt(xhci);
hw_died:
- xhci_to_hcd(xhci)->state = HC_STATE_HALT;
spin_unlock(&xhci->lock);
return -ESHUTDOWN;
}
@@ -2182,8 +2287,12 @@ hw_died:
irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
{
irqreturn_t ret;
+ struct xhci_hcd *xhci;
+ xhci = hcd_to_xhci(hcd);
set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
+ if (xhci->shared_hcd)
+ set_bit(HCD_FLAG_SAW_IRQ, &xhci->shared_hcd->flags);
ret = xhci_irq(hcd);
@@ -2327,7 +2436,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
INIT_LIST_HEAD(&td->cancelled_td_list);
if (td_index == 0) {
- ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
+ ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
if (unlikely(ret)) {
xhci_urb_free_priv(xhci, urb_priv);
urb->hcpriv = NULL;
@@ -2363,12 +2472,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
/* Scatter gather list entries may cross 64KB boundaries */
running_total = TRB_MAX_BUFF_SIZE -
- (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
if (running_total != 0)
num_trbs++;
/* How many more 64KB chunks to transfer, how many more TRBs? */
while (running_total < sg_dma_len(sg)) {
num_trbs++;
running_total += TRB_MAX_BUFF_SIZE;
}
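
The added running_total &= TRB_MAX_BUFF_SIZE - 1 fixes the 64KB-aligned case:
when the address sits exactly on a boundary, the subtraction yields 65536
rather than 0, so the old code saw a nonexistent partial head chunk (and, for
a zero-length entry, counted a spurious TRB). A self-contained check of the
arithmetic:

	#include <assert.h>

	#define TRB_MAX_BUFF_SIZE (1U << 16)	/* 64KB, as in xhci.h */

	/* Bytes between addr and the next 64KB boundary, folded so that a
	 * boundary-aligned address yields 0 instead of 65536. */
	static unsigned int head_bytes(unsigned long long addr)
	{
		unsigned int n = TRB_MAX_BUFF_SIZE -
			(addr & (TRB_MAX_BUFF_SIZE - 1));

		return n & (TRB_MAX_BUFF_SIZE - 1);
	}

	int main(void)
	{
		assert(head_bytes(0x12345) == 0x10000 - 0x2345);
		assert(head_bytes(0x20000) == 0);	/* aligned: no head */
		return 0;
	}
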
@@ -2394,11 +2504,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
if (num_trbs != 0)
- dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+ dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
"TRBs, %d left\n", __func__,
urb->ep->desc.bEndpointAddress, num_trbs);
if (running_total != urb->transfer_buffer_length)
- dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+ dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
"queued %#x (%d), asked for %#x (%d)\n",
__func__,
urb->ep->desc.bEndpointAddress,
@@ -2533,8 +2643,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
sg = urb->sg;
addr = (u64) sg_dma_address(sg);
this_sg_len = sg_dma_len(sg);
- trb_buff_len = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
@@ -2572,7 +2681,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
(unsigned int) addr + trb_buff_len);
if (TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
+ (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
@@ -2616,7 +2725,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
trb_buff_len = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (addr & (TRB_MAX_BUFF_SIZE - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (running_total + trb_buff_len > urb->transfer_buffer_length)
trb_buff_len =
@@ -2656,7 +2765,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
num_trbs = 0;
/* How much data is (potentially) left before the 64KB boundary? */
running_total = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
/* If there's some data on this 64KB chunk, or we have to send a
* zero-length transfer, we need at least one TRB
@@ -2700,8 +2810,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* How much data is in the first TRB? */
addr = (u64) urb->transfer_dma;
trb_buff_len = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
- if (urb->transfer_buffer_length < trb_buff_len)
+ (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+ if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
first_trb = true;
@@ -2879,8 +2989,8 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
td_len = urb->iso_frame_desc[i].length;
- running_total = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
if (running_total != 0)
num_trbs++;
@@ -3125,24 +3235,6 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
return 0;
}
-/* Queue a no-op command on the command ring */
-static int queue_cmd_noop(struct xhci_hcd *xhci)
-{
- return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false);
-}
-
-/*
- * Place a no-op command on the command ring to test the command and
- * event ring.
- */
-void *xhci_setup_one_noop(struct xhci_hcd *xhci)
-{
- if (queue_cmd_noop(xhci) < 0)
- return NULL;
- xhci->noops_submitted++;
- return xhci_ring_cmd_db;
-}
-
/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
@@ -3223,6 +3315,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
u32 type = TRB_TYPE(TRB_SET_DEQ);
+ struct xhci_virt_ep *ep;
addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
if (addr == 0) {
@@ -3231,6 +3324,14 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
deq_seg, deq_ptr);
return 0;
}
+ ep = &xhci->devs[slot_id]->eps[ep_index];
+ if ((ep->ep_state & SET_DEQ_PENDING)) {
+ xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
+ xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
+ return 0;
+ }
+ ep->queued_deq_seg = deq_seg;
+ ep->queued_deq_ptr = deq_ptr;
return queue_command(xhci, lower_32_bits(addr) | cycle_state,
upper_32_bits(addr), trb_stream_id,
trb_slot_id | trb_ep_index | type, false);
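
The guard added above completes a request/verify discipline: remember exactly
what was handed to the hardware, refuse to queue a second Set TR Deq Ptr
command while one is pending, and let handle_set_deq_completion() (earlier in
this patch) compare the recorded pointer against what the controller reports.
The generic shape of the pattern, with invented names:

	#include <stdbool.h>
	#include <stdint.h>

	struct pending_cmd {
		bool	 in_flight;	/* a command is outstanding */
		uint64_t queued_addr;	/* what we handed the hardware */
	};

	static int submit_cmd(struct pending_cmd *cmd, uint64_t addr)
	{
		if (cmd->in_flight)
			return -1;	/* refuse: one command at a time */
		cmd->in_flight = true;
		cmd->queued_addr = addr;
		return 0;		/* caller now rings the doorbell */
	}

	/* On completion, clear the pending state and report whether the
	 * hardware ended up where we asked. */
	static bool complete_cmd(struct pending_cmd *cmd, uint64_t hw_addr)
	{
		cmd->in_flight = false;
		return cmd->queued_addr == hw_addr;
	}
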