Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r-- | drivers/usb/host/xhci-ring.c | 587
1 file changed, 222 insertions, 365 deletions
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 7a0e3c720c00..d67ff71209f5 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -69,10 +69,6 @@
 #include "xhci.h"
 #include "xhci-trace.h"
 
-static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
-		struct xhci_virt_device *virt_dev,
-		struct xhci_event_cmd *event);
-
 /*
  * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
  * address of the TRB.
@@ -123,16 +119,6 @@ static int enqueue_is_link_trb(struct xhci_ring *ring)
 	return TRB_TYPE_LINK_LE32(link->control);
 }
 
-union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
-{
-	/* Enqueue pointer can be left pointing to the link TRB,
-	 * we must handle that
-	 */
-	if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
-		return ring->enq_seg->next->trbs;
-	return ring->enqueue;
-}
-
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
  * TRB is in a new segment. This does not skip over link TRBs, and it does not
  * effect the ring dequeue or enqueue pointers.
@@ -301,17 +287,7 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
 
 	xhci_dbg(xhci, "Abort command ring\n");
 
-	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
-		xhci_dbg(xhci, "The command ring isn't running, "
-				"Have the command ring been stopped?\n");
-		return 0;
-	}
-
 	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
-	if (!(temp_64 & CMD_RING_RUNNING)) {
-		xhci_dbg(xhci, "Command ring had been stopped\n");
-		return 0;
-	}
 	xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
 	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
 			&xhci->op_regs->cmd_ring);
@@ -337,71 +313,6 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
 	return 0;
 }
 
-static int xhci_queue_cd(struct xhci_hcd *xhci,
-		struct xhci_command *command,
-		union xhci_trb *cmd_trb)
-{
-	struct xhci_cd *cd;
-	cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC);
-	if (!cd)
-		return -ENOMEM;
-	INIT_LIST_HEAD(&cd->cancel_cmd_list);
-
-	cd->command = command;
-	cd->cmd_trb = cmd_trb;
-	list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list);
-
-	return 0;
-}
-
-/*
- * Cancel the command which has issue.
- *
- * Some commands may hang due to waiting for acknowledgement from
- * usb device. It is outside of the xHC's ability to control and
- * will cause the command ring is blocked. When it occurs software
- * should intervene to recover the command ring.
- * See Section 4.6.1.1 and 4.6.1.2
- */
-int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
-		union xhci_trb *cmd_trb)
-{
-	int retval = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&xhci->lock, flags);
-
-	if (xhci->xhc_state & XHCI_STATE_DYING) {
-		xhci_warn(xhci, "Abort the command ring,"
-				" but the xHCI is dead.\n");
-		retval = -ESHUTDOWN;
-		goto fail;
-	}
-
-	/* queue the cmd desriptor to cancel_cmd_list */
-	retval = xhci_queue_cd(xhci, command, cmd_trb);
-	if (retval) {
-		xhci_warn(xhci, "Queuing command descriptor failed.\n");
-		goto fail;
-	}
-
-	/* abort command ring */
-	retval = xhci_abort_cmd_ring(xhci);
-	if (retval) {
-		xhci_err(xhci, "Abort command ring failed\n");
-		if (unlikely(retval == -ESHUTDOWN)) {
-			spin_unlock_irqrestore(&xhci->lock, flags);
-			usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
-			xhci_dbg(xhci, "xHCI host controller is dead.\n");
-			return retval;
-		}
-	}
-
-fail:
-	spin_unlock_irqrestore(&xhci->lock, flags);
-	return retval;
-}
-
 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
 		unsigned int slot_id,
 		unsigned int ep_index,
@@ -684,12 +595,14 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 	}
 }
 
-static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
+static int queue_set_tr_deq(struct xhci_hcd *xhci,
+		struct xhci_command *cmd, int slot_id,
 		unsigned int ep_index, unsigned int stream_id,
 		struct xhci_segment *deq_seg,
 		union xhci_trb *deq_ptr, u32 cycle_state);
 
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+		struct xhci_command *cmd,
 		unsigned int slot_id, unsigned int ep_index,
 		unsigned int stream_id,
 		struct xhci_dequeue_state *deq_state)
@@ -704,7 +617,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 			deq_state->new_deq_ptr,
 			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
 			deq_state->new_cycle_state);
-	queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
+	queue_set_tr_deq(xhci, cmd, slot_id, ep_index, stream_id,
 			deq_state->new_deq_seg,
 			deq_state->new_deq_ptr,
 			(u32) deq_state->new_cycle_state);
@@ -773,7 +686,6 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
 		union xhci_trb *trb, struct xhci_event_cmd *event)
 {
 	unsigned int ep_index;
-	struct xhci_virt_device *virt_dev;
 	struct xhci_ring *ep_ring;
 	struct xhci_virt_ep *ep;
 	struct list_head *entry;
@@ -783,11 +695,7 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
 	struct xhci_dequeue_state deq_state;
 
 	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
-		virt_dev = xhci->devs[slot_id];
-		if (virt_dev)
-			handle_cmd_in_cmd_wait_list(xhci, virt_dev,
-					event);
-		else
+		if (!xhci->devs[slot_id])
 			xhci_warn(xhci, "Stop endpoint command "
 				"completion for disabled slot %u\n",
 				slot_id);
@@ -858,7 +766,9 @@ remove_finished_td:
 
 	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
 	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-		xhci_queue_new_dequeue_state(xhci,
+		struct xhci_command *command;
+		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+		xhci_queue_new_dequeue_state(xhci, command,
 				slot_id, ep_index,
 				ep->stopped_td->urb->stream_id,
 				&deq_state);
@@ -1206,9 +1116,11 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
 	 * because the HW can't handle two commands being queued in a row.
 	 */
 	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
+		struct xhci_command *command;
+		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
 				"Queueing configure endpoint command");
-		xhci_queue_configure_endpoint(xhci,
+		xhci_queue_configure_endpoint(xhci, command,
 				xhci->devs[slot_id]->in_ctx->dma, slot_id,
 				false);
 		xhci_ring_cmd_db(xhci);
@@ -1219,187 +1131,6 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
 	}
 }
 
-/* Complete the command and detele it from the devcie's command queue.
- */
-static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
-		struct xhci_command *command, u32 status)
-{
-	command->status = status;
-	list_del(&command->cmd_list);
-	if (command->completion)
-		complete(command->completion);
-	else
-		xhci_free_command(xhci, command);
-}
-
-
-/* Check to see if a command in the device's command queue matches this one.
- * Signal the completion or free the command, and return 1. Return 0 if the
- * completed command isn't at the head of the command list.
- */
-static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
-		struct xhci_virt_device *virt_dev,
-		struct xhci_event_cmd *event)
-{
-	struct xhci_command *command;
-
-	if (list_empty(&virt_dev->cmd_list))
-		return 0;
-
-	command = list_entry(virt_dev->cmd_list.next,
-			struct xhci_command, cmd_list);
-	if (xhci->cmd_ring->dequeue != command->command_trb)
-		return 0;
-
-	xhci_complete_cmd_in_cmd_wait_list(xhci, command,
-			GET_COMP_CODE(le32_to_cpu(event->status)));
-	return 1;
-}
-
-/*
- * Finding the command trb need to be cancelled and modifying it to
- * NO OP command. And if the command is in device's command wait
- * list, finishing and freeing it.
- *
- * If we can't find the command trb, we think it had already been
- * executed.
- */
-static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
-{
-	struct xhci_segment *cur_seg;
-	union xhci_trb *cmd_trb;
-	u32 cycle_state;
-
-	if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
-		return;
-
-	/* find the current segment of command ring */
-	cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
-			xhci->cmd_ring->dequeue, &cycle_state);
-
-	if (!cur_seg) {
-		xhci_warn(xhci, "Command ring mismatch, dequeue = %p %llx (dma)\n",
-				xhci->cmd_ring->dequeue,
-				(unsigned long long)
-				xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
-					xhci->cmd_ring->dequeue));
-		xhci_debug_ring(xhci, xhci->cmd_ring);
-		xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
-		return;
-	}
-
-	/* find the command trb matched by cd from command ring */
-	for (cmd_trb = xhci->cmd_ring->dequeue;
-			cmd_trb != xhci->cmd_ring->enqueue;
-			next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) {
-		/* If the trb is link trb, continue */
-		if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3]))
-			continue;
-
-		if (cur_cd->cmd_trb == cmd_trb) {
-
-			/* If the command in device's command list, we should
-			 * finish it and free the command structure.
-			 */
-			if (cur_cd->command)
-				xhci_complete_cmd_in_cmd_wait_list(xhci,
-					cur_cd->command, COMP_CMD_STOP);
-
-			/* get cycle state from the origin command trb */
-			cycle_state = le32_to_cpu(cmd_trb->generic.field[3])
-				& TRB_CYCLE;
-
-			/* modify the command trb to NO OP command */
-			cmd_trb->generic.field[0] = 0;
-			cmd_trb->generic.field[1] = 0;
-			cmd_trb->generic.field[2] = 0;
-			cmd_trb->generic.field[3] = cpu_to_le32(
-					TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
-			break;
-		}
-	}
-}
-
-static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci)
-{
-	struct xhci_cd *cur_cd, *next_cd;
-
-	if (list_empty(&xhci->cancel_cmd_list))
-		return;
-
-	list_for_each_entry_safe(cur_cd, next_cd,
-			&xhci->cancel_cmd_list, cancel_cmd_list) {
-		xhci_cmd_to_noop(xhci, cur_cd);
-		list_del(&cur_cd->cancel_cmd_list);
-		kfree(cur_cd);
-	}
-}
-
-/*
- * traversing the cancel_cmd_list. If the command descriptor according
- * to cmd_trb is found, the function free it and return 1, otherwise
- * return 0.
- */
-static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci,
-		union xhci_trb *cmd_trb)
-{
-	struct xhci_cd *cur_cd, *next_cd;
-
-	if (list_empty(&xhci->cancel_cmd_list))
-		return 0;
-
-	list_for_each_entry_safe(cur_cd, next_cd,
-			&xhci->cancel_cmd_list, cancel_cmd_list) {
-		if (cur_cd->cmd_trb == cmd_trb) {
-			if (cur_cd->command)
-				xhci_complete_cmd_in_cmd_wait_list(xhci,
-					cur_cd->command, COMP_CMD_STOP);
-			list_del(&cur_cd->cancel_cmd_list);
-			kfree(cur_cd);
-			return 1;
-		}
-	}
-
-	return 0;
-}
-
-/*
- * If the cmd_trb_comp_code is COMP_CMD_ABORT, we just check whether the
- * trb pointed by the command ring dequeue pointer is the trb we want to
- * cancel or not. And if the cmd_trb_comp_code is COMP_CMD_STOP, we will
- * traverse the cancel_cmd_list to trun the all of the commands according
- * to command descriptor to NO-OP trb.
- */
-static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
-		int cmd_trb_comp_code)
-{
-	int cur_trb_is_good = 0;
-
-	/* Searching the cmd trb pointed by the command ring dequeue
-	 * pointer in command descriptor list. If it is found, free it.
-	 */
-	cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci,
-			xhci->cmd_ring->dequeue);
-
-	if (cmd_trb_comp_code == COMP_CMD_ABORT)
-		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
-	else if (cmd_trb_comp_code == COMP_CMD_STOP) {
-		/* traversing the cancel_cmd_list and canceling
-		 * the command according to command descriptor
-		 */
-		xhci_cancel_cmd_in_cd_list(xhci);
-
-		xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
-		/*
-		 * ring command ring doorbell again to restart the
-		 * command ring
-		 */
-		if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue)
-			xhci_ring_cmd_db(xhci);
-	}
-	return cur_trb_is_good;
-}
-
 static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
 		u32 cmd_comp_code)
 {
@@ -1407,7 +1138,6 @@ static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
 		xhci->slot_id = slot_id;
 	else
 		xhci->slot_id = 0;
-	complete(&xhci->addr_dev);
 }
 
 static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
@@ -1432,9 +1162,6 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
 	unsigned int ep_state;
 	u32 add_flags, drop_flags;
 
-	virt_dev = xhci->devs[slot_id];
-	if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
-		return;
 	/*
 	 * Configure endpoint commands can come from the USB core
 	 * configuration or alt setting changes, or because the HW
@@ -1443,6 +1170,7 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
 	 * If the command was for a halted endpoint, the xHCI driver
 	 * is not waiting on the configure endpoint command.
 	 */
+	virt_dev = xhci->devs[slot_id];
 	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
 	if (!ctrl_ctx) {
 		xhci_warn(xhci, "Could not get input context, bad type.\n");
@@ -1465,7 +1193,7 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
 			add_flags - SLOT_FLAG == drop_flags) {
 		ep_state = virt_dev->eps[ep_index].ep_state;
 		if (!(ep_state & EP_HALTED))
-			goto bandwidth_change;
+			return;
 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
 				"Completed config ep cmd - "
 				"last ep index = %d, state = %d",
@@ -1475,43 +1203,14 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
 		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 		return;
 	}
-bandwidth_change:
-	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
-			"Completed config ep cmd");
-	virt_dev->cmd_status = cmd_comp_code;
-	complete(&virt_dev->cmd_completion);
 	return;
 }
 
-static void xhci_handle_cmd_eval_ctx(struct xhci_hcd *xhci, int slot_id,
-		struct xhci_event_cmd *event, u32 cmd_comp_code)
-{
-	struct xhci_virt_device *virt_dev;
-
-	virt_dev = xhci->devs[slot_id];
-	if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
-		return;
-	virt_dev->cmd_status = cmd_comp_code;
-	complete(&virt_dev->cmd_completion);
-}
-
-static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id,
-		u32 cmd_comp_code)
-{
-	xhci->devs[slot_id]->cmd_status = cmd_comp_code;
-	complete(&xhci->addr_dev);
-}
-
 static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
 		struct xhci_event_cmd *event)
 {
-	struct xhci_virt_device *virt_dev;
-
 	xhci_dbg(xhci, "Completed reset device command.\n");
-	virt_dev = xhci->devs[slot_id];
-	if (virt_dev)
-		handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
-	else
+	if (!xhci->devs[slot_id])
 		xhci_warn(xhci, "Reset device command completion "
 				"for disabled slot %u\n", slot_id);
 }
@@ -1529,6 +1228,116 @@ static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
 			NEC_FW_MINOR(le32_to_cpu(event->status)));
 }
 
+static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
+{
+	list_del(&cmd->cmd_list);
+
+	if (cmd->completion) {
+		cmd->status = status;
+		complete(cmd->completion);
+	} else {
+		kfree(cmd);
+	}
+}
+
+void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
+{
+	struct xhci_command *cur_cmd, *tmp_cmd;
+	list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
+		xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
+}
+
+/*
+ * Turn all commands on command ring with status set to "aborted" to no-op trbs.
+ * If there are other commands waiting then restart the ring and kick the timer.
+ * This must be called with command ring stopped and xhci->lock held.
+ */
+static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
+					 struct xhci_command *cur_cmd)
+{
+	struct xhci_command *i_cmd, *tmp_cmd;
+	u32 cycle_state;
+
+	/* Turn all aborted commands in list to no-ops, then restart */
+	list_for_each_entry_safe(i_cmd, tmp_cmd, &xhci->cmd_list,
+				 cmd_list) {
+
+		if (i_cmd->status != COMP_CMD_ABORT)
+			continue;
+
+		i_cmd->status = COMP_CMD_STOP;
+
+		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
+			 i_cmd->command_trb);
+		/* get cycle state from the original cmd trb */
+		cycle_state = le32_to_cpu(
+			i_cmd->command_trb->generic.field[3]) & TRB_CYCLE;
+		/* modify the command trb to no-op command */
+		i_cmd->command_trb->generic.field[0] = 0;
+		i_cmd->command_trb->generic.field[1] = 0;
+		i_cmd->command_trb->generic.field[2] = 0;
+		i_cmd->command_trb->generic.field[3] = cpu_to_le32(
+			TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
+
+		/*
+		 * caller waiting for completion is called when command
+		 * completion event is received for these no-op commands
+		 */
+	}
+
+	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
+
+	/* ring command ring doorbell to restart the command ring */
+	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
+	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
+		xhci->current_cmd = cur_cmd;
+		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+		xhci_ring_cmd_db(xhci);
+	}
+	return;
+}
+
+
+void xhci_handle_command_timeout(unsigned long data)
+{
+	struct xhci_hcd *xhci;
+	int ret;
+	unsigned long flags;
+	u64 hw_ring_state;
+	struct xhci_command *cur_cmd = NULL;
+	xhci = (struct xhci_hcd *) data;
+
+	/* mark this command to be cancelled */
+	spin_lock_irqsave(&xhci->lock, flags);
+	if (xhci->current_cmd) {
+		cur_cmd = xhci->current_cmd;
+		cur_cmd->status = COMP_CMD_ABORT;
+	}
+
+
+	/* Make sure command ring is running before aborting it */
+	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
+	    (hw_ring_state & CMD_RING_RUNNING)) {
+
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_dbg(xhci, "Command timeout\n");
+		ret = xhci_abort_cmd_ring(xhci);
+		if (unlikely(ret == -ESHUTDOWN)) {
+			xhci_err(xhci, "Abort command ring failed\n");
+			xhci_cleanup_command_queue(xhci);
+			usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
+			xhci_dbg(xhci, "xHCI host controller is dead.\n");
+		}
+		return;
+	}
+	/* command timeout on stopped ring, ring can't be aborted */
+	xhci_dbg(xhci, "Command timeout on stopped ring\n");
+	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return;
+}
+
 static void handle_cmd_completion(struct xhci_hcd *xhci,
 		struct xhci_event_cmd *event)
 {
@@ -1537,6 +1346,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 	dma_addr_t cmd_dequeue_dma;
 	u32 cmd_comp_code;
 	union xhci_trb *cmd_trb;
+	struct xhci_command *cmd;
 	u32 cmd_type;
 
 	cmd_dma = le64_to_cpu(event->cmd_trb);
@@ -1554,26 +1364,35 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		return;
 	}
 
+	cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
+
+	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
+		xhci_err(xhci,
+			 "Command completion event does not match command\n");
+		return;
+	}
+
+	del_timer(&xhci->cmd_timer);
+
 	trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
 
 	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
-	if (cmd_comp_code == COMP_CMD_ABORT || cmd_comp_code == COMP_CMD_STOP) {
-		/* If the return value is 0, we think the trb pointed by
-		 * command ring dequeue pointer is a good trb. The good
-		 * trb means we don't want to cancel the trb, but it have
-		 * been stopped by host. So we should handle it normally.
-		 * Otherwise, driver should invoke inc_deq() and return.
-		 */
-		if (handle_stopped_cmd_ring(xhci, cmd_comp_code)) {
-			inc_deq(xhci, xhci->cmd_ring);
-			return;
-		}
-		/* There is no command to handle if we get a stop event when the
-		 * command ring is empty, event->cmd_trb points to the next
-		 * unset command
-		 */
-		if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
-			return;
+
+	/* If CMD ring stopped we own the trbs between enqueue and dequeue */
+	if (cmd_comp_code == COMP_CMD_STOP) {
+		xhci_handle_stopped_cmd_ring(xhci, cmd);
+		return;
+	}
+	/*
+	 * Host aborted the command ring, check if the current command was
+	 * supposed to be aborted, otherwise continue normally.
+	 * The command ring is stopped now, but the xHC will issue a Command
+	 * Ring Stopped event which will cause us to restart it.
+	 */
+	if (cmd_comp_code == COMP_CMD_ABORT) {
+		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+		if (cmd->status == COMP_CMD_ABORT)
+			goto event_handled;
 	}
 
 	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
@@ -1585,13 +1404,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		xhci_handle_cmd_disable_slot(xhci, slot_id);
 		break;
 	case TRB_CONFIG_EP:
-		xhci_handle_cmd_config_ep(xhci, slot_id, event, cmd_comp_code);
+		if (!cmd->completion)
+			xhci_handle_cmd_config_ep(xhci, slot_id, event,
+					cmd_comp_code);
 		break;
 	case TRB_EVAL_CONTEXT:
-		xhci_handle_cmd_eval_ctx(xhci, slot_id, event, cmd_comp_code);
 		break;
 	case TRB_ADDR_DEV:
-		xhci_handle_cmd_addr_dev(xhci, slot_id, cmd_comp_code);
 		break;
 	case TRB_STOP_RING:
 		WARN_ON(slot_id != TRB_TO_SLOT_ID(
@@ -1604,6 +1423,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
 		break;
 	case TRB_CMD_NOOP:
+		/* Is this an aborted command turned to NO-OP? */
+		if (cmd->status == COMP_CMD_STOP)
+			cmd_comp_code = COMP_CMD_STOP;
 		break;
 	case TRB_RESET_EP:
 		WARN_ON(slot_id != TRB_TO_SLOT_ID(
@@ -1623,6 +1445,17 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		xhci->error_bitmask |= 1 << 6;
 		break;
 	}
+
+	/* restart timer if this wasn't the last command */
+	if (cmd->cmd_list.next != &xhci->cmd_list) {
+		xhci->current_cmd = list_entry(cmd->cmd_list.next,
+					       struct xhci_command, cmd_list);
+		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+	}
+
+event_handled:
+	xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
+
 	inc_deq(xhci, xhci->cmd_ring);
 }
 
@@ -1938,11 +1771,16 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
 		struct xhci_td *td, union xhci_trb *event_trb)
 {
 	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+	struct xhci_command *command;
+	command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+	if (!command)
+		return;
+
 	ep->ep_state |= EP_HALTED;
 	ep->stopped_td = td;
 	ep->stopped_stream = stream_id;
 
-	xhci_queue_reset_ep(xhci, slot_id, ep_index);
+	xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
 	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
 
 	ep->stopped_td = NULL;
@@ -2654,7 +2492,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		 * successful event after a short transfer.
 		 * Ignore it.
 		 */
-		if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && 
+		if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
 				ep_ring->last_td_was_short) {
 			ep_ring->last_td_was_short = false;
 			ret = 0;
@@ -3996,11 +3834,14 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
  * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
  * because the command event handler may want to resubmit a failed command.
  */
-static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
-		u32 field3, u32 field4, bool command_must_succeed)
+static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		u32 field1, u32 field2,
+		u32 field3, u32 field4, bool command_must_succeed)
 {
 	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
 	int ret;
+	if (xhci->xhc_state & XHCI_STATE_DYING)
+		return -ESHUTDOWN;
 
 	if (!command_must_succeed)
 		reserved_trbs++;
@@ -4014,57 +3855,71 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
 				"unfailable commands failed.\n");
 		return ret;
 	}
+
+	cmd->command_trb = xhci->cmd_ring->enqueue;
+	list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
+
+	/* if there are no other commands queued we start the timeout timer */
+	if (xhci->cmd_list.next == &cmd->cmd_list &&
+	    !timer_pending(&xhci->cmd_timer)) {
+		xhci->current_cmd = cmd;
+		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
+	}
+
 	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
 			field4 | xhci->cmd_ring->cycle_state);
 	return 0;
 }
 
 /* Queue a slot enable or disable request on the command ring */
-int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
+int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		u32 trb_type, u32 slot_id)
 {
-	return queue_command(xhci, 0, 0, 0,
+	return queue_command(xhci, cmd, 0, 0, 0,
 			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
 }
 
 /* Queue an address device command TRB */
-int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
-		u32 slot_id, enum xhci_setup_dev setup)
+int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
 {
-	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
 			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
 			| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
 }
 
-int xhci_queue_vendor_command(struct xhci_hcd *xhci,
+int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
 		u32 field1, u32 field2, u32 field3, u32 field4)
 {
-	return queue_command(xhci, field1, field2, field3, field4, false);
+	return queue_command(xhci, cmd, field1, field2, field3, field4, false);
 }
 
 /* Queue a reset device command TRB */
-int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
+int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		u32 slot_id)
 {
-	return queue_command(xhci, 0, 0, 0,
+	return queue_command(xhci, cmd, 0, 0, 0,
 			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
 			false);
 }
 
 /* Queue a configure endpoint command TRB */
-int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
+		struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
 		u32 slot_id, bool command_must_succeed)
 {
-	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
 			upper_32_bits(in_ctx_ptr), 0,
 			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
 			command_must_succeed);
 }
 
 /* Queue an evaluate context command TRB */
-int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
-		u32 slot_id, bool command_must_succeed)
+int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
 {
-	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
 			upper_32_bits(in_ctx_ptr), 0,
 			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
 			command_must_succeed);
@@ -4074,25 +3929,26 @@ int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
  * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
  * activity on an endpoint that is about to be suspended.
 */
-int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
-		unsigned int ep_index, int suspend)
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		int slot_id, unsigned int ep_index, int suspend)
 {
 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
 	u32 type = TRB_TYPE(TRB_STOP_RING);
 	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
 
-	return queue_command(xhci, 0, 0, 0,
+	return queue_command(xhci, cmd, 0, 0, 0,
 			trb_slot_id | trb_ep_index | type | trb_suspend, false);
 }
 
 /* Set Transfer Ring Dequeue Pointer command.
  * This should not be used for endpoints that have streams enabled.
  */
-static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
-		unsigned int ep_index, unsigned int stream_id,
-		struct xhci_segment *deq_seg,
-		union xhci_trb *deq_ptr, u32 cycle_state)
+static int queue_set_tr_deq(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		int slot_id,
+		unsigned int ep_index, unsigned int stream_id,
+		struct xhci_segment *deq_seg,
+		union xhci_trb *deq_ptr, u32 cycle_state)
 {
 	dma_addr_t addr;
 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
@@ -4119,18 +3975,19 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 	ep->queued_deq_ptr = deq_ptr;
 	if (stream_id)
 		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
-	return queue_command(xhci, lower_32_bits(addr) | trb_sct | cycle_state,
+	return queue_command(xhci, cmd,
+			lower_32_bits(addr) | trb_sct | cycle_state,
 			upper_32_bits(addr), trb_stream_id,
 			trb_slot_id | trb_ep_index | type, false);
 }
 
-int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
-		unsigned int ep_index)
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
+		int slot_id, unsigned int ep_index)
 {
 	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
 	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
 	u32 type = TRB_TYPE(TRB_RESET_EP);
 
-	return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
-			false);
+	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type, false);
 }