Diffstat (limited to 'drivers/usb/host/xhci-mem.c')
-rw-r--r--   drivers/usb/host/xhci-mem.c   187
1 file changed, 73 insertions(+), 114 deletions(-)
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index df6978abd7e6..b150360d1e78 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -26,6 +26,7 @@
#include <linux/dmapool.h>
#include "xhci.h"
+#include "xhci-trace.h"
/*
* Allocates a generic ring segment from the ring pool, sets the dma address,
@@ -347,7 +348,8 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
return -ENOMEM;
xhci_link_rings(xhci, ring, first, last, num_segs);
- xhci_dbg(xhci, "ring expansion succeed, now has %d segments\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
+ "ring expansion succeed, now has %d segments",
ring->num_segs);
return 0;
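Every hunk below follows the same pattern: a plain xhci_dbg() call, which only reaches the kernel log, becomes an xhci_dbg_trace() call that also takes a trace function (trace_xhci_dbg_ring_expansion, trace_xhci_dbg_init, trace_xhci_dbg_context_change), so the same formatted message can be captured as a trace event as well. The helper itself is not defined in this file; as a rough sketch only (details assumed, not taken from this diff), it can be a small va_format wrapper:

    /* Sketch of a combined debug/trace helper.  Assumes "xhci.h" for
     * struct xhci_hcd and xhci_dbg(), and <linux/kernel.h> for
     * struct va_format.  Format the message once, send it down the
     * normal xhci_dbg() path, then hand it to whichever trace event
     * the caller selected.
     */
    void xhci_dbg_trace(struct xhci_hcd *xhci,
                        void (*trace)(struct va_format *),
                        const char *fmt, ...)
    {
            struct va_format vaf;
            va_list args;

            va_start(args, fmt);
            vaf.fmt = fmt;
            vaf.va = &args;
            xhci_dbg(xhci, "%pV\n", &vaf);   /* %pV expands the saved va_list */
            trace(&vaf);
            va_end(args);
    }

This is also why the converted format strings drop their trailing "\n": with a wrapper of this shape the newline is appended once, centrally, instead of in every caller.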
@@ -481,17 +483,6 @@ struct xhci_ring *xhci_dma_to_transfer_ring(
return ep->ring;
}
-/* Only use this when you know stream_info is valid */
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-static struct xhci_ring *dma_to_stream_ring(
- struct xhci_stream_info *stream_info,
- u64 address)
-{
- return radix_tree_lookup(&stream_info->trb_address_map,
- address >> TRB_SEGMENT_SHIFT);
-}
-#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */
-
struct xhci_ring *xhci_stream_id_to_ring(
struct xhci_virt_device *dev,
unsigned int ep_index,
@@ -509,58 +500,6 @@ struct xhci_ring *xhci_stream_id_to_ring(
return ep->stream_info->stream_rings[stream_id];
}
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-static int xhci_test_radix_tree(struct xhci_hcd *xhci,
- unsigned int num_streams,
- struct xhci_stream_info *stream_info)
-{
- u32 cur_stream;
- struct xhci_ring *cur_ring;
- u64 addr;
-
- for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
- struct xhci_ring *mapped_ring;
- int trb_size = sizeof(union xhci_trb);
-
- cur_ring = stream_info->stream_rings[cur_stream];
- for (addr = cur_ring->first_seg->dma;
- addr < cur_ring->first_seg->dma + TRB_SEGMENT_SIZE;
- addr += trb_size) {
- mapped_ring = dma_to_stream_ring(stream_info, addr);
- if (cur_ring != mapped_ring) {
- xhci_warn(xhci, "WARN: DMA address 0x%08llx "
- "didn't map to stream ID %u; "
- "mapped to ring %p\n",
- (unsigned long long) addr,
- cur_stream,
- mapped_ring);
- return -EINVAL;
- }
- }
- /* One TRB after the end of the ring segment shouldn't return a
- * pointer to the current ring (although it may be a part of a
- * different ring).
- */
- mapped_ring = dma_to_stream_ring(stream_info, addr);
- if (mapped_ring != cur_ring) {
- /* One TRB before should also fail */
- addr = cur_ring->first_seg->dma - trb_size;
- mapped_ring = dma_to_stream_ring(stream_info, addr);
- }
- if (mapped_ring == cur_ring) {
- xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx "
- "mapped to valid stream ID %u; "
- "mapped ring = %p\n",
- (unsigned long long) addr,
- cur_stream,
- mapped_ring);
- return -EINVAL;
- }
- }
- return 0;
-}
-#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */
-
/*
* Change an endpoint's internal structure so it supports stream IDs. The
* number of requested streams includes stream 0, which cannot be used by device
@@ -687,13 +626,6 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
* was any other way, the host controller would assume the ring is
* "empty" and wait forever for data to be queued to that stream ID).
*/
-#if XHCI_DEBUG
- /* Do a little test on the radix tree to make sure it returns the
- * correct values.
- */
- if (xhci_test_radix_tree(xhci, num_streams, stream_info))
- goto cleanup_rings;
-#endif
return stream_info;
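The hunk above drops the XHCI_DEBUG-only call into the radix-tree self-test whose helpers (dma_to_stream_ring(), xhci_test_radix_tree()) were removed earlier. The property that test asserted is plain address arithmetic: every DMA address inside a ring segment shifts down to the same radix-tree key, while an address one TRB before or after the segment falls into a neighbouring key. A stand-alone, user-space sketch of that arithmetic (the constants mirror the driver's 4 KiB segments of 256 sixteen-byte TRBs; all names here are illustrative only):

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative stand-ins for the driver's segment geometry. */
    #define TRB_SIZE           16
    #define TRB_SEGMENT_SIZE   (256 * TRB_SIZE)   /* 4 KiB per segment */
    #define TRB_SEGMENT_SHIFT  12                 /* log2 of segment size */

    int main(void)
    {
            uint64_t seg_dma = 0x12345000;                       /* example base */
            uint64_t last    = seg_dma + TRB_SEGMENT_SIZE - TRB_SIZE;
            uint64_t after   = seg_dma + TRB_SEGMENT_SIZE;       /* one TRB past */
            uint64_t before  = seg_dma - TRB_SIZE;               /* one TRB before */

            /* In-segment addresses share one key; the out-of-range ones do not,
             * which is what the removed self-test checked ring by ring.
             */
            printf("key(first)=%llx key(last)=%llx key(after)=%llx key(before)=%llx\n",
                   (unsigned long long)(seg_dma >> TRB_SEGMENT_SHIFT),
                   (unsigned long long)(last   >> TRB_SEGMENT_SHIFT),
                   (unsigned long long)(after  >> TRB_SEGMENT_SHIFT),
                   (unsigned long long)(before >> TRB_SEGMENT_SHIFT));
            return 0;
    }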
@@ -731,7 +663,8 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
* fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9, etc.
*/
max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
- xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+ "Setting number of stream ctx array entries to %u",
1 << (max_primary_streams + 1));
ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
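The MaxPStreams arithmetic in this hunk is easiest to see with a concrete value. A self-contained user-space check, using a naive stand-in for the kernel's fls() (names here are illustrative only):

    #include <stdio.h>

    /* Naive user-space stand-in for the kernel's fls(): 1-based index of the
     * most significant set bit, with fls(0) == 0.
     */
    static unsigned int fls_stub(unsigned int x)
    {
            unsigned int r = 0;

            while (x) {
                    x >>= 1;
                    r++;
            }
            return r;
    }

    int main(void)
    {
            unsigned int num_stream_ctxs = 16;   /* example: 16 context entries */
            unsigned int max_primary_streams = fls_stub(num_stream_ctxs) - 2;

            /* 2^(MaxPStreams + 1) must reproduce the array size: 2^(3+1) = 16,
             * which is also the value the converted debug message prints.
             */
            printf("MaxPStreams = %u, stream ctx array entries = %u\n",
                   max_primary_streams, 1u << (max_primary_streams + 1));
            return 0;
    }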
@@ -1613,7 +1546,8 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
struct device *dev = xhci_to_hcd(xhci)->self.controller;
int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
- xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Allocating %d scratchpad buffers", num_sp);
if (!num_sp)
return 0;
@@ -1770,11 +1704,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
dma_free_coherent(&pdev->dev, size,
xhci->erst.entries, xhci->erst.erst_dma_addr);
xhci->erst.entries = NULL;
- xhci_dbg(xhci, "Freed ERST\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
if (xhci->event_ring)
xhci_ring_free(xhci, xhci->event_ring);
xhci->event_ring = NULL;
- xhci_dbg(xhci, "Freed event ring\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
if (xhci->lpm_command)
xhci_free_command(xhci, xhci->lpm_command);
@@ -1782,7 +1716,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
if (xhci->cmd_ring)
xhci_ring_free(xhci, xhci->cmd_ring);
xhci->cmd_ring = NULL;
- xhci_dbg(xhci, "Freed command ring\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
list_for_each_entry_safe(cur_cd, next_cd,
&xhci->cancel_cmd_list, cancel_cmd_list) {
list_del(&cur_cd->cancel_cmd_list);
@@ -1795,22 +1729,24 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
if (xhci->segment_pool)
dma_pool_destroy(xhci->segment_pool);
xhci->segment_pool = NULL;
- xhci_dbg(xhci, "Freed segment pool\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");
if (xhci->device_pool)
dma_pool_destroy(xhci->device_pool);
xhci->device_pool = NULL;
- xhci_dbg(xhci, "Freed device context pool\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");
if (xhci->small_streams_pool)
dma_pool_destroy(xhci->small_streams_pool);
xhci->small_streams_pool = NULL;
- xhci_dbg(xhci, "Freed small stream array pool\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Freed small stream array pool");
if (xhci->medium_streams_pool)
dma_pool_destroy(xhci->medium_streams_pool);
xhci->medium_streams_pool = NULL;
- xhci_dbg(xhci, "Freed medium stream array pool\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Freed medium stream array pool");
if (xhci->dcbaa)
dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
@@ -2036,8 +1972,9 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
* there might be more events to service.
*/
temp &= ~ERST_EHB;
- xhci_dbg(xhci, "// Write event ring dequeue pointer, "
- "preserving EHB bit\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Write event ring dequeue pointer, "
+ "preserving EHB bit");
xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
&xhci->ir_set->erst_dequeue);
}
@@ -2060,8 +1997,9 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
temp = xhci_readl(xhci, addr + 2);
port_offset = XHCI_EXT_PORT_OFF(temp);
port_count = XHCI_EXT_PORT_COUNT(temp);
- xhci_dbg(xhci, "Ext Cap %p, port offset = %u, "
- "count = %u, revision = 0x%x\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Ext Cap %p, port offset = %u, "
+ "count = %u, revision = 0x%x",
addr, port_offset, port_count, major_revision);
/* Port count includes the current port offset */
if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
@@ -2075,15 +2013,18 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
/* Check the host's USB2 LPM capability */
if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
(temp & XHCI_L1C)) {
- xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "xHCI 0.96: support USB2 software lpm");
xhci->sw_lpm_support = 1;
}
if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
- xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "xHCI 1.0: support USB2 software lpm");
xhci->sw_lpm_support = 1;
if (temp & XHCI_HLC) {
- xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "xHCI 1.0: support USB2 hardware lpm");
xhci->hw_lpm_support = 1;
}
}
@@ -2207,18 +2148,21 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
xhci_warn(xhci, "No ports on the roothubs?\n");
return -ENODEV;
}
- xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Found %u USB 2.0 ports and %u USB 3.0 ports.",
xhci->num_usb2_ports, xhci->num_usb3_ports);
/* Place limits on the number of roothub ports so that the hub
* descriptors aren't longer than the USB core will allocate.
*/
if (xhci->num_usb3_ports > 15) {
- xhci_dbg(xhci, "Limiting USB 3.0 roothub ports to 15.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Limiting USB 3.0 roothub ports to 15.");
xhci->num_usb3_ports = 15;
}
if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
- xhci_dbg(xhci, "Limiting USB 2.0 roothub ports to %u.\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Limiting USB 2.0 roothub ports to %u.",
USB_MAXCHILDREN);
xhci->num_usb2_ports = USB_MAXCHILDREN;
}
@@ -2243,8 +2187,9 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
xhci->usb2_ports[port_index] =
&xhci->op_regs->port_status_base +
NUM_PORT_REGS*i;
- xhci_dbg(xhci, "USB 2.0 port at index %u, "
- "addr = %p\n", i,
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "USB 2.0 port at index %u, "
+ "addr = %p", i,
xhci->usb2_ports[port_index]);
port_index++;
if (port_index == xhci->num_usb2_ports)
@@ -2263,8 +2208,9 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
xhci->usb3_ports[port_index] =
&xhci->op_regs->port_status_base +
NUM_PORT_REGS*i;
- xhci_dbg(xhci, "USB 3.0 port at index %u, "
- "addr = %p\n", i,
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "USB 3.0 port at index %u, "
+ "addr = %p", i,
xhci->usb3_ports[port_index]);
port_index++;
if (port_index == xhci->num_usb3_ports)
@@ -2288,32 +2234,35 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
INIT_LIST_HEAD(&xhci->cancel_cmd_list);
page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
- xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Supported page size register = 0x%x", page_size);
for (i = 0; i < 16; i++) {
if ((0x1 & page_size) != 0)
break;
page_size = page_size >> 1;
}
if (i < 16)
- xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Supported page size of %iK", (1 << (i+12)) / 1024);
else
xhci_warn(xhci, "WARN: no supported page size\n");
/* Use 4K pages, since that's common and the minimum the HC supports */
xhci->page_shift = 12;
xhci->page_size = 1 << xhci->page_shift;
- xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "HCD page size set to %iK", xhci->page_size / 1024);
/*
* Program the Number of Device Slots Enabled field in the CONFIG
* register with the max value of slots the HC can handle.
*/
val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
- xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
- (unsigned int) val);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// xHC can handle at most %d device slots.", val);
val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
val |= (val2 & ~HCS_SLOTS_MASK);
- xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
- (unsigned int) val);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Setting Max device slots reg = 0x%x.", val);
xhci_writel(xhci, val, &xhci->op_regs->config_reg);
/*
@@ -2326,7 +2275,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
goto fail;
memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
xhci->dcbaa->dma = dma;
- xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Device context base array address = 0x%llx (DMA), %p (virt)",
(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
@@ -2365,8 +2315,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
if (!xhci->cmd_ring)
goto fail;
- xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
- xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Allocated command ring at %p", xhci->cmd_ring);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
(unsigned long long)xhci->cmd_ring->first_seg->dma);
/* Set the address in the Command Ring Control register */
@@ -2374,7 +2325,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
xhci->cmd_ring->cycle_state;
- xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Setting command ring address to 0x%x", val);
xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
xhci_dbg_cmd_ptrs(xhci);
@@ -2390,8 +2342,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
val = xhci_readl(xhci, &xhci->cap_regs->db_off);
val &= DBOFF_MASK;
- xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
- " from cap regs base addr\n", val);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Doorbell array is located at offset 0x%x"
+ " from cap regs base addr", val);
xhci->dba = (void __iomem *) xhci->cap_regs + val;
xhci_dbg_regs(xhci);
xhci_print_run_regs(xhci);
@@ -2402,7 +2355,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* Event ring setup: Allocate a normal ring, but also setup
* the event ring segment table (ERST). Section 4.9.3.
*/
- xhci_dbg(xhci, "// Allocating event ring\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
flags);
if (!xhci->event_ring)
@@ -2415,13 +2368,15 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
GFP_KERNEL);
if (!xhci->erst.entries)
goto fail;
- xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Allocated event ring segment table at 0x%llx",
(unsigned long long)dma);
memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
xhci->erst.num_entries = ERST_NUM_SEGS;
xhci->erst.erst_dma_addr = dma;
- xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
xhci->erst.num_entries,
xhci->erst.entries,
(unsigned long long)xhci->erst.erst_dma_addr);
@@ -2439,13 +2394,16 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
val = xhci_readl(xhci, &xhci->ir_set->erst_size);
val &= ERST_SIZE_MASK;
val |= ERST_NUM_SEGS;
- xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Write ERST size = %i to ir_set 0 (some bits preserved)",
val);
xhci_writel(xhci, val, &xhci->ir_set->erst_size);
- xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Set ERST entries to point to event ring.");
/* set the segment table base address */
- xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Set ERST base address for ir_set 0 = 0x%llx",
(unsigned long long)xhci->erst.erst_dma_addr);
val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
val_64 &= ERST_PTR_MASK;
@@ -2454,7 +2412,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
/* Set the event ring dequeue address */
xhci_set_hc_event_deq(xhci);
- xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Wrote ERST address to ir_set 0.");
xhci_print_ir_set(xhci, 0);
/*