Diffstat (limited to 'drivers/usb/cdns3')
-rw-r--r--  drivers/usb/cdns3/Kconfig                                                      60
-rw-r--r--  drivers/usb/cdns3/Makefile                                                     43
-rw-r--r--  drivers/usb/cdns3/cdns3-debug.h (renamed from drivers/usb/cdns3/debug.h)        0
-rw-r--r--  drivers/usb/cdns3/cdns3-ep0.c (renamed from drivers/usb/cdns3/ep0.c)            8
-rw-r--r--  drivers/usb/cdns3/cdns3-gadget.c (renamed from drivers/usb/cdns3/gadget.c)     34
-rw-r--r--  drivers/usb/cdns3/cdns3-gadget.h (renamed from drivers/usb/cdns3/gadget.h)      0
-rw-r--r--  drivers/usb/cdns3/cdns3-imx.c                                                   2
-rw-r--r--  drivers/usb/cdns3/cdns3-plat.c                                                315
-rw-r--r--  drivers/usb/cdns3/cdns3-ti.c                                                    1
-rw-r--r--  drivers/usb/cdns3/cdns3-trace.c (renamed from drivers/usb/cdns3/trace.c)        2
-rw-r--r--  drivers/usb/cdns3/cdns3-trace.h (renamed from drivers/usb/cdns3/trace.h)        6
-rw-r--r--  drivers/usb/cdns3/cdnsp-debug.h                                               583
-rw-r--r--  drivers/usb/cdns3/cdnsp-ep0.c                                                 489
-rw-r--r--  drivers/usb/cdns3/cdnsp-gadget.c                                             2009
-rw-r--r--  drivers/usb/cdns3/cdnsp-gadget.h                                             1601
-rw-r--r--  drivers/usb/cdns3/cdnsp-mem.c                                                1336
-rw-r--r--  drivers/usb/cdns3/cdnsp-pci.c                                                 254
-rw-r--r--  drivers/usb/cdns3/cdnsp-ring.c                                               2438
-rw-r--r--  drivers/usb/cdns3/cdnsp-trace.c                                                12
-rw-r--r--  drivers/usb/cdns3/cdnsp-trace.h                                               830
-rw-r--r--  drivers/usb/cdns3/core.c                                                      455
-rw-r--r--  drivers/usb/cdns3/core.h                                                       65
-rw-r--r--  drivers/usb/cdns3/drd.c                                                       224
-rw-r--r--  drivers/usb/cdns3/drd.h                                                        94
-rw-r--r--  drivers/usb/cdns3/gadget-export.h                                              22
-rw-r--r--  drivers/usb/cdns3/host-export.h                                                18
-rw-r--r--  drivers/usb/cdns3/host.c                                                       26
27 files changed, 10404 insertions, 523 deletions
diff --git a/drivers/usb/cdns3/Kconfig b/drivers/usb/cdns3/Kconfig
index 84716d216ae5..b98ca0a1352a 100644
--- a/drivers/usb/cdns3/Kconfig
+++ b/drivers/usb/cdns3/Kconfig
@@ -1,14 +1,28 @@
-config USB_CDNS3
- tristate "Cadence USB3 Dual-Role Controller"
+config USB_CDNS_SUPPORT
+ tristate "Cadence USB Support"
depends on USB_SUPPORT && (USB || USB_GADGET) && HAS_DMA
select USB_XHCI_PLATFORM if USB_XHCI_HCD
select USB_ROLE_SWITCH
help
+ Say Y here if your system has a Cadence USBSS or USBSSP
+ dual-role controller.
+ It supports: dual-role switch, Host-only, and Peripheral-only.
+
+config USB_CDNS_HOST
+ bool
+
+if USB_CDNS_SUPPORT
+
+config USB_CDNS3
+ tristate "Cadence USB3 Dual-Role Controller"
+ depends on USB_CDNS_SUPPORT
+ help
Say Y here if your system has a Cadence USB3 dual-role controller.
It supports: dual-role switch, Host-only, and Peripheral-only.
If you choose to build this driver as a dynamically linked
module, the module will be called cdns3.ko.
+endif
if USB_CDNS3
@@ -25,6 +39,7 @@ config USB_CDNS3_GADGET
config USB_CDNS3_HOST
bool "Cadence USB3 host controller"
depends on USB=y || USB=USB_CDNS3
+ select USB_CDNS_HOST
help
Say Y here to enable host controller functionality of the
Cadence driver.
@@ -64,3 +79,44 @@ config USB_CDNS3_IMX
For example, imx8qm and imx8qxp.
endif
+
+if USB_CDNS_SUPPORT
+
+config USB_CDNSP_PCI
+ tristate "Cadence CDNSP Dual-Role Controller"
+ depends on USB_CDNS_SUPPORT && USB_PCI && ACPI
+ help
+ Say Y here if your system has a Cadence CDNSP dual-role controller.
+ It supports: dual-role switch, Host-only, and Peripheral-only.
+
+ If you choose to build this driver as a dynamically linked
+ module, the module will be called cdnsp.ko.
+endif
+
+if USB_CDNSP_PCI
+
+config USB_CDNSP_GADGET
+ bool "Cadence CDNSP device controller"
+ depends on USB_GADGET=y || USB_GADGET=USB_CDNSP_PCI
+ help
+ Say Y here to enable device controller functionality of the
+ Cadence CDNSP-DEV driver.
+
+ The Cadence CDNSP Device Controller in device mode is
+ very similar to the XHCI controller. Therefore some algorithms
+ used have been taken from the host driver.
+ This controller supports FS, HS, SS and SSP modes.
+ It doesn't support LS.
+
+config USB_CDNSP_HOST
+ bool "Cadence CDNSP host controller"
+ depends on USB=y || USB=USB_CDNSP_PCI
+ select USB_CDNS_HOST
+ help
+ Say Y here to enable host controller functionality of the
+ Cadence driver.
+
+ The host controller is compliant with XHCI, so it uses the
+ standard XHCI driver.
+
+endif
diff --git a/drivers/usb/cdns3/Makefile b/drivers/usb/cdns3/Makefile
index d47e341a6f39..61edb2f89276 100644
--- a/drivers/usb/cdns3/Makefile
+++ b/drivers/usb/cdns3/Makefile
@@ -1,18 +1,43 @@
# SPDX-License-Identifier: GPL-2.0
# define_trace.h needs to know how to find our header
-CFLAGS_trace.o := -I$(src)
+CFLAGS_cdns3-trace.o := -I$(src)
+CFLAGS_cdnsp-trace.o := -I$(src)
-cdns3-y := core.o drd.o
+cdns-usb-common-y := core.o drd.o
+cdns3-y := cdns3-plat.o
-obj-$(CONFIG_USB_CDNS3) += cdns3.o
-cdns3-$(CONFIG_USB_CDNS3_GADGET) += gadget.o ep0.o
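+# When the USB core is built as a module (CONFIG_USB=m), the shared core
+# code and the cdns3 driver cannot be built in, so force them to obj-m.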
+ifeq ($(CONFIG_USB),m)
+obj-m += cdns-usb-common.o
+obj-m += cdns3.o
+else
+obj-$(CONFIG_USB_CDNS_SUPPORT) += cdns-usb-common.o
+obj-$(CONFIG_USB_CDNS3) += cdns3.o
+endif
+
+cdns-usb-common-$(CONFIG_USB_CDNS_HOST) += host.o
+cdns3-$(CONFIG_USB_CDNS3_GADGET) += cdns3-gadget.o cdns3-ep0.o
ifneq ($(CONFIG_USB_CDNS3_GADGET),)
-cdns3-$(CONFIG_TRACING) += trace.o
+cdns3-$(CONFIG_TRACING) += cdns3-trace.o
+endif
+
+obj-$(CONFIG_USB_CDNS3_PCI_WRAP) += cdns3-pci-wrap.o
+obj-$(CONFIG_USB_CDNS3_TI) += cdns3-ti.o
+obj-$(CONFIG_USB_CDNS3_IMX) += cdns3-imx.o
+
+cdnsp-udc-pci-y := cdnsp-pci.o
+
+ifdef CONFIG_USB_CDNSP_PCI
+ifeq ($(CONFIG_USB),m)
+obj-m += cdnsp-udc-pci.o
+else
+obj-$(CONFIG_USB_CDNSP_PCI) += cdnsp-udc-pci.o
+endif
endif
-cdns3-$(CONFIG_USB_CDNS3_HOST) += host.o
+cdnsp-udc-pci-$(CONFIG_USB_CDNSP_GADGET) += cdnsp-ring.o cdnsp-gadget.o \
+ cdnsp-mem.o cdnsp-ep0.o
-obj-$(CONFIG_USB_CDNS3_PCI_WRAP) += cdns3-pci-wrap.o
-obj-$(CONFIG_USB_CDNS3_TI) += cdns3-ti.o
-obj-$(CONFIG_USB_CDNS3_IMX) += cdns3-imx.o
+ifneq ($(CONFIG_USB_CDNSP_GADGET),)
+cdnsp-udc-pci-$(CONFIG_TRACING) += cdnsp-trace.o
+endif
diff --git a/drivers/usb/cdns3/debug.h b/drivers/usb/cdns3/cdns3-debug.h
index a5c6a29e1340..a5c6a29e1340 100644
--- a/drivers/usb/cdns3/debug.h
+++ b/drivers/usb/cdns3/cdns3-debug.h
diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/cdns3-ep0.c
index d3121a32cc68..9a17802275d5 100644
--- a/drivers/usb/cdns3/ep0.c
+++ b/drivers/usb/cdns3/cdns3-ep0.c
@@ -13,8 +13,8 @@
#include <linux/usb/composite.h>
#include <linux/iopoll.h>
-#include "gadget.h"
-#include "trace.h"
+#include "cdns3-gadget.h"
+#include "cdns3-trace.h"
static struct usb_endpoint_descriptor cdns3_gadget_ep0_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
@@ -364,7 +364,7 @@ static int cdns3_ep0_feature_handle_endpoint(struct cdns3_device *priv_dev,
if (le16_to_cpu(ctrl->wValue) != USB_ENDPOINT_HALT)
return -EINVAL;
- if (!(ctrl->wIndex & ~USB_DIR_IN))
+ if (!(le16_to_cpu(ctrl->wIndex) & ~USB_DIR_IN))
return 0;
index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex));
@@ -789,7 +789,7 @@ int cdns3_gadget_ep_set_wedge(struct usb_ep *ep)
return 0;
}
-const struct usb_ep_ops cdns3_gadget_ep0_ops = {
+static const struct usb_ep_ops cdns3_gadget_ep0_ops = {
.enable = cdns3_gadget_ep0_enable,
.disable = cdns3_gadget_ep0_disable,
.alloc_request = cdns3_gadget_ep_alloc_request,
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index 08a4e693c470..582bfeceedb4 100644
--- a/drivers/usb/cdns3/gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -63,8 +63,8 @@
#include "core.h"
#include "gadget-export.h"
-#include "gadget.h"
-#include "trace.h"
+#include "cdns3-gadget.h"
+#include "cdns3-trace.h"
#include "drd.h"
static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
@@ -1200,7 +1200,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
td_size = DIV_ROUND_UP(request->length,
priv_ep->endpoint.maxpacket);
if (priv_dev->gadget.speed == USB_SPEED_SUPER)
- trb->length = TRB_TDL_SS_SIZE(td_size);
+ trb->length = cpu_to_le32(TRB_TDL_SS_SIZE(td_size));
else
control |= TRB_TDL_HS_SIZE(td_size);
}
@@ -1247,10 +1247,10 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
priv_req->trb->control = cpu_to_le32(control);
if (sg_supported) {
- trb->control |= TRB_ISP;
+ trb->control |= cpu_to_le32(TRB_ISP);
/* Don't set chain bit for last TRB */
if (sg_iter < num_trb - 1)
- trb->control |= TRB_CHAIN;
+ trb->control |= cpu_to_le32(TRB_CHAIN);
s = sg_next(s);
}
@@ -1844,7 +1844,7 @@ __must_hold(&priv_dev->lock)
static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
{
struct cdns3_device *priv_dev = data;
- struct cdns3 *cdns = dev_get_drvdata(priv_dev->dev);
+ struct cdns *cdns = dev_get_drvdata(priv_dev->dev);
irqreturn_t ret = IRQ_NONE;
u32 reg;
@@ -3084,7 +3084,7 @@ static void cdns3_gadget_release(struct device *dev)
kfree(priv_dev);
}
-static void cdns3_gadget_exit(struct cdns3 *cdns)
+static void cdns3_gadget_exit(struct cdns *cdns)
{
struct cdns3_device *priv_dev;
@@ -3117,10 +3117,10 @@ static void cdns3_gadget_exit(struct cdns3 *cdns)
kfree(priv_dev->zlp_buf);
usb_put_gadget(&priv_dev->gadget);
cdns->gadget_dev = NULL;
- cdns3_drd_gadget_off(cdns);
+ cdns_drd_gadget_off(cdns);
}
-static int cdns3_gadget_start(struct cdns3 *cdns)
+static int cdns3_gadget_start(struct cdns *cdns)
{
struct cdns3_device *priv_dev;
u32 max_speed;
@@ -3240,7 +3240,7 @@ err1:
return ret;
}
-static int __cdns3_gadget_init(struct cdns3 *cdns)
+static int __cdns3_gadget_init(struct cdns *cdns)
{
int ret = 0;
@@ -3251,7 +3251,7 @@ static int __cdns3_gadget_init(struct cdns3 *cdns)
return ret;
}
- cdns3_drd_gadget_on(cdns);
+ cdns_drd_gadget_on(cdns);
pm_runtime_get_sync(cdns->dev);
ret = cdns3_gadget_start(cdns);
@@ -3277,7 +3277,7 @@ err0:
return ret;
}
-static int cdns3_gadget_suspend(struct cdns3 *cdns, bool do_wakeup)
+static int cdns3_gadget_suspend(struct cdns *cdns, bool do_wakeup)
__must_hold(&cdns->lock)
{
struct cdns3_device *priv_dev = cdns->gadget_dev;
@@ -3296,7 +3296,7 @@ __must_hold(&cdns->lock)
return 0;
}
-static int cdns3_gadget_resume(struct cdns3 *cdns, bool hibernated)
+static int cdns3_gadget_resume(struct cdns *cdns, bool hibernated)
{
struct cdns3_device *priv_dev = cdns->gadget_dev;
@@ -3311,13 +3311,13 @@ static int cdns3_gadget_resume(struct cdns3 *cdns, bool hibernated)
/**
* cdns3_gadget_init - initialize device structure
*
- * @cdns: cdns3 instance
+ * @cdns: cdns instance
*
* This function initializes the gadget.
*/
-int cdns3_gadget_init(struct cdns3 *cdns)
+int cdns3_gadget_init(struct cdns *cdns)
{
- struct cdns3_role_driver *rdrv;
+ struct cdns_role_driver *rdrv;
rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
if (!rdrv)
@@ -3327,7 +3327,7 @@ int cdns3_gadget_init(struct cdns3 *cdns)
rdrv->stop = cdns3_gadget_exit;
rdrv->suspend = cdns3_gadget_suspend;
rdrv->resume = cdns3_gadget_resume;
- rdrv->state = CDNS3_ROLE_STATE_INACTIVE;
+ rdrv->state = CDNS_ROLE_STATE_INACTIVE;
rdrv->name = "gadget";
cdns->roles[USB_ROLE_DEVICE] = rdrv;
diff --git a/drivers/usb/cdns3/gadget.h b/drivers/usb/cdns3/cdns3-gadget.h
index 21fa461c518e..21fa461c518e 100644
--- a/drivers/usb/cdns3/gadget.h
+++ b/drivers/usb/cdns3/cdns3-gadget.h
diff --git a/drivers/usb/cdns3/cdns3-imx.c b/drivers/usb/cdns3/cdns3-imx.c
index 7990fee03fe4..8f88eec0b0ea 100644
--- a/drivers/usb/cdns3/cdns3-imx.c
+++ b/drivers/usb/cdns3/cdns3-imx.c
@@ -250,7 +250,7 @@ static void cdns3_set_wakeup(struct cdns_imx *data, bool enable)
static int cdns_imx_platform_suspend(struct device *dev,
bool suspend, bool wakeup)
{
- struct cdns3 *cdns = dev_get_drvdata(dev);
+ struct cdns *cdns = dev_get_drvdata(dev);
struct device *parent = dev->parent;
struct cdns_imx *data = dev_get_drvdata(parent);
void __iomem *otg_regs = (void __iomem *)(cdns->otg_regs);
diff --git a/drivers/usb/cdns3/cdns3-plat.c b/drivers/usb/cdns3/cdns3-plat.c
new file mode 100644
index 000000000000..4b18e1c6a4bb
--- /dev/null
+++ b/drivers/usb/cdns3/cdns3-plat.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence USBSS DRD Driver.
+ *
+ * Copyright (C) 2018-2020 Cadence.
+ * Copyright (C) 2017-2018 NXP
+ * Copyright (C) 2019 Texas Instruments
+ *
+ *
+ * Author: Peter Chen <peter.chen@nxp.com>
+ * Pawel Laszczak <pawell@cadence.com>
+ * Roger Quadros <rogerq@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "core.h"
+#include "gadget-export.h"
+
+static int set_phy_power_on(struct cdns *cdns)
+{
+ int ret;
+
+ ret = phy_power_on(cdns->usb2_phy);
+ if (ret)
+ return ret;
+
+ ret = phy_power_on(cdns->usb3_phy);
+ if (ret)
+ phy_power_off(cdns->usb2_phy);
+
+ return ret;
+}
+
+static void set_phy_power_off(struct cdns *cdns)
+{
+ phy_power_off(cdns->usb3_phy);
+ phy_power_off(cdns->usb2_phy);
+}
+
+/**
+ * cdns3_plat_probe - probe for cdns3 core device
+ * @pdev: Pointer to cdns3 core platform device
+ *
+ * Returns 0 on success, otherwise a negative errno.
+ */
+static int cdns3_plat_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct cdns *cdns;
+ void __iomem *regs;
+ int ret;
+
+ cdns = devm_kzalloc(dev, sizeof(*cdns), GFP_KERNEL);
+ if (!cdns)
+ return -ENOMEM;
+
+ cdns->dev = dev;
+ cdns->pdata = dev_get_platdata(dev);
+
+ platform_set_drvdata(pdev, cdns);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "host");
+ if (!res) {
+ dev_err(dev, "missing host IRQ\n");
+ return -ENODEV;
+ }
+
+ cdns->xhci_res[0] = *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "xhci");
+ if (!res) {
+ dev_err(dev, "couldn't get xhci resource\n");
+ return -ENXIO;
+ }
+
+ cdns->xhci_res[1] = *res;
+
+ cdns->dev_irq = platform_get_irq_byname(pdev, "peripheral");
+
+ if (cdns->dev_irq < 0)
+ return cdns->dev_irq;
+
+ regs = devm_platform_ioremap_resource_byname(pdev, "dev");
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+ cdns->dev_regs = regs;
+
+ cdns->otg_irq = platform_get_irq_byname(pdev, "otg");
+ if (cdns->otg_irq < 0)
+ return cdns->otg_irq;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otg");
+ if (!res) {
+ dev_err(dev, "couldn't get otg resource\n");
+ return -ENXIO;
+ }
+
+ cdns->phyrst_a_enable = device_property_read_bool(dev, "cdns,phyrst-a-enable");
+
+ cdns->otg_res = *res;
+
+ cdns->wakeup_irq = platform_get_irq_byname_optional(pdev, "wakeup");
+ if (cdns->wakeup_irq == -EPROBE_DEFER)
+ return cdns->wakeup_irq;
+ else if (cdns->wakeup_irq == 0)
+ return -EINVAL;
+
+ if (cdns->wakeup_irq < 0) {
+ dev_dbg(dev, "couldn't get wakeup irq\n");
+ cdns->wakeup_irq = 0x0;
+ }
+
+ cdns->usb2_phy = devm_phy_optional_get(dev, "cdns3,usb2-phy");
+ if (IS_ERR(cdns->usb2_phy))
+ return PTR_ERR(cdns->usb2_phy);
+
+ ret = phy_init(cdns->usb2_phy);
+ if (ret)
+ return ret;
+
+ cdns->usb3_phy = devm_phy_optional_get(dev, "cdns3,usb3-phy");
+ if (IS_ERR(cdns->usb3_phy))
+ return PTR_ERR(cdns->usb3_phy);
+
+ ret = phy_init(cdns->usb3_phy);
+ if (ret)
+ goto err_phy3_init;
+
+ ret = set_phy_power_on(cdns);
+ if (ret)
+ goto err_phy_power_on;
+
+ cdns->gadget_init = cdns3_gadget_init;
+
+ ret = cdns_init(cdns);
+ if (ret)
+ goto err_cdns_init;
+
+ device_set_wakeup_capable(dev, true);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ if (!(cdns->pdata && (cdns->pdata->quirks & CDNS3_DEFAULT_PM_RUNTIME_ALLOW)))
+ pm_runtime_forbid(dev);
+
+ /*
+ * The controller needs less time between bus and controller suspend,
+ * and we also need a small delay to avoid frequently entering low
+ * power mode.
+ */
+ pm_runtime_set_autosuspend_delay(dev, 20);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_use_autosuspend(dev);
+
+ return 0;
+
+err_cdns_init:
+ set_phy_power_off(cdns);
+err_phy_power_on:
+ phy_exit(cdns->usb3_phy);
+err_phy3_init:
+ phy_exit(cdns->usb2_phy);
+
+ return ret;
+}
+
+/**
+ * cdns3_plat_remove - unbind drd driver and clean up
+ * @pdev: Pointer to Linux platform device
+ *
+ * Returns 0 on success, otherwise a negative errno.
+ */
+static int cdns3_plat_remove(struct platform_device *pdev)
+{
+ struct cdns *cdns = platform_get_drvdata(pdev);
+ struct device *dev = cdns->dev;
+
+ pm_runtime_get_sync(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ cdns_remove(cdns);
+ set_phy_power_off(cdns);
+ phy_exit(cdns->usb2_phy);
+ phy_exit(cdns->usb3_phy);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int cdns3_set_platform_suspend(struct device *dev,
+ bool suspend, bool wakeup)
+{
+ struct cdns *cdns = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (cdns->pdata && cdns->pdata->platform_suspend)
+ ret = cdns->pdata->platform_suspend(dev, suspend, wakeup);
+
+ return ret;
+}
+
+static int cdns3_controller_suspend(struct device *dev, pm_message_t msg)
+{
+ struct cdns *cdns = dev_get_drvdata(dev);
+ bool wakeup;
+ unsigned long flags;
+
+ if (cdns->in_lpm)
+ return 0;
+
+ if (PMSG_IS_AUTO(msg))
+ wakeup = true;
+ else
+ wakeup = device_may_wakeup(dev);
+
+ cdns3_set_platform_suspend(cdns->dev, true, wakeup);
+ set_phy_power_off(cdns);
+ spin_lock_irqsave(&cdns->lock, flags);
+ cdns->in_lpm = true;
+ spin_unlock_irqrestore(&cdns->lock, flags);
+ dev_dbg(cdns->dev, "%s ends\n", __func__);
+
+ return 0;
+}
+
+static int cdns3_controller_resume(struct device *dev, pm_message_t msg)
+{
+ struct cdns *cdns = dev_get_drvdata(dev);
+ int ret;
+ unsigned long flags;
+
+ if (!cdns->in_lpm)
+ return 0;
+
+ ret = set_phy_power_on(cdns);
+ if (ret)
+ return ret;
+
+ cdns3_set_platform_suspend(cdns->dev, false, false);
+
+ spin_lock_irqsave(&cdns->lock, flags);
+ cdns_resume(cdns, !PMSG_IS_AUTO(msg));
+ cdns->in_lpm = false;
+ spin_unlock_irqrestore(&cdns->lock, flags);
+ if (cdns->wakeup_pending) {
+ cdns->wakeup_pending = false;
+ enable_irq(cdns->wakeup_irq);
+ }
+ dev_dbg(cdns->dev, "%s ends\n", __func__);
+
+ return ret;
+}
+
+static int cdns3_plat_runtime_suspend(struct device *dev)
+{
+ return cdns3_controller_suspend(dev, PMSG_AUTO_SUSPEND);
+}
+
+static int cdns3_plat_runtime_resume(struct device *dev)
+{
+ return cdns3_controller_resume(dev, PMSG_AUTO_RESUME);
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int cdns3_plat_suspend(struct device *dev)
+{
+ struct cdns *cdns = dev_get_drvdata(dev);
+
+ cdns_suspend(cdns);
+
+ return cdns3_controller_suspend(dev, PMSG_SUSPEND);
+}
+
+static int cdns3_plat_resume(struct device *dev)
+{
+ return cdns3_controller_resume(dev, PMSG_RESUME);
+}
+#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops cdns3_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(cdns3_plat_suspend, cdns3_plat_resume)
+ SET_RUNTIME_PM_OPS(cdns3_plat_runtime_suspend,
+ cdns3_plat_runtime_resume, NULL)
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_cdns3_match[] = {
+ { .compatible = "cdns,usb3" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, of_cdns3_match);
+#endif
+
+static struct platform_driver cdns3_driver = {
+ .probe = cdns3_plat_probe,
+ .remove = cdns3_plat_remove,
+ .driver = {
+ .name = "cdns-usb3",
+ .of_match_table = of_match_ptr(of_cdns3_match),
+ .pm = &cdns3_pm_ops,
+ },
+};
+
+module_platform_driver(cdns3_driver);
+
+MODULE_ALIAS("platform:cdns3");
+MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence USB3 DRD Controller Driver");
diff --git a/drivers/usb/cdns3/cdns3-ti.c b/drivers/usb/cdns3/cdns3-ti.c
index 90e246601537..eccb1c766bba 100644
--- a/drivers/usb/cdns3/cdns3-ti.c
+++ b/drivers/usb/cdns3/cdns3-ti.c
@@ -214,6 +214,7 @@ static int cdns_ti_remove(struct platform_device *pdev)
static const struct of_device_id cdns_ti_of_match[] = {
{ .compatible = "ti,j721e-usb", },
+ { .compatible = "ti,am64-usb", },
{},
};
MODULE_DEVICE_TABLE(of, cdns_ti_of_match);
diff --git a/drivers/usb/cdns3/trace.c b/drivers/usb/cdns3/cdns3-trace.c
index 459fa72d9c74..b9858acaef02 100644
--- a/drivers/usb/cdns3/trace.c
+++ b/drivers/usb/cdns3/cdns3-trace.c
@@ -8,4 +8,4 @@
*/
#define CREATE_TRACE_POINTS
-#include "trace.h"
+#include "cdns3-trace.h"
diff --git a/drivers/usb/cdns3/trace.h b/drivers/usb/cdns3/cdns3-trace.h
index 0a2a3269bfac..8648c7a7a9dd 100644
--- a/drivers/usb/cdns3/trace.h
+++ b/drivers/usb/cdns3/cdns3-trace.h
@@ -19,8 +19,8 @@
#include <asm/byteorder.h>
#include <linux/usb/ch9.h>
#include "core.h"
-#include "gadget.h"
-#include "debug.h"
+#include "cdns3-gadget.h"
+#include "cdns3-debug.h"
#define CDNS3_MSG_MAX 500
@@ -565,6 +565,6 @@ DEFINE_EVENT(cdns3_log_request_handled, cdns3_request_handled,
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE trace
+#define TRACE_INCLUDE_FILE cdns3-trace
#include <trace/define_trace.h>
diff --git a/drivers/usb/cdns3/cdnsp-debug.h b/drivers/usb/cdns3/cdnsp-debug.h
new file mode 100644
index 000000000000..a8776df2d4e0
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-debug.h
@@ -0,0 +1,583 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+#ifndef __LINUX_CDNSP_DEBUG
+#define __LINUX_CDNSP_DEBUG
+
+static inline const char *cdnsp_trb_comp_code_string(u8 status)
+{
+ switch (status) {
+ case COMP_INVALID:
+ return "Invalid";
+ case COMP_SUCCESS:
+ return "Success";
+ case COMP_DATA_BUFFER_ERROR:
+ return "Data Buffer Error";
+ case COMP_BABBLE_DETECTED_ERROR:
+ return "Babble Detected";
+ case COMP_TRB_ERROR:
+ return "TRB Error";
+ case COMP_RESOURCE_ERROR:
+ return "Resource Error";
+ case COMP_NO_SLOTS_AVAILABLE_ERROR:
+ return "No Slots Available Error";
+ case COMP_INVALID_STREAM_TYPE_ERROR:
+ return "Invalid Stream Type Error";
+ case COMP_SLOT_NOT_ENABLED_ERROR:
+ return "Slot Not Enabled Error";
+ case COMP_ENDPOINT_NOT_ENABLED_ERROR:
+ return "Endpoint Not Enabled Error";
+ case COMP_SHORT_PACKET:
+ return "Short Packet";
+ case COMP_RING_UNDERRUN:
+ return "Ring Underrun";
+ case COMP_RING_OVERRUN:
+ return "Ring Overrun";
+ case COMP_VF_EVENT_RING_FULL_ERROR:
+ return "VF Event Ring Full Error";
+ case COMP_PARAMETER_ERROR:
+ return "Parameter Error";
+ case COMP_CONTEXT_STATE_ERROR:
+ return "Context State Error";
+ case COMP_EVENT_RING_FULL_ERROR:
+ return "Event Ring Full Error";
+ case COMP_INCOMPATIBLE_DEVICE_ERROR:
+ return "Incompatible Device Error";
+ case COMP_MISSED_SERVICE_ERROR:
+ return "Missed Service Error";
+ case COMP_COMMAND_RING_STOPPED:
+ return "Command Ring Stopped";
+ case COMP_COMMAND_ABORTED:
+ return "Command Aborted";
+ case COMP_STOPPED:
+ return "Stopped";
+ case COMP_STOPPED_LENGTH_INVALID:
+ return "Stopped - Length Invalid";
+ case COMP_STOPPED_SHORT_PACKET:
+ return "Stopped - Short Packet";
+ case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
+ return "Max Exit Latency Too Large Error";
+ case COMP_ISOCH_BUFFER_OVERRUN:
+ return "Isoch Buffer Overrun";
+ case COMP_EVENT_LOST_ERROR:
+ return "Event Lost Error";
+ case COMP_UNDEFINED_ERROR:
+ return "Undefined Error";
+ case COMP_INVALID_STREAM_ID_ERROR:
+ return "Invalid Stream ID Error";
+ default:
+ return "Unknown!!";
+ }
+}
+
+static inline const char *cdnsp_trb_type_string(u8 type)
+{
+ switch (type) {
+ case TRB_NORMAL:
+ return "Normal";
+ case TRB_SETUP:
+ return "Setup Stage";
+ case TRB_DATA:
+ return "Data Stage";
+ case TRB_STATUS:
+ return "Status Stage";
+ case TRB_ISOC:
+ return "Isoch";
+ case TRB_LINK:
+ return "Link";
+ case TRB_EVENT_DATA:
+ return "Event Data";
+ case TRB_TR_NOOP:
+ return "No-Op";
+ case TRB_ENABLE_SLOT:
+ return "Enable Slot Command";
+ case TRB_DISABLE_SLOT:
+ return "Disable Slot Command";
+ case TRB_ADDR_DEV:
+ return "Address Device Command";
+ case TRB_CONFIG_EP:
+ return "Configure Endpoint Command";
+ case TRB_EVAL_CONTEXT:
+ return "Evaluate Context Command";
+ case TRB_RESET_EP:
+ return "Reset Endpoint Command";
+ case TRB_STOP_RING:
+ return "Stop Ring Command";
+ case TRB_SET_DEQ:
+ return "Set TR Dequeue Pointer Command";
+ case TRB_RESET_DEV:
+ return "Reset Device Command";
+ case TRB_FORCE_HEADER:
+ return "Force Header Command";
+ case TRB_CMD_NOOP:
+ return "No-Op Command";
+ case TRB_TRANSFER:
+ return "Transfer Event";
+ case TRB_COMPLETION:
+ return "Command Completion Event";
+ case TRB_PORT_STATUS:
+ return "Port Status Change Event";
+ case TRB_HC_EVENT:
+ return "Device Controller Event";
+ case TRB_MFINDEX_WRAP:
+ return "MFINDEX Wrap Event";
+ case TRB_ENDPOINT_NRDY:
+ return "Endpoint Not ready";
+ case TRB_HALT_ENDPOINT:
+ return "Halt Endpoint";
+ case TRB_FLUSH_ENDPOINT:
+ return "FLush Endpoint";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static inline const char *cdnsp_ring_type_string(enum cdnsp_ring_type type)
+{
+ switch (type) {
+ case TYPE_CTRL:
+ return "CTRL";
+ case TYPE_ISOC:
+ return "ISOC";
+ case TYPE_BULK:
+ return "BULK";
+ case TYPE_INTR:
+ return "INTR";
+ case TYPE_STREAM:
+ return "STREAM";
+ case TYPE_COMMAND:
+ return "CMD";
+ case TYPE_EVENT:
+ return "EVENT";
+ }
+
+ return "UNKNOWN";
+}
+
+static inline char *cdnsp_slot_state_string(u32 state)
+{
+ switch (state) {
+ case SLOT_STATE_ENABLED:
+ return "enabled/disabled";
+ case SLOT_STATE_DEFAULT:
+ return "default";
+ case SLOT_STATE_ADDRESSED:
+ return "addressed";
+ case SLOT_STATE_CONFIGURED:
+ return "configured";
+ default:
+ return "reserved";
+ }
+}
+
+static inline const char *cdnsp_decode_trb(char *str, size_t size, u32 field0,
+ u32 field1, u32 field2, u32 field3)
+{
+ int ep_id = TRB_TO_EP_INDEX(field3) - 1;
+ int type = TRB_FIELD_TO_TYPE(field3);
+ unsigned int ep_num;
+ int ret = 0;
+ u32 temp;
+
+ ep_num = DIV_ROUND_UP(ep_id, 2);
+
+ switch (type) {
+ case TRB_LINK:
+ ret += snprintf(str, size,
+ "LINK %08x%08x intr %ld type '%s' flags %c:%c:%c:%c",
+ field1, field0, GET_INTR_TARGET(field2),
+ cdnsp_trb_type_string(type),
+ field3 & TRB_IOC ? 'I' : 'i',
+ field3 & TRB_CHAIN ? 'C' : 'c',
+ field3 & TRB_TC ? 'T' : 't',
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_TRANSFER:
+ case TRB_COMPLETION:
+ case TRB_PORT_STATUS:
+ case TRB_HC_EVENT:
+ ret += snprintf(str, size,
+ "ep%d%s(%d) type '%s' TRB %08x%08x status '%s'"
+ " len %ld slot %ld flags %c:%c",
+ ep_num, ep_id % 2 ? "out" : "in",
+ TRB_TO_EP_INDEX(field3),
+ cdnsp_trb_type_string(type), field1, field0,
+ cdnsp_trb_comp_code_string(GET_COMP_CODE(field2)),
+ EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3),
+ field3 & EVENT_DATA ? 'E' : 'e',
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_MFINDEX_WRAP:
+ ret += snprintf(str, size, "%s: flags %c",
+ cdnsp_trb_type_string(type),
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_SETUP:
+ ret += snprintf(str, size,
+ "type '%s' bRequestType %02x bRequest %02x "
+ "wValue %02x%02x wIndex %02x%02x wLength %d "
+ "length %ld TD size %ld intr %ld Setup ID %ld "
+ "flags %c:%c:%c",
+ cdnsp_trb_type_string(type),
+ field0 & 0xff,
+ (field0 & 0xff00) >> 8,
+ (field0 & 0xff000000) >> 24,
+ (field0 & 0xff0000) >> 16,
+ (field1 & 0xff00) >> 8,
+ field1 & 0xff,
+ (field1 & 0xff000000) >> 16 |
+ (field1 & 0xff0000) >> 16,
+ TRB_LEN(field2), GET_TD_SIZE(field2),
+ GET_INTR_TARGET(field2),
+ TRB_SETUPID_TO_TYPE(field3),
+ field3 & TRB_IDT ? 'D' : 'd',
+ field3 & TRB_IOC ? 'I' : 'i',
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_DATA:
+ ret += snprintf(str, size,
+ "type '%s' Buffer %08x%08x length %ld TD size %ld "
+ "intr %ld flags %c:%c:%c:%c:%c:%c:%c",
+ cdnsp_trb_type_string(type),
+ field1, field0, TRB_LEN(field2),
+ GET_TD_SIZE(field2),
+ GET_INTR_TARGET(field2),
+ field3 & TRB_IDT ? 'D' : 'i',
+ field3 & TRB_IOC ? 'I' : 'i',
+ field3 & TRB_CHAIN ? 'C' : 'c',
+ field3 & TRB_NO_SNOOP ? 'S' : 's',
+ field3 & TRB_ISP ? 'I' : 'i',
+ field3 & TRB_ENT ? 'E' : 'e',
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_STATUS:
+ ret += snprintf(str, size,
+ "Buffer %08x%08x length %ld TD size %ld intr"
+ "%ld type '%s' flags %c:%c:%c:%c",
+ field1, field0, TRB_LEN(field2),
+ GET_TD_SIZE(field2),
+ GET_INTR_TARGET(field2),
+ cdnsp_trb_type_string(type),
+ field3 & TRB_IOC ? 'I' : 'i',
+ field3 & TRB_CHAIN ? 'C' : 'c',
+ field3 & TRB_ENT ? 'E' : 'e',
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_NORMAL:
+ case TRB_ISOC:
+ case TRB_EVENT_DATA:
+ case TRB_TR_NOOP:
+ ret += snprintf(str, size,
+ "type '%s' Buffer %08x%08x length %ld "
+ "TD size %ld intr %ld "
+ "flags %c:%c:%c:%c:%c:%c:%c:%c:%c",
+ cdnsp_trb_type_string(type),
+ field1, field0, TRB_LEN(field2),
+ GET_TD_SIZE(field2),
+ GET_INTR_TARGET(field2),
+ field3 & TRB_BEI ? 'B' : 'b',
+ field3 & TRB_IDT ? 'T' : 't',
+ field3 & TRB_IOC ? 'I' : 'i',
+ field3 & TRB_CHAIN ? 'C' : 'c',
+ field3 & TRB_NO_SNOOP ? 'S' : 's',
+ field3 & TRB_ISP ? 'I' : 'i',
+ field3 & TRB_ENT ? 'E' : 'e',
+ field3 & TRB_CYCLE ? 'C' : 'c',
+ !(field3 & TRB_EVENT_INVALIDATE) ? 'V' : 'v');
+ break;
+ case TRB_CMD_NOOP:
+ case TRB_ENABLE_SLOT:
+ ret += snprintf(str, size, "%s: flags %c",
+ cdnsp_trb_type_string(type),
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_DISABLE_SLOT:
+ ret += snprintf(str, size, "%s: slot %ld flags %c",
+ cdnsp_trb_type_string(type),
+ TRB_TO_SLOT_ID(field3),
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_ADDR_DEV:
+ ret += snprintf(str, size,
+ "%s: ctx %08x%08x slot %ld flags %c:%c",
+ cdnsp_trb_type_string(type), field1, field0,
+ TRB_TO_SLOT_ID(field3),
+ field3 & TRB_BSR ? 'B' : 'b',
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_CONFIG_EP:
+ ret += snprintf(str, size,
+ "%s: ctx %08x%08x slot %ld flags %c:%c",
+ cdnsp_trb_type_string(type), field1, field0,
+ TRB_TO_SLOT_ID(field3),
+ field3 & TRB_DC ? 'D' : 'd',
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_EVAL_CONTEXT:
+ ret += snprintf(str, size,
+ "%s: ctx %08x%08x slot %ld flags %c",
+ cdnsp_trb_type_string(type), field1, field0,
+ TRB_TO_SLOT_ID(field3),
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_RESET_EP:
+ case TRB_HALT_ENDPOINT:
+ case TRB_FLUSH_ENDPOINT:
+ ret += snprintf(str, size,
+ "%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c",
+ cdnsp_trb_type_string(type),
+ ep_num, ep_id % 2 ? "out" : "in",
+ TRB_TO_EP_INDEX(field3), field1, field0,
+ TRB_TO_SLOT_ID(field3),
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_STOP_RING:
+ ret += snprintf(str, size,
+ "%s: ep%d%s(%d) slot %ld sp %d flags %c",
+ cdnsp_trb_type_string(type),
+ ep_num, ep_id % 2 ? "out" : "in",
+ TRB_TO_EP_INDEX(field3),
+ TRB_TO_SLOT_ID(field3),
+ TRB_TO_SUSPEND_PORT(field3),
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_SET_DEQ:
+ ret += snprintf(str, size,
+ "%s: ep%d%s(%d) deq %08x%08x stream %ld slot %ld flags %c",
+ cdnsp_trb_type_string(type),
+ ep_num, ep_id % 2 ? "out" : "in",
+ TRB_TO_EP_INDEX(field3), field1, field0,
+ TRB_TO_STREAM_ID(field2),
+ TRB_TO_SLOT_ID(field3),
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_RESET_DEV:
+ ret += snprintf(str, size, "%s: slot %ld flags %c",
+ cdnsp_trb_type_string(type),
+ TRB_TO_SLOT_ID(field3),
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ case TRB_ENDPOINT_NRDY:
+ temp = TRB_TO_HOST_STREAM(field2);
+
+ ret += snprintf(str, size,
+ "%s: ep%d%s(%d) H_SID %x%s%s D_SID %lx flags %c:%c",
+ cdnsp_trb_type_string(type),
+ ep_num, ep_id % 2 ? "out" : "in",
+ TRB_TO_EP_INDEX(field3), temp,
+ temp == STREAM_PRIME_ACK ? "(PRIME)" : "",
+ temp == STREAM_REJECTED ? "(REJECTED)" : "",
+ TRB_TO_DEV_STREAM(field0),
+ field3 & TRB_STAT ? 'S' : 's',
+ field3 & TRB_CYCLE ? 'C' : 'c');
+ break;
+ default:
+ ret += snprintf(str, size,
+ "type '%s' -> raw %08x %08x %08x %08x",
+ cdnsp_trb_type_string(type),
+ field0, field1, field2, field3);
+ }
+
+ return str;
+}
+
+static inline const char *cdnsp_decode_slot_context(u32 info, u32 info2,
+ u32 int_target, u32 state)
+{
+ static char str[1024];
+ int ret = 0;
+ u32 speed;
+ char *s;
+
+ speed = info & DEV_SPEED;
+
+ switch (speed) {
+ case SLOT_SPEED_FS:
+ s = "full-speed";
+ break;
+ case SLOT_SPEED_HS:
+ s = "high-speed";
+ break;
+ case SLOT_SPEED_SS:
+ s = "super-speed";
+ break;
+ case SLOT_SPEED_SSP:
+ s = "super-speed plus";
+ break;
+ default:
+ s = "UNKNOWN speed";
+ }
+
+ ret = sprintf(str, "%s Ctx Entries %d",
+ s, (info & LAST_CTX_MASK) >> 27);
+
+ ret += sprintf(str + ret, " [Intr %ld] Addr %ld State %s",
+ GET_INTR_TARGET(int_target), state & DEV_ADDR_MASK,
+ cdnsp_slot_state_string(GET_SLOT_STATE(state)));
+
+ return str;
+}
+
+static inline const char *cdnsp_portsc_link_state_string(u32 portsc)
+{
+ switch (portsc & PORT_PLS_MASK) {
+ case XDEV_U0:
+ return "U0";
+ case XDEV_U1:
+ return "U1";
+ case XDEV_U2:
+ return "U2";
+ case XDEV_U3:
+ return "U3";
+ case XDEV_DISABLED:
+ return "Disabled";
+ case XDEV_RXDETECT:
+ return "RxDetect";
+ case XDEV_INACTIVE:
+ return "Inactive";
+ case XDEV_POLLING:
+ return "Polling";
+ case XDEV_RECOVERY:
+ return "Recovery";
+ case XDEV_HOT_RESET:
+ return "Hot Reset";
+ case XDEV_COMP_MODE:
+ return "Compliance mode";
+ case XDEV_TEST_MODE:
+ return "Test mode";
+ case XDEV_RESUME:
+ return "Resume";
+ default:
+ break;
+ }
+
+ return "Unknown";
+}
+
+static inline const char *cdnsp_decode_portsc(char *str, size_t size,
+ u32 portsc)
+{
+ int ret;
+
+ ret = snprintf(str, size, "%s %s %s Link:%s PortSpeed:%d ",
+ portsc & PORT_POWER ? "Powered" : "Powered-off",
+ portsc & PORT_CONNECT ? "Connected" : "Not-connected",
+ portsc & PORT_PED ? "Enabled" : "Disabled",
+ cdnsp_portsc_link_state_string(portsc),
+ DEV_PORT_SPEED(portsc));
+
+ if (portsc & PORT_RESET)
+ ret += snprintf(str + ret, size - ret, "In-Reset ");
+
+ ret += snprintf(str + ret, size - ret, "Change: ");
+ if (portsc & PORT_CSC)
+ ret += snprintf(str + ret, size - ret, "CSC ");
+ if (portsc & PORT_WRC)
+ ret += snprintf(str + ret, size - ret, "WRC ");
+ if (portsc & PORT_RC)
+ ret += snprintf(str + ret, size - ret, "PRC ");
+ if (portsc & PORT_PLC)
+ ret += snprintf(str + ret, size - ret, "PLC ");
+ if (portsc & PORT_CEC)
+ ret += snprintf(str + ret, size - ret, "CEC ");
+ ret += snprintf(str + ret, size - ret, "Wake: ");
+ if (portsc & PORT_WKCONN_E)
+ ret += snprintf(str + ret, size - ret, "WCE ");
+ if (portsc & PORT_WKDISC_E)
+ ret += snprintf(str + ret, size - ret, "WDE ");
+
+ return str;
+}
+
+static inline const char *cdnsp_ep_state_string(u8 state)
+{
+ switch (state) {
+ case EP_STATE_DISABLED:
+ return "disabled";
+ case EP_STATE_RUNNING:
+ return "running";
+ case EP_STATE_HALTED:
+ return "halted";
+ case EP_STATE_STOPPED:
+ return "stopped";
+ case EP_STATE_ERROR:
+ return "error";
+ default:
+ return "INVALID";
+ }
+}
+
+static inline const char *cdnsp_ep_type_string(u8 type)
+{
+ switch (type) {
+ case ISOC_OUT_EP:
+ return "Isoc OUT";
+ case BULK_OUT_EP:
+ return "Bulk OUT";
+ case INT_OUT_EP:
+ return "Int OUT";
+ case CTRL_EP:
+ return "Ctrl";
+ case ISOC_IN_EP:
+ return "Isoc IN";
+ case BULK_IN_EP:
+ return "Bulk IN";
+ case INT_IN_EP:
+ return "Int IN";
+ default:
+ return "INVALID";
+ }
+}
+
+static inline const char *cdnsp_decode_ep_context(char *str, size_t size,
+ u32 info, u32 info2,
+ u64 deq, u32 tx_info)
+{
+ u8 max_pstr, ep_state, interval, ep_type, burst, cerr, mult;
+ bool lsa, hid;
+ u16 maxp, avg;
+ u32 esit;
+ int ret;
+
+ esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 |
+ CTX_TO_MAX_ESIT_PAYLOAD_LO(tx_info);
+
+ ep_state = info & EP_STATE_MASK;
+ max_pstr = CTX_TO_EP_MAXPSTREAMS(info);
+ interval = CTX_TO_EP_INTERVAL(info);
+ mult = CTX_TO_EP_MULT(info) + 1;
+ lsa = !!(info & EP_HAS_LSA);
+
+ cerr = (info2 & (3 << 1)) >> 1;
+ ep_type = CTX_TO_EP_TYPE(info2);
+ hid = !!(info2 & (1 << 7));
+ burst = CTX_TO_MAX_BURST(info2);
+ maxp = MAX_PACKET_DECODED(info2);
+
+ avg = EP_AVG_TRB_LENGTH(tx_info);
+
+ ret = snprintf(str, size, "State %s mult %d max P. Streams %d %s",
+ cdnsp_ep_state_string(ep_state), mult,
+ max_pstr, lsa ? "LSA " : "");
+
+ ret += snprintf(str + ret, size - ret,
+ "interval %d us max ESIT payload %d CErr %d ",
+ (1 << interval) * 125, esit, cerr);
+
+ ret += snprintf(str + ret, size - ret,
+ "Type %s %sburst %d maxp %d deq %016llx ",
+ cdnsp_ep_type_string(ep_type), hid ? "HID" : "",
+ burst, maxp, deq);
+
+ ret += snprintf(str + ret, size - ret, "avg trb len %d", avg);
+
+ return str;
+}
+
+#endif /*__LINUX_CDNSP_DEBUG*/
diff --git a/drivers/usb/cdns3/cdnsp-ep0.c b/drivers/usb/cdns3/cdnsp-ep0.c
new file mode 100644
index 000000000000..9b8325f82499
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-ep0.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+#include <linux/list.h>
+
+#include "cdnsp-gadget.h"
+#include "cdnsp-trace.h"
+
+static void cdnsp_ep0_stall(struct cdnsp_device *pdev)
+{
+ struct cdnsp_request *preq;
+ struct cdnsp_ep *pep;
+
+ pep = &pdev->eps[0];
+ preq = next_request(&pep->pending_list);
+
+ if (pdev->three_stage_setup) {
+ cdnsp_halt_endpoint(pdev, pep, true);
+
+ if (preq)
+ cdnsp_gadget_giveback(pep, preq, -ECONNRESET);
+ } else {
+ pep->ep_state |= EP0_HALTED_STATUS;
+
+ if (preq)
+ list_del(&preq->list);
+
+ cdnsp_status_stage(pdev);
+ }
+}
+
+static int cdnsp_ep0_delegate_req(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl)
+{
+ int ret;
+
+ spin_unlock(&pdev->lock);
+ ret = pdev->gadget_driver->setup(&pdev->gadget, ctrl);
+ spin_lock(&pdev->lock);
+
+ return ret;
+}
+
+static int cdnsp_ep0_set_config(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl)
+{
+ enum usb_device_state state = pdev->gadget.state;
+ u32 cfg;
+ int ret;
+
+ cfg = le16_to_cpu(ctrl->wValue);
+
+ switch (state) {
+ case USB_STATE_ADDRESS:
+ trace_cdnsp_ep0_set_config("from Address state");
+ break;
+ case USB_STATE_CONFIGURED:
+ trace_cdnsp_ep0_set_config("from Configured state");
+ break;
+ default:
+ dev_err(pdev->dev, "Set Configuration - bad device state\n");
+ return -EINVAL;
+ }
+
+ ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+ if (ret)
+ return ret;
+
+ if (!cfg)
+ usb_gadget_set_state(&pdev->gadget, USB_STATE_ADDRESS);
+
+ return 0;
+}
+
+static int cdnsp_ep0_set_address(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl)
+{
+ enum usb_device_state state = pdev->gadget.state;
+ struct cdnsp_slot_ctx *slot_ctx;
+ unsigned int slot_state;
+ int ret;
+ u32 addr;
+
+ addr = le16_to_cpu(ctrl->wValue);
+
+ if (addr > 127) {
+ dev_err(pdev->dev, "Invalid device address %d\n", addr);
+ return -EINVAL;
+ }
+
+ slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
+
+ if (state == USB_STATE_CONFIGURED) {
+ dev_err(pdev->dev, "Can't Set Address from Configured State\n");
+ return -EINVAL;
+ }
+
+ pdev->device_address = le16_to_cpu(ctrl->wValue);
+
+ slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
+ slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
+ if (slot_state == SLOT_STATE_ADDRESSED)
+ cdnsp_reset_device(pdev);
+
+ /* Set device address. */
+ ret = cdnsp_setup_device(pdev, SETUP_CONTEXT_ADDRESS);
+ if (ret)
+ return ret;
+
+ if (addr)
+ usb_gadget_set_state(&pdev->gadget, USB_STATE_ADDRESS);
+ else
+ usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT);
+
+ return 0;
+}
+
+int cdnsp_status_stage(struct cdnsp_device *pdev)
+{
+ pdev->ep0_stage = CDNSP_STATUS_STAGE;
+ pdev->ep0_preq.request.length = 0;
+
+ return cdnsp_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq);
+}
+
+static int cdnsp_w_index_to_ep_index(u16 wIndex)
+{
+ if (!(wIndex & USB_ENDPOINT_NUMBER_MASK))
+ return 0;
+
+ return ((wIndex & USB_ENDPOINT_NUMBER_MASK) * 2) +
+ (wIndex & USB_ENDPOINT_DIR_MASK ? 1 : 0) - 1;
+}
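+
+/*
+ * Illustrative examples of the wIndex mapping above (a sketch, not part
+ * of the original patch):
+ * wIndex 0x81 (ep1 IN): (1 * 2) + 1 - 1 = 2
+ * wIndex 0x01 (ep1 OUT): (1 * 2) + 0 - 1 = 1
+ * wIndex 0x00/0x80 (ep0): 0
+ */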
+
+static int cdnsp_ep0_handle_status(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct cdnsp_ep *pep;
+ __le16 *response;
+ int ep_sts = 0;
+ u16 status = 0;
+ u32 recipient;
+
+ recipient = ctrl->bRequestType & USB_RECIP_MASK;
+
+ switch (recipient) {
+ case USB_RECIP_DEVICE:
+ status = pdev->gadget.is_selfpowered;
+ status |= pdev->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+
+ if (pdev->gadget.speed >= USB_SPEED_SUPER) {
+ status |= pdev->u1_allowed << USB_DEV_STAT_U1_ENABLED;
+ status |= pdev->u2_allowed << USB_DEV_STAT_U2_ENABLED;
+ }
+ break;
+ case USB_RECIP_INTERFACE:
+ /*
+ * Function Remote Wake Capable D0
+ * Function Remote Wakeup D1
+ */
+ return cdnsp_ep0_delegate_req(pdev, ctrl);
+ case USB_RECIP_ENDPOINT:
+ ep_sts = cdnsp_w_index_to_ep_index(le16_to_cpu(ctrl->wIndex));
+ pep = &pdev->eps[ep_sts];
+ ep_sts = GET_EP_CTX_STATE(pep->out_ctx);
+
+ /* check if endpoint is stalled */
+ if (ep_sts == EP_STATE_HALTED)
+ status = BIT(USB_ENDPOINT_HALT);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ response = (__le16 *)pdev->setup_buf;
+ *response = cpu_to_le16(status);
+
+ pdev->ep0_preq.request.length = sizeof(*response);
+ pdev->ep0_preq.request.buf = pdev->setup_buf;
+
+ return cdnsp_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq);
+}
+
+static void cdnsp_enter_test_mode(struct cdnsp_device *pdev)
+{
+ u32 temp;
+
+ temp = readl(&pdev->active_port->regs->portpmsc) & ~GENMASK(31, 28);
+ temp |= PORT_TEST_MODE(pdev->test_mode);
+ writel(temp, &pdev->active_port->regs->portpmsc);
+}
+
+static int cdnsp_ep0_handle_feature_device(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl,
+ int set)
+{
+ enum usb_device_state state;
+ enum usb_device_speed speed;
+ u16 tmode;
+
+ state = pdev->gadget.state;
+ speed = pdev->gadget.speed;
+
+ switch (le16_to_cpu(ctrl->wValue)) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ pdev->may_wakeup = !!set;
+ trace_cdnsp_may_wakeup(set);
+ break;
+ case USB_DEVICE_U1_ENABLE:
+ if (state != USB_STATE_CONFIGURED || speed < USB_SPEED_SUPER)
+ return -EINVAL;
+
+ pdev->u1_allowed = !!set;
+ trace_cdnsp_u1(set);
+ break;
+ case USB_DEVICE_U2_ENABLE:
+ if (state != USB_STATE_CONFIGURED || speed < USB_SPEED_SUPER)
+ return -EINVAL;
+
+ pdev->u2_allowed = !!set;
+ trace_cdnsp_u2(set);
+ break;
+ case USB_DEVICE_LTM_ENABLE:
+ return -EINVAL;
+ case USB_DEVICE_TEST_MODE:
+ if (state != USB_STATE_CONFIGURED || speed > USB_SPEED_HIGH)
+ return -EINVAL;
+
+ tmode = le16_to_cpu(ctrl->wIndex);
+
+ if (!set || (tmode & 0xff) != 0)
+ return -EINVAL;
+
+ tmode = tmode >> 8;
+
+ if (tmode > USB_TEST_FORCE_ENABLE || tmode < USB_TEST_J)
+ return -EINVAL;
+
+ pdev->test_mode = tmode;
+
+ /*
+ * Test mode must be set before Status Stage but controller
+ * will start testing sequence after Status Stage.
+ */
+ cdnsp_enter_test_mode(pdev);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cdnsp_ep0_handle_feature_intf(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl,
+ int set)
+{
+ u16 wValue, wIndex;
+ int ret;
+
+ wValue = le16_to_cpu(ctrl->wValue);
+ wIndex = le16_to_cpu(ctrl->wIndex);
+
+ switch (wValue) {
+ case USB_INTRF_FUNC_SUSPEND:
+ ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+ if (ret)
+ return ret;
+
+ /*
+ * Remote wakeup is enabled when any function within a device
+ * is enabled for function remote wakeup.
+ */
+ if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
+ pdev->may_wakeup++;
+ else if (pdev->may_wakeup > 0)
+ pdev->may_wakeup--;
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cdnsp_ep0_handle_feature_endpoint(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl,
+ int set)
+{
+ struct cdnsp_ep *pep;
+ u16 wValue;
+
+ wValue = le16_to_cpu(ctrl->wValue);
+ pep = &pdev->eps[cdnsp_w_index_to_ep_index(le16_to_cpu(ctrl->wIndex))];
+
+ switch (wValue) {
+ case USB_ENDPOINT_HALT:
+ if (!set && (pep->ep_state & EP_WEDGE)) {
+ /* Resets Sequence Number */
+ cdnsp_halt_endpoint(pdev, pep, 0);
+ cdnsp_halt_endpoint(pdev, pep, 1);
+ break;
+ }
+
+ return cdnsp_halt_endpoint(pdev, pep, set);
+ default:
+ dev_warn(pdev->dev, "WARN Incorrect wValue %04x\n", wValue);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cdnsp_ep0_handle_feature(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl,
+ int set)
+{
+ switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ return cdnsp_ep0_handle_feature_device(pdev, ctrl, set);
+ case USB_RECIP_INTERFACE:
+ return cdnsp_ep0_handle_feature_intf(pdev, ctrl, set);
+ case USB_RECIP_ENDPOINT:
+ return cdnsp_ep0_handle_feature_endpoint(pdev, ctrl, set);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int cdnsp_ep0_set_sel(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl)
+{
+ enum usb_device_state state = pdev->gadget.state;
+ u16 wLength;
+
+ if (state == USB_STATE_DEFAULT)
+ return -EINVAL;
+
+ wLength = le16_to_cpu(ctrl->wLength);
+
+ if (wLength != 6) {
+ dev_err(pdev->dev, "Set SEL should be 6 bytes, got %d\n",
+ wLength);
+ return -EINVAL;
+ }
+
+ /*
+ * To handle Set SEL we need to receive 6 bytes from Host. So let's
+ * queue a usb_request for 6 bytes.
+ */
+ pdev->ep0_preq.request.length = 6;
+ pdev->ep0_preq.request.buf = pdev->setup_buf;
+
+ return cdnsp_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq);
+}
+
+static int cdnsp_ep0_set_isoch_delay(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl)
+{
+ if (le16_to_cpu(ctrl->wIndex) || le16_to_cpu(ctrl->wLength))
+ return -EINVAL;
+
+ pdev->gadget.isoch_delay = le16_to_cpu(ctrl->wValue);
+
+ return 0;
+}
+
+static int cdnsp_ep0_std_request(struct cdnsp_device *pdev,
+ struct usb_ctrlrequest *ctrl)
+{
+ int ret;
+
+ switch (ctrl->bRequest) {
+ case USB_REQ_GET_STATUS:
+ ret = cdnsp_ep0_handle_status(pdev, ctrl);
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ ret = cdnsp_ep0_handle_feature(pdev, ctrl, 0);
+ break;
+ case USB_REQ_SET_FEATURE:
+ ret = cdnsp_ep0_handle_feature(pdev, ctrl, 1);
+ break;
+ case USB_REQ_SET_ADDRESS:
+ ret = cdnsp_ep0_set_address(pdev, ctrl);
+ break;
+ case USB_REQ_SET_CONFIGURATION:
+ ret = cdnsp_ep0_set_config(pdev, ctrl);
+ break;
+ case USB_REQ_SET_SEL:
+ ret = cdnsp_ep0_set_sel(pdev, ctrl);
+ break;
+ case USB_REQ_SET_ISOCH_DELAY:
+ ret = cdnsp_ep0_set_isoch_delay(pdev, ctrl);
+ break;
+ case USB_REQ_SET_INTERFACE:
+ /*
+ * Add request into pending list to block sending status stage
+ * by libcomposite.
+ */
+ list_add_tail(&pdev->ep0_preq.list,
+ &pdev->ep0_preq.pep->pending_list);
+
+ ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+ if (ret == -EBUSY)
+ ret = 0;
+
+ list_del(&pdev->ep0_preq.list);
+ break;
+ default:
+ ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+ break;
+ }
+
+ return ret;
+}
+
+void cdnsp_setup_analyze(struct cdnsp_device *pdev)
+{
+ struct usb_ctrlrequest *ctrl = &pdev->setup;
+ int ret = 0;
+ u16 len;
+
+ trace_cdnsp_ctrl_req(ctrl);
+
+ if (!pdev->gadget_driver)
+ goto out;
+
+ if (pdev->gadget.state == USB_STATE_NOTATTACHED) {
+ dev_err(pdev->dev, "ERR: Setup detected in unattached state\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Restore the ep0 to Stopped/Running state. */
+ if (pdev->eps[0].ep_state & EP_HALTED) {
+ trace_cdnsp_ep0_halted("Restore to normal state");
+ cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0);
+ }
+
+ /*
+ * Finishing previous SETUP transfer by removing request from
+ * list and informing upper layer
+ */
+ if (!list_empty(&pdev->eps[0].pending_list)) {
+ struct cdnsp_request *req;
+
+ trace_cdnsp_ep0_request("Remove previous");
+ req = next_request(&pdev->eps[0].pending_list);
+ cdnsp_ep_dequeue(&pdev->eps[0], req);
+ }
+
+ len = le16_to_cpu(ctrl->wLength);
+ if (!len) {
+ pdev->three_stage_setup = false;
+ pdev->ep0_expect_in = false;
+ } else {
+ pdev->three_stage_setup = true;
+ pdev->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN);
+ }
+
+ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
+ ret = cdnsp_ep0_std_request(pdev, ctrl);
+ else
+ ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+
+ if (!len)
+ pdev->ep0_stage = CDNSP_STATUS_STAGE;
+
+ if (ret == USB_GADGET_DELAYED_STATUS) {
+ trace_cdnsp_ep0_status_stage("delayed");
+ return;
+ }
+out:
+ if (ret < 0)
+ cdnsp_ep0_stall(pdev);
+ else if (pdev->ep0_stage == CDNSP_STATUS_STAGE)
+ cdnsp_status_stage(pdev);
+}
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
new file mode 100644
index 000000000000..f2ebbacd932e
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -0,0 +1,2009 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <linux/dmi.h>
+
+#include "core.h"
+#include "gadget-export.h"
+#include "drd.h"
+#include "cdnsp-gadget.h"
+#include "cdnsp-trace.h"
+
+unsigned int cdnsp_port_speed(unsigned int port_status)
+{
+ /* Detect gadget speed based on PORTSC register. */
+ if (DEV_SUPERSPEEDPLUS(port_status))
+ return USB_SPEED_SUPER_PLUS;
+ else if (DEV_SUPERSPEED(port_status))
+ return USB_SPEED_SUPER;
+ else if (DEV_HIGHSPEED(port_status))
+ return USB_SPEED_HIGH;
+ else if (DEV_FULLSPEED(port_status))
+ return USB_SPEED_FULL;
+
+ /* If device is detached then speed will be USB_SPEED_UNKNOWN. */
+ return USB_SPEED_UNKNOWN;
+}
+
+/*
+ * Given a port state, this function returns a value that would result in the
+ * port being in the same state, if the value was written to the port status
+ * control register.
+ * Save Read Only (RO) bits and save read/write bits where
+ * writing a 0 clears the bit and writing a 1 sets the bit (RWS).
+ * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect.
+ */
+u32 cdnsp_port_state_to_neutral(u32 state)
+{
+ /* Save read-only status and port state. */
+ return (state & CDNSP_PORT_RO) | (state & CDNSP_PORT_RWS);
+}
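+
+/*
+ * A minimal usage sketch (illustrative, mirroring cdnsp_disable_port()
+ * below): writing back a raw PORTSC value would clear RW1CS change bits
+ * and set RW1S bits as a side effect, so callers neutralize it first:
+ *
+ * u32 temp = cdnsp_port_state_to_neutral(readl(port_regs));
+ *
+ * writel(temp | PORT_PED, port_regs);
+ */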
+
+/**
+ * cdnsp_find_next_ext_cap - Find the offset of the extended capability
+ * with capability ID @id.
+ * @base: PCI MMIO registers base address.
+ * @start: Address at which to start looking (0 or HCC_PARAMS to start at
+ * the beginning of the list).
+ * @id: Extended capability ID to search for.
+ *
+ * Returns the offset of the next matching extended capability structure.
+ * Some capabilities can occur several times,
+ * e.g., the EXT_CAPS_PROTOCOL, and this provides a way to find them all.
+ */
+int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id)
+{
+ u32 offset = start;
+ u32 next;
+ u32 val;
+
+ if (!start || start == HCC_PARAMS_OFFSET) {
+ val = readl(base + HCC_PARAMS_OFFSET);
+ if (val == ~0)
+ return 0;
+
+ offset = HCC_EXT_CAPS(val) << 2;
+ if (!offset)
+ return 0;
+ }
+
+ do {
+ val = readl(base + offset);
+ if (val == ~0)
+ return 0;
+
+ if (EXT_CAPS_ID(val) == id && offset != start)
+ return offset;
+
+ next = EXT_CAPS_NEXT(val);
+ offset += next << 2;
+ } while (next);
+
+ return 0;
+}
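+
+/*
+ * A usage sketch (illustrative, not part of this patch): feeding each
+ * returned offset back in as @start visits every capability with the
+ * same ID, e.g. all EXT_CAPS_PROTOCOL entries; handle_protocol_cap()
+ * is a hypothetical helper.
+ *
+ * u32 off = 0;
+ *
+ * do {
+ * 	off = cdnsp_find_next_ext_cap(base, off, EXT_CAPS_PROTOCOL);
+ * 	if (off)
+ * 		handle_protocol_cap(base + off);
+ * } while (off);
+ */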
+
+void cdnsp_set_link_state(struct cdnsp_device *pdev,
+ __le32 __iomem *port_regs,
+ u32 link_state)
+{
+ int port_num = 0xFF;
+ u32 temp;
+
+ temp = readl(port_regs);
+ temp = cdnsp_port_state_to_neutral(temp);
+ temp |= PORT_WKCONN_E | PORT_WKDISC_E;
+ writel(temp, port_regs);
+
+ temp &= ~PORT_PLS_MASK;
+ temp |= PORT_LINK_STROBE | link_state;
+
+ if (pdev->active_port)
+ port_num = pdev->active_port->port_num;
+
+ trace_cdnsp_handle_port_status(port_num, readl(port_regs));
+ writel(temp, port_regs);
+ trace_cdnsp_link_state_changed(port_num, readl(port_regs));
+}
+
+static void cdnsp_disable_port(struct cdnsp_device *pdev,
+ __le32 __iomem *port_regs)
+{
+ u32 temp = cdnsp_port_state_to_neutral(readl(port_regs));
+
+ writel(temp | PORT_PED, port_regs);
+}
+
+static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev,
+ __le32 __iomem *port_regs)
+{
+ u32 portsc = readl(port_regs);
+
+ writel(cdnsp_port_state_to_neutral(portsc) |
+ (portsc & PORT_CHANGE_BITS), port_regs);
+}
+
+static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
+{
+ __le32 __iomem *reg;
+ void __iomem *base;
+ u32 offset = 0;
+
+ base = &pdev->cap_regs->hc_capbase;
+ offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
+ reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;
+
+ bit = readl(reg) | bit;
+ writel(bit, reg);
+}
+
+static void cdnsp_clear_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
+{
+ __le32 __iomem *reg;
+ void __iomem *base;
+ u32 offset = 0;
+
+ base = &pdev->cap_regs->hc_capbase;
+ offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
+ reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;
+
+ bit = readl(reg) & ~bit;
+ writel(bit, reg);
+}
+
+/*
+ * Disable interrupts and begin the controller halting process.
+ */
+static void cdnsp_quiesce(struct cdnsp_device *pdev)
+{
+ u32 halted;
+ u32 mask;
+ u32 cmd;
+
+ mask = ~(u32)(CDNSP_IRQS);
+
+ halted = readl(&pdev->op_regs->status) & STS_HALT;
+ if (!halted)
+ mask &= ~(CMD_R_S | CMD_DEVEN);
+
+ cmd = readl(&pdev->op_regs->command);
+ cmd &= mask;
+ writel(cmd, &pdev->op_regs->command);
+}
+
+/*
+ * Force controller into halt state.
+ *
+ * Disable any IRQs and clear the run/stop bit.
+ * Controller will complete any current and actively pipelined transactions, and
+ * should halt within 16 ms of the run/stop bit being cleared.
+ * Read controller Halted bit in the status register to see when the
+ * controller is finished.
+ */
+int cdnsp_halt(struct cdnsp_device *pdev)
+{
+ int ret;
+ u32 val;
+
+ cdnsp_quiesce(pdev);
+
+ ret = readl_poll_timeout_atomic(&pdev->op_regs->status, val,
+ val & STS_HALT, 1,
+ CDNSP_MAX_HALT_USEC);
+ if (ret) {
+ dev_err(pdev->dev, "ERROR: Device halt failed\n");
+ return ret;
+ }
+
+ pdev->cdnsp_state |= CDNSP_STATE_HALTED;
+
+ return 0;
+}
+
+/*
+ * Device controller died: register reads return 0xffffffff, or a command never
+ * ends.
+ */
+void cdnsp_died(struct cdnsp_device *pdev)
+{
+ dev_err(pdev->dev, "ERROR: CDNSP controller not responding\n");
+ pdev->cdnsp_state |= CDNSP_STATE_DYING;
+ cdnsp_halt(pdev);
+}
+
+/*
+ * Set the run bit and wait for the device to be running.
+ */
+static int cdnsp_start(struct cdnsp_device *pdev)
+{
+ u32 temp;
+ int ret;
+
+ temp = readl(&pdev->op_regs->command);
+ temp |= (CMD_R_S | CMD_DEVEN);
+ writel(temp, &pdev->op_regs->command);
+
+ pdev->cdnsp_state = 0;
+
+ /*
+ * Wait for the STS_HALT Status bit to be 0 to indicate the device is
+ * running.
+ */
+ ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
+ !(temp & STS_HALT), 1,
+ CDNSP_MAX_HALT_USEC);
+ if (ret) {
+ pdev->cdnsp_state = CDNSP_STATE_DYING;
+ dev_err(pdev->dev, "ERROR: Controller run failed\n");
+ }
+
+ return ret;
+}
+
+/*
+ * Reset a halted controller.
+ *
+ * This resets pipelines, timers, counters, state machines, etc.
+ * Transactions will be terminated immediately, and operational registers
+ * will be set to their defaults.
+ */
+int cdnsp_reset(struct cdnsp_device *pdev)
+{
+ u32 command;
+ u32 temp;
+ int ret;
+
+ temp = readl(&pdev->op_regs->status);
+
+ if (temp == ~(u32)0) {
+ dev_err(pdev->dev, "Device not accessible, reset failed.\n");
+ return -ENODEV;
+ }
+
+ if ((temp & STS_HALT) == 0) {
+ dev_err(pdev->dev, "Controller not halted, aborting reset.\n");
+ return -EINVAL;
+ }
+
+ command = readl(&pdev->op_regs->command);
+ command |= CMD_RESET;
+ writel(command, &pdev->op_regs->command);
+
+ ret = readl_poll_timeout_atomic(&pdev->op_regs->command, temp,
+ !(temp & CMD_RESET), 1,
+ 10 * 1000);
+ if (ret) {
+ dev_err(pdev->dev, "ERROR: Controller reset failed\n");
+ return ret;
+ }
+
+ /*
+ * CDNSP cannot write any doorbells or operational registers other
+ * than status until the "Controller Not Ready" flag is cleared.
+ */
+ ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
+ !(temp & STS_CNR), 1,
+ 10 * 1000);
+
+ if (ret) {
+ dev_err(pdev->dev, "ERROR: Controller not ready to work\n");
+ return ret;
+ }
+
+ dev_dbg(pdev->dev, "Controller ready to work");
+
+ return ret;
+}
+
+/*
+ * cdnsp_get_endpoint_index - Find the index for an endpoint given its
+ * descriptor. Use the return value to right shift 1 for the bitmask.
+ *
+ * Index = (epnum * 2) + direction - 1,
+ * where direction = 0 for OUT, 1 for IN.
+ * For control endpoints, the IN index is used (OUT index is unused), so
+ * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
+ */
+static unsigned int
+ cdnsp_get_endpoint_index(const struct usb_endpoint_descriptor *desc)
+{
+ unsigned int index = (unsigned int)usb_endpoint_num(desc);
+
+ if (usb_endpoint_xfer_control(desc))
+ return index * 2;
+
+ return (index * 2) + (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
+}
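+
+/*
+ * Worked examples of the index formula above (illustrative only):
+ * ep0 (control, IN index used): 0 * 2 = 0
+ * ep1 OUT: (1 * 2) + 0 - 1 = 1
+ * ep1 IN: (1 * 2) + 1 - 1 = 2
+ * ep2 OUT: (2 * 2) + 0 - 1 = 3
+ */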
+
+/*
+ * Find the flag for this endpoint (for use in the control context). Use the
+ * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
+ * bit 1, etc.
+ */
+static unsigned int
+ cdnsp_get_endpoint_flag(const struct usb_endpoint_descriptor *desc)
+{
+ return 1 << (cdnsp_get_endpoint_index(desc) + 1);
+}
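+
+/*
+ * E.g. ep1 IN (index 2 above) yields the flag 1 << 3, i.e. bit 3 of the
+ * control context add/drop masks, consistent with the slot context at
+ * bit 0 and endpoint 0 at bit 1 (illustrative note).
+ */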
+
+int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
+{
+ struct cdnsp_device *pdev = pep->pdev;
+ struct usb_request *request;
+ int ret;
+
+ if (preq->epnum == 0 && !list_empty(&pep->pending_list)) {
+ trace_cdnsp_request_enqueue_busy(preq);
+ return -EBUSY;
+ }
+
+ request = &preq->request;
+ request->actual = 0;
+ request->status = -EINPROGRESS;
+ preq->direction = pep->direction;
+ preq->epnum = pep->number;
+ preq->td.drbl = 0;
+
+ ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->direction);
+ if (ret) {
+ trace_cdnsp_request_enqueue_error(preq);
+ return ret;
+ }
+
+ list_add_tail(&preq->list, &pep->pending_list);
+
+ trace_cdnsp_request_enqueue(preq);
+
+ switch (usb_endpoint_type(pep->endpoint.desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ ret = cdnsp_queue_ctrl_tx(pdev, preq);
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_INT:
+ ret = cdnsp_queue_bulk_tx(pdev, preq);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ ret = cdnsp_queue_isoc_tx_prepare(pdev, preq);
+ }
+
+ if (ret)
+ goto unmap;
+
+ return 0;
+
+unmap:
+ usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
+ pep->direction);
+ list_del(&preq->list);
+ trace_cdnsp_request_enqueue_error(preq);
+
+ return ret;
+}
+
+/*
+ * Remove the request's TD from the endpoint ring. This may cause the
+ * controller to stop USB transfers, potentially stopping in the middle of a
+ * TRB buffer. The controller should pick up where it left off in the TD,
+ * unless a Set Transfer Ring Dequeue Pointer is issued.
+ *
+ * The TRBs that make up the buffers for the canceled request will be "removed"
+ * from the ring. Since the ring is a contiguous structure, they can't be
+ * physically removed. Instead, the following cases must be handled:
+ *
+ * 1) If the controller is in the middle of processing the request to be
+ * canceled, we simply move the ring's dequeue pointer past those TRBs
+ * using the Set Transfer Ring Dequeue Pointer command. This will be
+ * the common case, when drivers timeout on the last submitted request
+ * and attempt to cancel.
+ *
+ * 2) If the controller is in the middle of a different TD, we turn the TRBs
+ * into a series of 1-TRB transfer no-op TDs. No-ops shouldn't be chained.
+ * The controller will need to invalidate any TRBs it has cached after
+ * the stop endpoint command.
+ *
+ * 3) The TD may have completed by the time the Stop Endpoint Command
+ * completes, so software needs to handle that case too.
+ */
+int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
+{
+ struct cdnsp_device *pdev = pep->pdev;
+ int ret;
+
+ trace_cdnsp_request_dequeue(preq);
+
+ if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING) {
+ ret = cdnsp_cmd_stop_ep(pdev, pep);
+ if (ret)
+ return ret;
+ }
+
+ return cdnsp_remove_request(pdev, preq, pep);
+}
+
+static void cdnsp_zero_in_ctx(struct cdnsp_device *pdev)
+{
+ struct cdnsp_input_control_ctx *ctrl_ctx;
+ struct cdnsp_slot_ctx *slot_ctx;
+ struct cdnsp_ep_ctx *ep_ctx;
+ int i;
+
+ ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
+
+ /*
+ * When a device's add flag and drop flag are zero, any subsequent
+ * configure endpoint command will leave that endpoint's state
+ * untouched. Make sure we don't leave any old state in the input
+ * endpoint contexts.
+ */
+ ctrl_ctx->drop_flags = 0;
+ ctrl_ctx->add_flags = 0;
+ slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
+ slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
+
+ /* Endpoint 0 is always valid */
+ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
+ for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i) {
+ ep_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, i);
+ ep_ctx->ep_info = 0;
+ ep_ctx->ep_info2 = 0;
+ ep_ctx->deq = 0;
+ ep_ctx->tx_info = 0;
+ }
+}
+
+/* Issue a configure endpoint command and wait for it to finish. */
+static int cdnsp_configure_endpoint(struct cdnsp_device *pdev)
+{
+ int ret;
+
+ cdnsp_queue_configure_endpoint(pdev, pdev->cmd.in_ctx->dma);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+ if (ret) {
+ dev_err(pdev->dev,
+ "ERR: unexpected command completion code 0x%x.\n", ret);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
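+/*
+ * Walk the event ring and mark, with TRB_EVENT_INVALIDATE, all pending
+ * transfer events that belong to the given endpoint, so that stale events
+ * are ignored once the endpoint's requests have been removed.
+ */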
+static void cdnsp_invalidate_ep_events(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep)
+{
+ struct cdnsp_segment *segment;
+ union cdnsp_trb *event;
+ u32 cycle_state;
+ u32 data;
+
+ event = pdev->event_ring->dequeue;
+ segment = pdev->event_ring->deq_seg;
+ cycle_state = pdev->event_ring->cycle_state;
+
+ while (1) {
+ data = le32_to_cpu(event->trans_event.flags);
+
+ /* Check the owner of the TRB. */
+ if ((data & TRB_CYCLE) != cycle_state)
+ break;
+
+ if (TRB_FIELD_TO_TYPE(data) == TRB_TRANSFER &&
+ TRB_TO_EP_ID(data) == (pep->idx + 1)) {
+ data |= TRB_EVENT_INVALIDATE;
+ event->trans_event.flags = cpu_to_le32(data);
+ }
+
+ if (cdnsp_last_trb_on_seg(segment, event)) {
+ cycle_state ^= 1;
+ segment = pdev->event_ring->deq_seg->next;
+ event = segment->trbs;
+ } else {
+ event++;
+ }
+ }
+}
+
+int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev)
+{
+ struct cdnsp_segment *event_deq_seg;
+ union cdnsp_trb *cmd_trb;
+ dma_addr_t cmd_deq_dma;
+ union cdnsp_trb *event;
+ u32 cycle_state;
+ int ret, val;
+ u64 cmd_dma;
+ u32 flags;
+
+ cmd_trb = pdev->cmd.command_trb;
+ pdev->cmd.status = 0;
+
+ trace_cdnsp_cmd_wait_for_compl(pdev->cmd_ring, &cmd_trb->generic);
+
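+ /* Wait until the controller clears the Command Ring Busy bit. */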
+ ret = readl_poll_timeout_atomic(&pdev->op_regs->cmd_ring, val,
+ !CMD_RING_BUSY(val), 1,
+ CDNSP_CMD_TIMEOUT);
+ if (ret) {
+ dev_err(pdev->dev, "ERR: Timeout while waiting for command\n");
+ trace_cdnsp_cmd_timeout(pdev->cmd_ring, &cmd_trb->generic);
+ pdev->cdnsp_state = CDNSP_STATE_DYING;
+ return -ETIMEDOUT;
+ }
+
+ event = pdev->event_ring->dequeue;
+ event_deq_seg = pdev->event_ring->deq_seg;
+ cycle_state = pdev->event_ring->cycle_state;
+
+ cmd_deq_dma = cdnsp_trb_virt_to_dma(pdev->cmd_ring->deq_seg, cmd_trb);
+ if (!cmd_deq_dma)
+ return -EINVAL;
+
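+ /* Walk the event ring looking for the completion of the last command. */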
+ while (1) {
+ flags = le32_to_cpu(event->event_cmd.flags);
+
+ /* Check the owner of the TRB. */
+ if ((flags & TRB_CYCLE) != cycle_state)
+ return -EINVAL;
+
+ cmd_dma = le64_to_cpu(event->event_cmd.cmd_trb);
+
+ /*
+ * Check whether the completion event is for the last queued
+ * command.
+ */
+ if (TRB_FIELD_TO_TYPE(flags) != TRB_COMPLETION ||
+ cmd_dma != (u64)cmd_deq_dma) {
+ if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
+ event++;
+ continue;
+ }
+
+ if (cdnsp_last_trb_on_ring(pdev->event_ring,
+ event_deq_seg, event))
+ cycle_state ^= 1;
+
+ event_deq_seg = event_deq_seg->next;
+ event = event_deq_seg->trbs;
+ continue;
+ }
+
+ trace_cdnsp_handle_command(pdev->cmd_ring, &cmd_trb->generic);
+
+ pdev->cmd.status = GET_COMP_CODE(le32_to_cpu(event->event_cmd.status));
+ if (pdev->cmd.status == COMP_SUCCESS)
+ return 0;
+
+ return -pdev->cmd.status;
+ }
+}
+
+int cdnsp_halt_endpoint(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ int value)
+{
+ int ret;
+
+ trace_cdnsp_ep_halt(value ? "Set" : "Clear");
+
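+ /*
+ * The endpoint must be in the Stopped state before the Halt
+ * Endpoint command can be queued.
+ */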
+ if (value) {
+ ret = cdnsp_cmd_stop_ep(pdev, pep);
+ if (ret)
+ return ret;
+
+ if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_STOPPED) {
+ cdnsp_queue_halt_endpoint(pdev, pep->idx);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+ }
+
+ pep->ep_state |= EP_HALTED;
+ } else {
+ /*
+ * In device mode, the driver can issue the Reset Endpoint command
+ * from any endpoint state.
+ */
+ cdnsp_queue_reset_ep(pdev, pep->idx);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+ trace_cdnsp_handle_cmd_reset_ep(pep->out_ctx);
+
+ if (ret)
+ return ret;
+
+ pep->ep_state &= ~EP_HALTED;
+
+ if (pep->idx != 0 && !(pep->ep_state & EP_WEDGE))
+ cdnsp_ring_doorbell_for_active_rings(pdev, pep);
+
+ pep->ep_state &= ~EP_WEDGE;
+ }
+
+ return 0;
+}
+
+static int cdnsp_update_eps_configuration(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep)
+{
+ struct cdnsp_input_control_ctx *ctrl_ctx;
+ struct cdnsp_slot_ctx *slot_ctx;
+ int ret = 0;
+ u32 ep_sts;
+ int i;
+
+ ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
+
+ /* Don't issue the command if there are no endpoints to update. */
+ if (ctrl_ctx->add_flags == 0 && ctrl_ctx->drop_flags == 0)
+ return 0;
+
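+ /* The slot context is always added; EP0 is never added or dropped here. */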
+ ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
+ ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
+ ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
+
+ /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
+ slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
+ for (i = CDNSP_ENDPOINTS_NUM; i >= 1; i--) {
+ __le32 le32 = cpu_to_le32(BIT(i));
+
+ if ((pdev->eps[i - 1].ring && !(ctrl_ctx->drop_flags & le32)) ||
+ (ctrl_ctx->add_flags & le32) || i == 1) {
+ slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
+ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
+ break;
+ }
+ }
+
+ ep_sts = GET_EP_CTX_STATE(pep->out_ctx);
+
+ if ((ctrl_ctx->add_flags != cpu_to_le32(SLOT_FLAG) &&
+ ep_sts == EP_STATE_DISABLED) ||
+ (ep_sts != EP_STATE_DISABLED && ctrl_ctx->drop_flags))
+ ret = cdnsp_configure_endpoint(pdev);
+
+ trace_cdnsp_configure_endpoint(cdnsp_get_slot_ctx(&pdev->out_ctx));
+ trace_cdnsp_handle_cmd_config_ep(pep->out_ctx);
+
+ cdnsp_zero_in_ctx(pdev);
+
+ return ret;
+}
+
+/*
+ * This submits a Reset Device Command, which will set the device state to 0,
+ * set the device address to 0, and disable all the endpoints except the default
+ * control endpoint. The USB core should come back and call
+ * cdnsp_setup_device(), and then re-set up the configuration.
+ */
+int cdnsp_reset_device(struct cdnsp_device *pdev)
+{
+ struct cdnsp_slot_ctx *slot_ctx;
+ int slot_state;
+ int ret, i;
+
+ slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
+ slot_ctx->dev_info = 0;
+ pdev->device_address = 0;
+
+ /* If device is not setup, there is no point in resetting it. */
+ slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
+ slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
+ trace_cdnsp_reset_device(slot_ctx);
+
+ if (slot_state <= SLOT_STATE_DEFAULT &&
+ pdev->eps[0].ep_state & EP_HALTED) {
+ cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0);
+ }
+
+ /*
+ * During the Reset Device command, the controller transitions the
+ * endpoint ep0 to the Running state.
+ */
+ pdev->eps[0].ep_state &= ~(EP_STOPPED | EP_HALTED);
+ pdev->eps[0].ep_state |= EP_ENABLED;
+
+ if (slot_state <= SLOT_STATE_DEFAULT)
+ return 0;
+
+ cdnsp_queue_reset_device(pdev);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+
+ /*
+ * After the Reset Device command, all non-default endpoints
+ * are in the Disabled state.
+ */
+ for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i)
+ pdev->eps[i].ep_state |= EP_STOPPED;
+
+ trace_cdnsp_handle_cmd_reset_dev(slot_ctx);
+
+ if (ret)
+ dev_err(pdev->dev, "Reset device failed with error code %d",
+ ret);
+
+ return ret;
+}
+
+/*
+ * Sets the MaxPStreams field and the Linear Stream Array field.
+ * Sets the dequeue pointer to the stream context array.
+ */
+static void cdnsp_setup_streams_ep_input_ctx(struct cdnsp_device *pdev,
+ struct cdnsp_ep_ctx *ep_ctx,
+ struct cdnsp_stream_info *stream_info)
+{
+ u32 max_primary_streams;
+
+ /*
+ * MaxPStreams is the number of stream context array entries, not the
+ * number we're actually using. Must be in 2^(MaxPstreams + 1) format.
+ * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9, etc.
+ */
+ max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
+ ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
+ ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
+ | EP_HAS_LSA);
+ ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
+}
+
+/*
+ * Drivers use this function to prepare bulk endpoints to use streams.
+ *
+ * Don't allow the call to succeed if the endpoint only supports one stream
+ * (which means it doesn't support streams at all).
+ */
+int cdnsp_alloc_streams(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+ unsigned int num_streams = usb_ss_max_streams(pep->endpoint.comp_desc);
+ unsigned int num_stream_ctxs;
+ int ret;
+
+ if (num_streams == 0)
+ return 0;
+
+ if (num_streams > STREAM_NUM_STREAMS)
+ return -EINVAL;
+
+ /*
+ * Add two to the number of streams requested to account for
+ * stream 0, which is reserved for controller usage, and one additional
+ * stream for the TASK SET FULL response.
+ */
+ num_streams += 2;
+
+ /* The stream context array size must be a power of two */
+ num_stream_ctxs = roundup_pow_of_two(num_streams);
+
+ trace_cdnsp_stream_number(pep, num_stream_ctxs, num_streams);
+
+ ret = cdnsp_alloc_stream_info(pdev, pep, num_stream_ctxs, num_streams);
+ if (ret)
+ return ret;
+
+ cdnsp_setup_streams_ep_input_ctx(pdev, pep->in_ctx, &pep->stream_info);
+
+ pep->ep_state |= EP_HAS_STREAMS;
+ pep->stream_info.td_count = 0;
+ pep->stream_info.first_prime_det = 0;
+
+ /* Subtract 1 for stream 0, which drivers can't use. */
+ return num_streams - 1;
+}
+
+int cdnsp_disable_slot(struct cdnsp_device *pdev)
+{
+ int ret;
+
+ cdnsp_queue_slot_control(pdev, TRB_DISABLE_SLOT);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+
+ pdev->slot_id = 0;
+ pdev->active_port = NULL;
+
+ trace_cdnsp_handle_cmd_disable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));
+
+ memset(pdev->in_ctx.bytes, 0, CDNSP_CTX_SIZE);
+ memset(pdev->out_ctx.bytes, 0, CDNSP_CTX_SIZE);
+
+ return ret;
+}
+
+int cdnsp_enable_slot(struct cdnsp_device *pdev)
+{
+ struct cdnsp_slot_ctx *slot_ctx;
+ int slot_state;
+ int ret;
+
+ /* Enable the slot only if it is currently disabled. */
+ slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
+ slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
+
+ if (slot_state != SLOT_STATE_DISABLED)
+ return 0;
+
+ cdnsp_queue_slot_control(pdev, TRB_ENABLE_SLOT);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+ if (ret)
+ goto show_trace;
+
+ pdev->slot_id = 1;
+
+show_trace:
+ trace_cdnsp_handle_cmd_enable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));
+
+ return ret;
+}
+
+/*
+ * Issue an Address Device command with BSR=1 if setup is SETUP_CONTEXT_ONLY
+ * or with BSR=0 if setup is SETUP_CONTEXT_ADDRESS.
+ */
+int cdnsp_setup_device(struct cdnsp_device *pdev, enum cdnsp_setup_dev setup)
+{
+ struct cdnsp_input_control_ctx *ctrl_ctx;
+ struct cdnsp_slot_ctx *slot_ctx;
+ int dev_state = 0;
+ int ret;
+
+ if (!pdev->slot_id) {
+ trace_cdnsp_slot_id("incorrect");
+ return -EINVAL;
+ }
+
+ if (!pdev->active_port->port_num)
+ return -EINVAL;
+
+ slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
+ dev_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
+
+ if (setup == SETUP_CONTEXT_ONLY && dev_state == SLOT_STATE_DEFAULT) {
+ trace_cdnsp_slot_already_in_default(slot_ctx);
+ return 0;
+ }
+
+ slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
+ ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
+
+ if (!slot_ctx->dev_info || dev_state == SLOT_STATE_DEFAULT) {
+ ret = cdnsp_setup_addressable_priv_dev(pdev);
+ if (ret)
+ return ret;
+ }
+
+ cdnsp_copy_ep0_dequeue_into_input_ctx(pdev);
+
+ ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
+ ctrl_ctx->drop_flags = 0;
+
+ trace_cdnsp_setup_device_slot(slot_ctx);
+
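+ /* Issue the Address Device command and wait for its completion. */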
+ cdnsp_queue_address_device(pdev, pdev->in_ctx.dma, setup);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+
+ trace_cdnsp_handle_cmd_addr_dev(cdnsp_get_slot_ctx(&pdev->out_ctx));
+
+ /* Zero the input context control for later use. */
+ ctrl_ctx->add_flags = 0;
+ ctrl_ctx->drop_flags = 0;
+
+ return ret;
+}
+
+void cdnsp_set_usb2_hardware_lpm(struct cdnsp_device *pdev,
+ struct usb_request *req,
+ int enable)
+{
+ if (pdev->active_port != &pdev->usb2_port || !pdev->gadget.lpm_capable)
+ return;
+
+ trace_cdnsp_lpm(enable);
+
+ if (enable)
+ writel(PORT_BESL(CDNSP_DEFAULT_BESL) | PORT_L1S_NYET | PORT_HLE,
+ &pdev->active_port->regs->portpmsc);
+ else
+ writel(PORT_L1S_NYET, &pdev->active_port->regs->portpmsc);
+}
+
+static int cdnsp_get_frame(struct cdnsp_device *pdev)
+{
+ return readl(&pdev->run_regs->microframe_index) >> 3;
+}
+
+static int cdnsp_gadget_ep_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct cdnsp_input_control_ctx *ctrl_ctx;
+ struct cdnsp_device *pdev;
+ struct cdnsp_ep *pep;
+ unsigned long flags;
+ u32 added_ctxs;
+ int ret;
+
+ if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT ||
+ !desc->wMaxPacketSize)
+ return -EINVAL;
+
+ pep = to_cdnsp_ep(ep);
+ pdev = pep->pdev;
+
+ if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED,
+ "%s is already enabled\n", pep->name))
+ return 0;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+
+ added_ctxs = cdnsp_get_endpoint_flag(desc);
+ if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
+ dev_err(pdev->dev, "ERROR: Bad endpoint number\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ pep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
+
+ if (pdev->gadget.speed == USB_SPEED_FULL) {
+ if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT)
+ pep->interval = desc->bInterval << 3;
+ if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC)
+ pep->interval = BIT(desc->bInterval - 1) << 3;
+ }
+
+ if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC) {
+ if (pep->interval > BIT(12)) {
+ dev_err(pdev->dev, "bInterval %d not supported\n",
+ desc->bInterval);
+ ret = -EINVAL;
+ goto unlock;
+ }
+ cdnsp_set_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
+ }
+
+ ret = cdnsp_endpoint_init(pdev, pep, GFP_ATOMIC);
+ if (ret)
+ goto unlock;
+
+ ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
+ ctrl_ctx->add_flags = cpu_to_le32(added_ctxs);
+ ctrl_ctx->drop_flags = 0;
+
+ ret = cdnsp_update_eps_configuration(pdev, pep);
+ if (ret) {
+ cdnsp_free_endpoint_rings(pdev, pep);
+ goto unlock;
+ }
+
+ pep->ep_state |= EP_ENABLED;
+ pep->ep_state &= ~EP_STOPPED;
+
+unlock:
+ trace_cdnsp_ep_enable_end(pep, 0);
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return ret;
+}
+
+static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
+{
+ struct cdnsp_input_control_ctx *ctrl_ctx;
+ struct cdnsp_request *preq;
+ struct cdnsp_device *pdev;
+ struct cdnsp_ep *pep;
+ unsigned long flags;
+ u32 drop_flag;
+ int ret = 0;
+
+ if (!ep)
+ return -EINVAL;
+
+ pep = to_cdnsp_ep(ep);
+ pdev = pep->pdev;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+
+ if (!(pep->ep_state & EP_ENABLED)) {
+ dev_err(pdev->dev, "%s is already disabled\n", pep->name);
+ ret = -EINVAL;
+ goto finish;
+ }
+
+ cdnsp_cmd_stop_ep(pdev, pep);
+ pep->ep_state |= EP_DIS_IN_RROGRESS;
+ cdnsp_cmd_flush_ep(pdev, pep);
+
+ /* Remove all queued USB requests. */
+ while (!list_empty(&pep->pending_list)) {
+ preq = next_request(&pep->pending_list);
+ cdnsp_ep_dequeue(pep, preq);
+ }
+
+ cdnsp_invalidate_ep_events(pdev, pep);
+
+ pep->ep_state &= ~EP_DIS_IN_RROGRESS;
+ drop_flag = cdnsp_get_endpoint_flag(pep->endpoint.desc);
+ ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
+ ctrl_ctx->drop_flags = cpu_to_le32(drop_flag);
+ ctrl_ctx->add_flags = 0;
+
+ cdnsp_endpoint_zero(pdev, pep);
+
+ ret = cdnsp_update_eps_configuration(pdev, pep);
+ cdnsp_free_endpoint_rings(pdev, pep);
+
+ pep->ep_state &= ~EP_ENABLED;
+ pep->ep_state |= EP_STOPPED;
+
+finish:
+ trace_cdnsp_ep_disable_end(pep, 0);
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return ret;
+}
+
+static struct usb_request *cdnsp_gadget_ep_alloc_request(struct usb_ep *ep,
+ gfp_t gfp_flags)
+{
+ struct cdnsp_ep *pep = to_cdnsp_ep(ep);
+ struct cdnsp_request *preq;
+
+ preq = kzalloc(sizeof(*preq), gfp_flags);
+ if (!preq)
+ return NULL;
+
+ preq->epnum = pep->number;
+ preq->pep = pep;
+
+ trace_cdnsp_alloc_request(preq);
+
+ return &preq->request;
+}
+
+static void cdnsp_gadget_ep_free_request(struct usb_ep *ep,
+ struct usb_request *request)
+{
+ struct cdnsp_request *preq = to_cdnsp_request(request);
+
+ trace_cdnsp_free_request(preq);
+ kfree(preq);
+}
+
+static int cdnsp_gadget_ep_queue(struct usb_ep *ep,
+ struct usb_request *request,
+ gfp_t gfp_flags)
+{
+ struct cdnsp_request *preq;
+ struct cdnsp_device *pdev;
+ struct cdnsp_ep *pep;
+ unsigned long flags;
+ int ret;
+
+ if (!request || !ep)
+ return -EINVAL;
+
+ pep = to_cdnsp_ep(ep);
+ pdev = pep->pdev;
+
+ if (!(pep->ep_state & EP_ENABLED)) {
+ dev_err(pdev->dev, "%s: can't queue to disabled endpoint\n",
+ pep->name);
+ return -EINVAL;
+ }
+
+ preq = to_cdnsp_request(request);
+ spin_lock_irqsave(&pdev->lock, flags);
+ ret = cdnsp_ep_enqueue(pep, preq);
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return ret;
+}
+
+static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep,
+ struct usb_request *request)
+{
+ struct cdnsp_ep *pep = to_cdnsp_ep(ep);
+ struct cdnsp_device *pdev = pep->pdev;
+ unsigned long flags;
+ int ret;
+
+ if (!pep->endpoint.desc) {
+ dev_err(pdev->dev,
+ "%s: can't dequeue to disabled endpoint\n",
+ pep->name);
+ return -ESHUTDOWN;
+ }
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ ret = cdnsp_ep_dequeue(pep, to_cdnsp_request(request));
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return ret;
+}
+
+static int cdnsp_gadget_ep_set_halt(struct usb_ep *ep, int value)
+{
+ struct cdnsp_ep *pep = to_cdnsp_ep(ep);
+ struct cdnsp_device *pdev = pep->pdev;
+ struct cdnsp_request *preq;
+ unsigned long flags = 0;
+ int ret;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+
+ preq = next_request(&pep->pending_list);
+ if (value) {
+ if (preq) {
+ trace_cdnsp_ep_busy_try_halt_again(pep, 0);
+ ret = -EAGAIN;
+ goto done;
+ }
+ }
+
+ ret = cdnsp_halt_endpoint(pdev, pep, value);
+
+done:
+ spin_unlock_irqrestore(&pdev->lock, flags);
+ return ret;
+}
+
+static int cdnsp_gadget_ep_set_wedge(struct usb_ep *ep)
+{
+ struct cdnsp_ep *pep = to_cdnsp_ep(ep);
+ struct cdnsp_device *pdev = pep->pdev;
+ unsigned long flags = 0;
+ int ret;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ pep->ep_state |= EP_WEDGE;
+ ret = cdnsp_halt_endpoint(pdev, pep, 1);
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return ret;
+}
+
+static const struct usb_ep_ops cdnsp_gadget_ep0_ops = {
+ .enable = cdnsp_gadget_ep_enable,
+ .disable = cdnsp_gadget_ep_disable,
+ .alloc_request = cdnsp_gadget_ep_alloc_request,
+ .free_request = cdnsp_gadget_ep_free_request,
+ .queue = cdnsp_gadget_ep_queue,
+ .dequeue = cdnsp_gadget_ep_dequeue,
+ .set_halt = cdnsp_gadget_ep_set_halt,
+ .set_wedge = cdnsp_gadget_ep_set_wedge,
+};
+
+static const struct usb_ep_ops cdnsp_gadget_ep_ops = {
+ .enable = cdnsp_gadget_ep_enable,
+ .disable = cdnsp_gadget_ep_disable,
+ .alloc_request = cdnsp_gadget_ep_alloc_request,
+ .free_request = cdnsp_gadget_ep_free_request,
+ .queue = cdnsp_gadget_ep_queue,
+ .dequeue = cdnsp_gadget_ep_dequeue,
+ .set_halt = cdnsp_gadget_ep_set_halt,
+ .set_wedge = cdnsp_gadget_ep_set_wedge,
+};
+
+void cdnsp_gadget_giveback(struct cdnsp_ep *pep,
+ struct cdnsp_request *preq,
+ int status)
+{
+ struct cdnsp_device *pdev = pep->pdev;
+
+ list_del(&preq->list);
+
+ if (preq->request.status == -EINPROGRESS)
+ preq->request.status = status;
+
+ usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
+ preq->direction);
+
+ trace_cdnsp_request_giveback(preq);
+
+ if (preq != &pdev->ep0_preq) {
+ spin_unlock(&pdev->lock);
+ usb_gadget_giveback_request(&pep->endpoint, &preq->request);
+ spin_lock(&pdev->lock);
+ }
+}
+
+static struct usb_endpoint_descriptor cdnsp_gadget_ep0_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+};
+
+static int cdnsp_run(struct cdnsp_device *pdev,
+ enum usb_device_speed speed)
+{
+ u32 fs_speed = 0;
+ u64 temp_64;
+ u32 temp;
+ int ret;
+
+ temp_64 = cdnsp_read_64(&pdev->ir_set->erst_dequeue);
+ temp_64 &= ~ERST_PTR_MASK;
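+
+ /* The interrupt moderation interval is programmed in 250 ns units. */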
+ temp = readl(&pdev->ir_set->irq_control);
+ temp &= ~IMOD_INTERVAL_MASK;
+ temp |= ((IMOD_DEFAULT_INTERVAL / 250) & IMOD_INTERVAL_MASK);
+ writel(temp, &pdev->ir_set->irq_control);
+
+ temp = readl(&pdev->port3x_regs->mode_addr);
+
+ switch (speed) {
+ case USB_SPEED_SUPER_PLUS:
+ temp |= CFG_3XPORT_SSP_SUPPORT;
+ break;
+ case USB_SPEED_SUPER:
+ temp &= ~CFG_3XPORT_SSP_SUPPORT;
+ break;
+ case USB_SPEED_HIGH:
+ break;
+ case USB_SPEED_FULL:
+ fs_speed = PORT_REG6_FORCE_FS;
+ break;
+ default:
+ dev_err(pdev->dev, "invalid maximum_speed parameter %d\n",
+ speed);
+ fallthrough;
+ case USB_SPEED_UNKNOWN:
+ /* Default to superspeed. */
+ speed = USB_SPEED_SUPER;
+ break;
+ }
+
+ if (speed >= USB_SPEED_SUPER) {
+ writel(temp, &pdev->port3x_regs->mode_addr);
+ cdnsp_set_link_state(pdev, &pdev->usb3_port.regs->portsc,
+ XDEV_RXDETECT);
+ } else {
+ cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
+ }
+
+ cdnsp_set_link_state(pdev, &pdev->usb2_port.regs->portsc,
+ XDEV_RXDETECT);
+
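+ /* ep0 maxpacket defaults to 512; it is adjusted per speed on bus reset. */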
+ cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+
+ writel(PORT_REG6_L1_L0_HW_EN | fs_speed, &pdev->port20_regs->port_reg6);
+
+ ret = cdnsp_start(pdev);
+ if (ret) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ temp = readl(&pdev->op_regs->command);
+ temp |= (CMD_INTE);
+ writel(temp, &pdev->op_regs->command);
+
+ temp = readl(&pdev->ir_set->irq_pending);
+ writel(IMAN_IE_SET(temp), &pdev->ir_set->irq_pending);
+
+ trace_cdnsp_init("Controller ready to work");
+ return 0;
+err:
+ cdnsp_halt(pdev);
+ return ret;
+}
+
+static int cdnsp_gadget_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+ enum usb_device_speed max_speed = driver->max_speed;
+ struct cdnsp_device *pdev = gadget_to_cdnsp(g);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ pdev->gadget_driver = driver;
+
+ /* Limit speed if necessary. */
+ max_speed = min(driver->max_speed, g->max_speed);
+ ret = cdnsp_run(pdev, max_speed);
+
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return ret;
+}
+
+/*
+ * Update Event Ring Dequeue Pointer:
+ * - When all events have finished
+ * - To avoid "Event Ring Full Error" condition
+ */
+void cdnsp_update_erst_dequeue(struct cdnsp_device *pdev,
+ union cdnsp_trb *event_ring_deq,
+ u8 clear_ehb)
+{
+ u64 temp_64;
+ dma_addr_t deq;
+
+ temp_64 = cdnsp_read_64(&pdev->ir_set->erst_dequeue);
+
+ /* If necessary, update the HW's version of the event ring deq ptr. */
+ if (event_ring_deq != pdev->event_ring->dequeue) {
+ deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
+ pdev->event_ring->dequeue);
+ temp_64 &= ERST_PTR_MASK;
+ temp_64 |= ((u64)deq & (u64)~ERST_PTR_MASK);
+ }
+
+ /* Clear the event handler busy flag (RW1C). */
+ if (clear_ehb)
+ temp_64 |= ERST_EHB;
+ else
+ temp_64 &= ~ERST_EHB;
+
+ cdnsp_write_64(temp_64, &pdev->ir_set->erst_dequeue);
+}
+
+static void cdnsp_clear_cmd_ring(struct cdnsp_device *pdev)
+{
+ struct cdnsp_segment *seg;
+ u64 val_64;
+ int i;
+
+ cdnsp_initialize_ring_info(pdev->cmd_ring);
+
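+ /* Zero all TRBs in each segment except for the trailing link TRB. */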
+ seg = pdev->cmd_ring->first_seg;
+ for (i = 0; i < pdev->cmd_ring->num_segs; i++) {
+ memset(seg->trbs, 0,
+ sizeof(union cdnsp_trb) * (TRBS_PER_SEGMENT - 1));
+ seg = seg->next;
+ }
+
+ /* Set the address in the Command Ring Control register. */
+ val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
+ val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
+ (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
+ pdev->cmd_ring->cycle_state;
+ cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);
+}
+
+static void cdnsp_consume_all_events(struct cdnsp_device *pdev)
+{
+ struct cdnsp_segment *event_deq_seg;
+ union cdnsp_trb *event_ring_deq;
+ union cdnsp_trb *event;
+ u32 cycle_bit;
+
+ event_ring_deq = pdev->event_ring->dequeue;
+ event_deq_seg = pdev->event_ring->deq_seg;
+ event = pdev->event_ring->dequeue;
+
+ /* Update ring dequeue pointer. */
+ while (1) {
+ cycle_bit = (le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE);
+
+ /* Does the controller or driver own the TRB? */
+ if (cycle_bit != pdev->event_ring->cycle_state)
+ break;
+
+ cdnsp_inc_deq(pdev, pdev->event_ring);
+
+ if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
+ event++;
+ continue;
+ }
+
+ if (cdnsp_last_trb_on_ring(pdev->event_ring, event_deq_seg,
+ event))
+ cycle_bit ^= 1;
+
+ event_deq_seg = event_deq_seg->next;
+ event = event_deq_seg->trbs;
+ }
+
+ cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
+}
+
+static void cdnsp_stop(struct cdnsp_device *pdev)
+{
+ u32 temp;
+
+ cdnsp_cmd_flush_ep(pdev, &pdev->eps[0]);
+
+ /* Remove internally queued request for ep0. */
+ if (!list_empty(&pdev->eps[0].pending_list)) {
+ struct cdnsp_request *req;
+
+ req = next_request(&pdev->eps[0].pending_list);
+ if (req == &pdev->ep0_preq)
+ cdnsp_ep_dequeue(&pdev->eps[0], req);
+ }
+
+ cdnsp_disable_port(pdev, &pdev->usb2_port.regs->portsc);
+ cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
+ cdnsp_disable_slot(pdev);
+ cdnsp_halt(pdev);
+
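+ /* Clear the event interrupt status (RW1C) and disable interrupts. */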
+ temp = readl(&pdev->op_regs->status);
+ writel((temp & ~0x1fff) | STS_EINT, &pdev->op_regs->status);
+ temp = readl(&pdev->ir_set->irq_pending);
+ writel(IMAN_IE_CLEAR(temp), &pdev->ir_set->irq_pending);
+
+ cdnsp_clear_port_change_bit(pdev, &pdev->usb2_port.regs->portsc);
+ cdnsp_clear_port_change_bit(pdev, &pdev->usb3_port.regs->portsc);
+
+ /* Clear interrupt line */
+ temp = readl(&pdev->ir_set->irq_pending);
+ temp |= IMAN_IP;
+ writel(temp, &pdev->ir_set->irq_pending);
+
+ cdnsp_consume_all_events(pdev);
+ cdnsp_clear_cmd_ring(pdev);
+
+ trace_cdnsp_exit("Controller stopped.");
+}
+
+/*
+ * Stop controller.
+ * This function is called by the gadget core when the driver is removed.
+ * Disable slot, disable IRQs, and quiesce the controller.
+ */
+static int cdnsp_gadget_udc_stop(struct usb_gadget *g)
+{
+ struct cdnsp_device *pdev = gadget_to_cdnsp(g);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ cdnsp_stop(pdev);
+ pdev->gadget_driver = NULL;
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return 0;
+}
+
+static int cdnsp_gadget_get_frame(struct usb_gadget *g)
+{
+ struct cdnsp_device *pdev = gadget_to_cdnsp(g);
+
+ return cdnsp_get_frame(pdev);
+}
+
+static void __cdnsp_gadget_wakeup(struct cdnsp_device *pdev)
+{
+ struct cdnsp_port_regs __iomem *port_regs;
+ u32 portpm, portsc;
+
+ port_regs = pdev->active_port->regs;
+ portsc = readl(&port_regs->portsc) & PORT_PLS_MASK;
+
+ /* Remote wakeup feature is not enabled by host. */
+ if (pdev->gadget.speed < USB_SPEED_SUPER && portsc == XDEV_U2) {
+ portpm = readl(&port_regs->portpmsc);
+
+ if (!(portpm & PORT_RWE))
+ return;
+ }
+
+ if (portsc == XDEV_U3 && !pdev->may_wakeup)
+ return;
+
+ cdnsp_set_link_state(pdev, &port_regs->portsc, XDEV_U0);
+
+ pdev->cdnsp_state |= CDNSP_WAKEUP_PENDING;
+}
+
+static int cdnsp_gadget_wakeup(struct usb_gadget *g)
+{
+ struct cdnsp_device *pdev = gadget_to_cdnsp(g);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ __cdnsp_gadget_wakeup(pdev);
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return 0;
+}
+
+static int cdnsp_gadget_set_selfpowered(struct usb_gadget *g,
+ int is_selfpowered)
+{
+ struct cdnsp_device *pdev = gadget_to_cdnsp(g);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ g->is_selfpowered = !!is_selfpowered;
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return 0;
+}
+
+static int cdnsp_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct cdnsp_device *pdev = gadget_to_cdnsp(gadget);
+ struct cdns *cdns = dev_get_drvdata(pdev->dev);
+
+ trace_cdnsp_pullup(is_on);
+
+ if (!is_on) {
+ cdnsp_reset_device(pdev);
+ cdns_clear_vbus(cdns);
+ } else {
+ cdns_set_vbus(cdns);
+ }
+ return 0;
+}
+
+static const struct usb_gadget_ops cdnsp_gadget_ops = {
+ .get_frame = cdnsp_gadget_get_frame,
+ .wakeup = cdnsp_gadget_wakeup,
+ .set_selfpowered = cdnsp_gadget_set_selfpowered,
+ .pullup = cdnsp_gadget_pullup,
+ .udc_start = cdnsp_gadget_udc_start,
+ .udc_stop = cdnsp_gadget_udc_stop,
+};
+
+static void cdnsp_get_ep_buffering(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep)
+{
+ void __iomem *reg = &pdev->cap_regs->hc_capbase;
+ int endpoints;
+
+ reg += cdnsp_find_next_ext_cap(reg, 0, XBUF_CAP_ID);
+
+ if (!pep->direction) {
+ pep->buffering = readl(reg + XBUF_RX_TAG_MASK_0_OFFSET);
+ pep->buffering_period = readl(reg + XBUF_RX_TAG_MASK_1_OFFSET);
+ pep->buffering = (pep->buffering + 1) / 2;
+ pep->buffering_period = (pep->buffering_period + 1) / 2;
+ return;
+ }
+
+ endpoints = HCS_ENDPOINTS(pdev->hcs_params1) / 2;
+
+ /* Advance reg to the XBUF_TX_TAG_MASK_0 register. */
+ reg += XBUF_TX_CMD_OFFSET + (endpoints * 2 + 2) * sizeof(u32);
+ /* Set reg to the XBUF_TX_TAG_MASK_N register related to this endpoint. */
+ reg += pep->number * sizeof(u32) * 2;
+
+ pep->buffering = (readl(reg) + 1) / 2;
+ pep->buffering_period = pep->buffering;
+}
+
+static int cdnsp_gadget_init_endpoints(struct cdnsp_device *pdev)
+{
+ int max_streams = HCC_MAX_PSA(pdev->hcc_params);
+ struct cdnsp_ep *pep;
+ int i;
+
+ INIT_LIST_HEAD(&pdev->gadget.ep_list);
+
+ if (max_streams < STREAM_LOG_STREAMS) {
+ dev_err(pdev->dev, "Stream size %d not supported\n",
+ max_streams);
+ return -EINVAL;
+ }
+
+ max_streams = STREAM_LOG_STREAMS;
+
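+ /*
+ * pdev->eps[] interleaves directions: eps[0] represents the
+ * bidirectional ep0, followed by ep1out, ep1in, ep2out, ep2in, ...
+ */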
+ for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
+ bool direction = !(i & 1); /* Start from OUT endpoint. */
+ u8 epnum = ((i + 1) >> 1);
+
+ if (!CDNSP_IF_EP_EXIST(pdev, epnum, direction))
+ continue;
+
+ pep = &pdev->eps[i];
+ pep->pdev = pdev;
+ pep->number = epnum;
+ pep->direction = direction; /* 0 for OUT, 1 for IN. */
+
+ /*
+ * Ep0 is bidirectional, so ep0in and ep0out are represented by
+ * pdev->eps[0]
+ */
+ if (epnum == 0) {
+ snprintf(pep->name, sizeof(pep->name), "ep%d%s",
+ epnum, "BiDir");
+
+ pep->idx = 0;
+ usb_ep_set_maxpacket_limit(&pep->endpoint, 512);
+ pep->endpoint.maxburst = 1;
+ pep->endpoint.ops = &cdnsp_gadget_ep0_ops;
+ pep->endpoint.desc = &cdnsp_gadget_ep0_desc;
+ pep->endpoint.comp_desc = NULL;
+ pep->endpoint.caps.type_control = true;
+ pep->endpoint.caps.dir_in = true;
+ pep->endpoint.caps.dir_out = true;
+
+ pdev->ep0_preq.epnum = pep->number;
+ pdev->ep0_preq.pep = pep;
+ pdev->gadget.ep0 = &pep->endpoint;
+ } else {
+ snprintf(pep->name, sizeof(pep->name), "ep%d%s",
+ epnum, (pep->direction) ? "in" : "out");
+
+ pep->idx = (epnum * 2 + (direction ? 1 : 0)) - 1;
+ usb_ep_set_maxpacket_limit(&pep->endpoint, 1024);
+
+ pep->endpoint.max_streams = max_streams;
+ pep->endpoint.ops = &cdnsp_gadget_ep_ops;
+ list_add_tail(&pep->endpoint.ep_list,
+ &pdev->gadget.ep_list);
+
+ pep->endpoint.caps.type_iso = true;
+ pep->endpoint.caps.type_bulk = true;
+ pep->endpoint.caps.type_int = true;
+
+ pep->endpoint.caps.dir_in = direction;
+ pep->endpoint.caps.dir_out = !direction;
+ }
+
+ pep->endpoint.name = pep->name;
+ pep->in_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, pep->idx);
+ pep->out_ctx = cdnsp_get_ep_ctx(&pdev->out_ctx, pep->idx);
+ cdnsp_get_ep_buffering(pdev, pep);
+
+ dev_dbg(pdev->dev, "Init %s, MPS: %04x SupType: "
+ "CTRL: %s, INT: %s, BULK: %s, ISOC %s, "
+ "SupDir IN: %s, OUT: %s\n",
+ pep->name, 1024,
+ (pep->endpoint.caps.type_control) ? "yes" : "no",
+ (pep->endpoint.caps.type_int) ? "yes" : "no",
+ (pep->endpoint.caps.type_bulk) ? "yes" : "no",
+ (pep->endpoint.caps.type_iso) ? "yes" : "no",
+ (pep->endpoint.caps.dir_in) ? "yes" : "no",
+ (pep->endpoint.caps.dir_out) ? "yes" : "no");
+
+ INIT_LIST_HEAD(&pep->pending_list);
+ }
+
+ return 0;
+}
+
+static void cdnsp_gadget_free_endpoints(struct cdnsp_device *pdev)
+{
+ struct cdnsp_ep *pep;
+ int i;
+
+ for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
+ pep = &pdev->eps[i];
+ if (pep->number != 0 && pep->out_ctx)
+ list_del(&pep->endpoint.ep_list);
+ }
+}
+
+void cdnsp_disconnect_gadget(struct cdnsp_device *pdev)
+{
+ pdev->cdnsp_state |= CDNSP_STATE_DISCONNECT_PENDING;
+
+ if (pdev->gadget_driver && pdev->gadget_driver->disconnect) {
+ spin_unlock(&pdev->lock);
+ pdev->gadget_driver->disconnect(&pdev->gadget);
+ spin_lock(&pdev->lock);
+ }
+
+ pdev->gadget.speed = USB_SPEED_UNKNOWN;
+ usb_gadget_set_state(&pdev->gadget, USB_STATE_NOTATTACHED);
+
+ pdev->cdnsp_state &= ~CDNSP_STATE_DISCONNECT_PENDING;
+}
+
+void cdnsp_suspend_gadget(struct cdnsp_device *pdev)
+{
+ if (pdev->gadget_driver && pdev->gadget_driver->suspend) {
+ spin_unlock(&pdev->lock);
+ pdev->gadget_driver->suspend(&pdev->gadget);
+ spin_lock(&pdev->lock);
+ }
+}
+
+void cdnsp_resume_gadget(struct cdnsp_device *pdev)
+{
+ if (pdev->gadget_driver && pdev->gadget_driver->resume) {
+ spin_unlock(&pdev->lock);
+ pdev->gadget_driver->resume(&pdev->gadget);
+ spin_lock(&pdev->lock);
+ }
+}
+
+void cdnsp_irq_reset(struct cdnsp_device *pdev)
+{
+ struct cdnsp_port_regs __iomem *port_regs;
+
+ cdnsp_reset_device(pdev);
+
+ port_regs = pdev->active_port->regs;
+ pdev->gadget.speed = cdnsp_port_speed(readl(port_regs));
+
+ spin_unlock(&pdev->lock);
+ usb_gadget_udc_reset(&pdev->gadget, pdev->gadget_driver);
+ spin_lock(&pdev->lock);
+
+ switch (pdev->gadget.speed) {
+ case USB_SPEED_SUPER_PLUS:
+ case USB_SPEED_SUPER:
+ cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+ pdev->gadget.ep0->maxpacket = 512;
+ break;
+ case USB_SPEED_HIGH:
+ case USB_SPEED_FULL:
+ cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
+ pdev->gadget.ep0->maxpacket = 64;
+ break;
+ default:
+ /* Low speed is not supported. */
+ dev_err(pdev->dev, "Unknown device speed\n");
+ break;
+ }
+
+ cdnsp_clear_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
+ cdnsp_setup_device(pdev, SETUP_CONTEXT_ONLY);
+ usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT);
+}
+
+static void cdnsp_get_rev_cap(struct cdnsp_device *pdev)
+{
+ void __iomem *reg = &pdev->cap_regs->hc_capbase;
+
+ reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP);
+ pdev->rev_cap = reg;
+
+ dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n",
+ readl(&pdev->rev_cap->ctrl_revision),
+ readl(&pdev->rev_cap->rtl_revision),
+ readl(&pdev->rev_cap->ep_supported),
+ readl(&pdev->rev_cap->rx_buff_size),
+ readl(&pdev->rev_cap->tx_buff_size));
+}
+
+static int cdnsp_gen_setup(struct cdnsp_device *pdev)
+{
+ int ret;
+ u32 reg;
+
+ pdev->cap_regs = pdev->regs;
+ pdev->op_regs = pdev->regs +
+ HC_LENGTH(readl(&pdev->cap_regs->hc_capbase));
+ pdev->run_regs = pdev->regs +
+ (readl(&pdev->cap_regs->run_regs_off) & RTSOFF_MASK);
+
+ /* Cache read-only capability registers */
+ pdev->hcs_params1 = readl(&pdev->cap_regs->hcs_params1);
+ pdev->hcc_params = readl(&pdev->cap_regs->hc_capbase);
+ pdev->hci_version = HC_VERSION(pdev->hcc_params);
+ pdev->hcc_params = readl(&pdev->cap_regs->hcc_params);
+
+ cdnsp_get_rev_cap(pdev);
+
+ /* Make sure the Device Controller is halted. */
+ ret = cdnsp_halt(pdev);
+ if (ret)
+ return ret;
+
+ /* Reset the internal controller memory state and registers. */
+ ret = cdnsp_reset(pdev);
+ if (ret)
+ return ret;
+
+ /*
+ * Set dma_mask and coherent_dma_mask to 64 bits
+ * if the controller supports 64-bit addressing.
+ */
+ if (HCC_64BIT_ADDR(pdev->hcc_params) &&
+ !dma_set_mask(pdev->dev, DMA_BIT_MASK(64))) {
+ dev_dbg(pdev->dev, "Enabling 64-bit DMA addresses.\n");
+ dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(64));
+ } else {
+ /*
+ * This is to avoid error in cases where a 32-bit USB
+ * controller is used on a 64-bit capable system.
+ */
+ ret = dma_set_mask(pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ dev_dbg(pdev->dev, "Enabling 32-bit DMA addresses.\n");
+ dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(32));
+ }
+
+ spin_lock_init(&pdev->lock);
+
+ ret = cdnsp_mem_init(pdev);
+ if (ret)
+ return ret;
+
+ /*
+ * Software workaround for U1: after transition to U1 the controller
+ * starts gating the clock, and in some cases this causes the
+ * controller to hang.
+ */
+ reg = readl(&pdev->port3x_regs->mode_2);
+ reg &= ~CFG_3XPORT_U1_PIPE_CLK_GATE_EN;
+ writel(reg, &pdev->port3x_regs->mode_2);
+
+ return 0;
+}
+
+static int __cdnsp_gadget_init(struct cdns *cdns)
+{
+ struct cdnsp_device *pdev;
+ u32 max_speed;
+ int ret = -ENOMEM;
+
+ cdns_drd_gadget_on(cdns);
+
+ pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
+ if (!pdev)
+ return -ENOMEM;
+
+ pm_runtime_get_sync(cdns->dev);
+
+ cdns->gadget_dev = pdev;
+ pdev->dev = cdns->dev;
+ pdev->regs = cdns->dev_regs;
+ max_speed = usb_get_maximum_speed(cdns->dev);
+
+ switch (max_speed) {
+ case USB_SPEED_FULL:
+ case USB_SPEED_HIGH:
+ case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
+ break;
+ default:
+ dev_err(cdns->dev, "invalid speed parameter %d\n", max_speed);
+ fallthrough;
+ case USB_SPEED_UNKNOWN:
+ /* Default to SSP */
+ max_speed = USB_SPEED_SUPER_PLUS;
+ break;
+ }
+
+ pdev->gadget.ops = &cdnsp_gadget_ops;
+ pdev->gadget.name = "cdnsp-gadget";
+ pdev->gadget.speed = USB_SPEED_UNKNOWN;
+ pdev->gadget.sg_supported = 1;
+ pdev->gadget.max_speed = USB_SPEED_SUPER_PLUS;
+ pdev->gadget.lpm_capable = 1;
+
+ pdev->setup_buf = kzalloc(CDNSP_EP0_SETUP_SIZE, GFP_KERNEL);
+ if (!pdev->setup_buf)
+ goto free_pdev;
+
+ /*
+ * The controller supports unaligned buffers, but aligned buffers
+ * should improve performance.
+ */
+ pdev->gadget.quirk_ep_out_aligned_size = true;
+
+ ret = cdnsp_gen_setup(pdev);
+ if (ret) {
+ dev_err(pdev->dev, "Generic initialization failed %d\n", ret);
+ goto free_setup;
+ }
+
+ ret = cdnsp_gadget_init_endpoints(pdev);
+ if (ret) {
+ dev_err(pdev->dev, "failed to initialize endpoints\n");
+ goto halt_pdev;
+ }
+
+ ret = usb_add_gadget_udc(pdev->dev, &pdev->gadget);
+ if (ret) {
+ dev_err(pdev->dev, "failed to register udc\n");
+ goto free_endpoints;
+ }
+
+ ret = devm_request_threaded_irq(pdev->dev, cdns->dev_irq,
+ cdnsp_irq_handler,
+ cdnsp_thread_irq_handler, IRQF_SHARED,
+ dev_name(pdev->dev), pdev);
+ if (ret)
+ goto del_gadget;
+
+ return 0;
+
+del_gadget:
+ usb_del_gadget_udc(&pdev->gadget);
+free_endpoints:
+ cdnsp_gadget_free_endpoints(pdev);
+halt_pdev:
+ cdnsp_halt(pdev);
+ cdnsp_reset(pdev);
+ cdnsp_mem_cleanup(pdev);
+free_setup:
+ kfree(pdev->setup_buf);
+free_pdev:
+ kfree(pdev);
+
+ return ret;
+}
+
+static void cdnsp_gadget_exit(struct cdns *cdns)
+{
+ struct cdnsp_device *pdev = cdns->gadget_dev;
+
+ devm_free_irq(pdev->dev, cdns->dev_irq, pdev);
+ pm_runtime_mark_last_busy(cdns->dev);
+ pm_runtime_put_autosuspend(cdns->dev);
+ usb_del_gadget_udc(&pdev->gadget);
+ cdnsp_gadget_free_endpoints(pdev);
+ cdnsp_mem_cleanup(pdev);
+ kfree(pdev);
+ cdns->gadget_dev = NULL;
+ cdns_drd_gadget_off(cdns);
+}
+
+static int cdnsp_gadget_suspend(struct cdns *cdns, bool do_wakeup)
+{
+ struct cdnsp_device *pdev = cdns->gadget_dev;
+ unsigned long flags;
+
+ if (pdev->link_state == XDEV_U3)
+ return 0;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ cdnsp_disconnect_gadget(pdev);
+ cdnsp_stop(pdev);
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return 0;
+}
+
+static int cdnsp_gadget_resume(struct cdns *cdns, bool hibernated)
+{
+ struct cdnsp_device *pdev = cdns->gadget_dev;
+ enum usb_device_speed max_speed;
+ unsigned long flags;
+ int ret;
+
+ if (!pdev->gadget_driver)
+ return 0;
+
+ spin_lock_irqsave(&pdev->lock, flags);
+ max_speed = pdev->gadget_driver->max_speed;
+
+ /* Limit speed if necessary. */
+ max_speed = min(max_speed, pdev->gadget.max_speed);
+
+ ret = cdnsp_run(pdev, max_speed);
+
+ if (pdev->link_state == XDEV_U3)
+ __cdnsp_gadget_wakeup(pdev);
+
+ spin_unlock_irqrestore(&pdev->lock, flags);
+
+ return ret;
+}
+
+/**
+ * cdnsp_gadget_init - initialize device structure
+ * @cdns: cdnsp instance
+ *
+ * This function initializes the gadget.
+ */
+int cdnsp_gadget_init(struct cdns *cdns)
+{
+ struct cdns_role_driver *rdrv;
+
+ rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
+ if (!rdrv)
+ return -ENOMEM;
+
+ rdrv->start = __cdnsp_gadget_init;
+ rdrv->stop = cdnsp_gadget_exit;
+ rdrv->suspend = cdnsp_gadget_suspend;
+ rdrv->resume = cdnsp_gadget_resume;
+ rdrv->state = CDNS_ROLE_STATE_INACTIVE;
+ rdrv->name = "gadget";
+ cdns->roles[USB_ROLE_DEVICE] = rdrv;
+
+ return 0;
+}
diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
new file mode 100644
index 000000000000..6bbb26548c04
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-gadget.h
@@ -0,0 +1,1601 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ * Code based on Linux XHCI driver.
+ * Origin: Copyright (C) 2008 Intel Corp.
+ */
+#ifndef __LINUX_CDNSP_GADGET_H
+#define __LINUX_CDNSP_GADGET_H
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/usb/gadget.h>
+#include <linux/irq.h>
+
+/* Max number of slots - only 1 is allowed. */
+#define CDNSP_DEV_MAX_SLOTS 1
+
+#define CDNSP_EP0_SETUP_SIZE 512
+
+/* One control and 15 for in and 15 for out endpoints. */
+#define CDNSP_ENDPOINTS_NUM 31
+
+/* Best Effort Service Latency. */
+#define CDNSP_DEFAULT_BESL 0
+
+/* Device Controller command default timeout value in us */
+#define CDNSP_CMD_TIMEOUT (15 * 1000)
+
+/* Up to 16 ms to halt a device controller */
+#define CDNSP_MAX_HALT_USEC (16 * 1000)
+
+#define CDNSP_CTX_SIZE 2112
+
+/*
+ * Controller register interface.
+ */
+
+/**
+ * struct cdnsp_cap_regs - CDNSP Registers.
+ * @hc_capbase: Length of the capabilities register and controller
+ * version number
+ * @hcs_params1: HCSPARAMS1 - Structural Parameters 1
+ * @hcs_params2: HCSPARAMS2 - Structural Parameters 2
+ * @hcs_params3: HCSPARAMS3 - Structural Parameters 3
+ * @hcc_params: HCCPARAMS - Capability Parameters
+ * @db_off: DBOFF - Doorbell array offset
+ * @run_regs_off: RTSOFF - Runtime register space offset
+ * @hcc_params2: HCCPARAMS2 - Capability Parameters 2.
+ */
+struct cdnsp_cap_regs {
+ __le32 hc_capbase;
+ __le32 hcs_params1;
+ __le32 hcs_params2;
+ __le32 hcs_params3;
+ __le32 hcc_params;
+ __le32 db_off;
+ __le32 run_regs_off;
+ __le32 hcc_params2;
+ /* Reserved up to (CAPLENGTH - 0x1C) */
+};
+
+/* hc_capbase bitmasks. */
+/* bits 7:0 - how long is the Capabilities register. */
+#define HC_LENGTH(p) (((p) >> 00) & GENMASK(7, 0))
+/* bits 31:16 */
+#define HC_VERSION(p) (((p) >> 16) & GENMASK(15, 1))
+
+/* HCSPARAMS1 - hcs_params1 - bitmasks */
+/* bits 0:7, Max Device Endpoints */
+#define HCS_ENDPOINTS_MASK GENMASK(7, 0)
+#define HCS_ENDPOINTS(p) (((p) & HCS_ENDPOINTS_MASK) >> 0)
+
+/* HCCPARAMS offset from PCI base address */
+#define HCC_PARAMS_OFFSET 0x10
+
+/* HCCPARAMS - hcc_params - bitmasks */
+/* 1: device controller can use 64-bit address pointers. */
+#define HCC_64BIT_ADDR(p) ((p) & BIT(0))
+/* 1: device controller uses 64-byte Device Context structures. */
+#define HCC_64BYTE_CONTEXT(p) ((p) & BIT(2))
+/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15. */
+#define HCC_MAX_PSA(p) ((((p) >> 12) & 0xf) + 1)
+/* Extended Capabilities pointer from PCI base. */
+#define HCC_EXT_CAPS(p) (((p) & GENMASK(31, 16)) >> 16)
+
+#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
+
+/* db_off bitmask - bits 0:1 reserved. */
+#define DBOFF_MASK GENMASK(31, 2)
+
+/* run_regs_off bitmask - bits 0:4 reserved. */
+#define RTSOFF_MASK GENMASK(31, 5)
+
+/**
+ * struct cdnsp_op_regs - Device Controller Operational Registers.
+ * @command: USBCMD - Controller command register.
+ * @status: USBSTS - Controller status register.
+ * @page_size: This indicates the page size that the device controller supports.
+ * If bit n is set, the controller supports a page size of 2^(n+12),
+ * up to a 128MB page size. 4K is the minimum page size.
+ * @dnctrl: DNCTRL - Device notification control register.
+ * @cmd_ring: CRP - 64-bit Command Ring Pointer.
+ * @dcbaa_ptr: DCBAAP - 64-bit Device Context Base Address Array Pointer.
+ * @config_reg: CONFIG - Configure Register
+ * @port_reg_base: PORTSCn - base address for Port Status and Control
+ * Each port has a Port Status and Control register,
+ * followed by a Port Power Management Status and Control
+ * register, a Port Link Info register, and a reserved
+ * register.
+ */
+struct cdnsp_op_regs {
+ __le32 command;
+ __le32 status;
+ __le32 page_size;
+ __le32 reserved1;
+ __le32 reserved2;
+ __le32 dnctrl;
+ __le64 cmd_ring;
+ /* rsvd: offset 0x20-2F. */
+ __le32 reserved3[4];
+ __le64 dcbaa_ptr;
+ __le32 config_reg;
+ /* rsvd: offset 0x3C-3FF. */
+ __le32 reserved4[241];
+ /* port 1 registers, which serve as a base address for other ports. */
+ __le32 port_reg_base;
+};
+
+/* Number of registers per port. */
+#define NUM_PORT_REGS 4
+
+/**
+ * struct cdnsp_port_regs - Port Registers.
+ * @portsc: PORTSC - Port Status and Control Register.
+ * @portpmsc: PORTPMSC - Port Power Management Status and Control Register.
+ * @portli: PORTLI - Port Link Info register.
+ */
+struct cdnsp_port_regs {
+ __le32 portsc;
+ __le32 portpmsc;
+ __le32 portli;
+ __le32 reserved;
+};
+
+/*
+ * These bits are Read Only (RO) and should be saved and written to the
+ * registers: 0 (connect status) and 10:13 (port speed).
+ * These bits are also sticky - meaning they're in the AUX well and they aren't
+ * changed by a hot or warm reset.
+ */
+#define CDNSP_PORT_RO (PORT_CONNECT | DEV_SPEED_MASK)
+
+/*
+ * These bits are RW; writing a 0 clears the bit, writing a 1 sets the bit:
+ * bits 5:8 (link state), 25:26 ("wake on" enable state)
+ */
+#define CDNSP_PORT_RWS (PORT_PLS_MASK | PORT_WKCONN_E | PORT_WKDISC_E)
+
+/*
+ * These bits are RW; writing a 1 clears the bit, writing a 0 has no effect:
+ * bits 1 (port enable/disable), 17 (connect changed),
+ * 21 (port reset changed), 22 (port link state changed).
+ */
+#define CDNSP_PORT_RW1CS (PORT_PED | PORT_CSC | PORT_RC | PORT_PLC)
+
+/* USBCMD - USB command - bitmasks. */
+/* Run/Stop, controller execution - do not write unless controller is halted. */
+#define CMD_R_S BIT(0)
+/*
+ * Reset device controller - resets internal controller state machine and all
+ * registers (except PCI config regs).
+ */
+#define CMD_RESET BIT(1)
+/* Event Interrupt Enable - a '1' allows interrupts from the controller. */
+#define CMD_INTE BIT(2)
+/*
+ * Device System Error Interrupt Enable - get out-of-band signal for
+ * controller errors.
+ */
+#define CMD_DSEIE BIT(3)
+/* device controller save/restore state. */
+#define CMD_CSS BIT(8)
+#define CMD_CRS BIT(9)
+/*
+ * Enable Wrap Event - '1' means device controller generates an event
+ * when MFINDEX wraps.
+ */
+#define CMD_EWE BIT(10)
+/* 1: device enabled */
+#define CMD_DEVEN BIT(17)
+/* bits 18:31 are reserved (and should be preserved on writes). */
+
+/* Command register values to disable interrupts. */
+#define CDNSP_IRQS (CMD_INTE | CMD_DSEIE | CMD_EWE)
+
+/* USBSTS - USB status - bitmasks */
+/* controller not running - set to 1 when run/stop bit is cleared. */
+#define STS_HALT BIT(0)
+/*
+ * serious error, e.g. PCI parity error. The controller will clear
+ * the run/stop bit.
+ */
+#define STS_FATAL BIT(2)
+/* Event interrupt - clear this prior to clearing any IP flags in IR set. */
+#define STS_EINT BIT(3)
+/* port change detect */
+#define STS_PCD BIT(4)
+/* save state status - '1' means device controller is saving state. */
+#define STS_SSS BIT(8)
+/* restore state status - '1' means controller is restoring state. */
+#define STS_RSS BIT(9)
+/* 1: save or restore error */
+#define STS_SRE BIT(10)
+/* 1: device Not Ready to accept doorbell or op reg writes after reset. */
+#define STS_CNR BIT(11)
+/* 1: internal Device Controller Error. */
+#define STS_HCE BIT(12)
+
+/* CRCR - Command Ring Control Register - cmd_ring bitmasks. */
+/* bit 0 is the command ring cycle state. */
+#define CMD_RING_CS BIT(0)
+/* stop ring immediately - abort the currently executing command. */
+#define CMD_RING_ABORT BIT(2)
+/*
+ * Command Ring Busy.
+ * Set when the Doorbell register is written with the command doorbell and
+ * cleared when the controller reaches the end of the command ring.
+ */
+#define CMD_RING_BUSY(p) ((p) & BIT(4))
+/* 1: command ring is running */
+#define CMD_RING_RUNNING BIT(3)
+/* Command Ring pointer - bit mask for the lower 32 bits. */
+#define CMD_RING_RSVD_BITS GENMASK(5, 0)
+
+/* CONFIG - Configure Register - config_reg bitmasks. */
+/* bits 0:7 - maximum number of device slots enabled. */
+#define MAX_DEVS GENMASK(7, 0)
+/* bit 8: U3 Entry Enabled, assert PLC when controller enters U3. */
+#define CONFIG_U3E BIT(8)
+
+/* PORTSC - Port Status and Control Register - port_reg_base bitmasks */
+/* 1: device connected. */
+#define PORT_CONNECT BIT(0)
+/* 1: port enabled. */
+#define PORT_PED BIT(1)
+/* 1: port reset signaling asserted. */
+#define PORT_RESET BIT(4)
+/*
+ * Port Link State - bits 5:8
+ * A read gives the current link PM state of the port,
+ * a write with Link State Write Strobe sets the link state.
+ */
+#define PORT_PLS_MASK GENMASK(8, 5)
+#define XDEV_U0 (0x0 << 5)
+#define XDEV_U1 (0x1 << 5)
+#define XDEV_U2 (0x2 << 5)
+#define XDEV_U3 (0x3 << 5)
+#define XDEV_DISABLED (0x4 << 5)
+#define XDEV_RXDETECT (0x5 << 5)
+#define XDEV_INACTIVE (0x6 << 5)
+#define XDEV_POLLING (0x7 << 5)
+#define XDEV_RECOVERY (0x8 << 5)
+#define XDEV_HOT_RESET (0x9 << 5)
+#define XDEV_COMP_MODE (0xa << 5)
+#define XDEV_TEST_MODE (0xb << 5)
+#define XDEV_RESUME (0xf << 5)
+/* 1: port has power. */
+#define PORT_POWER BIT(9)
+/*
+ * bits 10:13 indicate device speed:
+ * 0 - undefined speed - port hasn't been initialized by a reset yet
+ * 1 - full speed
+ * 2 - reserved (low speed not supported)
+ * 3 - high speed
+ * 4 - super speed
+ * 5 - super speed plus
+ * 6-15 reserved
+ */
+#define DEV_SPEED_MASK GENMASK(13, 10)
+#define XDEV_FS (0x1 << 10)
+#define XDEV_HS (0x3 << 10)
+#define XDEV_SS (0x4 << 10)
+#define XDEV_SSP (0x5 << 10)
+#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0 << 10))
+#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS)
+#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS)
+#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS)
+#define DEV_SUPERSPEEDPLUS(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP)
+#define DEV_SUPERSPEED_ANY(p) (((p) & DEV_SPEED_MASK) >= XDEV_SS)
+#define DEV_PORT_SPEED(p) (((p) >> 10) & 0x0f)
+/* Port Link State Write Strobe - set this when changing link state */
+#define PORT_LINK_STROBE BIT(16)
+/* 1: connect status change */
+#define PORT_CSC BIT(17)
+/* 1: warm reset for a USB 3.0 device is done. */
+#define PORT_WRC BIT(19)
+/* 1: reset change - 1 to 0 transition of PORT_RESET */
+#define PORT_RC BIT(21)
+/*
+ * port link status change - set on some port link state transitions:
+ * Transition Reason
+ * ----------------------------------------------------------------------------
+ * - U3 to Resume Wakeup signaling from a device
+ * - Resume to Recovery to U0 USB 3.0 device resume
+ * - Resume to U0 USB 2.0 device resume
+ * - U3 to Recovery to U0 Software resume of USB 3.0 device complete
+ * - U3 to U0 Software resume of USB 2.0 device complete
+ * - U2 to U0 L1 resume of USB 2.1 device complete
+ * - U0 to U0 L1 entry rejection by USB 2.1 device
+ * - U0 to disabled L1 entry error with USB 2.1 device
+ * - Any state to inactive Error on USB 3.0 port
+ */
+#define PORT_PLC BIT(22)
+/* Port configure error change - port failed to configure its link partner. */
+#define PORT_CEC BIT(23)
+/* Wake on connect (enable). */
+#define PORT_WKCONN_E BIT(25)
+/* Wake on disconnect (enable). */
+#define PORT_WKDISC_E BIT(26)
+/* Indicates if Warm Reset is being received. */
+#define PORT_WR BIT(31)
+
+#define PORT_CHANGE_BITS (PORT_CSC | PORT_WRC | PORT_RC | PORT_PLC | PORT_CEC)
+
+/* PORTPMSCUSB3 - Port Power Management Status and Control - bitmasks. */
+/* Enables U1 entry. */
+#define PORT_U1_TIMEOUT_MASK GENMASK(7, 0)
+#define PORT_U1_TIMEOUT(p) ((p) & PORT_U1_TIMEOUT_MASK)
+/* Enables U2 entry. */
+#define PORT_U2_TIMEOUT_MASK GENMASK(14, 8)
+#define PORT_U2_TIMEOUT(p) (((p) << 8) & PORT_U2_TIMEOUT_MASK)
+
+/* PORTPMSCUSB2 - Port Power Management Status and Control - bitmasks. */
+#define PORT_L1S_MASK GENMASK(2, 0)
+#define PORT_L1S(p) ((p) & PORT_L1S_MASK)
+#define PORT_L1S_ACK PORT_L1S(1)
+#define PORT_L1S_NYET PORT_L1S(2)
+#define PORT_L1S_STALL PORT_L1S(3)
+#define PORT_L1S_TIMEOUT PORT_L1S(4)
+/* Remote Wake Enable. */
+#define PORT_RWE BIT(3)
+/* Best Effort Service Latency (BESL). */
+#define PORT_BESL(p) (((p) << 4) & GENMASK(7, 4))
+/* Hardware LPM Enable (HLE). */
+#define PORT_HLE BIT(16)
+/* Received Best Effort Service Latency (BESL). */
+#define PORT_RRBESL(p) (((p) & GENMASK(20, 17)) >> 17)
+/* Port Test Control. */
+#define PORT_TEST_MODE_MASK GENMASK(31, 28)
+#define PORT_TEST_MODE(p) (((p) << 28) & PORT_TEST_MODE_MASK)
+
+/**
+ * struct cdnsp_intr_reg - Interrupt Register Set.
+ * @irq_pending: IMAN - Interrupt Management Register. Used to enable
+ * interrupts and check for pending interrupts.
+ * @irq_control: IMOD - Interrupt Moderation Register.
+ * Used to throttle interrupts.
+ * @erst_size: Number of segments in the Event Ring Segment Table (ERST).
+ * @erst_base: ERST base address.
+ * @erst_dequeue: Event ring dequeue pointer.
+ *
+ * Each interrupter (defined by a MSI-X vector) has an event ring and an Event
+ * Ring Segment Table (ERST) associated with it. The event ring is comprised of
+ * multiple segments of the same size. The controller places events on the ring
+ * and "updates the Cycle bit in the TRBs to indicate to software the current
+ * position of the Enqueue Pointer." The driver processes those events and
+ * updates the dequeue pointer.
+ */
+struct cdnsp_intr_reg {
+ __le32 irq_pending;
+ __le32 irq_control;
+ __le32 erst_size;
+ __le32 rsvd;
+ __le64 erst_base;
+ __le64 erst_dequeue;
+};
+
+/* IMAN - Interrupt Management Register - irq_pending bitmasks. */
+#define IMAN_IE BIT(1)
+#define IMAN_IP BIT(0)
+/* bits 2:31 need to be preserved */
+#define IMAN_IE_SET(p) (((p) & IMAN_IE) | 0x2)
+#define IMAN_IE_CLEAR(p) (((p) & IMAN_IE) & ~(0x2))
+
+/* IMOD - Interrupter Moderation Register - irq_control bitmasks. */
+/*
+ * Minimum interval between interrupts (in 250ns intervals). The interval
+ * between interrupts will be longer if there are no events on the event ring.
+ * Default is 4000 (1 ms).
+ */
+#define IMOD_INTERVAL_MASK GENMASK(15, 0)
+/* Counter used to count down the time to the next interrupt - HW use only */
+#define IMOD_COUNTER_MASK GENMASK(31, 16)
+#define IMOD_DEFAULT_INTERVAL 0
+
+/* erst_size bitmasks. */
+/* Preserve bits 16:31 of erst_size. */
+#define ERST_SIZE_MASK GENMASK(31, 16)
+
+/* erst_dequeue bitmasks. */
+/*
+ * Dequeue ERST Segment Index (DESI) - Segment number (or alias)
+ * where the current dequeue pointer lies. This is an optional HW hint.
+ */
+#define ERST_DESI_MASK GENMASK(2, 0)
+/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced. */
+#define ERST_EHB BIT(3)
+#define ERST_PTR_MASK GENMASK(3, 0)
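+
+/*
+ * Illustrative sketch (not part of this patch): after processing events the
+ * driver typically writes the new dequeue pointer back with EHB set to
+ * clear it (RW1C semantics assumed here), roughly:
+ *
+ *	u64 deq = cdnsp_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
+ *
+ *	cdnsp_write_64((deq & ~ERST_PTR_MASK) | ERST_EHB,
+ *		       &pdev->ir_set->erst_dequeue);
+ */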
+
+/**
+ * struct cdnsp_run_regs
+ * @microframe_index: MFINDEX - current microframe number.
+ * @ir_set: Array of Interrupter registers.
+ *
+ * Device Controller Runtime Registers:
+ * "Software should read and write these registers using only Dword (32 bit)
+ * or larger accesses"
+ */
+struct cdnsp_run_regs {
+ __le32 microframe_index;
+ __le32 rsvd[7];
+ struct cdnsp_intr_reg ir_set[128];
+};
+
+/**
+ * struct cdnsp_20port_cap - USB2.0 Port Peripheral Configuration Registers.
+ * @ext_cap: Header register for Extended Capability.
+ * @port_reg1: Timer Configuration Register.
+ * @port_reg2: Timer Configuration Register.
+ * @port_reg3: Timer Configuration Register.
+ * @port_reg4: Timer Configuration Register.
+ * @port_reg5: Timer Configuration Register.
+ * @port_reg6: Chicken bits for USB20PPP.
+ */
+struct cdnsp_20port_cap {
+ __le32 ext_cap;
+ __le32 port_reg1;
+ __le32 port_reg2;
+ __le32 port_reg3;
+ __le32 port_reg4;
+ __le32 port_reg5;
+ __le32 port_reg6;
+};
+
+/* Extended capability register fields */
+#define EXT_CAPS_ID(p) (((p) >> 0) & GENMASK(7, 0))
+#define EXT_CAPS_NEXT(p) (((p) >> 8) & GENMASK(7, 0))
+/* Extended capability IDs - ID 0 reserved */
+#define EXT_CAPS_PROTOCOL 2
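+
+/*
+ * Illustrative sketch (not part of this patch): these fields allow walking
+ * the extended capability list, roughly:
+ *
+ *	offset = EXT_CAPS_NEXT(readl(base + offset)) << 2;
+ *
+ * repeated until EXT_CAPS_ID() matches the wanted ID or the next pointer is
+ * zero; the dword-to-byte shift is an assumption borrowed from the xHCI
+ * layout. cdnsp_find_next_ext_cap(), declared below, provides this walk.
+ */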
+
+/* USB 2.0 Port Peripheral Configuration Extended Capability */
+#define EXT_CAP_CFG_DEV_20PORT_CAP_ID 0xC1
+/*
+ * Setting this bit to '1' enables automatic wakeup from L1 state on transfer
+ * TRB prepared when USBSSP operates in USB2.0 mode.
+ */
+#define PORT_REG6_L1_L0_HW_EN BIT(1)
+/*
+ * Setting this bit to '1' forces Full Speed when USBSSP operates in USB2.0
+ * mode (disables High Speed).
+ */
+#define PORT_REG6_FORCE_FS BIT(0)
+
+/**
+ * struct cdnsp_3xport_cap - USB3.x Port Peripheral Configuration Registers.
+ * @ext_cap: Header register for Extended Capability.
+ * @mode_addr: Miscellaneous 3xPORT operation mode configuration register.
+ * @mode_2: 3x Port Control Register 2.
+ */
+struct cdnsp_3xport_cap {
+ __le32 ext_cap;
+ __le32 mode_addr;
+ __le32 reserved[52];
+ __le32 mode_2;
+};
+
+/* Extended Capability Header for 3XPort Configuration Registers. */
+#define D_XEC_CFG_3XPORT_CAP 0xC0
+#define CFG_3XPORT_SSP_SUPPORT BIT(31)
+#define CFG_3XPORT_U1_PIPE_CLK_GATE_EN BIT(0)
+
+/* Revision Extended Capability ID */
+#define RTL_REV_CAP 0xC4
+#define RTL_REV_CAP_RX_BUFF_CMD_SIZE	GENMASK(31, 24)
+#define RTL_REV_CAP_RX_BUFF_SIZE	GENMASK(15, 0)
+#define RTL_REV_CAP_TX_BUFF_CMD_SIZE	GENMASK(31, 24)
+#define RTL_REV_CAP_TX_BUFF_SIZE	GENMASK(15, 0)
+
+#define CDNSP_VER_1 0x00000000
+#define CDNSP_VER_2 0x10000000
+
+#define CDNSP_IF_EP_EXIST(pdev, ep_num, dir) \
+ (readl(&(pdev)->rev_cap->ep_supported) & \
+ (BIT(ep_num) << ((dir) ? 0 : 16)))
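+
+/*
+ * Illustrative sketch (not part of this patch): before enabling an endpoint
+ * the gadget side can check that the controller implements it:
+ *
+ *	if (!CDNSP_IF_EP_EXIST(pdev, ep_num, dir))
+ *		return -EINVAL;
+ *
+ * ep_num and dir are assumed locals matching the macro arguments.
+ */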
+
+/**
+ * struct cdnsp_rev_cap - controller capabilities.
+ * @ext_cap: Header for RTL Revision Extended Capability.
+ * @rtl_revision: RTL revision.
+ * @rx_buff_size: Rx buffer sizes.
+ * @tx_buff_size: Tx buffer sizes.
+ * @ep_supported: Supported endpoints.
+ * @ctrl_revision: Controller revision ID.
+ */
+struct cdnsp_rev_cap {
+ __le32 ext_cap;
+ __le32 rtl_revision;
+ __le32 rx_buff_size;
+ __le32 tx_buff_size;
+ __le32 ep_supported;
+ __le32 ctrl_revision;
+};
+
+/* USB2.0 Port Peripheral Configuration Registers. */
+#define D_XEC_PRE_REGS_CAP 0xC8
+#define REG_CHICKEN_BITS_2_OFFSET 0x48
+#define CHICKEN_XDMA_2_TP_CACHE_DIS BIT(28)
+
+/* XBUF Extended Capability ID. */
+#define XBUF_CAP_ID 0xCB
+#define XBUF_RX_TAG_MASK_0_OFFSET 0x1C
+#define XBUF_RX_TAG_MASK_1_OFFSET 0x24
+#define XBUF_TX_CMD_OFFSET 0x2C
+
+/**
+ * struct cdnsp_doorbell_array.
+ * @cmd_db: Command ring doorbell register.
+ * @ep_db: Endpoint ring doorbell register.
+ * Bits 0 - 7: Endpoint target.
+ * Bits 8 - 15: RsvdZ.
+ * Bits 16 - 31: Stream ID.
+ */
+struct cdnsp_doorbell_array {
+ __le32 cmd_db;
+ __le32 ep_db;
+};
+
+#define DB_VALUE(ep, stream) ((((ep) + 1) & 0xff) | ((stream) << 16))
+#define DB_VALUE_EP0_OUT(ep, stream) ((ep) & 0xff)
+#define DB_VALUE_CMD 0x00000000
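+
+/*
+ * Illustrative sketch (not part of this patch): ringing the doorbells could
+ * look roughly like:
+ *
+ *	writel(DB_VALUE(ep_index, stream_id), &pdev->dba->ep_db);
+ *	writel(DB_VALUE_CMD, &pdev->dba->cmd_db);
+ *
+ * ep_index and stream_id are assumed locals identifying the target ring.
+ */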
+
+/**
+ * struct cdnsp_container_ctx.
+ * @type: Type of context. Used to calculate offsets to contained contexts.
+ * @size: Size of the context data.
+ * @ctx_size: Context data structure size - 64 or 32 bytes.
+ * @dma: dma address of the bytes.
+ * @bytes: The raw context data given to HW.
+ *
+ * Represents either a Device or Input context. Holds a pointer to the raw
+ * memory used for the context (bytes) and dma address of it (dma).
+ */
+struct cdnsp_container_ctx {
+ unsigned int type;
+#define CDNSP_CTX_TYPE_DEVICE 0x1
+#define CDNSP_CTX_TYPE_INPUT 0x2
+ int size;
+ int ctx_size;
+ dma_addr_t dma;
+ u8 *bytes;
+};
+
+/**
+ * struct cdnsp_slot_ctx
+ * @dev_info: Device speed, and last valid endpoint.
+ * @dev_port: Device port number that is needed to access the USB device.
+ * @int_target: Interrupter target number.
+ * @dev_state: Slot state and device address.
+ *
+ * Slot Context - This assumes the controller uses 32-byte context
+ * structures. If the controller uses 64-byte contexts, there is an additional
+ * 32 bytes reserved at the end of the slot context for controller internal use.
+ */
+struct cdnsp_slot_ctx {
+ __le32 dev_info;
+ __le32 dev_port;
+ __le32 int_target;
+ __le32 dev_state;
+ /* offset 0x10 to 0x1f reserved for controller internal use. */
+ __le32 reserved[4];
+};
+
+/* Bits 20:23 in the Slot Context are the speed for the device. */
+#define SLOT_SPEED_FS (XDEV_FS << 10)
+#define SLOT_SPEED_HS (XDEV_HS << 10)
+#define SLOT_SPEED_SS (XDEV_SS << 10)
+#define SLOT_SPEED_SSP (XDEV_SSP << 10)
+
+/* dev_info bitmasks. */
+/* Device speed - values defined by PORTSC Device Speed field - 20:23. */
+#define DEV_SPEED GENMASK(23, 20)
+#define GET_DEV_SPEED(n) (((n) & DEV_SPEED) >> 20)
+/* Index of the last valid endpoint context in this device context - 27:31. */
+#define LAST_CTX_MASK ((unsigned int)GENMASK(31, 27))
+#define LAST_CTX(p) ((p) << 27)
+#define LAST_CTX_TO_EP_NUM(p) (((p) >> 27) - 1)
+#define SLOT_FLAG BIT(0)
+#define EP0_FLAG BIT(1)
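+
+/*
+ * Illustrative sketch (not part of this patch): a SuperSpeed slot context
+ * with only ep0 valid might encode dev_info as:
+ *
+ *	slot_ctx->dev_info = cpu_to_le32(SLOT_SPEED_SS | LAST_CTX(1));
+ */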
+
+/* dev_port bitmasks */
+/* Device port number that is needed to access the USB device. */
+#define DEV_PORT(p) (((p) & 0xff) << 16)
+
+/* dev_state bitmasks */
+/* USB device address - assigned by the controller. */
+#define DEV_ADDR_MASK GENMASK(7, 0)
+/* Slot state */
+#define SLOT_STATE GENMASK(31, 27)
+#define GET_SLOT_STATE(p) (((p) & SLOT_STATE) >> 27)
+
+#define SLOT_STATE_DISABLED 0
+#define SLOT_STATE_ENABLED SLOT_STATE_DISABLED
+#define SLOT_STATE_DEFAULT 1
+#define SLOT_STATE_ADDRESSED 2
+#define SLOT_STATE_CONFIGURED 3
+
+/**
+ * struct cdnsp_ep_ctx.
+ * @ep_info: Endpoint state, streams, mult, and interval information.
+ * @ep_info2: Information on endpoint type, max packet size, max burst size,
+ * error count, and whether the controller will force an event for
+ * all transactions.
+ * @deq: 64-bit ring dequeue pointer address. If the endpoint only
+ * defines one stream, this points to the endpoint transfer ring.
+ * Otherwise, it points to a stream context array, which has a
+ * ring pointer for each flow.
+ * @tx_info: Average TRB lengths for the endpoint ring and
+ * max payload within an Endpoint Service Interval Time (ESIT).
+ *
+ * Endpoint Context - This assumes the controller uses 32-byte context
+ * structures. If the controller uses 64-byte contexts, there is an additional
+ * 32 bytes reserved at the end of the endpoint context for controller internal
+ * use.
+ */
+struct cdnsp_ep_ctx {
+ __le32 ep_info;
+ __le32 ep_info2;
+ __le64 deq;
+ __le32 tx_info;
+ /* offset 0x14 - 0x1f reserved for controller internal use. */
+ __le32 reserved[3];
+};
+
+/* ep_info bitmasks. */
+/*
+ * Endpoint State - bits 0:2:
+ * 0 - disabled
+ * 1 - running
+ * 2 - halted due to halt condition
+ * 3 - stopped
+ * 4 - TRB error
+ * 5-7 - reserved
+ */
+#define EP_STATE_MASK GENMASK(3, 0)
+#define EP_STATE_DISABLED 0
+#define EP_STATE_RUNNING 1
+#define EP_STATE_HALTED 2
+#define EP_STATE_STOPPED 3
+#define EP_STATE_ERROR 4
+#define GET_EP_CTX_STATE(ctx) (le32_to_cpu((ctx)->ep_info) & EP_STATE_MASK)
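+
+/*
+ * Illustrative sketch (not part of this patch): checking whether an endpoint
+ * is halted before queuing a Reset Endpoint command:
+ *
+ *	if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_HALTED)
+ *		cdnsp_queue_reset_ep(pdev, pep->idx);
+ */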
+
+/* Mult - Max number of bursts within an interval, in EP companion desc. */
+#define EP_MULT(p) (((p) << 8) & GENMASK(9, 8))
+#define CTX_TO_EP_MULT(p) (((p) & GENMASK(9, 8)) >> 8)
+/* bits 10:14 are Max Primary Streams. */
+/* bit 15 is Linear Stream Array. */
+/* Interval - period between requests to an endpoint - 125us increments. */
+#define EP_INTERVAL(p) (((p) << 16) & GENMASK(23, 16))
+#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) & GENMASK(23, 16)) >> 16))
+#define CTX_TO_EP_INTERVAL(p) (((p) & GENMASK(23, 16)) >> 16)
+#define EP_MAXPSTREAMS_MASK GENMASK(14, 10)
+#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK)
+#define CTX_TO_EP_MAXPSTREAMS(p) (((p) & EP_MAXPSTREAMS_MASK) >> 10)
+/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
+#define EP_HAS_LSA BIT(15)
+
+/* ep_info2 bitmasks */
+#define ERROR_COUNT(p) (((p) & 0x3) << 1)
+#define CTX_TO_EP_TYPE(p) (((p) >> 3) & 0x7)
+#define EP_TYPE(p) ((p) << 3)
+#define ISOC_OUT_EP 1
+#define BULK_OUT_EP 2
+#define INT_OUT_EP 3
+#define CTRL_EP 4
+#define ISOC_IN_EP 5
+#define BULK_IN_EP 6
+#define INT_IN_EP 7
+/* bit 6 reserved. */
+/* bit 7 is Device Initiate Disable - for disabling stream selection. */
+#define MAX_BURST(p) (((p) << 8) & GENMASK(15, 8))
+#define CTX_TO_MAX_BURST(p) (((p) & GENMASK(15, 8)) >> 8)
+#define MAX_PACKET(p) (((p) << 16) & GENMASK(31, 16))
+#define MAX_PACKET_MASK GENMASK(31, 16)
+#define MAX_PACKET_DECODED(p) (((p) & GENMASK(31, 16)) >> 16)
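+
+/*
+ * Illustrative sketch (not part of this patch): a 512-byte bulk IN endpoint
+ * could encode ep_info2 roughly as:
+ *
+ *	ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(BULK_IN_EP) | MAX_PACKET(512) |
+ *				       MAX_BURST(0) | ERROR_COUNT(3));
+ */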
+
+/* tx_info bitmasks. */
+#define EP_AVG_TRB_LENGTH(p) ((p) & GENMASK(15, 0))
+#define EP_MAX_ESIT_PAYLOAD_LO(p) (((p) << 16) & GENMASK(31, 16))
+#define EP_MAX_ESIT_PAYLOAD_HI(p) ((((p) & GENMASK(23, 16)) >> 16) << 24)
+#define CTX_TO_MAX_ESIT_PAYLOAD_LO(p) (((p) & GENMASK(31, 16)) >> 16)
+#define CTX_TO_MAX_ESIT_PAYLOAD_HI(p) (((p) & GENMASK(31, 24)) >> 24)
+
+/* deq bitmasks. */
+#define EP_CTX_CYCLE_MASK BIT(0)
+#define CTX_DEQ_MASK (~0xfL)
+
+/**
+ * struct cdnsp_input_control_ctx - Input control context.
+ *
+ * @drop_flags: Set the bit of the endpoint context you want to disable.
+ * @add_flags: Set the bit of the endpoint context you want to enable.
+ */
+struct cdnsp_input_control_ctx {
+ __le32 drop_flags;
+ __le32 add_flags;
+ __le32 rsvd2[6];
+};
+
+/**
+ * struct cdnsp_command - represents everything that is needed to issue a
+ * command on the command ring.
+ *
+ * @in_ctx: Pointer to input context structure.
+ * @status: Command Completion Code for last command.
+ * @command_trb: Pointer to command TRB.
+ */
+struct cdnsp_command {
+ /* Input context for changing device state. */
+ struct cdnsp_container_ctx *in_ctx;
+ u32 status;
+ union cdnsp_trb *command_trb;
+};
+
+/**
+ * struct cdnsp_stream_ctx - Stream context structure.
+ *
+ * @stream_ring: 64-bit stream ring address, cycle state, and stream type.
+ * @reserved: offset 0x14 - 0x1f reserved for controller internal use.
+ */
+struct cdnsp_stream_ctx {
+ __le64 stream_ring;
+ __le32 reserved[2];
+};
+
+/* Stream Context Types - bits 3:1 of stream ctx deq ptr. */
+#define SCT_FOR_CTX(p) (((p) << 1) & GENMASK(3, 1))
+/* Secondary stream array type, dequeue pointer is to a transfer ring. */
+#define SCT_SEC_TR 0
+/* Primary stream array type, dequeue pointer is to a transfer ring. */
+#define SCT_PRI_TR 1
+
+/**
+ * struct cdnsp_stream_info: Represents everything that is needed to
+ * support stream capable endpoints.
+ * @stream_rings: Array of pointers containing Transfer rings for all
+ * supported streams.
+ * @num_streams: Number of streams, including stream 0.
+ * @stream_ctx_array: The stream context array may be bigger than the number
+ * of streams the driver asked for.
+ * @num_stream_ctxs: Number of stream contexts in the array.
+ * @ctx_array_dma: DMA address of the stream context array.
+ * @trb_address_map: For mapping physical TRB addresses to segments in
+ * stream rings.
+ * @td_count: Number of TDs associated with endpoint.
+ * @first_prime_det: First PRIME packet detected.
+ * @drbls_count: Number of allowed doorbells.
+ */
+struct cdnsp_stream_info {
+ struct cdnsp_ring **stream_rings;
+ unsigned int num_streams;
+ struct cdnsp_stream_ctx *stream_ctx_array;
+ unsigned int num_stream_ctxs;
+ dma_addr_t ctx_array_dma;
+ struct radix_tree_root trb_address_map;
+ int td_count;
+ u8 first_prime_det;
+#define STREAM_DRBL_FIFO_DEPTH 2
+ u8 drbls_count;
+};
+
+#define STREAM_LOG_STREAMS 4
+#define STREAM_NUM_STREAMS BIT(STREAM_LOG_STREAMS)
+
+#if STREAM_LOG_STREAMS > 16 || STREAM_LOG_STREAMS < 1
+#error "Unsupported stream value"
+#endif
+
+/**
+ * struct cdnsp_ep - extended device side representation of USB endpoint.
+ * @endpoint: usb endpoint
+ * @pending_list: List of requests queued on the transfer ring.
+ * @pdev: Device associated with this endpoint.
+ * @number: Endpoint number (1 - 15).
+ * @idx: The device context index (DCI).
+ * @interval: Interval between packets used for ISOC endpoints.
+ * @name: A human readable name e.g. ep1out.
+ * @direction: Endpoint direction.
+ * @buffering: Number of on-chip buffers related to endpoint.
+ * @buffering_period: Number of on-chip buffers related to periodic endpoint.
+ * @in_ctx: Pointer to input endpoint context structure.
+ * @out_ctx: Pointer to output endpoint context structure.
+ * @ring: Pointer to transfer ring.
+ * @stream_info: Holds stream information.
+ * @ep_state: Current state of endpoint.
+ * @skip: Sometimes the controller cannot process the isochronous endpoint
+ *        ring quickly enough; it will then miss some isoc TDs on the ring
+ *        and generate a Missed Service Error Event.
+ *        Set the skip flag after receiving a Missed Service Error Event and
+ *        process the missed TDs on the endpoint ring.
+ */
+struct cdnsp_ep {
+ struct usb_ep endpoint;
+ struct list_head pending_list;
+ struct cdnsp_device *pdev;
+ u8 number;
+ u8 idx;
+ u32 interval;
+ char name[20];
+ u8 direction;
+ u8 buffering;
+ u8 buffering_period;
+ struct cdnsp_ep_ctx *in_ctx;
+ struct cdnsp_ep_ctx *out_ctx;
+ struct cdnsp_ring *ring;
+ struct cdnsp_stream_info stream_info;
+ unsigned int ep_state;
+#define EP_ENABLED BIT(0)
+#define EP_DIS_IN_RROGRESS BIT(1)
+#define EP_HALTED BIT(2)
+#define EP_STOPPED BIT(3)
+#define EP_WEDGE BIT(4)
+#define EP0_HALTED_STATUS BIT(5)
+#define EP_HAS_STREAMS BIT(6)
+
+ bool skip;
+};
+
+/**
+ * struct cdnsp_device_context_array
+ * @dev_context_ptrs: Array of 64-bit DMA addresses for device contexts.
+ * @dma: DMA address for device contexts structure.
+ */
+struct cdnsp_device_context_array {
+ __le64 dev_context_ptrs[CDNSP_DEV_MAX_SLOTS + 1];
+ dma_addr_t dma;
+};
+
+/**
+ * struct cdnsp_transfer_event.
+ * @buffer: 64-bit buffer address, or immediate data.
+ * @transfer_len: Data length transferred.
+ * @flags: Field is interpreted differently based on the type of TRB.
+ */
+struct cdnsp_transfer_event {
+ __le64 buffer;
+ __le32 transfer_len;
+ __le32 flags;
+};
+
+/* Invalidate event after disabling endpoint. */
+#define TRB_EVENT_INVALIDATE 8
+
+/* Transfer event TRB length bit mask. */
+/* bits 0:23 */
+#define EVENT_TRB_LEN(p) ((p) & GENMASK(23, 0))
+/* Completion Code - only applicable for some types of TRBs */
+#define COMP_CODE_MASK (0xff << 24)
+#define GET_COMP_CODE(p) (((p) & COMP_CODE_MASK) >> 24)
+#define COMP_INVALID 0
+#define COMP_SUCCESS 1
+#define COMP_DATA_BUFFER_ERROR 2
+#define COMP_BABBLE_DETECTED_ERROR 3
+#define COMP_TRB_ERROR 5
+#define COMP_RESOURCE_ERROR 7
+#define COMP_NO_SLOTS_AVAILABLE_ERROR 9
+#define COMP_INVALID_STREAM_TYPE_ERROR 10
+#define COMP_SLOT_NOT_ENABLED_ERROR 11
+#define COMP_ENDPOINT_NOT_ENABLED_ERROR 12
+#define COMP_SHORT_PACKET 13
+#define COMP_RING_UNDERRUN 14
+#define COMP_RING_OVERRUN 15
+#define COMP_VF_EVENT_RING_FULL_ERROR 16
+#define COMP_PARAMETER_ERROR 17
+#define COMP_CONTEXT_STATE_ERROR 19
+#define COMP_EVENT_RING_FULL_ERROR 21
+#define COMP_INCOMPATIBLE_DEVICE_ERROR 22
+#define COMP_MISSED_SERVICE_ERROR 23
+#define COMP_COMMAND_RING_STOPPED 24
+#define COMP_COMMAND_ABORTED 25
+#define COMP_STOPPED 26
+#define COMP_STOPPED_LENGTH_INVALID 27
+#define COMP_STOPPED_SHORT_PACKET 28
+#define COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR 29
+#define COMP_ISOCH_BUFFER_OVERRUN 31
+#define COMP_EVENT_LOST_ERROR 32
+#define COMP_UNDEFINED_ERROR 33
+#define COMP_INVALID_STREAM_ID_ERROR 34
+
+/* Transfer Event NRDY bit fields. */
+#define TRB_TO_DEV_STREAM(p) ((p) & GENMASK(16, 0))
+#define TRB_TO_HOST_STREAM(p) ((p) & GENMASK(16, 0))
+#define STREAM_PRIME_ACK 0xFFFE
+#define STREAM_REJECTED 0xFFFF
+
+/* Transfer Event bit fields. */
+#define TRB_TO_EP_ID(p) (((p) & GENMASK(20, 16)) >> 16)
+
+/**
+ * struct cdnsp_link_trb
+ * @segment_ptr: 64-bit segment pointer.
+ * @intr_target: Interrupter target.
+ * @control: Flags.
+ */
+struct cdnsp_link_trb {
+ __le64 segment_ptr;
+ __le32 intr_target;
+ __le32 control;
+};
+
+/* control bitfields */
+#define LINK_TOGGLE BIT(1)
+
+/**
+ * struct cdnsp_event_cmd - Command completion event TRB.
+ * @cmd_trb: Pointer to command TRB, or the value passed by the event data
+ *           TRB.
+ * @status: Command completion parameters and error code.
+ * @flags: Flags.
+ */
+struct cdnsp_event_cmd {
+ __le64 cmd_trb;
+ __le32 status;
+ __le32 flags;
+};
+
+/* flags bitmasks */
+
+/* Address device - disable SetAddress. */
+#define TRB_BSR BIT(9)
+
+/* Configure Endpoint - Deconfigure. */
+#define TRB_DC BIT(9)
+
+/* Force Header */
+#define TRB_FH_TO_PACKET_TYPE(p) ((p) & GENMASK(4, 0))
+#define TRB_FH_TR_PACKET 0x4
+#define TRB_FH_TO_DEVICE_ADDRESS(p) (((p) << 25) & GENMASK(31, 25))
+#define TRB_FH_TR_PACKET_DEV_NOT 0x6
+#define TRB_FH_TO_NOT_TYPE(p) (((p) << 4) & GENMASK(7, 4))
+#define TRB_FH_TR_PACKET_FUNCTION_WAKE 0x1
+#define TRB_FH_TO_INTERFACE(p) (((p) << 8) & GENMASK(15, 8))
+
+enum cdnsp_setup_dev {
+ SETUP_CONTEXT_ONLY,
+ SETUP_CONTEXT_ADDRESS,
+};
+
+/* bits 24:31 are the slot ID. */
+#define TRB_TO_SLOT_ID(p) (((p) & GENMASK(31, 24)) >> 24)
+#define SLOT_ID_FOR_TRB(p) (((p) << 24) & GENMASK(31, 24))
+
+/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB. */
+#define TRB_TO_EP_INDEX(p) (((p) >> 16) & 0x1f)
+
+#define EP_ID_FOR_TRB(p) ((((p) + 1) << 16) & GENMASK(20, 16))
+
+#define SUSPEND_PORT_FOR_TRB(p) (((p) & 1) << 23)
+#define TRB_TO_SUSPEND_PORT(p) (((p) >> 23) & 0x1)
+#define LAST_EP_INDEX 30
+
+/* Set TR Dequeue Pointer command TRB fields. */
+#define TRB_TO_STREAM_ID(p) ((((p) & GENMASK(31, 16)) >> 16))
+#define STREAM_ID_FOR_TRB(p) ((((p)) << 16) & GENMASK(31, 16))
+#define SCT_FOR_TRB(p) (((p) << 1) & 0x7)
+
+/* Link TRB specific fields. */
+#define TRB_TC BIT(1)
+
+/* Port Status Change Event TRB fields. */
+/* Port ID - bits 31:24. */
+#define GET_PORT_ID(p) (((p) & GENMASK(31, 24)) >> 24)
+#define SET_PORT_ID(p) (((p) << 24) & GENMASK(31, 24))
+#define EVENT_DATA BIT(2)
+
+/* Normal TRB fields. */
+/* transfer_len bitmasks - bits 0:16. */
+#define TRB_LEN(p) ((p) & GENMASK(16, 0))
+/* TD Size, packets remaining in this TD, bits 21:17 (5 bits, so max 31). */
+#define TRB_TD_SIZE(p) (min((p), (u32)31) << 17)
+#define GET_TD_SIZE(p) (((p) & GENMASK(21, 17)) >> 17)
+/*
+ * Controller uses the TD_SIZE field for TBC if Extended TBC
+ * is enabled (ETE).
+ */
+#define TRB_TD_SIZE_TBC(p) (min((p), (u32)31) << 17)
+/* Interrupter Target - which MSI-X vector to target the completion event at. */
+#define TRB_INTR_TARGET(p) (((p) << 22) & GENMASK(31, 22))
+#define GET_INTR_TARGET(p) (((p) & GENMASK(31, 22)) >> 22)
+/*
+ * Total burst count field, Rsvdz on controller with Extended TBC
+ * enabled (ETE).
+ */
+#define TRB_TBC(p) (((p) & 0x3) << 7)
+#define TRB_TLBPC(p) (((p) & 0xf) << 16)
+
+/* Cycle bit - indicates TRB ownership by the controller or the driver. */
+#define TRB_CYCLE BIT(0)
+/*
+ * Force next event data TRB to be evaluated before task switch.
+ * Used to pass OS data back after a TD completes.
+ */
+#define TRB_ENT BIT(1)
+/* Interrupt on short packet. */
+#define TRB_ISP BIT(2)
+/* Set PCIe no snoop attribute. */
+#define TRB_NO_SNOOP BIT(3)
+/* Chain multiple TRBs into a TD. */
+#define TRB_CHAIN BIT(4)
+/* Interrupt on completion. */
+#define TRB_IOC BIT(5)
+/* The buffer pointer contains immediate data. */
+#define TRB_IDT BIT(6)
+/* 0 - NRDY during data stage, 1 - NRDY during status stage (only control). */
+#define TRB_STAT BIT(7)
+/* Block Event Interrupt. */
+#define TRB_BEI BIT(9)
+
+/* Control transfer TRB specific fields. */
+#define TRB_DIR_IN BIT(16)
+
+/* TRB bit mask in Data Stage TRB */
+#define TRB_SETUPID_BITMASK GENMASK(9, 8)
+#define TRB_SETUPID(p) ((p) << 8)
+#define TRB_SETUPID_TO_TYPE(p) (((p) & TRB_SETUPID_BITMASK) >> 8)
+
+#define TRB_SETUP_SPEEDID_USB3 0x1
+#define TRB_SETUP_SPEEDID_USB2 0x0
+#define TRB_SETUP_SPEEDID(p) ((p) & (1 << 7))
+
+#define TRB_SETUPSTAT_ACK 0x1
+#define TRB_SETUPSTAT_STALL 0x0
+#define TRB_SETUPSTAT(p) ((p) << 6)
+
+/* Isochronous TRB specific fields */
+#define TRB_SIA BIT(31)
+#define TRB_FRAME_ID(p) (((p) << 20) & GENMASK(30, 20))
+
+struct cdnsp_generic_trb {
+ __le32 field[4];
+};
+
+union cdnsp_trb {
+ struct cdnsp_link_trb link;
+ struct cdnsp_transfer_event trans_event;
+ struct cdnsp_event_cmd event_cmd;
+ struct cdnsp_generic_trb generic;
+};
+
+/* TRB bit mask. */
+#define TRB_TYPE_BITMASK GENMASK(15, 10)
+#define TRB_TYPE(p) ((p) << 10)
+#define TRB_FIELD_TO_TYPE(p) (((p) & TRB_TYPE_BITMASK) >> 10)
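+
+/*
+ * Illustrative sketch (not part of this patch): event handlers typically
+ * dispatch on the decoded TRB type, roughly:
+ *
+ *	switch (TRB_FIELD_TO_TYPE(le32_to_cpu(event->flags))) {
+ *	case TRB_TRANSFER:
+ *		...
+ *	}
+ *
+ * where event is an assumed struct cdnsp_transfer_event pointer.
+ */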
+
+/* TRB type IDs. */
+/* bulk, interrupt, isoc scatter/gather, and control data stage. */
+#define TRB_NORMAL 1
+/* Setup Stage for control transfers. */
+#define TRB_SETUP 2
+/* Data Stage for control transfers. */
+#define TRB_DATA 3
+/* Status Stage for control transfers. */
+#define TRB_STATUS 4
+/* ISOC transfers. */
+#define TRB_ISOC 5
+/* TRB for linking ring segments. */
+#define TRB_LINK 6
+#define TRB_EVENT_DATA 7
+/* Transfer Ring No-op (not for the command ring). */
+#define TRB_TR_NOOP 8
+
+/* Command TRBs */
+/* Enable Slot Command. */
+#define TRB_ENABLE_SLOT 9
+/* Disable Slot Command. */
+#define TRB_DISABLE_SLOT 10
+/* Address Device Command. */
+#define TRB_ADDR_DEV 11
+/* Configure Endpoint Command. */
+#define TRB_CONFIG_EP 12
+/* Evaluate Context Command. */
+#define TRB_EVAL_CONTEXT 13
+/* Reset Endpoint Command. */
+#define TRB_RESET_EP 14
+/* Stop Transfer Ring Command. */
+#define TRB_STOP_RING 15
+/* Set Transfer Ring Dequeue Pointer Command. */
+#define TRB_SET_DEQ 16
+/* Reset Device Command. */
+#define TRB_RESET_DEV 17
+/* Force Event Command (opt). */
+#define TRB_FORCE_EVENT 18
+/* Force Header Command - generate a transaction or link management packet. */
+#define TRB_FORCE_HEADER 22
+/* No-op Command - not for transfer rings. */
+#define TRB_CMD_NOOP 23
+/* TRB IDs 24-31 reserved. */
+
+/* Event TRBs. */
+/* Transfer Event. */
+#define TRB_TRANSFER 32
+/* Command Completion Event. */
+#define TRB_COMPLETION 33
+/* Port Status Change Event. */
+#define TRB_PORT_STATUS 34
+/* Device Controller Event. */
+#define TRB_HC_EVENT 37
+/* MFINDEX Wrap Event - microframe counter wrapped. */
+#define TRB_MFINDEX_WRAP 39
+/* TRB IDs 40-47 reserved. */
+/* Endpoint Not Ready Event. */
+#define TRB_ENDPOINT_NRDY 48
+/* TRB IDs 49-53 reserved. */
+/* Halt Endpoint Command. */
+#define TRB_HALT_ENDPOINT 54
+/* Doorbell Overflow Event. */
+#define TRB_DRB_OVERFLOW 57
+/* Flush Endpoint Command. */
+#define TRB_FLUSH_ENDPOINT 58
+
+#define TRB_TYPE_LINK(x) (((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
+#define TRB_TYPE_LINK_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
+ cpu_to_le32(TRB_TYPE(TRB_LINK)))
+#define TRB_TYPE_NOOP_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
+ cpu_to_le32(TRB_TYPE(TRB_TR_NOOP)))
+
+/*
+ * TRBS_PER_SEGMENT must be a multiple of 4.
+ * The command ring is 64-byte aligned, so it must also be greater than 16.
+ */
+#define TRBS_PER_SEGMENT 256
+#define TRBS_PER_EVENT_SEGMENT 256
+#define TRBS_PER_EV_DEQ_UPDATE 100
+#define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT * 16)
+#define TRB_SEGMENT_SHIFT (ilog2(TRB_SEGMENT_SIZE))
+/* TRB buffer pointers can't cross 64KB boundaries. */
+#define TRB_MAX_BUFF_SHIFT 16
+#define TRB_MAX_BUFF_SIZE BIT(TRB_MAX_BUFF_SHIFT)
+/* How much data is left before the 64KB boundary? */
+#define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr) (TRB_MAX_BUFF_SIZE - \
+ ((addr) & (TRB_MAX_BUFF_SIZE - 1)))
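+
+/*
+ * Worked example (illustrative): for addr = 0x12ff00, addr & 0xffff = 0xff00,
+ * so TRB_BUFF_LEN_UP_TO_BOUNDARY() yields 0x10000 - 0xff00 = 0x100 (256)
+ * bytes left before the 64KB boundary.
+ */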
+
+/**
+ * struct cdnsp_segment - segment related data.
+ * @trbs: Array of Transfer Request Blocks.
+ * @next: Pointer to the next segment.
+ * @dma: DMA address of current segment.
+ * @bounce_dma: Bounce buffer DMA address.
+ * @bounce_buf: Bounce buffer virtual address.
+ * @bounce_offs: Bounce buffer offset.
+ * @bounce_len: Bounce buffer length.
+ */
+struct cdnsp_segment {
+ union cdnsp_trb *trbs;
+ struct cdnsp_segment *next;
+ dma_addr_t dma;
+	/* Max packet sized bounce buffer for td-fragment alignment */
+ dma_addr_t bounce_dma;
+ void *bounce_buf;
+ unsigned int bounce_offs;
+ unsigned int bounce_len;
+};
+
+/**
+ * struct cdnsp_td - Transfer Descriptor object.
+ * @td_list: Used for binding TD with ep_ring->td_list.
+ * @preq: Request associated with this TD.
+ * @start_seg: Segment containing the first_trb in TD.
+ * @first_trb: First TRB for this TD.
+ * @last_trb: Last TRB associated with this TD.
+ * @bounce_seg: Bounce segment for this TD.
+ * @request_length_set: actual_length of the request has already been set.
+ * @drbl: TD has been added to HW scheduler - only for stream capable
+ *        endpoints.
+ */
+struct cdnsp_td {
+ struct list_head td_list;
+ struct cdnsp_request *preq;
+ struct cdnsp_segment *start_seg;
+ union cdnsp_trb *first_trb;
+ union cdnsp_trb *last_trb;
+ struct cdnsp_segment *bounce_seg;
+ bool request_length_set;
+ bool drbl;
+};
+
+/**
+ * struct cdnsp_dequeue_state - New dequeue pointer for Transfer Ring.
+ * @new_deq_seg: New dequeue segment.
+ * @new_deq_ptr: New dequeue pointer.
+ * @new_cycle_state: New cycle state.
+ * @stream_id: stream id for which new dequeue pointer has been selected.
+ */
+struct cdnsp_dequeue_state {
+ struct cdnsp_segment *new_deq_seg;
+ union cdnsp_trb *new_deq_ptr;
+ int new_cycle_state;
+ unsigned int stream_id;
+};
+
+enum cdnsp_ring_type {
+ TYPE_CTRL = 0,
+ TYPE_ISOC,
+ TYPE_BULK,
+ TYPE_INTR,
+ TYPE_STREAM,
+ TYPE_COMMAND,
+ TYPE_EVENT,
+};
+
+/**
+ * struct cdnsp_ring - information describing transfer, command or event ring.
+ * @first_seg: First segment on transfer ring.
+ * @last_seg: Last segment on transfer ring.
+ * @enqueue: SW enqueue pointer address.
+ * @enq_seg: SW enqueue segment address.
+ * @dequeue: SW dequeue pointer address.
+ * @deq_seg: SW dequeue segment address.
+ * @td_list: transfer descriptor list associated with this ring.
+ * @cycle_state: Current cycle bit. Write the cycle state into the TRB cycle
+ * field to give ownership of the TRB to the device controller
+ * (if we are the producer) or to check if we own the TRB
+ * (if we are the consumer).
+ * @stream_id: Stream ID.
+ * @stream_active: Stream is active - PRIME packet has been detected.
+ * @stream_rejected: This ring has been rejected by host.
+ * @num_tds: Number of TDs associated with ring.
+ * @num_segs: Number of segments.
+ * @num_trbs_free: Number of free TRBs on the ring.
+ * @bounce_buf_len: Length of bounce buffer.
+ * @type: Ring type - event, transfer, or command ring.
+ * @last_td_was_short: Last TD on the ring was a short TD.
+ * @trb_address_map: For mapping physical TRB addresses to segments in
+ * stream rings.
+ */
+struct cdnsp_ring {
+ struct cdnsp_segment *first_seg;
+ struct cdnsp_segment *last_seg;
+ union cdnsp_trb *enqueue;
+ struct cdnsp_segment *enq_seg;
+ union cdnsp_trb *dequeue;
+ struct cdnsp_segment *deq_seg;
+ struct list_head td_list;
+ u32 cycle_state;
+ unsigned int stream_id;
+ unsigned int stream_active;
+ unsigned int stream_rejected;
+ int num_tds;
+ unsigned int num_segs;
+ unsigned int num_trbs_free;
+ unsigned int bounce_buf_len;
+ enum cdnsp_ring_type type;
+ bool last_td_was_short;
+ struct radix_tree_root *trb_address_map;
+};
+
+/**
+ * struct cdnsp_erst_entry - event ring segment table entry object.
+ * @seg_addr: 64-bit event ring segment address.
+ * @seg_size: Number of TRBs in segment.
+ */
+struct cdnsp_erst_entry {
+ __le64 seg_addr;
+ __le32 seg_size;
+ /* Set to zero */
+ __le32 rsvd;
+};
+
+/**
+ * struct cdnsp_erst - event ring segment table for event ring.
+ * @entries: Array of event ring segments
+ * @num_entries: Number of segments in entries array.
+ * @erst_dma_addr: DMA address for entries array.
+ */
+struct cdnsp_erst {
+ struct cdnsp_erst_entry *entries;
+ unsigned int num_entries;
+ dma_addr_t erst_dma_addr;
+};
+
+/**
+ * struct cdnsp_request - extended device side representation of usb_request
+ * object.
+ * @td: Transfer descriptor associated with this request.
+ * @request: Generic usb_request object describing single I/O request.
+ * @list: Used to add the request to the endpoint pending_list.
+ * @pep: Extended representation of usb_ep object.
+ * @epnum: Endpoint number associated with usb request.
+ * @direction: Endpoint direction for usb request.
+ */
+struct cdnsp_request {
+ struct cdnsp_td td;
+ struct usb_request request;
+ struct list_head list;
+ struct cdnsp_ep *pep;
+ u8 epnum;
+ unsigned direction:1;
+};
+
+#define ERST_NUM_SEGS 1
+
+/* Stages used during enumeration process. */
+enum cdnsp_ep0_stage {
+ CDNSP_SETUP_STAGE,
+ CDNSP_DATA_STAGE,
+ CDNSP_STATUS_STAGE,
+};
+
+/**
+ * struct cdnsp_port - holds information about detected ports.
+ * @regs: Base address of the port register set.
+ * @port_num: Port number.
+ * @exist: Indicates if the port exists.
+ * @maj_rev: Major revision.
+ * @min_rev: Minor revision.
+ */
+struct cdnsp_port {
+ struct cdnsp_port_regs __iomem *regs;
+ u8 port_num;
+ u8 exist;
+ u8 maj_rev;
+ u8 min_rev;
+};
+
+#define CDNSP_EXT_PORT_MAJOR(x) (((x) >> 24) & 0xff)
+#define CDNSP_EXT_PORT_MINOR(x) (((x) >> 16) & 0xff)
+#define CDNSP_EXT_PORT_OFF(x) ((x) & 0xff)
+#define CDNSP_EXT_PORT_COUNT(x) (((x) >> 8) & 0xff)
+
+/**
+ * struct cdnsp_device - represent USB device.
+ * @dev: Pointer to device structure associated with this controller.
+ * @gadget: Device side representation of the peripheral controller.
+ * @gadget_driver: Pointer to the gadget driver.
+ * @irq: IRQ line number used by device side.
+ * @regs: IO device memory.
+ * @cap_regs: Capability registers.
+ * @op_regs: Operational registers.
+ * @run_regs: Runtime registers.
+ * @dba: Doorbell array registers.
+ * @ir_set: Current interrupter register set.
+ * @port20_regs: Port 2.0 Peripheral Configuration Registers.
+ * @port3x_regs: USB3.x Port Peripheral Configuration Registers.
+ * @rev_cap: Controller Capabilities Registers.
+ * @hcs_params1: Cached register copies of read-only HCSPARAMS1.
+ * @hcs_params3: Cached register copies of read-only HCSPARAMS3.
+ * @hcc_params: Cached register copies of read-only HCCPARAMS1.
+ * @setup: Temporary buffer for setup packet.
+ * @ep0_preq: Internal allocated request used during enumeration.
+ * @ep0_stage: ep0 stage during enumeration process.
+ * @three_stage_setup: Three stage or two stage setup.
+ * @ep0_expect_in: Data IN expected for control transfer.
+ * @setup_id: Setup identifier.
+ * @setup_speed: Speed detected for current SETUP packet.
+ * @setup_buf: Buffer for SETUP packet.
+ * @device_address: Current device address.
+ * @may_wakeup: remote wakeup enabled/disabled.
+ * @lock: Lock used in interrupt thread context.
+ * @hci_version: device controller version.
+ * @dcbaa: Device context base address array.
+ * @cmd_ring: Command ring.
+ * @cmd: Represents everything needed to issue a command on the Command Ring.
+ * @event_ring: Event ring.
+ * @erst: Event Ring Segment Table.
+ * @slot_id: Current Slot ID. Should be 0 or 1.
+ * @out_ctx: Output context.
+ * @in_ctx: Input context.
+ * @eps: Array of endpoint objects associated with the device.
+ * @usb2_hw_lpm_capable: Hardware LPM is enabled.
+ * @u1_allowed: Allow device transition to U1 state.
+ * @u2_allowed: Allow device transition to U2 state.
+ * @device_pool: DMA pool for allocating input and output context.
+ * @segment_pool: DMA pool for allocating new segments.
+ * @cdnsp_state: Current state of controller.
+ * @link_state: Current link state.
+ * @usb2_port: USB 2.0 port.
+ * @usb3_port: USB 3.0 port.
+ * @active_port: Currently selected port.
+ * @test_mode: Selected Test Mode.
+ */
+struct cdnsp_device {
+ struct device *dev;
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *gadget_driver;
+ unsigned int irq;
+ void __iomem *regs;
+
+ /* Registers map */
+ struct cdnsp_cap_regs __iomem *cap_regs;
+ struct cdnsp_op_regs __iomem *op_regs;
+ struct cdnsp_run_regs __iomem *run_regs;
+ struct cdnsp_doorbell_array __iomem *dba;
+ struct cdnsp_intr_reg __iomem *ir_set;
+ struct cdnsp_20port_cap __iomem *port20_regs;
+ struct cdnsp_3xport_cap __iomem *port3x_regs;
+ struct cdnsp_rev_cap __iomem *rev_cap;
+
+ /* Cached register copies of read-only CDNSP data */
+ __u32 hcs_params1;
+ __u32 hcs_params3;
+ __u32 hcc_params;
+ /* Lock used in interrupt thread context. */
+ spinlock_t lock;
+ struct usb_ctrlrequest setup;
+ struct cdnsp_request ep0_preq;
+ enum cdnsp_ep0_stage ep0_stage;
+ u8 three_stage_setup;
+ u8 ep0_expect_in;
+ u8 setup_id;
+ u8 setup_speed;
+ void *setup_buf;
+ u8 device_address;
+ int may_wakeup;
+ u16 hci_version;
+
+ /* data structures */
+ struct cdnsp_device_context_array *dcbaa;
+ struct cdnsp_ring *cmd_ring;
+ struct cdnsp_command cmd;
+ struct cdnsp_ring *event_ring;
+ struct cdnsp_erst erst;
+ int slot_id;
+
+ /*
+ * Commands to the hardware are passed an "input context" that
+ * tells the hardware what to change in its data structures.
+ * The hardware will return changes in an "output context" that
+	 * software must allocate for the hardware.
+ */
+ struct cdnsp_container_ctx out_ctx;
+ struct cdnsp_container_ctx in_ctx;
+ struct cdnsp_ep eps[CDNSP_ENDPOINTS_NUM];
+ u8 usb2_hw_lpm_capable:1;
+ u8 u1_allowed:1;
+ u8 u2_allowed:1;
+
+ /* DMA pools */
+ struct dma_pool *device_pool;
+ struct dma_pool *segment_pool;
+
+#define CDNSP_STATE_HALTED BIT(1)
+#define CDNSP_STATE_DYING BIT(2)
+#define CDNSP_STATE_DISCONNECT_PENDING BIT(3)
+#define CDNSP_WAKEUP_PENDING BIT(4)
+ unsigned int cdnsp_state;
+ unsigned int link_state;
+
+ struct cdnsp_port usb2_port;
+ struct cdnsp_port usb3_port;
+ struct cdnsp_port *active_port;
+ u16 test_mode;
+};
+
+/*
+ * Registers should always be accessed with double word or quad word accesses.
+ *
+ * Registers with 64-bit address pointers should be written to with
+ * dword accesses by writing the low dword first (ptr[0]), then the high dword
+ * (ptr[1]) second. Controller implementations that do not support 64-bit
+ * address pointers will ignore the high dword, and write order is irrelevant.
+ */
+static inline u64 cdnsp_read_64(__le64 __iomem *regs)
+{
+ return lo_hi_readq(regs);
+}
+
+static inline void cdnsp_write_64(const u64 val, __le64 __iomem *regs)
+{
+ lo_hi_writeq(val, regs);
+}
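+
+/*
+ * Illustrative sketch (not part of this patch): programming the 64-bit ERST
+ * base address register could look roughly like:
+ *
+ *	cdnsp_write_64(pdev->erst.erst_dma_addr, &pdev->ir_set->erst_base);
+ *
+ * possibly after masking reserved low-order bits, per the register layout.
+ */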
+
+/* CDNSP memory management functions. */
+void cdnsp_mem_cleanup(struct cdnsp_device *pdev);
+int cdnsp_mem_init(struct cdnsp_device *pdev);
+int cdnsp_setup_addressable_priv_dev(struct cdnsp_device *pdev);
+void cdnsp_copy_ep0_dequeue_into_input_ctx(struct cdnsp_device *pdev);
+void cdnsp_endpoint_zero(struct cdnsp_device *pdev, struct cdnsp_ep *ep);
+int cdnsp_endpoint_init(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ gfp_t mem_flags);
+int cdnsp_ring_expansion(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ring,
+ unsigned int num_trbs, gfp_t flags);
+struct cdnsp_ring *cdnsp_dma_to_transfer_ring(struct cdnsp_ep *ep, u64 address);
+int cdnsp_alloc_stream_info(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ unsigned int num_stream_ctxs,
+ unsigned int num_streams);
+int cdnsp_alloc_streams(struct cdnsp_device *pdev, struct cdnsp_ep *pep);
+void cdnsp_free_endpoint_rings(struct cdnsp_device *pdev, struct cdnsp_ep *pep);
+
+/* Device controller glue. */
+int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id);
+int cdnsp_halt(struct cdnsp_device *pdev);
+void cdnsp_died(struct cdnsp_device *pdev);
+int cdnsp_reset(struct cdnsp_device *pdev);
+irqreturn_t cdnsp_irq_handler(int irq, void *priv);
+int cdnsp_setup_device(struct cdnsp_device *pdev, enum cdnsp_setup_dev setup);
+void cdnsp_set_usb2_hardware_lpm(struct cdnsp_device *usbsssp_data,
+ struct usb_request *req, int enable);
+irqreturn_t cdnsp_thread_irq_handler(int irq, void *data);
+
+/* Ring, segment, TRB, and TD functions. */
+dma_addr_t cdnsp_trb_virt_to_dma(struct cdnsp_segment *seg,
+ union cdnsp_trb *trb);
+bool cdnsp_last_trb_on_seg(struct cdnsp_segment *seg, union cdnsp_trb *trb);
+bool cdnsp_last_trb_on_ring(struct cdnsp_ring *ring,
+ struct cdnsp_segment *seg,
+ union cdnsp_trb *trb);
+int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev);
+void cdnsp_update_erst_dequeue(struct cdnsp_device *pdev,
+ union cdnsp_trb *event_ring_deq,
+ u8 clear_ehb);
+void cdnsp_initialize_ring_info(struct cdnsp_ring *ring);
+void cdnsp_ring_cmd_db(struct cdnsp_device *pdev);
+void cdnsp_queue_slot_control(struct cdnsp_device *pdev, u32 trb_type);
+void cdnsp_queue_address_device(struct cdnsp_device *pdev,
+ dma_addr_t in_ctx_ptr,
+ enum cdnsp_setup_dev setup);
+void cdnsp_queue_stop_endpoint(struct cdnsp_device *pdev,
+ unsigned int ep_index);
+int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq);
+int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq);
+int cdnsp_queue_isoc_tx_prepare(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq);
+void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev,
+ dma_addr_t in_ctx_ptr);
+void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index);
+void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev,
+ unsigned int ep_index);
+void cdnsp_queue_flush_endpoint(struct cdnsp_device *pdev,
+ unsigned int ep_index);
+void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num);
+void cdnsp_queue_reset_device(struct cdnsp_device *pdev);
+void cdnsp_queue_new_dequeue_state(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ struct cdnsp_dequeue_state *deq_state);
+void cdnsp_ring_doorbell_for_active_rings(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep);
+void cdnsp_inc_deq(struct cdnsp_device *pdev, struct cdnsp_ring *ring);
+void cdnsp_set_link_state(struct cdnsp_device *pdev,
+ __le32 __iomem *port_regs, u32 link_state);
+u32 cdnsp_port_state_to_neutral(u32 state);
+
+/* CDNSP device controller contexts. */
+int cdnsp_enable_slot(struct cdnsp_device *pdev);
+int cdnsp_disable_slot(struct cdnsp_device *pdev);
+struct cdnsp_input_control_ctx
+ *cdnsp_get_input_control_ctx(struct cdnsp_container_ctx *ctx);
+struct cdnsp_slot_ctx *cdnsp_get_slot_ctx(struct cdnsp_container_ctx *ctx);
+struct cdnsp_ep_ctx *cdnsp_get_ep_ctx(struct cdnsp_container_ctx *ctx,
+ unsigned int ep_index);
+/* CDNSP gadget interface. */
+void cdnsp_suspend_gadget(struct cdnsp_device *pdev);
+void cdnsp_resume_gadget(struct cdnsp_device *pdev);
+void cdnsp_disconnect_gadget(struct cdnsp_device *pdev);
+void cdnsp_gadget_giveback(struct cdnsp_ep *pep, struct cdnsp_request *preq,
+ int status);
+int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq);
+int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq);
+unsigned int cdnsp_port_speed(unsigned int port_status);
+void cdnsp_irq_reset(struct cdnsp_device *pdev);
+int cdnsp_halt_endpoint(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep, int value);
+int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep);
+int cdnsp_cmd_flush_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep);
+void cdnsp_setup_analyze(struct cdnsp_device *pdev);
+int cdnsp_status_stage(struct cdnsp_device *pdev);
+int cdnsp_reset_device(struct cdnsp_device *pdev);
+
+/**
+ * next_request - gets the next request on the given list
+ * @list: the request list to operate on
+ *
+ * Caller should take care of locking. This function returns NULL or the
+ * first request available on the list.
+ */
+static inline struct cdnsp_request *next_request(struct list_head *list)
+{
+ return list_first_entry_or_null(list, struct cdnsp_request, list);
+}
+
+#define to_cdnsp_ep(ep) (container_of(ep, struct cdnsp_ep, endpoint))
+#define gadget_to_cdnsp(g) (container_of(g, struct cdnsp_device, gadget))
+#define request_to_cdnsp_request(r) (container_of(r, struct cdnsp_request, \
+ request))
+#define to_cdnsp_request(r) (container_of(r, struct cdnsp_request, request))
+int cdnsp_remove_request(struct cdnsp_device *pdev, struct cdnsp_request *preq,
+ struct cdnsp_ep *pep);
+
+#endif /* __LINUX_CDNSP_GADGET_H */
diff --git a/drivers/usb/cdns3/cdnsp-mem.c b/drivers/usb/cdns3/cdnsp-mem.c
new file mode 100644
index 000000000000..7a84e928710e
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-mem.c
@@ -0,0 +1,1336 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ * Code based on Linux XHCI driver.
+ * Origin: Copyright (C) 2008 Intel Corp.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include "cdnsp-gadget.h"
+#include "cdnsp-trace.h"
+
+static void cdnsp_free_stream_info(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep);
+/*
+ * Allocates a generic ring segment from the ring pool, sets the dma address,
+ * initializes the segment to zero, and sets the private next pointer to NULL.
+ *
+ * "All components of all Command and Transfer TRBs shall be initialized to '0'"
+ */
+static struct cdnsp_segment *cdnsp_segment_alloc(struct cdnsp_device *pdev,
+ unsigned int cycle_state,
+ unsigned int max_packet,
+ gfp_t flags)
+{
+ struct cdnsp_segment *seg;
+ dma_addr_t dma;
+ int i;
+
+ seg = kzalloc(sizeof(*seg), flags);
+ if (!seg)
+ return NULL;
+
+ seg->trbs = dma_pool_zalloc(pdev->segment_pool, flags, &dma);
+ if (!seg->trbs) {
+ kfree(seg);
+ return NULL;
+ }
+
+ if (max_packet) {
+ seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);
+ if (!seg->bounce_buf)
+ goto free_dma;
+ }
+
+ /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs. */
+ if (cycle_state == 0) {
+ for (i = 0; i < TRBS_PER_SEGMENT; i++)
+ seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
+ }
+ seg->dma = dma;
+ seg->next = NULL;
+
+ return seg;
+
+free_dma:
+ dma_pool_free(pdev->segment_pool, seg->trbs, dma);
+ kfree(seg);
+
+ return NULL;
+}
+
+static void cdnsp_segment_free(struct cdnsp_device *pdev,
+ struct cdnsp_segment *seg)
+{
+ if (seg->trbs)
+ dma_pool_free(pdev->segment_pool, seg->trbs, seg->dma);
+
+ kfree(seg->bounce_buf);
+ kfree(seg);
+}
+
+static void cdnsp_free_segments_for_ring(struct cdnsp_device *pdev,
+ struct cdnsp_segment *first)
+{
+ struct cdnsp_segment *seg;
+
+ seg = first->next;
+
+ while (seg != first) {
+ struct cdnsp_segment *next = seg->next;
+
+ cdnsp_segment_free(pdev, seg);
+ seg = next;
+ }
+
+ cdnsp_segment_free(pdev, first);
+}
+
+/*
+ * Make the prev segment point to the next segment.
+ *
+ * Change the last TRB in the prev segment to be a Link TRB which points to the
+ * DMA address of the next segment. The caller needs to set any Link TRB
+ * related flags, such as End TRB, Toggle Cycle, and no snoop.
+ */
+static void cdnsp_link_segments(struct cdnsp_device *pdev,
+ struct cdnsp_segment *prev,
+ struct cdnsp_segment *next,
+ enum cdnsp_ring_type type)
+{
+ struct cdnsp_link_trb *link;
+ u32 val;
+
+ if (!prev || !next)
+ return;
+
+ prev->next = next;
+ if (type != TYPE_EVENT) {
+ link = &prev->trbs[TRBS_PER_SEGMENT - 1].link;
+ link->segment_ptr = cpu_to_le64(next->dma);
+
+ /*
+ * Set the last TRB in the segment to have a TRB type ID
+ * of Link TRB
+ */
+ val = le32_to_cpu(link->control);
+ val &= ~TRB_TYPE_BITMASK;
+ val |= TRB_TYPE(TRB_LINK);
+ link->control = cpu_to_le32(val);
+ }
+}
+
+/*
+ * Link the ring to the new segments.
+ * Set Toggle Cycle for the new ring if needed.
+ */
+static void cdnsp_link_rings(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ring,
+ struct cdnsp_segment *first,
+ struct cdnsp_segment *last,
+ unsigned int num_segs)
+{
+ struct cdnsp_segment *next;
+
+ if (!ring || !first || !last)
+ return;
+
+ next = ring->enq_seg->next;
+ cdnsp_link_segments(pdev, ring->enq_seg, first, ring->type);
+ cdnsp_link_segments(pdev, last, next, ring->type);
+ ring->num_segs += num_segs;
+ ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
+
+ if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
+ ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
+ ~cpu_to_le32(LINK_TOGGLE);
+ last->trbs[TRBS_PER_SEGMENT - 1].link.control |=
+ cpu_to_le32(LINK_TOGGLE);
+ ring->last_seg = last;
+ }
+}
+
+/*
+ * We need a radix tree for mapping physical addresses of TRBs to which stream
+ * ID they belong to. We need to do this because the device controller won't
+ * tell us which stream ring the TRB came from. We could store the stream ID
+ * in an event data TRB, but that doesn't help us for the cancellation case,
+ * since the endpoint may stop before it reaches that event data TRB.
+ *
+ * The radix tree maps the upper portion of the TRB DMA address to a ring
+ * segment that has the same upper portion of DMA addresses. For example,
+ * say I have segments of size 1KB, that are always 1KB aligned. A segment may
+ * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
+ * key to the stream ID is 0x43244. I can use the DMA address of the TRB to
+ * pass the radix tree a key to get the right stream ID:
+ *
+ * 0x10c90fff >> 10 = 0x43243
+ * 0x10c912c0 >> 10 = 0x43244
+ * 0x10c91400 >> 10 = 0x43245
+ *
+ * Obviously, only those TRBs with DMA addresses that are within the segment
+ * will make the radix tree return the stream ID for that ring.
+ *
+ * Caveats for the radix tree:
+ *
+ * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an
+ * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
+ * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
+ * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
+ * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
+ * extended systems (where the DMA address can be bigger than 32-bits),
+ * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
+ */
+static int cdnsp_insert_segment_mapping(struct radix_tree_root *trb_address_map,
+ struct cdnsp_ring *ring,
+ struct cdnsp_segment *seg,
+ gfp_t mem_flags)
+{
+ unsigned long key;
+ int ret;
+
+ key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
+
+ /* Skip any segments that were already added. */
+ if (radix_tree_lookup(trb_address_map, key))
+ return 0;
+
+ ret = radix_tree_maybe_preload(mem_flags);
+ if (ret)
+ return ret;
+
+ ret = radix_tree_insert(trb_address_map, key, ring);
+ radix_tree_preload_end();
+
+ return ret;
+}
+
+static void cdnsp_remove_segment_mapping(struct radix_tree_root *trb_address_map,
+ struct cdnsp_segment *seg)
+{
+ unsigned long key;
+
+ key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
+ if (radix_tree_lookup(trb_address_map, key))
+ radix_tree_delete(trb_address_map, key);
+}
+
+static int cdnsp_update_stream_segment_mapping(struct radix_tree_root *trb_address_map,
+ struct cdnsp_ring *ring,
+ struct cdnsp_segment *first_seg,
+ struct cdnsp_segment *last_seg,
+ gfp_t mem_flags)
+{
+ struct cdnsp_segment *failed_seg;
+ struct cdnsp_segment *seg;
+ int ret;
+
+ seg = first_seg;
+ do {
+ ret = cdnsp_insert_segment_mapping(trb_address_map, ring, seg,
+ mem_flags);
+ if (ret)
+ goto remove_streams;
+ if (seg == last_seg)
+ return 0;
+ seg = seg->next;
+ } while (seg != first_seg);
+
+ return 0;
+
+remove_streams:
+ failed_seg = seg;
+ seg = first_seg;
+ do {
+ cdnsp_remove_segment_mapping(trb_address_map, seg);
+ if (seg == failed_seg)
+ return ret;
+ seg = seg->next;
+ } while (seg != first_seg);
+
+ return ret;
+}
+
+static void cdnsp_remove_stream_mapping(struct cdnsp_ring *ring)
+{
+ struct cdnsp_segment *seg;
+
+ seg = ring->first_seg;
+ do {
+ cdnsp_remove_segment_mapping(ring->trb_address_map, seg);
+ seg = seg->next;
+ } while (seg != ring->first_seg);
+}
+
+static int cdnsp_update_stream_mapping(struct cdnsp_ring *ring)
+{
+ return cdnsp_update_stream_segment_mapping(ring->trb_address_map, ring,
+ ring->first_seg, ring->last_seg, GFP_ATOMIC);
+}
+
+static void cdnsp_ring_free(struct cdnsp_device *pdev, struct cdnsp_ring *ring)
+{
+ if (!ring)
+ return;
+
+ trace_cdnsp_ring_free(ring);
+
+ if (ring->first_seg) {
+ if (ring->type == TYPE_STREAM)
+ cdnsp_remove_stream_mapping(ring);
+
+ cdnsp_free_segments_for_ring(pdev, ring->first_seg);
+ }
+
+ kfree(ring);
+}
+
+void cdnsp_initialize_ring_info(struct cdnsp_ring *ring)
+{
+ ring->enqueue = ring->first_seg->trbs;
+ ring->enq_seg = ring->first_seg;
+ ring->dequeue = ring->enqueue;
+ ring->deq_seg = ring->first_seg;
+
+ /*
+ * The ring is initialized to 0. The producer must write 1 to the cycle
+ * bit to handover ownership of the TRB, so PCS = 1. The consumer must
+ * compare CCS to the cycle bit to check ownership, so CCS = 1.
+ *
+ * New rings are initialized with cycle state equal to 1; if we are
+ * handling ring expansion, set the cycle state equal to the old ring.
+ */
+ ring->cycle_state = 1;
+
+ /*
+	 * Each segment has a link TRB, and we leave an extra TRB for SW
+	 * accounting purposes.
+ */
+ ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
+}
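+
+/*
+ * Worked example (illustrative): with TRBS_PER_SEGMENT = 256, a freshly
+ * initialized two-segment ring has 2 * (256 - 1) - 1 = 509 usable TRBs.
+ */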
+
+/* Allocate segments and link them for a ring. */
+static int cdnsp_alloc_segments_for_ring(struct cdnsp_device *pdev,
+ struct cdnsp_segment **first,
+ struct cdnsp_segment **last,
+ unsigned int num_segs,
+ unsigned int cycle_state,
+ enum cdnsp_ring_type type,
+ unsigned int max_packet,
+ gfp_t flags)
+{
+ struct cdnsp_segment *prev;
+
+ /* Allocate first segment. */
+ prev = cdnsp_segment_alloc(pdev, cycle_state, max_packet, flags);
+ if (!prev)
+ return -ENOMEM;
+
+ num_segs--;
+ *first = prev;
+
+ /* Allocate all other segments. */
+ while (num_segs > 0) {
+ struct cdnsp_segment *next;
+
+ next = cdnsp_segment_alloc(pdev, cycle_state,
+ max_packet, flags);
+ if (!next) {
+ cdnsp_free_segments_for_ring(pdev, *first);
+ return -ENOMEM;
+ }
+
+ cdnsp_link_segments(pdev, prev, next, type);
+
+ prev = next;
+ num_segs--;
+ }
+
+ cdnsp_link_segments(pdev, prev, *first, type);
+ *last = prev;
+
+ return 0;
+}
+
+/*
+ * Create a new ring with zero or more segments.
+ *
+ * Link each segment together into a ring.
+ * Set the end flag and the cycle toggle bit on the last segment.
+ */
+static struct cdnsp_ring *cdnsp_ring_alloc(struct cdnsp_device *pdev,
+ unsigned int num_segs,
+ enum cdnsp_ring_type type,
+ unsigned int max_packet,
+ gfp_t flags)
+{
+ struct cdnsp_ring *ring;
+ int ret;
+
+	ring = kzalloc(sizeof(*ring), flags);
+ if (!ring)
+ return NULL;
+
+ ring->num_segs = num_segs;
+ ring->bounce_buf_len = max_packet;
+ INIT_LIST_HEAD(&ring->td_list);
+ ring->type = type;
+
+ if (num_segs == 0)
+ return ring;
+
+ ret = cdnsp_alloc_segments_for_ring(pdev, &ring->first_seg,
+ &ring->last_seg, num_segs,
+ 1, type, max_packet, flags);
+ if (ret)
+ goto fail;
+
+ /* Only event ring does not use link TRB. */
+ if (type != TYPE_EVENT)
+ ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
+ cpu_to_le32(LINK_TOGGLE);
+
+ cdnsp_initialize_ring_info(ring);
+ trace_cdnsp_ring_alloc(ring);
+ return ring;
+fail:
+ kfree(ring);
+ return NULL;
+}
+
+void cdnsp_free_endpoint_rings(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+ cdnsp_ring_free(pdev, pep->ring);
+ pep->ring = NULL;
+ cdnsp_free_stream_info(pdev, pep);
+}
+
+/*
+ * Expand an existing ring.
+ * Allocate a new ring with the same number of segments and link the two rings.
+ */
+int cdnsp_ring_expansion(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ring,
+ unsigned int num_trbs,
+ gfp_t flags)
+{
+ unsigned int num_segs_needed;
+ struct cdnsp_segment *first;
+ struct cdnsp_segment *last;
+ unsigned int num_segs;
+ int ret;
+
+ num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
+ (TRBS_PER_SEGMENT - 1);
+
+	/* Allocate the number of segments we need, or double the ring size. */
+ num_segs = max(ring->num_segs, num_segs_needed);
+
+ ret = cdnsp_alloc_segments_for_ring(pdev, &first, &last, num_segs,
+ ring->cycle_state, ring->type,
+ ring->bounce_buf_len, flags);
+ if (ret)
+ return -ENOMEM;
+
+ if (ring->type == TYPE_STREAM)
+ ret = cdnsp_update_stream_segment_mapping(ring->trb_address_map,
+ ring, first,
+ last, flags);
+
+ if (ret) {
+ cdnsp_free_segments_for_ring(pdev, first);
+
+ return ret;
+ }
+
+ cdnsp_link_rings(pdev, ring, first, last, num_segs);
+ trace_cdnsp_ring_expansion(ring);
+
+ return 0;
+}
+
+static int cdnsp_init_device_ctx(struct cdnsp_device *pdev)
+{
+ int size = HCC_64BYTE_CONTEXT(pdev->hcc_params) ? 2048 : 1024;
+
+ pdev->out_ctx.type = CDNSP_CTX_TYPE_DEVICE;
+ pdev->out_ctx.size = size;
+ pdev->out_ctx.ctx_size = CTX_SIZE(pdev->hcc_params);
+ pdev->out_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC,
+ &pdev->out_ctx.dma);
+
+ if (!pdev->out_ctx.bytes)
+ return -ENOMEM;
+
+ pdev->in_ctx.type = CDNSP_CTX_TYPE_INPUT;
+ pdev->in_ctx.ctx_size = pdev->out_ctx.ctx_size;
+ pdev->in_ctx.size = size + pdev->out_ctx.ctx_size;
+ pdev->in_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC,
+ &pdev->in_ctx.dma);
+
+ if (!pdev->in_ctx.bytes) {
+ dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
+ pdev->out_ctx.dma);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+struct cdnsp_input_control_ctx
+ *cdnsp_get_input_control_ctx(struct cdnsp_container_ctx *ctx)
+{
+ if (ctx->type != CDNSP_CTX_TYPE_INPUT)
+ return NULL;
+
+ return (struct cdnsp_input_control_ctx *)ctx->bytes;
+}
+
+struct cdnsp_slot_ctx *cdnsp_get_slot_ctx(struct cdnsp_container_ctx *ctx)
+{
+ if (ctx->type == CDNSP_CTX_TYPE_DEVICE)
+ return (struct cdnsp_slot_ctx *)ctx->bytes;
+
+ return (struct cdnsp_slot_ctx *)(ctx->bytes + ctx->ctx_size);
+}
+
+struct cdnsp_ep_ctx *cdnsp_get_ep_ctx(struct cdnsp_container_ctx *ctx,
+ unsigned int ep_index)
+{
+ /* Increment ep index by offset of start of ep ctx array. */
+ ep_index++;
+ if (ctx->type == CDNSP_CTX_TYPE_INPUT)
+ ep_index++;
+
+ return (struct cdnsp_ep_ctx *)(ctx->bytes + (ep_index * ctx->ctx_size));
+}
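+
+/*
+ * Worked example (illustrative): with 64-byte contexts (ctx_size = 64), an
+ * input context maps ep_index 0 to bytes + 2 * 64, since the input control
+ * context and the slot context precede the endpoint context array.
+ */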
+
+static void cdnsp_free_stream_ctx(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep)
+{
+ dma_pool_free(pdev->device_pool, pep->stream_info.stream_ctx_array,
+ pep->stream_info.ctx_array_dma);
+}
+
+/* The number of stream context array entries must be a power of 2. */
+static struct cdnsp_stream_ctx
+ *cdnsp_alloc_stream_ctx(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+ size_t size = sizeof(struct cdnsp_stream_ctx) *
+ pep->stream_info.num_stream_ctxs;
+
+ if (size > CDNSP_CTX_SIZE)
+ return NULL;
+
+	/*
+	 * The driver intentionally uses the device_pool to allocate the
+	 * stream context array. The device pool has 2048-byte entries,
+	 * which gives us 128 entries.
+	 */
+ return dma_pool_zalloc(pdev->device_pool, GFP_DMA32 | GFP_ATOMIC,
+ &pep->stream_info.ctx_array_dma);
+}
+
+struct cdnsp_ring *cdnsp_dma_to_transfer_ring(struct cdnsp_ep *pep, u64 address)
+{
+ if (pep->ep_state & EP_HAS_STREAMS)
+ return radix_tree_lookup(&pep->stream_info.trb_address_map,
+ address >> TRB_SEGMENT_SHIFT);
+
+ return pep->ring;
+}
+
+/*
+ * Change an endpoint's internal structure so it supports stream IDs.
+ * The number of requested streams includes stream 0, which cannot be used by
+ * the driver.
+ *
+ * The number of stream contexts in the stream context array may be bigger than
+ * the number of streams the driver wants to use. This is because the number of
+ * stream context array entries must be a power of two.
+ */
+int cdnsp_alloc_stream_info(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ unsigned int num_stream_ctxs,
+ unsigned int num_streams)
+{
+ struct cdnsp_stream_info *stream_info;
+ struct cdnsp_ring *cur_ring;
+ u32 cur_stream;
+ u64 addr;
+ int ret;
+ int mps;
+
+ stream_info = &pep->stream_info;
+ stream_info->num_streams = num_streams;
+ stream_info->num_stream_ctxs = num_stream_ctxs;
+
+ /* Initialize the array of virtual pointers to stream rings. */
+ stream_info->stream_rings = kcalloc(num_streams,
+ sizeof(struct cdnsp_ring *),
+ GFP_ATOMIC);
+ if (!stream_info->stream_rings)
+ return -ENOMEM;
+
+ /* Initialize the array of DMA addresses for stream rings for the HW. */
+ stream_info->stream_ctx_array = cdnsp_alloc_stream_ctx(pdev, pep);
+ if (!stream_info->stream_ctx_array)
+ goto cleanup_stream_rings;
+
+ memset(stream_info->stream_ctx_array, 0,
+ sizeof(struct cdnsp_stream_ctx) * num_stream_ctxs);
+ INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
+ mps = usb_endpoint_maxp(pep->endpoint.desc);
+
+ /*
+ * Allocate rings for all the streams that the driver will use,
+ * and add their segment DMA addresses to the radix tree.
+ * Stream 0 is reserved.
+ */
+ for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+ cur_ring = cdnsp_ring_alloc(pdev, 2, TYPE_STREAM, mps,
+ GFP_ATOMIC);
+ stream_info->stream_rings[cur_stream] = cur_ring;
+
+ if (!cur_ring)
+ goto cleanup_rings;
+
+ cur_ring->stream_id = cur_stream;
+ cur_ring->trb_address_map = &stream_info->trb_address_map;
+
+ /* Set deq ptr, cycle bit, and stream context type. */
+ addr = cur_ring->first_seg->dma | SCT_FOR_CTX(SCT_PRI_TR) |
+ cur_ring->cycle_state;
+
+ stream_info->stream_ctx_array[cur_stream].stream_ring =
+ cpu_to_le64(addr);
+
+ trace_cdnsp_set_stream_ring(cur_ring);
+
+ ret = cdnsp_update_stream_mapping(cur_ring);
+ if (ret)
+ goto cleanup_rings;
+ }
+
+ return 0;
+
+cleanup_rings:
+ for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+ cur_ring = stream_info->stream_rings[cur_stream];
+ if (cur_ring) {
+ cdnsp_ring_free(pdev, cur_ring);
+ stream_info->stream_rings[cur_stream] = NULL;
+ }
+ }
+
+cleanup_stream_rings:
+ kfree(pep->stream_info.stream_rings);
+
+ return -ENOMEM;
+}
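+
+/*
+ * Illustrative sketch of a hypothetical caller (names assumed, not part
+ * of this driver): since the stream context array must be a power of two
+ * and the requested count already includes the reserved stream 0, a
+ * caller might size the array as:
+ *
+ *	unsigned int num_stream_ctxs = roundup_pow_of_two(num_streams);
+ *
+ *	ret = cdnsp_alloc_stream_info(pdev, pep, num_stream_ctxs,
+ *				      num_streams);
+ */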
+
+/* Frees all stream contexts associated with the endpoint. */
+static void cdnsp_free_stream_info(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep)
+{
+ struct cdnsp_stream_info *stream_info = &pep->stream_info;
+ struct cdnsp_ring *cur_ring;
+ int cur_stream;
+
+ if (!(pep->ep_state & EP_HAS_STREAMS))
+ return;
+
+ for (cur_stream = 1; cur_stream < stream_info->num_streams;
+ cur_stream++) {
+ cur_ring = stream_info->stream_rings[cur_stream];
+ if (cur_ring) {
+ cdnsp_ring_free(pdev, cur_ring);
+ stream_info->stream_rings[cur_stream] = NULL;
+ }
+ }
+
+ if (stream_info->stream_ctx_array)
+ cdnsp_free_stream_ctx(pdev, pep);
+
+ kfree(stream_info->stream_rings);
+ pep->ep_state &= ~EP_HAS_STREAMS;
+}
+
+/* All the cdnsp_tds in the ring's TD list should be freed at this point. */
+static void cdnsp_free_priv_device(struct cdnsp_device *pdev)
+{
+ pdev->dcbaa->dev_context_ptrs[1] = 0;
+
+ cdnsp_free_endpoint_rings(pdev, &pdev->eps[0]);
+
+ if (pdev->in_ctx.bytes)
+ dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes,
+ pdev->in_ctx.dma);
+
+ if (pdev->out_ctx.bytes)
+ dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
+ pdev->out_ctx.dma);
+
+ pdev->in_ctx.bytes = NULL;
+ pdev->out_ctx.bytes = NULL;
+}
+
+static int cdnsp_alloc_priv_device(struct cdnsp_device *pdev)
+{
+ int ret;
+
+ ret = cdnsp_init_device_ctx(pdev);
+ if (ret)
+ return ret;
+
+ /* Allocate endpoint 0 ring. */
+ pdev->eps[0].ring = cdnsp_ring_alloc(pdev, 2, TYPE_CTRL, 0, GFP_ATOMIC);
+ if (!pdev->eps[0].ring)
+ goto fail;
+
+ /* Point to output device context in dcbaa. */
+ pdev->dcbaa->dev_context_ptrs[1] = cpu_to_le64(pdev->out_ctx.dma);
+ pdev->cmd.in_ctx = &pdev->in_ctx;
+
+ trace_cdnsp_alloc_priv_device(pdev);
+ return 0;
+fail:
+ dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
+ pdev->out_ctx.dma);
+ dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes,
+ pdev->in_ctx.dma);
+
+ return ret;
+}
+
+void cdnsp_copy_ep0_dequeue_into_input_ctx(struct cdnsp_device *pdev)
+{
+ struct cdnsp_ep_ctx *ep0_ctx = pdev->eps[0].in_ctx;
+ struct cdnsp_ring *ep_ring = pdev->eps[0].ring;
+ dma_addr_t dma;
+
+ dma = cdnsp_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue);
+ ep0_ctx->deq = cpu_to_le64(dma | ep_ring->cycle_state);
+}
+
+/* Set up a controller private device for a Set Address command. */
+int cdnsp_setup_addressable_priv_dev(struct cdnsp_device *pdev)
+{
+ struct cdnsp_slot_ctx *slot_ctx;
+ struct cdnsp_ep_ctx *ep0_ctx;
+ u32 max_packets, port;
+
+ ep0_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, 0);
+ slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
+
+ /* Only the control endpoint is valid - one endpoint context. */
+ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
+
+ switch (pdev->gadget.speed) {
+ case USB_SPEED_SUPER_PLUS:
+ slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
+ max_packets = MAX_PACKET(512);
+ break;
+ case USB_SPEED_SUPER:
+ slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
+ max_packets = MAX_PACKET(512);
+ break;
+ case USB_SPEED_HIGH:
+ slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
+ max_packets = MAX_PACKET(64);
+ break;
+ case USB_SPEED_FULL:
+ slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
+ max_packets = MAX_PACKET(64);
+ break;
+ default:
+ /* Speed was not set; this shouldn't happen. */
+ return -EINVAL;
+ }
+
+ port = DEV_PORT(pdev->active_port->port_num);
+ slot_ctx->dev_port |= cpu_to_le32(port);
+ slot_ctx->dev_state = cpu_to_le32((pdev->device_address &
+ DEV_ADDR_MASK));
+ ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(0x8));
+ ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
+ ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
+ max_packets);
+
+ ep0_ctx->deq = cpu_to_le64(pdev->eps[0].ring->first_seg->dma |
+ pdev->eps[0].ring->cycle_state);
+
+ trace_cdnsp_setup_addressable_priv_device(pdev);
+
+ return 0;
+}
+
+/*
+ * Convert interval expressed as 2^(bInterval - 1) == interval into
+ * straight exponent value 2^n == interval.
+ */
+static unsigned int cdnsp_parse_exponent_interval(struct usb_gadget *g,
+ struct cdnsp_ep *pep)
+{
+ unsigned int interval;
+
+ interval = clamp_val(pep->endpoint.desc->bInterval, 1, 16) - 1;
+ if (interval != pep->endpoint.desc->bInterval - 1)
+ dev_warn(&g->dev, "ep %s - rounding interval to %d %sframes\n",
+ pep->name, 1 << interval,
+ g->speed == USB_SPEED_FULL ? "" : "micro");
+
+ /*
+ * Full speed isoc endpoints specify interval in frames,
+ * not microframes. We are using microframes everywhere,
+ * so adjust accordingly.
+ */
+ if (g->speed == USB_SPEED_FULL)
+ interval += 3; /* 1 frame = 2^3 uframes */
+
+ /* Controller handles only up to 512ms (2^12). */
+ if (interval > 12)
+ interval = 12;
+
+ return interval;
+}
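+
+/*
+ * Worked example for the conversion above: a HS interrupt endpoint with
+ * bInterval = 4 describes a period of 2^(4 - 1) = 8 microframes, so the
+ * stored exponent is 3 (2^3 * 125us = 1ms). For a FS isoc endpoint the
+ * same bInterval counts frames, so 3 more is added: 2^6 * 125us = 8ms.
+ */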
+
+/*
+ * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
+ * microframes, rounded down to nearest power of 2.
+ */
+static unsigned int cdnsp_microframes_to_exponent(struct usb_gadget *g,
+ struct cdnsp_ep *pep,
+ unsigned int desc_interval,
+ unsigned int min_exponent,
+ unsigned int max_exponent)
+{
+ unsigned int interval;
+
+ interval = fls(desc_interval) - 1;
+ return clamp_val(interval, min_exponent, max_exponent);
+}
+
+/*
+ * Return the polling interval.
+ *
+ * The polling interval is expressed in "microframes". If the controller's
+ * Interval field is set to N, it will service the endpoint every
+ * 2^N * 125us.
+ */
+static unsigned int cdnsp_get_endpoint_interval(struct usb_gadget *g,
+ struct cdnsp_ep *pep)
+{
+ unsigned int interval = 0;
+
+ switch (g->speed) {
+ case USB_SPEED_HIGH:
+ case USB_SPEED_SUPER_PLUS:
+ case USB_SPEED_SUPER:
+ if (usb_endpoint_xfer_int(pep->endpoint.desc) ||
+ usb_endpoint_xfer_isoc(pep->endpoint.desc))
+ interval = cdnsp_parse_exponent_interval(g, pep);
+ break;
+ case USB_SPEED_FULL:
+ if (usb_endpoint_xfer_isoc(pep->endpoint.desc)) {
+ interval = cdnsp_parse_exponent_interval(g, pep);
+ } else if (usb_endpoint_xfer_int(pep->endpoint.desc)) {
+ interval = pep->endpoint.desc->bInterval << 3;
+ interval = cdnsp_microframes_to_exponent(g, pep,
+ interval,
+ 3, 10);
+ }
+
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ return interval;
+}
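+
+/*
+ * Worked example for the FS interrupt branch above: bInterval = 10 frames
+ * becomes 10 * 8 = 80 microframes, and fls(80) - 1 = 6 rounds that down
+ * to a power of two, so the endpoint is serviced every 2^6 * 125us = 8ms
+ * instead of the requested 10ms.
+ */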
+
+/*
+ * The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
+ * High speed endpoint descriptors can define "the number of additional
+ * transaction opportunities per microframe", but that goes in the Max Burst
+ * endpoint context field.
+ */
+static u32 cdnsp_get_endpoint_mult(struct usb_gadget *g, struct cdnsp_ep *pep)
+{
+ if (g->speed < USB_SPEED_SUPER ||
+ !usb_endpoint_xfer_isoc(pep->endpoint.desc))
+ return 0;
+
+ return pep->endpoint.comp_desc->bmAttributes;
+}
+
+static u32 cdnsp_get_endpoint_max_burst(struct usb_gadget *g,
+ struct cdnsp_ep *pep)
+{
+ /* SuperSpeed and SuperSpeedPlus have max burst in the ep companion desc. */
+ if (g->speed >= USB_SPEED_SUPER)
+ return pep->endpoint.comp_desc->bMaxBurst;
+
+ if (g->speed == USB_SPEED_HIGH &&
+ (usb_endpoint_xfer_isoc(pep->endpoint.desc) ||
+ usb_endpoint_xfer_int(pep->endpoint.desc)))
+ return (usb_endpoint_maxp(pep->endpoint.desc) & 0x1800) >> 11;
+
+ return 0;
+}
+
+static u32 cdnsp_get_endpoint_type(const struct usb_endpoint_descriptor *desc)
+{
+ int in;
+
+ in = usb_endpoint_dir_in(desc);
+
+ switch (usb_endpoint_type(desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ return CTRL_EP;
+ case USB_ENDPOINT_XFER_BULK:
+ return in ? BULK_IN_EP : BULK_OUT_EP;
+ case USB_ENDPOINT_XFER_ISOC:
+ return in ? ISOC_IN_EP : ISOC_OUT_EP;
+ case USB_ENDPOINT_XFER_INT:
+ return in ? INT_IN_EP : INT_OUT_EP;
+ }
+
+ return 0;
+}
+
+/*
+ * Return the maximum endpoint service interval time (ESIT) payload.
+ * Basically, this is the maxpacket size, multiplied by the burst size
+ * and mult size.
+ */
+static u32 cdnsp_get_max_esit_payload(struct usb_gadget *g,
+ struct cdnsp_ep *pep)
+{
+ int max_packet;
+ int max_burst;
+
+ /* Only applies for interrupt or isochronous endpoints. */
+ if (usb_endpoint_xfer_control(pep->endpoint.desc) ||
+ usb_endpoint_xfer_bulk(pep->endpoint.desc))
+ return 0;
+
+ /* SuperSpeedPlus Isoc ep sending over 48k per ESIT. */
+ if (g->speed >= USB_SPEED_SUPER_PLUS &&
+ USB_SS_SSP_ISOC_COMP(pep->endpoint.desc->bmAttributes))
+ return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval);
+ /* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per ESIT. */
+ else if (g->speed >= USB_SPEED_SUPER)
+ return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval);
+
+ max_packet = usb_endpoint_maxp(pep->endpoint.desc);
+ max_burst = usb_endpoint_maxp_mult(pep->endpoint.desc);
+
+ /* A 0 in max burst means 1 transfer per ESIT */
+ return max_packet * max_burst;
+}
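+
+/*
+ * Worked example: a HS isoc endpoint advertising a 1024-byte payload with
+ * two additional transaction opportunities per microframe yields
+ * max_packet = 1024 and max_burst = 3, so the max ESIT payload computed
+ * above is 1024 * 3 = 3072 bytes per service interval.
+ */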
+
+int cdnsp_endpoint_init(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ gfp_t mem_flags)
+{
+ enum cdnsp_ring_type ring_type;
+ struct cdnsp_ep_ctx *ep_ctx;
+ unsigned int err_count = 0;
+ unsigned int avg_trb_len;
+ unsigned int max_packet;
+ unsigned int max_burst;
+ unsigned int interval;
+ u32 max_esit_payload;
+ unsigned int mult;
+ u32 endpoint_type;
+ int ret;
+
+ ep_ctx = pep->in_ctx;
+
+ endpoint_type = cdnsp_get_endpoint_type(pep->endpoint.desc);
+ if (!endpoint_type)
+ return -EINVAL;
+
+ ring_type = usb_endpoint_type(pep->endpoint.desc);
+
+ /*
+ * Get values to fill the endpoint context, mostly from ep descriptor.
+ * The average TRB buffer length for bulk endpoints is unclear as we
+ * have no clue about the scatter-gather list entry size. For Isoc and Int,
+ * set it to max available.
+ */
+ max_esit_payload = cdnsp_get_max_esit_payload(&pdev->gadget, pep);
+ interval = cdnsp_get_endpoint_interval(&pdev->gadget, pep);
+ mult = cdnsp_get_endpoint_mult(&pdev->gadget, pep);
+ max_packet = usb_endpoint_maxp(pep->endpoint.desc);
+ max_burst = cdnsp_get_endpoint_max_burst(&pdev->gadget, pep);
+ avg_trb_len = max_esit_payload;
+
+ /* Allow 3 retries for everything but isoc, set CErr = 3. */
+ if (!usb_endpoint_xfer_isoc(pep->endpoint.desc))
+ err_count = 3;
+ if (usb_endpoint_xfer_bulk(pep->endpoint.desc) &&
+ pdev->gadget.speed == USB_SPEED_HIGH)
+ max_packet = 512;
+ /* Controller spec indicates that ctrl ep avg TRB Length should be 8. */
+ if (usb_endpoint_xfer_control(pep->endpoint.desc))
+ avg_trb_len = 8;
+
+ /* Set up the endpoint ring. */
+ pep->ring = cdnsp_ring_alloc(pdev, 2, ring_type, max_packet, mem_flags);
+ if (!pep->ring)
+ return -ENOMEM;
+
+ pep->skip = false;
+
+ /* Fill the endpoint context */
+ ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
+ EP_INTERVAL(interval) | EP_MULT(mult));
+ ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
+ MAX_PACKET(max_packet) | MAX_BURST(max_burst) |
+ ERROR_COUNT(err_count));
+ ep_ctx->deq = cpu_to_le64(pep->ring->first_seg->dma |
+ pep->ring->cycle_state);
+
+ ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
+ EP_AVG_TRB_LENGTH(avg_trb_len));
+
+ if (usb_endpoint_xfer_bulk(pep->endpoint.desc) &&
+ pdev->gadget.speed > USB_SPEED_HIGH) {
+ ret = cdnsp_alloc_streams(pdev, pep);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+void cdnsp_endpoint_zero(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+ pep->in_ctx->ep_info = 0;
+ pep->in_ctx->ep_info2 = 0;
+ pep->in_ctx->deq = 0;
+ pep->in_ctx->tx_info = 0;
+}
+
+static int cdnsp_alloc_erst(struct cdnsp_device *pdev,
+ struct cdnsp_ring *evt_ring,
+ struct cdnsp_erst *erst)
+{
+ struct cdnsp_erst_entry *entry;
+ struct cdnsp_segment *seg;
+ unsigned int val;
+ size_t size;
+
+ size = sizeof(struct cdnsp_erst_entry) * evt_ring->num_segs;
+ erst->entries = dma_alloc_coherent(pdev->dev, size,
+ &erst->erst_dma_addr, GFP_KERNEL);
+ if (!erst->entries)
+ return -ENOMEM;
+
+ erst->num_entries = evt_ring->num_segs;
+
+ seg = evt_ring->first_seg;
+ for (val = 0; val < evt_ring->num_segs; val++) {
+ entry = &erst->entries[val];
+ entry->seg_addr = cpu_to_le64(seg->dma);
+ entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
+ entry->rsvd = 0;
+ seg = seg->next;
+ }
+
+ return 0;
+}
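+
+/*
+ * Illustrative note: each ERST entry filled above simply describes one
+ * event ring segment, e.g. for a two-segment ring:
+ *
+ *	entries[0] = { .seg_addr = seg0->dma, .seg_size = TRBS_PER_SEGMENT }
+ *	entries[1] = { .seg_addr = seg1->dma, .seg_size = TRBS_PER_SEGMENT }
+ */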
+
+static void cdnsp_free_erst(struct cdnsp_device *pdev, struct cdnsp_erst *erst)
+{
+ size_t size = sizeof(struct cdnsp_erst_entry) * (erst->num_entries);
+ struct device *dev = pdev->dev;
+
+ if (erst->entries)
+ dma_free_coherent(dev, size, erst->entries,
+ erst->erst_dma_addr);
+
+ erst->entries = NULL;
+}
+
+void cdnsp_mem_cleanup(struct cdnsp_device *pdev)
+{
+ struct device *dev = pdev->dev;
+
+ cdnsp_free_priv_device(pdev);
+ cdnsp_free_erst(pdev, &pdev->erst);
+
+ if (pdev->event_ring)
+ cdnsp_ring_free(pdev, pdev->event_ring);
+
+ pdev->event_ring = NULL;
+
+ if (pdev->cmd_ring)
+ cdnsp_ring_free(pdev, pdev->cmd_ring);
+
+ pdev->cmd_ring = NULL;
+
+ dma_pool_destroy(pdev->segment_pool);
+ pdev->segment_pool = NULL;
+ dma_pool_destroy(pdev->device_pool);
+ pdev->device_pool = NULL;
+
+ if (pdev->dcbaa)
+ dma_free_coherent(dev, sizeof(*pdev->dcbaa),
+ pdev->dcbaa, pdev->dcbaa->dma);
+
+ pdev->dcbaa = NULL;
+
+ pdev->usb2_port.exist = 0;
+ pdev->usb3_port.exist = 0;
+ pdev->usb2_port.port_num = 0;
+ pdev->usb3_port.port_num = 0;
+ pdev->active_port = NULL;
+}
+
+static void cdnsp_set_event_deq(struct cdnsp_device *pdev)
+{
+ dma_addr_t deq;
+ u64 temp;
+
+ deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
+ pdev->event_ring->dequeue);
+
+ /* Update controller event ring dequeue pointer */
+ temp = cdnsp_read_64(&pdev->ir_set->erst_dequeue);
+ temp &= ERST_PTR_MASK;
+
+ /*
+ * Don't clear the EHB bit (which is RW1C) because
+ * there might be more events to service.
+ */
+ temp &= ~ERST_EHB;
+
+ cdnsp_write_64(((u64)deq & (u64)~ERST_PTR_MASK) | temp,
+ &pdev->ir_set->erst_dequeue);
+}
+
+static void cdnsp_add_in_port(struct cdnsp_device *pdev,
+ struct cdnsp_port *port,
+ __le32 __iomem *addr)
+{
+ u32 temp, port_offset, port_count;
+
+ temp = readl(addr);
+ port->maj_rev = CDNSP_EXT_PORT_MAJOR(temp);
+ port->min_rev = CDNSP_EXT_PORT_MINOR(temp);
+
+ /* Port offset and count are in the third dword. */
+ temp = readl(addr + 2);
+ port_offset = CDNSP_EXT_PORT_OFF(temp);
+ port_count = CDNSP_EXT_PORT_COUNT(temp);
+
+ trace_cdnsp_port_info(addr, port_offset, port_count, port->maj_rev);
+
+ port->port_num = port_offset;
+ port->exist = 1;
+}
+
+/*
+ * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
+ * specify what speeds each port is supposed to be.
+ */
+static int cdnsp_setup_port_arrays(struct cdnsp_device *pdev)
+{
+ void __iomem *base;
+ u32 offset;
+ int i;
+
+ base = &pdev->cap_regs->hc_capbase;
+ offset = cdnsp_find_next_ext_cap(base, 0,
+ EXT_CAP_CFG_DEV_20PORT_CAP_ID);
+ pdev->port20_regs = base + offset;
+
+ offset = cdnsp_find_next_ext_cap(base, 0, D_XEC_CFG_3XPORT_CAP);
+ pdev->port3x_regs = base + offset;
+
+ offset = 0;
+ base = &pdev->cap_regs->hc_capbase;
+
+ /* The driver expects at most 2 extended protocol capabilities. */
+ for (i = 0; i < 2; i++) {
+ u32 temp;
+
+ offset = cdnsp_find_next_ext_cap(base, offset,
+ EXT_CAPS_PROTOCOL);
+ temp = readl(base + offset);
+
+ if (CDNSP_EXT_PORT_MAJOR(temp) == 0x03 &&
+ !pdev->usb3_port.port_num)
+ cdnsp_add_in_port(pdev, &pdev->usb3_port,
+ base + offset);
+
+ if (CDNSP_EXT_PORT_MAJOR(temp) == 0x02 &&
+ !pdev->usb2_port.port_num)
+ cdnsp_add_in_port(pdev, &pdev->usb2_port,
+ base + offset);
+ }
+
+ if (!pdev->usb2_port.exist || !pdev->usb3_port.exist) {
+ dev_err(pdev->dev, "Error: Only one port detected\n");
+ return -ENODEV;
+ }
+
+ trace_cdnsp_init("Found USB 2.0 ports and USB 3.0 ports.");
+
+ pdev->usb2_port.regs = (struct cdnsp_port_regs __iomem *)
+ (&pdev->op_regs->port_reg_base + NUM_PORT_REGS *
+ (pdev->usb2_port.port_num - 1));
+
+ pdev->usb3_port.regs = (struct cdnsp_port_regs __iomem *)
+ (&pdev->op_regs->port_reg_base + NUM_PORT_REGS *
+ (pdev->usb3_port.port_num - 1));
+
+ return 0;
+}
+
+/*
+ * Initialize memory for CDNSP (one-time init).
+ *
+ * Program the PAGESIZE register, initialize the device context array, create
+ * device contexts, set up a command ring segment, create event
+ * ring (one for now).
+ */
+int cdnsp_mem_init(struct cdnsp_device *pdev)
+{
+ struct device *dev = pdev->dev;
+ int ret = -ENOMEM;
+ unsigned int val;
+ dma_addr_t dma;
+ u32 page_size;
+ u64 val_64;
+
+ /*
+ * Use 4K pages, since that's common and the minimum the
+ * controller supports
+ */
+ page_size = 1 << 12;
+
+ val = readl(&pdev->op_regs->config_reg);
+ val = (val & ~MAX_DEVS) | CDNSP_DEV_MAX_SLOTS | CONFIG_U3E;
+ writel(val, &pdev->op_regs->config_reg);
+
+ /*
+ * The Device Context Base Address Array (DCBAA) must be physically
+ * contiguous and 64-byte (cache line) aligned.
+ */
+ pdev->dcbaa = dma_alloc_coherent(dev, sizeof(*pdev->dcbaa),
+ &dma, GFP_KERNEL);
+ if (!pdev->dcbaa)
+ return -ENOMEM;
+
+ memset(pdev->dcbaa, 0, sizeof(*pdev->dcbaa));
+ pdev->dcbaa->dma = dma;
+
+ cdnsp_write_64(dma, &pdev->op_regs->dcbaa_ptr);
+
+ /*
+ * Initialize the ring segment pool. The ring must be a contiguous
+ * structure comprised of TRBs. The TRBs must be 16 byte aligned,
+ * however, the command ring segment needs 64-byte aligned segments
+ * and our use of dma addresses in the trb_address_map radix tree needs
+ * TRB_SEGMENT_SIZE alignment, so the driver picks the greater
+ * alignment requirement.
+ */
+ pdev->segment_pool = dma_pool_create("CDNSP ring segments", dev,
+ TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE,
+ page_size);
+ if (!pdev->segment_pool)
+ goto release_dcbaa;
+
+ pdev->device_pool = dma_pool_create("CDNSP input/output contexts", dev,
+ CDNSP_CTX_SIZE, 64, page_size);
+ if (!pdev->device_pool)
+ goto destroy_segment_pool;
+
+ /* Set up the command ring to have one segment for now. */
+ pdev->cmd_ring = cdnsp_ring_alloc(pdev, 1, TYPE_COMMAND, 0, GFP_KERNEL);
+ if (!pdev->cmd_ring)
+ goto destroy_device_pool;
+
+ /* Set the address in the Command Ring Control register */
+ val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
+ val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
+ (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
+ pdev->cmd_ring->cycle_state;
+ cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);
+
+ val = readl(&pdev->cap_regs->db_off);
+ val &= DBOFF_MASK;
+ pdev->dba = (void __iomem *)pdev->cap_regs + val;
+
+ /* Set ir_set to interrupt register set 0 */
+ pdev->ir_set = &pdev->run_regs->ir_set[0];
+
+ /*
+ * Event ring setup: Allocate a normal ring, but also setup
+ * the event ring segment table (ERST).
+ */
+ pdev->event_ring = cdnsp_ring_alloc(pdev, ERST_NUM_SEGS, TYPE_EVENT,
+ 0, GFP_KERNEL);
+ if (!pdev->event_ring)
+ goto free_cmd_ring;
+
+ ret = cdnsp_alloc_erst(pdev, pdev->event_ring, &pdev->erst);
+ if (ret)
+ goto free_event_ring;
+
+ /* Set ERST count with the number of entries in the segment table. */
+ val = readl(&pdev->ir_set->erst_size);
+ val &= ERST_SIZE_MASK;
+ val |= ERST_NUM_SEGS;
+ writel(val, &pdev->ir_set->erst_size);
+
+ /* Set the segment table base address. */
+ val_64 = cdnsp_read_64(&pdev->ir_set->erst_base);
+ val_64 &= ERST_PTR_MASK;
+ val_64 |= (pdev->erst.erst_dma_addr & (u64)~ERST_PTR_MASK);
+ cdnsp_write_64(val_64, &pdev->ir_set->erst_base);
+
+ /* Set the event ring dequeue address. */
+ cdnsp_set_event_deq(pdev);
+
+ ret = cdnsp_setup_port_arrays(pdev);
+ if (ret)
+ goto free_erst;
+
+ ret = cdnsp_alloc_priv_device(pdev);
+ if (ret) {
+ dev_err(pdev->dev,
+ "Could not allocate cdnsp_device data structures\n");
+ goto free_erst;
+ }
+
+ return 0;
+
+free_erst:
+ cdnsp_free_erst(pdev, &pdev->erst);
+free_event_ring:
+ cdnsp_ring_free(pdev, pdev->event_ring);
+free_cmd_ring:
+ cdnsp_ring_free(pdev, pdev->cmd_ring);
+destroy_device_pool:
+ dma_pool_destroy(pdev->device_pool);
+destroy_segment_pool:
+ dma_pool_destroy(pdev->segment_pool);
+release_dcbaa:
+ dma_free_coherent(dev, sizeof(*pdev->dcbaa), pdev->dcbaa,
+ pdev->dcbaa->dma);
+
+ cdnsp_reset(pdev);
+
+ return ret;
+}
diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c
new file mode 100644
index 000000000000..fe8a114c586c
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-pci.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence PCI Glue driver.
+ *
+ * Copyright (C) 2019 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+
+#include "core.h"
+#include "gadget-export.h"
+
+#define PCI_BAR_HOST 0
+#define PCI_BAR_OTG 0
+#define PCI_BAR_DEV 2
+
+#define PCI_DEV_FN_HOST_DEVICE 0
+#define PCI_DEV_FN_OTG 1
+
+#define PCI_DRIVER_NAME "cdns-pci-usbssp"
+#define PLAT_DRIVER_NAME "cdns-usbssp"
+
+#define CDNS_VENDOR_ID 0x17cd
+#define CDNS_DEVICE_ID 0x0100
+#define CDNS_DRD_IF (PCI_CLASS_SERIAL_USB << 8 | 0x80)
+
+static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
+{
+ struct pci_dev *func;
+
+ /*
+ * Get the second function.
+ * It's a little tricky, but this platform has two functions.
+ * The first keeps resources for Host/Device while the second
+ * keeps resources for DRD/OTG.
+ */
+ func = pci_get_device(pdev->vendor, pdev->device, NULL);
+ if (!func)
+ return NULL;
+
+ if (func->devfn == pdev->devfn) {
+ func = pci_get_device(pdev->vendor, pdev->device, func);
+ if (!func)
+ return NULL;
+ }
+
+ return func;
+}
+
+static int cdnsp_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_dev *func;
+ struct resource *res;
+ struct cdns *cdnsp;
+ int ret;
+
+ /*
+ * For GADGET/HOST PCI (devfn) function number is 0,
+ * for OTG PCI (devfn) function number is 1.
+ */
+ if (!id || (pdev->devfn != PCI_DEV_FN_HOST_DEVICE &&
+ pdev->devfn != PCI_DEV_FN_OTG))
+ return -EINVAL;
+
+ func = cdnsp_get_second_fun(pdev);
+ if (!func)
+ return -EINVAL;
+
+ if (func->class == PCI_CLASS_SERIAL_USB_XHCI ||
+ pdev->class == PCI_CLASS_SERIAL_USB_XHCI) {
+ ret = -EINVAL;
+ goto put_pci;
+ }
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Enabling PCI device has failed %d\n", ret);
+ goto put_pci;
+ }
+
+ pci_set_master(pdev);
+ if (pci_is_enabled(func)) {
+ cdnsp = pci_get_drvdata(func);
+ } else {
+ cdnsp = kzalloc(sizeof(*cdnsp), GFP_KERNEL);
+ if (!cdnsp) {
+ ret = -ENOMEM;
+ goto disable_pci;
+ }
+ }
+
+ /* For the GADGET device, the function number is 0. */
+ if (pdev->devfn == 0) {
+ resource_size_t rsrc_start, rsrc_len;
+
+ /* Function 0: host (BAR_0) + device (BAR_1). */
+ dev_dbg(dev, "Initialize resources\n");
+ rsrc_start = pci_resource_start(pdev, PCI_BAR_DEV);
+ rsrc_len = pci_resource_len(pdev, PCI_BAR_DEV);
+ res = devm_request_mem_region(dev, rsrc_start, rsrc_len, "dev");
+ if (!res) {
+ dev_dbg(dev, "controller already in use\n");
+ ret = -EBUSY;
+ goto free_cdnsp;
+ }
+
+ cdnsp->dev_regs = devm_ioremap(dev, rsrc_start, rsrc_len);
+ if (!cdnsp->dev_regs) {
+ dev_dbg(dev, "error mapping memory\n");
+ ret = -EFAULT;
+ goto free_cdnsp;
+ }
+
+ cdnsp->dev_irq = pdev->irq;
+ dev_dbg(dev, "USBSS-DEV physical base addr: %pa\n",
+ &rsrc_start);
+
+ res = &cdnsp->xhci_res[0];
+ res->start = pci_resource_start(pdev, PCI_BAR_HOST);
+ res->end = pci_resource_end(pdev, PCI_BAR_HOST);
+ res->name = "xhci";
+ res->flags = IORESOURCE_MEM;
+ dev_dbg(dev, "USBSS-XHCI physical base addr: %pa\n",
+ &res->start);
+
+ /* Interrupt for XHCI. */
+ res = &cdnsp->xhci_res[1];
+ res->start = pdev->irq;
+ res->name = "host";
+ res->flags = IORESOURCE_IRQ;
+ } else {
+ res = &cdnsp->otg_res;
+ res->start = pci_resource_start(pdev, PCI_BAR_OTG);
+ res->end = pci_resource_end(pdev, PCI_BAR_OTG);
+ res->name = "otg";
+ res->flags = IORESOURCE_MEM;
+ dev_dbg(dev, "CDNSP-DRD physical base addr: %pa\n",
+ &res->start);
+
+ /* Interrupt for OTG/DRD. */
+ cdnsp->otg_irq = pdev->irq;
+ }
+
+ if (pci_is_enabled(func)) {
+ cdnsp->dev = dev;
+ cdnsp->gadget_init = cdnsp_gadget_init;
+
+ ret = cdns_init(cdnsp);
+ if (ret)
+ goto free_cdnsp;
+ }
+
+ pci_set_drvdata(pdev, cdnsp);
+
+ device_wakeup_enable(&pdev->dev);
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return 0;
+
+free_cdnsp:
+ if (!pci_is_enabled(func))
+ kfree(cdnsp);
+
+disable_pci:
+ pci_disable_device(pdev);
+
+put_pci:
+ pci_dev_put(func);
+
+ return ret;
+}
+
+static void cdnsp_pci_remove(struct pci_dev *pdev)
+{
+ struct cdns *cdnsp;
+ struct pci_dev *func;
+
+ func = cdnsp_get_second_fun(pdev);
+ cdnsp = (struct cdns *)pci_get_drvdata(pdev);
+
+ if (pci_dev_run_wake(pdev))
+ pm_runtime_get_noresume(&pdev->dev);
+
+ if (!pci_is_enabled(func)) {
+ kfree(cdnsp);
+ goto pci_put;
+ }
+
+ cdns_remove(cdnsp);
+
+pci_put:
+ pci_dev_put(func);
+}
+
+static int __maybe_unused cdnsp_pci_suspend(struct device *dev)
+{
+ struct cdns *cdns = dev_get_drvdata(dev);
+
+ return cdns_suspend(cdns);
+}
+
+static int __maybe_unused cdnsp_pci_resume(struct device *dev)
+{
+ struct cdns *cdns = dev_get_drvdata(dev);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cdns->lock, flags);
+ ret = cdns_resume(cdns, 1);
+ spin_unlock_irqrestore(&cdns->lock, flags);
+
+ return ret;
+}
+
+static const struct dev_pm_ops cdnsp_pci_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(cdnsp_pci_suspend, cdnsp_pci_resume)
+};
+
+static const struct pci_device_id cdnsp_pci_ids[] = {
+ { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID },
+ { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+ CDNS_DRD_IF, PCI_ANY_ID },
+ { 0, }
+};
+
+static struct pci_driver cdnsp_pci_driver = {
+ .name = "cdnsp-pci",
+ .id_table = cdnsp_pci_ids,
+ .probe = cdnsp_pci_probe,
+ .remove = cdnsp_pci_remove,
+ .driver = {
+ .pm = &cdnsp_pci_pm_ops,
+ }
+};
+
+module_pci_driver(cdnsp_pci_driver);
+MODULE_DEVICE_TABLE(pci, cdnsp_pci_ids);
+
+MODULE_ALIAS("pci:cdnsp");
+MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence CDNSP PCI driver");
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
new file mode 100644
index 000000000000..f9170d177a89
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -0,0 +1,2438 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ * Code based on Linux XHCI driver.
+ * Origin: Copyright (C) 2008 Intel Corp
+ */
+
+/*
+ * Ring initialization rules:
+ * 1. Each segment is initialized to zero, except for link TRBs.
+ * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
+ * Consumer Cycle State (CCS), depending on ring function.
+ * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
+ *
+ * Ring behavior rules:
+ * 1. A ring is empty if enqueue == dequeue. This means there will always be at
+ * least one free TRB in the ring. This is useful if you want to turn that
+ * into a link TRB and expand the ring.
+ * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
+ * link TRB, then load the pointer with the address in the link TRB. If the
+ * link TRB had its toggle bit set, you may need to update the ring cycle
+ * state (see cycle bit rules). You may have to do this multiple times
+ * until you reach a non-link TRB.
+ * 3. A ring is full if enqueue++ (for the definition of increment above)
+ * equals the dequeue pointer.
+ *
+ * Cycle bit rules:
+ * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
+ * in a link TRB, it must toggle the ring cycle state.
+ * 2. When a producer increments an enqueue pointer and encounters a toggle bit
+ * in a link TRB, it must toggle the ring cycle state.
+ *
+ * Producer rules:
+ * 1. Check if ring is full before you enqueue.
+ * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
+ * Update enqueue pointer between each write (which may update the ring
+ * cycle state).
+ * 3. Notify the consumer. If SW is the producer, it rings the doorbell for
+ *    command and endpoint rings. If the controller is the producer for the
+ *    event ring, it generates an interrupt according to interrupt
+ *    moderation rules.
+ *
+ * Consumer rules:
+ * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
+ * the TRB is owned by the consumer.
+ * 2. Update dequeue pointer (which may update the ring cycle state) and
+ * continue processing TRBs until you reach a TRB which is not owned by you.
+ * 3. Notify the producer. SW is the consumer for the event ring, and it
+ * updates event ring dequeue pointer. Controller is the consumer for the
+ * command and endpoint rings; it generates events on the event ring
+ * for these.
+ */
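+
+/*
+ * Minimal sketch of consumer rule 1 above (illustration only, not part of
+ * the driver):
+ *
+ *	u32 flags = le32_to_cpu(ring->dequeue->generic.field[3]);
+ *
+ *	if ((flags & TRB_CYCLE) == ring->cycle_state)
+ *		process_trb(ring->dequeue);	// TRB is owned by us
+ *
+ * where process_trb() is a hypothetical handler.
+ */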
+
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+
+#include "cdnsp-trace.h"
+#include "cdnsp-gadget.h"
+
+/*
+ * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
+ * address of the TRB.
+ */
+dma_addr_t cdnsp_trb_virt_to_dma(struct cdnsp_segment *seg,
+ union cdnsp_trb *trb)
+{
+ unsigned long segment_offset = trb - seg->trbs;
+
+ if (trb < seg->trbs || segment_offset >= TRBS_PER_SEGMENT)
+ return 0;
+
+ return seg->dma + (segment_offset * sizeof(*trb));
+}
+
+static bool cdnsp_trb_is_noop(union cdnsp_trb *trb)
+{
+ return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
+}
+
+static bool cdnsp_trb_is_link(union cdnsp_trb *trb)
+{
+ return TRB_TYPE_LINK_LE32(trb->link.control);
+}
+
+bool cdnsp_last_trb_on_seg(struct cdnsp_segment *seg, union cdnsp_trb *trb)
+{
+ return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
+}
+
+bool cdnsp_last_trb_on_ring(struct cdnsp_ring *ring,
+ struct cdnsp_segment *seg,
+ union cdnsp_trb *trb)
+{
+ return cdnsp_last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
+}
+
+static bool cdnsp_link_trb_toggles_cycle(union cdnsp_trb *trb)
+{
+ return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
+}
+
+static void cdnsp_trb_to_noop(union cdnsp_trb *trb, u32 noop_type)
+{
+ if (cdnsp_trb_is_link(trb)) {
+ /* Unchain chained link TRBs. */
+ trb->link.control &= cpu_to_le32(~TRB_CHAIN);
+ } else {
+ trb->generic.field[0] = 0;
+ trb->generic.field[1] = 0;
+ trb->generic.field[2] = 0;
+ /* Preserve only the cycle bit of this TRB. */
+ trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+ trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
+ }
+}
+
+/*
+ * Updates trb to point to the next TRB in the ring, and updates seg if the next
+ * TRB is in a new segment. This does not skip over link TRBs, and it does not
+ * affect the ring dequeue or enqueue pointers.
+ */
+static void cdnsp_next_trb(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ring,
+ struct cdnsp_segment **seg,
+ union cdnsp_trb **trb)
+{
+ if (cdnsp_trb_is_link(*trb)) {
+ *seg = (*seg)->next;
+ *trb = ((*seg)->trbs);
+ } else {
+ (*trb)++;
+ }
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs. That would be dumb and this would loop.
+ */
+void cdnsp_inc_deq(struct cdnsp_device *pdev, struct cdnsp_ring *ring)
+{
+ /* The event ring doesn't have link TRBs; check for the last TRB. */
+ if (ring->type == TYPE_EVENT) {
+ if (!cdnsp_last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
+ ring->dequeue++;
+ goto out;
+ }
+
+ if (cdnsp_last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
+ ring->cycle_state ^= 1;
+
+ ring->deq_seg = ring->deq_seg->next;
+ ring->dequeue = ring->deq_seg->trbs;
+ goto out;
+ }
+
+ /* All other rings have link trbs. */
+ if (!cdnsp_trb_is_link(ring->dequeue)) {
+ ring->dequeue++;
+ ring->num_trbs_free++;
+ }
+ while (cdnsp_trb_is_link(ring->dequeue)) {
+ ring->deq_seg = ring->deq_seg->next;
+ ring->dequeue = ring->deq_seg->trbs;
+ }
+out:
+ trace_cdnsp_inc_deq(ring);
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs. That would be dumb and this would loop.
+ *
+ * If we've just enqueued a TRB that is in the middle of a TD (meaning the
+ * chain bit is set), then set the chain bit in all the following link TRBs.
+ * If we've enqueued the last TRB in a TD, make sure the following link TRBs
+ * have their chain bit cleared (so that each Link TRB is a separate TD).
+ *
+ * @more_trbs_coming: Will you enqueue more TRBs before ringing the doorbell?
+ */
+static void cdnsp_inc_enq(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ring,
+ bool more_trbs_coming)
+{
+ union cdnsp_trb *next;
+ u32 chain;
+
+ chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
+
+ /* If this is not the event ring, there is one less usable TRB. */
+ if (!cdnsp_trb_is_link(ring->enqueue))
+ ring->num_trbs_free--;
+ next = ++(ring->enqueue);
+
+ /* Advance the enqueue pointer past any link TRBs. */
+ while (cdnsp_trb_is_link(next)) {
+ /*
+ * If the caller doesn't plan on enqueuing more TDs before
+ * ringing the doorbell, then we don't want to give the link TRB
+ * to the hardware just yet. We'll give the link TRB back in
+ * cdnsp_prepare_ring() just before we enqueue the TD at the
+ * top of the ring.
+ */
+ if (!chain && !more_trbs_coming)
+ break;
+
+ next->link.control &= cpu_to_le32(~TRB_CHAIN);
+ next->link.control |= cpu_to_le32(chain);
+
+ /* Give this link TRB to the hardware */
+ wmb();
+ next->link.control ^= cpu_to_le32(TRB_CYCLE);
+
+ /* Toggle the cycle bit after the last ring segment. */
+ if (cdnsp_link_trb_toggles_cycle(next))
+ ring->cycle_state ^= 1;
+
+ ring->enq_seg = ring->enq_seg->next;
+ ring->enqueue = ring->enq_seg->trbs;
+ next = ring->enqueue;
+ }
+
+ trace_cdnsp_inc_enq(ring);
+}
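+
+/*
+ * Example of the chain-bit rule above: when a TD's TRBs run into a link
+ * TRB mid-TD, the link TRB inherits TRB_CHAIN so the controller keeps
+ * consuming the TD in the next segment; a link TRB between two TDs has
+ * TRB_CHAIN cleared so each TD stays a separate unit of work.
+ */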
+
+/*
+ * Check to see if there's room to enqueue num_trbs on the ring and make sure
+ * enqueue pointer will not advance into dequeue segment.
+ */
+static bool cdnsp_room_on_ring(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ring,
+ unsigned int num_trbs)
+{
+ int num_trbs_in_deq_seg;
+
+ if (ring->num_trbs_free < num_trbs)
+ return false;
+
+ if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
+ num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
+
+ if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Workaround for L1: the controller has an issue resuming from L1 after
+ * a doorbell is set for an endpoint while in the L1 state. This function
+ * forces a resume signal in that case.
+ */
+static void cdnsp_force_l0_go(struct cdnsp_device *pdev)
+{
+ if (pdev->active_port == &pdev->usb2_port && pdev->gadget.lpm_capable)
+ cdnsp_set_link_state(pdev, &pdev->active_port->regs->portsc, XDEV_U0);
+}
+
+/* Ring the doorbell after placing a command on the ring. */
+void cdnsp_ring_cmd_db(struct cdnsp_device *pdev)
+{
+ writel(DB_VALUE_CMD, &pdev->dba->cmd_db);
+}
+
+/*
+ * Ring the doorbell after placing a transfer on the ring.
+ * Returns true if doorbell was set, otherwise false.
+ */
+static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ unsigned int stream_id)
+{
+ __le32 __iomem *reg_addr = &pdev->dba->ep_db;
+ unsigned int ep_state = pep->ep_state;
+ unsigned int db_value;
+
+ /*
+ * Don't ring the doorbell for this endpoint if endpoint is halted or
+ * disabled.
+ */
+ if (ep_state & EP_HALTED || !(ep_state & EP_ENABLED))
+ return false;
+
+ /* For stream-capable endpoints the driver can ring the doorbell only twice. */
+ if (pep->ep_state & EP_HAS_STREAMS) {
+ if (pep->stream_info.drbls_count >= 2)
+ return false;
+
+ pep->stream_info.drbls_count++;
+ }
+
+ pep->ep_state &= ~EP_STOPPED;
+
+ if (pep->idx == 0 && pdev->ep0_stage == CDNSP_DATA_STAGE &&
+ !pdev->ep0_expect_in)
+ db_value = DB_VALUE_EP0_OUT(pep->idx, stream_id);
+ else
+ db_value = DB_VALUE(pep->idx, stream_id);
+
+ trace_cdnsp_tr_drbl(pep, stream_id);
+
+ writel(db_value, reg_addr);
+
+ cdnsp_force_l0_go(pdev);
+
+ /* Doorbell was set. */
+ return true;
+}
+
+/*
+ * Get the right ring for the given pep and stream_id.
+ * If the endpoint supports streams, boundary check the USB request's stream ID.
+ * If the endpoint doesn't support streams, return the singular endpoint ring.
+ */
+static struct cdnsp_ring *cdnsp_get_transfer_ring(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ unsigned int stream_id)
+{
+ if (!(pep->ep_state & EP_HAS_STREAMS))
+ return pep->ring;
+
+ if (stream_id == 0 || stream_id >= pep->stream_info.num_streams) {
+ dev_err(pdev->dev, "ERR: %s ring doesn't exist for SID: %d.\n",
+ pep->name, stream_id);
+ return NULL;
+ }
+
+ return pep->stream_info.stream_rings[stream_id];
+}
+
+static struct cdnsp_ring *
+ cdnsp_request_to_transfer_ring(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq)
+{
+ return cdnsp_get_transfer_ring(pdev, preq->pep,
+ preq->request.stream_id);
+}
+
+/* Ring the doorbell for any rings with pending requests. */
+void cdnsp_ring_doorbell_for_active_rings(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep)
+{
+ struct cdnsp_stream_info *stream_info;
+ unsigned int stream_id;
+ int ret;
+
+ if (pep->ep_state & EP_DIS_IN_RROGRESS)
+ return;
+
+ /* A ring has pending requests if its TD list is not empty. */
+ if (!(pep->ep_state & EP_HAS_STREAMS) && pep->number) {
+ if (pep->ring && !list_empty(&pep->ring->td_list))
+ cdnsp_ring_ep_doorbell(pdev, pep, 0);
+ return;
+ }
+
+ stream_info = &pep->stream_info;
+
+ for (stream_id = 1; stream_id < stream_info->num_streams; stream_id++) {
+ struct cdnsp_td *td, *td_temp;
+ struct cdnsp_ring *ep_ring;
+
+ if (stream_info->drbls_count >= 2)
+ return;
+
+ ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
+ if (!ep_ring)
+ continue;
+
+ if (!ep_ring->stream_active || ep_ring->stream_rejected)
+ continue;
+
+ list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
+ td_list) {
+ if (td->drbl)
+ continue;
+
+ ret = cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
+ if (ret)
+ td->drbl = 1;
+ }
+ }
+}
+
+/*
+ * Get the hw dequeue pointer controller stopped on, either directly from the
+ * endpoint context, or if streams are in use from the stream context.
+ * The returned hw_dequeue contains the lowest four bits with cycle state
+ * and possible stream context type.
+ */
+static u64 cdnsp_get_hw_deq(struct cdnsp_device *pdev,
+ unsigned int ep_index,
+ unsigned int stream_id)
+{
+ struct cdnsp_stream_ctx *st_ctx;
+ struct cdnsp_ep *pep;
+
+ pep = &pdev->eps[ep_index];
+
+ if (pep->ep_state & EP_HAS_STREAMS) {
+ st_ctx = &pep->stream_info.stream_ctx_array[stream_id];
+ return le64_to_cpu(st_ctx->stream_ring);
+ }
+
+ return le64_to_cpu(pep->out_ctx->deq);
+}
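+
+/*
+ * Decoding note for the value returned above: bit 0 holds the cycle
+ * state, bits 3:1 may hold the stream context type, and callers recover
+ * the TRB pointer itself with hw_deq & ~0xf.
+ */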
+
+/*
+ * Move the controller endpoint ring dequeue pointer past cur_td.
+ * Record the new state of the controller endpoint ring dequeue segment,
+ * dequeue pointer, and new consumer cycle state in state.
+ * Update internal representation of the ring's dequeue pointer.
+ *
+ * We do this in three jumps:
+ * - First we update our new ring state to be the same as when the
+ * controller stopped.
+ * - Then we traverse the ring to find the segment that contains
+ * the last TRB in the TD. We toggle the controller new cycle state
+ * when we pass any link TRBs with the toggle cycle bit set.
+ * - Finally we move the dequeue state one TRB further, toggling the cycle bit
+ * if we've moved it past a link TRB with the toggle cycle bit set.
+ */
+static void cdnsp_find_new_dequeue_state(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ unsigned int stream_id,
+ struct cdnsp_td *cur_td,
+ struct cdnsp_dequeue_state *state)
+{
+ bool td_last_trb_found = false;
+ struct cdnsp_segment *new_seg;
+ struct cdnsp_ring *ep_ring;
+ union cdnsp_trb *new_deq;
+ bool cycle_found = false;
+ u64 hw_dequeue;
+
+ ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
+ if (!ep_ring)
+ return;
+
+ /*
+ * Dig out the cycle state saved by the controller during the
+ * stop endpoint command.
+ */
+ hw_dequeue = cdnsp_get_hw_deq(pdev, pep->idx, stream_id);
+ new_seg = ep_ring->deq_seg;
+ new_deq = ep_ring->dequeue;
+ state->new_cycle_state = hw_dequeue & 0x1;
+ state->stream_id = stream_id;
+
+ /*
+ * We want to find the pointer, segment and cycle state of the new trb
+ * (the one after current TD's last_trb). We know the cycle state at
+ * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
+ * found.
+ */
+ do {
+ if (!cycle_found && cdnsp_trb_virt_to_dma(new_seg, new_deq)
+ == (dma_addr_t)(hw_dequeue & ~0xf)) {
+ cycle_found = true;
+
+ if (td_last_trb_found)
+ break;
+ }
+
+ if (new_deq == cur_td->last_trb)
+ td_last_trb_found = true;
+
+ if (cycle_found && cdnsp_trb_is_link(new_deq) &&
+ cdnsp_link_trb_toggles_cycle(new_deq))
+ state->new_cycle_state ^= 0x1;
+
+ cdnsp_next_trb(pdev, ep_ring, &new_seg, &new_deq);
+
+ /* Search wrapped around, bail out. */
+ if (new_deq == pep->ring->dequeue) {
+ dev_err(pdev->dev,
+ "Error: Failed finding new dequeue state\n");
+ state->new_deq_seg = NULL;
+ state->new_deq_ptr = NULL;
+ return;
+ }
+
+ } while (!cycle_found || !td_last_trb_found);
+
+ state->new_deq_seg = new_seg;
+ state->new_deq_ptr = new_deq;
+
+ trace_cdnsp_new_deq_state(state);
+}
+
+/*
+ * flip_cycle means flip the cycle bit of all but the first and last TRB.
+ * (The last TRB actually points to the ring enqueue pointer, which is not part
+ * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
+ */
+static void cdnsp_td_to_noop(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ep_ring,
+ struct cdnsp_td *td,
+ bool flip_cycle)
+{
+ struct cdnsp_segment *seg = td->start_seg;
+ union cdnsp_trb *trb = td->first_trb;
+
+ while (1) {
+ cdnsp_trb_to_noop(trb, TRB_TR_NOOP);
+
+ /* flip cycle if asked to */
+ if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
+ trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
+
+ if (trb == td->last_trb)
+ break;
+
+ cdnsp_next_trb(pdev, ep_ring, &seg, &trb);
+ }
+}
+
+/*
+ * This TD is defined by the TRBs starting at start_trb in start_seg and ending
+ * at end_trb, which may be in another segment. If the suspect DMA address is a
+ * TRB in this TD, this function returns that TRB's segment. Otherwise it
+ * returns NULL.
+ */
+static struct cdnsp_segment *cdnsp_trb_in_td(struct cdnsp_device *pdev,
+ struct cdnsp_segment *start_seg,
+ union cdnsp_trb *start_trb,
+ union cdnsp_trb *end_trb,
+ dma_addr_t suspect_dma)
+{
+ struct cdnsp_segment *cur_seg;
+ union cdnsp_trb *temp_trb;
+ dma_addr_t end_seg_dma;
+ dma_addr_t end_trb_dma;
+ dma_addr_t start_dma;
+
+ start_dma = cdnsp_trb_virt_to_dma(start_seg, start_trb);
+ cur_seg = start_seg;
+
+ do {
+ if (start_dma == 0)
+ return NULL;
+
+ temp_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1];
+ /* We may get an event for a Link TRB in the middle of a TD */
+ end_seg_dma = cdnsp_trb_virt_to_dma(cur_seg, temp_trb);
+ /* If the end TRB isn't in this segment, this is set to 0 */
+ end_trb_dma = cdnsp_trb_virt_to_dma(cur_seg, end_trb);
+
+ trace_cdnsp_looking_trb_in_td(suspect_dma, start_dma,
+ end_trb_dma, cur_seg->dma,
+ end_seg_dma);
+
+ if (end_trb_dma > 0) {
+ /*
+ * The end TRB is in this segment, so suspect should
+ * be here
+ */
+ if (start_dma <= end_trb_dma) {
+ if (suspect_dma >= start_dma &&
+ suspect_dma <= end_trb_dma) {
+ return cur_seg;
+ }
+ } else {
+ /*
+ * Case for one segment with a
+ * TD wrapped around to the top
+ */
+ if ((suspect_dma >= start_dma &&
+ suspect_dma <= end_seg_dma) ||
+ (suspect_dma >= cur_seg->dma &&
+ suspect_dma <= end_trb_dma)) {
+ return cur_seg;
+ }
+ }
+
+ return NULL;
+ }
+
+ /* Might still be somewhere in this segment */
+ if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
+ return cur_seg;
+
+ cur_seg = cur_seg->next;
+ start_dma = cdnsp_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
+ } while (cur_seg != start_seg);
+
+ return NULL;
+}
+
+static void cdnsp_unmap_td_bounce_buffer(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ring,
+ struct cdnsp_td *td)
+{
+ struct cdnsp_segment *seg = td->bounce_seg;
+ struct cdnsp_request *preq;
+ size_t len;
+
+ if (!seg)
+ return;
+
+ preq = td->preq;
+
+ trace_cdnsp_bounce_unmap(td->preq, seg->bounce_len, seg->bounce_offs,
+ seg->bounce_dma, 0);
+
+ if (!preq->direction) {
+ dma_unmap_single(pdev->dev, seg->bounce_dma,
+ ring->bounce_buf_len, DMA_TO_DEVICE);
+ return;
+ }
+
+ dma_unmap_single(pdev->dev, seg->bounce_dma, ring->bounce_buf_len,
+ DMA_FROM_DEVICE);
+
+ /* For IN transfers we need to copy the data from the bounce buffer to sg. */
+ len = sg_pcopy_from_buffer(preq->request.sg, preq->request.num_sgs,
+ seg->bounce_buf, seg->bounce_len,
+ seg->bounce_offs);
+ if (len != seg->bounce_len)
+ dev_warn(pdev->dev, "WARN Wrong bounce buffer read length: %zu != %d\n",
+ len, seg->bounce_len);
+
+ seg->bounce_len = 0;
+ seg->bounce_offs = 0;
+}
+
+static int cdnsp_cmd_set_deq(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ struct cdnsp_dequeue_state *deq_state)
+{
+ struct cdnsp_ring *ep_ring;
+ int ret;
+
+ if (!deq_state->new_deq_ptr || !deq_state->new_deq_seg) {
+ cdnsp_ring_doorbell_for_active_rings(pdev, pep);
+ return 0;
+ }
+
+ cdnsp_queue_new_dequeue_state(pdev, pep, deq_state);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+
+ trace_cdnsp_handle_cmd_set_deq(cdnsp_get_slot_ctx(&pdev->out_ctx));
+ trace_cdnsp_handle_cmd_set_deq_ep(pep->out_ctx);
+
+ /*
+ * Update the ring's dequeue segment and dequeue pointer
+ * to reflect the new position.
+ */
+ ep_ring = cdnsp_get_transfer_ring(pdev, pep, deq_state->stream_id);
+
+ if (cdnsp_trb_is_link(ep_ring->dequeue)) {
+ ep_ring->deq_seg = ep_ring->deq_seg->next;
+ ep_ring->dequeue = ep_ring->deq_seg->trbs;
+ }
+
+ while (ep_ring->dequeue != deq_state->new_deq_ptr) {
+ ep_ring->num_trbs_free++;
+ ep_ring->dequeue++;
+
+ if (cdnsp_trb_is_link(ep_ring->dequeue)) {
+ if (ep_ring->dequeue == deq_state->new_deq_ptr)
+ break;
+
+ ep_ring->deq_seg = ep_ring->deq_seg->next;
+ ep_ring->dequeue = ep_ring->deq_seg->trbs;
+ }
+ }
+
+ /*
+ * The Set Dequeue Pointer command probably timed out. This is a
+ * critical error and the controller will be stopped.
+ */
+ if (ret)
+ return -ESHUTDOWN;
+
+ /* Restart any rings with pending requests */
+ cdnsp_ring_doorbell_for_active_rings(pdev, pep);
+
+ return 0;
+}
+
+int cdnsp_remove_request(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq,
+ struct cdnsp_ep *pep)
+{
+ struct cdnsp_dequeue_state deq_state;
+ struct cdnsp_td *cur_td = NULL;
+ struct cdnsp_ring *ep_ring;
+ struct cdnsp_segment *seg;
+ int status = -ECONNRESET;
+ int ret = 0;
+ u64 hw_deq;
+
+ memset(&deq_state, 0, sizeof(deq_state));
+
+ trace_cdnsp_remove_request(pep->out_ctx);
+ trace_cdnsp_remove_request_td(preq);
+
+ cur_td = &preq->td;
+ ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
+
+ /*
+ * If we stopped on the TD we need to cancel, then we have to
+ * move the controller endpoint ring dequeue pointer past
+ * this TD.
+ */
+ hw_deq = cdnsp_get_hw_deq(pdev, pep->idx, preq->request.stream_id);
+ hw_deq &= ~0xf;
+
+ seg = cdnsp_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb,
+ cur_td->last_trb, hw_deq);
+
+ if (seg && (pep->ep_state & EP_ENABLED))
+ cdnsp_find_new_dequeue_state(pdev, pep, preq->request.stream_id,
+ cur_td, &deq_state);
+ else
+ cdnsp_td_to_noop(pdev, ep_ring, cur_td, false);
+
+ /*
+ * The event handler won't see a completion for this TD anymore,
+ * so remove it from the endpoint ring's TD list.
+ */
+ list_del_init(&cur_td->td_list);
+ ep_ring->num_tds--;
+ pep->stream_info.td_count--;
+
+ /*
+ * During a disconnect all endpoints will be disabled, so we don't
+ * have to worry about updating the dequeue pointer.
+ */
+ if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING)
+ status = -ESHUTDOWN;
+ else
+ ret = cdnsp_cmd_set_deq(pdev, pep, &deq_state);
+
+ cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, cur_td);
+ cdnsp_gadget_giveback(pep, cur_td->preq, status);
+
+ return ret;
+}
+
+static int cdnsp_update_port_id(struct cdnsp_device *pdev, u32 port_id)
+{
+ struct cdnsp_port *port = pdev->active_port;
+ u8 old_port = 0;
+
+ if (port && port->port_num == port_id)
+ return 0;
+
+ if (port)
+ old_port = port->port_num;
+
+ if (port_id == pdev->usb2_port.port_num) {
+ port = &pdev->usb2_port;
+ } else if (port_id == pdev->usb3_port.port_num) {
+ port = &pdev->usb3_port;
+ } else {
+ dev_err(pdev->dev, "Port event with invalid port ID %d\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ if (port_id != old_port) {
+ cdnsp_disable_slot(pdev);
+ pdev->active_port = port;
+ cdnsp_enable_slot(pdev);
+ }
+
+ if (port_id == pdev->usb2_port.port_num)
+ cdnsp_set_usb2_hardware_lpm(pdev, NULL, 1);
+ else
+ writel(PORT_U1_TIMEOUT(1) | PORT_U2_TIMEOUT(1),
+ &pdev->usb3_port.regs->portpmsc);
+
+ return 0;
+}
+
+static void cdnsp_handle_port_status(struct cdnsp_device *pdev,
+ union cdnsp_trb *event)
+{
+ struct cdnsp_port_regs __iomem *port_regs;
+ u32 portsc, cmd_regs;
+ bool port2 = false;
+ u32 link_state;
+ u32 port_id;
+
+ /* Port status change events always have a successful completion code */
+ if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
+ dev_err(pdev->dev, "ERR: incorrect PSC event\n");
+
+ port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
+
+ if (cdnsp_update_port_id(pdev, port_id))
+ goto cleanup;
+
+ port_regs = pdev->active_port->regs;
+
+ if (port_id == pdev->usb2_port.port_num)
+ port2 = true;
+
+new_event:
+ portsc = readl(&port_regs->portsc);
+ writel(cdnsp_port_state_to_neutral(portsc) |
+ (portsc & PORT_CHANGE_BITS), &port_regs->portsc);
+
+ trace_cdnsp_handle_port_status(pdev->active_port->port_num, portsc);
+
+ pdev->gadget.speed = cdnsp_port_speed(portsc);
+ link_state = portsc & PORT_PLS_MASK;
+
+ /* Port Link State change detected. */
+ if ((portsc & PORT_PLC)) {
+ if (!(pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) &&
+ link_state == XDEV_RESUME) {
+ cmd_regs = readl(&pdev->op_regs->command);
+ if (!(cmd_regs & CMD_R_S))
+ goto cleanup;
+
+ if (DEV_SUPERSPEED_ANY(portsc)) {
+ cdnsp_set_link_state(pdev, &port_regs->portsc,
+ XDEV_U0);
+
+ cdnsp_resume_gadget(pdev);
+ }
+ }
+
+ if ((pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) &&
+ link_state == XDEV_U0) {
+ pdev->cdnsp_state &= ~CDNSP_WAKEUP_PENDING;
+
+ cdnsp_force_header_wakeup(pdev, 1);
+ cdnsp_ring_cmd_db(pdev);
+ cdnsp_wait_for_cmd_compl(pdev);
+ }
+
+ if (link_state == XDEV_U0 && pdev->link_state == XDEV_U3 &&
+ !DEV_SUPERSPEED_ANY(portsc))
+ cdnsp_resume_gadget(pdev);
+
+ if (link_state == XDEV_U3 && pdev->link_state != XDEV_U3)
+ cdnsp_suspend_gadget(pdev);
+
+ pdev->link_state = link_state;
+ }
+
+ if (portsc & PORT_CSC) {
+ /* Detach device. */
+ if (pdev->gadget.connected && !(portsc & PORT_CONNECT))
+ cdnsp_disconnect_gadget(pdev);
+
+ /* Attach device. */
+ if (portsc & PORT_CONNECT) {
+ if (!port2)
+ cdnsp_irq_reset(pdev);
+
+ usb_gadget_set_state(&pdev->gadget, USB_STATE_ATTACHED);
+ }
+ }
+
+ /* Port reset. */
+ if ((portsc & (PORT_RC | PORT_WRC)) && (portsc & PORT_CONNECT)) {
+ cdnsp_irq_reset(pdev);
+ pdev->u1_allowed = 0;
+ pdev->u2_allowed = 0;
+ pdev->may_wakeup = 0;
+ }
+
+ /* PORT_OCC is assumed here, mirroring the xHCI over-current change bit. */
+ if (portsc & PORT_OCC)
+ dev_err(pdev->dev, "Port Over Current detected\n");
+
+ if (portsc & PORT_CEC)
+ dev_err(pdev->dev, "Port Configure Error detected\n");
+
+ if (readl(&port_regs->portsc) & PORT_CHANGE_BITS)
+ goto new_event;
+
+cleanup:
+ cdnsp_inc_deq(pdev, pdev->event_ring);
+}
+
+static void cdnsp_td_cleanup(struct cdnsp_device *pdev,
+ struct cdnsp_td *td,
+ struct cdnsp_ring *ep_ring,
+ int *status)
+{
+ struct cdnsp_request *preq = td->preq;
+
+ /* If a bounce buffer was used to align this TD then unmap it. */
+ cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, td);
+
+ /*
+ * If the controller said we transferred more data than the buffer
+ * length, play it safe and say we didn't transfer anything.
+ */
+ if (preq->request.actual > preq->request.length) {
+ preq->request.actual = 0;
+ *status = 0;
+ }
+
+ list_del_init(&td->td_list);
+ ep_ring->num_tds--;
+ preq->pep->stream_info.td_count--;
+
+ cdnsp_gadget_giveback(preq->pep, preq, *status);
+}
+
+static void cdnsp_finish_td(struct cdnsp_device *pdev,
+ struct cdnsp_td *td,
+ struct cdnsp_transfer_event *event,
+ struct cdnsp_ep *ep,
+ int *status)
+{
+ struct cdnsp_ring *ep_ring;
+ u32 trb_comp_code;
+
+ ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+
+ if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
+ trb_comp_code == COMP_STOPPED ||
+ trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
+ /*
+ * The Endpoint Stop Command completion will take care of any
+ * stopped TDs. A stopped TD may be restarted, so don't update
+ * the ring dequeue pointer or take this TD off any lists yet.
+ */
+ return;
+ }
+
+ /* Update ring dequeue pointer */
+ while (ep_ring->dequeue != td->last_trb)
+ cdnsp_inc_deq(pdev, ep_ring);
+
+ cdnsp_inc_deq(pdev, ep_ring);
+
+ cdnsp_td_cleanup(pdev, td, ep_ring, status);
+}
+
+/* Sum TRB lengths from ring dequeue up to stop_trb, _excluding_ stop_trb. */
+static int cdnsp_sum_trb_lengths(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ring,
+ union cdnsp_trb *stop_trb)
+{
+ struct cdnsp_segment *seg = ring->deq_seg;
+ union cdnsp_trb *trb = ring->dequeue;
+ u32 sum;
+
+ for (sum = 0; trb != stop_trb; cdnsp_next_trb(pdev, ring, &seg, &trb)) {
+ if (!cdnsp_trb_is_noop(trb) && !cdnsp_trb_is_link(trb))
+ sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
+ }
+ return sum;
+}
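+
+/*
+ * Worked example: for a TD queued as three 1024-byte TRBs where the event
+ * points at the second TRB with 512 bytes remaining, the bytes received
+ * so far are cdnsp_sum_trb_lengths() over the first TRB (1024) plus the
+ * event TRB's programmed length minus the residue (1024 - 512), i.e.
+ * 1536 bytes.
+ */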
+
+static int cdnsp_giveback_first_trb(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ unsigned int stream_id,
+ int start_cycle,
+ struct cdnsp_generic_trb *start_trb)
+{
+ /*
+ * Pass all the TRBs to the hardware at once and make sure this write
+ * isn't reordered.
+ */
+ wmb();
+
+ if (start_cycle)
+ start_trb->field[3] |= cpu_to_le32(start_cycle);
+ else
+ start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
+
+ if ((pep->ep_state & EP_HAS_STREAMS) &&
+ !pep->stream_info.first_prime_det) {
+ trace_cdnsp_wait_for_prime(pep, stream_id);
+ return 0;
+ }
+
+ return cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
+}
+
+/*
+ * Process control TDs, update USB request status and actual_length.
+ */
+static void cdnsp_process_ctrl_td(struct cdnsp_device *pdev,
+ struct cdnsp_td *td,
+ union cdnsp_trb *event_trb,
+ struct cdnsp_transfer_event *event,
+ struct cdnsp_ep *pep,
+ int *status)
+{
+ struct cdnsp_ring *ep_ring;
+ u32 remaining;
+ u32 trb_type;
+
+ trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event_trb->generic.field[3]));
+ ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
+ remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+
+ /*
+ * If on the data stage, update the actual_length of the USB request
+ * and flag it as set so it won't be overwritten by the event for the
+ * last TRB.
+ */
+ if (trb_type == TRB_DATA) {
+ td->request_length_set = true;
+ td->preq->request.actual = td->preq->request.length - remaining;
+ }
+
+ /* At the status stage. */
+ if (!td->request_length_set)
+ td->preq->request.actual = td->preq->request.length;
+
+ if (pdev->ep0_stage == CDNSP_DATA_STAGE && pep->number == 0 &&
+ pdev->three_stage_setup) {
+ td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
+ td_list);
+ pdev->ep0_stage = CDNSP_STATUS_STAGE;
+
+ cdnsp_giveback_first_trb(pdev, pep, 0, ep_ring->cycle_state,
+ &td->last_trb->generic);
+ return;
+ }
+
+ cdnsp_finish_td(pdev, td, event, pep, status);
+}
+
+/*
+ * Process isochronous TDs, update USB request status and actual_length.
+ */
+static void cdnsp_process_isoc_td(struct cdnsp_device *pdev,
+ struct cdnsp_td *td,
+ union cdnsp_trb *ep_trb,
+ struct cdnsp_transfer_event *event,
+ struct cdnsp_ep *pep,
+ int status)
+{
+ struct cdnsp_request *preq = td->preq;
+ u32 remaining, requested, ep_trb_len;
+ bool sum_trbs_for_length = false;
+ struct cdnsp_ring *ep_ring;
+ u32 trb_comp_code;
+ u32 td_length;
+
+ ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+ remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
+
+ requested = preq->request.length;
+
+ /* handle completion code */
+ switch (trb_comp_code) {
+ case COMP_SUCCESS:
+ preq->request.status = 0;
+ break;
+ case COMP_SHORT_PACKET:
+ preq->request.status = 0;
+ sum_trbs_for_length = true;
+ break;
+ case COMP_ISOCH_BUFFER_OVERRUN:
+ case COMP_BABBLE_DETECTED_ERROR:
+ preq->request.status = -EOVERFLOW;
+ break;
+ case COMP_STOPPED:
+ sum_trbs_for_length = true;
+ break;
+ case COMP_STOPPED_SHORT_PACKET:
+ /* field normally containing residue now contains transferred */
+ preq->request.status = 0;
+ requested = remaining;
+ break;
+ case COMP_STOPPED_LENGTH_INVALID:
+ requested = 0;
+ remaining = 0;
+ break;
+ default:
+ sum_trbs_for_length = true;
+ preq->request.status = -1;
+ break;
+ }
+
+ if (sum_trbs_for_length) {
+ td_length = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb);
+ td_length += ep_trb_len - remaining;
+ } else {
+ td_length = requested;
+ }
+
+ td->preq->request.actual += td_length;
+
+ cdnsp_finish_td(pdev, td, event, pep, &status);
+}
+
+static void cdnsp_skip_isoc_td(struct cdnsp_device *pdev,
+ struct cdnsp_td *td,
+ struct cdnsp_transfer_event *event,
+ struct cdnsp_ep *pep,
+ int status)
+{
+ struct cdnsp_ring *ep_ring;
+
+ ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
+ td->preq->request.status = -EXDEV;
+ td->preq->request.actual = 0;
+
+ /* Update ring dequeue pointer */
+ while (ep_ring->dequeue != td->last_trb)
+ cdnsp_inc_deq(pdev, ep_ring);
+
+ cdnsp_inc_deq(pdev, ep_ring);
+
+ cdnsp_td_cleanup(pdev, td, ep_ring, &status);
+}
+
+/*
+ * Process bulk and interrupt TDs, updating the USB request status and
+ * actual_length.
+ */
+static void cdnsp_process_bulk_intr_td(struct cdnsp_device *pdev,
+ struct cdnsp_td *td,
+ union cdnsp_trb *ep_trb,
+ struct cdnsp_transfer_event *event,
+ struct cdnsp_ep *ep,
+ int *status)
+{
+ u32 remaining, requested, ep_trb_len;
+ struct cdnsp_ring *ep_ring;
+ u32 trb_comp_code;
+
+ ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+ remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+ ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
+ requested = td->preq->request.length;
+
+ switch (trb_comp_code) {
+ case COMP_SUCCESS:
+ case COMP_SHORT_PACKET:
+ *status = 0;
+ break;
+ case COMP_STOPPED_SHORT_PACKET:
+ td->preq->request.actual = remaining;
+ goto finish_td;
+ case COMP_STOPPED_LENGTH_INVALID:
+ /* Stopped on ep trb with invalid length, exclude it. */
+ ep_trb_len = 0;
+ remaining = 0;
+ break;
+ }
+
+ if (ep_trb == td->last_trb)
+ ep_trb_len = requested - remaining;
+ else
+ ep_trb_len = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb) +
+ ep_trb_len - remaining;
+ td->preq->request.actual = ep_trb_len;
+
+finish_td:
+ ep->stream_info.drbls_count--;
+
+ cdnsp_finish_td(pdev, td, event, ep, status);
+}
+
+static void cdnsp_handle_tx_nrdy(struct cdnsp_device *pdev,
+ struct cdnsp_transfer_event *event)
+{
+ struct cdnsp_generic_trb *generic;
+ struct cdnsp_ring *ep_ring;
+ struct cdnsp_ep *pep;
+ int cur_stream;
+ int ep_index;
+ int host_sid;
+ int dev_sid;
+
+ generic = (struct cdnsp_generic_trb *)event;
+ ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+ dev_sid = TRB_TO_DEV_STREAM(le32_to_cpu(generic->field[0]));
+ host_sid = TRB_TO_HOST_STREAM(le32_to_cpu(generic->field[2]));
+
+ pep = &pdev->eps[ep_index];
+
+ if (!(pep->ep_state & EP_HAS_STREAMS))
+ return;
+
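+	/*
+	 * The first PRIME acknowledge from the host means the stream
+	 * protocol is operational, so mark every stream ring as active
+	 * and no longer rejected.
+	 */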
+ if (host_sid == STREAM_PRIME_ACK) {
+ pep->stream_info.first_prime_det = 1;
+ for (cur_stream = 1; cur_stream < pep->stream_info.num_streams;
+ cur_stream++) {
+ ep_ring = pep->stream_info.stream_rings[cur_stream];
+ ep_ring->stream_active = 1;
+ ep_ring->stream_rejected = 0;
+ }
+ }
+
+ if (host_sid == STREAM_REJECTED) {
+ struct cdnsp_td *td, *td_temp;
+
+ pep->stream_info.drbls_count--;
+ ep_ring = pep->stream_info.stream_rings[dev_sid];
+ ep_ring->stream_active = 0;
+ ep_ring->stream_rejected = 1;
+
+ list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
+ td_list) {
+ td->drbl = 0;
+ }
+ }
+
+ cdnsp_ring_doorbell_for_active_rings(pdev, pep);
+}
+
+/*
+ * If this function returns an error condition, it means it got a Transfer
+ * event with a corrupted TRB DMA address or the endpoint is disabled.
+ */
+static int cdnsp_handle_tx_event(struct cdnsp_device *pdev,
+ struct cdnsp_transfer_event *event)
+{
+ const struct usb_endpoint_descriptor *desc;
+ bool handling_skipped_tds = false;
+ struct cdnsp_segment *ep_seg;
+ struct cdnsp_ring *ep_ring;
+ int status = -EINPROGRESS;
+ union cdnsp_trb *ep_trb;
+ dma_addr_t ep_trb_dma;
+ struct cdnsp_ep *pep;
+ struct cdnsp_td *td;
+ u32 trb_comp_code;
+ int invalidate;
+ int ep_index;
+
+ invalidate = le32_to_cpu(event->flags) & TRB_EVENT_INVALIDATE;
+ ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+ trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+ ep_trb_dma = le64_to_cpu(event->buffer);
+
+ pep = &pdev->eps[ep_index];
+ ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
+
+	/*
+	 * If the device is disconnected then all requests will be dequeued
+	 * by the upper layers as part of the disconnect sequence.
+	 * Don't handle such events, to avoid racing with that teardown.
+	 */
+ if (invalidate || !pdev->gadget.connected)
+ goto cleanup;
+
+ if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_DISABLED) {
+ trace_cdnsp_ep_disabled(pep->out_ctx);
+ goto err_out;
+ }
+
+	/* Some transfer events don't always point to a TRB. */
+ if (!ep_ring) {
+ switch (trb_comp_code) {
+ case COMP_INVALID_STREAM_TYPE_ERROR:
+ case COMP_INVALID_STREAM_ID_ERROR:
+ case COMP_RING_UNDERRUN:
+ case COMP_RING_OVERRUN:
+ goto cleanup;
+ default:
+ dev_err(pdev->dev, "ERROR: %s event for unknown ring\n",
+ pep->name);
+ goto err_out;
+ }
+ }
+
+ /* Look for some error cases that need special treatment. */
+ switch (trb_comp_code) {
+ case COMP_BABBLE_DETECTED_ERROR:
+ status = -EOVERFLOW;
+ break;
+ case COMP_RING_UNDERRUN:
+ case COMP_RING_OVERRUN:
+		/*
+		 * When the Isoch ring is empty, the controller will generate
+		 * a Ring Overrun Event for an IN Isoch endpoint or a Ring
+		 * Underrun Event for an OUT Isoch endpoint.
+		 */
+ goto cleanup;
+ case COMP_MISSED_SERVICE_ERROR:
+		/*
+		 * When a missed service error is encountered, one or more
+		 * isoc TDs may have been missed by the controller.
+		 * Set the skip flag of the endpoint ring; complete the
+		 * missed TDs as short transfers when the ring is processed
+		 * next time.
+		 */
+ pep->skip = true;
+ break;
+ }
+
+ do {
+ /*
+ * This TRB should be in the TD at the head of this ring's TD
+ * list.
+ */
+ if (list_empty(&ep_ring->td_list)) {
+ /*
+ * Don't print warnings if it's due to a stopped
+ * endpoint generating an extra completion event, or
+			 * an event for the last TRB of a short TD we already
+ * got a short event for.
+ * The short TD is already removed from the TD list.
+ */
+ if (!(trb_comp_code == COMP_STOPPED ||
+ trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
+ ep_ring->last_td_was_short))
+ trace_cdnsp_trb_without_td(ep_ring,
+ (struct cdnsp_generic_trb *)event);
+
+ if (pep->skip) {
+ pep->skip = false;
+ trace_cdnsp_ep_list_empty_with_skip(pep, 0);
+ }
+
+ goto cleanup;
+ }
+
+ td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
+ td_list);
+
+ /* Is this a TRB in the currently executing TD? */
+ ep_seg = cdnsp_trb_in_td(pdev, ep_ring->deq_seg,
+ ep_ring->dequeue, td->last_trb,
+ ep_trb_dma);
+
+		/*
+		 * Skip the Force Stopped Event. The event_trb(ep_trb_dma) of
+		 * the FSE is not in the current TD pointed to by
+		 * ep_ring->dequeue, because the hardware dequeue pointer is
+		 * still at the previous TRB of the current TD. That previous
+		 * TRB may be a Link TRB or the last TRB of the previous TD.
+		 * The command completion handler will take care of the rest.
+		 */
+ if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
+ trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
+ pep->skip = false;
+ goto cleanup;
+ }
+
+ desc = td->preq->pep->endpoint.desc;
+ if (!ep_seg) {
+ if (!pep->skip || !usb_endpoint_xfer_isoc(desc)) {
+ /* Something is busted, give up! */
+ dev_err(pdev->dev,
+ "ERROR Transfer event TRB DMA ptr not "
+ "part of current TD ep_index %d "
+ "comp_code %u\n", ep_index,
+ trb_comp_code);
+ return -EINVAL;
+ }
+
+ cdnsp_skip_isoc_td(pdev, td, event, pep, status);
+ goto cleanup;
+ }
+
+ if (trb_comp_code == COMP_SHORT_PACKET)
+ ep_ring->last_td_was_short = true;
+ else
+ ep_ring->last_td_was_short = false;
+
+ if (pep->skip) {
+ pep->skip = false;
+ cdnsp_skip_isoc_td(pdev, td, event, pep, status);
+ goto cleanup;
+ }
+
+ ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma)
+ / sizeof(*ep_trb)];
+
+ trace_cdnsp_handle_transfer(ep_ring,
+ (struct cdnsp_generic_trb *)ep_trb);
+
+ if (cdnsp_trb_is_noop(ep_trb))
+ goto cleanup;
+
+ if (usb_endpoint_xfer_control(desc))
+ cdnsp_process_ctrl_td(pdev, td, ep_trb, event, pep,
+ &status);
+ else if (usb_endpoint_xfer_isoc(desc))
+ cdnsp_process_isoc_td(pdev, td, ep_trb, event, pep,
+ status);
+ else
+ cdnsp_process_bulk_intr_td(pdev, td, ep_trb, event, pep,
+ &status);
+cleanup:
+ handling_skipped_tds = pep->skip;
+
+ /*
+ * Do not update event ring dequeue pointer if we're in a loop
+		 * processing missed TDs.
+ */
+ if (!handling_skipped_tds)
+ cdnsp_inc_deq(pdev, pdev->event_ring);
+
+		/*
+		 * If pep->skip is set, there are missed TDs on the
+		 * endpoint ring that need to be taken care of.
+		 * Process them as short transfers until we reach the TD
+		 * pointed to by the event.
+		 */
+ } while (handling_skipped_tds);
+ return 0;
+
+err_out:
+ dev_err(pdev->dev, "@%016llx %08x %08x %08x %08x\n",
+ (unsigned long long)
+ cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
+ pdev->event_ring->dequeue),
+ lower_32_bits(le64_to_cpu(event->buffer)),
+ upper_32_bits(le64_to_cpu(event->buffer)),
+ le32_to_cpu(event->transfer_len),
+ le32_to_cpu(event->flags));
+ return -EINVAL;
+}
+
+/*
+ * This function handles all events on the event ring.
+ * Returns true for "possibly more events to process" (caller should call
+ * again), otherwise false if done.
+ */
+static bool cdnsp_handle_event(struct cdnsp_device *pdev)
+{
+ unsigned int comp_code;
+ union cdnsp_trb *event;
+ bool update_ptrs = true;
+ u32 cycle_bit;
+ int ret = 0;
+ u32 flags;
+
+ event = pdev->event_ring->dequeue;
+ flags = le32_to_cpu(event->event_cmd.flags);
+ cycle_bit = (flags & TRB_CYCLE);
+
+ /* Does the controller or driver own the TRB? */
+ if (cycle_bit != pdev->event_ring->cycle_state)
+ return false;
+
+ trace_cdnsp_handle_event(pdev->event_ring, &event->generic);
+
+ /*
+ * Barrier between reading the TRB_CYCLE (valid) flag above and any
+ * reads of the event's flags/data below.
+ */
+ rmb();
+
+ switch (flags & TRB_TYPE_BITMASK) {
+ case TRB_TYPE(TRB_COMPLETION):
+		/*
+		 * Commands can't be handled in interrupt context, so just
+		 * increment the command ring dequeue pointer here.
+		 */
+ cdnsp_inc_deq(pdev, pdev->cmd_ring);
+ break;
+ case TRB_TYPE(TRB_PORT_STATUS):
+ cdnsp_handle_port_status(pdev, event);
+ update_ptrs = false;
+ break;
+ case TRB_TYPE(TRB_TRANSFER):
+ ret = cdnsp_handle_tx_event(pdev, &event->trans_event);
+ if (ret >= 0)
+ update_ptrs = false;
+ break;
+ case TRB_TYPE(TRB_SETUP):
+ pdev->ep0_stage = CDNSP_SETUP_STAGE;
+ pdev->setup_id = TRB_SETUPID_TO_TYPE(flags);
+ pdev->setup_speed = TRB_SETUP_SPEEDID(flags);
+ pdev->setup = *((struct usb_ctrlrequest *)
+ &event->trans_event.buffer);
+
+ cdnsp_setup_analyze(pdev);
+ break;
+ case TRB_TYPE(TRB_ENDPOINT_NRDY):
+ cdnsp_handle_tx_nrdy(pdev, &event->trans_event);
+ break;
+ case TRB_TYPE(TRB_HC_EVENT): {
+ comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
+
+ switch (comp_code) {
+ case COMP_EVENT_RING_FULL_ERROR:
+ dev_err(pdev->dev, "Event Ring Full\n");
+ break;
+ default:
+ dev_err(pdev->dev, "Controller error code 0x%02x\n",
+ comp_code);
+ }
+
+ break;
+ }
+ case TRB_TYPE(TRB_MFINDEX_WRAP):
+ case TRB_TYPE(TRB_DRB_OVERFLOW):
+ break;
+ default:
+ dev_warn(pdev->dev, "ERROR unknown event type %ld\n",
+ TRB_FIELD_TO_TYPE(flags));
+ }
+
+ if (update_ptrs)
+ /* Update SW event ring dequeue pointer. */
+ cdnsp_inc_deq(pdev, pdev->event_ring);
+
+ /*
+ * Caller will call us again to check if there are more items
+ * on the event ring.
+ */
+ return true;
+}
+
+irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
+{
+ struct cdnsp_device *pdev = (struct cdnsp_device *)data;
+ union cdnsp_trb *event_ring_deq;
+ int counter = 0;
+
+ spin_lock(&pdev->lock);
+
+ if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
+ cdnsp_died(pdev);
+ spin_unlock(&pdev->lock);
+ return IRQ_HANDLED;
+ }
+
+ event_ring_deq = pdev->event_ring->dequeue;
+
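+	/*
+	 * Drain the event ring in batches: push the ERST dequeue pointer
+	 * to the hardware every TRBS_PER_EV_DEQ_UPDATE events so the
+	 * controller does not see the event ring as full while many
+	 * events are still pending.
+	 */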
+ while (cdnsp_handle_event(pdev)) {
+ if (++counter >= TRBS_PER_EV_DEQ_UPDATE) {
+ cdnsp_update_erst_dequeue(pdev, event_ring_deq, 0);
+ event_ring_deq = pdev->event_ring->dequeue;
+ counter = 0;
+ }
+ }
+
+ cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
+
+ spin_unlock(&pdev->lock);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t cdnsp_irq_handler(int irq, void *priv)
+{
+ struct cdnsp_device *pdev = (struct cdnsp_device *)priv;
+ u32 irq_pending;
+ u32 status;
+
+ status = readl(&pdev->op_regs->status);
+
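+	/* Reading all ones usually means the controller has been removed. */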
+ if (status == ~(u32)0) {
+ cdnsp_died(pdev);
+ return IRQ_HANDLED;
+ }
+
+ if (!(status & STS_EINT))
+ return IRQ_NONE;
+
+ writel(status | STS_EINT, &pdev->op_regs->status);
+ irq_pending = readl(&pdev->ir_set->irq_pending);
+ irq_pending |= IMAN_IP;
+ writel(irq_pending, &pdev->ir_set->irq_pending);
+
+ if (status & STS_FATAL) {
+ cdnsp_died(pdev);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_WAKE_THREAD;
+}
+
+/*
+ * Generic function for queuing a TRB on a ring.
+ * The caller must have checked to make sure there's room on the ring.
+ *
+ * @more_trbs_coming: Will you enqueue more TRBs before ringing the doorbell?
+ */
+static void cdnsp_queue_trb(struct cdnsp_device *pdev, struct cdnsp_ring *ring,
+ bool more_trbs_coming, u32 field1, u32 field2,
+ u32 field3, u32 field4)
+{
+ struct cdnsp_generic_trb *trb;
+
+ trb = &ring->enqueue->generic;
+
+ trb->field[0] = cpu_to_le32(field1);
+ trb->field[1] = cpu_to_le32(field2);
+ trb->field[2] = cpu_to_le32(field3);
+ trb->field[3] = cpu_to_le32(field4);
+
+ trace_cdnsp_queue_trb(ring, trb);
+ cdnsp_inc_enq(pdev, ring, more_trbs_coming);
+}
+
+/*
+ * Does various checks on the endpoint ring, and makes it ready to
+ * queue num_trbs.
+ */
+static int cdnsp_prepare_ring(struct cdnsp_device *pdev,
+ struct cdnsp_ring *ep_ring,
+			      u32 ep_state, unsigned int num_trbs,
+			      gfp_t mem_flags)
+{
+ unsigned int num_trbs_needed;
+
+	/* Make sure the endpoint has been added to the controller's schedule. */
+ switch (ep_state) {
+ case EP_STATE_STOPPED:
+ case EP_STATE_RUNNING:
+ case EP_STATE_HALTED:
+ break;
+ default:
+ dev_err(pdev->dev, "ERROR: incorrect endpoint state\n");
+ return -EINVAL;
+ }
+
+ while (1) {
+ if (cdnsp_room_on_ring(pdev, ep_ring, num_trbs))
+ break;
+
+ trace_cdnsp_no_room_on_ring("try ring expansion");
+
+ num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
+ if (cdnsp_ring_expansion(pdev, ep_ring, num_trbs_needed,
+ mem_flags)) {
+ dev_err(pdev->dev, "Ring expansion failed\n");
+ return -ENOMEM;
+ }
+ }
+
+ while (cdnsp_trb_is_link(ep_ring->enqueue)) {
+ ep_ring->enqueue->link.control |= cpu_to_le32(TRB_CHAIN);
+ /* The cycle bit must be set as the last operation. */
+ wmb();
+ ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
+
+ /* Toggle the cycle bit after the last ring segment. */
+ if (cdnsp_link_trb_toggles_cycle(ep_ring->enqueue))
+ ep_ring->cycle_state ^= 1;
+ ep_ring->enq_seg = ep_ring->enq_seg->next;
+ ep_ring->enqueue = ep_ring->enq_seg->trbs;
+ }
+ return 0;
+}
+
+static int cdnsp_prepare_transfer(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq,
+ unsigned int num_trbs)
+{
+ struct cdnsp_ring *ep_ring;
+ int ret;
+
+ ep_ring = cdnsp_get_transfer_ring(pdev, preq->pep,
+ preq->request.stream_id);
+ if (!ep_ring)
+ return -EINVAL;
+
+ ret = cdnsp_prepare_ring(pdev, ep_ring,
+ GET_EP_CTX_STATE(preq->pep->out_ctx),
+ num_trbs, GFP_ATOMIC);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&preq->td.td_list);
+ preq->td.preq = preq;
+
+ /* Add this TD to the tail of the endpoint ring's TD list. */
+ list_add_tail(&preq->td.td_list, &ep_ring->td_list);
+ ep_ring->num_tds++;
+ preq->pep->stream_info.td_count++;
+
+ preq->td.start_seg = ep_ring->enq_seg;
+ preq->td.first_trb = ep_ring->enqueue;
+
+ return 0;
+}
+
+static unsigned int cdnsp_count_trbs(u64 addr, u64 len)
+{
+ unsigned int num_trbs;
+
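+	/*
+	 * Each TRB carries up to TRB_MAX_BUFF_SIZE bytes and must not
+	 * cross a TRB_MAX_BUFF_SIZE boundary, so the misalignment of the
+	 * start address counts against the first TRB. E.g. (illustrative,
+	 * with a 64 KiB TRB_MAX_BUFF_SIZE): an address offset of 0xf000
+	 * with len 0x2000 needs two TRBs.
+	 */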
+ num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
+ TRB_MAX_BUFF_SIZE);
+ if (num_trbs == 0)
+ num_trbs++;
+
+ return num_trbs;
+}
+
+static unsigned int count_trbs_needed(struct cdnsp_request *preq)
+{
+ return cdnsp_count_trbs(preq->request.dma, preq->request.length);
+}
+
+static unsigned int count_sg_trbs_needed(struct cdnsp_request *preq)
+{
+ unsigned int i, len, full_len, num_trbs = 0;
+ struct scatterlist *sg;
+
+ full_len = preq->request.length;
+
+ for_each_sg(preq->request.sg, sg, preq->request.num_sgs, i) {
+ len = sg_dma_len(sg);
+ num_trbs += cdnsp_count_trbs(sg_dma_address(sg), len);
+ len = min(len, full_len);
+ full_len -= len;
+ if (full_len == 0)
+ break;
+ }
+
+ return num_trbs;
+}
+
+static unsigned int count_isoc_trbs_needed(struct cdnsp_request *preq)
+{
+ return cdnsp_count_trbs(preq->request.dma, preq->request.length);
+}
+
+static void cdnsp_check_trb_math(struct cdnsp_request *preq, int running_total)
+{
+ if (running_total != preq->request.length)
+ dev_err(preq->pep->pdev->dev,
+ "%s - Miscalculated tx length, "
+ "queued %#x, asked for %#x (%d)\n",
+ preq->pep->name, running_total,
+ preq->request.length, preq->request.actual);
+}
+
+/*
+ * TD size is the number of max packet sized packets remaining in the TD
+ * (*not* including this TRB).
+ *
+ * Total TD packet count = total_packet_count =
+ * DIV_ROUND_UP(TD size in bytes / wMaxPacketSize)
+ *
+ * Packets transferred up to and including this TRB = packets_transferred =
+ * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
+ *
+ * TD size = total_packet_count - packets_transferred
+ *
+ * It must fit in bits 21:17, so it can't be bigger than 31.
+ * This is taken care of in the TRB_TD_SIZE() macro.
+ *
+ * The last TRB in a TD must have the TD size set to zero.
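+ *
+ * Worked example (illustrative numbers only): with wMaxPacketSize = 1024, a
+ * 3000 byte TD split into TRBs of 2048 and 952 bytes gives
+ * total_packet_count = DIV_ROUND_UP(3000, 1024) = 3. After the first TRB,
+ * packets_transferred = rounddown(2048 / 1024) = 2, so its TD size field is
+ * 3 - 2 = 1; the second (last) TRB carries TD size 0.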
+ */
+static u32 cdnsp_td_remainder(struct cdnsp_device *pdev,
+ int transferred,
+ int trb_buff_len,
+ unsigned int td_total_len,
+ struct cdnsp_request *preq,
+ bool more_trbs_coming)
+{
+ u32 maxp, total_packet_count;
+
+ /* One TRB with a zero-length data packet. */
+ if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
+ trb_buff_len == td_total_len)
+ return 0;
+
+ maxp = usb_endpoint_maxp(preq->pep->endpoint.desc);
+ total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
+
+ /* Queuing functions don't count the current TRB into transferred. */
+ return (total_packet_count - ((transferred + trb_buff_len) / maxp));
+}
+
+static int cdnsp_align_td(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq, u32 enqd_len,
+ u32 *trb_buff_len, struct cdnsp_segment *seg)
+{
+ struct device *dev = pdev->dev;
+ unsigned int unalign;
+ unsigned int max_pkt;
+ u32 new_buff_len;
+
+ max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
+ unalign = (enqd_len + *trb_buff_len) % max_pkt;
+
+ /* We got lucky, last normal TRB data on segment is packet aligned. */
+ if (unalign == 0)
+ return 0;
+
+	/* Is the last normal TRB alignable by splitting it? */
+ if (*trb_buff_len > unalign) {
+ *trb_buff_len -= unalign;
+ trace_cdnsp_bounce_align_td_split(preq, *trb_buff_len,
+ enqd_len, 0, unalign);
+ return 0;
+ }
+
+ /*
+	 * We want enqd_len + trb_buff_len to sum up to a number that is
+	 * divisible by the endpoint's wMaxPacketSize. IOW:
+ * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
+ */
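+	/*
+	 * Example (illustrative): max_pkt = 512 and enqd_len = 700 give
+	 * new_buff_len = 512 - (700 % 512) = 324, so 700 + 324 = 1024 is
+	 * packet aligned.
+	 */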
+ new_buff_len = max_pkt - (enqd_len % max_pkt);
+
+ if (new_buff_len > (preq->request.length - enqd_len))
+ new_buff_len = (preq->request.length - enqd_len);
+
+	/* Create a max_pkt-sized bounce buffer pointed to by the last TRB. */
+ if (preq->direction) {
+ sg_pcopy_to_buffer(preq->request.sg,
+ preq->request.num_mapped_sgs,
+ seg->bounce_buf, new_buff_len, enqd_len);
+ seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+ max_pkt, DMA_TO_DEVICE);
+ } else {
+ seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+ max_pkt, DMA_FROM_DEVICE);
+ }
+
+ if (dma_mapping_error(dev, seg->bounce_dma)) {
+		/* Try without aligning. */
+ dev_warn(pdev->dev,
+ "Failed mapping bounce buffer, not aligning\n");
+ return 0;
+ }
+
+ *trb_buff_len = new_buff_len;
+ seg->bounce_len = new_buff_len;
+ seg->bounce_offs = enqd_len;
+
+ trace_cdnsp_bounce_map(preq, new_buff_len, enqd_len, seg->bounce_dma,
+ unalign);
+
+	/*
+	 * The bounce buffer was successfully aligned and seg->bounce_dma
+	 * will be used in the transfer TRB as the new transfer buffer
+	 * address.
+	 */
+ return 1;
+}
+
+int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
+{
+ unsigned int enqd_len, block_len, trb_buff_len, full_len;
+ unsigned int start_cycle, num_sgs = 0;
+ struct cdnsp_generic_trb *start_trb;
+ u32 field, length_field, remainder;
+ struct scatterlist *sg = NULL;
+ bool more_trbs_coming = true;
+ bool need_zero_pkt = false;
+ bool zero_len_trb = false;
+ struct cdnsp_ring *ring;
+ bool first_trb = true;
+ unsigned int num_trbs;
+ struct cdnsp_ep *pep;
+ u64 addr, send_addr;
+ int sent_len, ret;
+
+ ring = cdnsp_request_to_transfer_ring(pdev, preq);
+ if (!ring)
+ return -EINVAL;
+
+ full_len = preq->request.length;
+
+ if (preq->request.num_sgs) {
+ num_sgs = preq->request.num_sgs;
+ sg = preq->request.sg;
+ addr = (u64)sg_dma_address(sg);
+ block_len = sg_dma_len(sg);
+ num_trbs = count_sg_trbs_needed(preq);
+ } else {
+ num_trbs = count_trbs_needed(preq);
+ addr = (u64)preq->request.dma;
+ block_len = full_len;
+ }
+
+ pep = preq->pep;
+
+	/* Deal with request.zero - need one more TD/TRB. */
+ if (preq->request.zero && preq->request.length &&
+ IS_ALIGNED(full_len, usb_endpoint_maxp(pep->endpoint.desc))) {
+ need_zero_pkt = true;
+ num_trbs++;
+ }
+
+ ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
+ if (ret)
+ return ret;
+
+ /*
+ * Don't give the first TRB to the hardware (by toggling the cycle bit)
+ * until we've finished creating all the other TRBs. The ring's cycle
+ * state may change as we enqueue the other TRBs, so save it too.
+ */
+ start_trb = &ring->enqueue->generic;
+ start_cycle = ring->cycle_state;
+ send_addr = addr;
+
+ /* Queue the TRBs, even if they are zero-length */
+ for (enqd_len = 0; zero_len_trb || first_trb || enqd_len < full_len;
+ enqd_len += trb_buff_len) {
+ field = TRB_TYPE(TRB_NORMAL);
+
+ /* TRB buffer should not cross 64KB boundaries */
+ trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+ trb_buff_len = min(trb_buff_len, block_len);
+ if (enqd_len + trb_buff_len > full_len)
+ trb_buff_len = full_len - enqd_len;
+
+ /* Don't change the cycle bit of the first TRB until later */
+ if (first_trb) {
+ first_trb = false;
+ if (start_cycle == 0)
+ field |= TRB_CYCLE;
+ } else {
+ field |= ring->cycle_state;
+ }
+
+ /*
+ * Chain all the TRBs together; clear the chain bit in the last
+ * TRB to indicate it's the last TRB in the chain.
+ */
+ if (enqd_len + trb_buff_len < full_len || need_zero_pkt) {
+ field |= TRB_CHAIN;
+ if (cdnsp_trb_is_link(ring->enqueue + 1)) {
+ if (cdnsp_align_td(pdev, preq, enqd_len,
+ &trb_buff_len,
+ ring->enq_seg)) {
+ send_addr = ring->enq_seg->bounce_dma;
+ /* Assuming TD won't span 2 segs */
+ preq->td.bounce_seg = ring->enq_seg;
+ }
+ }
+ }
+
+ if (enqd_len + trb_buff_len >= full_len) {
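+			/*
+			 * If a zero-length packet is still owed, keep the
+			 * chain bit set and loop once more for the extra
+			 * zero-length TRB.
+			 */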
+			if (need_zero_pkt && !zero_len_trb) {
+				zero_len_trb = true;
+			} else {
+				zero_len_trb = false;
+ field &= ~TRB_CHAIN;
+ field |= TRB_IOC;
+ more_trbs_coming = false;
+ need_zero_pkt = false;
+ preq->td.last_trb = ring->enqueue;
+ }
+ }
+
+ /* Only set interrupt on short packet for OUT endpoints. */
+ if (!preq->direction)
+ field |= TRB_ISP;
+
+ /* Set the TRB length, TD size, and interrupter fields. */
+ remainder = cdnsp_td_remainder(pdev, enqd_len, trb_buff_len,
+ full_len, preq,
+ more_trbs_coming);
+
+ length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) |
+ TRB_INTR_TARGET(0);
+
+ cdnsp_queue_trb(pdev, ring, more_trbs_coming | need_zero_pkt,
+ lower_32_bits(send_addr),
+ upper_32_bits(send_addr),
+ length_field,
+ field);
+
+ addr += trb_buff_len;
+ sent_len = trb_buff_len;
+ while (sg && sent_len >= block_len) {
+ /* New sg entry */
+ --num_sgs;
+ sent_len -= block_len;
+ if (num_sgs != 0) {
+ sg = sg_next(sg);
+ block_len = sg_dma_len(sg);
+ addr = (u64)sg_dma_address(sg);
+ addr += sent_len;
+ }
+ }
+ block_len -= sent_len;
+ send_addr = addr;
+ }
+
+ cdnsp_check_trb_math(preq, enqd_len);
+ ret = cdnsp_giveback_first_trb(pdev, pep, preq->request.stream_id,
+ start_cycle, start_trb);
+
+ if (ret)
+ preq->td.drbl = 1;
+
+ return 0;
+}
+
+int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
+{
+ u32 field, length_field, remainder;
+ struct cdnsp_ep *pep = preq->pep;
+ struct cdnsp_ring *ep_ring;
+ int num_trbs;
+ int ret;
+
+ ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
+ if (!ep_ring)
+ return -EINVAL;
+
+	/* 1 TRB for the data stage (if any), 1 for the status stage. */
+ num_trbs = (pdev->three_stage_setup) ? 2 : 1;
+
+ ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
+ if (ret)
+ return ret;
+
+ /* If there's data, queue data TRBs */
+ if (pdev->ep0_expect_in)
+ field = TRB_TYPE(TRB_DATA) | TRB_IOC;
+ else
+ field = TRB_ISP | TRB_TYPE(TRB_DATA) | TRB_IOC;
+
+ if (preq->request.length > 0) {
+ remainder = cdnsp_td_remainder(pdev, 0, preq->request.length,
+ preq->request.length, preq, 1);
+
+ length_field = TRB_LEN(preq->request.length) |
+ TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0);
+
+ if (pdev->ep0_expect_in)
+ field |= TRB_DIR_IN;
+
+ cdnsp_queue_trb(pdev, ep_ring, true,
+ lower_32_bits(preq->request.dma),
+ upper_32_bits(preq->request.dma), length_field,
+ field | ep_ring->cycle_state |
+ TRB_SETUPID(pdev->setup_id) |
+ pdev->setup_speed);
+
+ pdev->ep0_stage = CDNSP_DATA_STAGE;
+ }
+
+ /* Save the DMA address of the last TRB in the TD. */
+ preq->td.last_trb = ep_ring->enqueue;
+
+ /* Queue status TRB. */
+ if (preq->request.length == 0)
+ field = ep_ring->cycle_state;
+ else
+ field = (ep_ring->cycle_state ^ 1);
+
+ if (preq->request.length > 0 && pdev->ep0_expect_in)
+ field |= TRB_DIR_IN;
+
+ if (pep->ep_state & EP0_HALTED_STATUS) {
+ pep->ep_state &= ~EP0_HALTED_STATUS;
+ field |= TRB_SETUPSTAT(TRB_SETUPSTAT_STALL);
+ } else {
+ field |= TRB_SETUPSTAT(TRB_SETUPSTAT_ACK);
+ }
+
+ cdnsp_queue_trb(pdev, ep_ring, false, 0, 0, TRB_INTR_TARGET(0),
+ field | TRB_IOC | TRB_SETUPID(pdev->setup_id) |
+ TRB_TYPE(TRB_STATUS) | pdev->setup_speed);
+
+ cdnsp_ring_ep_doorbell(pdev, pep, preq->request.stream_id);
+
+ return 0;
+}
+
+int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+ u32 ep_state = GET_EP_CTX_STATE(pep->out_ctx);
+ int ret = 0;
+
+ if (ep_state == EP_STATE_STOPPED || ep_state == EP_STATE_DISABLED) {
+ trace_cdnsp_ep_stopped_or_disabled(pep->out_ctx);
+ goto ep_stopped;
+ }
+
+ cdnsp_queue_stop_endpoint(pdev, pep->idx);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+
+ trace_cdnsp_handle_cmd_stop_ep(pep->out_ctx);
+
+ep_stopped:
+ pep->ep_state |= EP_STOPPED;
+ return ret;
+}
+
+int cdnsp_cmd_flush_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+ int ret;
+
+ cdnsp_queue_flush_endpoint(pdev, pep->idx);
+ cdnsp_ring_cmd_db(pdev);
+ ret = cdnsp_wait_for_cmd_compl(pdev);
+
+ trace_cdnsp_handle_cmd_flush_ep(pep->out_ctx);
+
+ return ret;
+}
+
+/*
+ * The transfer burst count field of the isochronous TRB defines the number of
+ * bursts that are required to move all packets in this TD. Only SuperSpeed
+ * devices can burst up to bMaxBurst number of packets per service interval.
+ * This field is zero based, meaning a value of zero in the field means one
+ * burst. Basically, for everything but SuperSpeed devices, this field will be
+ * zero.
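+ *
+ * Example (illustrative): bMaxBurst = 3 (four packets per burst) and
+ * total_packet_count = 10 give DIV_ROUND_UP(10, 4) - 1 = 2, i.e. three
+ * bursts encoded zero-based.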
+ */
+static unsigned int cdnsp_get_burst_count(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq,
+ unsigned int total_packet_count)
+{
+ unsigned int max_burst;
+
+ if (pdev->gadget.speed < USB_SPEED_SUPER)
+ return 0;
+
+ max_burst = preq->pep->endpoint.comp_desc->bMaxBurst;
+ return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
+}
+
+/*
+ * Returns the number of packets in the last "burst" of packets. This field is
+ * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
+ * the last burst packet count is equal to the total number of packets in the
+ * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
+ * must contain (bMaxBurst + 1) number of packets, but the last burst can
+ * contain 1 to (bMaxBurst + 1) packets.
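+ *
+ * Example (illustrative): bMaxBurst = 3 and total_packet_count = 10 give
+ * residue = 10 % 4 = 2, so the field is 2 - 1 = 1: two packets in the
+ * last burst, encoded zero-based.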
+ */
+static unsigned int
+ cdnsp_get_last_burst_packet_count(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq,
+ unsigned int total_packet_count)
+{
+ unsigned int max_burst;
+ unsigned int residue;
+
+ if (pdev->gadget.speed >= USB_SPEED_SUPER) {
+ /* bMaxBurst is zero based: 0 means 1 packet per burst. */
+ max_burst = preq->pep->endpoint.comp_desc->bMaxBurst;
+ residue = total_packet_count % (max_burst + 1);
+
+ /*
+ * If residue is zero, the last burst contains (max_burst + 1)
+ * number of packets, but the TLBPC field is zero-based.
+ */
+ if (residue == 0)
+ return max_burst;
+
+ return residue - 1;
+ }
+ if (total_packet_count == 0)
+ return 0;
+
+ return total_packet_count - 1;
+}
+
+/* Queuing function for isoc transfers. */
+static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq)
+{
+ int trb_buff_len, td_len, td_remain_len, ret;
+ unsigned int burst_count, last_burst_pkt;
+ unsigned int total_pkt_count, max_pkt;
+ struct cdnsp_generic_trb *start_trb;
+ bool more_trbs_coming = true;
+ struct cdnsp_ring *ep_ring;
+ int running_total = 0;
+ u32 field, length_field;
+ int start_cycle;
+ int trbs_per_td;
+ u64 addr;
+ int i;
+
+ ep_ring = preq->pep->ring;
+ start_trb = &ep_ring->enqueue->generic;
+ start_cycle = ep_ring->cycle_state;
+ td_len = preq->request.length;
+ addr = (u64)preq->request.dma;
+ td_remain_len = td_len;
+
+ max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
+ total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
+
+ /* A zero-length transfer still involves at least one packet. */
+ if (total_pkt_count == 0)
+ total_pkt_count++;
+
+ burst_count = cdnsp_get_burst_count(pdev, preq, total_pkt_count);
+ last_burst_pkt = cdnsp_get_last_burst_packet_count(pdev, preq,
+ total_pkt_count);
+ trbs_per_td = count_isoc_trbs_needed(preq);
+
+ ret = cdnsp_prepare_transfer(pdev, preq, trbs_per_td);
+ if (ret)
+ goto cleanup;
+
+ /*
+ * Set isoc specific data for the first TRB in a TD.
+ * Prevent HW from getting the TRBs by keeping the cycle state
+	 * inverted in the first TD's isoc TRB.
+ */
+	field = TRB_TYPE(TRB_ISOC) | TRB_TLBPC(last_burst_pkt) |
+		(start_cycle ? 0 : 1) | TRB_SIA | TRB_TBC(burst_count);
+
+ /* Fill the rest of the TRB fields, and remaining normal TRBs. */
+ for (i = 0; i < trbs_per_td; i++) {
+ u32 remainder;
+
+ /* Calculate TRB length. */
+ trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+ if (trb_buff_len > td_remain_len)
+ trb_buff_len = td_remain_len;
+
+ /* Set the TRB length, TD size, & interrupter fields. */
+ remainder = cdnsp_td_remainder(pdev, running_total,
+ trb_buff_len, td_len, preq,
+ more_trbs_coming);
+
+ length_field = TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0);
+
+		/* Only the first TRB is isoc; overwrite with a normal TRB otherwise. */
+ if (i) {
+ field = TRB_TYPE(TRB_NORMAL) | ep_ring->cycle_state;
+ length_field |= TRB_TD_SIZE(remainder);
+ } else {
+ length_field |= TRB_TD_SIZE_TBC(burst_count);
+ }
+
+ /* Only set interrupt on short packet for OUT EPs. */
+ if (usb_endpoint_dir_out(preq->pep->endpoint.desc))
+ field |= TRB_ISP;
+
+ /* Set the chain bit for all except the last TRB. */
+ if (i < trbs_per_td - 1) {
+ more_trbs_coming = true;
+ field |= TRB_CHAIN;
+ } else {
+ more_trbs_coming = false;
+ preq->td.last_trb = ep_ring->enqueue;
+ field |= TRB_IOC;
+ }
+
+ cdnsp_queue_trb(pdev, ep_ring, more_trbs_coming,
+ lower_32_bits(addr), upper_32_bits(addr),
+ length_field, field);
+
+ running_total += trb_buff_len;
+ addr += trb_buff_len;
+ td_remain_len -= trb_buff_len;
+ }
+
+ /* Check TD length */
+ if (running_total != td_len) {
+		dev_err(pdev->dev, "ISOC TD length mismatch\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ cdnsp_giveback_first_trb(pdev, preq->pep, preq->request.stream_id,
+ start_cycle, start_trb);
+
+ return 0;
+
+cleanup:
+ /* Clean up a partially enqueued isoc transfer. */
+ list_del_init(&preq->td.td_list);
+ ep_ring->num_tds--;
+
+ /*
+ * Use the first TD as a temporary variable to turn the TDs we've
+ * queued into No-ops with a software-owned cycle bit.
+ * That way the hardware won't accidentally start executing bogus TDs
+ * when we partially overwrite them.
+ * td->first_trb and td->start_seg are already set.
+ */
+ preq->td.last_trb = ep_ring->enqueue;
+ /* Every TRB except the first & last will have its cycle bit flipped. */
+ cdnsp_td_to_noop(pdev, ep_ring, &preq->td, true);
+
+ /* Reset the ring enqueue back to the first TRB and its cycle bit. */
+ ep_ring->enqueue = preq->td.first_trb;
+ ep_ring->enq_seg = preq->td.start_seg;
+ ep_ring->cycle_state = start_cycle;
+ return ret;
+}
+
+int cdnsp_queue_isoc_tx_prepare(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq)
+{
+ struct cdnsp_ring *ep_ring;
+ u32 ep_state;
+ int num_trbs;
+ int ret;
+
+ ep_ring = preq->pep->ring;
+ ep_state = GET_EP_CTX_STATE(preq->pep->out_ctx);
+ num_trbs = count_isoc_trbs_needed(preq);
+
+ /*
+ * Check the ring to guarantee there is enough room for the whole
+	 * request. Do not insert any TD of the USB request into the ring
+	 * if the check fails.
+ */
+ ret = cdnsp_prepare_ring(pdev, ep_ring, ep_state, num_trbs, GFP_ATOMIC);
+ if (ret)
+ return ret;
+
+ return cdnsp_queue_isoc_tx(pdev, preq);
+}
+
+/**** Command Ring Operations ****/
+/*
+ * Generic function for queuing a command TRB on the command ring.
+ * The driver queues only one command on the ring at a time.
+ */
+static void cdnsp_queue_command(struct cdnsp_device *pdev,
+ u32 field1,
+ u32 field2,
+ u32 field3,
+ u32 field4)
+{
+ cdnsp_prepare_ring(pdev, pdev->cmd_ring, EP_STATE_RUNNING, 1,
+ GFP_ATOMIC);
+
+ pdev->cmd.command_trb = pdev->cmd_ring->enqueue;
+
+ cdnsp_queue_trb(pdev, pdev->cmd_ring, false, field1, field2,
+ field3, field4 | pdev->cmd_ring->cycle_state);
+}
+
+/* Queue a slot enable or disable request on the command ring */
+void cdnsp_queue_slot_control(struct cdnsp_device *pdev, u32 trb_type)
+{
+ cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(trb_type) |
+ SLOT_ID_FOR_TRB(pdev->slot_id));
+}
+
+/* Queue an address device command TRB */
+void cdnsp_queue_address_device(struct cdnsp_device *pdev,
+ dma_addr_t in_ctx_ptr,
+ enum cdnsp_setup_dev setup)
+{
+ cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr),
+ upper_32_bits(in_ctx_ptr), 0,
+ TRB_TYPE(TRB_ADDR_DEV) |
+ SLOT_ID_FOR_TRB(pdev->slot_id) |
+ (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0));
+}
+
+/* Queue a reset device command TRB */
+void cdnsp_queue_reset_device(struct cdnsp_device *pdev)
+{
+ cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_RESET_DEV) |
+ SLOT_ID_FOR_TRB(pdev->slot_id));
+}
+
+/* Queue a configure endpoint command TRB */
+void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev,
+ dma_addr_t in_ctx_ptr)
+{
+ cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr),
+ upper_32_bits(in_ctx_ptr), 0,
+ TRB_TYPE(TRB_CONFIG_EP) |
+ SLOT_ID_FOR_TRB(pdev->slot_id));
+}
+
+/*
+ * Queue a "Stop Endpoint" command to stop activity on an endpoint,
+ * e.g. one that is about to be suspended.
+ */
+void cdnsp_queue_stop_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
+{
+ cdnsp_queue_command(pdev, 0, 0, 0, SLOT_ID_FOR_TRB(pdev->slot_id) |
+ EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_STOP_RING));
+}
+
+/* Set Transfer Ring Dequeue Pointer command. */
+void cdnsp_queue_new_dequeue_state(struct cdnsp_device *pdev,
+ struct cdnsp_ep *pep,
+ struct cdnsp_dequeue_state *deq_state)
+{
+ u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id);
+ u32 trb_slot_id = SLOT_ID_FOR_TRB(pdev->slot_id);
+ u32 type = TRB_TYPE(TRB_SET_DEQ);
+ u32 trb_sct = 0;
+ dma_addr_t addr;
+
+ addr = cdnsp_trb_virt_to_dma(deq_state->new_deq_seg,
+ deq_state->new_deq_ptr);
+
+ if (deq_state->stream_id)
+ trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
+
+ cdnsp_queue_command(pdev, lower_32_bits(addr) | trb_sct |
+ deq_state->new_cycle_state, upper_32_bits(addr),
+ trb_stream_id, trb_slot_id |
+ EP_ID_FOR_TRB(pep->idx) | type);
+}
+
+void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index)
+{
+ return cdnsp_queue_command(pdev, 0, 0, 0,
+ SLOT_ID_FOR_TRB(pdev->slot_id) |
+ EP_ID_FOR_TRB(ep_index) |
+ TRB_TYPE(TRB_RESET_EP));
+}
+
+/*
+ * Queue a halt endpoint request on the command ring.
+ */
+void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
+{
+ cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_HALT_ENDPOINT) |
+ SLOT_ID_FOR_TRB(pdev->slot_id) |
+ EP_ID_FOR_TRB(ep_index));
+}
+
+/*
+ * Queue a flush endpoint request on the command ring.
+ */
+void cdnsp_queue_flush_endpoint(struct cdnsp_device *pdev,
+ unsigned int ep_index)
+{
+ cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_FLUSH_ENDPOINT) |
+ SLOT_ID_FOR_TRB(pdev->slot_id) |
+ EP_ID_FOR_TRB(ep_index));
+}
+
+void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num)
+{
+ u32 lo, mid;
+
+ lo = TRB_FH_TO_PACKET_TYPE(TRB_FH_TR_PACKET) |
+ TRB_FH_TO_DEVICE_ADDRESS(pdev->device_address);
+ mid = TRB_FH_TR_PACKET_DEV_NOT |
+ TRB_FH_TO_NOT_TYPE(TRB_FH_TR_PACKET_FUNCTION_WAKE) |
+ TRB_FH_TO_INTERFACE(intf_num);
+
+ cdnsp_queue_command(pdev, lo, mid, 0,
+ TRB_TYPE(TRB_FORCE_HEADER) | SET_PORT_ID(2));
+}
diff --git a/drivers/usb/cdns3/cdnsp-trace.c b/drivers/usb/cdns3/cdnsp-trace.c
new file mode 100644
index 000000000000..e50ab799ad95
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+
+#define CREATE_TRACE_POINTS
+#include "cdnsp-trace.h"
diff --git a/drivers/usb/cdns3/cdnsp-trace.h b/drivers/usb/cdns3/cdnsp-trace.h
new file mode 100644
index 000000000000..5aa88ca012de
--- /dev/null
+++ b/drivers/usb/cdns3/cdnsp-trace.h
@@ -0,0 +1,830 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence CDNSP DRD Driver.
+ * Trace support header file
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cdnsp-dev
+
+/*
+ * The TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a
+ * legitimate C variable. It is not exported to user space.
+ */
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR cdnsp_dev
+
+#if !defined(__CDNSP_DEV_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __CDNSP_DEV_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "cdnsp-gadget.h"
+#include "cdnsp-debug.h"
+
+/*
+ * There is a limitation on the single buffer size in the TRACEPOINT
+ * subsystem. By default TRACE_BUF_SIZE is 1024, so not all data will be
+ * logged. To show more data this value must be increased. In most cases
+ * the default value is sufficient.
+ */
+#define CDNSP_MSG_MAX 500
+
+DECLARE_EVENT_CLASS(cdnsp_log_ep,
+ TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+ TP_ARGS(pep, stream_id),
+ TP_STRUCT__entry(
+ __string(name, pep->name)
+ __field(unsigned int, state)
+ __field(u32, stream_id)
+ __field(u8, enabled)
+ __field(unsigned int, num_streams)
+ __field(int, td_count)
+ __field(u8, first_prime_det)
+ __field(u8, drbls_count)
+ ),
+ TP_fast_assign(
+ __assign_str(name, pep->name);
+ __entry->state = pep->ep_state;
+ __entry->stream_id = stream_id;
+ __entry->enabled = pep->ep_state & EP_HAS_STREAMS;
+ __entry->num_streams = pep->stream_info.num_streams;
+ __entry->td_count = pep->stream_info.td_count;
+ __entry->first_prime_det = pep->stream_info.first_prime_det;
+ __entry->drbls_count = pep->stream_info.drbls_count;
+ ),
+ TP_printk("%s: SID: %08x ep state: %x stream: enabled: %d num %d "
+ "tds %d, first prime: %d drbls %d",
+		  __get_str(name), __entry->stream_id, __entry->state,
+ __entry->enabled, __entry->num_streams, __entry->td_count,
+ __entry->first_prime_det, __entry->drbls_count)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_tr_drbl,
+ TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+ TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_wait_for_prime,
+ TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+ TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_ep_list_empty_with_skip,
+ TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+ TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_ep_enable_end,
+ TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+ TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_ep_disable_end,
+ TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+ TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_ep_busy_try_halt_again,
+ TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+ TP_ARGS(pep, stream_id)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_enable_disable,
+ TP_PROTO(int set),
+ TP_ARGS(set),
+ TP_STRUCT__entry(
+ __field(int, set)
+ ),
+ TP_fast_assign(
+ __entry->set = set;
+ ),
+ TP_printk("%s", __entry->set ? "enabled" : "disabled")
+);
+
+DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_pullup,
+ TP_PROTO(int set),
+ TP_ARGS(set)
+);
+
+DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_u1,
+ TP_PROTO(int set),
+ TP_ARGS(set)
+);
+
+DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_u2,
+ TP_PROTO(int set),
+ TP_ARGS(set)
+);
+
+DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_lpm,
+ TP_PROTO(int set),
+ TP_ARGS(set)
+);
+
+DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_may_wakeup,
+ TP_PROTO(int set),
+ TP_ARGS(set)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_simple,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg),
+ TP_STRUCT__entry(
+ __string(text, msg)
+ ),
+ TP_fast_assign(
+ __assign_str(text, msg)
+ ),
+ TP_printk("%s", __get_str(text))
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_exit,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_init,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_slot_id,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_no_room_on_ring,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep0_status_stage,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep0_request,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep0_set_config,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep0_halted,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep_halt,
+ TP_PROTO(char *msg),
+ TP_ARGS(msg)
+);
+
+TRACE_EVENT(cdnsp_looking_trb_in_td,
+ TP_PROTO(dma_addr_t suspect, dma_addr_t trb_start, dma_addr_t trb_end,
+ dma_addr_t curr_seg, dma_addr_t end_seg),
+ TP_ARGS(suspect, trb_start, trb_end, curr_seg, end_seg),
+ TP_STRUCT__entry(
+ __field(dma_addr_t, suspect)
+ __field(dma_addr_t, trb_start)
+ __field(dma_addr_t, trb_end)
+ __field(dma_addr_t, curr_seg)
+ __field(dma_addr_t, end_seg)
+ ),
+ TP_fast_assign(
+ __entry->suspect = suspect;
+ __entry->trb_start = trb_start;
+ __entry->trb_end = trb_end;
+ __entry->curr_seg = curr_seg;
+ __entry->end_seg = end_seg;
+ ),
+ TP_printk("DMA: suspect event: %pad, trb-start: %pad, trb-end %pad, "
+ "seg-start %pad, seg-end %pad",
+ &__entry->suspect, &__entry->trb_start, &__entry->trb_end,
+ &__entry->curr_seg, &__entry->end_seg)
+);
+
+TRACE_EVENT(cdnsp_port_info,
+ TP_PROTO(__le32 __iomem *addr, u32 offset, u32 count, u32 rev),
+ TP_ARGS(addr, offset, count, rev),
+ TP_STRUCT__entry(
+ __field(__le32 __iomem *, addr)
+ __field(u32, offset)
+ __field(u32, count)
+ __field(u32, rev)
+ ),
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->offset = offset;
+ __entry->count = count;
+ __entry->rev = rev;
+ ),
+ TP_printk("Ext Cap %p, port offset = %u, count = %u, rev = 0x%x",
+ __entry->addr, __entry->offset, __entry->count, __entry->rev)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_deq_state,
+ TP_PROTO(struct cdnsp_dequeue_state *state),
+ TP_ARGS(state),
+ TP_STRUCT__entry(
+ __field(int, new_cycle_state)
+ __field(struct cdnsp_segment *, new_deq_seg)
+ __field(dma_addr_t, deq_seg_dma)
+ __field(union cdnsp_trb *, new_deq_ptr)
+ __field(dma_addr_t, deq_ptr_dma)
+ ),
+ TP_fast_assign(
+ __entry->new_cycle_state = state->new_cycle_state;
+ __entry->new_deq_seg = state->new_deq_seg;
+ __entry->deq_seg_dma = state->new_deq_seg->dma;
+		__entry->new_deq_ptr = state->new_deq_ptr;
+ __entry->deq_ptr_dma = cdnsp_trb_virt_to_dma(state->new_deq_seg,
+ state->new_deq_ptr);
+ ),
+ TP_printk("New cycle state = 0x%x, New dequeue segment = %p (0x%pad dma), "
+ "New dequeue pointer = %p (0x%pad dma)",
+ __entry->new_cycle_state, __entry->new_deq_seg,
+ &__entry->deq_seg_dma, __entry->new_deq_ptr,
+ &__entry->deq_ptr_dma
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_deq_state, cdnsp_new_deq_state,
+ TP_PROTO(struct cdnsp_dequeue_state *state),
+ TP_ARGS(state)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_ctrl,
+ TP_PROTO(struct usb_ctrlrequest *ctrl),
+ TP_ARGS(ctrl),
+ TP_STRUCT__entry(
+ __field(u8, bRequestType)
+ __field(u8, bRequest)
+ __field(u16, wValue)
+ __field(u16, wIndex)
+ __field(u16, wLength)
+ __dynamic_array(char, str, CDNSP_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __entry->bRequestType = ctrl->bRequestType;
+ __entry->bRequest = ctrl->bRequest;
+ __entry->wValue = le16_to_cpu(ctrl->wValue);
+ __entry->wIndex = le16_to_cpu(ctrl->wIndex);
+ __entry->wLength = le16_to_cpu(ctrl->wLength);
+ ),
+ TP_printk("%s", usb_decode_ctrl(__get_str(str), CDNSP_MSG_MAX,
+ __entry->bRequestType,
+ __entry->bRequest, __entry->wValue,
+ __entry->wIndex, __entry->wLength)
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_ctrl, cdnsp_ctrl_req,
+ TP_PROTO(struct usb_ctrlrequest *ctrl),
+ TP_ARGS(ctrl)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_bounce,
+ TP_PROTO(struct cdnsp_request *preq, u32 new_buf_len, u32 offset,
+ dma_addr_t dma, unsigned int unalign),
+ TP_ARGS(preq, new_buf_len, offset, dma, unalign),
+ TP_STRUCT__entry(
+ __string(name, preq->pep->name)
+ __field(u32, new_buf_len)
+ __field(u32, offset)
+ __field(dma_addr_t, dma)
+ __field(unsigned int, unalign)
+ ),
+ TP_fast_assign(
+ __assign_str(name, preq->pep->name);
+ __entry->new_buf_len = new_buf_len;
+ __entry->offset = offset;
+ __entry->dma = dma;
+ __entry->unalign = unalign;
+ ),
+ TP_printk("%s buf len %d, offset %d, dma %pad, unalign %d",
+ __get_str(name), __entry->new_buf_len,
+ __entry->offset, &__entry->dma, __entry->unalign
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_bounce, cdnsp_bounce_align_td_split,
+ TP_PROTO(struct cdnsp_request *preq, u32 new_buf_len, u32 offset,
+ dma_addr_t dma, unsigned int unalign),
+ TP_ARGS(preq, new_buf_len, offset, dma, unalign)
+);
+
+DEFINE_EVENT(cdnsp_log_bounce, cdnsp_bounce_map,
+ TP_PROTO(struct cdnsp_request *preq, u32 new_buf_len, u32 offset,
+ dma_addr_t dma, unsigned int unalign),
+ TP_ARGS(preq, new_buf_len, offset, dma, unalign)
+);
+
+DEFINE_EVENT(cdnsp_log_bounce, cdnsp_bounce_unmap,
+ TP_PROTO(struct cdnsp_request *preq, u32 new_buf_len, u32 offset,
+ dma_addr_t dma, unsigned int unalign),
+ TP_ARGS(preq, new_buf_len, offset, dma, unalign)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_trb,
+ TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+ TP_ARGS(ring, trb),
+ TP_STRUCT__entry(
+ __field(u32, type)
+ __field(u32, field0)
+ __field(u32, field1)
+ __field(u32, field2)
+ __field(u32, field3)
+ __field(union cdnsp_trb *, trb)
+ __field(dma_addr_t, trb_dma)
+ __dynamic_array(char, str, CDNSP_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __entry->type = ring->type;
+ __entry->field0 = le32_to_cpu(trb->field[0]);
+ __entry->field1 = le32_to_cpu(trb->field[1]);
+ __entry->field2 = le32_to_cpu(trb->field[2]);
+ __entry->field3 = le32_to_cpu(trb->field[3]);
+ __entry->trb = (union cdnsp_trb *)trb;
+ __entry->trb_dma = cdnsp_trb_virt_to_dma(ring->deq_seg,
+ (union cdnsp_trb *)trb);
+
+ ),
+ TP_printk("%s: %s trb: %p(%pad)", cdnsp_ring_type_string(__entry->type),
+ cdnsp_decode_trb(__get_str(str), CDNSP_MSG_MAX,
+ __entry->field0, __entry->field1,
+ __entry->field2, __entry->field3),
+ __entry->trb, &__entry->trb_dma
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_handle_event,
+ TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+ TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_trb_without_td,
+ TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+ TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_handle_command,
+ TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+ TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_handle_transfer,
+ TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+ TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_queue_trb,
+ TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+ TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_cmd_wait_for_compl,
+ TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+ TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_cmd_timeout,
+ TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+ TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_defered_event,
+ TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+ TP_ARGS(ring, trb)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_pdev,
+ TP_PROTO(struct cdnsp_device *pdev),
+ TP_ARGS(pdev),
+ TP_STRUCT__entry(
+ __field(struct cdnsp_device *, pdev)
+ __field(struct usb_gadget *, gadget)
+ __field(dma_addr_t, out_ctx)
+ __field(dma_addr_t, in_ctx)
+ __field(u8, port_num)
+ ),
+ TP_fast_assign(
+ __entry->pdev = pdev;
+ __entry->gadget = &pdev->gadget;
+ __entry->in_ctx = pdev->in_ctx.dma;
+ __entry->out_ctx = pdev->out_ctx.dma;
+ __entry->port_num = pdev->active_port ?
+ pdev->active_port->port_num : 0xFF;
+ ),
+ TP_printk("pdev %p gadget %p ctx %pad | %pad, port %d ",
+ __entry->pdev, __entry->gadget, &__entry->in_ctx,
+ &__entry->out_ctx, __entry->port_num
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_pdev, cdnsp_alloc_priv_device,
+ TP_PROTO(struct cdnsp_device *vdev),
+ TP_ARGS(vdev)
+);
+
+DEFINE_EVENT(cdnsp_log_pdev, cdnsp_free_priv_device,
+ TP_PROTO(struct cdnsp_device *vdev),
+ TP_ARGS(vdev)
+);
+
+DEFINE_EVENT(cdnsp_log_pdev, cdnsp_setup_device,
+ TP_PROTO(struct cdnsp_device *vdev),
+ TP_ARGS(vdev)
+);
+
+DEFINE_EVENT(cdnsp_log_pdev, cdnsp_setup_addressable_priv_device,
+ TP_PROTO(struct cdnsp_device *vdev),
+ TP_ARGS(vdev)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_request,
+ TP_PROTO(struct cdnsp_request *req),
+ TP_ARGS(req),
+ TP_STRUCT__entry(
+ __string(name, req->pep->name)
+ __field(struct usb_request *, request)
+ __field(struct cdnsp_request *, preq)
+ __field(void *, buf)
+ __field(unsigned int, actual)
+ __field(unsigned int, length)
+ __field(int, status)
+ __field(dma_addr_t, dma)
+ __field(unsigned int, stream_id)
+ __field(unsigned int, zero)
+ __field(unsigned int, short_not_ok)
+ __field(unsigned int, no_interrupt)
+ __field(struct scatterlist*, sg)
+ __field(unsigned int, num_sgs)
+ __field(unsigned int, num_mapped_sgs)
+
+ ),
+ TP_fast_assign(
+ __assign_str(name, req->pep->name);
+ __entry->request = &req->request;
+ __entry->preq = req;
+ __entry->buf = req->request.buf;
+ __entry->actual = req->request.actual;
+ __entry->length = req->request.length;
+ __entry->status = req->request.status;
+ __entry->dma = req->request.dma;
+ __entry->stream_id = req->request.stream_id;
+ __entry->zero = req->request.zero;
+ __entry->short_not_ok = req->request.short_not_ok;
+ __entry->no_interrupt = req->request.no_interrupt;
+ __entry->sg = req->request.sg;
+ __entry->num_sgs = req->request.num_sgs;
+ __entry->num_mapped_sgs = req->request.num_mapped_sgs;
+ ),
+ TP_printk("%s; req U:%p/P:%p, req buf %p, length %u/%u, status %d, "
+ "buf dma (%pad), SID %u, %s%s%s, sg %p, num_sg %d,"
+ " num_m_sg %d",
+ __get_str(name), __entry->request, __entry->preq,
+ __entry->buf, __entry->actual, __entry->length,
+ __entry->status, &__entry->dma,
+ __entry->stream_id, __entry->zero ? "Z" : "z",
+ __entry->short_not_ok ? "S" : "s",
+ __entry->no_interrupt ? "I" : "i",
+ __entry->sg, __entry->num_sgs, __entry->num_mapped_sgs
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_request, cdnsp_request_enqueue,
+ TP_PROTO(struct cdnsp_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdnsp_log_request, cdnsp_request_enqueue_busy,
+ TP_PROTO(struct cdnsp_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdnsp_log_request, cdnsp_request_enqueue_error,
+ TP_PROTO(struct cdnsp_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdnsp_log_request, cdnsp_request_dequeue,
+ TP_PROTO(struct cdnsp_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdnsp_log_request, cdnsp_request_giveback,
+ TP_PROTO(struct cdnsp_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdnsp_log_request, cdnsp_alloc_request,
+ TP_PROTO(struct cdnsp_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdnsp_log_request, cdnsp_free_request,
+ TP_PROTO(struct cdnsp_request *req),
+ TP_ARGS(req)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_ep_ctx,
+ TP_PROTO(struct cdnsp_ep_ctx *ctx),
+ TP_ARGS(ctx),
+ TP_STRUCT__entry(
+ __field(u32, info)
+ __field(u32, info2)
+ __field(u64, deq)
+ __field(u32, tx_info)
+ __dynamic_array(char, str, CDNSP_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __entry->info = le32_to_cpu(ctx->ep_info);
+ __entry->info2 = le32_to_cpu(ctx->ep_info2);
+ __entry->deq = le64_to_cpu(ctx->deq);
+ __entry->tx_info = le32_to_cpu(ctx->tx_info);
+ ),
+ TP_printk("%s", cdnsp_decode_ep_context(__get_str(str), CDNSP_MSG_MAX,
+ __entry->info, __entry->info2,
+ __entry->deq, __entry->tx_info)
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_ep_disabled,
+ TP_PROTO(struct cdnsp_ep_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_ep_stopped_or_disabled,
+ TP_PROTO(struct cdnsp_ep_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_remove_request,
+ TP_PROTO(struct cdnsp_ep_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_stop_ep,
+ TP_PROTO(struct cdnsp_ep_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_flush_ep,
+ TP_PROTO(struct cdnsp_ep_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_set_deq_ep,
+ TP_PROTO(struct cdnsp_ep_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_reset_ep,
+ TP_PROTO(struct cdnsp_ep_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_config_ep,
+ TP_PROTO(struct cdnsp_ep_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_slot_ctx,
+ TP_PROTO(struct cdnsp_slot_ctx *ctx),
+ TP_ARGS(ctx),
+ TP_STRUCT__entry(
+ __field(u32, info)
+ __field(u32, info2)
+ __field(u32, int_target)
+ __field(u32, state)
+ ),
+ TP_fast_assign(
+ __entry->info = le32_to_cpu(ctx->dev_info);
+ __entry->info2 = le32_to_cpu(ctx->dev_port);
+ __entry->int_target = le32_to_cpu(ctx->int_target);
+ __entry->state = le32_to_cpu(ctx->dev_state);
+ ),
+ TP_printk("%s", cdnsp_decode_slot_context(__entry->info,
+ __entry->info2,
+ __entry->int_target,
+ __entry->state)
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_slot_already_in_default,
+ TP_PROTO(struct cdnsp_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_enable_slot,
+ TP_PROTO(struct cdnsp_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_disable_slot,
+ TP_PROTO(struct cdnsp_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_reset_device,
+ TP_PROTO(struct cdnsp_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_setup_device_slot,
+ TP_PROTO(struct cdnsp_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_addr_dev,
+ TP_PROTO(struct cdnsp_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_reset_dev,
+ TP_PROTO(struct cdnsp_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_set_deq,
+ TP_PROTO(struct cdnsp_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_configure_endpoint,
+ TP_PROTO(struct cdnsp_slot_ctx *ctx),
+ TP_ARGS(ctx)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_td_info,
+ TP_PROTO(struct cdnsp_request *preq),
+ TP_ARGS(preq),
+ TP_STRUCT__entry(
+ __string(name, preq->pep->name)
+ __field(struct usb_request *, request)
+ __field(struct cdnsp_request *, preq)
+ __field(union cdnsp_trb *, first_trb)
+ __field(union cdnsp_trb *, last_trb)
+ __field(dma_addr_t, trb_dma)
+ ),
+ TP_fast_assign(
+ __assign_str(name, preq->pep->name);
+ __entry->request = &preq->request;
+ __entry->preq = preq;
+ __entry->first_trb = preq->td.first_trb;
+ __entry->last_trb = preq->td.last_trb;
+ __entry->trb_dma = cdnsp_trb_virt_to_dma(preq->td.start_seg,
+ preq->td.first_trb);
+ ),
+ TP_printk("%s req/preq: %p/%p, first trb %p[vir]/%pad(dma), last trb %p",
+ __get_str(name), __entry->request, __entry->preq,
+ __entry->first_trb, &__entry->trb_dma,
+ __entry->last_trb
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_td_info, cdnsp_remove_request_td,
+ TP_PROTO(struct cdnsp_request *preq),
+ TP_ARGS(preq)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_ring,
+ TP_PROTO(struct cdnsp_ring *ring),
+ TP_ARGS(ring),
+ TP_STRUCT__entry(
+ __field(u32, type)
+ __field(void *, ring)
+ __field(dma_addr_t, enq)
+ __field(dma_addr_t, deq)
+ __field(dma_addr_t, enq_seg)
+ __field(dma_addr_t, deq_seg)
+ __field(unsigned int, num_segs)
+ __field(unsigned int, stream_id)
+ __field(unsigned int, cycle_state)
+ __field(unsigned int, num_trbs_free)
+ __field(unsigned int, bounce_buf_len)
+ ),
+ TP_fast_assign(
+ __entry->ring = ring;
+ __entry->type = ring->type;
+ __entry->num_segs = ring->num_segs;
+ __entry->stream_id = ring->stream_id;
+ __entry->enq_seg = ring->enq_seg->dma;
+ __entry->deq_seg = ring->deq_seg->dma;
+ __entry->cycle_state = ring->cycle_state;
+ __entry->num_trbs_free = ring->num_trbs_free;
+ __entry->bounce_buf_len = ring->bounce_buf_len;
+ __entry->enq = cdnsp_trb_virt_to_dma(ring->enq_seg,
+ ring->enqueue);
+ __entry->deq = cdnsp_trb_virt_to_dma(ring->deq_seg,
+ ring->dequeue);
+ ),
+ TP_printk("%s %p: enq %pad(%pad) deq %pad(%pad) segs %d stream %d"
+ " free_trbs %d bounce %d cycle %d",
+ cdnsp_ring_type_string(__entry->type), __entry->ring,
+ &__entry->enq, &__entry->enq_seg,
+ &__entry->deq, &__entry->deq_seg,
+ __entry->num_segs,
+ __entry->stream_id,
+ __entry->num_trbs_free,
+ __entry->bounce_buf_len,
+ __entry->cycle_state
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_ring, cdnsp_ring_alloc,
+ TP_PROTO(struct cdnsp_ring *ring),
+ TP_ARGS(ring)
+);
+
+DEFINE_EVENT(cdnsp_log_ring, cdnsp_ring_free,
+ TP_PROTO(struct cdnsp_ring *ring),
+ TP_ARGS(ring)
+);
+
+DEFINE_EVENT(cdnsp_log_ring, cdnsp_set_stream_ring,
+ TP_PROTO(struct cdnsp_ring *ring),
+ TP_ARGS(ring)
+);
+
+DEFINE_EVENT(cdnsp_log_ring, cdnsp_ring_expansion,
+ TP_PROTO(struct cdnsp_ring *ring),
+ TP_ARGS(ring)
+);
+
+DEFINE_EVENT(cdnsp_log_ring, cdnsp_inc_enq,
+ TP_PROTO(struct cdnsp_ring *ring),
+ TP_ARGS(ring)
+);
+
+DEFINE_EVENT(cdnsp_log_ring, cdnsp_inc_deq,
+ TP_PROTO(struct cdnsp_ring *ring),
+ TP_ARGS(ring)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_portsc,
+ TP_PROTO(u32 portnum, u32 portsc),
+ TP_ARGS(portnum, portsc),
+ TP_STRUCT__entry(
+ __field(u32, portnum)
+ __field(u32, portsc)
+ __dynamic_array(char, str, CDNSP_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __entry->portnum = portnum;
+ __entry->portsc = portsc;
+ ),
+ TP_printk("port-%d: %s",
+ __entry->portnum,
+ cdnsp_decode_portsc(__get_str(str), CDNSP_MSG_MAX,
+ __entry->portsc)
+ )
+);
+
+DEFINE_EVENT(cdnsp_log_portsc, cdnsp_handle_port_status,
+ TP_PROTO(u32 portnum, u32 portsc),
+ TP_ARGS(portnum, portsc)
+);
+
+DEFINE_EVENT(cdnsp_log_portsc, cdnsp_link_state_changed,
+ TP_PROTO(u32 portnum, u32 portsc),
+ TP_ARGS(portnum, portsc)
+);
+
+TRACE_EVENT(cdnsp_stream_number,
+ TP_PROTO(struct cdnsp_ep *pep, int num_stream_ctxs, int num_streams),
+ TP_ARGS(pep, num_stream_ctxs, num_streams),
+ TP_STRUCT__entry(
+ __string(name, pep->name)
+ __field(int, num_stream_ctxs)
+ __field(int, num_streams)
+ ),
+ TP_fast_assign(
+ __entry->num_stream_ctxs = num_stream_ctxs;
+ __entry->num_streams = num_streams;
+ ),
+ TP_printk("%s Need %u stream ctx entries for %u stream IDs.",
+ __get_str(name), __entry->num_stream_ctxs,
+ __entry->num_streams)
+);
+
+#endif /* __CDNSP_TRACE_H */
+
+/* this part must be outside header guard */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cdnsp-trace
+
+#include <trace/define_trace.h>
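
For readers less familiar with the kernel tracepoint machinery above: the DECLARE_EVENT_CLASS/DEFINE_EVENT macros only declare the events, and exactly one translation unit must expand them with CREATE_TRACE_POINTS, which is what the small cdnsp-trace.c in the diffstat does. A minimal sketch of that pattern follows; the calling function here is hypothetical and only illustrates how an event is fired.

	/* cdnsp-trace.c: expand the trace metadata exactly once. */
	#define CREATE_TRACE_POINTS
	#include "cdnsp-trace.h"

	/*
	 * Any driver file that includes cdnsp-trace.h can then emit an
	 * event by prefixing its name with trace_, for example:
	 */
	static void example_log_ring(struct cdnsp_ring *ring)
	{
		trace_cdnsp_ring_alloc(ring);	/* records enq/deq, segs, cycle */
	}
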
diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
index 1991cb5cf6bf..199713769289 100644
--- a/drivers/usb/cdns3/core.c
+++ b/drivers/usb/cdns3/core.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Cadence USBSS DRD Driver.
+ * Cadence USBSS and USBSSP DRD Driver.
*
* Copyright (C) 2018-2019 Cadence.
* Copyright (C) 2017-2018 NXP
@@ -19,15 +19,13 @@
#include <linux/io.h>
#include <linux/pm_runtime.h>
-#include "gadget.h"
#include "core.h"
#include "host-export.h"
-#include "gadget-export.h"
#include "drd.h"
-static int cdns3_idle_init(struct cdns3 *cdns);
+static int cdns_idle_init(struct cdns *cdns);
-static int cdns3_role_start(struct cdns3 *cdns, enum usb_role role)
+static int cdns_role_start(struct cdns *cdns, enum usb_role role)
{
int ret;
@@ -41,47 +39,47 @@ static int cdns3_role_start(struct cdns3 *cdns, enum usb_role role)
if (!cdns->roles[role])
return -ENXIO;
- if (cdns->roles[role]->state == CDNS3_ROLE_STATE_ACTIVE)
+ if (cdns->roles[role]->state == CDNS_ROLE_STATE_ACTIVE)
return 0;
mutex_lock(&cdns->mutex);
ret = cdns->roles[role]->start(cdns);
if (!ret)
- cdns->roles[role]->state = CDNS3_ROLE_STATE_ACTIVE;
+ cdns->roles[role]->state = CDNS_ROLE_STATE_ACTIVE;
mutex_unlock(&cdns->mutex);
return ret;
}
-static void cdns3_role_stop(struct cdns3 *cdns)
+static void cdns_role_stop(struct cdns *cdns)
{
enum usb_role role = cdns->role;
if (WARN_ON(role > USB_ROLE_DEVICE))
return;
- if (cdns->roles[role]->state == CDNS3_ROLE_STATE_INACTIVE)
+ if (cdns->roles[role]->state == CDNS_ROLE_STATE_INACTIVE)
return;
mutex_lock(&cdns->mutex);
cdns->roles[role]->stop(cdns);
- cdns->roles[role]->state = CDNS3_ROLE_STATE_INACTIVE;
+ cdns->roles[role]->state = CDNS_ROLE_STATE_INACTIVE;
mutex_unlock(&cdns->mutex);
}
-static void cdns3_exit_roles(struct cdns3 *cdns)
+static void cdns_exit_roles(struct cdns *cdns)
{
- cdns3_role_stop(cdns);
- cdns3_drd_exit(cdns);
+ cdns_role_stop(cdns);
+ cdns_drd_exit(cdns);
}
/**
- * cdns3_core_init_role - initialize role of operation
- * @cdns: Pointer to cdns3 structure
+ * cdns_core_init_role - initialize role of operation
+ * @cdns: Pointer to cdns structure
*
* Returns 0 on success otherwise negative errno
*/
-static int cdns3_core_init_role(struct cdns3 *cdns)
+static int cdns_core_init_role(struct cdns *cdns)
{
struct device *dev = cdns->dev;
enum usb_dr_mode best_dr_mode;
@@ -97,13 +95,23 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
* can be restricted later depending on strap pin configuration.
*/
if (dr_mode == USB_DR_MODE_UNKNOWN) {
- if (IS_ENABLED(CONFIG_USB_CDNS3_HOST) &&
- IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
- dr_mode = USB_DR_MODE_OTG;
- else if (IS_ENABLED(CONFIG_USB_CDNS3_HOST))
- dr_mode = USB_DR_MODE_HOST;
- else if (IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
- dr_mode = USB_DR_MODE_PERIPHERAL;
+ if (cdns->version == CDNSP_CONTROLLER_V2) {
+ if (IS_ENABLED(CONFIG_USB_CDNSP_HOST) &&
+ IS_ENABLED(CONFIG_USB_CDNSP_GADGET))
+ dr_mode = USB_DR_MODE_OTG;
+ else if (IS_ENABLED(CONFIG_USB_CDNSP_HOST))
+ dr_mode = USB_DR_MODE_HOST;
+ else if (IS_ENABLED(CONFIG_USB_CDNSP_GADGET))
+ dr_mode = USB_DR_MODE_PERIPHERAL;
+ } else {
+ if (IS_ENABLED(CONFIG_USB_CDNS3_HOST) &&
+ IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
+ dr_mode = USB_DR_MODE_OTG;
+ else if (IS_ENABLED(CONFIG_USB_CDNS3_HOST))
+ dr_mode = USB_DR_MODE_HOST;
+ else if (IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
+ dr_mode = USB_DR_MODE_PERIPHERAL;
+ }
}
/*
@@ -112,7 +120,7 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
*/
best_dr_mode = cdns->dr_mode;
- ret = cdns3_idle_init(cdns);
+ ret = cdns_idle_init(cdns);
if (ret)
return ret;
@@ -128,7 +136,14 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
dr_mode = best_dr_mode;
if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) {
- ret = cdns3_host_init(cdns);
+ if ((cdns->version == CDNSP_CONTROLLER_V2 &&
+ IS_ENABLED(CONFIG_USB_CDNSP_HOST)) ||
+ (cdns->version < CDNSP_CONTROLLER_V2 &&
+ IS_ENABLED(CONFIG_USB_CDNS3_HOST)))
+ ret = cdns_host_init(cdns);
+ else
+ ret = -ENXIO;
+
if (ret) {
dev_err(dev, "Host initialization failed with %d\n",
ret);
@@ -137,7 +152,11 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
}
if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) {
- ret = cdns3_gadget_init(cdns);
+ if (cdns->gadget_init)
+ ret = cdns->gadget_init(cdns);
+ else
+ ret = -ENXIO;
+
if (ret) {
dev_err(dev, "Device initialization failed with %d\n",
ret);
@@ -147,28 +166,28 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
cdns->dr_mode = dr_mode;
- ret = cdns3_drd_update_mode(cdns);
+ ret = cdns_drd_update_mode(cdns);
if (ret)
goto err;
/* Initialize idle role to start with */
- ret = cdns3_role_start(cdns, USB_ROLE_NONE);
+ ret = cdns_role_start(cdns, USB_ROLE_NONE);
if (ret)
goto err;
switch (cdns->dr_mode) {
case USB_DR_MODE_OTG:
- ret = cdns3_hw_role_switch(cdns);
+ ret = cdns_hw_role_switch(cdns);
if (ret)
goto err;
break;
case USB_DR_MODE_PERIPHERAL:
- ret = cdns3_role_start(cdns, USB_ROLE_DEVICE);
+ ret = cdns_role_start(cdns, USB_ROLE_DEVICE);
if (ret)
goto err;
break;
case USB_DR_MODE_HOST:
- ret = cdns3_role_start(cdns, USB_ROLE_HOST);
+ ret = cdns_role_start(cdns, USB_ROLE_HOST);
if (ret)
goto err;
break;
@@ -179,32 +198,32 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
return 0;
err:
- cdns3_exit_roles(cdns);
+ cdns_exit_roles(cdns);
return ret;
}
/**
- * cdns3_hw_role_state_machine - role switch state machine based on hw events.
+ * cdns_hw_role_state_machine - role switch state machine based on hw events.
* @cdns: Pointer to controller structure.
*
* Returns next role to be entered based on hw events.
*/
-static enum usb_role cdns3_hw_role_state_machine(struct cdns3 *cdns)
+static enum usb_role cdns_hw_role_state_machine(struct cdns *cdns)
{
enum usb_role role = USB_ROLE_NONE;
int id, vbus;
if (cdns->dr_mode != USB_DR_MODE_OTG) {
- if (cdns3_is_host(cdns))
+ if (cdns_is_host(cdns))
role = USB_ROLE_HOST;
- if (cdns3_is_device(cdns))
+ if (cdns_is_device(cdns))
role = USB_ROLE_DEVICE;
return role;
}
- id = cdns3_get_id(cdns);
- vbus = cdns3_get_vbus(cdns);
+ id = cdns_get_id(cdns);
+ vbus = cdns_get_vbus(cdns);
/*
* Role change state machine
@@ -240,28 +259,28 @@ static enum usb_role cdns3_hw_role_state_machine(struct cdns3 *cdns)
return role;
}
-static int cdns3_idle_role_start(struct cdns3 *cdns)
+static int cdns_idle_role_start(struct cdns *cdns)
{
return 0;
}
-static void cdns3_idle_role_stop(struct cdns3 *cdns)
+static void cdns_idle_role_stop(struct cdns *cdns)
{
/* Program Lane swap and bring PHY out of RESET */
phy_reset(cdns->usb3_phy);
}
-static int cdns3_idle_init(struct cdns3 *cdns)
+static int cdns_idle_init(struct cdns *cdns)
{
- struct cdns3_role_driver *rdrv;
+ struct cdns_role_driver *rdrv;
rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
if (!rdrv)
return -ENOMEM;
- rdrv->start = cdns3_idle_role_start;
- rdrv->stop = cdns3_idle_role_stop;
- rdrv->state = CDNS3_ROLE_STATE_INACTIVE;
+ rdrv->start = cdns_idle_role_start;
+ rdrv->stop = cdns_idle_role_stop;
+ rdrv->state = CDNS_ROLE_STATE_INACTIVE;
rdrv->suspend = NULL;
rdrv->resume = NULL;
rdrv->name = "idle";
@@ -272,10 +291,10 @@ static int cdns3_idle_init(struct cdns3 *cdns)
}
/**
- * cdns3_hw_role_switch - switch roles based on HW state
+ * cdns_hw_role_switch - switch roles based on HW state
* @cdns: controller
*/
-int cdns3_hw_role_switch(struct cdns3 *cdns)
+int cdns_hw_role_switch(struct cdns *cdns)
{
enum usb_role real_role, current_role;
int ret = 0;
@@ -287,22 +306,22 @@ int cdns3_hw_role_switch(struct cdns3 *cdns)
pm_runtime_get_sync(cdns->dev);
current_role = cdns->role;
- real_role = cdns3_hw_role_state_machine(cdns);
+ real_role = cdns_hw_role_state_machine(cdns);
/* Do nothing if nothing changed */
if (current_role == real_role)
goto exit;
- cdns3_role_stop(cdns);
+ cdns_role_stop(cdns);
dev_dbg(cdns->dev, "Switching role %d -> %d", current_role, real_role);
- ret = cdns3_role_start(cdns, real_role);
+ ret = cdns_role_start(cdns, real_role);
if (ret) {
/* Back to current role */
dev_err(cdns->dev, "set %d has failed, back to %d\n",
real_role, current_role);
- ret = cdns3_role_start(cdns, current_role);
+ ret = cdns_role_start(cdns, current_role);
if (ret)
dev_err(cdns->dev, "back to %d failed too\n",
current_role);
@@ -319,15 +338,15 @@ exit:
*
* Returns role
*/
-static enum usb_role cdns3_role_get(struct usb_role_switch *sw)
+static enum usb_role cdns_role_get(struct usb_role_switch *sw)
{
- struct cdns3 *cdns = usb_role_switch_get_drvdata(sw);
+ struct cdns *cdns = usb_role_switch_get_drvdata(sw);
return cdns->role;
}
/**
- * cdns3_role_set - set current role of controller.
+ * cdns_role_set - set current role of controller.
*
* @sw: pointer to USB role switch structure
* @role: the previous role
@@ -335,9 +354,9 @@ static enum usb_role cdns3_role_get(struct usb_role_switch *sw)
* - Role switch for dual-role devices
* - USB_ROLE_GADGET <--> USB_ROLE_NONE for peripheral-only devices
*/
-static int cdns3_role_set(struct usb_role_switch *sw, enum usb_role role)
+static int cdns_role_set(struct usb_role_switch *sw, enum usb_role role)
{
- struct cdns3 *cdns = usb_role_switch_get_drvdata(sw);
+ struct cdns *cdns = usb_role_switch_get_drvdata(sw);
int ret = 0;
pm_runtime_get_sync(cdns->dev);
@@ -365,8 +384,8 @@ static int cdns3_role_set(struct usb_role_switch *sw, enum usb_role role)
}
}
- cdns3_role_stop(cdns);
- ret = cdns3_role_start(cdns, role);
+ cdns_role_stop(cdns);
+ ret = cdns_role_start(cdns, role);
if (ret)
dev_err(cdns->dev, "set role %d has failed\n", role);
@@ -375,37 +394,17 @@ pm_put:
return ret;
}
-static int set_phy_power_on(struct cdns3 *cdns)
-{
- int ret;
-
- ret = phy_power_on(cdns->usb2_phy);
- if (ret)
- return ret;
-
- ret = phy_power_on(cdns->usb3_phy);
- if (ret)
- phy_power_off(cdns->usb2_phy);
-
- return ret;
-}
-
-static void set_phy_power_off(struct cdns3 *cdns)
-{
- phy_power_off(cdns->usb3_phy);
- phy_power_off(cdns->usb2_phy);
-}
/**
- * cdns3_wakeup_irq - interrupt handler for wakeup events
- * @irq: irq number for cdns3 core device
- * @data: structure of cdns3
+ * cdns_wakeup_irq - interrupt handler for wakeup events
+ * @irq: irq number for cdns3/cdnsp core device
+ * @data: structure of cdns
*
* Returns IRQ_HANDLED or IRQ_NONE
*/
-static irqreturn_t cdns3_wakeup_irq(int irq, void *data)
+static irqreturn_t cdns_wakeup_irq(int irq, void *data)
{
- struct cdns3 *cdns = data;
+ struct cdns *cdns = data;
if (cdns->in_lpm) {
disable_irq_nosync(irq);
@@ -420,17 +419,14 @@ static irqreturn_t cdns3_wakeup_irq(int irq, void *data)
}
/**
- * cdns3_probe - probe for cdns3 core device
- * @pdev: Pointer to cdns3 core platform device
+ * cdns_probe - probe for cdns3/cdnsp core device
+ * @cdns: Pointer to cdns structure.
*
* Returns 0 on success otherwise negative errno
*/
-static int cdns3_probe(struct platform_device *pdev)
+int cdns_init(struct cdns *cdns)
{
- struct device *dev = &pdev->dev;
- struct resource *res;
- struct cdns3 *cdns;
- void __iomem *regs;
+ struct device *dev = cdns->dev;
int ret;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
@@ -439,259 +435,78 @@ static int cdns3_probe(struct platform_device *pdev)
return ret;
}
- cdns = devm_kzalloc(dev, sizeof(*cdns), GFP_KERNEL);
- if (!cdns)
- return -ENOMEM;
-
- cdns->dev = dev;
- cdns->pdata = dev_get_platdata(dev);
-
- platform_set_drvdata(pdev, cdns);
-
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "host");
- if (!res) {
- dev_err(dev, "missing host IRQ\n");
- return -ENODEV;
- }
-
- cdns->xhci_res[0] = *res;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "xhci");
- if (!res) {
- dev_err(dev, "couldn't get xhci resource\n");
- return -ENXIO;
- }
-
- cdns->xhci_res[1] = *res;
-
- cdns->dev_irq = platform_get_irq_byname(pdev, "peripheral");
- if (cdns->dev_irq < 0)
- return cdns->dev_irq;
-
- regs = devm_platform_ioremap_resource_byname(pdev, "dev");
- if (IS_ERR(regs))
- return PTR_ERR(regs);
- cdns->dev_regs = regs;
-
- cdns->otg_irq = platform_get_irq_byname(pdev, "otg");
- if (cdns->otg_irq < 0)
- return cdns->otg_irq;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otg");
- if (!res) {
- dev_err(dev, "couldn't get otg resource\n");
- return -ENXIO;
- }
-
- cdns->phyrst_a_enable = device_property_read_bool(dev, "cdns,phyrst-a-enable");
-
- cdns->otg_res = *res;
-
- cdns->wakeup_irq = platform_get_irq_byname_optional(pdev, "wakeup");
- if (cdns->wakeup_irq == -EPROBE_DEFER)
- return cdns->wakeup_irq;
- else if (cdns->wakeup_irq == 0)
- return -EINVAL;
-
- if (cdns->wakeup_irq < 0) {
- dev_dbg(dev, "couldn't get wakeup irq\n");
- cdns->wakeup_irq = 0x0;
- }
-
mutex_init(&cdns->mutex);
- cdns->usb2_phy = devm_phy_optional_get(dev, "cdns3,usb2-phy");
- if (IS_ERR(cdns->usb2_phy))
- return PTR_ERR(cdns->usb2_phy);
-
- ret = phy_init(cdns->usb2_phy);
- if (ret)
- return ret;
-
- cdns->usb3_phy = devm_phy_optional_get(dev, "cdns3,usb3-phy");
- if (IS_ERR(cdns->usb3_phy))
- return PTR_ERR(cdns->usb3_phy);
-
- ret = phy_init(cdns->usb3_phy);
- if (ret)
- goto err1;
-
- ret = set_phy_power_on(cdns);
- if (ret)
- goto err2;
-
if (device_property_read_bool(dev, "usb-role-switch")) {
struct usb_role_switch_desc sw_desc = { };
- sw_desc.set = cdns3_role_set;
- sw_desc.get = cdns3_role_get;
+ sw_desc.set = cdns_role_set;
+ sw_desc.get = cdns_role_get;
sw_desc.allow_userspace_control = true;
sw_desc.driver_data = cdns;
sw_desc.fwnode = dev->fwnode;
cdns->role_sw = usb_role_switch_register(dev, &sw_desc);
if (IS_ERR(cdns->role_sw)) {
- ret = PTR_ERR(cdns->role_sw);
dev_warn(dev, "Unable to register Role Switch\n");
- goto err3;
+ return PTR_ERR(cdns->role_sw);
}
}
if (cdns->wakeup_irq) {
ret = devm_request_irq(cdns->dev, cdns->wakeup_irq,
- cdns3_wakeup_irq,
+ cdns_wakeup_irq,
IRQF_SHARED,
dev_name(cdns->dev), cdns);
if (ret) {
dev_err(cdns->dev, "couldn't register wakeup irq handler\n");
- goto err4;
+ goto role_switch_unregister;
}
}
- ret = cdns3_drd_init(cdns);
+ ret = cdns_drd_init(cdns);
if (ret)
- goto err4;
+ goto init_failed;
- ret = cdns3_core_init_role(cdns);
+ ret = cdns_core_init_role(cdns);
if (ret)
- goto err4;
+ goto init_failed;
spin_lock_init(&cdns->lock);
- device_set_wakeup_capable(dev, true);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- if (!(cdns->pdata && (cdns->pdata->quirks & CDNS3_DEFAULT_PM_RUNTIME_ALLOW)))
- pm_runtime_forbid(dev);
- /*
- * The controller needs less time between bus and controller suspend,
- * and we also needs a small delay to avoid frequently entering low
- * power mode.
- */
- pm_runtime_set_autosuspend_delay(dev, 20);
- pm_runtime_mark_last_busy(dev);
- pm_runtime_use_autosuspend(dev);
dev_dbg(dev, "Cadence USB3 core: probe succeed\n");
return 0;
-err4:
- cdns3_drd_exit(cdns);
+init_failed:
+ cdns_drd_exit(cdns);
+role_switch_unregister:
if (cdns->role_sw)
usb_role_switch_unregister(cdns->role_sw);
-err3:
- set_phy_power_off(cdns);
-err2:
- phy_exit(cdns->usb3_phy);
-err1:
- phy_exit(cdns->usb2_phy);
return ret;
}
+EXPORT_SYMBOL_GPL(cdns_init);
/**
- * cdns3_remove - unbind drd driver and clean up
- * @pdev: Pointer to Linux platform device
+ * cdns_remove - unbind drd driver and clean up
+ * @cdns: Pointer to cdns structure.
*
* Returns 0 on success otherwise negative errno
*/
-static int cdns3_remove(struct platform_device *pdev)
+int cdns_remove(struct cdns *cdns)
{
- struct cdns3 *cdns = platform_get_drvdata(pdev);
-
- pm_runtime_get_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
- pm_runtime_put_noidle(&pdev->dev);
- cdns3_exit_roles(cdns);
+ cdns_exit_roles(cdns);
usb_role_switch_unregister(cdns->role_sw);
- set_phy_power_off(cdns);
- phy_exit(cdns->usb2_phy);
- phy_exit(cdns->usb3_phy);
- return 0;
-}
-
-#ifdef CONFIG_PM
-
-static int cdns3_set_platform_suspend(struct device *dev,
- bool suspend, bool wakeup)
-{
- struct cdns3 *cdns = dev_get_drvdata(dev);
- int ret = 0;
-
- if (cdns->pdata && cdns->pdata->platform_suspend)
- ret = cdns->pdata->platform_suspend(dev, suspend, wakeup);
-
- return ret;
-}
-
-static int cdns3_controller_suspend(struct device *dev, pm_message_t msg)
-{
- struct cdns3 *cdns = dev_get_drvdata(dev);
- bool wakeup;
- unsigned long flags;
-
- if (cdns->in_lpm)
- return 0;
-
- if (PMSG_IS_AUTO(msg))
- wakeup = true;
- else
- wakeup = device_may_wakeup(dev);
-
- cdns3_set_platform_suspend(cdns->dev, true, wakeup);
- set_phy_power_off(cdns);
- spin_lock_irqsave(&cdns->lock, flags);
- cdns->in_lpm = true;
- spin_unlock_irqrestore(&cdns->lock, flags);
- dev_dbg(cdns->dev, "%s ends\n", __func__);
return 0;
}
+EXPORT_SYMBOL_GPL(cdns_remove);
-static int cdns3_controller_resume(struct device *dev, pm_message_t msg)
-{
- struct cdns3 *cdns = dev_get_drvdata(dev);
- int ret;
- unsigned long flags;
-
- if (!cdns->in_lpm)
- return 0;
-
- ret = set_phy_power_on(cdns);
- if (ret)
- return ret;
-
- cdns3_set_platform_suspend(cdns->dev, false, false);
-
- spin_lock_irqsave(&cdns->lock, flags);
- if (cdns->roles[cdns->role]->resume && !PMSG_IS_AUTO(msg))
- cdns->roles[cdns->role]->resume(cdns, false);
-
- cdns->in_lpm = false;
- spin_unlock_irqrestore(&cdns->lock, flags);
- if (cdns->wakeup_pending) {
- cdns->wakeup_pending = false;
- enable_irq(cdns->wakeup_irq);
- }
- dev_dbg(cdns->dev, "%s ends\n", __func__);
-
- return ret;
-}
-
-static int cdns3_runtime_suspend(struct device *dev)
-{
- return cdns3_controller_suspend(dev, PMSG_AUTO_SUSPEND);
-}
-
-static int cdns3_runtime_resume(struct device *dev)
-{
- return cdns3_controller_resume(dev, PMSG_AUTO_RESUME);
-}
#ifdef CONFIG_PM_SLEEP
-
-static int cdns3_suspend(struct device *dev)
+int cdns_suspend(struct cdns *cdns)
{
- struct cdns3 *cdns = dev_get_drvdata(dev);
+ struct device *dev = cdns->dev;
unsigned long flags;
if (pm_runtime_status_suspended(dev))
@@ -703,52 +518,30 @@ static int cdns3_suspend(struct device *dev)
spin_unlock_irqrestore(&cdns->lock, flags);
}
- return cdns3_controller_suspend(dev, PMSG_SUSPEND);
+ return 0;
}
+EXPORT_SYMBOL_GPL(cdns_suspend);
-static int cdns3_resume(struct device *dev)
+int cdns_resume(struct cdns *cdns, u8 set_active)
{
- int ret;
+ struct device *dev = cdns->dev;
- ret = cdns3_controller_resume(dev, PMSG_RESUME);
- if (ret)
- return ret;
+ if (cdns->roles[cdns->role]->resume)
+ cdns->roles[cdns->role]->resume(cdns, false);
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ if (set_active) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
- return ret;
+ return 0;
}
+EXPORT_SYMBOL_GPL(cdns_resume);
#endif /* CONFIG_PM_SLEEP */
-#endif /* CONFIG_PM */
-
-static const struct dev_pm_ops cdns3_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(cdns3_suspend, cdns3_resume)
- SET_RUNTIME_PM_OPS(cdns3_runtime_suspend, cdns3_runtime_resume, NULL)
-};
-
-#ifdef CONFIG_OF
-static const struct of_device_id of_cdns3_match[] = {
- { .compatible = "cdns,usb3" },
- { },
-};
-MODULE_DEVICE_TABLE(of, of_cdns3_match);
-#endif
-
-static struct platform_driver cdns3_driver = {
- .probe = cdns3_probe,
- .remove = cdns3_remove,
- .driver = {
- .name = "cdns-usb3",
- .of_match_table = of_match_ptr(of_cdns3_match),
- .pm = &cdns3_pm_ops,
- },
-};
-
-module_platform_driver(cdns3_driver);
-
-MODULE_ALIAS("platform:cdns3");
+
+MODULE_AUTHOR("Peter Chen <peter.chen@nxp.com>");
MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Cadence USB3 DRD Controller Driver");
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_DESCRIPTION("Cadence USBSS and USBSSP DRD Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h
index 3176f924293a..ab0cb68acd23 100644
--- a/drivers/usb/cdns3/core.h
+++ b/drivers/usb/cdns3/core.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Cadence USBSS DRD Header File.
+ * Cadence USBSS and USBSSP DRD Header File.
*
* Copyright (C) 2017-2018 NXP
* Copyright (C) 2018-2019 Cadence.
@@ -14,10 +14,10 @@
#ifndef __LINUX_CDNS3_CORE_H
#define __LINUX_CDNS3_CORE_H
-struct cdns3;
+struct cdns;
/**
- * struct cdns3_role_driver - host/gadget role driver
+ * struct cdns_role_driver - host/gadget role driver
* @start: start this role
* @stop: stop this role
* @suspend: suspend callback for this role
@@ -26,18 +26,18 @@ struct cdns3;
* @name: role name string (host/gadget)
* @state: current state
*/
-struct cdns3_role_driver {
- int (*start)(struct cdns3 *cdns);
- void (*stop)(struct cdns3 *cdns);
- int (*suspend)(struct cdns3 *cdns, bool do_wakeup);
- int (*resume)(struct cdns3 *cdns, bool hibernated);
+struct cdns_role_driver {
+ int (*start)(struct cdns *cdns);
+ void (*stop)(struct cdns *cdns);
+ int (*suspend)(struct cdns *cdns, bool do_wakeup);
+ int (*resume)(struct cdns *cdns, bool hibernated);
const char *name;
-#define CDNS3_ROLE_STATE_INACTIVE 0
-#define CDNS3_ROLE_STATE_ACTIVE 1
+#define CDNS_ROLE_STATE_INACTIVE 0
+#define CDNS_ROLE_STATE_ACTIVE 1
int state;
};
-#define CDNS3_XHCI_RESOURCES_NUM 2
+#define CDNS_XHCI_RESOURCES_NUM 2
struct cdns3_platform_data {
int (*platform_suspend)(struct device *dev,
@@ -47,7 +47,7 @@ struct cdns3_platform_data {
};
/**
- * struct cdns3 - Representation of Cadence USB3 DRD controller.
+ * struct cdns - Representation of Cadence USB3 DRD controller.
* @dev: pointer to Cadence device struct
* @xhci_regs: pointer to base of xhci registers
* @xhci_res: the resource for xhci
@@ -55,14 +55,16 @@ struct cdns3_platform_data {
* @otg_res: the resource for otg
* @otg_v0_regs: pointer to base of v0 otg registers
* @otg_v1_regs: pointer to base of v1 otg registers
+ * @otg_cdnsp_regs: pointer to base of CDNSP otg registers
* @otg_regs: pointer to base of otg registers
+ * @otg_irq_regs: pointer to interrupt registers
* @otg_irq: irq number for otg controller
* @dev_irq: irq number for device controller
* @wakeup_irq: irq number for wakeup event, it is optional
* @roles: array of supported roles for this controller
* @role: current role
- * @host_dev: the child host device pointer for cdns3 core
- * @gadget_dev: the child gadget device pointer for cdns3 core
+ * @host_dev: the child host device pointer for cdns core
+ * @gadget_dev: the child gadget device pointer
* @usb2_phy: pointer to USB2 PHY
* @usb3_phy: pointer to USB3 PHY
* @mutex: the mutex for concurrent code at driver
@@ -76,29 +78,33 @@ struct cdns3_platform_data {
* @pdata: platform data from glue layer
* @lock: spinlock structure
* @xhci_plat_data: xhci private data structure pointer
+ * @gadget_init: pointer to gadget initialization function
*/
-struct cdns3 {
+struct cdns {
struct device *dev;
void __iomem *xhci_regs;
- struct resource xhci_res[CDNS3_XHCI_RESOURCES_NUM];
+ struct resource xhci_res[CDNS_XHCI_RESOURCES_NUM];
struct cdns3_usb_regs __iomem *dev_regs;
- struct resource otg_res;
- struct cdns3_otg_legacy_regs *otg_v0_regs;
- struct cdns3_otg_regs *otg_v1_regs;
- struct cdns3_otg_common_regs *otg_regs;
+ struct resource otg_res;
+ struct cdns3_otg_legacy_regs __iomem *otg_v0_regs;
+ struct cdns3_otg_regs __iomem *otg_v1_regs;
+ struct cdnsp_otg_regs __iomem *otg_cdnsp_regs;
+ struct cdns_otg_common_regs __iomem *otg_regs;
+ struct cdns_otg_irq_regs __iomem *otg_irq_regs;
#define CDNS3_CONTROLLER_V0 0
#define CDNS3_CONTROLLER_V1 1
+#define CDNSP_CONTROLLER_V2 2
u32 version;
bool phyrst_a_enable;
int otg_irq;
int dev_irq;
int wakeup_irq;
- struct cdns3_role_driver *roles[USB_ROLE_DEVICE + 1];
+ struct cdns_role_driver *roles[USB_ROLE_DEVICE + 1];
enum usb_role role;
struct platform_device *host_dev;
- struct cdns3_device *gadget_dev;
+ void *gadget_dev;
struct phy *usb2_phy;
struct phy *usb3_phy;
 /* mutex used in workqueue */
@@ -110,8 +116,21 @@ struct cdns3 {
struct cdns3_platform_data *pdata;
spinlock_t lock;
struct xhci_plat_priv *xhci_plat_data;
+
+ int (*gadget_init)(struct cdns *cdns);
};
-int cdns3_hw_role_switch(struct cdns3 *cdns);
+int cdns_hw_role_switch(struct cdns *cdns);
+int cdns_init(struct cdns *cdns);
+int cdns_remove(struct cdns *cdns);
+#ifdef CONFIG_PM_SLEEP
+int cdns_resume(struct cdns *cdns, u8 set_active);
+int cdns_suspend(struct cdns *cdns);
+#else /* CONFIG_PM_SLEEP */
+static inline int cdns_resume(struct cdns *cdns, u8 set_active)
+{ return 0; }
+static inline int cdns_suspend(struct cdns *cdns)
+{ return 0; }
+#endif /* CONFIG_PM_SLEEP */
#endif /* __LINUX_CDNS3_CORE_H */
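
The CONFIG_PM_SLEEP stubs at the end let glue drivers call cdns_suspend()/cdns_resume() unconditionally. A sketch of how a glue layer might wire them into its dev_pm_ops (hypothetical names, assuming drvdata points at struct cdns as in the probe above):

	static int example_glue_suspend(struct device *dev)
	{
		struct cdns *cdns = dev_get_drvdata(dev);

		return cdns_suspend(cdns);
	}

	static int example_glue_resume(struct device *dev)
	{
		struct cdns *cdns = dev_get_drvdata(dev);

		/* set_active = 1: re-activate runtime PM on system resume */
		return cdns_resume(cdns, 1);
	}

	static SIMPLE_DEV_PM_OPS(example_glue_pm_ops,
				 example_glue_suspend, example_glue_resume);
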
diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
index 38ccd29e4cde..fa5318ade3e1 100644
--- a/drivers/usb/cdns3/drd.c
+++ b/drivers/usb/cdns3/drd.c
@@ -1,35 +1,33 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Cadence USBSS DRD Driver.
+ * Cadence USBSS and USBSSP DRD Driver.
*
- * Copyright (C) 2018-2019 Cadence.
+ * Copyright (C) 2018-2020 Cadence.
* Copyright (C) 2019 Texas Instruments
*
* Author: Pawel Laszczak <pawell@cadence.com>
* Roger Quadros <rogerq@ti.com>
*
- *
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/usb/otg.h>
-#include <linux/phy/phy.h>
-#include "gadget.h"
#include "drd.h"
#include "core.h"
/**
- * cdns3_set_mode - change mode of OTG Core
+ * cdns_set_mode - change mode of OTG Core
* @cdns: pointer to context structure
* @mode: selected mode from cdns_role
*
* Returns 0 on success otherwise negative errno
*/
-int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode)
+static int cdns_set_mode(struct cdns *cdns, enum usb_dr_mode mode)
{
+ void __iomem *override_reg;
u32 reg;
switch (mode) {
@@ -39,11 +37,24 @@ int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode)
break;
case USB_DR_MODE_OTG:
dev_dbg(cdns->dev, "Set controller to OTG mode\n");
- if (cdns->version == CDNS3_CONTROLLER_V1) {
- reg = readl(&cdns->otg_v1_regs->override);
+
+ if (cdns->version == CDNSP_CONTROLLER_V2)
+ override_reg = &cdns->otg_cdnsp_regs->override;
+ else if (cdns->version == CDNS3_CONTROLLER_V1)
+ override_reg = &cdns->otg_v1_regs->override;
+ else
+ override_reg = &cdns->otg_v0_regs->ctrl1;
+
+ reg = readl(override_reg);
+
+ if (cdns->version != CDNS3_CONTROLLER_V0)
reg |= OVERRIDE_IDPULLUP;
- writel(reg, &cdns->otg_v1_regs->override);
+ else
+ reg |= OVERRIDE_IDPULLUP_V0;
+
+ writel(reg, override_reg);
+ if (cdns->version == CDNS3_CONTROLLER_V1) {
/*
* Enable work around feature built into the
* controller to address issue with RX Sensitivity
@@ -55,10 +66,6 @@ int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode)
reg |= PHYRST_CFG_PHYRST_A_ENABLE;
writel(reg, &cdns->otg_v1_regs->phyrst_cfg);
}
- } else {
- reg = readl(&cdns->otg_v0_regs->ctrl1);
- reg |= OVERRIDE_IDPULLUP_V0;
- writel(reg, &cdns->otg_v0_regs->ctrl1);
}
/*
@@ -76,7 +83,7 @@ int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode)
return 0;
}
-int cdns3_get_id(struct cdns3 *cdns)
+int cdns_get_id(struct cdns *cdns)
{
int id;
@@ -86,7 +93,7 @@ int cdns3_get_id(struct cdns3 *cdns)
return id;
}
-int cdns3_get_vbus(struct cdns3 *cdns)
+int cdns_get_vbus(struct cdns *cdns)
{
int vbus;
@@ -96,64 +103,95 @@ int cdns3_get_vbus(struct cdns3 *cdns)
return vbus;
}
-bool cdns3_is_host(struct cdns3 *cdns)
+void cdns_clear_vbus(struct cdns *cdns)
+{
+ u32 reg;
+
+ if (cdns->version != CDNSP_CONTROLLER_V2)
+ return;
+
+ reg = readl(&cdns->otg_cdnsp_regs->override);
+ reg |= OVERRIDE_SESS_VLD_SEL;
+ writel(reg, &cdns->otg_cdnsp_regs->override);
+}
+EXPORT_SYMBOL_GPL(cdns_clear_vbus);
+
+void cdns_set_vbus(struct cdns *cdns)
+{
+ u32 reg;
+
+ if (cdns->version != CDNSP_CONTROLLER_V2)
+ return;
+
+ reg = readl(&cdns->otg_cdnsp_regs->override);
+ reg &= ~OVERRIDE_SESS_VLD_SEL;
+ writel(reg, &cdns->otg_cdnsp_regs->override);
+}
+EXPORT_SYMBOL_GPL(cdns_set_vbus);
+
+bool cdns_is_host(struct cdns *cdns)
{
if (cdns->dr_mode == USB_DR_MODE_HOST)
return true;
- else if (cdns3_get_id(cdns) == CDNS3_ID_HOST)
+ else if (cdns_get_id(cdns) == CDNS3_ID_HOST)
return true;
return false;
}
-bool cdns3_is_device(struct cdns3 *cdns)
+bool cdns_is_device(struct cdns *cdns)
{
if (cdns->dr_mode == USB_DR_MODE_PERIPHERAL)
return true;
else if (cdns->dr_mode == USB_DR_MODE_OTG)
- if (cdns3_get_id(cdns) == CDNS3_ID_PERIPHERAL)
+ if (cdns_get_id(cdns) == CDNS3_ID_PERIPHERAL)
return true;
return false;
}
/**
- * cdns3_otg_disable_irq - Disable all OTG interrupts
+ * cdns_otg_disable_irq - Disable all OTG interrupts
* @cdns: Pointer to controller context structure
*/
-static void cdns3_otg_disable_irq(struct cdns3 *cdns)
+static void cdns_otg_disable_irq(struct cdns *cdns)
{
- writel(0, &cdns->otg_regs->ien);
+ writel(0, &cdns->otg_irq_regs->ien);
}
/**
- * cdns3_otg_enable_irq - enable id and sess_valid interrupts
+ * cdns_otg_enable_irq - enable id and sess_valid interrupts
* @cdns: Pointer to controller context structure
*/
-static void cdns3_otg_enable_irq(struct cdns3 *cdns)
+static void cdns_otg_enable_irq(struct cdns *cdns)
{
writel(OTGIEN_ID_CHANGE_INT | OTGIEN_VBUSVALID_RISE_INT |
- OTGIEN_VBUSVALID_FALL_INT, &cdns->otg_regs->ien);
+ OTGIEN_VBUSVALID_FALL_INT, &cdns->otg_irq_regs->ien);
}
/**
- * cdns3_drd_host_on - start host.
+ * cdns_drd_host_on - start host.
* @cdns: Pointer to controller context structure.
*
* Returns 0 on success otherwise negative errno.
*/
-int cdns3_drd_host_on(struct cdns3 *cdns)
+int cdns_drd_host_on(struct cdns *cdns)
{
- u32 val;
+ u32 val, ready_bit;
int ret;
/* Enable host mode. */
writel(OTGCMD_HOST_BUS_REQ | OTGCMD_OTG_DIS,
&cdns->otg_regs->cmd);
+ if (cdns->version == CDNSP_CONTROLLER_V2)
+ ready_bit = OTGSTS_CDNSP_XHCI_READY;
+ else
+ ready_bit = OTGSTS_CDNS3_XHCI_READY;
+
dev_dbg(cdns->dev, "Waiting till Host mode is turned on\n");
ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val,
- val & OTGSTS_XHCI_READY, 1, 100000);
+ val & ready_bit, 1, 100000);
if (ret)
dev_err(cdns->dev, "timeout waiting for xhci_ready\n");
@@ -163,10 +201,10 @@ int cdns3_drd_host_on(struct cdns3 *cdns)
}
/**
- * cdns3_drd_host_off - stop host.
+ * cdns_drd_host_off - stop host.
* @cdns: Pointer to controller context structure.
*/
-void cdns3_drd_host_off(struct cdns3 *cdns)
+void cdns_drd_host_off(struct cdns *cdns)
{
u32 val;
@@ -182,24 +220,29 @@ void cdns3_drd_host_off(struct cdns3 *cdns)
}
/**
- * cdns3_drd_gadget_on - start gadget.
+ * cdns_drd_gadget_on - start gadget.
* @cdns: Pointer to controller context structure.
*
* Returns 0 on success otherwise negative errno
*/
-int cdns3_drd_gadget_on(struct cdns3 *cdns)
+int cdns_drd_gadget_on(struct cdns *cdns)
{
- int ret, val;
u32 reg = OTGCMD_OTG_DIS;
+ u32 ready_bit;
+ int ret, val;
/* switch OTG core */
writel(OTGCMD_DEV_BUS_REQ | reg, &cdns->otg_regs->cmd);
dev_dbg(cdns->dev, "Waiting till Device mode is turned on\n");
+ if (cdns->version == CDNSP_CONTROLLER_V2)
+ ready_bit = OTGSTS_CDNSP_DEV_READY;
+ else
+ ready_bit = OTGSTS_CDNS3_DEV_READY;
+
ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val,
- val & OTGSTS_DEV_READY,
- 1, 100000);
+ val & ready_bit, 1, 100000);
if (ret) {
dev_err(cdns->dev, "timeout waiting for dev_ready\n");
return ret;
@@ -208,12 +251,13 @@ int cdns3_drd_gadget_on(struct cdns3 *cdns)
phy_set_mode(cdns->usb3_phy, PHY_MODE_USB_DEVICE);
return 0;
}
+EXPORT_SYMBOL_GPL(cdns_drd_gadget_on);
/**
- * cdns3_drd_gadget_off - stop gadget.
+ * cdns_drd_gadget_off - stop gadget.
* @cdns: Pointer to controller context structure.
*/
-void cdns3_drd_gadget_off(struct cdns3 *cdns)
+void cdns_drd_gadget_off(struct cdns *cdns)
{
u32 val;
@@ -231,49 +275,50 @@ void cdns3_drd_gadget_off(struct cdns3 *cdns)
1, 2000000);
phy_set_mode(cdns->usb3_phy, PHY_MODE_INVALID);
}
+EXPORT_SYMBOL_GPL(cdns_drd_gadget_off);
/**
- * cdns3_init_otg_mode - initialize drd controller
+ * cdns_init_otg_mode - initialize drd controller
* @cdns: Pointer to controller context structure
*
* Returns 0 on success otherwise negative errno
*/
-static int cdns3_init_otg_mode(struct cdns3 *cdns)
+static int cdns_init_otg_mode(struct cdns *cdns)
{
int ret;
- cdns3_otg_disable_irq(cdns);
+ cdns_otg_disable_irq(cdns);
/* clear all interrupts */
- writel(~0, &cdns->otg_regs->ivect);
+ writel(~0, &cdns->otg_irq_regs->ivect);
- ret = cdns3_set_mode(cdns, USB_DR_MODE_OTG);
+ ret = cdns_set_mode(cdns, USB_DR_MODE_OTG);
if (ret)
return ret;
- cdns3_otg_enable_irq(cdns);
+ cdns_otg_enable_irq(cdns);
return 0;
}
/**
- * cdns3_drd_update_mode - initialize mode of operation
+ * cdns_drd_update_mode - initialize mode of operation
* @cdns: Pointer to controller context structure
*
* Returns 0 on success otherwise negative errno
*/
-int cdns3_drd_update_mode(struct cdns3 *cdns)
+int cdns_drd_update_mode(struct cdns *cdns)
{
int ret;
switch (cdns->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
- ret = cdns3_set_mode(cdns, USB_DR_MODE_PERIPHERAL);
+ ret = cdns_set_mode(cdns, USB_DR_MODE_PERIPHERAL);
break;
case USB_DR_MODE_HOST:
- ret = cdns3_set_mode(cdns, USB_DR_MODE_HOST);
+ ret = cdns_set_mode(cdns, USB_DR_MODE_HOST);
break;
case USB_DR_MODE_OTG:
- ret = cdns3_init_otg_mode(cdns);
+ ret = cdns_init_otg_mode(cdns);
break;
default:
dev_err(cdns->dev, "Unsupported mode of operation %d\n",
@@ -284,27 +329,27 @@ int cdns3_drd_update_mode(struct cdns3 *cdns)
return ret;
}
-static irqreturn_t cdns3_drd_thread_irq(int irq, void *data)
+static irqreturn_t cdns_drd_thread_irq(int irq, void *data)
{
- struct cdns3 *cdns = data;
+ struct cdns *cdns = data;
- cdns3_hw_role_switch(cdns);
+ cdns_hw_role_switch(cdns);
return IRQ_HANDLED;
}
/**
- * cdns3_drd_irq - interrupt handler for OTG events
+ * cdns_drd_irq - interrupt handler for OTG events
*
- * @irq: irq number for cdns3 core device
- * @data: structure of cdns3
+ * @irq: irq number for cdns core device
+ * @data: structure of cdns
*
* Returns IRQ_HANDLED or IRQ_NONE
*/
-static irqreturn_t cdns3_drd_irq(int irq, void *data)
+static irqreturn_t cdns_drd_irq(int irq, void *data)
{
irqreturn_t ret = IRQ_NONE;
- struct cdns3 *cdns = data;
+ struct cdns *cdns = data;
u32 reg;
if (cdns->dr_mode != USB_DR_MODE_OTG)
@@ -313,30 +358,30 @@ static irqreturn_t cdns3_drd_irq(int irq, void *data)
if (cdns->in_lpm)
return ret;
- reg = readl(&cdns->otg_regs->ivect);
+ reg = readl(&cdns->otg_irq_regs->ivect);
if (!reg)
return IRQ_NONE;
if (reg & OTGIEN_ID_CHANGE_INT) {
dev_dbg(cdns->dev, "OTG IRQ: new ID: %d\n",
- cdns3_get_id(cdns));
+ cdns_get_id(cdns));
ret = IRQ_WAKE_THREAD;
}
if (reg & (OTGIEN_VBUSVALID_RISE_INT | OTGIEN_VBUSVALID_FALL_INT)) {
dev_dbg(cdns->dev, "OTG IRQ: new VBUS: %d\n",
- cdns3_get_vbus(cdns));
+ cdns_get_vbus(cdns));
ret = IRQ_WAKE_THREAD;
}
- writel(~0, &cdns->otg_regs->ivect);
+ writel(~0, &cdns->otg_irq_regs->ivect);
return ret;
}
-int cdns3_drd_init(struct cdns3 *cdns)
+int cdns_drd_init(struct cdns *cdns)
{
void __iomem *regs;
u32 state;
@@ -347,28 +392,43 @@ int cdns3_drd_init(struct cdns3 *cdns)
return PTR_ERR(regs);
/* Detection of DRD version. Controller has been released
- * in two versions. Both are similar, but they have same changes
- * in register maps.
- * The first register in old version is command register and it's read
- * only, so driver should read 0 from it. On the other hand, in v1
- * the first register contains device ID number which is not set to 0.
- * Driver uses this fact to detect the proper version of
+ * in three versions. All are very similar and software compatible,
+ * but they have some changes in their register maps.
+ * The first register in the oldest version is the command register and
+ * it is read-only, so the driver should read 0 from it. On the other
+ * hand, in v1 and v2 the first register contains a device ID number
+ * that is never 0. The driver uses this fact to detect the proper
+ * version of the
* controller.
*/
cdns->otg_v0_regs = regs;
if (!readl(&cdns->otg_v0_regs->cmd)) {
cdns->version = CDNS3_CONTROLLER_V0;
cdns->otg_v1_regs = NULL;
+ cdns->otg_cdnsp_regs = NULL;
cdns->otg_regs = regs;
+ cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
+ &cdns->otg_v0_regs->ien;
writel(1, &cdns->otg_v0_regs->simulate);
dev_dbg(cdns->dev, "DRD version v0 (%08x)\n",
readl(&cdns->otg_v0_regs->version));
} else {
cdns->otg_v0_regs = NULL;
cdns->otg_v1_regs = regs;
- cdns->otg_regs = (void *)&cdns->otg_v1_regs->cmd;
- cdns->version = CDNS3_CONTROLLER_V1;
- writel(1, &cdns->otg_v1_regs->simulate);
+ cdns->otg_cdnsp_regs = regs;
+
+ cdns->otg_regs = (void __iomem *)&cdns->otg_v1_regs->cmd;
+
+ if (readl(&cdns->otg_cdnsp_regs->did) == OTG_CDNSP_DID) {
+ cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
+ &cdns->otg_cdnsp_regs->ien;
+ cdns->version = CDNSP_CONTROLLER_V2;
+ } else {
+ cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *)
+ &cdns->otg_v1_regs->ien;
+ writel(1, &cdns->otg_v1_regs->simulate);
+ cdns->version = CDNS3_CONTROLLER_V1;
+ }
+
dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n",
readl(&cdns->otg_v1_regs->did),
readl(&cdns->otg_v1_regs->rid));
@@ -378,17 +438,24 @@ int cdns3_drd_init(struct cdns3 *cdns)
/* Update dr_mode according to STRAP configuration. */
cdns->dr_mode = USB_DR_MODE_OTG;
- if (state == OTGSTS_STRAP_HOST) {
+
+ if ((cdns->version == CDNSP_CONTROLLER_V2 &&
+ state == OTGSTS_CDNSP_STRAP_HOST) ||
+ (cdns->version != CDNSP_CONTROLLER_V2 &&
+ state == OTGSTS_STRAP_HOST)) {
dev_dbg(cdns->dev, "Controller strapped to HOST\n");
cdns->dr_mode = USB_DR_MODE_HOST;
- } else if (state == OTGSTS_STRAP_GADGET) {
+ } else if ((cdns->version == CDNSP_CONTROLLER_V2 &&
+ state == OTGSTS_CDNSP_STRAP_GADGET) ||
+ (cdns->version != CDNSP_CONTROLLER_V2 &&
+ state == OTGSTS_STRAP_GADGET)) {
dev_dbg(cdns->dev, "Controller strapped to PERIPHERAL\n");
cdns->dr_mode = USB_DR_MODE_PERIPHERAL;
}
ret = devm_request_threaded_irq(cdns->dev, cdns->otg_irq,
- cdns3_drd_irq,
- cdns3_drd_thread_irq,
+ cdns_drd_irq,
+ cdns_drd_thread_irq,
IRQF_SHARED,
dev_name(cdns->dev), cdns);
if (ret) {
@@ -405,8 +472,9 @@ int cdns3_drd_init(struct cdns3 *cdns)
return 0;
}
-int cdns3_drd_exit(struct cdns3 *cdns)
+int cdns_drd_exit(struct cdns *cdns)
{
- cdns3_otg_disable_irq(cdns);
+ cdns_otg_disable_irq(cdns);
+
return 0;
}
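
The newly exported cdns_set_vbus()/cdns_clear_vbus() pair gives the CDNSP gadget driver control over the session-valid override select bit. A hypothetical caller, just to show the contract (per the code above, both helpers are no-ops on anything other than a CDNSP V2 controller):

	/*
	 * Illustration only: cdns_clear_vbus() sets OVERRIDE_SESS_VLD_SEL
	 * in the CDNSP override register and cdns_set_vbus() clears it.
	 */
	static void example_vbus_override(struct cdns *cdns, bool vbus_on)
	{
		if (vbus_on)
			cdns_set_vbus(cdns);
		else
			cdns_clear_vbus(cdns);
	}
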
diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h
index f1ccae285a16..9724acdecbbb 100644
--- a/drivers/usb/cdns3/drd.h
+++ b/drivers/usb/cdns3/drd.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Cadence USB3 DRD header file.
+ * Cadence USB3 and USBSSP DRD header file.
*
- * Copyright (C) 2018-2019 Cadence.
+ * Copyright (C) 2018-2020 Cadence.
*
* Author: Pawel Laszczak <pawell@cadence.com>
*/
@@ -10,10 +10,9 @@
#define __LINUX_CDNS3_DRD
#include <linux/usb/otg.h>
-#include <linux/phy/phy.h>
#include "core.h"
-/* DRD register interface for version v1. */
+/* DRD register interface for version v1 of cdns3 driver. */
struct cdns3_otg_regs {
__le32 did;
__le32 rid;
@@ -38,7 +37,7 @@ struct cdns3_otg_regs {
__le32 ctrl2;
};
-/* DRD register interface for version v0. */
+/* DRD register interface for version v0 of cdns3 driver. */
struct cdns3_otg_legacy_regs {
__le32 cmd;
__le32 sts;
@@ -57,14 +56,45 @@ struct cdns3_otg_legacy_regs {
__le32 ctrl1;
};
+/* DRD register interface for cdnsp driver */
+struct cdnsp_otg_regs {
+ __le32 did;
+ __le32 rid;
+ __le32 cfgs1;
+ __le32 cfgs2;
+ __le32 cmd;
+ __le32 sts;
+ __le32 state;
+ __le32 ien;
+ __le32 ivect;
+ __le32 tmr;
+ __le32 simulate;
+ __le32 adpbc_sts;
+ __le32 adp_ramp_time;
+ __le32 adpbc_ctrl1;
+ __le32 adpbc_ctrl2;
+ __le32 override;
+ __le32 vbusvalid_dbnc_cfg;
+ __le32 sessvalid_dbnc_cfg;
+ __le32 susp_timing_ctrl;
+};
+
+#define OTG_CDNSP_DID 0x0004034E
+
/*
- * Common registers interface for both version of DRD.
+ * Common registers interface for both CDNS3 and CDNSP version of DRD.
*/
-struct cdns3_otg_common_regs {
+struct cdns_otg_common_regs {
__le32 cmd;
__le32 sts;
__le32 state;
- __le32 different1;
+};
+
+/*
+ * Interrupt-related registers. These registers are mapped at a
+ * different location on the CDNSP controller.
+ */
+struct cdns_otg_irq_regs {
__le32 ien;
__le32 ivect;
};
@@ -92,9 +122,9 @@ struct cdns3_otg_common_regs {
#define OTGCMD_DEV_BUS_DROP BIT(8)
/* Drop the bus for Host mode*/
#define OTGCMD_HOST_BUS_DROP BIT(9)
-/* Power Down USBSS-DEV. */
+/* Power Down USBSS-DEV - only for CDNS3.*/
#define OTGCMD_DEV_POWER_OFF BIT(11)
-/* Power Down CDNSXHCI. */
+/* Power Down CDNSXHCI - only for CDNS3. */
#define OTGCMD_HOST_POWER_OFF BIT(12)
/* OTGIEN - bitmasks */
@@ -123,20 +153,31 @@ struct cdns3_otg_common_regs {
#define OTGSTS_OTG_NRDY_MASK BIT(11)
#define OTGSTS_OTG_NRDY(p) ((p) & OTGSTS_OTG_NRDY_MASK)
/*
- * Value of the strap pins.
+ * Value of the strap pins for:
+ * CDNS3:
* 000 - no default configuration
 * 010 - Controller initially configured as Host
* 100 - Controller initially configured as Device
+ * CDNSP:
+ * 000 - No default configuration.
+ * 010 - Controller initially configured as Host.
+ * 100 - Controller initially configured as Device.
*/
#define OTGSTS_STRAP(p) (((p) & GENMASK(14, 12)) >> 12)
#define OTGSTS_STRAP_NO_DEFAULT_CFG 0x00
#define OTGSTS_STRAP_HOST_OTG 0x01
#define OTGSTS_STRAP_HOST 0x02
#define OTGSTS_STRAP_GADGET 0x04
+#define OTGSTS_CDNSP_STRAP_HOST 0x01
+#define OTGSTS_CDNSP_STRAP_GADGET 0x02
+
/* Host mode is turned on. */
-#define OTGSTS_XHCI_READY BIT(26)
+#define OTGSTS_CDNS3_XHCI_READY BIT(26)
+#define OTGSTS_CDNSP_XHCI_READY BIT(27)
+
/* "Device mode is turned on .*/
-#define OTGSTS_DEV_READY BIT(27)
+#define OTGSTS_CDNS3_DEV_READY BIT(27)
+#define OTGSTS_CDNSP_DEV_READY BIT(26)
/* OTGSTATE- bitmasks */
#define OTGSTATE_DEV_STATE_MASK GENMASK(2, 0)
@@ -152,6 +193,8 @@ struct cdns3_otg_common_regs {
#define OVERRIDE_IDPULLUP BIT(0)
/* Only for CDNS3_CONTROLLER_V0 version */
#define OVERRIDE_IDPULLUP_V0 BIT(24)
+/* Vbusvalid/Sesvalid override select. */
+#define OVERRIDE_SESS_VLD_SEL BIT(10)
/* PHYRST_CFG - bitmasks */
#define PHYRST_CFG_PHYRST_A_ENABLE BIT(0)
@@ -159,17 +202,18 @@ struct cdns3_otg_common_regs {
#define CDNS3_ID_PERIPHERAL 1
#define CDNS3_ID_HOST 0
-bool cdns3_is_host(struct cdns3 *cdns);
-bool cdns3_is_device(struct cdns3 *cdns);
-int cdns3_get_id(struct cdns3 *cdns);
-int cdns3_get_vbus(struct cdns3 *cdns);
-int cdns3_drd_init(struct cdns3 *cdns);
-int cdns3_drd_exit(struct cdns3 *cdns);
-int cdns3_drd_update_mode(struct cdns3 *cdns);
-int cdns3_drd_gadget_on(struct cdns3 *cdns);
-void cdns3_drd_gadget_off(struct cdns3 *cdns);
-int cdns3_drd_host_on(struct cdns3 *cdns);
-void cdns3_drd_host_off(struct cdns3 *cdns);
-int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode);
+bool cdns_is_host(struct cdns *cdns);
+bool cdns_is_device(struct cdns *cdns);
+int cdns_get_id(struct cdns *cdns);
+int cdns_get_vbus(struct cdns *cdns);
+void cdns_clear_vbus(struct cdns *cdns);
+void cdns_set_vbus(struct cdns *cdns);
+int cdns_drd_init(struct cdns *cdns);
+int cdns_drd_exit(struct cdns *cdns);
+int cdns_drd_update_mode(struct cdns *cdns);
+int cdns_drd_gadget_on(struct cdns *cdns);
+void cdns_drd_gadget_off(struct cdns *cdns);
+int cdns_drd_host_on(struct cdns *cdns);
+void cdns_drd_host_off(struct cdns *cdns);
#endif /* __LINUX_CDNS3_DRD */
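
Since the strap encoding now differs between the two families, decoding has to be version-aware. A hypothetical helper that mirrors the strap handling cdns_drd_init() performs with these masks:

	static enum usb_dr_mode example_strap_to_dr_mode(struct cdns *cdns,
							 u32 sts)
	{
		u32 strap = OTGSTS_STRAP(sts);	/* bits 14:12 of OTGSTS */

		if (cdns->version == CDNSP_CONTROLLER_V2) {
			if (strap == OTGSTS_CDNSP_STRAP_HOST)
				return USB_DR_MODE_HOST;
			if (strap == OTGSTS_CDNSP_STRAP_GADGET)
				return USB_DR_MODE_PERIPHERAL;
		} else {
			if (strap == OTGSTS_STRAP_HOST)
				return USB_DR_MODE_HOST;
			if (strap == OTGSTS_STRAP_GADGET)
				return USB_DR_MODE_PERIPHERAL;
		}

		return USB_DR_MODE_OTG;	/* no default strap configuration */
	}
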
diff --git a/drivers/usb/cdns3/gadget-export.h b/drivers/usb/cdns3/gadget-export.h
index 702c5a267a92..c37b6269b001 100644
--- a/drivers/usb/cdns3/gadget-export.h
+++ b/drivers/usb/cdns3/gadget-export.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Cadence USBSS DRD Driver - Gadget Export APIs.
+ * Cadence USBSS and USBSSP DRD Driver - Gadget Export APIs.
*
* Copyright (C) 2017 NXP
* Copyright (C) 2017-2018 NXP
@@ -10,16 +10,28 @@
#ifndef __LINUX_CDNS3_GADGET_EXPORT
#define __LINUX_CDNS3_GADGET_EXPORT
-#ifdef CONFIG_USB_CDNS3_GADGET
+#if IS_ENABLED(CONFIG_USB_CDNSP_GADGET)
-int cdns3_gadget_init(struct cdns3 *cdns);
+int cdnsp_gadget_init(struct cdns *cdns);
#else
-static inline int cdns3_gadget_init(struct cdns3 *cdns)
+static inline int cdnsp_gadget_init(struct cdns *cdns)
{
return -ENXIO;
}
-#endif
+#endif /* CONFIG_USB_CDNSP_GADGET */
+
+#if IS_ENABLED(CONFIG_USB_CDNS3_GADGET)
+
+int cdns3_gadget_init(struct cdns *cdns);
+#else
+
+static inline int cdns3_gadget_init(struct cdns *cdns)
+{
+ return -ENXIO;
+}
+
+#endif /* CONFIG_USB_CDNS3_GADGET */
#endif /* __LINUX_CDNS3_GADGET_EXPORT */
diff --git a/drivers/usb/cdns3/host-export.h b/drivers/usb/cdns3/host-export.h
index 26041718a086..cf92173ecf00 100644
--- a/drivers/usb/cdns3/host-export.h
+++ b/drivers/usb/cdns3/host-export.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Cadence USBSS DRD Driver - Host Export APIs
+ * Cadence USBSS and USBSSP DRD Driver - Host Export APIs
*
* Copyright (C) 2017-2018 NXP
*
@@ -9,25 +9,19 @@
#ifndef __LINUX_CDNS3_HOST_EXPORT
#define __LINUX_CDNS3_HOST_EXPORT
-struct usb_hcd;
-#ifdef CONFIG_USB_CDNS3_HOST
+#if IS_ENABLED(CONFIG_USB_CDNS_HOST)
-int cdns3_host_init(struct cdns3 *cdns);
-int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd);
+int cdns_host_init(struct cdns *cdns);
#else
-static inline int cdns3_host_init(struct cdns3 *cdns)
+static inline int cdns_host_init(struct cdns *cdns)
{
return -ENXIO;
}
-static inline void cdns3_host_exit(struct cdns3 *cdns) { }
-static inline int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd)
-{
- return 0;
-}
+static inline void cdns_host_exit(struct cdns *cdns) { }
-#endif /* CONFIG_USB_CDNS3_HOST */
+#endif /* CONFIG_USB_CDNS_HOST */
#endif /* __LINUX_CDNS3_HOST_EXPORT */
diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c
index ec89f2e5430f..84dadfa726aa 100644
--- a/drivers/usb/cdns3/host.c
+++ b/drivers/usb/cdns3/host.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Cadence USBSS DRD Driver - host side
+ * Cadence USBSS and USBSSP DRD Driver - host side
*
* Copyright (C) 2018-2019 Cadence Design Systems.
* Copyright (C) 2017-2018 NXP
@@ -23,18 +23,20 @@
#define CFG_RXDET_P3_EN BIT(15)
#define LPM_2_STB_SWITCH_EN BIT(25)
+static int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd);
+
static const struct xhci_plat_priv xhci_plat_cdns3_xhci = {
.quirks = XHCI_SKIP_PHY_INIT | XHCI_AVOID_BEI,
.suspend_quirk = xhci_cdns3_suspend_quirk,
};
-static int __cdns3_host_init(struct cdns3 *cdns)
+static int __cdns_host_init(struct cdns *cdns)
{
struct platform_device *xhci;
int ret;
struct usb_hcd *hcd;
- cdns3_drd_host_on(cdns);
+ cdns_drd_host_on(cdns);
xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO);
if (!xhci) {
@@ -46,7 +48,7 @@ static int __cdns3_host_init(struct cdns3 *cdns)
cdns->host_dev = xhci;
ret = platform_device_add_resources(xhci, cdns->xhci_res,
- CDNS3_XHCI_RESOURCES_NUM);
+ CDNS_XHCI_RESOURCES_NUM);
if (ret) {
dev_err(cdns->dev, "couldn't add resources to xHCI device\n");
goto err1;
@@ -87,7 +89,7 @@ err1:
return ret;
}
-int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd)
+static int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
u32 value;
@@ -113,25 +115,25 @@ int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd)
return 0;
}
-static void cdns3_host_exit(struct cdns3 *cdns)
+static void cdns_host_exit(struct cdns *cdns)
{
kfree(cdns->xhci_plat_data);
platform_device_unregister(cdns->host_dev);
cdns->host_dev = NULL;
- cdns3_drd_host_off(cdns);
+ cdns_drd_host_off(cdns);
}
-int cdns3_host_init(struct cdns3 *cdns)
+int cdns_host_init(struct cdns *cdns)
{
- struct cdns3_role_driver *rdrv;
+ struct cdns_role_driver *rdrv;
rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
if (!rdrv)
return -ENOMEM;
- rdrv->start = __cdns3_host_init;
- rdrv->stop = cdns3_host_exit;
- rdrv->state = CDNS3_ROLE_STATE_INACTIVE;
+ rdrv->start = __cdns_host_init;
+ rdrv->stop = cdns_host_exit;
+ rdrv->state = CDNS_ROLE_STATE_INACTIVE;
rdrv->name = "host";
cdns->roles[USB_ROLE_HOST] = rdrv;