path: root/drivers/usb/gadget/udc/bdc
author    Linus Torvalds <torvalds@linux-foundation.org>  2014-12-15 01:57:16 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-12-15 01:57:16 +0300
commit    e7cf773d431a63a2417902696fcc9e0ebdc83bbe (patch)
tree      86dbdceb7d91226507a3af0d57e03b0ca664b22e /drivers/usb/gadget/udc/bdc
parent    7a02d089695a1217992434f03a78aa32bad85b5c (diff)
parent    81e1dadfb5b2d47aa513ad60b1c9cf0ea17b6514 (diff)
Merge tag 'usb-3.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb
Pull USB updates from Greg KH:
 "Here's the big set of USB and PHY patches for 3.19-rc1. The normal
  churn in the USB gadget area is in here, as well as xhci and other
  individual USB driver updates. The PHY tree is also in here, as there
  were dependencies on the USB tree. All of these have been in linux-next"

* tag 'usb-3.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb: (351 commits)
  arm: omap3: twl: remove usb phy init data
  usbip: fix error handling in stub_probe()
  usb: gadget: udc: missing curly braces
  USB: mos7720: delete some unneeded code
  wusb: replace memset by memzero_explicit
  usbip: remove unneeded structure
  usb: xhci: fix comment for PORT_DEV_REMOVE
  xhci: don't use the same variable for stopped and halted rings current TD
  xhci: clear extra bits from slot context when setting max exit latency
  xhci: cleanup finish_td function
  USB: adutux: NULL dereferences on disconnect
  usb: chipidea: fix platform_no_drv_owner.cocci warnings
  usb: chipidea: Fixed a few typos in comments
  Documentation: bindings: add doc for the USB2 ChipIdea USB driver
  usb: chipidea: add a usb2 driver for ci13xxx
  usb: chipidea: fix phy handling
  usb: chipidea: remove duplicate dev_set_drvdata for host_start
  usb: chipidea: parameter 'mode' isn't needed for hw_device_reset
  usb: chipidea: add controller reset API
  usb: chipidea: remove flag CI_HDRC_REQUIRE_TRANSCEIVER
  ...
Diffstat (limited to 'drivers/usb/gadget/udc/bdc')
-rw-r--r--  drivers/usb/gadget/udc/bdc/Kconfig      21
-rw-r--r--  drivers/usb/gadget/udc/bdc/Makefile      8
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc.h       490
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_cmd.c   376
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_cmd.h    29
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_core.c  533
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_dbg.c   123
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_dbg.h    37
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_ep.c   2024
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_ep.h     22
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_pci.c   132
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_udc.c   587
12 files changed, 4382 insertions, 0 deletions
diff --git a/drivers/usb/gadget/udc/bdc/Kconfig b/drivers/usb/gadget/udc/bdc/Kconfig
new file mode 100644
index 000000000000..0d7b8c9f72fd
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/Kconfig
@@ -0,0 +1,21 @@
+config USB_BDC_UDC
+ tristate "Broadcom USB3.0 device controller IP driver (BDC)"
+ depends on USB_GADGET && HAS_DMA
+
+ help
+ BDC is Broadcom's USB3.0 device controller IP. If your SOC has a BDC IP
+ then select this driver.
+
+ Say "y" here to link the driver statically, or "m" to build a dynamically
+ linked module called "bdc".
+
+if USB_BDC_UDC
+
+comment "Platform Support"
+config USB_BDC_PCI
+ tristate "BDC support for PCIe based platforms"
+ depends on PCI
+ default USB_BDC_UDC
+ help
+ Enable support for platforms which have BDC connected through PCIe, such as Lego3 FPGA platform.
+endif
diff --git a/drivers/usb/gadget/udc/bdc/Makefile b/drivers/usb/gadget/udc/bdc/Makefile
new file mode 100644
index 000000000000..5cf6a3bcdf0f
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_USB_BDC_UDC) += bdc.o
+bdc-y := bdc_core.o bdc_cmd.o bdc_ep.o bdc_udc.o
+
+ifneq ($(CONFIG_USB_GADGET_VERBOSE),)
+ bdc-y += bdc_dbg.o
+endif
+
+obj-$(CONFIG_USB_BDC_PCI) += bdc_pci.o
diff --git a/drivers/usb/gadget/udc/bdc/bdc.h b/drivers/usb/gadget/udc/bdc/bdc.h
new file mode 100644
index 000000000000..dc18a20bf040
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc.h
@@ -0,0 +1,490 @@
+/*
+ * bdc.h - header for the BRCM BDC USB3.0 device controller
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_BDC_H__
+#define __LINUX_BDC_H__
+
+#include <linux/kernel.h>
+#include <linux/usb.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/debugfs.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <asm/unaligned.h>
+
+#define BRCM_BDC_NAME "bdc_usb3"
+#define BRCM_BDC_DESC "BDC device controller driver"
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+/* BDC command operation timeout in usec */
+#define BDC_CMD_TIMEOUT 1000
+/* BDC controller operation timeout in usec */
+#define BDC_COP_TIMEOUT 500
+
+/*
+ * Maximum size of the ep0 response buffer for ch9 requests;
+ * the SET_SEL request uses 6 bytes, the largest so far.
+ */
+#define EP0_RESPONSE_BUFF 6
+/* Start with SS as default */
+#define EP0_MAX_PKT_SIZE 512
+
+/* 64 entries in a SRR */
+#define NUM_SR_ENTRIES 64
+
+/* Num of bds per table */
+#define NUM_BDS_PER_TABLE 32
+
+/* Num of tables in bd list for control, bulk and int ep */
+#define NUM_TABLES 2
+
+/* Num of tables in bd list for Isoch ep */
+#define NUM_TABLES_ISOCH 6
+
+/* U1 Timeout default: 248usec */
+#define U1_TIMEOUT 0xf8
+
+/* Interrupt coalescence in usec */
+#define INT_CLS 500
+
+/* Register offsets */
+/* Configuration and Capability registers */
+#define BDC_BDCCFG0 0x00
+#define BDC_BDCCFG1 0x04
+#define BDC_BDCCAP0 0x08
+#define BDC_BDCCAP1 0x0c
+#define BDC_CMDPAR0 0x10
+#define BDC_CMDPAR1 0x14
+#define BDC_CMDPAR2 0x18
+#define BDC_CMDSC 0x1c
+#define BDC_USPC 0x20
+#define BDC_USPPMS 0x28
+#define BDC_USPPM2 0x2c
+#define BDC_SPBBAL 0x38
+#define BDC_SPBBAH 0x3c
+#define BDC_BDCSC 0x40
+#define BDC_XSFNTF 0x4c
+
+#define BDC_DVCSA 0x50
+#define BDC_DVCSB 0x54
+#define BDC_EPSTS0(n) (0x60 + (n * 0x10))
+#define BDC_EPSTS1(n) (0x64 + (n * 0x10))
+#define BDC_EPSTS2(n) (0x68 + (n * 0x10))
+#define BDC_EPSTS3(n) (0x6c + (n * 0x10))
+#define BDC_EPSTS4(n) (0x70 + (n * 0x10))
+#define BDC_EPSTS5(n) (0x74 + (n * 0x10))
+#define BDC_EPSTS6(n) (0x78 + (n * 0x10))
+#define BDC_EPSTS7(n) (0x7c + (n * 0x10))
+#define BDC_SRRBAL(n) (0x200 + (n * 0x10))
+#define BDC_SRRBAH(n) (0x204 + (n * 0x10))
+#define BDC_SRRINT(n) (0x208 + (n * 0x10))
+#define BDC_INTCTLS(n) (0x20c + (n * 0x10))
+
+/* Extended capability regs */
+#define BDC_FSCNOC 0xcd4
+#define BDC_FSCNIC 0xce4
+#define NUM_NCS(p) (p >> 28)
+
+/* Register bit fields and Masks */
+/* BDC Configuration 0 */
+#define BDC_PGS(p) (((p) & (0x7 << 8)) >> 8)
+#define BDC_SPB(p) (p & 0x7)
+
+/* BDC Capability1 */
+#define BDC_P64 (1 << 0)
+
+/* BDC Command register */
+#define BDC_CMD_FH 0xe
+#define BDC_CMD_DNC 0x6
+#define BDC_CMD_EPO 0x4
+#define BDC_CMD_BLA 0x3
+#define BDC_CMD_EPC 0x2
+#define BDC_CMD_DVC 0x1
+#define BDC_CMD_CWS (0x1 << 5)
+#define BDC_CMD_CST(p) (((p) & (0xf << 6))>>6)
+#define BDC_CMD_EPN(p) ((p & 0x1f) << 10)
+#define BDC_SUB_CMD_ADD (0x1 << 17)
+#define BDC_SUB_CMD_FWK (0x4 << 17)
+/* Reset sequence number */
+#define BDC_CMD_EPO_RST_SN (0x1 << 16)
+#define BDC_CMD_EP0_XSD (0x1 << 16)
+#define BDC_SUB_CMD_ADD_EP (0x1 << 17)
+#define BDC_SUB_CMD_DRP_EP (0x2 << 17)
+#define BDC_SUB_CMD_EP_STP (0x2 << 17)
+#define BDC_SUB_CMD_EP_STL (0x4 << 17)
+#define BDC_SUB_CMD_EP_RST (0x1 << 17)
+#define BDC_CMD_SRD (1 << 27)
+
+/* CMD completion status */
+#define BDC_CMDS_SUCC 0x1
+#define BDC_CMDS_PARA 0x3
+#define BDC_CMDS_STAT 0x4
+#define BDC_CMDS_FAIL 0x5
+#define BDC_CMDS_INTL 0x6
+#define BDC_CMDS_BUSY 0xf
+
+/* CMDSC Param 2 shifts */
+#define EPT_SHIFT 22
+#define MP_SHIFT 10
+#define MB_SHIFT 6
+#define EPM_SHIFT 4
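+/*
+ * A sketch of how bdc_config_ep() (bdc_cmd.c) assembles param2 with these
+ * shifts: bits 23:22 endpoint type, 20:10 max packet size, 9:6 max burst,
+ * 5:4 mult, 3:0 interval exponent.
+ */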
+
+/* BDC USPSC */
+#define BDC_VBC (1 << 31)
+#define BDC_PRC (1 << 30)
+#define BDC_PCE (1 << 29)
+#define BDC_CFC (1 << 28)
+#define BDC_PCC (1 << 27)
+#define BDC_PSC (1 << 26)
+#define BDC_VBS (1 << 25)
+#define BDC_PRS (1 << 24)
+#define BDC_PCS (1 << 23)
+#define BDC_PSP(p) (((p) & (0x7 << 20))>>20)
+#define BDC_SCN (1 << 8)
+#define BDC_SDC (1 << 7)
+#define BDC_SWS (1 << 4)
+
+#define BDC_USPSC_RW (BDC_SCN|BDC_SDC|BDC_SWS|0xf)
+#define BDC_PSP(p) (((p) & (0x7 << 20))>>20)
+
+#define BDC_SPEED_FS 0x1
+#define BDC_SPEED_LS 0x2
+#define BDC_SPEED_HS 0x3
+#define BDC_SPEED_SS 0x4
+
+#define BDC_PST(p) (p & 0xf)
+#define BDC_PST_MASK 0xf
+
+/* USPPMS */
+#define BDC_U2E (0x1 << 31)
+#define BDC_U1E (0x1 << 30)
+#define BDC_U2A (0x1 << 29)
+#define BDC_PORT_W1S (0x1 << 17)
+#define BDC_U1T(p) ((p) & 0xff)
+#define BDC_U2T(p) (((p) & 0xff) << 8)
+#define BDC_U1T_MASK 0xff
+
+/* USPPM2 */
+/* Hardware LPM Enable */
+#define BDC_HLE (1 << 16)
+
+/* BDC Status and Control */
+#define BDC_COP_RST (1 << 29)
+#define BDC_COP_RUN (2 << 29)
+#define BDC_COP_STP (4 << 29)
+
+#define BDC_COP_MASK (BDC_COP_RST|BDC_COP_RUN|BDC_COP_STP)
+
+#define BDC_COS (1 << 28)
+#define BDC_CSTS(p) (((p) & (0x7 << 20)) >> 20)
+#define BDC_MASK_MCW (1 << 7)
+#define BDC_GIE (1 << 1)
+#define BDC_GIP (1 << 0)
+
+#define BDC_HLT 1
+#define BDC_NOR 2
+#define BDC_OIP 7
+
+/* Buffer descriptor and Status report bit fields and masks */
+#define BD_TYPE_BITMASK (0xf)
+#define BD_CHAIN 0xf
+
+#define BD_TFS_SHIFT 4
+#define BD_SOT (1 << 26)
+#define BD_EOT (1 << 27)
+#define BD_ISP (1 << 29)
+#define BD_IOC (1 << 30)
+#define BD_SBF (1 << 31)
+
+#define BD_INTR_TARGET(p) (((p) & 0x1f) << 27)
+
+#define BDC_SRR_RWS (1 << 4)
+#define BDC_SRR_RST (1 << 3)
+#define BDC_SRR_ISR (1 << 2)
+#define BDC_SRR_IE (1 << 1)
+#define BDC_SRR_IP (1 << 0)
+#define BDC_SRR_EPI(p) (((p) & (0xff << 24)) >> 24)
+#define BDC_SRR_DPI(p) (((p) & (0xff << 16)) >> 16)
+#define BDC_SRR_DPI_MASK 0x00ff0000
+
+#define MARK_CHAIN_BD (BD_CHAIN|BD_EOT|BD_SOT)
+
+/* Control transfer BD specific fields */
+#define BD_DIR_IN (1 << 25)
+
+#define BDC_PTC_MASK 0xf0000000
+
+/* status report defines */
+#define SR_XSF 0
+#define SR_USPC 4
+#define SR_BD_LEN(p) (p & 0xffffff)
+
+#define XSF_SUCC 0x1
+#define XSF_SHORT 0x3
+#define XSF_BABB 0x4
+#define XSF_SETUP_RECV 0x6
+#define XSF_DATA_START 0x7
+#define XSF_STATUS_START 0x8
+
+#define XSF_STS(p) (((p) >> 28) & 0xf)
+
+/* Transfer BD fields */
+#define BD_LEN(p) ((p) & 0x1ffff)
+#define BD_LTF (1 << 25)
+#define BD_TYPE_DS 0x1
+#define BD_TYPE_SS 0x2
+
+#define BDC_EP_ENABLED (1 << 0)
+#define BDC_EP_STALL (1 << 1)
+#define BDC_EP_STOP (1 << 2)
+
+/* One BD can transfer max 65536 bytes */
+#define BD_MAX_BUFF_SIZE (1 << 16)
+/* Maximum bytes in one XFR, Refer to BDC spec */
+#define MAX_XFR_LEN 16777215
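+/* illustrative note: 16777215 == 2^24 - 1, the same 24-bit width as SR_BD_LEN() above */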
+
+/* defines for Force Header command */
+#define DEV_NOTF_TYPE 6
+#define FWK_SUBTYPE 1
+#define TRA_PACKET 4
+
+#define to_bdc_ep(e) container_of(e, struct bdc_ep, usb_ep)
+#define to_bdc_req(r) container_of(r, struct bdc_req, usb_req)
+#define gadget_to_bdc(g) container_of(g, struct bdc, gadget)
+
+/* FUNCTION WAKE DEV NOTIFICATION interval, USB3 spec table 8.13 */
+#define BDC_TNOTIFY 2500 /*in ms*/
+/* Devstatus bitfields */
+#define REMOTE_WAKEUP_ISSUED (1 << 16)
+#define DEVICE_SUSPENDED (1 << 17)
+#define FUNC_WAKE_ISSUED (1 << 18)
+#define REMOTE_WAKE_ENABLE (1 << USB_DEVICE_REMOTE_WAKEUP)
+
+/* On disconnect, preserve these bits and clear rest */
+#define DEVSTATUS_CLEAR (1 << USB_DEVICE_SELF_POWERED)
+/* Hardware and software Data structures */
+
+/* Endpoint bd: buffer descriptor */
+struct bdc_bd {
+ __le32 offset[4];
+};
+
+/* Status report in Status report ring(srr) */
+struct bdc_sr {
+ __le32 offset[4];
+};
+
+/* bd_table: contiguous bds in a table */
+struct bd_table {
+ struct bdc_bd *start_bd;
+ /* dma address of start bd of table*/
+ dma_addr_t dma;
+};
+
+/*
+ * Each endpoint has a bdl (buffer descriptor list); a bdl consists of 1 or
+ * more bd tables chained to each other through a chain bd, and every table
+ * has an equal number of bds. The software uses the bdi (bd index) to refer
+ * to a particular bd in the list.
+ */
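+/*
+ * Illustrative sketch with the defaults above: NUM_TABLES = 2 and
+ * NUM_BDS_PER_TABLE = 32 give max_bdi = 63; the last bd of each table
+ * (bdi 31 and 63) is used as a chain bd, so it never carries data.
+ */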
+struct bd_list {
+ /* Array of bd table pointers*/
+ struct bd_table **bd_table_array;
+ /* How many tables chained to each other */
+ int num_tabs;
+ /* Max_bdi = num_tabs * num_bds_table - 1 */
+ int max_bdi;
+ /* current enq bdi from sw point of view */
+ int eqp_bdi;
+ /* current deq bdi from sw point of view */
+ int hwd_bdi;
+ /* numbers of bds per table */
+ int num_bds_table;
+};
+
+struct bdc_req;
+
+/* Representation of a transfer, one transfer can have multiple bd's */
+struct bd_transfer {
+ struct bdc_req *req;
+ /* start bd index */
+ int start_bdi;
+ /* this will be the next hw dqp when this transfer completes */
+ int next_hwd_bdi;
+ /* number of bds in this transfer */
+ int num_bds;
+};
+
+/*
+ * Representation of a gadget request, every gadget request is contained
+ * by 1 bd_transfer.
+ */
+struct bdc_req {
+ struct usb_request usb_req;
+ struct list_head queue;
+ struct bdc_ep *ep;
+ /* only one Transfer per request */
+ struct bd_transfer bd_xfr;
+ int epnum;
+};
+
+/* scratchpad buffer needed by bdc hardware */
+struct bdc_scratchpad {
+ dma_addr_t sp_dma;
+ void *buff;
+ u32 size;
+};
+
+/* endpoint representation */
+struct bdc_ep {
+ struct usb_ep usb_ep;
+ struct list_head queue;
+ struct bdc *bdc;
+ u8 ep_type;
+ u8 dir;
+ u8 ep_num;
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
+ const struct usb_endpoint_descriptor *desc;
+ unsigned int flags;
+ char name[20];
+ /* endpoint bd list*/
+ struct bd_list bd_list;
+ /*
+ * HW generates an extra event for multi-bd transfers; this flag
+ * helps in ignoring that extra event
+ */
+ bool ignore_next_sr;
+};
+
+/* bdc command parameter structure */
+struct bdc_cmd_params {
+ u32 param2;
+ u32 param1;
+ u32 param0;
+};
+
+/* status report ring (srr); currently one srr is supported for the entire system */
+struct srr {
+ struct bdc_sr *sr_bds;
+ u16 eqp_index;
+ u16 dqp_index;
+ dma_addr_t dma_addr;
+};
+
+/* EP0 states */
+enum bdc_ep0_state {
+ WAIT_FOR_SETUP = 0,
+ WAIT_FOR_DATA_START,
+ WAIT_FOR_DATA_XMIT,
+ WAIT_FOR_STATUS_START,
+ WAIT_FOR_STATUS_XMIT,
+ STATUS_PENDING
+};
+
+/* Link states */
+enum bdc_link_state {
+ BDC_LINK_STATE_U0 = 0x00,
+ BDC_LINK_STATE_U3 = 0x03,
+ BDC_LINK_STATE_RX_DET = 0x05,
+ BDC_LINK_STATE_RESUME = 0x0f
+};
+
+/* representation of bdc */
+struct bdc {
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *gadget_driver;
+ struct device *dev;
+ /* device lock */
+ spinlock_t lock;
+
+ /* num of endpoints for a particular instantiation of IP */
+ unsigned int num_eps;
+ /*
+ * Array of eps; it uses the same index convention as the bdc hw,
+ * i.e. 1 for ep0, 2 for 1out, 3 for 1in, ...
+ */
+ struct bdc_ep **bdc_ep_array;
+ void __iomem *regs;
+ struct bdc_scratchpad scratchpad;
+ u32 sp_buff_size;
+ /* current driver supports 1 status ring */
+ struct srr srr;
+ /* Last received setup packet */
+ struct usb_ctrlrequest setup_pkt;
+ struct bdc_req ep0_req;
+ struct bdc_req status_req;
+ enum bdc_ep0_state ep0_state;
+ bool delayed_status;
+ bool zlp_needed;
+ bool reinit;
+ bool pullup;
+ /* Bits 0-15 are standard and 16-31 for proprietary information */
+ u32 devstatus;
+ int irq;
+ void *mem;
+ u32 dev_addr;
+ /* DMA pools */
+ struct dma_pool *bd_table_pool;
+ u8 test_mode;
+ /* array of callbacks for various status report handlers */
+ void (*sr_handler[2])(struct bdc *, struct bdc_sr *);
+ /* ep0 callback handlers */
+ void (*sr_xsf_ep0[3])(struct bdc *, struct bdc_sr *);
+ /* ep0 response buffer for ch9 requests like GET_STATUS and SET_SEL */
+ unsigned char ep0_response_buff[EP0_RESPONSE_BUFF];
+ /*
+ * Timer to check if the host resumed transfers after bdc sent a Func
+ * Wake notification packet on a remote wakeup; if not, resend the
+ * Func Wake packet every 2.5 secs. Refer to USB3 spec section 8.5.6.4
+ */
+ struct delayed_work func_wake_notify;
+};
+
+static inline u32 bdc_readl(void __iomem *base, u32 offset)
+{
+ return readl(base + offset);
+}
+
+static inline void bdc_writel(void __iomem *base, u32 offset, u32 value)
+{
+ writel(value, base + offset);
+}
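+/*
+ * Typical usage (see bdc_core.c), shown here only for illustration:
+ *	temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ *	bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST);
+ */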
+
+/* Buffer descriptor list operations */
+void bdc_notify_xfr(struct bdc *, u32);
+void bdc_softconn(struct bdc *);
+void bdc_softdisconn(struct bdc *);
+int bdc_run(struct bdc *);
+int bdc_stop(struct bdc *);
+int bdc_reset(struct bdc *);
+int bdc_udc_init(struct bdc *);
+void bdc_udc_exit(struct bdc *);
+int bdc_reinit(struct bdc *);
+
+/* Status report handlers */
+/* Upstream port status change sr */
+void bdc_sr_uspc(struct bdc *, struct bdc_sr *);
+/* transfer sr */
+void bdc_sr_xsf(struct bdc *, struct bdc_sr *);
+/* EP0 XSF handlers */
+void bdc_xsf_ep0_setup_recv(struct bdc *, struct bdc_sr *);
+void bdc_xsf_ep0_data_start(struct bdc *, struct bdc_sr *);
+void bdc_xsf_ep0_status_start(struct bdc *, struct bdc_sr *);
+
+#endif /* __LINUX_BDC_H__ */
diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.c b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
new file mode 100644
index 000000000000..6a4155c4bd86
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
@@ -0,0 +1,376 @@
+/*
+ * bdc_cmd.c - BRCM BDC USB3.0 device controller
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include "bdc.h"
+#include "bdc_cmd.h"
+#include "bdc_dbg.h"
+
+/* Issues a cmd to cmd processor and waits for cmd completion */
+static int bdc_issue_cmd(struct bdc *bdc, u32 cmd_sc, u32 param0,
+ u32 param1, u32 param2)
+{
+ u32 timeout = BDC_CMD_TIMEOUT;
+ u32 cmd_status;
+ u32 temp;
+
+ bdc_writel(bdc->regs, BDC_CMDPAR0, param0);
+ bdc_writel(bdc->regs, BDC_CMDPAR1, param1);
+ bdc_writel(bdc->regs, BDC_CMDPAR2, param2);
+
+ /* Issue the cmd */
+ /* Make sure the cmd params are written before asking HW to exec cmd */
+ wmb();
+ bdc_writel(bdc->regs, BDC_CMDSC, cmd_sc | BDC_CMD_CWS | BDC_CMD_SRD);
+ do {
+ temp = bdc_readl(bdc->regs, BDC_CMDSC);
+ dev_dbg_ratelimited(bdc->dev, "cmdsc=%x", temp);
+ cmd_status = BDC_CMD_CST(temp);
+ if (cmd_status != BDC_CMDS_BUSY) {
+ dev_dbg(bdc->dev,
+ "command completed cmd_sts:%x\n", cmd_status);
+ return cmd_status;
+ }
+ udelay(1);
+ } while (timeout--);
+
+ dev_err(bdc->dev,
+ "command operation timed out, cmd_status=%d\n", cmd_status);
+
+ return cmd_status;
+}
+
+/* Submits cmd and analyzes the return value of bdc_issue_cmd */
+static int bdc_submit_cmd(struct bdc *bdc, u32 cmd_sc,
+ u32 param0, u32 param1, u32 param2)
+{
+ u32 temp, cmd_status;
+ int reset_bdc = 0;
+ int ret;
+
+ temp = bdc_readl(bdc->regs, BDC_CMDSC);
+ dev_dbg(bdc->dev,
+ "%s:CMDSC:%08x cmdsc:%08x param0=%08x param1=%08x param2=%08x\n",
+ __func__, temp, cmd_sc, param0, param1, param2);
+
+ cmd_status = BDC_CMD_CST(temp);
+ if (cmd_status == BDC_CMDS_BUSY) {
+ dev_err(bdc->dev, "command processor busy: %x\n", cmd_status);
+ return -EBUSY;
+ }
+ ret = bdc_issue_cmd(bdc, cmd_sc, param0, param1, param2);
+ switch (ret) {
+ case BDC_CMDS_SUCC:
+ dev_dbg(bdc->dev, "command completed successfully\n");
+ ret = 0;
+ break;
+
+ case BDC_CMDS_PARA:
+ dev_err(bdc->dev, "command parameter error\n");
+ ret = -EINVAL;
+ break;
+
+ case BDC_CMDS_STAT:
+ dev_err(bdc->dev, "Invalid device/ep state\n");
+ ret = -EINVAL;
+ break;
+
+ case BDC_CMDS_FAIL:
+ dev_err(bdc->dev, "Command failed?\n");
+ ret = -EAGAIN;
+ break;
+
+ case BDC_CMDS_INTL:
+ dev_err(bdc->dev, "BDC Internal error\n");
+ reset_bdc = 1;
+ ret = -ECONNRESET;
+ break;
+
+ case BDC_CMDS_BUSY:
+ dev_err(bdc->dev,
+ "command timedout waited for %dusec\n",
+ BDC_CMD_TIMEOUT);
+ reset_bdc = 1;
+ ret = -ECONNRESET;
+ break;
+ default:
+ dev_dbg(bdc->dev, "Unknown command completion code:%x\n", ret);
+ }
+
+ return ret;
+}
+
+/* Deconfigure the endpoint from HW */
+int bdc_dconfig_ep(struct bdc *bdc, struct bdc_ep *ep)
+{
+ u32 cmd_sc;
+
+ cmd_sc = BDC_SUB_CMD_DRP_EP|BDC_CMD_EPN(ep->ep_num)|BDC_CMD_EPC;
+ dev_dbg(bdc->dev, "%s ep->ep_num =%d cmd_sc=%x\n", __func__,
+ ep->ep_num, cmd_sc);
+
+ return bdc_submit_cmd(bdc, cmd_sc, 0, 0, 0);
+}
+
+/* Reinitialize the bdlist after config ep command */
+static void ep_bd_list_reinit(struct bdc_ep *ep)
+{
+ struct bdc *bdc = ep->bdc;
+ struct bdc_bd *bd;
+
+ ep->bd_list.eqp_bdi = 0;
+ ep->bd_list.hwd_bdi = 0;
+ bd = ep->bd_list.bd_table_array[0]->start_bd;
+ dev_dbg(bdc->dev, "%s ep:%p bd:%p\n", __func__, ep, bd);
+ memset(bd, 0, sizeof(struct bdc_bd));
+ bd->offset[3] |= cpu_to_le32(BD_SBF);
+}
+
+/* Configure an endpoint */
+int bdc_config_ep(struct bdc *bdc, struct bdc_ep *ep)
+{
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
+ const struct usb_endpoint_descriptor *desc;
+ u32 param0, param1, param2, cmd_sc;
+ u32 mps, mbs, mul, si;
+ int ret;
+
+ desc = ep->desc;
+ comp_desc = ep->comp_desc;
+ cmd_sc = mul = mbs = param2 = 0;
+ param0 = lower_32_bits(ep->bd_list.bd_table_array[0]->dma);
+ param1 = upper_32_bits(ep->bd_list.bd_table_array[0]->dma);
+ cpu_to_le32s(&param0);
+ cpu_to_le32s(&param1);
+
+ dev_dbg(bdc->dev, "%s: param0=%08x param1=%08x",
+ __func__, param0, param1);
+ si = desc->bInterval;
+ si = clamp_val(si, 1, 16) - 1;
+
+ mps = usb_endpoint_maxp(desc);
+ mps &= 0x7ff;
+ param2 |= mps << MP_SHIFT;
+ param2 |= usb_endpoint_type(desc) << EPT_SHIFT;
+
+ switch (bdc->gadget.speed) {
+ case USB_SPEED_SUPER:
+ if (usb_endpoint_xfer_int(desc) ||
+ usb_endpoint_xfer_isoc(desc)) {
+ param2 |= si;
+ if (usb_endpoint_xfer_isoc(desc) && comp_desc)
+ mul = comp_desc->bmAttributes;
+
+ }
+ param2 |= mul << EPM_SHIFT;
+ if (comp_desc)
+ mbs = comp_desc->bMaxBurst;
+ param2 |= mbs << MB_SHIFT;
+ break;
+
+ case USB_SPEED_HIGH:
+ if (usb_endpoint_xfer_isoc(desc) ||
+ usb_endpoint_xfer_int(desc)) {
+ param2 |= si;
+
+ mbs = (usb_endpoint_maxp(desc) & 0x1800) >> 11;
+ param2 |= mbs << MB_SHIFT;
+ }
+ break;
+
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+ /* the hardware accepts SI in 125usec range */
+ if (usb_endpoint_xfer_isoc(desc))
+ si += 3;
+
+ /*
+ * FS Int endpoints can have si of 1-255ms but the controller
+ * accepts 2^bInterval*125usec, so convert ms to nearest power
+ * of 2
+ */
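+ /*
+ * Illustrative examples: bInterval = 32 ms -> fls(256) - 1 = 8
+ * -> 2^8 * 125 us = 32 ms; bInterval = 10 ms -> fls(80) - 1 = 6
+ * -> 2^6 * 125 us = 8 ms.
+ */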
+ if (usb_endpoint_xfer_int(desc))
+ si = fls(desc->bInterval * 8) - 1;
+
+ param2 |= si;
+ break;
+ default:
+ dev_err(bdc->dev, "UNKNOWN speed ERR\n");
+ return -EINVAL;
+ }
+
+ cmd_sc |= BDC_CMD_EPC|BDC_CMD_EPN(ep->ep_num)|BDC_SUB_CMD_ADD_EP;
+
+ dev_dbg(bdc->dev, "cmd_sc=%x param2=%08x\n", cmd_sc, param2);
+ ret = bdc_submit_cmd(bdc, cmd_sc, param0, param1, param2);
+ if (ret) {
+ dev_err(bdc->dev, "command failed :%x\n", ret);
+ return ret;
+ }
+ ep_bd_list_reinit(ep);
+
+ return ret;
+}
+
+/*
+ * Change the HW deq pointer; if this command is successful, HW will start
+ * fetching the next bd from address dma_addr.
+ */
+int bdc_ep_bla(struct bdc *bdc, struct bdc_ep *ep, dma_addr_t dma_addr)
+{
+ u32 param0, param1;
+ u32 cmd_sc = 0;
+
+ dev_dbg(bdc->dev, "%s: add=%08llx\n", __func__,
+ (unsigned long long)(dma_addr));
+ param0 = lower_32_bits(dma_addr);
+ param1 = upper_32_bits(dma_addr);
+ cpu_to_le32s(&param0);
+ cpu_to_le32s(&param1);
+
+ cmd_sc |= BDC_CMD_EPN(ep->ep_num)|BDC_CMD_BLA;
+ dev_dbg(bdc->dev, "cmd_sc=%x\n", cmd_sc);
+
+ return bdc_submit_cmd(bdc, cmd_sc, param0, param1, 0);
+}
+
+/* Set the address sent by the host in the SET_ADDRESS request */
+int bdc_address_device(struct bdc *bdc, u32 add)
+{
+ u32 cmd_sc = 0;
+ u32 param2;
+
+ dev_dbg(bdc->dev, "%s: add=%d\n", __func__, add);
+ cmd_sc |= BDC_SUB_CMD_ADD|BDC_CMD_DVC;
+ param2 = add & 0x7f;
+
+ return bdc_submit_cmd(bdc, cmd_sc, 0, 0, param2);
+}
+
+/* Send a Function Wake notification packet using FH command */
+int bdc_function_wake_fh(struct bdc *bdc, u8 intf)
+{
+ u32 param0, param1;
+ u32 cmd_sc = 0;
+
+ param0 = param1 = 0;
+ dev_dbg(bdc->dev, "%s intf=%d\n", __func__, intf);
+ cmd_sc |= BDC_CMD_FH;
+ param0 |= TRA_PACKET;
+ param0 |= (bdc->dev_addr << 25);
+ param1 |= DEV_NOTF_TYPE;
+ param1 |= (FWK_SUBTYPE<<4);
+ dev_dbg(bdc->dev, "param0=%08x param1=%08x\n", param0, param1);
+
+ return bdc_submit_cmd(bdc, cmd_sc, param0, param1, 0);
+}
+
+/* Send a Function Wake notification packet using DNC command */
+int bdc_function_wake(struct bdc *bdc, u8 intf)
+{
+ u32 cmd_sc = 0;
+ u32 param2 = 0;
+
+ dev_dbg(bdc->dev, "%s intf=%d", __func__, intf);
+ param2 |= intf;
+ cmd_sc |= BDC_SUB_CMD_FWK|BDC_CMD_DNC;
+
+ return bdc_submit_cmd(bdc, cmd_sc, 0, 0, param2);
+}
+
+/* Stall the endpoint */
+int bdc_ep_set_stall(struct bdc *bdc, int epnum)
+{
+ u32 cmd_sc = 0;
+
+ dev_dbg(bdc->dev, "%s epnum=%d\n", __func__, epnum);
+ /* issue a stall endpoint command */
+ cmd_sc |= BDC_SUB_CMD_EP_STL | BDC_CMD_EPN(epnum) | BDC_CMD_EPO;
+
+ return bdc_submit_cmd(bdc, cmd_sc, 0, 0, 0);
+}
+
+/* resets the endpoint, called when host sends CLEAR_FEATURE(HALT) */
+int bdc_ep_clear_stall(struct bdc *bdc, int epnum)
+{
+ struct bdc_ep *ep;
+ u32 cmd_sc = 0;
+ int ret;
+
+ dev_dbg(bdc->dev, "%s: epnum=%d\n", __func__, epnum);
+ ep = bdc->bdc_ep_array[epnum];
+ /*
+ * If the endpoint is not already stalled, then stall it and issue a clear
+ * stall; this will reset the seq number for non-EP0 endpoints.
+ */
+ if (epnum != 1) {
+ /* if the endpoint is not stalled */
+ if (!(ep->flags & BDC_EP_STALL)) {
+ ret = bdc_ep_set_stall(bdc, epnum);
+ if (ret)
+ return ret;
+ }
+ }
+ /* Preserve the seq number for ep0 only */
+ if (epnum != 1)
+ cmd_sc |= BDC_CMD_EPO_RST_SN;
+
+ /* issue a reset endpoint command */
+ cmd_sc |= BDC_SUB_CMD_EP_RST | BDC_CMD_EPN(epnum) | BDC_CMD_EPO;
+
+ ret = bdc_submit_cmd(bdc, cmd_sc, 0, 0, 0);
+ if (ret) {
+ dev_err(bdc->dev, "command failed:%x\n", ret);
+ return ret;
+ }
+ bdc_notify_xfr(bdc, epnum);
+
+ return ret;
+}
+
+/* Stop the endpoint, called when software wants to dequeue some request */
+int bdc_stop_ep(struct bdc *bdc, int epnum)
+{
+ struct bdc_ep *ep;
+ u32 cmd_sc = 0;
+ int ret;
+
+ ep = bdc->bdc_ep_array[epnum];
+ dev_dbg(bdc->dev, "%s: ep:%s ep->flags:%08x\n", __func__,
+ ep->name, ep->flags);
+ /* Endpoint has to be in running state to execute stop ep command */
+ if (!(ep->flags & BDC_EP_ENABLED)) {
+ dev_err(bdc->dev, "stop endpoint called for disabled ep\n");
+ return -EINVAL;
+ }
+ if ((ep->flags & BDC_EP_STALL) || (ep->flags & BDC_EP_STOP))
+ return 0;
+
+ /* issue a stop endpoint command */
+ cmd_sc |= BDC_CMD_EP0_XSD | BDC_SUB_CMD_EP_STP
+ | BDC_CMD_EPN(epnum) | BDC_CMD_EPO;
+
+ ret = bdc_submit_cmd(bdc, cmd_sc, 0, 0, 0);
+ if (ret) {
+ dev_err(bdc->dev,
+ "stop endpoint command didn't complete:%d ep:%s\n",
+ ret, ep->name);
+ return ret;
+ }
+ ep->flags |= BDC_EP_STOP;
+ bdc_dump_epsts(bdc);
+
+ return ret;
+}
diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.h b/drivers/usb/gadget/udc/bdc/bdc_cmd.h
new file mode 100644
index 000000000000..61d0e3bf9853
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.h
@@ -0,0 +1,29 @@
+/*
+ * bdc_cmd.h - header for the BDC command operations
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __LINUX_BDC_CMD_H__
+#define __LINUX_BDC_CMD_H__
+
+/* Command operations */
+int bdc_address_device(struct bdc *, u32);
+int bdc_config_ep(struct bdc *, struct bdc_ep *);
+int bdc_dconfig_ep(struct bdc *, struct bdc_ep *);
+int bdc_stop_ep(struct bdc *, int);
+int bdc_ep_set_stall(struct bdc *, int);
+int bdc_ep_clear_stall(struct bdc *, int);
+int bdc_ep_set_halt(struct bdc_ep *, u32 , int);
+int bdc_ep_bla(struct bdc *, struct bdc_ep *, dma_addr_t);
+int bdc_function_wake(struct bdc*, u8);
+int bdc_function_wake_fh(struct bdc*, u8);
+
+#endif /* __LINUX_BDC_CMD_H__ */
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
new file mode 100644
index 000000000000..c6dfef8c7bbc
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -0,0 +1,533 @@
+/*
+ * bdc_core.c - BRCM BDC USB3.0 device controller core operations
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/of.h>
+#include <linux/moduleparam.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "bdc.h"
+#include "bdc_dbg.h"
+
+/* Poll till controller status is not OIP */
+static int poll_oip(struct bdc *bdc, int usec)
+{
+ u32 status;
+ /* Poll till STS!= OIP */
+ while (usec) {
+ status = bdc_readl(bdc->regs, BDC_BDCSC);
+ if (BDC_CSTS(status) != BDC_OIP) {
+ dev_dbg(bdc->dev,
+ "poll_oip complete status=%d",
+ BDC_CSTS(status));
+ return 0;
+ }
+ udelay(10);
+ usec -= 10;
+ }
+ dev_err(bdc->dev, "Err: operation timedout BDCSC: 0x%08x\n", status);
+
+ return -ETIMEDOUT;
+}
+
+/* Stop the BDC controller */
+int bdc_stop(struct bdc *bdc)
+{
+ int ret;
+ u32 temp;
+
+ dev_dbg(bdc->dev, "%s ()\n\n", __func__);
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ /* Check if BDC is already halted */
+ if (BDC_CSTS(temp) == BDC_HLT) {
+ dev_vdbg(bdc->dev, "BDC already halted\n");
+ return 0;
+ }
+ temp &= ~BDC_COP_MASK;
+ temp |= BDC_COS|BDC_COP_STP;
+ bdc_writel(bdc->regs, BDC_BDCSC, temp);
+
+ ret = poll_oip(bdc, BDC_COP_TIMEOUT);
+ if (ret)
+ dev_err(bdc->dev, "bdc stop operation failed");
+
+ return ret;
+}
+
+/* Issue a reset to BDC controller */
+int bdc_reset(struct bdc *bdc)
+{
+ u32 temp;
+ int ret;
+
+ dev_dbg(bdc->dev, "%s ()\n", __func__);
+ /* First halt the controller */
+ ret = bdc_stop(bdc);
+ if (ret)
+ return ret;
+
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ temp &= ~BDC_COP_MASK;
+ temp |= BDC_COS|BDC_COP_RST;
+ bdc_writel(bdc->regs, BDC_BDCSC, temp);
+ ret = poll_oip(bdc, BDC_COP_TIMEOUT);
+ if (ret)
+ dev_err(bdc->dev, "bdc reset operation failed");
+
+ return ret;
+}
+
+/* Run the BDC controller */
+int bdc_run(struct bdc *bdc)
+{
+ u32 temp;
+ int ret;
+
+ dev_dbg(bdc->dev, "%s ()\n", __func__);
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ /* if BDC is already in running state then do not do anything */
+ if (BDC_CSTS(temp) == BDC_NOR) {
+ dev_warn(bdc->dev, "bdc is already in running state\n");
+ return 0;
+ }
+ temp &= ~BDC_COP_MASK;
+ temp |= BDC_COP_RUN;
+ temp |= BDC_COS;
+ bdc_writel(bdc->regs, BDC_BDCSC, temp);
+ ret = poll_oip(bdc, BDC_COP_TIMEOUT);
+ if (ret) {
+ dev_err(bdc->dev, "bdc run operation failed:%d", ret);
+ return ret;
+ }
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ if (BDC_CSTS(temp) != BDC_NOR) {
+ dev_err(bdc->dev, "bdc not in normal mode after RUN op :%d\n",
+ BDC_CSTS(temp));
+ return -ESHUTDOWN;
+ }
+
+ return 0;
+}
+
+/*
+ * Present the termination to the host, typically called from upstream port
+ * event with Vbus present =1
+ */
+void bdc_softconn(struct bdc *bdc)
+{
+ u32 uspc;
+
+ uspc = bdc_readl(bdc->regs, BDC_USPC);
+ uspc &= ~BDC_PST_MASK;
+ uspc |= BDC_LINK_STATE_RX_DET;
+ uspc |= BDC_SWS;
+ dev_dbg(bdc->dev, "%s () uspc=%08x\n", __func__, uspc);
+ bdc_writel(bdc->regs, BDC_USPC, uspc);
+}
+
+/* Remove the termination */
+void bdc_softdisconn(struct bdc *bdc)
+{
+ u32 uspc;
+
+ uspc = bdc_readl(bdc->regs, BDC_USPC);
+ uspc |= BDC_SDC;
+ uspc &= ~BDC_SCN;
+ dev_dbg(bdc->dev, "%s () uspc=%x\n", __func__, uspc);
+ bdc_writel(bdc->regs, BDC_USPC, uspc);
+}
+
+/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
+static int scratchpad_setup(struct bdc *bdc)
+{
+ int sp_buff_size;
+ u32 low32;
+ u32 upp32;
+
+ sp_buff_size = BDC_SPB(bdc_readl(bdc->regs, BDC_BDCCFG0));
+ dev_dbg(bdc->dev, "%s() sp_buff_size=%d\n", __func__, sp_buff_size);
+ if (!sp_buff_size) {
+ dev_dbg(bdc->dev, "Scratchpad buffer not needed\n");
+ return 0;
+ }
+ /* Refer to BDC spec, Table 4 for description of SPB */
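+ /* e.g. (illustrative): SPB == 1 -> 1 << 6 == 64 bytes; SPB == 7 -> 4096 bytes */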
+ sp_buff_size = 1 << (sp_buff_size + 5);
+ dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size);
+ bdc->scratchpad.buff = dma_zalloc_coherent(bdc->dev, sp_buff_size,
+ &bdc->scratchpad.sp_dma, GFP_KERNEL);
+
+ if (!bdc->scratchpad.buff)
+ goto fail;
+
+ bdc->sp_buff_size = sp_buff_size;
+ bdc->scratchpad.size = sp_buff_size;
+ low32 = lower_32_bits(bdc->scratchpad.sp_dma);
+ upp32 = upper_32_bits(bdc->scratchpad.sp_dma);
+ cpu_to_le32s(&low32);
+ cpu_to_le32s(&upp32);
+ bdc_writel(bdc->regs, BDC_SPBBAL, low32);
+ bdc_writel(bdc->regs, BDC_SPBBAH, upp32);
+ return 0;
+
+fail:
+ bdc->scratchpad.buff = NULL;
+
+ return -ENOMEM;
+}
+
+/* Allocate the status report ring */
+static int setup_srr(struct bdc *bdc, int interrupter)
+{
+ dev_dbg(bdc->dev, "%s() NUM_SR_ENTRIES:%d\n", __func__, NUM_SR_ENTRIES);
+ /* Reset the SRR */
+ bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST);
+ bdc->srr.dqp_index = 0;
+ /* allocate the status report descriptors */
+ bdc->srr.sr_bds = dma_zalloc_coherent(
+ bdc->dev,
+ NUM_SR_ENTRIES * sizeof(struct bdc_bd),
+ &bdc->srr.dma_addr,
+ GFP_KERNEL);
+ if (!bdc->srr.sr_bds)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/* Initialize the HW regs and internal data structures */
+static void bdc_mem_init(struct bdc *bdc, bool reinit)
+{
+ u8 size = 0;
+ u32 usb2_pm;
+ u32 low32;
+ u32 upp32;
+ u32 temp;
+
+ dev_dbg(bdc->dev, "%s ()\n", __func__);
+ bdc->ep0_state = WAIT_FOR_SETUP;
+ bdc->dev_addr = 0;
+ bdc->srr.eqp_index = 0;
+ bdc->srr.dqp_index = 0;
+ bdc->zlp_needed = false;
+ bdc->delayed_status = false;
+
+ bdc_writel(bdc->regs, BDC_SPBBAL, bdc->scratchpad.sp_dma);
+ /* Init the SRR */
+ temp = BDC_SRR_RWS | BDC_SRR_RST;
+ /* Reset the SRR */
+ bdc_writel(bdc->regs, BDC_SRRINT(0), temp);
+ dev_dbg(bdc->dev, "bdc->srr.sr_bds =%p\n", bdc->srr.sr_bds);
+ temp = lower_32_bits(bdc->srr.dma_addr);
+ size = fls(NUM_SR_ENTRIES) - 2;
+ temp |= size;
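+ /* e.g. fls(64) - 2 == 5 here; this size code shares SRRBAL(0) with the low address bits */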
+ dev_dbg(bdc->dev, "SRRBAL[0]=%08x NUM_SR_ENTRIES:%d size:%d\n",
+ temp, NUM_SR_ENTRIES, size);
+
+ low32 = lower_32_bits(temp);
+ upp32 = upper_32_bits(bdc->srr.dma_addr);
+ cpu_to_le32s(&low32);
+ cpu_to_le32s(&upp32);
+
+ /* Write the dma addresses into regs*/
+ bdc_writel(bdc->regs, BDC_SRRBAL(0), low32);
+ bdc_writel(bdc->regs, BDC_SRRBAH(0), upp32);
+
+ temp = bdc_readl(bdc->regs, BDC_SRRINT(0));
+ temp |= BDC_SRR_IE;
+ temp &= ~(BDC_SRR_RST | BDC_SRR_RWS);
+ bdc_writel(bdc->regs, BDC_SRRINT(0), temp);
+
+ /* Set the Interrupt Coalescence ~500 usec */
+ temp = bdc_readl(bdc->regs, BDC_INTCTLS(0));
+ temp &= ~0xffff;
+ temp |= INT_CLS;
+ bdc_writel(bdc->regs, BDC_INTCTLS(0), temp);
+
+ usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
+ dev_dbg(bdc->dev, "usb2_pm=%08x", usb2_pm);
+ /* Enable hardware LPM Enable */
+ usb2_pm |= BDC_HLE;
+ bdc_writel(bdc->regs, BDC_USPPM2, usb2_pm);
+
+ /* readback for debug */
+ usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
+ dev_dbg(bdc->dev, "usb2_pm=%08x\n", usb2_pm);
+
+ /* Disable any unwanted SR's on SRR */
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ /* We don't want Microframe counter wrap SR */
+ temp |= BDC_MASK_MCW;
+ bdc_writel(bdc->regs, BDC_BDCSC, temp);
+
+ /*
+ * In some error cases, the driver has to reset the entire BDC controller;
+ * in that case reinit is passed as true
+ */
+ if (reinit) {
+ /* Enable interrupts */
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ temp |= BDC_GIE;
+ bdc_writel(bdc->regs, BDC_BDCSC, temp);
+ /* Init scratchpad to 0 */
+ memset(bdc->scratchpad.buff, 0, bdc->sp_buff_size);
+ /* Initialize SRR to 0 */
+ memset(bdc->srr.sr_bds, 0,
+ NUM_SR_ENTRIES * sizeof(struct bdc_bd));
+ } else {
+ /* One time initialization only */
+ /* Enable status report function pointers */
+ bdc->sr_handler[0] = bdc_sr_xsf;
+ bdc->sr_handler[1] = bdc_sr_uspc;
+
+ /* EP0 status report function pointers */
+ bdc->sr_xsf_ep0[0] = bdc_xsf_ep0_setup_recv;
+ bdc->sr_xsf_ep0[1] = bdc_xsf_ep0_data_start;
+ bdc->sr_xsf_ep0[2] = bdc_xsf_ep0_status_start;
+ }
+}
+
+/* Free the dynamic memory */
+static void bdc_mem_free(struct bdc *bdc)
+{
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ /* Free SRR */
+ if (bdc->srr.sr_bds)
+ dma_free_coherent(bdc->dev,
+ NUM_SR_ENTRIES * sizeof(struct bdc_bd),
+ bdc->srr.sr_bds, bdc->srr.dma_addr);
+
+ /* Free scratchpad */
+ if (bdc->scratchpad.buff)
+ dma_free_coherent(bdc->dev, bdc->sp_buff_size,
+ bdc->scratchpad.buff, bdc->scratchpad.sp_dma);
+
+ /* Destroy the dma pools */
+ if (bdc->bd_table_pool)
+ dma_pool_destroy(bdc->bd_table_pool);
+
+ /* Free the bdc_ep array */
+ kfree(bdc->bdc_ep_array);
+
+ bdc->srr.sr_bds = NULL;
+ bdc->scratchpad.buff = NULL;
+ bdc->bd_table_pool = NULL;
+ bdc->bdc_ep_array = NULL;
+}
+
+/*
+ * bdc_reinit resets the controller and reinitializes the registers; it is
+ * called from disconnect/bus reset scenarios to ensure proper HW cleanup
+ */
+int bdc_reinit(struct bdc *bdc)
+{
+ int ret;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ ret = bdc_stop(bdc);
+ if (ret)
+ goto out;
+
+ ret = bdc_reset(bdc);
+ if (ret)
+ goto out;
+
+ /* the reinit flag is 1 */
+ bdc_mem_init(bdc, true);
+ ret = bdc_run(bdc);
+out:
+ bdc->reinit = false;
+
+ return ret;
+}
+
+/* Allocate all the dynamic memory */
+static int bdc_mem_alloc(struct bdc *bdc)
+{
+ u32 page_size;
+ unsigned int num_ieps, num_oeps;
+
+ dev_dbg(bdc->dev,
+ "%s() NUM_BDS_PER_TABLE:%d\n", __func__,
+ NUM_BDS_PER_TABLE);
+ page_size = BDC_PGS(bdc_readl(bdc->regs, BDC_BDCCFG0));
+ /* page size is 2^pgs KB */
+ page_size = 1 << page_size;
+ /* KB */
+ page_size <<= 10;
+ dev_dbg(bdc->dev, "page_size=%d\n", page_size);
+
+ /* Create a pool of bd tables */
+ bdc->bd_table_pool =
+ dma_pool_create("BDC BD tables", bdc->dev, NUM_BDS_PER_TABLE * 16,
+ 16, page_size);
+
+ if (!bdc->bd_table_pool)
+ goto fail;
+
+ if (scratchpad_setup(bdc))
+ goto fail;
+
+ /* read from regs */
+ num_ieps = NUM_NCS(bdc_readl(bdc->regs, BDC_FSCNIC));
+ num_oeps = NUM_NCS(bdc_readl(bdc->regs, BDC_FSCNOC));
+ /* +2: 1 for ep0 and the other is rsvd i.e. bdc_ep[0] is rsvd */
+ bdc->num_eps = num_ieps + num_oeps + 2;
+ dev_dbg(bdc->dev,
+ "ieps:%d eops:%d num_eps:%d\n",
+ num_ieps, num_oeps, bdc->num_eps);
+ /* allocate array of ep pointers */
+ bdc->bdc_ep_array = kcalloc(bdc->num_eps, sizeof(struct bdc_ep *),
+ GFP_KERNEL);
+ if (!bdc->bdc_ep_array)
+ goto fail;
+
+ dev_dbg(bdc->dev, "Allocating sr report0\n");
+ if (setup_srr(bdc, 0))
+ goto fail;
+
+ return 0;
+fail:
+ dev_warn(bdc->dev, "Couldn't initialize memory\n");
+ bdc_mem_free(bdc);
+
+ return -ENOMEM;
+}
+
+/* opposite to bdc_hw_init */
+static void bdc_hw_exit(struct bdc *bdc)
+{
+ dev_dbg(bdc->dev, "%s ()\n", __func__);
+ bdc_mem_free(bdc);
+}
+
+/* Initialize the bdc HW and memory */
+static int bdc_hw_init(struct bdc *bdc)
+{
+ int ret;
+
+ dev_dbg(bdc->dev, "%s ()\n", __func__);
+ ret = bdc_reset(bdc);
+ if (ret) {
+ dev_err(bdc->dev, "err resetting bdc abort bdc init%d\n", ret);
+ return ret;
+ }
+ ret = bdc_mem_alloc(bdc);
+ if (ret) {
+ dev_err(bdc->dev, "Mem alloc failed, aborting\n");
+ return -ENOMEM;
+ }
+ bdc_mem_init(bdc, 0);
+ bdc_dbg_regs(bdc);
+ dev_dbg(bdc->dev, "HW Init done\n");
+
+ return 0;
+}
+
+static int bdc_probe(struct platform_device *pdev)
+{
+ struct bdc *bdc;
+ struct resource *res;
+ int ret = -ENOMEM;
+ int irq;
+ u32 temp;
+ struct device *dev = &pdev->dev;
+
+ dev_dbg(dev, "%s()\n", __func__);
+ bdc = devm_kzalloc(dev, sizeof(*bdc), GFP_KERNEL);
+ if (!bdc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ bdc->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(bdc->regs)) {
+ dev_err(dev, "ioremap error\n");
+ return -ENOMEM;
+ }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "platform_get_irq failed:%d\n", irq);
+ return irq;
+ }
+ spin_lock_init(&bdc->lock);
+ platform_set_drvdata(pdev, bdc);
+ bdc->irq = irq;
+ bdc->dev = dev;
+ dev_dbg(bdc->dev, "bdc->regs: %p irq=%d\n", bdc->regs, bdc->irq);
+
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ if ((temp & BDC_P64) &&
+ !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
+ dev_dbg(bdc->dev, "Using 64-bit address\n");
+ } else {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(bdc->dev, "No suitable DMA config available, abort\n");
+ return -ENOTSUPP;
+ }
+ dev_dbg(bdc->dev, "Using 32-bit address\n");
+ }
+ ret = bdc_hw_init(bdc);
+ if (ret) {
+ dev_err(bdc->dev, "BDC init failure:%d\n", ret);
+ return ret;
+ }
+ ret = bdc_udc_init(bdc);
+ if (ret) {
+ dev_err(bdc->dev, "BDC Gadget init failure:%d\n", ret);
+ goto cleanup;
+ }
+ return 0;
+
+cleanup:
+ bdc_hw_exit(bdc);
+
+ return ret;
+}
+
+static int bdc_remove(struct platform_device *pdev)
+{
+ struct bdc *bdc;
+
+ bdc = platform_get_drvdata(pdev);
+ dev_dbg(bdc->dev, "%s ()\n", __func__);
+ bdc_udc_exit(bdc);
+ bdc_hw_exit(bdc);
+
+ return 0;
+}
+
+static struct platform_driver bdc_driver = {
+ .driver = {
+ .name = BRCM_BDC_NAME,
+ .owner = THIS_MODULE
+ },
+ .probe = bdc_probe,
+ .remove = bdc_remove,
+};
+
+module_platform_driver(bdc_driver);
+MODULE_AUTHOR("Ashwini Pahuja <ashwini.linux@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(BRCM_BDC_DESC);
diff --git a/drivers/usb/gadget/udc/bdc/bdc_dbg.c b/drivers/usb/gadget/udc/bdc/bdc_dbg.c
new file mode 100644
index 000000000000..5945dbc47825
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_dbg.c
@@ -0,0 +1,123 @@
+/*
+ * bdc_dbg.c - BRCM BDC USB3.0 device controller debug functions
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "bdc.h"
+#include "bdc_dbg.h"
+
+void bdc_dbg_regs(struct bdc *bdc)
+{
+ u32 temp;
+
+ dev_vdbg(bdc->dev, "bdc->regs:%p\n", bdc->regs);
+ temp = bdc_readl(bdc->regs, BDC_BDCCFG0);
+ dev_vdbg(bdc->dev, "bdccfg0:0x%08x\n", temp);
+ temp = bdc_readl(bdc->regs, BDC_BDCCFG1);
+ dev_vdbg(bdc->dev, "bdccfg1:0x%08x\n", temp);
+ temp = bdc_readl(bdc->regs, BDC_BDCCAP0);
+ dev_vdbg(bdc->dev, "bdccap0:0x%08x\n", temp);
+ temp = bdc_readl(bdc->regs, BDC_BDCCAP1);
+ dev_vdbg(bdc->dev, "bdccap1:0x%08x\n", temp);
+ temp = bdc_readl(bdc->regs, BDC_USPC);
+ dev_vdbg(bdc->dev, "uspc:0x%08x\n", temp);
+ temp = bdc_readl(bdc->regs, BDC_DVCSA);
+ dev_vdbg(bdc->dev, "dvcsa:0x%08x\n", temp);
+ temp = bdc_readl(bdc->regs, BDC_DVCSB);
+ dev_vdbg(bdc->dev, "dvcsb:0x%x08\n", temp);
+}
+
+void bdc_dump_epsts(struct bdc *bdc)
+{
+ u32 temp;
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS0(0));
+ dev_vdbg(bdc->dev, "BDC_EPSTS0:0x%08x\n", temp);
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS1(0));
+ dev_vdbg(bdc->dev, "BDC_EPSTS1:0x%x\n", temp);
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS2(0));
+ dev_vdbg(bdc->dev, "BDC_EPSTS2:0x%08x\n", temp);
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS3(0));
+ dev_vdbg(bdc->dev, "BDC_EPSTS3:0x%08x\n", temp);
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS4(0));
+ dev_vdbg(bdc->dev, "BDC_EPSTS4:0x%08x\n", temp);
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS5(0));
+ dev_vdbg(bdc->dev, "BDC_EPSTS5:0x%08x\n", temp);
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS6(0));
+ dev_vdbg(bdc->dev, "BDC_EPSTS6:0x%08x\n", temp);
+
+ temp = bdc_readl(bdc->regs, BDC_EPSTS7(0));
+ dev_vdbg(bdc->dev, "BDC_EPSTS7:0x%08x\n", temp);
+}
+
+void bdc_dbg_srr(struct bdc *bdc, u32 srr_num)
+{
+ struct bdc_sr *sr;
+ dma_addr_t addr;
+ int i;
+
+ sr = bdc->srr.sr_bds;
+ addr = bdc->srr.dma_addr;
+ dev_vdbg(bdc->dev, "bdc_dbg_srr sr:%p dqp_index:%d\n",
+ sr, bdc->srr.dqp_index);
+ for (i = 0; i < NUM_SR_ENTRIES; i++) {
+ sr = &bdc->srr.sr_bds[i];
+ dev_vdbg(bdc->dev, "%llx %08x %08x %08x %08x\n",
+ (unsigned long long)addr,
+ le32_to_cpu(sr->offset[0]),
+ le32_to_cpu(sr->offset[1]),
+ le32_to_cpu(sr->offset[2]),
+ le32_to_cpu(sr->offset[3]));
+ addr += sizeof(*sr);
+ }
+}
+
+void bdc_dbg_bd_list(struct bdc *bdc, struct bdc_ep *ep)
+{
+ struct bd_list *bd_list = &ep->bd_list;
+ struct bd_table *bd_table;
+ struct bdc_bd *bd;
+ int tbi, bdi, gbdi;
+ dma_addr_t dma;
+
+ gbdi = 0;
+ dev_vdbg(bdc->dev,
+ "Dump bd list for %s epnum:%d\n",
+ ep->name, ep->ep_num);
+
+ dev_vdbg(bdc->dev,
+ "tabs:%d max_bdi:%d eqp_bdi:%d hwd_bdi:%d num_bds_table:%d\n",
+ bd_list->num_tabs, bd_list->max_bdi, bd_list->eqp_bdi,
+ bd_list->hwd_bdi, bd_list->num_bds_table);
+
+ for (tbi = 0; tbi < bd_list->num_tabs; tbi++) {
+ bd_table = bd_list->bd_table_array[tbi];
+ for (bdi = 0; bdi < bd_list->num_bds_table; bdi++) {
+ bd = bd_table->start_bd + bdi;
+ dma = bd_table->dma + (sizeof(struct bdc_bd) * bdi);
+ dev_vdbg(bdc->dev,
+ "tbi:%2d bdi:%2d gbdi:%2d virt:%p phys:%llx %08x %08x %08x %08x\n",
+ tbi, bdi, gbdi++, bd, (unsigned long long)dma,
+ le32_to_cpu(bd->offset[0]),
+ le32_to_cpu(bd->offset[1]),
+ le32_to_cpu(bd->offset[2]),
+ le32_to_cpu(bd->offset[3]));
+ }
+ dev_vdbg(bdc->dev, "\n\n");
+ }
+}
diff --git a/drivers/usb/gadget/udc/bdc/bdc_dbg.h b/drivers/usb/gadget/udc/bdc/bdc_dbg.h
new file mode 100644
index 000000000000..338a6c701315
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_dbg.h
@@ -0,0 +1,37 @@
+/*
+ * bdc_dbg.h - header for the BDC debug functions
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __LINUX_BDC_DBG_H__
+#define __LINUX_BDC_DBG_H__
+
+#include "bdc.h"
+
+#ifdef CONFIG_USB_GADGET_VERBOSE
+void bdc_dbg_bd_list(struct bdc *, struct bdc_ep*);
+void bdc_dbg_srr(struct bdc *, u32);
+void bdc_dbg_regs(struct bdc *);
+void bdc_dump_epsts(struct bdc *);
+#else
+static inline void bdc_dbg_regs(struct bdc *bdc)
+{ }
+
+static inline void bdc_dbg_srr(struct bdc *bdc, u32 srr_num)
+{ }
+
+static inline void bdc_dbg_bd_list(struct bdc *bdc, struct bdc_ep *ep)
+{ }
+
+static inline void bdc_dump_epsts(struct bdc *bdc)
+{ }
+#endif /* CONFIG_USB_GADGET_VERBOSE */
+#endif /* __LINUX_BDC_DBG_H__ */
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
new file mode 100644
index 000000000000..ff67ceac77c4
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -0,0 +1,2024 @@
+/*
+ * bdc_ep.c - BRCM BDC USB3.0 device controller endpoint related functions
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * Based on drivers under drivers/usb/
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/dmapool.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <asm/unaligned.h>
+#include <linux/platform_device.h>
+#include <linux/usb/composite.h>
+
+#include "bdc.h"
+#include "bdc_ep.h"
+#include "bdc_cmd.h"
+#include "bdc_dbg.h"
+
+static const char * const ep0_state_string[] = {
+ "WAIT_FOR_SETUP",
+ "WAIT_FOR_DATA_START",
+ "WAIT_FOR_DATA_XMIT",
+ "WAIT_FOR_STATUS_START",
+ "WAIT_FOR_STATUS_XMIT",
+ "STATUS_PENDING"
+};
+
+/* Free the bdl during ep disable */
+static void ep_bd_list_free(struct bdc_ep *ep, u32 num_tabs)
+{
+ struct bd_list *bd_list = &ep->bd_list;
+ struct bdc *bdc = ep->bdc;
+ struct bd_table *bd_table;
+ int index;
+
+ dev_dbg(bdc->dev, "%s ep:%s num_tabs:%d\n",
+ __func__, ep->name, num_tabs);
+
+ if (!bd_list->bd_table_array) {
+ dev_dbg(bdc->dev, "%s already freed\n", ep->name);
+ return;
+ }
+ for (index = 0; index < num_tabs; index++) {
+ /*
+ * Check if the bd_table struct is allocated; if yes, then check if the
+ * bd memory has been allocated, and then free the dma_pool and also the
+ * bd_table struct memory
+ */
+ bd_table = bd_list->bd_table_array[index];
+ dev_dbg(bdc->dev, "bd_table:%p index:%d\n", bd_table, index);
+ if (!bd_table) {
+ dev_dbg(bdc->dev, "bd_table not allocated\n");
+ continue;
+ }
+ if (!bd_table->start_bd) {
+ dev_dbg(bdc->dev, "bd dma pool not allocted\n");
+ continue;
+ }
+
+ dev_dbg(bdc->dev,
+ "Free dma pool start_bd:%p dma:%llx\n",
+ bd_table->start_bd,
+ (unsigned long long)bd_table->dma);
+
+ dma_pool_free(bdc->bd_table_pool,
+ bd_table->start_bd,
+ bd_table->dma);
+ /* Free the bd_table structure */
+ kfree(bd_table);
+ }
+ /* Free the bd table array */
+ kfree(ep->bd_list.bd_table_array);
+}
+
+/*
+ * Chain the tables by inserting a chain bd at the end of prev_table, pointing
+ * to next_table
+ */
+static inline void chain_table(struct bd_table *prev_table,
+ struct bd_table *next_table,
+ u32 bd_p_tab)
+{
+ /* Chain the prev table to next table */
+ prev_table->start_bd[bd_p_tab-1].offset[0] =
+ cpu_to_le32(lower_32_bits(next_table->dma));
+
+ prev_table->start_bd[bd_p_tab-1].offset[1] =
+ cpu_to_le32(upper_32_bits(next_table->dma));
+
+ prev_table->start_bd[bd_p_tab-1].offset[2] =
+ 0x0;
+
+ prev_table->start_bd[bd_p_tab-1].offset[3] =
+ cpu_to_le32(MARK_CHAIN_BD);
+}
+
+/* Allocate the bdl for ep, during config ep */
+static int ep_bd_list_alloc(struct bdc_ep *ep)
+{
+ struct bd_table *prev_table = NULL;
+ int index, num_tabs, bd_p_tab;
+ struct bdc *bdc = ep->bdc;
+ struct bd_table *bd_table;
+ dma_addr_t dma;
+
+ if (usb_endpoint_xfer_isoc(ep->desc))
+ num_tabs = NUM_TABLES_ISOCH;
+ else
+ num_tabs = NUM_TABLES;
+
+ bd_p_tab = NUM_BDS_PER_TABLE;
+ /* if there is only 1 table in bd list then loop chain to self */
+ dev_dbg(bdc->dev,
+ "%s ep:%p num_tabs:%d\n",
+ __func__, ep, num_tabs);
+
+ /* Allocate memory for table array */
+ ep->bd_list.bd_table_array = kzalloc(
+ num_tabs * sizeof(struct bd_table *),
+ GFP_ATOMIC);
+ if (!ep->bd_list.bd_table_array)
+ return -ENOMEM;
+
+ /* Allocate memory for each table */
+ for (index = 0; index < num_tabs; index++) {
+ /* Allocate memory for bd_table structure */
+ bd_table = kzalloc(sizeof(struct bd_table), GFP_ATOMIC);
+ if (!bd_table)
+ goto fail;
+
+ bd_table->start_bd = dma_pool_alloc(bdc->bd_table_pool,
+ GFP_ATOMIC,
+ &dma);
+ if (!bd_table->start_bd)
+ goto fail;
+
+ bd_table->dma = dma;
+
+ dev_dbg(bdc->dev,
+ "index:%d start_bd:%p dma=%08llx prev_table:%p\n",
+ index, bd_table->start_bd,
+ (unsigned long long)bd_table->dma, prev_table);
+
+ ep->bd_list.bd_table_array[index] = bd_table;
+ memset(bd_table->start_bd, 0, bd_p_tab * sizeof(struct bdc_bd));
+ if (prev_table)
+ chain_table(prev_table, bd_table, bd_p_tab);
+
+ prev_table = bd_table;
+ }
+ chain_table(prev_table, ep->bd_list.bd_table_array[0], bd_p_tab);
+ /* Memory allocation is successful, now init the internal fields */
+ ep->bd_list.num_tabs = num_tabs;
+ ep->bd_list.max_bdi = (num_tabs * bd_p_tab) - 1;
+ ep->bd_list.num_tabs = num_tabs;
+ ep->bd_list.num_bds_table = bd_p_tab;
+ ep->bd_list.eqp_bdi = 0;
+ ep->bd_list.hwd_bdi = 0;
+
+ return 0;
+fail:
+ /* Free the bd_table_array, bd_table struct, bd's */
+ ep_bd_list_free(ep, num_tabs);
+
+ return -ENOMEM;
+}
+
+/* returns how many bds are needed for this transfer */
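+/*
+ * Illustrative example: a 100000-byte request needs one full 65536-byte bd
+ * plus one 34464-byte bd, i.e. two bds; a zero-length request still needs one.
+ */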
+static inline int bd_needed_req(struct bdc_req *req)
+{
+ int bd_needed = 0;
+ int remaining;
+
+ /* 1 bd needed for 0 byte transfer */
+ if (req->usb_req.length == 0)
+ return 1;
+
+ /* remaining bytes after transferring all max-BD-size BDs */
+ remaining = req->usb_req.length % BD_MAX_BUFF_SIZE;
+ if (remaining)
+ bd_needed++;
+
+ /* How many maximum BUFF size BD's ? */
+ remaining = req->usb_req.length / BD_MAX_BUFF_SIZE;
+ bd_needed += remaining;
+
+ return bd_needed;
+}
+
+/* returns the bd index(bdi) corresponding to bd dma address */
+static int bd_add_to_bdi(struct bdc_ep *ep, dma_addr_t bd_dma_addr)
+{
+ struct bd_list *bd_list = &ep->bd_list;
+ dma_addr_t dma_first_bd, dma_last_bd;
+ struct bdc *bdc = ep->bdc;
+ struct bd_table *bd_table;
+ bool found = false;
+ int tbi, bdi;
+
+ dma_first_bd = dma_last_bd = 0;
+ dev_dbg(bdc->dev, "%s %llx\n",
+ __func__, (unsigned long long)bd_dma_addr);
+ /*
+ * Find which table this bd_dma_addr belongs to by going through the
+ * table array and comparing it with the first and last bd addresses of
+ * each table
+ */
+ for (tbi = 0; tbi < bd_list->num_tabs; tbi++) {
+ bd_table = bd_list->bd_table_array[tbi];
+ dma_first_bd = bd_table->dma;
+ dma_last_bd = bd_table->dma +
+ (sizeof(struct bdc_bd) *
+ (bd_list->num_bds_table - 1));
+ dev_dbg(bdc->dev, "dma_first_bd:%llx dma_last_bd:%llx\n",
+ (unsigned long long)dma_first_bd,
+ (unsigned long long)dma_last_bd);
+ if (bd_dma_addr >= dma_first_bd && bd_dma_addr <= dma_last_bd) {
+ found = true;
+ break;
+ }
+ }
+ if (unlikely(!found)) {
+ dev_err(bdc->dev, "%s FATAL err, bd not found\n", __func__);
+ return -EINVAL;
+ }
+ /* Now we know the table, find the bdi */
+ bdi = (bd_dma_addr - dma_first_bd) / sizeof(struct bdc_bd);
+
+ /* return the global bdi, to compare with ep eqp_bdi */
+ return (bdi + (tbi * bd_list->num_bds_table));
+}
+
+/* returns the table index(tbi) of the given bdi */
+static int bdi_to_tbi(struct bdc_ep *ep, int bdi)
+{
+ int tbi;
+
+ tbi = bdi / ep->bd_list.num_bds_table;
+ dev_vdbg(ep->bdc->dev,
+ "bdi:%d num_bds_table:%d tbi:%d\n",
+ bdi, ep->bd_list.num_bds_table, tbi);
+
+ return tbi;
+}
+
+/* Find the bdi of the last bd in the transfer */
+static inline int find_end_bdi(struct bdc_ep *ep, int next_hwd_bdi)
+{
+ int end_bdi;
+
+ end_bdi = next_hwd_bdi - 1;
+ if (end_bdi < 0)
+ end_bdi = ep->bd_list.max_bdi - 1;
+ else if ((end_bdi % (ep->bd_list.num_bds_table-1)) == 0)
+ end_bdi--;
+
+ return end_bdi;
+}
+
+/*
+ * How many transfer bds are available on this ep bdl; chain bds are not
+ * counted as available bds
+ */
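+/*
+ * Illustrative example with the default 2 tables x 32 bds: max_bdi is 63 and
+ * num_tabs is 2, so an empty list reports 63 - 2 = 61 usable bds.
+ */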
+static int bd_available_ep(struct bdc_ep *ep)
+{
+ struct bd_list *bd_list = &ep->bd_list;
+ int available1, available2;
+ struct bdc *bdc = ep->bdc;
+ int chain_bd1, chain_bd2;
+ int available_bd = 0;
+
+ available1 = available2 = chain_bd1 = chain_bd2 = 0;
+ /* if empty then we have all bd's available - number of chain bd's */
+ if (bd_list->eqp_bdi == bd_list->hwd_bdi)
+ return bd_list->max_bdi - bd_list->num_tabs;
+
+ /*
+ * Depending upon where the eqp and dqp pointers are, calculate the
+ * number of available bds
+ */
+ if (bd_list->hwd_bdi < bd_list->eqp_bdi) {
+ /* available bd's are from eqp..max_bds + 0..dqp - chain_bds */
+ available1 = bd_list->max_bdi - bd_list->eqp_bdi;
+ available2 = bd_list->hwd_bdi;
+ chain_bd1 = available1 / bd_list->num_bds_table;
+ chain_bd2 = available2 / bd_list->num_bds_table;
+ dev_vdbg(bdc->dev, "chain_bd1:%d chain_bd2:%d\n",
+ chain_bd1, chain_bd2);
+ available_bd = available1 + available2 - chain_bd1 - chain_bd2;
+ } else {
+ /* available bd's are from eqp..dqp - number of chain bd's */
+ available1 = bd_list->hwd_bdi - bd_list->eqp_bdi;
+ /* if gap between eqp and dqp is less than NUM_BDS_PER_TABLE */
+ if ((bd_list->hwd_bdi - bd_list->eqp_bdi)
+ <= bd_list->num_bds_table) {
+ /* If there is any chain bd in between */
+ if (!(bdi_to_tbi(ep, bd_list->hwd_bdi)
+ == bdi_to_tbi(ep, bd_list->eqp_bdi))) {
+ available_bd = available1 - 1;
+ }
+ } else {
+ chain_bd1 = available1 / bd_list->num_bds_table;
+ available_bd = available1 - chain_bd1;
+ }
+ }
+ /*
+ * we need to keep one extra bd to check if ring is full or empty so
+ * reduce by 1
+ */
+ available_bd--;
+ dev_vdbg(bdc->dev, "available_bd:%d\n", available_bd);
+
+ return available_bd;
+}
+
+/* Notify the hardware after queueing the bd to bdl */
+void bdc_notify_xfr(struct bdc *bdc, u32 epnum)
+{
+ struct bdc_ep *ep = bdc->bdc_ep_array[epnum];
+
+ dev_vdbg(bdc->dev, "%s epnum:%d\n", __func__, epnum);
+	/*
+	 * We don't have any way to check if the ep state is running,
+	 * except the software flags.
+	 */
+ if (unlikely(ep->flags & BDC_EP_STOP))
+ ep->flags &= ~BDC_EP_STOP;
+
+ bdc_writel(bdc->regs, BDC_XSFNTF, epnum);
+}
+
+/* returns the bd corresponding to bdi */
+static struct bdc_bd *bdi_to_bd(struct bdc_ep *ep, int bdi)
+{
+ int tbi = bdi_to_tbi(ep, bdi);
+ int local_bdi = 0;
+
+ local_bdi = bdi - (tbi * ep->bd_list.num_bds_table);
+ dev_vdbg(ep->bdc->dev,
+ "%s bdi:%d local_bdi:%d\n",
+ __func__, bdi, local_bdi);
+
+ return (ep->bd_list.bd_table_array[tbi]->start_bd + local_bdi);
+}
+
+/* Advance the enqueue pointer */
+static void ep_bdlist_eqp_adv(struct bdc_ep *ep)
+{
+ ep->bd_list.eqp_bdi++;
+ /* if it's chain bd, then move to next */
+ if (((ep->bd_list.eqp_bdi + 1) % ep->bd_list.num_bds_table) == 0)
+ ep->bd_list.eqp_bdi++;
+
+ /* if the eqp is pointing to last + 1 then move back to 0 */
+ if (ep->bd_list.eqp_bdi == (ep->bd_list.max_bdi + 1))
+ ep->bd_list.eqp_bdi = 0;
+}
+
+/* Setup the first bd for ep0 transfer */
+static int setup_first_bd_ep0(struct bdc *bdc, struct bdc_req *req, u32 *dword3)
+{
+ u16 wValue;
+ u32 req_len;
+
+ req->ep->dir = 0;
+ req_len = req->usb_req.length;
+ switch (bdc->ep0_state) {
+ case WAIT_FOR_DATA_START:
+ *dword3 |= BD_TYPE_DS;
+ if (bdc->setup_pkt.bRequestType & USB_DIR_IN)
+ *dword3 |= BD_DIR_IN;
+
+ /* check if zlp will be needed */
+ wValue = le16_to_cpu(bdc->setup_pkt.wValue);
+ if ((wValue > req_len) &&
+ (req_len % bdc->gadget.ep0->maxpacket == 0)) {
+ dev_dbg(bdc->dev, "ZLP needed wVal:%d len:%d MaxP:%d\n",
+ wValue, req_len,
+ bdc->gadget.ep0->maxpacket);
+ bdc->zlp_needed = true;
+ }
+ break;
+
+ case WAIT_FOR_STATUS_START:
+ *dword3 |= BD_TYPE_SS;
+ if (!le16_to_cpu(bdc->setup_pkt.wLength) ||
+ !(bdc->setup_pkt.bRequestType & USB_DIR_IN))
+ *dword3 |= BD_DIR_IN;
+ break;
+ default:
+ dev_err(bdc->dev,
+ "Unknown ep0 state for queueing bd ep0_state:%s\n",
+ ep0_state_string[bdc->ep0_state]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Setup the bd dma descriptor for a given request */
+static int setup_bd_list_xfr(struct bdc *bdc, struct bdc_req *req, int num_bds)
+{
+ dma_addr_t buf_add = req->usb_req.dma;
+ u32 maxp, tfs, dword2, dword3;
+ struct bd_transfer *bd_xfr;
+ struct bd_list *bd_list;
+ struct bdc_ep *ep;
+ struct bdc_bd *bd;
+ int ret, bdnum;
+ u32 req_len;
+
+ ep = req->ep;
+ bd_list = &ep->bd_list;
+ bd_xfr = &req->bd_xfr;
+ bd_xfr->req = req;
+ bd_xfr->start_bdi = bd_list->eqp_bdi;
+ bd = bdi_to_bd(ep, bd_list->eqp_bdi);
+ req_len = req->usb_req.length;
+ maxp = usb_endpoint_maxp(ep->desc) & 0x7ff;
+ tfs = roundup(req->usb_req.length, maxp);
+ tfs = tfs/maxp;
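+	/* tfs is the transfer length in max-packet-size units, rounded up */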
+ dev_vdbg(bdc->dev, "%s ep:%s num_bds:%d tfs:%d r_len:%d bd:%p\n",
+ __func__, ep->name, num_bds, tfs, req_len, bd);
+
+ for (bdnum = 0; bdnum < num_bds; bdnum++) {
+ dword2 = dword3 = 0;
+ /* First bd */
+ if (!bdnum) {
+ dword3 |= BD_SOT|BD_SBF|(tfs<<BD_TFS_SHIFT);
+ dword2 |= BD_LTF;
+			/* the format of the first bd for ep0 differs from other eps */
+ if (ep->ep_num == 1) {
+ ret = setup_first_bd_ep0(bdc, req, &dword3);
+ if (ret)
+ return ret;
+ }
+ }
+ if (!req->ep->dir)
+ dword3 |= BD_ISP;
+
+ if (req_len > BD_MAX_BUFF_SIZE) {
+ dword2 |= BD_MAX_BUFF_SIZE;
+ req_len -= BD_MAX_BUFF_SIZE;
+ } else {
+ /* this should be the last bd */
+ dword2 |= req_len;
+ dword3 |= BD_IOC;
+ dword3 |= BD_EOT;
+ }
+ /* Currently only 1 INT target is supported */
+ dword2 |= BD_INTR_TARGET(0);
+ bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
+ if (unlikely(!bd)) {
+ dev_err(bdc->dev, "Err bd pointing to wrong addr\n");
+ return -EINVAL;
+ }
+ /* write bd */
+ bd->offset[0] = cpu_to_le32(lower_32_bits(buf_add));
+ bd->offset[1] = cpu_to_le32(upper_32_bits(buf_add));
+ bd->offset[2] = cpu_to_le32(dword2);
+ bd->offset[3] = cpu_to_le32(dword3);
+ /* advance eqp pointer */
+ ep_bdlist_eqp_adv(ep);
+ /* advance the buff pointer */
+ buf_add += BD_MAX_BUFF_SIZE;
+ dev_vdbg(bdc->dev, "buf_add:%08llx req_len:%d bd:%p eqp:%d\n",
+ (unsigned long long)buf_add, req_len, bd,
+ ep->bd_list.eqp_bdi);
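+		/* mark the bd at the new eqp with a stop-fetch flag so HW halts there */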
+ bd = bdi_to_bd(ep, ep->bd_list.eqp_bdi);
+ bd->offset[3] = cpu_to_le32(BD_SBF);
+ }
+ /* clear the STOP BD fetch bit from the first bd of this xfr */
+ bd = bdi_to_bd(ep, bd_xfr->start_bdi);
+ bd->offset[3] &= cpu_to_le32(~BD_SBF);
+ /* the new eqp will be next hw dqp */
+ bd_xfr->num_bds = num_bds;
+ bd_xfr->next_hwd_bdi = ep->bd_list.eqp_bdi;
+	/* ensure everything is written out before notifying the HW */
+ wmb();
+
+ return 0;
+}
+
+/* Queue the xfr */
+static int bdc_queue_xfr(struct bdc *bdc, struct bdc_req *req)
+{
+ int num_bds, bd_available;
+ struct bdc_ep *ep;
+ int ret;
+
+ ep = req->ep;
+ dev_dbg(bdc->dev, "%s req:%p\n", __func__, req);
+ dev_dbg(bdc->dev, "eqp_bdi:%d hwd_bdi:%d\n",
+ ep->bd_list.eqp_bdi, ep->bd_list.hwd_bdi);
+
+ num_bds = bd_needed_req(req);
+ bd_available = bd_available_ep(ep);
+
+	/* are enough bd's available on the ep? */
+ if (num_bds > bd_available)
+ return -ENOMEM;
+
+ ret = setup_bd_list_xfr(bdc, req, num_bds);
+ if (ret)
+ return ret;
+ list_add_tail(&req->queue, &ep->queue);
+ bdc_dbg_bd_list(bdc, ep);
+ bdc_notify_xfr(bdc, ep->ep_num);
+
+ return 0;
+}
+
+/* callback to gadget layer when xfr completes */
+static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req,
+ int status)
+{
+ struct bdc *bdc = ep->bdc;
+
+	if (req == NULL)
+		return;
+
+ dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status);
+ list_del(&req->queue);
+ req->usb_req.status = status;
+ usb_gadget_unmap_request(&bdc->gadget, &req->usb_req, ep->dir);
+ if (req->usb_req.complete) {
+ spin_unlock(&bdc->lock);
+ usb_gadget_giveback_request(&ep->usb_ep, &req->usb_req);
+ spin_lock(&bdc->lock);
+ }
+}
+
+/* Disable the endpoint */
+int bdc_ep_disable(struct bdc_ep *ep)
+{
+ struct bdc_req *req;
+ struct bdc *bdc;
+ int ret;
+
+ ret = 0;
+ bdc = ep->bdc;
+ dev_dbg(bdc->dev, "%s() ep->ep_num=%d\n", __func__, ep->ep_num);
+ /* Stop the endpoint */
+ ret = bdc_stop_ep(bdc, ep->ep_num);
+
+	/*
+	 * Intentionally don't check the ret value of stop; it can fail in
+	 * disconnect scenarios, so continue with dconfig anyway
+	 */
+ /* de-queue any pending requests */
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next, struct bdc_req,
+ queue);
+ bdc_req_complete(ep, req, -ESHUTDOWN);
+ }
+ /* deconfigure the endpoint */
+ ret = bdc_dconfig_ep(bdc, ep);
+ if (ret)
+ dev_warn(bdc->dev,
+ "dconfig fail but continue with memory free");
+
+ ep->flags = 0;
+ /* ep0 memory is not freed, but reused on next connect sr */
+ if (ep->ep_num == 1)
+ return 0;
+
+ /* Free the bdl memory */
+ ep_bd_list_free(ep, ep->bd_list.num_tabs);
+ ep->desc = NULL;
+ ep->comp_desc = NULL;
+ ep->usb_ep.desc = NULL;
+ ep->ep_type = 0;
+
+ return ret;
+}
+
+/* Enable the ep */
+int bdc_ep_enable(struct bdc_ep *ep)
+{
+ struct bdc *bdc;
+ int ret = 0;
+
+ bdc = ep->bdc;
+ dev_dbg(bdc->dev, "%s NUM_TABLES:%d %d\n",
+ __func__, NUM_TABLES, NUM_TABLES_ISOCH);
+
+ ret = ep_bd_list_alloc(ep);
+ if (ret) {
+ dev_err(bdc->dev, "ep bd list allocation failed:%d\n", ret);
+ return -ENOMEM;
+ }
+ bdc_dbg_bd_list(bdc, ep);
+ /* only for ep0: config ep is called for ep0 from connect event */
+ ep->flags |= BDC_EP_ENABLED;
+ if (ep->ep_num == 1)
+ return ret;
+
+ /* Issue a configure endpoint command */
+ ret = bdc_config_ep(bdc, ep);
+ if (ret)
+ return ret;
+
+ ep->usb_ep.maxpacket = usb_endpoint_maxp(ep->desc);
+ ep->usb_ep.desc = ep->desc;
+ ep->usb_ep.comp_desc = ep->comp_desc;
+ ep->ep_type = usb_endpoint_type(ep->desc);
+ ep->flags |= BDC_EP_ENABLED;
+
+ return 0;
+}
+
+/* EP0 related code */
+
+/* Queue a status stage BD */
+static int ep0_queue_status_stage(struct bdc *bdc)
+{
+ struct bdc_req *status_req;
+ struct bdc_ep *ep;
+
+ status_req = &bdc->status_req;
+ ep = bdc->bdc_ep_array[1];
+ status_req->ep = ep;
+ status_req->usb_req.length = 0;
+ status_req->usb_req.status = -EINPROGRESS;
+ status_req->usb_req.actual = 0;
+ status_req->usb_req.complete = NULL;
+ bdc_queue_xfr(bdc, status_req);
+
+ return 0;
+}
+
+/* Queue xfr on ep0 */
+static int ep0_queue(struct bdc_ep *ep, struct bdc_req *req)
+{
+ struct bdc *bdc;
+ int ret;
+
+ bdc = ep->bdc;
+ dev_dbg(bdc->dev, "%s()\n", __func__);
+ req->usb_req.actual = 0;
+ req->usb_req.status = -EINPROGRESS;
+ req->epnum = ep->ep_num;
+
+ if (bdc->delayed_status) {
+ bdc->delayed_status = false;
+		/* if the status stage was delayed, queue it now */
+ if (bdc->ep0_state == WAIT_FOR_STATUS_START) {
+ /* Queue a status stage BD */
+ ep0_queue_status_stage(bdc);
+ bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
+ return 0;
+ }
+ } else {
+		/*
+		 * If delayed status is false and a 0 length transfer is
+		 * requested, i.e. for the status stage of some setup request,
+		 * then just return; the status stage is queued independently
+		 */
+ if (req->usb_req.length == 0)
+ return 0;
+
+ }
+ ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
+ if (ret) {
+ dev_err(bdc->dev, "dma mapping failed %s\n", ep->name);
+ return ret;
+ }
+
+ return bdc_queue_xfr(bdc, req);
+}
+
+/* Queue data stage */
+static int ep0_queue_data_stage(struct bdc *bdc)
+{
+ struct usb_request *ep0_usb_req;
+ struct bdc_ep *ep;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ ep0_usb_req = &bdc->ep0_req.usb_req;
+ ep = bdc->bdc_ep_array[1];
+ bdc->ep0_req.ep = ep;
+ bdc->ep0_req.usb_req.complete = NULL;
+
+ return ep0_queue(ep, &bdc->ep0_req);
+}
+
+/* Queue req on ep */
+static int ep_queue(struct bdc_ep *ep, struct bdc_req *req)
+{
+ struct bdc *bdc;
+ int ret = 0;
+
+ bdc = ep->bdc;
+ if (!req || !ep || !ep->usb_ep.desc)
+ return -EINVAL;
+
+ req->usb_req.actual = 0;
+ req->usb_req.status = -EINPROGRESS;
+ req->epnum = ep->ep_num;
+
+ ret = usb_gadget_map_request(&bdc->gadget, &req->usb_req, ep->dir);
+ if (ret) {
+ dev_err(bdc->dev, "dma mapping failed\n");
+ return ret;
+ }
+
+ return bdc_queue_xfr(bdc, req);
+}
+
+/* Dequeue a request from ep */
+static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
+{
+ int start_bdi, end_bdi, tbi, eqp_bdi, curr_hw_dqpi;
+ bool start_pending, end_pending;
+ bool first_remove = false;
+ struct bdc_req *first_req;
+ struct bdc_bd *bd_start;
+ struct bd_table *table;
+ dma_addr_t next_bd_dma;
+ u64 deq_ptr_64 = 0;
+ struct bdc *bdc;
+ u32 tmp_32;
+ int ret;
+
+ bdc = ep->bdc;
+ start_pending = end_pending = false;
+ eqp_bdi = ep->bd_list.eqp_bdi - 1;
+
+ if (eqp_bdi < 0)
+ eqp_bdi = ep->bd_list.max_bdi;
+
+ start_bdi = req->bd_xfr.start_bdi;
+ end_bdi = find_end_bdi(ep, req->bd_xfr.next_hwd_bdi);
+
+ dev_dbg(bdc->dev, "%s ep:%s start:%d end:%d\n",
+ __func__, ep->name, start_bdi, end_bdi);
+ dev_dbg(bdc->dev, "ep_dequeue ep=%p ep->desc=%p\n",
+ ep, (void *)ep->usb_ep.desc);
+	/* Stop the ep to see where the HW is */
+ ret = bdc_stop_ep(bdc, ep->ep_num);
+ /* if there is an issue with stopping ep, then no need to go further */
+ if (ret)
+ return 0;
+
+	/*
+	 * After the endpoint is stopped, there can be 3 cases: the request
+	 * is processed, pending, or in the middle of processing
+	 */
+
+ /* The current hw dequeue pointer */
+ tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(0));
+ deq_ptr_64 = tmp_32;
+ tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(1));
+ deq_ptr_64 |= ((u64)tmp_32 << 32);
+
+ /* we have the dma addr of next bd that will be fetched by hardware */
+ curr_hw_dqpi = bd_add_to_bdi(ep, deq_ptr_64);
+ if (curr_hw_dqpi < 0)
+ return curr_hw_dqpi;
+
+	/*
+	 * curr_hw_dqpi points to the actual dqp of HW, and HW owns the bd's
+	 * from curr_hw_dqpi..eqp_bdi.
+	 */
+
+ /* Check if start_bdi and end_bdi are in range of HW owned BD's */
+ if (curr_hw_dqpi > eqp_bdi) {
+ /* there is a wrap from last to 0 */
+ if (start_bdi >= curr_hw_dqpi || start_bdi <= eqp_bdi) {
+ start_pending = true;
+ end_pending = true;
+ } else if (end_bdi >= curr_hw_dqpi || end_bdi <= eqp_bdi) {
+ end_pending = true;
+ }
+ } else {
+ if (start_bdi >= curr_hw_dqpi) {
+ start_pending = true;
+ end_pending = true;
+ } else if (end_bdi >= curr_hw_dqpi) {
+ end_pending = true;
+ }
+ }
+ dev_dbg(bdc->dev,
+ "start_pending:%d end_pending:%d speed:%d\n",
+ start_pending, end_pending, bdc->gadget.speed);
+
+	/* If both start and end bd's are already processed, we cannot deq the req */
+ if (!start_pending && !end_pending)
+ return -EINVAL;
+
+ /*
+ * if ep_dequeue is called after disconnect then just return
+ * success from here
+ */
+ if (bdc->gadget.speed == USB_SPEED_UNKNOWN)
+ return 0;
+ tbi = bdi_to_tbi(ep, req->bd_xfr.next_hwd_bdi);
+ table = ep->bd_list.bd_table_array[tbi];
+ next_bd_dma = table->dma +
+ sizeof(struct bdc_bd)*(req->bd_xfr.next_hwd_bdi -
+ tbi * ep->bd_list.num_bds_table);
+
+ first_req = list_first_entry(&ep->queue, struct bdc_req,
+ queue);
+
+ if (req == first_req)
+ first_remove = true;
+
+	/*
+	 * Due to a HW limitation we need to bypass chain bd's and issue ep_bla;
+	 * in case start is pending and this is the first request in the list,
+	 * then issue ep_bla instead of marking it as a chain bd
+	 */
+ if (start_pending && !first_remove) {
+ /*
+ * Mark the start bd as Chain bd, and point the chain
+ * bd to next_bd_dma
+ */
+ bd_start = bdi_to_bd(ep, start_bdi);
+ bd_start->offset[0] = cpu_to_le32(lower_32_bits(next_bd_dma));
+ bd_start->offset[1] = cpu_to_le32(upper_32_bits(next_bd_dma));
+ bd_start->offset[2] = 0x0;
+ bd_start->offset[3] = cpu_to_le32(MARK_CHAIN_BD);
+ bdc_dbg_bd_list(bdc, ep);
+ } else if (end_pending) {
+ /*
+ * The transfer is stopped in the middle, move the
+ * HW deq pointer to next_bd_dma
+ */
+ ret = bdc_ep_bla(bdc, ep, next_bd_dma);
+ if (ret) {
+ dev_err(bdc->dev, "error in ep_bla:%d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Halt/Clear the ep based on value */
+static int ep_set_halt(struct bdc_ep *ep, u32 value)
+{
+ struct bdc *bdc;
+ int ret;
+
+ bdc = ep->bdc;
+ dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);
+
+ if (value) {
+ dev_dbg(bdc->dev, "Halt\n");
+ if (ep->ep_num == 1)
+ bdc->ep0_state = WAIT_FOR_SETUP;
+
+ ret = bdc_ep_set_stall(bdc, ep->ep_num);
+ if (ret)
+ dev_err(bdc->dev, "failed to %s STALL on %s\n",
+ value ? "set" : "clear", ep->name);
+ else
+ ep->flags |= BDC_EP_STALL;
+ } else {
+ /* Clear */
+ dev_dbg(bdc->dev, "Before Clear\n");
+ ret = bdc_ep_clear_stall(bdc, ep->ep_num);
+ if (ret)
+ dev_err(bdc->dev, "failed to %s STALL on %s\n",
+ value ? "set" : "clear", ep->name);
+ else
+ ep->flags &= ~BDC_EP_STALL;
+ dev_dbg(bdc->dev, "After Clear\n");
+ }
+
+ return ret;
+}
+
+/* Free all the ep */
+void bdc_free_ep(struct bdc *bdc)
+{
+ struct bdc_ep *ep;
+ u8 epnum;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ for (epnum = 1; epnum < bdc->num_eps; epnum++) {
+ ep = bdc->bdc_ep_array[epnum];
+ if (!ep)
+ continue;
+
+ if (ep->flags & BDC_EP_ENABLED)
+ ep_bd_list_free(ep, ep->bd_list.num_tabs);
+
+ /* ep0 is not in this gadget list */
+ if (epnum != 1)
+ list_del(&ep->usb_ep.ep_list);
+
+ kfree(ep);
+ }
+}
+
+/* USB2 spec, section 7.1.20 */
+static int bdc_set_test_mode(struct bdc *bdc)
+{
+ u32 usb2_pm;
+
+ usb2_pm = bdc_readl(bdc->regs, BDC_USPPM2);
+ usb2_pm &= ~BDC_PTC_MASK;
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ switch (bdc->test_mode) {
+ case TEST_J:
+ case TEST_K:
+ case TEST_SE0_NAK:
+ case TEST_PACKET:
+ case TEST_FORCE_EN:
+ usb2_pm |= bdc->test_mode << 28;
+ break;
+ default:
+ return -EINVAL;
+ }
+ dev_dbg(bdc->dev, "usb2_pm=%08x", usb2_pm);
+ bdc_writel(bdc->regs, BDC_USPPM2, usb2_pm);
+
+ return 0;
+}
+
+/*
+ * Helper function to handle Transfer status report with status as either
+ * success or short
+ */
+static void handle_xsr_succ_status(struct bdc *bdc, struct bdc_ep *ep,
+ struct bdc_sr *sreport)
+{
+ int short_bdi, start_bdi, end_bdi, max_len_bds, chain_bds;
+ struct bd_list *bd_list = &ep->bd_list;
+ int actual_length, length_short;
+ struct bd_transfer *bd_xfr;
+ struct bdc_bd *short_bd;
+ struct bdc_req *req;
+ u64 deq_ptr_64 = 0;
+ int status = 0;
+ int sr_status;
+ u32 tmp_32;
+
+ dev_dbg(bdc->dev, "%s ep:%p\n", __func__, ep);
+ bdc_dbg_srr(bdc, 0);
+	/* do not process this sr if the ignore flag is set */
+ if (ep->ignore_next_sr) {
+ ep->ignore_next_sr = false;
+ return;
+ }
+
+ if (unlikely(list_empty(&ep->queue))) {
+ dev_warn(bdc->dev, "xfr srr with no BD's queued\n");
+ return;
+ }
+ req = list_entry(ep->queue.next, struct bdc_req,
+ queue);
+
+ bd_xfr = &req->bd_xfr;
+ sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));
+
+	/*
+	 * If sr_status is short and this transfer has more than 1 bd then it
+	 * needs special handling; this is only applicable to bulk and ctrl
+	 */
+ if (sr_status == XSF_SHORT && bd_xfr->num_bds > 1) {
+		/*
+		 * This is a multi-bd xfr; let's see which bd
+		 * caused the short transfer and how many bytes have been
+		 * transferred so far.
+		 */
+ tmp_32 = le32_to_cpu(sreport->offset[0]);
+ deq_ptr_64 = tmp_32;
+ tmp_32 = le32_to_cpu(sreport->offset[1]);
+ deq_ptr_64 |= ((u64)tmp_32 << 32);
+ short_bdi = bd_add_to_bdi(ep, deq_ptr_64);
+ if (unlikely(short_bdi < 0))
+ dev_warn(bdc->dev, "bd doesn't exist?\n");
+
+ start_bdi = bd_xfr->start_bdi;
+ /*
+ * We know the start_bdi and short_bdi, how many xfr
+ * bds in between
+ */
+ if (start_bdi <= short_bdi) {
+ max_len_bds = short_bdi - start_bdi;
+ if (max_len_bds <= bd_list->num_bds_table) {
+ if (!(bdi_to_tbi(ep, start_bdi) ==
+ bdi_to_tbi(ep, short_bdi)))
+ max_len_bds--;
+ } else {
+ chain_bds = max_len_bds/bd_list->num_bds_table;
+ max_len_bds -= chain_bds;
+ }
+ } else {
+ /* there is a wrap in the ring within a xfr */
+ chain_bds = (bd_list->max_bdi - start_bdi)/
+ bd_list->num_bds_table;
+ chain_bds += short_bdi/bd_list->num_bds_table;
+ max_len_bds = bd_list->max_bdi - start_bdi;
+ max_len_bds += short_bdi;
+ max_len_bds -= chain_bds;
+ }
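+		/*
+		 * e.g. start_bdi=2, short_bdi=40 with 32 bds per table:
+		 * 38 bds span one chain bd, leaving 37 full length bds
+		 */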
+ /* max_len_bds is the number of full length bds */
+ end_bdi = find_end_bdi(ep, bd_xfr->next_hwd_bdi);
+ if (!(end_bdi == short_bdi))
+ ep->ignore_next_sr = true;
+
+ actual_length = max_len_bds * BD_MAX_BUFF_SIZE;
+ short_bd = bdi_to_bd(ep, short_bdi);
+ /* length queued */
+ length_short = le32_to_cpu(short_bd->offset[2]) & 0x1FFFFF;
+		/* actual length transferred */
+ length_short -= SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
+ actual_length += length_short;
+ req->usb_req.actual = actual_length;
+ } else {
+ req->usb_req.actual = req->usb_req.length -
+ SR_BD_LEN(le32_to_cpu(sreport->offset[2]));
+ dev_dbg(bdc->dev,
+ "len=%d actual=%d bd_xfr->next_hwd_bdi:%d\n",
+ req->usb_req.length, req->usb_req.actual,
+ bd_xfr->next_hwd_bdi);
+ }
+
+ /* Update the dequeue pointer */
+ ep->bd_list.hwd_bdi = bd_xfr->next_hwd_bdi;
+ if (req->usb_req.actual < req->usb_req.length) {
+ dev_dbg(bdc->dev, "short xfr on %d\n", ep->ep_num);
+ if (req->usb_req.short_not_ok)
+ status = -EREMOTEIO;
+ }
+ bdc_req_complete(ep, bd_xfr->req, status);
+}
+
+/* EP0 setup related packet handlers */
+
+/*
+ * Setup packet received, just store the packet and process on next DS or SS
+ * started SR
+ */
+void bdc_xsf_ep0_setup_recv(struct bdc *bdc, struct bdc_sr *sreport)
+{
+ struct usb_ctrlrequest *setup_pkt;
+ u32 len;
+
+ dev_dbg(bdc->dev,
+ "%s ep0_state:%s\n",
+ __func__, ep0_state_string[bdc->ep0_state]);
+ /* Store received setup packet */
+ setup_pkt = &bdc->setup_pkt;
+ memcpy(setup_pkt, &sreport->offset[0], sizeof(*setup_pkt));
+ len = le16_to_cpu(setup_pkt->wLength);
+ if (!len)
+ bdc->ep0_state = WAIT_FOR_STATUS_START;
+ else
+ bdc->ep0_state = WAIT_FOR_DATA_START;
+
+
+ dev_dbg(bdc->dev,
+ "%s exit ep0_state:%s\n",
+ __func__, ep0_state_string[bdc->ep0_state]);
+}
+
+/* Stall ep0 */
+static void ep0_stall(struct bdc *bdc)
+{
+ struct bdc_ep *ep = bdc->bdc_ep_array[1];
+ struct bdc_req *req;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ bdc->delayed_status = false;
+ ep_set_halt(ep, 1);
+
+	/* de-queue any pending requests */
+ while (!list_empty(&ep->queue)) {
+ req = list_entry(ep->queue.next, struct bdc_req,
+ queue);
+ bdc_req_complete(ep, req, -ESHUTDOWN);
+ }
+}
+
+/* SET_ADDRESS request handler */
+static int ep0_set_address(struct bdc *bdc, struct usb_ctrlrequest *ctrl)
+{
+ enum usb_device_state state = bdc->gadget.state;
+ int ret = 0;
+ u32 addr;
+
+ addr = le16_to_cpu(ctrl->wValue);
+ dev_dbg(bdc->dev,
+ "%s addr:%d dev state:%d\n",
+ __func__, addr, state);
+
+ if (addr > 127)
+ return -EINVAL;
+
+ switch (state) {
+ case USB_STATE_DEFAULT:
+ case USB_STATE_ADDRESS:
+ /* Issue Address device command */
+ ret = bdc_address_device(bdc, addr);
+ if (ret)
+ return ret;
+
+ if (addr)
+ usb_gadget_set_state(&bdc->gadget, USB_STATE_ADDRESS);
+ else
+ usb_gadget_set_state(&bdc->gadget, USB_STATE_DEFAULT);
+
+ bdc->dev_addr = addr;
+ break;
+ default:
+ dev_warn(bdc->dev,
+ "SET Address in wrong device state %d\n",
+ state);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/* Handler for SET/CLEAR FEATURE requests for device */
+static int ep0_handle_feature_dev(struct bdc *bdc, u16 wValue,
+ u16 wIndex, bool set)
+{
+ enum usb_device_state state = bdc->gadget.state;
+ u32 usppms = 0;
+
+ dev_dbg(bdc->dev, "%s set:%d dev state:%d\n",
+ __func__, set, state);
+ switch (wValue) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ dev_dbg(bdc->dev, "USB_DEVICE_REMOTE_WAKEUP\n");
+ if (set)
+ bdc->devstatus |= REMOTE_WAKE_ENABLE;
+ else
+ bdc->devstatus &= ~REMOTE_WAKE_ENABLE;
+ break;
+
+ case USB_DEVICE_TEST_MODE:
+ dev_dbg(bdc->dev, "USB_DEVICE_TEST_MODE\n");
+ if ((wIndex & 0xFF) ||
+ (bdc->gadget.speed != USB_SPEED_HIGH) || !set)
+ return -EINVAL;
+
+ bdc->test_mode = wIndex >> 8;
+ break;
+
+ case USB_DEVICE_U1_ENABLE:
+ dev_dbg(bdc->dev, "USB_DEVICE_U1_ENABLE\n");
+
+ if (bdc->gadget.speed != USB_SPEED_SUPER ||
+ state != USB_STATE_CONFIGURED)
+ return -EINVAL;
+
+ usppms = bdc_readl(bdc->regs, BDC_USPPMS);
+ if (set) {
+ /* clear previous u1t */
+ usppms &= ~BDC_U1T(BDC_U1T_MASK);
+ usppms |= BDC_U1T(U1_TIMEOUT);
+ usppms |= BDC_U1E | BDC_PORT_W1S;
+ bdc->devstatus |= (1 << USB_DEV_STAT_U1_ENABLED);
+ } else {
+ usppms &= ~BDC_U1E;
+ usppms |= BDC_PORT_W1S;
+ bdc->devstatus &= ~(1 << USB_DEV_STAT_U1_ENABLED);
+ }
+ bdc_writel(bdc->regs, BDC_USPPMS, usppms);
+ break;
+
+ case USB_DEVICE_U2_ENABLE:
+ dev_dbg(bdc->dev, "USB_DEVICE_U2_ENABLE\n");
+
+ if (bdc->gadget.speed != USB_SPEED_SUPER ||
+ state != USB_STATE_CONFIGURED)
+ return -EINVAL;
+
+ usppms = bdc_readl(bdc->regs, BDC_USPPMS);
+ if (set) {
+ usppms |= BDC_U2E;
+ usppms |= BDC_U2A;
+ bdc->devstatus |= (1 << USB_DEV_STAT_U2_ENABLED);
+ } else {
+ usppms &= ~BDC_U2E;
+ usppms &= ~BDC_U2A;
+ bdc->devstatus &= ~(1 << USB_DEV_STAT_U2_ENABLED);
+ }
+ bdc_writel(bdc->regs, BDC_USPPMS, usppms);
+ break;
+
+ case USB_DEVICE_LTM_ENABLE:
+ dev_dbg(bdc->dev, "USB_DEVICE_LTM_ENABLE?\n");
+ if (bdc->gadget.speed != USB_SPEED_SUPER ||
+ state != USB_STATE_CONFIGURED)
+ return -EINVAL;
+ break;
+ default:
+ dev_err(bdc->dev, "Unknown wValue:%d\n", wValue);
+ return -EOPNOTSUPP;
+ } /* USB_RECIP_DEVICE end */
+
+ return 0;
+}
+
+/* SET/CLEAR FEATURE handler */
+static int ep0_handle_feature(struct bdc *bdc,
+ struct usb_ctrlrequest *setup_pkt, bool set)
+{
+ enum usb_device_state state = bdc->gadget.state;
+ struct bdc_ep *ep;
+ u16 wValue;
+ u16 wIndex;
+ int epnum;
+
+ wValue = le16_to_cpu(setup_pkt->wValue);
+ wIndex = le16_to_cpu(setup_pkt->wIndex);
+
+ dev_dbg(bdc->dev,
+ "%s wValue=%d wIndex=%d devstate=%08x speed=%d set=%d",
+ __func__, wValue, wIndex, state,
+ bdc->gadget.speed, set);
+
+ switch (setup_pkt->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ return ep0_handle_feature_dev(bdc, wValue, wIndex, set);
+ case USB_RECIP_INTERFACE:
+ dev_dbg(bdc->dev, "USB_RECIP_INTERFACE\n");
+ /* USB3 spec, sec 9.4.9 */
+ if (wValue != USB_INTRF_FUNC_SUSPEND)
+ return -EINVAL;
+ /* USB3 spec, Table 9-8 */
+ if (set) {
+ if (wIndex & USB_INTRF_FUNC_SUSPEND_RW) {
+ dev_dbg(bdc->dev, "SET REMOTE_WAKEUP\n");
+ bdc->devstatus |= REMOTE_WAKE_ENABLE;
+ } else {
+ dev_dbg(bdc->dev, "CLEAR REMOTE_WAKEUP\n");
+ bdc->devstatus &= ~REMOTE_WAKE_ENABLE;
+ }
+ }
+ break;
+
+ case USB_RECIP_ENDPOINT:
+ dev_dbg(bdc->dev, "USB_RECIP_ENDPOINT\n");
+ if (wValue != USB_ENDPOINT_HALT)
+ return -EINVAL;
+
+ epnum = wIndex & USB_ENDPOINT_NUMBER_MASK;
+ if (epnum) {
+ if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
+ epnum = epnum * 2 + 1;
+ else
+ epnum *= 2;
+ } else {
+ epnum = 1; /*EP0*/
+ }
+ /*
+ * If CLEAR_FEATURE on ep0 then don't do anything as the stall
+ * condition on ep0 has already been cleared when SETUP packet
+ * was received.
+ */
+ if (epnum == 1 && !set) {
+ dev_dbg(bdc->dev, "ep0 stall already cleared\n");
+ return 0;
+ }
+ dev_dbg(bdc->dev, "epnum=%d\n", epnum);
+ ep = bdc->bdc_ep_array[epnum];
+ if (!ep)
+ return -EINVAL;
+
+ return ep_set_halt(ep, set);
+ default:
+ dev_err(bdc->dev, "Unknown recipient\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* GET_STATUS request handler */
+static int ep0_handle_status(struct bdc *bdc,
+ struct usb_ctrlrequest *setup_pkt)
+{
+ enum usb_device_state state = bdc->gadget.state;
+ struct bdc_ep *ep;
+ u16 usb_status = 0;
+ u32 epnum;
+ u16 wIndex;
+
+ /* USB2.0 spec sec 9.4.5 */
+ if (state == USB_STATE_DEFAULT)
+ return -EINVAL;
+ wIndex = le16_to_cpu(setup_pkt->wIndex);
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ usb_status = bdc->devstatus;
+ switch (setup_pkt->bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ dev_dbg(bdc->dev,
+ "USB_RECIP_DEVICE devstatus:%08x\n",
+ bdc->devstatus);
+ /* USB3 spec, sec 9.4.5 */
+ if (bdc->gadget.speed == USB_SPEED_SUPER)
+ usb_status &= ~REMOTE_WAKE_ENABLE;
+ break;
+
+ case USB_RECIP_INTERFACE:
+ dev_dbg(bdc->dev, "USB_RECIP_INTERFACE\n");
+ if (bdc->gadget.speed == USB_SPEED_SUPER) {
+ /*
+ * This should come from func for Func remote wkup
+ * usb_status |=1;
+ */
+ if (bdc->devstatus & REMOTE_WAKE_ENABLE)
+ usb_status |= REMOTE_WAKE_ENABLE;
+ } else {
+ usb_status = 0;
+ }
+
+ break;
+
+ case USB_RECIP_ENDPOINT:
+ dev_dbg(bdc->dev, "USB_RECIP_ENDPOINT\n");
+ epnum = wIndex & USB_ENDPOINT_NUMBER_MASK;
+ if (epnum) {
+ if ((wIndex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
+ epnum = epnum*2 + 1;
+ else
+ epnum *= 2;
+ } else {
+ epnum = 1; /* EP0 */
+ }
+
+ ep = bdc->bdc_ep_array[epnum];
+ if (!ep) {
+ dev_err(bdc->dev, "ISSUE, GET_STATUS for invalid EP ?");
+ return -EINVAL;
+ }
+ if (ep->flags & BDC_EP_STALL)
+ usb_status |= 1 << USB_ENDPOINT_HALT;
+
+ break;
+ default:
+ dev_err(bdc->dev, "Unknown recipient for get_status\n");
+ return -EINVAL;
+ }
+ /* prepare a data stage for GET_STATUS */
+ dev_dbg(bdc->dev, "usb_status=%08x\n", usb_status);
+ *(__le16 *)bdc->ep0_response_buff = cpu_to_le16(usb_status);
+ bdc->ep0_req.usb_req.length = 2;
+ bdc->ep0_req.usb_req.buf = &bdc->ep0_response_buff;
+ ep0_queue_data_stage(bdc);
+
+ return 0;
+}
+
+static void ep0_set_sel_cmpl(struct usb_ep *_ep, struct usb_request *_req)
+{
+ /* ep0_set_sel_cmpl */
+}
+
+/* Queue data stage to handle 6 byte SET_SEL request */
+static int ep0_set_sel(struct bdc *bdc,
+ struct usb_ctrlrequest *setup_pkt)
+{
+ struct bdc_ep *ep;
+ u16 wLength;
+ u16 wValue;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ wValue = le16_to_cpu(setup_pkt->wValue);
+ wLength = le16_to_cpu(setup_pkt->wLength);
+ if (unlikely(wLength != 6)) {
+ dev_err(bdc->dev, "%s Wrong wLength:%d\n", __func__, wLength);
+ return -EINVAL;
+ }
+ ep = bdc->bdc_ep_array[1];
+ bdc->ep0_req.ep = ep;
+ bdc->ep0_req.usb_req.length = 6;
+ bdc->ep0_req.usb_req.buf = bdc->ep0_response_buff;
+ bdc->ep0_req.usb_req.complete = ep0_set_sel_cmpl;
+ ep0_queue_data_stage(bdc);
+
+ return 0;
+}
+
+/*
+ * Queue a 0 byte BD; this is needed only when wLength is more than the data
+ * length and the length is a multiple of MaxPacket
+ */
+static int ep0_queue_zlp(struct bdc *bdc)
+{
+ int ret;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ bdc->ep0_req.ep = bdc->bdc_ep_array[1];
+ bdc->ep0_req.usb_req.length = 0;
+ bdc->ep0_req.usb_req.complete = NULL;
+ bdc->ep0_state = WAIT_FOR_DATA_START;
+ ret = bdc_queue_xfr(bdc, &bdc->ep0_req);
+ if (ret) {
+ dev_err(bdc->dev, "err queueing zlp :%d\n", ret);
+ return ret;
+ }
+ bdc->ep0_state = WAIT_FOR_DATA_XMIT;
+
+ return 0;
+}
+
+/* Control request handler */
+static int handle_control_request(struct bdc *bdc)
+{
+ enum usb_device_state state = bdc->gadget.state;
+ struct usb_ctrlrequest *setup_pkt;
+ int delegate_setup = 0;
+ int ret = 0;
+ int config = 0;
+
+ setup_pkt = &bdc->setup_pkt;
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ if ((setup_pkt->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+ switch (setup_pkt->bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ dev_dbg(bdc->dev, "USB_REQ_SET_ADDRESS\n");
+ ret = ep0_set_address(bdc, setup_pkt);
+ bdc->devstatus &= DEVSTATUS_CLEAR;
+ break;
+
+ case USB_REQ_SET_CONFIGURATION:
+ dev_dbg(bdc->dev, "USB_REQ_SET_CONFIGURATION\n");
+ if (state == USB_STATE_ADDRESS) {
+ usb_gadget_set_state(&bdc->gadget,
+ USB_STATE_CONFIGURED);
+ } else if (state == USB_STATE_CONFIGURED) {
+ /*
+ * USB2 spec sec 9.4.7, if wValue is 0 then dev
+ * is moved to addressed state
+ */
+ config = le16_to_cpu(setup_pkt->wValue);
+ if (!config)
+ usb_gadget_set_state(
+ &bdc->gadget,
+ USB_STATE_ADDRESS);
+ }
+ delegate_setup = 1;
+ break;
+
+ case USB_REQ_SET_FEATURE:
+ dev_dbg(bdc->dev, "USB_REQ_SET_FEATURE\n");
+ ret = ep0_handle_feature(bdc, setup_pkt, 1);
+ break;
+
+ case USB_REQ_CLEAR_FEATURE:
+ dev_dbg(bdc->dev, "USB_REQ_CLEAR_FEATURE\n");
+ ret = ep0_handle_feature(bdc, setup_pkt, 0);
+ break;
+
+ case USB_REQ_GET_STATUS:
+ dev_dbg(bdc->dev, "USB_REQ_GET_STATUS\n");
+ ret = ep0_handle_status(bdc, setup_pkt);
+ break;
+
+ case USB_REQ_SET_SEL:
+ dev_dbg(bdc->dev, "USB_REQ_SET_SEL\n");
+ ret = ep0_set_sel(bdc, setup_pkt);
+ break;
+
+ case USB_REQ_SET_ISOCH_DELAY:
+ dev_warn(bdc->dev,
+ "USB_REQ_SET_ISOCH_DELAY not handled\n");
+ ret = 0;
+ break;
+ default:
+ delegate_setup = 1;
+ }
+ } else {
+ delegate_setup = 1;
+ }
+
+ if (delegate_setup) {
+ spin_unlock(&bdc->lock);
+ ret = bdc->gadget_driver->setup(&bdc->gadget, setup_pkt);
+ spin_lock(&bdc->lock);
+ }
+
+ return ret;
+}
+
+/* EP0: Data stage started */
+void bdc_xsf_ep0_data_start(struct bdc *bdc, struct bdc_sr *sreport)
+{
+ struct bdc_ep *ep;
+ int ret = 0;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ ep = bdc->bdc_ep_array[1];
+	/* If ep0 was stalled, then clear it first */
+ if (ep->flags & BDC_EP_STALL) {
+ ret = ep_set_halt(ep, 0);
+ if (ret)
+ goto err;
+ }
+ if (bdc->ep0_state != WAIT_FOR_DATA_START)
+ dev_warn(bdc->dev,
+ "Data stage not expected ep0_state:%s\n",
+ ep0_state_string[bdc->ep0_state]);
+
+ ret = handle_control_request(bdc);
+ if (ret == USB_GADGET_DELAYED_STATUS) {
+		/*
+		 * The ep0 state will remain WAIT_FOR_DATA_START till
+		 * we receive an ep_queue on ep0
+		 */
+ bdc->delayed_status = true;
+ return;
+ }
+ if (!ret) {
+ bdc->ep0_state = WAIT_FOR_DATA_XMIT;
+ dev_dbg(bdc->dev,
+ "ep0_state:%s", ep0_state_string[bdc->ep0_state]);
+ return;
+ }
+err:
+ ep0_stall(bdc);
+}
+
+/* EP0: status stage started */
+void bdc_xsf_ep0_status_start(struct bdc *bdc, struct bdc_sr *sreport)
+{
+ struct usb_ctrlrequest *setup_pkt;
+ struct bdc_ep *ep;
+ int ret = 0;
+
+ dev_dbg(bdc->dev,
+ "%s ep0_state:%s",
+ __func__, ep0_state_string[bdc->ep0_state]);
+ ep = bdc->bdc_ep_array[1];
+
+ /* check if ZLP was queued? */
+ if (bdc->zlp_needed)
+ bdc->zlp_needed = false;
+
+ if (ep->flags & BDC_EP_STALL) {
+ ret = ep_set_halt(ep, 0);
+ if (ret)
+ goto err;
+ }
+
+ if ((bdc->ep0_state != WAIT_FOR_STATUS_START) &&
+ (bdc->ep0_state != WAIT_FOR_DATA_XMIT))
+ dev_err(bdc->dev,
+ "Status stage recv but ep0_state:%s\n",
+ ep0_state_string[bdc->ep0_state]);
+
+ /* check if data stage is in progress ? */
+ if (bdc->ep0_state == WAIT_FOR_DATA_XMIT) {
+ bdc->ep0_state = STATUS_PENDING;
+ /* Status stage will be queued upon Data stage transmit event */
+ dev_dbg(bdc->dev,
+ "status started but data not transmitted yet\n");
+ return;
+ }
+ setup_pkt = &bdc->setup_pkt;
+
+	/*
+	 * For a 2 stage setup, process the setup only now; for a 3 stage setup
+	 * the data stage has already been handled
+	 */
+ if (!le16_to_cpu(setup_pkt->wLength)) {
+ ret = handle_control_request(bdc);
+ if (ret == USB_GADGET_DELAYED_STATUS) {
+ bdc->delayed_status = true;
+ /* ep0_state will remain WAIT_FOR_STATUS_START */
+ return;
+ }
+ }
+ if (!ret) {
+ /* Queue a status stage BD */
+ ep0_queue_status_stage(bdc);
+ bdc->ep0_state = WAIT_FOR_STATUS_XMIT;
+ dev_dbg(bdc->dev,
+ "ep0_state:%s", ep0_state_string[bdc->ep0_state]);
+ return;
+ }
+err:
+ ep0_stall(bdc);
+}
+
+/* Helper function to update ep0 upon SR with xsf_succ or xsf_short */
+static void ep0_xsf_complete(struct bdc *bdc, struct bdc_sr *sreport)
+{
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ switch (bdc->ep0_state) {
+ case WAIT_FOR_DATA_XMIT:
+ bdc->ep0_state = WAIT_FOR_STATUS_START;
+ break;
+ case WAIT_FOR_STATUS_XMIT:
+ bdc->ep0_state = WAIT_FOR_SETUP;
+ if (bdc->test_mode) {
+ int ret;
+
+ dev_dbg(bdc->dev, "test_mode:%d\n", bdc->test_mode);
+ ret = bdc_set_test_mode(bdc);
+ if (ret < 0) {
+ dev_err(bdc->dev, "Err in setting Test mode\n");
+ return;
+ }
+ bdc->test_mode = 0;
+ }
+ break;
+ case STATUS_PENDING:
+ bdc_xsf_ep0_status_start(bdc, sreport);
+ break;
+
+ default:
+ dev_err(bdc->dev,
+ "Unknown ep0_state:%s\n",
+ ep0_state_string[bdc->ep0_state]);
+
+ }
+}
+
+/* xfr completion status report handler */
+void bdc_sr_xsf(struct bdc *bdc, struct bdc_sr *sreport)
+{
+ struct bdc_ep *ep;
+ u32 sr_status;
+ u8 ep_num;
+
+ ep_num = (le32_to_cpu(sreport->offset[3])>>4) & 0x1f;
+ ep = bdc->bdc_ep_array[ep_num];
+ if (!ep || !(ep->flags & BDC_EP_ENABLED)) {
+ dev_err(bdc->dev, "xsf for ep not enabled\n");
+ return;
+ }
+ /*
+ * check if this transfer is after link went from U3->U0 due
+ * to remote wakeup
+ */
+ if (bdc->devstatus & FUNC_WAKE_ISSUED) {
+ bdc->devstatus &= ~(FUNC_WAKE_ISSUED);
+ dev_dbg(bdc->dev, "%s clearing FUNC_WAKE_ISSUED flag\n",
+ __func__);
+ }
+ sr_status = XSF_STS(le32_to_cpu(sreport->offset[3]));
+ dev_dbg_ratelimited(bdc->dev, "%s sr_status=%d ep:%s\n",
+ __func__, sr_status, ep->name);
+
+ switch (sr_status) {
+ case XSF_SUCC:
+ case XSF_SHORT:
+ handle_xsr_succ_status(bdc, ep, sreport);
+ if (ep_num == 1)
+ ep0_xsf_complete(bdc, sreport);
+ break;
+
+ case XSF_SETUP_RECV:
+ case XSF_DATA_START:
+ case XSF_STATUS_START:
+ if (ep_num != 1) {
+ dev_err(bdc->dev,
+ "ep0 related packets on non ep0 endpoint");
+ return;
+ }
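+		/* dispatch to the ep0 setup-recv/data-start/status-start handler */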
+ bdc->sr_xsf_ep0[sr_status - XSF_SETUP_RECV](bdc, sreport);
+ break;
+
+ case XSF_BABB:
+ if (ep_num == 1) {
+ dev_dbg(bdc->dev, "Babble on ep0 zlp_need:%d\n",
+ bdc->zlp_needed);
+			/*
+			 * If the last completed transfer had wLength > data len,
+			 * and len is a multiple of MaxPacket, then queue a ZLP
+			 */
+ if (bdc->zlp_needed) {
+ /* queue 0 length bd */
+ ep0_queue_zlp(bdc);
+ return;
+ }
+ }
+ dev_warn(bdc->dev, "Babble on ep not handled\n");
+ break;
+ default:
+ dev_warn(bdc->dev, "sr status not handled:%x\n", sr_status);
+ break;
+ }
+}
+
+static int bdc_gadget_ep_queue(struct usb_ep *_ep,
+ struct usb_request *_req, gfp_t gfp_flags)
+{
+ struct bdc_req *req;
+ unsigned long flags;
+ struct bdc_ep *ep;
+ struct bdc *bdc;
+ int ret;
+
+ if (!_ep || !_ep->desc)
+ return -ESHUTDOWN;
+
+ if (!_req || !_req->complete || !_req->buf)
+ return -EINVAL;
+
+ ep = to_bdc_ep(_ep);
+ req = to_bdc_req(_req);
+ bdc = ep->bdc;
+ dev_dbg(bdc->dev, "%s ep:%p req:%p\n", __func__, ep, req);
+ dev_dbg(bdc->dev, "queuing request %p to %s length %d zero:%d\n",
+ _req, ep->name, _req->length, _req->zero);
+
+ if (!ep->usb_ep.desc) {
+ dev_warn(bdc->dev,
+ "trying to queue req %p to disabled %s\n",
+ _req, ep->name);
+ return -ESHUTDOWN;
+ }
+
+ if (_req->length > MAX_XFR_LEN) {
+ dev_warn(bdc->dev,
+ "req length > supported MAX:%d requested:%d\n",
+ MAX_XFR_LEN, _req->length);
+ return -EOPNOTSUPP;
+ }
+ spin_lock_irqsave(&bdc->lock, flags);
+ if (ep == bdc->bdc_ep_array[1])
+ ret = ep0_queue(ep, req);
+ else
+ ret = ep_queue(ep, req);
+
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return ret;
+}
+
+static int bdc_gadget_ep_dequeue(struct usb_ep *_ep,
+ struct usb_request *_req)
+{
+ struct bdc_req *req;
+ unsigned long flags;
+ struct bdc_ep *ep;
+ struct bdc *bdc;
+ int ret;
+
+ if (!_ep || !_req)
+ return -EINVAL;
+
+ ep = to_bdc_ep(_ep);
+ req = to_bdc_req(_req);
+ bdc = ep->bdc;
+ dev_dbg(bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);
+ bdc_dbg_bd_list(bdc, ep);
+ spin_lock_irqsave(&bdc->lock, flags);
+ /* make sure it's still queued on this endpoint */
+ list_for_each_entry(req, &ep->queue, queue) {
+ if (&req->usb_req == _req)
+ break;
+ }
+ if (&req->usb_req != _req) {
+ spin_unlock_irqrestore(&bdc->lock, flags);
+		dev_err(bdc->dev, "usb_req != req\n");
+ return -EINVAL;
+ }
+ ret = ep_dequeue(ep, req);
+ if (ret) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+ bdc_req_complete(ep, req, -ECONNRESET);
+
+err:
+ bdc_dbg_bd_list(bdc, ep);
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return ret;
+}
+
+static int bdc_gadget_ep_set_halt(struct usb_ep *_ep, int value)
+{
+ unsigned long flags;
+ struct bdc_ep *ep;
+ struct bdc *bdc;
+ int ret;
+
+ ep = to_bdc_ep(_ep);
+ bdc = ep->bdc;
+ dev_dbg(bdc->dev, "%s ep:%s value=%d\n", __func__, ep->name, value);
+ spin_lock_irqsave(&bdc->lock, flags);
+ if (usb_endpoint_xfer_isoc(ep->usb_ep.desc))
+ ret = -EINVAL;
+ else if (!list_empty(&ep->queue))
+ ret = -EAGAIN;
+ else
+ ret = ep_set_halt(ep, value);
+
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return ret;
+}
+
+static struct usb_request *bdc_gadget_alloc_request(struct usb_ep *_ep,
+ gfp_t gfp_flags)
+{
+ struct bdc_req *req;
+ struct bdc_ep *ep;
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+
+ ep = to_bdc_ep(_ep);
+ req->ep = ep;
+ req->epnum = ep->ep_num;
+ req->usb_req.dma = DMA_ADDR_INVALID;
+ dev_dbg(ep->bdc->dev, "%s ep:%s req:%p\n", __func__, ep->name, req);
+
+ return &req->usb_req;
+}
+
+static void bdc_gadget_free_request(struct usb_ep *_ep,
+ struct usb_request *_req)
+{
+ struct bdc_req *req;
+
+ req = to_bdc_req(_req);
+ kfree(req);
+}
+
+/* endpoint operations */
+
+/* configure endpoint and also allocate resources */
+static int bdc_gadget_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ unsigned long flags;
+ struct bdc_ep *ep;
+ struct bdc *bdc;
+ int ret;
+
+ if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
+ pr_debug("bdc_gadget_ep_enable invalid parameters\n");
+ return -EINVAL;
+ }
+
+ if (!desc->wMaxPacketSize) {
+ pr_debug("bdc_gadget_ep_enable missing wMaxPacketSize\n");
+ return -EINVAL;
+ }
+
+ ep = to_bdc_ep(_ep);
+ bdc = ep->bdc;
+
+ /* Sanity check, upper layer will not send enable for ep0 */
+ if (ep == bdc->bdc_ep_array[1])
+ return -EINVAL;
+
+ if (!bdc->gadget_driver
+ || bdc->gadget.speed == USB_SPEED_UNKNOWN) {
+ return -ESHUTDOWN;
+ }
+
+ dev_dbg(bdc->dev, "%s Enabling %s\n", __func__, ep->name);
+ spin_lock_irqsave(&bdc->lock, flags);
+ ep->desc = desc;
+ ep->comp_desc = _ep->comp_desc;
+ ret = bdc_ep_enable(ep);
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return ret;
+}
+
+static int bdc_gadget_ep_disable(struct usb_ep *_ep)
+{
+ unsigned long flags;
+ struct bdc_ep *ep;
+ struct bdc *bdc;
+ int ret;
+
+ if (!_ep) {
+ pr_debug("bdc: invalid parameters\n");
+ return -EINVAL;
+ }
+ ep = to_bdc_ep(_ep);
+ bdc = ep->bdc;
+
+ /* Upper layer will not call this for ep0, but do a sanity check */
+ if (ep == bdc->bdc_ep_array[1]) {
+ dev_warn(bdc->dev, "%s called for ep0\n", __func__);
+ return -EINVAL;
+ }
+ dev_dbg(bdc->dev,
+ "%s() ep:%s ep->flags:%08x\n",
+ __func__, ep->name, ep->flags);
+
+ if (!(ep->flags & BDC_EP_ENABLED)) {
+ dev_warn(bdc->dev, "%s is already disabled\n", ep->name);
+ return 0;
+ }
+ spin_lock_irqsave(&bdc->lock, flags);
+ ret = bdc_ep_disable(ep);
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return ret;
+}
+
+static const struct usb_ep_ops bdc_gadget_ep_ops = {
+ .enable = bdc_gadget_ep_enable,
+ .disable = bdc_gadget_ep_disable,
+ .alloc_request = bdc_gadget_alloc_request,
+ .free_request = bdc_gadget_free_request,
+ .queue = bdc_gadget_ep_queue,
+ .dequeue = bdc_gadget_ep_dequeue,
+ .set_halt = bdc_gadget_ep_set_halt
+};
+
+/* dir = 1 is IN */
+static int init_ep(struct bdc *bdc, u32 epnum, u32 dir)
+{
+ struct bdc_ep *ep;
+
+ dev_dbg(bdc->dev, "%s epnum=%d dir=%d\n", __func__, epnum, dir);
+ ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->bdc = bdc;
+ ep->dir = dir;
+
+	/* ep->ep_num is the index inside bdc_ep_array */
+ if (epnum == 1) {
+ ep->ep_num = 1;
+ bdc->bdc_ep_array[ep->ep_num] = ep;
+ snprintf(ep->name, sizeof(ep->name), "ep%d", epnum - 1);
+ usb_ep_set_maxpacket_limit(&ep->usb_ep, EP0_MAX_PKT_SIZE);
+ ep->comp_desc = NULL;
+ bdc->gadget.ep0 = &ep->usb_ep;
+ } else {
+ if (dir)
+ ep->ep_num = epnum * 2 - 1;
+ else
+ ep->ep_num = epnum * 2 - 2;
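+		/* e.g. epnum 2 maps to ep_num 2 (ep1out) for OUT and 3 (ep1in) for IN */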
+
+ bdc->bdc_ep_array[ep->ep_num] = ep;
+ snprintf(ep->name, sizeof(ep->name), "ep%d%s", epnum - 1,
+ dir & 1 ? "in" : "out");
+
+ usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
+ ep->usb_ep.max_streams = 0;
+ list_add_tail(&ep->usb_ep.ep_list, &bdc->gadget.ep_list);
+ }
+ ep->usb_ep.ops = &bdc_gadget_ep_ops;
+ ep->usb_ep.name = ep->name;
+ ep->flags = 0;
+ ep->ignore_next_sr = false;
+ dev_dbg(bdc->dev, "ep=%p ep->usb_ep.name=%s epnum=%d ep->epnum=%d\n",
+ ep, ep->usb_ep.name, epnum, ep->ep_num);
+
+ INIT_LIST_HEAD(&ep->queue);
+
+ return 0;
+}
+
+/* Init all ep */
+int bdc_init_ep(struct bdc *bdc)
+{
+ u8 epnum;
+ int ret;
+
+ dev_dbg(bdc->dev, "%s()\n", __func__);
+ INIT_LIST_HEAD(&bdc->gadget.ep_list);
+ /* init ep0 */
+ ret = init_ep(bdc, 1, 0);
+ if (ret) {
+ dev_err(bdc->dev, "init ep ep0 fail %d\n", ret);
+ return ret;
+ }
+
+ for (epnum = 2; epnum <= bdc->num_eps / 2; epnum++) {
+ /* OUT */
+ ret = init_ep(bdc, epnum, 0);
+ if (ret) {
+ dev_err(bdc->dev,
+ "init ep failed for:%d error: %d\n",
+ epnum, ret);
+ return ret;
+ }
+
+ /* IN */
+ ret = init_ep(bdc, epnum, 1);
+ if (ret) {
+ dev_err(bdc->dev,
+ "init ep failed for:%d error: %d\n",
+ epnum, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.h b/drivers/usb/gadget/udc/bdc/bdc_ep.h
new file mode 100644
index 000000000000..8a6b36cbf2ea
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.h
@@ -0,0 +1,22 @@
+/*
+ * bdc_ep.h - header for the BDC debug functions
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __LINUX_BDC_EP_H__
+#define __LINUX_BDC_EP_H__
+
+int bdc_init_ep(struct bdc *);
+int bdc_ep_disable(struct bdc_ep *);
+int bdc_ep_enable(struct bdc_ep *);
+void bdc_free_ep(struct bdc *);
+
+#endif /* __LINUX_BDC_EP_H__ */
diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c
new file mode 100644
index 000000000000..02968842b359
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_pci.c
@@ -0,0 +1,132 @@
+/*
+ * bdc_pci.c - BRCM BDC USB3.0 device controller PCI interface file.
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * Based on drivers under drivers/usb/
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/platform_device.h>
+
+#include "bdc.h"
+
+#define BDC_PCI_PID 0x1570
+
+struct bdc_pci {
+ struct device *dev;
+ struct platform_device *bdc;
+};
+
+static int bdc_setup_msi(struct pci_dev *pci)
+{
+ int ret;
+
+ ret = pci_enable_msi(pci);
+ if (ret) {
+ pr_err("failed to allocate MSI entry\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+{
+ struct resource res[2];
+ struct platform_device *bdc;
+ struct bdc_pci *glue;
+ int ret = -ENOMEM;
+
+ glue = devm_kzalloc(&pci->dev, sizeof(*glue), GFP_KERNEL);
+ if (!glue)
+ return -ENOMEM;
+
+ glue->dev = &pci->dev;
+ ret = pci_enable_device(pci);
+ if (ret) {
+ dev_err(&pci->dev, "failed to enable pci device\n");
+ return -ENODEV;
+ }
+ pci_set_master(pci);
+
+ bdc = platform_device_alloc(BRCM_BDC_NAME, PLATFORM_DEVID_AUTO);
+ if (!bdc)
+ return -ENOMEM;
+
+ memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
+ bdc_setup_msi(pci);
+
+ res[0].start = pci_resource_start(pci, 0);
+ res[0].end = pci_resource_end(pci, 0);
+ res[0].name = BRCM_BDC_NAME;
+ res[0].flags = IORESOURCE_MEM;
+
+ res[1].start = pci->irq;
+ res[1].name = BRCM_BDC_NAME;
+ res[1].flags = IORESOURCE_IRQ;
+
+ ret = platform_device_add_resources(bdc, res, ARRAY_SIZE(res));
+ if (ret) {
+ dev_err(&pci->dev,
+ "couldn't add resources to bdc device\n");
+ return ret;
+ }
+
+ pci_set_drvdata(pci, glue);
+
+ dma_set_coherent_mask(&bdc->dev, pci->dev.coherent_dma_mask);
+
+ bdc->dev.dma_mask = pci->dev.dma_mask;
+ bdc->dev.dma_parms = pci->dev.dma_parms;
+ bdc->dev.parent = &pci->dev;
+ glue->bdc = bdc;
+
+ ret = platform_device_add(bdc);
+ if (ret) {
+ dev_err(&pci->dev, "failed to register bdc device\n");
+ platform_device_put(bdc);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void bdc_pci_remove(struct pci_dev *pci)
+{
+ struct bdc_pci *glue = pci_get_drvdata(pci);
+
+ platform_device_unregister(glue->bdc);
+ pci_disable_msi(pci);
+}
+
+static struct pci_device_id bdc_pci_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BDC_PCI_PID), },
+ {} /* Terminating Entry */
+};
+
+MODULE_DEVICE_TABLE(pci, bdc_pci_id_table);
+
+static struct pci_driver bdc_pci_driver = {
+ .name = "bdc-pci",
+ .id_table = bdc_pci_id_table,
+ .probe = bdc_pci_probe,
+ .remove = bdc_pci_remove,
+};
+
+MODULE_AUTHOR("Ashwini Pahuja <ashwini.linux@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("BRCM BDC USB3 PCI Glue layer");
+module_pci_driver(bdc_pci_driver);
diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c
new file mode 100644
index 000000000000..3700ce70b0be
--- /dev/null
+++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c
@@ -0,0 +1,587 @@
+/*
+ * bdc_udc.c - BRCM BDC USB3.0 device controller gadget ops
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * Author: Ashwini Pahuja
+ *
+ * Based on drivers under drivers/usb/gadget/udc/
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
+#include <linux/pm.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <asm/unaligned.h>
+#include <linux/platform_device.h>
+
+#include "bdc.h"
+#include "bdc_ep.h"
+#include "bdc_cmd.h"
+#include "bdc_dbg.h"
+
+static const struct usb_gadget_ops bdc_gadget_ops;
+
+static const char * const conn_speed_str[] = {
+ "Not connected",
+ "Full Speed",
+ "Low Speed",
+ "High Speed",
+ "Super Speed",
+};
+
+/* EP0 initial descriptor */
+static struct usb_endpoint_descriptor bdc_gadget_ep0_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
+ .bEndpointAddress = 0,
+ .wMaxPacketSize = cpu_to_le16(EP0_MAX_PKT_SIZE),
+};
+
+/* Advance the srr dqp maintained by SW */
+static void srr_dqp_index_advc(struct bdc *bdc, u32 srr_num)
+{
+ struct srr *srr;
+
+ srr = &bdc->srr;
+ dev_dbg_ratelimited(bdc->dev, "srr->dqp_index:%d\n", srr->dqp_index);
+ srr->dqp_index++;
+ /* rollback to 0 if we are past the last */
+ if (srr->dqp_index == NUM_SR_ENTRIES)
+ srr->dqp_index = 0;
+}
+
+/* connect sr */
+static void bdc_uspc_connected(struct bdc *bdc)
+{
+ u32 speed, temp;
+ u32 usppms;
+ int ret;
+
+ temp = bdc_readl(bdc->regs, BDC_USPC);
+ speed = BDC_PSP(temp);
+ dev_dbg(bdc->dev, "%s speed=%x\n", __func__, speed);
+ switch (speed) {
+ case BDC_SPEED_SS:
+ bdc_gadget_ep0_desc.wMaxPacketSize =
+ cpu_to_le16(EP0_MAX_PKT_SIZE);
+ bdc->gadget.ep0->maxpacket = EP0_MAX_PKT_SIZE;
+ bdc->gadget.speed = USB_SPEED_SUPER;
+ /* Enable U1T in SS mode */
+ usppms = bdc_readl(bdc->regs, BDC_USPPMS);
+ usppms &= ~BDC_U1T(0xff);
+ usppms |= BDC_U1T(U1_TIMEOUT);
+ usppms |= BDC_PORT_W1S;
+ bdc_writel(bdc->regs, BDC_USPPMS, usppms);
+ break;
+
+ case BDC_SPEED_HS:
+ bdc_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
+ bdc->gadget.ep0->maxpacket = 64;
+ bdc->gadget.speed = USB_SPEED_HIGH;
+ break;
+
+ case BDC_SPEED_FS:
+ bdc_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
+ bdc->gadget.ep0->maxpacket = 64;
+ bdc->gadget.speed = USB_SPEED_FULL;
+ break;
+
+ case BDC_SPEED_LS:
+ bdc_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
+ bdc->gadget.ep0->maxpacket = 8;
+ bdc->gadget.speed = USB_SPEED_LOW;
+ break;
+ default:
+ dev_err(bdc->dev, "UNDEFINED SPEED\n");
+ return;
+ }
+ dev_dbg(bdc->dev, "connected at %s\n", conn_speed_str[speed]);
+ /* Now we know the speed, configure ep0 */
+ bdc->bdc_ep_array[1]->desc = &bdc_gadget_ep0_desc;
+ ret = bdc_config_ep(bdc, bdc->bdc_ep_array[1]);
+ if (ret)
+ dev_err(bdc->dev, "EP0 config failed\n");
+ bdc->bdc_ep_array[1]->usb_ep.desc = &bdc_gadget_ep0_desc;
+ bdc->bdc_ep_array[1]->flags |= BDC_EP_ENABLED;
+ usb_gadget_set_state(&bdc->gadget, USB_STATE_DEFAULT);
+}
+
+/* device got disconnected */
+static void bdc_uspc_disconnected(struct bdc *bdc, bool reinit)
+{
+ struct bdc_ep *ep;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ /*
+ * Only stop ep0 from here, rest of the endpoints will be disabled
+ * from gadget_disconnect
+ */
+ ep = bdc->bdc_ep_array[1];
+ if (ep && (ep->flags & BDC_EP_ENABLED))
+ /* if enabled then stop and remove requests */
+ bdc_ep_disable(ep);
+
+ if (bdc->gadget_driver && bdc->gadget_driver->disconnect) {
+ spin_unlock(&bdc->lock);
+ bdc->gadget_driver->disconnect(&bdc->gadget);
+ spin_lock(&bdc->lock);
+ }
+ /* Set Unknown speed */
+ bdc->gadget.speed = USB_SPEED_UNKNOWN;
+ bdc->devstatus &= DEVSTATUS_CLEAR;
+ bdc->delayed_status = false;
+ bdc->reinit = reinit;
+ bdc->test_mode = false;
+}
+
+/* TNotify wakeup timer */
+static void bdc_func_wake_timer(struct work_struct *work)
+{
+ struct bdc *bdc = container_of(work, struct bdc, func_wake_notify.work);
+ unsigned long flags;
+
+ dev_dbg(bdc->dev, "%s\n", __func__);
+ spin_lock_irqsave(&bdc->lock, flags);
+	/*
+	 * Check if the host has started transferring on the endpoints;
+	 * FUNC_WAKE_ISSUED is cleared when a transfer has started after resume
+	 */
+ if (bdc->devstatus & FUNC_WAKE_ISSUED) {
+ dev_dbg(bdc->dev, "FUNC_WAKE_ISSUED FLAG IS STILL SET\n");
+ /* flag is still set, so again send func wake */
+ bdc_function_wake_fh(bdc, 0);
+ schedule_delayed_work(&bdc->func_wake_notify,
+ msecs_to_jiffies(BDC_TNOTIFY));
+ }
+ spin_unlock_irqrestore(&bdc->lock, flags);
+}
+
+/* handler for Link state change condition */
+static void handle_link_state_change(struct bdc *bdc, u32 uspc)
+{
+ u32 link_state;
+
+ dev_dbg(bdc->dev, "Link state change");
+ link_state = BDC_PST(uspc);
+ switch (link_state) {
+ case BDC_LINK_STATE_U3:
+ if ((bdc->gadget.speed != USB_SPEED_UNKNOWN) &&
+ bdc->gadget_driver->suspend) {
+ dev_dbg(bdc->dev, "Entered Suspend mode\n");
+ spin_unlock(&bdc->lock);
+ bdc->devstatus |= DEVICE_SUSPENDED;
+ bdc->gadget_driver->suspend(&bdc->gadget);
+ spin_lock(&bdc->lock);
+ }
+ break;
+ case BDC_LINK_STATE_U0:
+ if (bdc->devstatus & REMOTE_WAKEUP_ISSUED) {
+ bdc->devstatus &= ~REMOTE_WAKEUP_ISSUED;
+ if (bdc->gadget.speed == USB_SPEED_SUPER) {
+ bdc_function_wake_fh(bdc, 0);
+ bdc->devstatus |= FUNC_WAKE_ISSUED;
+ /*
+ * Start a Notification timer and check if the
+ * Host transferred anything on any of the EPs,
+ * if not then send function wake again every
+ * TNotification secs until host initiates
+ * transfer to BDC, USB3 spec Table 8.13
+ */
+ schedule_delayed_work(
+ &bdc->func_wake_notify,
+ msecs_to_jiffies(BDC_TNOTIFY));
+ dev_dbg(bdc->dev, "sched func_wake_notify\n");
+ }
+ }
+ break;
+
+ case BDC_LINK_STATE_RESUME:
+ dev_dbg(bdc->dev, "Resumed from Suspend\n");
+ if (bdc->devstatus & DEVICE_SUSPENDED) {
+ bdc->gadget_driver->resume(&bdc->gadget);
+ bdc->devstatus &= ~DEVICE_SUSPENDED;
+ }
+ break;
+ default:
+ dev_dbg(bdc->dev, "link state:%d\n", link_state);
+ }
+}
+
+/* something changes on upstream port, handle it here */
+void bdc_sr_uspc(struct bdc *bdc, struct bdc_sr *sreport)
+{
+ u32 clear_flags = 0;
+ u32 uspc;
+ bool connected = false;
+ bool disconn = false;
+
+ uspc = bdc_readl(bdc->regs, BDC_USPC);
+ dev_dbg(bdc->dev, "%s uspc=0x%08x\n", __func__, uspc);
+
+ /* Port connect changed */
+ if (uspc & BDC_PCC) {
+ /* Vbus not present, and not connected to Downstream port */
+ if ((uspc & BDC_VBC) && !(uspc & BDC_VBS) && !(uspc & BDC_PCS))
+ disconn = true;
+ else if ((uspc & BDC_PCS) && !BDC_PST(uspc))
+ connected = true;
+ }
+
+ /* Change in VBus and VBus is present */
+ if ((uspc & BDC_VBC) && (uspc & BDC_VBS)) {
+ if (bdc->pullup) {
+ dev_dbg(bdc->dev, "Do a softconnect\n");
+ /* Attached state, do a softconnect */
+ bdc_softconn(bdc);
+ usb_gadget_set_state(&bdc->gadget, USB_STATE_POWERED);
+ }
+ clear_flags = BDC_VBC;
+ } else if ((uspc & BDC_PRS) || (uspc & BDC_PRC) || disconn) {
+ /* Hot reset, warm reset, 2.0 bus reset or disconn */
+ dev_dbg(bdc->dev, "Port reset or disconn\n");
+ bdc_uspc_disconnected(bdc, disconn);
+ clear_flags = BDC_PCC|BDC_PCS|BDC_PRS|BDC_PRC;
+ } else if ((uspc & BDC_PSC) && (uspc & BDC_PCS)) {
+ /* Change in Link state */
+ handle_link_state_change(bdc, uspc);
+ clear_flags = BDC_PSC|BDC_PCS;
+ }
+
+ /*
+ * In SS we might not have PRC bit set before connection, but in 2.0
+ * the PRC bit is set before connection, so moving this condition out
+ * of bus reset to handle both SS/2.0 speeds.
+ */
+ if (connected) {
+ /* This is the connect event for U0/L0 */
+ dev_dbg(bdc->dev, "Connected\n");
+ bdc_uspc_connected(bdc);
+ bdc->devstatus &= ~(DEVICE_SUSPENDED);
+ }
+ uspc = bdc_readl(bdc->regs, BDC_USPC);
+ uspc &= (~BDC_USPSC_RW);
+ dev_dbg(bdc->dev, "uspc=%x\n", uspc);
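+	/* ack only the change bits we handled; the masked uspc is not written back */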
+ bdc_writel(bdc->regs, BDC_USPC, clear_flags);
+}
+
+/* Main interrupt handler for bdc */
+static irqreturn_t bdc_udc_interrupt(int irq, void *_bdc)
+{
+ u32 eqp_index, dqp_index, sr_type, srr_int;
+ struct bdc_sr *sreport;
+ struct bdc *bdc = _bdc;
+ u32 status;
+ int ret;
+
+ spin_lock(&bdc->lock);
+ status = bdc_readl(bdc->regs, BDC_BDCSC);
+ if (!(status & BDC_GIP)) {
+ spin_unlock(&bdc->lock);
+ return IRQ_NONE;
+ }
+ srr_int = bdc_readl(bdc->regs, BDC_SRRINT(0));
+	/* Check if the SRR IP bit is set */
+ if (!(srr_int & BDC_SRR_IP)) {
+ dev_warn(bdc->dev, "Global irq pending but SRR IP is 0\n");
+ spin_unlock(&bdc->lock);
+ return IRQ_NONE;
+ }
+ eqp_index = BDC_SRR_EPI(srr_int);
+ dqp_index = BDC_SRR_DPI(srr_int);
+ dev_dbg(bdc->dev,
+ "%s eqp_index=%d dqp_index=%d srr.dqp_index=%d\n\n",
+ __func__, eqp_index, dqp_index, bdc->srr.dqp_index);
+
+ /* check for ring empty condition */
+ if (eqp_index == dqp_index) {
+ dev_dbg(bdc->dev, "SRR empty?\n");
+ spin_unlock(&bdc->lock);
+ return IRQ_HANDLED;
+ }
+
+ while (bdc->srr.dqp_index != eqp_index) {
+ sreport = &bdc->srr.sr_bds[bdc->srr.dqp_index];
+ /* sreport is read before using it */
+ rmb();
+ sr_type = le32_to_cpu(sreport->offset[3]) & BD_TYPE_BITMASK;
+ dev_dbg_ratelimited(bdc->dev, "sr_type=%d\n", sr_type);
+ switch (sr_type) {
+ case SR_XSF:
+ bdc->sr_handler[0](bdc, sreport);
+ break;
+
+ case SR_USPC:
+ bdc->sr_handler[1](bdc, sreport);
+ break;
+ default:
+ dev_warn(bdc->dev, "SR:%d not handled\n", sr_type);
+ }
+ /* Advance the srr dqp index */
+ srr_dqp_index_advc(bdc, 0);
+ }
+ /* update the hw dequeue pointer */
+ srr_int = bdc_readl(bdc->regs, BDC_SRRINT(0));
+ srr_int &= ~BDC_SRR_DPI_MASK;
+ srr_int &= ~(BDC_SRR_RWS|BDC_SRR_RST|BDC_SRR_ISR);
+ srr_int |= ((bdc->srr.dqp_index) << 16);
+ srr_int |= BDC_SRR_IP;
+ bdc_writel(bdc->regs, BDC_SRRINT(0), srr_int);
+ srr_int = bdc_readl(bdc->regs, BDC_SRRINT(0));
+ if (bdc->reinit) {
+ ret = bdc_reinit(bdc);
+ if (ret)
+ dev_err(bdc->dev, "err in bdc reinit\n");
+ }
+
+ spin_unlock(&bdc->lock);
+
+ return IRQ_HANDLED;
+}
+
+/* Gadget ops */
+static int bdc_udc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct bdc *bdc = gadget_to_bdc(gadget);
+ unsigned long flags;
+ int ret = 0;
+
+ dev_dbg(bdc->dev, "%s()\n", __func__);
+ spin_lock_irqsave(&bdc->lock, flags);
+ if (bdc->gadget_driver) {
+ dev_err(bdc->dev, "%s is already bound to %s\n",
+ bdc->gadget.name,
+ bdc->gadget_driver->driver.name);
+ ret = -EBUSY;
+ goto err;
+ }
+	/*
+	 * Run the controller from here; when the BDC is connected to the
+	 * host, the driver will receive a USPC SR with VBUS present and
+	 * will then do a softconnect.
+	 */
+ ret = bdc_run(bdc);
+ if (ret) {
+		dev_err(bdc->dev, "%s bdc run failed\n", __func__);
+ goto err;
+ }
+ bdc->gadget_driver = driver;
+ bdc->gadget.dev.driver = &driver->driver;
+err:
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return ret;
+}
+
+static int bdc_udc_stop(struct usb_gadget *gadget)
+{
+ struct bdc *bdc = gadget_to_bdc(gadget);
+ unsigned long flags;
+
+ dev_dbg(bdc->dev, "%s()\n", __func__);
+ spin_lock_irqsave(&bdc->lock, flags);
+ bdc_stop(bdc);
+ bdc->gadget_driver = NULL;
+ bdc->gadget.dev.driver = NULL;
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return 0;
+}
+
+static int bdc_udc_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct bdc *bdc = gadget_to_bdc(gadget);
+ unsigned long flags;
+ u32 uspc;
+
+	if (!gadget)
+		return -EINVAL;
+
+	dev_dbg(bdc->dev, "%s() is_on:%d\n", __func__, is_on);
+
+ spin_lock_irqsave(&bdc->lock, flags);
+ if (!is_on) {
+ bdc_softdisconn(bdc);
+ bdc->pullup = false;
+ } else {
+ /*
+		 * For a self-powered device, we need to wait until we receive
+		 * a VBUS change event with VBUS present; only then, if the
+		 * pullup flag is set, do we present the termination.
+ */
+ bdc->pullup = true;
+ /*
+		 * Check if the BDC is already connected to the host, i.e.
+		 * VBUS is present; if so, present the termination now. This
+		 * is typical for bus-powered devices.
+ */
+ uspc = bdc_readl(bdc->regs, BDC_USPC);
+ if (uspc & BDC_VBS)
+ bdc_softconn(bdc);
+ }
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return 0;
+}
+
+static int bdc_udc_set_selfpowered(struct usb_gadget *gadget,
+ int is_self)
+{
+ struct bdc *bdc = gadget_to_bdc(gadget);
+ unsigned long flags;
+
+ dev_dbg(bdc->dev, "%s()\n", __func__);
+ spin_lock_irqsave(&bdc->lock, flags);
+	if (is_self)
+		bdc->devstatus |= 1 << USB_DEVICE_SELF_POWERED;
+	else
+		bdc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
+
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return 0;
+}
+
+static int bdc_udc_wakeup(struct usb_gadget *gadget)
+{
+ struct bdc *bdc = gadget_to_bdc(gadget);
+ unsigned long flags;
+ u8 link_state;
+ u32 uspc;
+ int ret = 0;
+
+ dev_dbg(bdc->dev,
+ "%s() bdc->devstatus=%08x\n",
+ __func__, bdc->devstatus);
+
+ if (!(bdc->devstatus & REMOTE_WAKE_ENABLE))
+ return -EOPNOTSUPP;
+
+ spin_lock_irqsave(&bdc->lock, flags);
+ uspc = bdc_readl(bdc->regs, BDC_USPC);
+ link_state = BDC_PST(uspc);
+	dev_dbg(bdc->dev, "link_state=%d portsc=%x\n", link_state, uspc);
+ if (link_state != BDC_LINK_STATE_U3) {
+ dev_warn(bdc->dev,
+ "can't wakeup from link state %d\n",
+ link_state);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (bdc->gadget.speed == USB_SPEED_SUPER)
+ bdc->devstatus |= REMOTE_WAKEUP_ISSUED;
+
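+	/* Request a link transition back to U0 to signal remote wakeup */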
+ uspc &= ~BDC_PST_MASK;
+ uspc &= (~BDC_USPSC_RW);
+ uspc |= BDC_PST(BDC_LINK_STATE_U0);
+ uspc |= BDC_SWS;
+ bdc_writel(bdc->regs, BDC_USPC, uspc);
+ uspc = bdc_readl(bdc->regs, BDC_USPC);
+ link_state = BDC_PST(uspc);
+	dev_dbg(bdc->dev, "link_state=%d portsc=%x\n", link_state, uspc);
+out:
+ spin_unlock_irqrestore(&bdc->lock, flags);
+
+ return ret;
+}
+
+static const struct usb_gadget_ops bdc_gadget_ops = {
+ .wakeup = bdc_udc_wakeup,
+ .set_selfpowered = bdc_udc_set_selfpowered,
+ .pullup = bdc_udc_pullup,
+ .udc_start = bdc_udc_start,
+ .udc_stop = bdc_udc_stop,
+};
+
+/* Init the gadget interface and register the udc */
+int bdc_udc_init(struct bdc *bdc)
+{
+ u32 temp;
+ int ret;
+
+ dev_dbg(bdc->dev, "%s()\n", __func__);
+ bdc->gadget.ops = &bdc_gadget_ops;
+ bdc->gadget.max_speed = USB_SPEED_SUPER;
+ bdc->gadget.speed = USB_SPEED_UNKNOWN;
+ bdc->gadget.dev.parent = bdc->dev;
+
+	bdc->gadget.sg_supported = false;
+
+ bdc->gadget.name = BRCM_BDC_NAME;
+ ret = devm_request_irq(bdc->dev, bdc->irq, bdc_udc_interrupt,
+			IRQF_SHARED, BRCM_BDC_NAME, bdc);
+ if (ret) {
+ dev_err(bdc->dev,
+ "failed to request irq #%d %d\n",
+ bdc->irq, ret);
+ return ret;
+ }
+
+ ret = bdc_init_ep(bdc);
+ if (ret) {
+ dev_err(bdc->dev, "bdc init ep fail: %d\n", ret);
+ return ret;
+ }
+
+ ret = usb_add_gadget_udc(bdc->dev, &bdc->gadget);
+ if (ret) {
+ dev_err(bdc->dev, "failed to register udc\n");
+ goto err0;
+ }
+ usb_gadget_set_state(&bdc->gadget, USB_STATE_NOTATTACHED);
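+	/* ep0 uses the driver's default control endpoint descriptor */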
+ bdc->bdc_ep_array[1]->desc = &bdc_gadget_ep0_desc;
+ /*
+	 * Allocate the bd list for ep0 only; ep0 is fully enabled on the
+	 * connect status report, once the speed is known
+ */
+ ret = bdc_ep_enable(bdc->bdc_ep_array[1]);
+ if (ret) {
+		dev_err(bdc->dev, "failed to enable %s\n",
+ bdc->bdc_ep_array[1]->name);
+ goto err1;
+ }
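+	/* Delayed work for sending function wake notifications to the host */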
+ INIT_DELAYED_WORK(&bdc->func_wake_notify, bdc_func_wake_timer);
+ /* Enable Interrupts */
+ temp = bdc_readl(bdc->regs, BDC_BDCSC);
+ temp |= BDC_GIE;
+ bdc_writel(bdc->regs, BDC_BDCSC, temp);
+ return 0;
+err1:
+ usb_del_gadget_udc(&bdc->gadget);
+err0:
+ bdc_free_ep(bdc);
+
+ return ret;
+}
+
+void bdc_udc_exit(struct bdc *bdc)
+{
+ dev_dbg(bdc->dev, "%s()\n", __func__);
+ bdc_ep_disable(bdc->bdc_ep_array[1]);
+ usb_del_gadget_udc(&bdc->gadget);
+ bdc_free_ep(bdc);
+}