Diffstat (limited to 'drivers/usb')
-rw-r--r--  drivers/usb/Kconfig | 6
-rw-r--r--  drivers/usb/atm/cxacru.c | 2
-rw-r--r--  drivers/usb/atm/ueagle-atm.c | 1
-rw-r--r--  drivers/usb/class/cdc-acm.c | 91
-rw-r--r--  drivers/usb/class/cdc-acm.h | 3
-rw-r--r--  drivers/usb/core/driver.c | 101
-rw-r--r--  drivers/usb/core/hcd.c | 9
-rw-r--r--  drivers/usb/core/hcd.h | 4
-rw-r--r--  drivers/usb/core/hub.c | 9
-rw-r--r--  drivers/usb/core/message.c | 4
-rw-r--r--  drivers/usb/core/urb.c | 9
-rw-r--r--  drivers/usb/core/usb.c | 73
-rw-r--r--  drivers/usb/core/usb.h | 3
-rw-r--r--  drivers/usb/gadget/Kconfig | 10
-rw-r--r--  drivers/usb/gadget/amd5536udc.c | 1
-rw-r--r--  drivers/usb/gadget/at91_udc.c | 9
-rw-r--r--  drivers/usb/gadget/atmel_usba_udc.c | 4
-rw-r--r--  drivers/usb/gadget/dummy_hcd.c | 5
-rw-r--r--  drivers/usb/gadget/f_acm.c | 196
-rw-r--r--  drivers/usb/gadget/f_ecm.c | 2
-rw-r--r--  drivers/usb/gadget/f_rndis.c | 2
-rw-r--r--  drivers/usb/gadget/f_serial.c | 2
-rw-r--r--  drivers/usb/gadget/f_subset.c | 2
-rw-r--r--  drivers/usb/gadget/gadget_chips.h | 6
-rw-r--r--  drivers/usb/gadget/lh7a40x_udc.h | 2
-rw-r--r--  drivers/usb/gadget/omap_udc.c | 9
-rw-r--r--  drivers/usb/gadget/pxa25x_udc.c | 2
-rw-r--r--  drivers/usb/gadget/pxa25x_udc.h | 2
-rw-r--r--  drivers/usb/gadget/pxa27x_udc.c | 8
-rw-r--r--  drivers/usb/gadget/s3c2410_udc.c | 10
-rw-r--r--  drivers/usb/gadget/u_serial.c | 290
-rw-r--r--  drivers/usb/gadget/u_serial.h | 12
-rw-r--r--  drivers/usb/host/ehci-orion.c | 2
-rw-r--r--  drivers/usb/host/ehci-q.c | 2
-rw-r--r--  drivers/usb/host/isp1760-hcd.c | 53
-rw-r--r--  drivers/usb/host/isp1760-hcd.h | 5
-rw-r--r--  drivers/usb/host/ohci-at91.c | 8
-rw-r--r--  drivers/usb/host/ohci-au1xxx.c | 1
-rw-r--r--  drivers/usb/host/ohci-ep93xx.c | 4
-rw-r--r--  drivers/usb/host/ohci-hcd.c | 23
-rw-r--r--  drivers/usb/host/ohci-hub.c | 64
-rw-r--r--  drivers/usb/host/ohci-lh7a404.c | 3
-rw-r--r--  drivers/usb/host/ohci-omap.c | 16
-rw-r--r--  drivers/usb/host/ohci-pci.c | 133
-rw-r--r--  drivers/usb/host/ohci-pnx4008.c | 10
-rw-r--r--  drivers/usb/host/ohci-pnx8550.c | 1
-rw-r--r--  drivers/usb/host/ohci-ppc-of.c | 1
-rw-r--r--  drivers/usb/host/ohci-ppc-soc.c | 1
-rw-r--r--  drivers/usb/host/ohci-ps3.c | 1
-rw-r--r--  drivers/usb/host/ohci-pxa27x.c | 10
-rw-r--r--  drivers/usb/host/ohci-q.c | 6
-rw-r--r--  drivers/usb/host/ohci-s3c2410.c | 5
-rw-r--r--  drivers/usb/host/ohci-sa1111.c | 7
-rw-r--r--  drivers/usb/host/ohci-sh.c | 1
-rw-r--r--  drivers/usb/host/ohci-sm501.c | 1
-rw-r--r--  drivers/usb/host/ohci-ssb.c | 1
-rw-r--r--  drivers/usb/host/ohci.h | 11
-rw-r--r--  drivers/usb/host/r8a66597-hcd.c | 49
-rw-r--r--  drivers/usb/host/u132-hcd.c | 11
-rw-r--r--  drivers/usb/misc/Kconfig | 10
-rw-r--r--  drivers/usb/misc/Makefile | 1
-rw-r--r--  drivers/usb/misc/auerswald.c | 2152
-rw-r--r--  drivers/usb/misc/iowarrior.c | 1
-rw-r--r--  drivers/usb/misc/isight_firmware.c | 4
-rw-r--r--  drivers/usb/misc/sisusbvga/sisusb.c | 1
-rw-r--r--  drivers/usb/musb/Kconfig | 175
-rw-r--r--  drivers/usb/musb/Makefile | 69
-rw-r--r--  drivers/usb/musb/cppi_dma.c | 1540
-rw-r--r--  drivers/usb/musb/cppi_dma.h | 133
-rw-r--r--  drivers/usb/musb/davinci.c | 462
-rw-r--r--  drivers/usb/musb/davinci.h | 100
-rw-r--r--  drivers/usb/musb/musb_core.c | 2253
-rw-r--r--  drivers/usb/musb/musb_core.h | 488
-rw-r--r--  drivers/usb/musb/musb_debug.h | 62
-rw-r--r--  drivers/usb/musb/musb_dma.h | 172
-rw-r--r--  drivers/usb/musb/musb_gadget.c | 2031
-rw-r--r--  drivers/usb/musb/musb_gadget.h | 108
-rw-r--r--  drivers/usb/musb/musb_gadget_ep0.c | 983
-rw-r--r--  drivers/usb/musb/musb_host.c | 2170
-rw-r--r--  drivers/usb/musb/musb_host.h | 110
-rw-r--r--  drivers/usb/musb/musb_io.h | 115
-rw-r--r--  drivers/usb/musb/musb_regs.h | 300
-rw-r--r--  drivers/usb/musb/musb_virthub.c | 425
-rw-r--r--  drivers/usb/musb/musbhsdma.c | 433
-rw-r--r--  drivers/usb/musb/omap2430.c | 324
-rw-r--r--  drivers/usb/musb/omap2430.h | 56
-rw-r--r--  drivers/usb/musb/tusb6010.c | 1151
-rw-r--r--  drivers/usb/musb/tusb6010.h | 233
-rw-r--r--  drivers/usb/musb/tusb6010_omap.c | 719
-rw-r--r--  drivers/usb/serial/Kconfig | 7
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 6
-rw-r--r--  drivers/usb/serial/ftdi_sio.h | 7
-rw-r--r--  drivers/usb/serial/garmin_gps.c | 2
-rw-r--r--  drivers/usb/serial/option.c | 46
-rw-r--r--  drivers/usb/serial/pl2303.c | 1
-rw-r--r--  drivers/usb/serial/pl2303.h | 4
-rw-r--r--  drivers/usb/serial/sierra.c | 170
-rw-r--r--  drivers/usb/serial/usb-serial.c | 7
-rw-r--r--  drivers/usb/storage/Kconfig | 12
-rw-r--r--  drivers/usb/storage/Makefile | 1
-rw-r--r--  drivers/usb/storage/freecom.c | 2
-rw-r--r--  drivers/usb/storage/sierra_ms.c | 207
-rw-r--r--  drivers/usb/storage/sierra_ms.h | 4
-rw-r--r--  drivers/usb/storage/transport.c | 17
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 40
-rw-r--r--  drivers/usb/storage/usb.c | 3
106 files changed, 15942 insertions, 2694 deletions
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 755823cdf62a..bcefbddeba50 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -95,16 +95,18 @@ config USB
source "drivers/usb/core/Kconfig"
+source "drivers/usb/mon/Kconfig"
+
source "drivers/usb/host/Kconfig"
+source "drivers/usb/musb/Kconfig"
+
source "drivers/usb/class/Kconfig"
source "drivers/usb/storage/Kconfig"
source "drivers/usb/image/Kconfig"
-source "drivers/usb/mon/Kconfig"
-
comment "USB port drivers"
depends on USB
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 507a9bd0d77c..9aea43a8c4ad 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -602,7 +602,7 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ
offd = le32_to_cpu(buf[offb++]);
if (offd >= size) {
if (printk_ratelimit())
- usb_err(instance->usbatm, "wrong index #%x in response to cm #%x\n",
+ usb_err(instance->usbatm, "wrong index %#x in response to cm %#x\n",
offd, cm);
ret = -EIO;
goto cleanup;
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index cb01b5106efd..b6483dd98acc 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -64,7 +64,6 @@
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/kthread.h>
-#include <linux/version.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 0725b1871f23..c257453fa9de 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -51,6 +51,7 @@
*/
#undef DEBUG
+#undef VERBOSE_DEBUG
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -70,6 +71,9 @@
#include "cdc-acm.h"
+
+#define ACM_CLOSE_TIMEOUT 15 /* seconds to let writes drain */
+
/*
* Version Information
*/
@@ -85,6 +89,12 @@ static DEFINE_MUTEX(open_mutex);
#define ACM_READY(acm) (acm && acm->dev && acm->used)
+#ifdef VERBOSE_DEBUG
+#define verbose 1
+#else
+#define verbose 0
+#endif
+
/*
* Functions for ACM control messages.
*/
@@ -136,19 +146,17 @@ static int acm_wb_alloc(struct acm *acm)
static int acm_wb_is_avail(struct acm *acm)
{
int i, n;
+ unsigned long flags;
n = ACM_NW;
+ spin_lock_irqsave(&acm->write_lock, flags);
for (i = 0; i < ACM_NW; i++) {
n -= acm->wb[i].use;
}
+ spin_unlock_irqrestore(&acm->write_lock, flags);
return n;
}
-static inline int acm_wb_is_used(struct acm *acm, int wbn)
-{
- return acm->wb[wbn].use;
-}
-
/*
* Finish write.
*/
@@ -157,7 +165,6 @@ static void acm_write_done(struct acm *acm, struct acm_wb *wb)
unsigned long flags;
spin_lock_irqsave(&acm->write_lock, flags);
- acm->write_ready = 1;
wb->use = 0;
acm->transmitting--;
spin_unlock_irqrestore(&acm->write_lock, flags);
@@ -190,40 +197,25 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
static int acm_write_start(struct acm *acm, int wbn)
{
unsigned long flags;
- struct acm_wb *wb;
+ struct acm_wb *wb = &acm->wb[wbn];
int rc;
spin_lock_irqsave(&acm->write_lock, flags);
if (!acm->dev) {
+ wb->use = 0;
spin_unlock_irqrestore(&acm->write_lock, flags);
return -ENODEV;
}
- if (!acm->write_ready) {
- spin_unlock_irqrestore(&acm->write_lock, flags);
- return 0; /* A white lie */
- }
-
- wb = &acm->wb[wbn];
- if(acm_wb_is_avail(acm) <= 1)
- acm->write_ready = 0;
-
dbg("%s susp_count: %d", __func__, acm->susp_count);
if (acm->susp_count) {
- acm->old_ready = acm->write_ready;
acm->delayed_wb = wb;
- acm->write_ready = 0;
schedule_work(&acm->waker);
spin_unlock_irqrestore(&acm->write_lock, flags);
return 0; /* A white lie */
}
usb_mark_last_busy(acm->dev);
- if (!acm_wb_is_used(acm, wbn)) {
- spin_unlock_irqrestore(&acm->write_lock, flags);
- return 0;
- }
-
rc = acm_start_wb(acm, wb);
spin_unlock_irqrestore(&acm->write_lock, flags);
@@ -488,22 +480,28 @@ urbs:
/* data interface wrote those outgoing bytes */
static void acm_write_bulk(struct urb *urb)
{
- struct acm *acm;
struct acm_wb *wb = urb->context;
+ struct acm *acm = wb->instance;
- dbg("Entering acm_write_bulk with status %d", urb->status);
+ if (verbose || urb->status
+ || (urb->actual_length != urb->transfer_buffer_length))
+ dev_dbg(&acm->data->dev, "tx %d/%d bytes -- > %d\n",
+ urb->actual_length,
+ urb->transfer_buffer_length,
+ urb->status);
- acm = wb->instance;
acm_write_done(acm, wb);
if (ACM_READY(acm))
schedule_work(&acm->work);
+ else
+ wake_up_interruptible(&acm->drain_wait);
}
static void acm_softint(struct work_struct *work)
{
struct acm *acm = container_of(work, struct acm, work);
- dbg("Entering acm_softint.");
-
+
+ dev_vdbg(&acm->data->dev, "tx work\n");
if (!ACM_READY(acm))
return;
tty_wakeup(acm->tty);
@@ -512,7 +510,6 @@ static void acm_softint(struct work_struct *work)
static void acm_waker(struct work_struct *waker)
{
struct acm *acm = container_of(waker, struct acm, waker);
- long flags;
int rv;
rv = usb_autopm_get_interface(acm->control);
@@ -524,9 +521,6 @@ static void acm_waker(struct work_struct *waker)
acm_start_wb(acm, acm->delayed_wb);
acm->delayed_wb = NULL;
}
- spin_lock_irqsave(&acm->write_lock, flags);
- acm->write_ready = acm->old_ready;
- spin_unlock_irqrestore(&acm->write_lock, flags);
usb_autopm_put_interface(acm->control);
}
@@ -595,8 +589,8 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
tasklet_schedule(&acm->urb_task);
done:
-err_out:
mutex_unlock(&acm->mutex);
+err_out:
mutex_unlock(&open_mutex);
return rv;
@@ -628,6 +622,8 @@ static void acm_tty_unregister(struct acm *acm)
kfree(acm);
}
+static int acm_tty_chars_in_buffer(struct tty_struct *tty);
+
static void acm_tty_close(struct tty_struct *tty, struct file *filp)
{
struct acm *acm = tty->driver_data;
@@ -642,6 +638,13 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
if (acm->dev) {
usb_autopm_get_interface(acm->control);
acm_set_control(acm, acm->ctrlout = 0);
+
+ /* try letting the last writes drain naturally */
+ wait_event_interruptible_timeout(acm->drain_wait,
+ (ACM_NW == acm_wb_is_avail(acm))
+ || !acm->dev,
+ ACM_CLOSE_TIMEOUT * HZ);
+
usb_kill_urb(acm->ctrlurb);
for (i = 0; i < ACM_NW; i++)
usb_kill_urb(acm->wb[i].urb);
@@ -697,7 +700,7 @@ static int acm_tty_write_room(struct tty_struct *tty)
* Do not let the line discipline to know that we have a reserve,
* or it might get too enthusiastic.
*/
- return (acm->write_ready && acm_wb_is_avail(acm)) ? acm->writesize : 0;
+ return acm_wb_is_avail(acm) ? acm->writesize : 0;
}
static int acm_tty_chars_in_buffer(struct tty_struct *tty)
@@ -1072,11 +1075,11 @@ skip_normal_probe:
acm->urb_task.data = (unsigned long) acm;
INIT_WORK(&acm->work, acm_softint);
INIT_WORK(&acm->waker, acm_waker);
+ init_waitqueue_head(&acm->drain_wait);
spin_lock_init(&acm->throttle_lock);
spin_lock_init(&acm->write_lock);
spin_lock_init(&acm->read_lock);
mutex_init(&acm->mutex);
- acm->write_ready = 1;
acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
buf = usb_buffer_alloc(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
@@ -1108,9 +1111,11 @@ skip_normal_probe:
rcv->instance = acm;
}
for (i = 0; i < num_rx_buf; i++) {
- struct acm_rb *buf = &(acm->rb[i]);
+ struct acm_rb *rb = &(acm->rb[i]);
- if (!(buf->base = usb_buffer_alloc(acm->dev, readsize, GFP_KERNEL, &buf->dma))) {
+ rb->base = usb_buffer_alloc(acm->dev, readsize,
+ GFP_KERNEL, &rb->dma);
+ if (!rb->base) {
dev_dbg(&intf->dev, "out of memory (read bufs usb_buffer_alloc)\n");
goto alloc_fail7;
}
@@ -1172,6 +1177,7 @@ skip_countries:
acm_set_line(acm, &acm->line);
usb_driver_claim_interface(&acm_driver, data_interface, acm);
+ usb_set_intfdata(data_interface, acm);
usb_get_intf(control_interface);
tty_register_device(acm_tty_driver, minor, &control_interface->dev);
@@ -1221,11 +1227,11 @@ static void acm_disconnect(struct usb_interface *intf)
struct acm *acm = usb_get_intfdata(intf);
struct usb_device *usb_dev = interface_to_usbdev(intf);
- mutex_lock(&open_mutex);
- if (!acm || !acm->dev) {
- mutex_unlock(&open_mutex);
+ /* sibling interface is already cleaning up */
+ if (!acm)
return;
- }
+
+ mutex_lock(&open_mutex);
if (acm->country_codes){
device_remove_file(&acm->control->dev,
&dev_attr_wCountryCodes);
@@ -1356,6 +1362,9 @@ static struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0803, 0x3095), /* Zoom Telephonics Model 3095F USB MODEM */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
+ { USB_DEVICE(0x0572, 0x1321), /* Conexant USB MODEM CX93010 */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
/* control interfaces with various AT-command sets */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
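
[editor's note] The close path above is the interesting part of the cdc-acm changes: instead of killing write URBs immediately, acm_tty_close() now waits on the new drain_wait queue until every write buffer has been returned (or ACM_CLOSE_TIMEOUT seconds elapse), with acm_write_bulk() doing the wake-up once the port is no longer in use. A minimal sketch of that drain-on-close idiom, with hypothetical demo_* names and locking omitted for brevity:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define DEMO_NBUF		16
#define DEMO_CLOSE_TIMEOUT	15	/* seconds, like ACM_CLOSE_TIMEOUT */

struct demo_port {
	wait_queue_head_t	drain_wait;	/* init_waitqueue_head() at probe */
	int			free_bufs;	/* write buffers not in flight */
	bool			gone;		/* device disconnected */
};

/* write completion: return the buffer and poke anyone waiting in close() */
static void demo_write_done(struct demo_port *p)
{
	p->free_bufs++;			/* caller's lock assumed held */
	wake_up_interruptible(&p->drain_wait);
}

/* close(): let queued writes drain before killing whatever is left */
static void demo_close(struct demo_port *p)
{
	wait_event_interruptible_timeout(p->drain_wait,
			p->free_bufs == DEMO_NBUF || p->gone,
			DEMO_CLOSE_TIMEOUT * HZ);
	/* ... now it is safe to kill any URBs that still have not come back ... */
}

The timeout keeps a wedged host from blocking close() forever, which is the same trade-off ACM_CLOSE_TIMEOUT makes above.
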
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 85c3aaaab7c5..1f95e7aa1b66 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -106,8 +106,6 @@ struct acm {
struct list_head spare_read_bufs;
struct list_head filled_read_bufs;
int write_used; /* number of non-empty write buffers */
- int write_ready; /* write urb is not running */
- int old_ready;
int processing;
int transmitting;
spinlock_t write_lock;
@@ -115,6 +113,7 @@ struct acm {
struct usb_cdc_line_coding line; /* bits, stop, parity */
struct work_struct work; /* work queue entry for line discipline waking up */
struct work_struct waker;
+ wait_queue_head_t drain_wait; /* close processing */
struct tasklet_struct urb_task; /* rx processing */
spinlock_t throttle_lock; /* synchronize throtteling and read callback */
unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index ddb54e14a5c5..5a7fa6f09958 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -230,6 +230,13 @@ static int usb_probe_interface(struct device *dev)
*/
intf->pm_usage_cnt = !(driver->supports_autosuspend);
+ /* Carry out a deferred switch to altsetting 0 */
+ if (intf->needs_altsetting0) {
+ usb_set_interface(udev, intf->altsetting[0].
+ desc.bInterfaceNumber, 0);
+ intf->needs_altsetting0 = 0;
+ }
+
error = driver->probe(intf, id);
if (error) {
mark_quiesced(intf);
@@ -266,8 +273,17 @@ static int usb_unbind_interface(struct device *dev)
driver->disconnect(intf);
- /* reset other interface state */
- usb_set_interface(udev, intf->altsetting[0].desc.bInterfaceNumber, 0);
+ /* Reset other interface state.
+ * We cannot do a Set-Interface if the device is suspended or
+ * if it is prepared for a system sleep (since installing a new
+ * altsetting means creating new endpoint device entries).
+ * When either of these happens, defer the Set-Interface.
+ */
+ if (!error && intf->dev.power.status == DPM_ON)
+ usb_set_interface(udev, intf->altsetting[0].
+ desc.bInterfaceNumber, 0);
+ else
+ intf->needs_altsetting0 = 1;
usb_set_intfdata(intf, NULL);
intf->condition = USB_INTERFACE_UNBOUND;
@@ -774,7 +790,6 @@ void usb_deregister(struct usb_driver *driver)
}
EXPORT_SYMBOL_GPL(usb_deregister);
-
/* Forced unbinding of a USB interface driver, either because
* it doesn't support pre_reset/post_reset/reset_resume or
* because it doesn't support suspend/resume.
@@ -799,7 +814,8 @@ void usb_forced_unbind_intf(struct usb_interface *intf)
* The caller must hold @intf's device's lock, but not its pm_mutex
* and not @intf->dev.sem.
*
- * FIXME: The caller must block system sleep transitions.
+ * Note: Rebinds will be skipped if a system sleep transition is in
+ * progress and the PM "complete" callback hasn't occurred yet.
*/
void usb_rebind_intf(struct usb_interface *intf)
{
@@ -815,12 +831,16 @@ void usb_rebind_intf(struct usb_interface *intf)
}
/* Try to rebind the interface */
- intf->needs_binding = 0;
- rc = device_attach(&intf->dev);
- if (rc < 0)
- dev_warn(&intf->dev, "rebind failed: %d\n", rc);
+ if (intf->dev.power.status == DPM_ON) {
+ intf->needs_binding = 0;
+ rc = device_attach(&intf->dev);
+ if (rc < 0)
+ dev_warn(&intf->dev, "rebind failed: %d\n", rc);
+ }
}
+#ifdef CONFIG_PM
+
#define DO_UNBIND 0
#define DO_REBIND 1
@@ -828,7 +848,6 @@ void usb_rebind_intf(struct usb_interface *intf)
* or rebind interfaces that have been unbound, according to @action.
*
* The caller must hold @udev's device lock.
- * FIXME: For rebinds, the caller must block system sleep transitions.
*/
static void do_unbind_rebind(struct usb_device *udev, int action)
{
@@ -850,30 +869,14 @@ static void do_unbind_rebind(struct usb_device *udev, int action)
}
break;
case DO_REBIND:
- if (intf->needs_binding) {
-
- /* FIXME: The next line is needed because we are going to probe
- * the interface, but as far as the PM core is concerned the
- * interface is still suspended. The problem wouldn't exist
- * if we could rebind the interface during the interface's own
- * resume() call, but at the time the usb_device isn't locked!
- *
- * The real solution will be to carry this out during the device's
- * complete() callback. Until that is implemented, we have to
- * use this hack.
- */
-// intf->dev.power.sleeping = 0;
-
+ if (intf->needs_binding)
usb_rebind_intf(intf);
- }
break;
}
}
}
}
-#ifdef CONFIG_PM
-
/* Caller has locked udev's pm_mutex */
static int usb_suspend_device(struct usb_device *udev, pm_message_t msg)
{
@@ -927,14 +930,14 @@ static int usb_resume_device(struct usb_device *udev)
}
/* Caller has locked intf's usb_device's pm mutex */
-static int usb_suspend_interface(struct usb_interface *intf, pm_message_t msg)
+static int usb_suspend_interface(struct usb_device *udev,
+ struct usb_interface *intf, pm_message_t msg)
{
struct usb_driver *driver;
int status = 0;
/* with no hardware, USB interfaces only use FREEZE and ON states */
- if (interface_to_usbdev(intf)->state == USB_STATE_NOTATTACHED ||
- !is_active(intf))
+ if (udev->state == USB_STATE_NOTATTACHED || !is_active(intf))
goto done;
if (intf->condition == USB_INTERFACE_UNBOUND) /* This can't happen */
@@ -945,7 +948,7 @@ static int usb_suspend_interface(struct usb_interface *intf, pm_message_t msg)
status = driver->suspend(intf, msg);
if (status == 0)
mark_quiesced(intf);
- else if (!interface_to_usbdev(intf)->auto_pm)
+ else if (!udev->auto_pm)
dev_err(&intf->dev, "%s error %d\n",
"suspend", status);
} else {
@@ -962,13 +965,13 @@ static int usb_suspend_interface(struct usb_interface *intf, pm_message_t msg)
}
/* Caller has locked intf's usb_device's pm_mutex */
-static int usb_resume_interface(struct usb_interface *intf, int reset_resume)
+static int usb_resume_interface(struct usb_device *udev,
+ struct usb_interface *intf, int reset_resume)
{
struct usb_driver *driver;
int status = 0;
- if (interface_to_usbdev(intf)->state == USB_STATE_NOTATTACHED ||
- is_active(intf))
+ if (udev->state == USB_STATE_NOTATTACHED || is_active(intf))
goto done;
/* Don't let autoresume interfere with unbinding */
@@ -976,8 +979,17 @@ static int usb_resume_interface(struct usb_interface *intf, int reset_resume)
goto done;
/* Can't resume it if it doesn't have a driver. */
- if (intf->condition == USB_INTERFACE_UNBOUND)
+ if (intf->condition == USB_INTERFACE_UNBOUND) {
+
+ /* Carry out a deferred switch to altsetting 0 */
+ if (intf->needs_altsetting0 &&
+ intf->dev.power.status == DPM_ON) {
+ usb_set_interface(udev, intf->altsetting[0].
+ desc.bInterfaceNumber, 0);
+ intf->needs_altsetting0 = 0;
+ }
goto done;
+ }
/* Don't resume if the interface is marked for rebinding */
if (intf->needs_binding)
@@ -1152,7 +1164,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
if (udev->actconfig) {
for (; i < udev->actconfig->desc.bNumInterfaces; i++) {
intf = udev->actconfig->interface[i];
- status = usb_suspend_interface(intf, msg);
+ status = usb_suspend_interface(udev, intf, msg);
if (status != 0)
break;
}
@@ -1164,7 +1176,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
if (status != 0) {
while (--i >= 0) {
intf = udev->actconfig->interface[i];
- usb_resume_interface(intf, 0);
+ usb_resume_interface(udev, intf, 0);
}
/* Try another autosuspend when the interfaces aren't busy */
@@ -1277,7 +1289,7 @@ static int usb_resume_both(struct usb_device *udev)
if (status == 0 && udev->actconfig) {
for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
intf = udev->actconfig->interface[i];
- usb_resume_interface(intf, udev->reset_resume);
+ usb_resume_interface(udev, intf, udev->reset_resume);
}
}
@@ -1606,12 +1618,10 @@ int usb_external_resume_device(struct usb_device *udev)
return status;
}
-static int usb_suspend(struct device *dev, pm_message_t message)
+int usb_suspend(struct device *dev, pm_message_t message)
{
struct usb_device *udev;
- if (!is_usb_device(dev)) /* Ignore PM for interfaces */
- return 0;
udev = to_usb_device(dev);
/* If udev is already suspended, we can skip this suspend and
@@ -1630,12 +1640,10 @@ static int usb_suspend(struct device *dev, pm_message_t message)
return usb_external_suspend_device(udev, message);
}
-static int usb_resume(struct device *dev)
+int usb_resume(struct device *dev)
{
struct usb_device *udev;
- if (!is_usb_device(dev)) /* Ignore PM for interfaces */
- return 0;
udev = to_usb_device(dev);
/* If udev->skip_sys_resume is set then udev was already suspended
@@ -1647,17 +1655,10 @@ static int usb_resume(struct device *dev)
return usb_external_resume_device(udev);
}
-#else
-
-#define usb_suspend NULL
-#define usb_resume NULL
-
#endif /* CONFIG_PM */
struct bus_type usb_bus_type = {
.name = "usb",
.match = usb_device_match,
.uevent = usb_uevent,
- .suspend = usb_suspend,
- .resume = usb_resume,
};
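
[editor's note] The new needs_altsetting0 handling defers the Set-Interface that usb_unbind_interface() used to issue unconditionally: if the device is suspended or prepared for system sleep, the flag is set and the request is replayed from usb_probe_interface() or usb_resume_interface() once power.status is back to DPM_ON. A minimal sketch of that defer-and-replay idiom, with every name here hypothetical:

#include <linux/device.h>

struct demo_state {
	struct device	*dev;
	unsigned	needs_reset:1;	/* action we could not do yet */
};

static void demo_do_reset(struct demo_state *s)
{
	/* the real code issues usb_set_interface(..., 0) here */
}

/* teardown: only act if the PM core says the device is fully powered */
static void demo_teardown(struct demo_state *s)
{
	if (s->dev->power.status == DPM_ON)
		demo_do_reset(s);
	else
		s->needs_reset = 1;	/* replay later */
}

/* probe/resume: replay whatever had to be deferred */
static void demo_revive(struct demo_state *s)
{
	if (s->needs_reset && s->dev->power.status == DPM_ON) {
		demo_do_reset(s);
		s->needs_reset = 0;
	}
}
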
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index f7bfd72ef115..8abd4e59bf4a 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -924,15 +924,6 @@ static int register_root_hub(struct usb_hcd *hcd)
return retval;
}
-void usb_enable_root_hub_irq (struct usb_bus *bus)
-{
- struct usb_hcd *hcd;
-
- hcd = container_of (bus, struct usb_hcd, self);
- if (hcd->driver->hub_irq_enable && hcd->state != HC_STATE_HALT)
- hcd->driver->hub_irq_enable (hcd);
-}
-
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index 5b0b59b0d89b..e710ce04e228 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -212,8 +212,6 @@ struct hc_driver {
int (*bus_suspend)(struct usb_hcd *);
int (*bus_resume)(struct usb_hcd *);
int (*start_port_reset)(struct usb_hcd *, unsigned port_num);
- void (*hub_irq_enable)(struct usb_hcd *);
- /* Needed only if port-change IRQs are level-triggered */
/* force handover of high-speed port to full-speed companion */
void (*relinquish_port)(struct usb_hcd *, int);
@@ -379,8 +377,6 @@ extern struct list_head usb_bus_list;
extern struct mutex usb_bus_list_lock;
extern wait_queue_head_t usb_kill_urb_queue;
-extern void usb_enable_root_hub_irq(struct usb_bus *bus);
-
extern int usb_find_interface_driver(struct usb_device *dev,
struct usb_interface *interface);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 107e1d25ddec..6a5cb018383d 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2102,8 +2102,6 @@ int usb_port_resume(struct usb_device *udev)
}
clear_bit(port1, hub->busy_bits);
- if (!hub->hdev->parent && !hub->busy_bits[0])
- usb_enable_root_hub_irq(hub->hdev->bus);
status = check_port_resume_type(udev,
hub, port1, status, portchange, portstatus);
@@ -3081,11 +3079,6 @@ static void hub_events(void)
}
}
- /* If this is a root hub, tell the HCD it's okay to
- * re-enable port-change interrupts now. */
- if (!hdev->parent && !hub->busy_bits[0])
- usb_enable_root_hub_irq(hdev->bus);
-
loop_autopm:
/* Allow autosuspend if we're not going to run again */
if (list_empty(&hub->event_list))
@@ -3311,8 +3304,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
break;
}
clear_bit(port1, parent_hub->busy_bits);
- if (!parent_hdev->parent && !parent_hub->busy_bits[0])
- usb_enable_root_hub_irq(parent_hdev->bus);
if (ret < 0)
goto re_enumerate;
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 2fcc06eb5e60..286b4431a097 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -389,7 +389,6 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
if (io->entries <= 0)
return io->entries;
- io->count = io->entries;
io->urbs = kmalloc(io->entries * sizeof *io->urbs, mem_flags);
if (!io->urbs)
goto nomem;
@@ -458,6 +457,7 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
/* transaction state */
+ io->count = io->entries;
io->status = 0;
io->bytes = 0;
init_completion(&io->complete);
@@ -1091,8 +1091,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
continue;
dev_dbg(&dev->dev, "unregistering interface %s\n",
dev_name(&interface->dev));
- device_del(&interface->dev);
usb_remove_sysfs_intf_files(interface);
+ device_del(&interface->dev);
}
/* Now that the interfaces are unbound, nobody should
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index c0b1ae25ae2a..47111e88f791 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -601,15 +601,20 @@ EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
struct urb *victim;
+ unsigned long flags;
- spin_lock_irq(&anchor->lock);
+ spin_lock_irqsave(&anchor->lock, flags);
while (!list_empty(&anchor->urb_list)) {
victim = list_entry(anchor->urb_list.prev, struct urb,
anchor_list);
+ usb_get_urb(victim);
+ spin_unlock_irqrestore(&anchor->lock, flags);
/* this will unanchor the URB */
usb_unlink_urb(victim);
+ usb_put_urb(victim);
+ spin_lock_irqsave(&anchor->lock, flags);
}
- spin_unlock_irq(&anchor->lock);
+ spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);
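
[editor's note] The hunk above makes usb_unlink_anchored_urbs() take a reference on each victim and drop the anchor lock around the unlink, so it works from contexts with interrupts disabled and cannot race a completion that frees the URB. The caller side of the anchor API is unchanged; a hedged sketch of how a driver might use it (the demo_* names and the bulk pipe are made up):

#include <linux/usb.h>
#include <linux/slab.h>

struct demo_dev {
	struct usb_device	*udev;
	struct usb_anchor	submitted;	/* init_usb_anchor() at probe */
};

static void demo_complete(struct urb *urb)
{
	/* runs in interrupt context; the URB was unanchored on giveback */
	kfree(urb->transfer_buffer);
}

static int demo_submit_one(struct demo_dev *dd, unsigned int pipe, size_t len)
{
	struct urb *urb;
	void *buf;
	int rc;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	buf = kmalloc(len, GFP_KERNEL);
	if (!urb || !buf) {
		usb_free_urb(urb);
		kfree(buf);
		return -ENOMEM;
	}
	usb_fill_bulk_urb(urb, dd->udev, pipe, buf, len, demo_complete, dd);

	usb_anchor_urb(urb, &dd->submitted);	/* track it for teardown */
	rc = usb_submit_urb(urb, GFP_KERNEL);
	if (rc) {
		usb_unanchor_urb(urb);
		kfree(buf);
	}
	usb_free_urb(urb);	/* anchor and HCD hold their own references */
	return rc;
}

static void demo_suspend(struct demo_dev *dd)
{
	/* asynchronous cancel: completions still run, with an error status */
	usb_unlink_anchored_urbs(&dd->submitted);
}

static void demo_disconnect(struct demo_dev *dd)
{
	/* synchronous cancel: returns once every anchored URB has completed */
	usb_kill_anchored_urbs(&dd->submitted);
}
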
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 84fcaa6a21ec..be1fa0723f2c 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -219,12 +219,6 @@ static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
}
#endif /* CONFIG_HOTPLUG */
-struct device_type usb_device_type = {
- .name = "usb_device",
- .release = usb_release_dev,
- .uevent = usb_dev_uevent,
-};
-
#ifdef CONFIG_PM
static int ksuspend_usb_init(void)
@@ -244,13 +238,80 @@ static void ksuspend_usb_cleanup(void)
destroy_workqueue(ksuspend_usb_wq);
}
+/* USB device Power-Management thunks.
+ * There's no need to distinguish here between quiescing a USB device
+ * and powering it down; the generic_suspend() routine takes care of
+ * it by skipping the usb_port_suspend() call for a quiesce. And for
+ * USB interfaces there's no difference at all.
+ */
+
+static int usb_dev_prepare(struct device *dev)
+{
+ return 0; /* Implement eventually? */
+}
+
+static void usb_dev_complete(struct device *dev)
+{
+ /* Currently used only for rebinding interfaces */
+ usb_resume(dev); /* Implement eventually? */
+}
+
+static int usb_dev_suspend(struct device *dev)
+{
+ return usb_suspend(dev, PMSG_SUSPEND);
+}
+
+static int usb_dev_resume(struct device *dev)
+{
+ return usb_resume(dev);
+}
+
+static int usb_dev_freeze(struct device *dev)
+{
+ return usb_suspend(dev, PMSG_FREEZE);
+}
+
+static int usb_dev_thaw(struct device *dev)
+{
+ return usb_resume(dev);
+}
+
+static int usb_dev_poweroff(struct device *dev)
+{
+ return usb_suspend(dev, PMSG_HIBERNATE);
+}
+
+static int usb_dev_restore(struct device *dev)
+{
+ return usb_resume(dev);
+}
+
+static struct pm_ops usb_device_pm_ops = {
+ .prepare = usb_dev_prepare,
+ .complete = usb_dev_complete,
+ .suspend = usb_dev_suspend,
+ .resume = usb_dev_resume,
+ .freeze = usb_dev_freeze,
+ .thaw = usb_dev_thaw,
+ .poweroff = usb_dev_poweroff,
+ .restore = usb_dev_restore,
+};
+
#else
#define ksuspend_usb_init() 0
#define ksuspend_usb_cleanup() do {} while (0)
+#define usb_device_pm_ops (*(struct pm_ops *)0)
#endif /* CONFIG_PM */
+struct device_type usb_device_type = {
+ .name = "usb_device",
+ .release = usb_release_dev,
+ .uevent = usb_dev_uevent,
+ .pm = &usb_device_pm_ops,
+};
+
/* Returns 1 if @usb_bus is WUSB, 0 otherwise */
static unsigned usb_bus_is_wusb(struct usb_bus *bus)
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index d9a6e16dbf84..9a1a45ac3add 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -41,6 +41,9 @@ extern void usb_host_cleanup(void);
#ifdef CONFIG_PM
+extern int usb_suspend(struct device *dev, pm_message_t msg);
+extern int usb_resume(struct device *dev);
+
extern void usb_autosuspend_work(struct work_struct *work);
extern int usb_port_suspend(struct usb_device *dev);
extern int usb_port_resume(struct usb_device *dev);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index c6a8c6b1116a..acc95b2ac6f8 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -284,6 +284,16 @@ config USB_LH7A40X
default USB_GADGET
select USB_GADGET_SELECTED
+# built in ../musb along with host support
+config USB_GADGET_MUSB_HDRC
+ boolean "Inventra HDRC USB Peripheral (TI, ...)"
+ depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
+ select USB_GADGET_DUALSPEED
+ select USB_GADGET_SELECTED
+ help
+ This OTG-capable silicon IP is used in dual designs including
+ the TI DaVinci, OMAP 243x, OMAP 343x, and TUSB 6010.
+
config USB_GADGET_OMAP
boolean "OMAP USB Device Controller"
depends on ARCH_OMAP
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index 1500e1b3c302..abf8192f89e8 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -44,7 +44,6 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index e2d8a5d86c40..a8a1de413321 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -40,16 +40,15 @@
#include <linux/usb/gadget.h>
#include <asm/byteorder.h>
-#include <asm/hardware.h>
+#include <mach/hardware.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
-#include <asm/mach-types.h>
#include <asm/gpio.h>
-#include <asm/arch/board.h>
-#include <asm/arch/cpu.h>
-#include <asm/arch/at91sam9261_matrix.h>
+#include <mach/board.h>
+#include <mach/cpu.h>
+#include <mach/at91sam9261_matrix.h>
#include "at91_udc.h"
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 07e5a0b5dcda..ae30ab1d264f 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -22,7 +22,7 @@
#include <linux/delay.h>
#include <asm/gpio.h>
-#include <asm/arch/board.h>
+#include <mach/board.h>
#include "atmel_usba_udc.h"
@@ -334,7 +334,7 @@ static void toggle_bias(int is_on)
#elif defined(CONFIG_ARCH_AT91)
-#include <asm/arch/at91_pmc.h>
+#include <mach/at91_pmc.h>
static void toggle_bias(int is_on)
{
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 21d1406af9ee..7600a0c78753 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -542,13 +542,14 @@ dummy_queue (struct usb_ep *_ep, struct usb_request *_req,
req->req.context = dum;
req->req.complete = fifo_complete;
+ list_add_tail(&req->queue, &ep->queue);
spin_unlock (&dum->lock);
_req->actual = _req->length;
_req->status = 0;
_req->complete (_ep, _req);
spin_lock (&dum->lock);
- }
- list_add_tail (&req->queue, &ep->queue);
+ } else
+ list_add_tail(&req->queue, &ep->queue);
spin_unlock_irqrestore (&dum->lock, flags);
/* real hardware would likely enable transfers here, in case
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index d8faccf27895..5ee1590b8e9c 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -47,18 +47,37 @@ struct f_acm {
u8 ctrl_id, data_id;
u8 port_num;
- struct usb_descriptor_header **fs_function;
+ u8 pending;
+
+ /* lock is mostly for pending and notify_req ... they get accessed
+ * by callbacks both from tty (open/close/break) under its spinlock,
+ * and notify_req.complete() which can't use that lock.
+ */
+ spinlock_t lock;
+
struct acm_ep_descs fs;
- struct usb_descriptor_header **hs_function;
struct acm_ep_descs hs;
struct usb_ep *notify;
struct usb_endpoint_descriptor *notify_desc;
+ struct usb_request *notify_req;
struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
+
+ /* SetControlLineState request -- CDC 1.1 section 6.2.14 (INPUT) */
u16 port_handshake_bits;
-#define RS232_RTS (1 << 1) /* unused with full duplex */
-#define RS232_DTR (1 << 0) /* host is ready for data r/w */
+#define ACM_CTRL_RTS (1 << 1) /* unused with full duplex */
+#define ACM_CTRL_DTR (1 << 0) /* host is ready for data r/w */
+
+ /* SerialState notification -- CDC 1.1 section 6.3.5 (OUTPUT) */
+ u16 serial_state;
+#define ACM_CTRL_OVERRUN (1 << 6)
+#define ACM_CTRL_PARITY (1 << 5)
+#define ACM_CTRL_FRAMING (1 << 4)
+#define ACM_CTRL_RI (1 << 3)
+#define ACM_CTRL_BRK (1 << 2)
+#define ACM_CTRL_DSR (1 << 1)
+#define ACM_CTRL_DCD (1 << 0)
};
static inline struct f_acm *func_to_acm(struct usb_function *f)
@@ -66,12 +85,17 @@ static inline struct f_acm *func_to_acm(struct usb_function *f)
return container_of(f, struct f_acm, port.func);
}
+static inline struct f_acm *port_to_acm(struct gserial *p)
+{
+ return container_of(p, struct f_acm, port);
+}
+
/*-------------------------------------------------------------------------*/
/* notification endpoint uses smallish and infrequent fixed-size messages */
#define GS_LOG2_NOTIFY_INTERVAL 5 /* 1 << 5 == 32 msec */
-#define GS_NOTIFY_MAXPACKET 8
+#define GS_NOTIFY_MAXPACKET 10 /* notification + 2 bytes */
/* interface and class descriptors: */
@@ -117,7 +141,7 @@ static struct usb_cdc_acm_descriptor acm_descriptor __initdata = {
.bLength = sizeof(acm_descriptor),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubType = USB_CDC_ACM_TYPE,
- .bmCapabilities = (1 << 1),
+ .bmCapabilities = USB_CDC_CAP_LINE,
};
static struct usb_cdc_union_desc acm_union_desc __initdata = {
@@ -277,6 +301,11 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
/* composite driver infrastructure handles everything except
* CDC class messages; interface activation uses set_alt().
+ *
+ * Note CDC spec table 4 lists the ACM request profile. It requires
+ * encapsulated command support ... we don't handle any, and respond
+ * to them by stalling. Options include get/set/clear comm features
+ * (not that useful) and SEND_BREAK.
*/
switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
@@ -312,7 +341,7 @@ static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
value = 0;
/* FIXME we should not allow data to flow until the
- * host sets the RS232_DTR bit; and when it clears
+ * host sets the ACM_CTRL_DTR bit; and when it clears
* that bit, we should return to that no-flow state.
*/
acm->port_handshake_bits = w_value;
@@ -350,9 +379,6 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
/* we know alt == 0, so this is an activation or a reset */
if (intf == acm->ctrl_id) {
- /* REVISIT this may need more work when we start to
- * send notifications ...
- */
if (acm->notify->driver_data) {
VDBG(cdev, "reset acm control interface %d\n", intf);
usb_ep_disable(acm->notify);
@@ -397,6 +423,128 @@ static void acm_disable(struct usb_function *f)
/*-------------------------------------------------------------------------*/
+/**
+ * acm_cdc_notify - issue CDC notification to host
+ * @acm: wraps host to be notified
+ * @type: notification type
+ * @value: Refer to cdc specs, wValue field.
+ * @data: data to be sent
+ * @length: size of data
+ * Context: irqs blocked, acm->lock held, acm_notify_req non-null
+ *
+ * Returns zero on sucess or a negative errno.
+ *
+ * See section 6.3.5 of the CDC 1.1 specification for information
+ * about the only notification we issue: SerialState change.
+ */
+static int acm_cdc_notify(struct f_acm *acm, u8 type, u16 value,
+ void *data, unsigned length)
+{
+ struct usb_ep *ep = acm->notify;
+ struct usb_request *req;
+ struct usb_cdc_notification *notify;
+ const unsigned len = sizeof(*notify) + length;
+ void *buf;
+ int status;
+
+ req = acm->notify_req;
+ acm->notify_req = NULL;
+ acm->pending = false;
+
+ req->length = len;
+ notify = req->buf;
+ buf = notify + 1;
+
+ notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+ | USB_RECIP_INTERFACE;
+ notify->bNotificationType = type;
+ notify->wValue = cpu_to_le16(value);
+ notify->wIndex = cpu_to_le16(acm->ctrl_id);
+ notify->wLength = cpu_to_le16(length);
+ memcpy(buf, data, length);
+
+ status = usb_ep_queue(ep, req, GFP_ATOMIC);
+ if (status < 0) {
+ ERROR(acm->port.func.config->cdev,
+ "acm ttyGS%d can't notify serial state, %d\n",
+ acm->port_num, status);
+ acm->notify_req = req;
+ }
+
+ return status;
+}
+
+static int acm_notify_serial_state(struct f_acm *acm)
+{
+ struct usb_composite_dev *cdev = acm->port.func.config->cdev;
+ int status;
+
+ spin_lock(&acm->lock);
+ if (acm->notify_req) {
+ DBG(cdev, "acm ttyGS%d serial state %04x\n",
+ acm->port_num, acm->serial_state);
+ status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
+ 0, &acm->serial_state, sizeof(acm->serial_state));
+ } else {
+ acm->pending = true;
+ status = 0;
+ }
+ spin_unlock(&acm->lock);
+ return status;
+}
+
+static void acm_cdc_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_acm *acm = req->context;
+ u8 doit = false;
+
+ /* on this call path we do NOT hold the port spinlock,
+ * which is why ACM needs its own spinlock
+ */
+ spin_lock(&acm->lock);
+ if (req->status != -ESHUTDOWN)
+ doit = acm->pending;
+ acm->notify_req = req;
+ spin_unlock(&acm->lock);
+
+ if (doit)
+ acm_notify_serial_state(acm);
+}
+
+/* connect == the TTY link is open */
+
+static void acm_connect(struct gserial *port)
+{
+ struct f_acm *acm = port_to_acm(port);
+
+ acm->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD;
+ acm_notify_serial_state(acm);
+}
+
+static void acm_disconnect(struct gserial *port)
+{
+ struct f_acm *acm = port_to_acm(port);
+
+ acm->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD);
+ acm_notify_serial_state(acm);
+}
+
+static int acm_send_break(struct gserial *port, int duration)
+{
+ struct f_acm *acm = port_to_acm(port);
+ u16 state;
+
+ state = acm->serial_state;
+ state &= ~ACM_CTRL_BRK;
+ if (duration)
+ state |= ACM_CTRL_BRK;
+
+ acm->serial_state = state;
+ return acm_notify_serial_state(acm);
+}
+
+/*-------------------------------------------------------------------------*/
+
/* ACM function driver setup/binding */
static int __init
acm_bind(struct usb_configuration *c, struct usb_function *f)
@@ -445,8 +593,20 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
acm->notify = ep;
ep->driver_data = cdev; /* claim */
+ /* allocate notification */
+ acm->notify_req = gs_alloc_req(ep,
+ sizeof(struct usb_cdc_notification) + 2,
+ GFP_KERNEL);
+ if (!acm->notify_req)
+ goto fail;
+
+ acm->notify_req->complete = acm_cdc_notify_complete;
+ acm->notify_req->context = acm;
+
/* copy descriptors, and track endpoint copies */
f->descriptors = usb_copy_descriptors(acm_fs_function);
+ if (!f->descriptors)
+ goto fail;
acm->fs.in = usb_find_endpoint(acm_fs_function,
f->descriptors, &acm_fs_in_desc);
@@ -478,8 +638,6 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
f->hs_descriptors, &acm_hs_notify_desc);
}
- /* FIXME provide a callback for triggering notifications */
-
DBG(cdev, "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n",
acm->port_num,
gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
@@ -488,6 +646,9 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
return 0;
fail:
+ if (acm->notify_req)
+ gs_free_req(acm->notify, acm->notify_req);
+
/* we might as well release our claims on endpoints */
if (acm->notify)
acm->notify->driver_data = NULL;
@@ -504,10 +665,13 @@ fail:
static void
acm_unbind(struct usb_configuration *c, struct usb_function *f)
{
+ struct f_acm *acm = func_to_acm(f);
+
if (gadget_is_dualspeed(c->cdev->gadget))
usb_free_descriptors(f->hs_descriptors);
usb_free_descriptors(f->descriptors);
- kfree(func_to_acm(f));
+ gs_free_req(acm->notify, acm->notify_req);
+ kfree(acm);
}
/* Some controllers can't support CDC ACM ... */
@@ -571,8 +735,14 @@ int __init acm_bind_config(struct usb_configuration *c, u8 port_num)
if (!acm)
return -ENOMEM;
+ spin_lock_init(&acm->lock);
+
acm->port_num = port_num;
+ acm->port.connect = acm_connect;
+ acm->port.disconnect = acm_disconnect;
+ acm->port.send_break = acm_send_break;
+
acm->port.func.name = "acm";
acm->port.func.strings = acm_strings;
/* descriptors are per-instance copies */
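
[editor's note] acm_cdc_notify() above queues a usb_cdc_notification header followed by the two-byte SerialState bitmap, which is why GS_NOTIFY_MAXPACKET grows to 10. A hedged, standalone sketch of just the message layout (the helper name and the ctrl_id parameter are illustrative, not part of f_acm):

#include <linux/types.h>
#include <linux/string.h>
#include <linux/usb/ch9.h>
#include <linux/usb/cdc.h>
#include <asm/byteorder.h>

/* Build the 10-byte SerialState notification into buf; returns its length */
static unsigned build_serial_state(void *buf, u8 ctrl_id, u16 serial_state)
{
	struct usb_cdc_notification *notify = buf;
	__le16 state = cpu_to_le16(serial_state);	/* ACM_CTRL_* bitmap */

	notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
				| USB_RECIP_INTERFACE;		/* 0xa1 */
	notify->bNotificationType = USB_CDC_NOTIFY_SERIAL_STATE;	/* 0x20 */
	notify->wValue = cpu_to_le16(0);
	notify->wIndex = cpu_to_le16(ctrl_id);		/* control interface */
	notify->wLength = cpu_to_le16(sizeof(state));	/* 2 data bytes */
	memcpy(notify + 1, &state, sizeof(state));

	return sizeof(*notify) + sizeof(state);		/* 8 + 2 == 10 */
}
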
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index 0822e9d7693a..a2b5c092bda0 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -63,9 +63,7 @@ struct f_ecm {
char ethaddr[14];
- struct usb_descriptor_header **fs_function;
struct ecm_ep_descs fs;
- struct usb_descriptor_header **hs_function;
struct ecm_ep_descs hs;
struct usb_ep *notify;
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 61652f0f13fd..659b3d9671c4 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -85,9 +85,7 @@ struct f_rndis {
u8 ethaddr[ETH_ALEN];
int config;
- struct usb_descriptor_header **fs_function;
struct rndis_ep_descs fs;
- struct usb_descriptor_header **hs_function;
struct rndis_ep_descs hs;
struct usb_ep *notify;
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 1b6bde9aaed5..fe5674db344b 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -36,9 +36,7 @@ struct f_gser {
u8 data_id;
u8 port_num;
- struct usb_descriptor_header **fs_function;
struct gser_descs fs;
- struct usb_descriptor_header **hs_function;
struct gser_descs hs;
};
diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
index afeab9a0523f..acb8d233aa1d 100644
--- a/drivers/usb/gadget/f_subset.c
+++ b/drivers/usb/gadget/f_subset.c
@@ -66,9 +66,7 @@ struct f_gether {
char ethaddr[14];
- struct usb_descriptor_header **fs_function;
struct geth_descs fs;
- struct usb_descriptor_header **hs_function;
struct geth_descs hs;
};
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index 5246e8fef2b2..17d9905101b7 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -11,6 +11,10 @@
* Some are available on 2.4 kernels; several are available, but not
* yet pushed in the 2.6 mainline tree.
*/
+
+#ifndef __GADGET_CHIPS_H
+#define __GADGET_CHIPS_H
+
#ifdef CONFIG_USB_GADGET_NET2280
#define gadget_is_net2280(g) !strcmp("net2280", (g)->name)
#else
@@ -237,3 +241,5 @@ static inline bool gadget_supports_altsettings(struct usb_gadget *gadget)
/* Everything else is *presumably* fine ... */
return true;
}
+
+#endif /* __GADGET_CHIPS_H */
diff --git a/drivers/usb/gadget/lh7a40x_udc.h b/drivers/usb/gadget/lh7a40x_udc.h
index 1ecfd6366b9a..ca861203a301 100644
--- a/drivers/usb/gadget/lh7a40x_udc.h
+++ b/drivers/usb/gadget/lh7a40x_udc.h
@@ -47,7 +47,7 @@
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>
-#include <asm/hardware.h>
+#include <mach/hardware.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 395bd1844482..574c53831a05 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -52,8 +52,9 @@
#include <asm/unaligned.h>
#include <asm/mach-types.h>
-#include <asm/arch/dma.h>
-#include <asm/arch/usb.h>
+#include <mach/dma.h>
+#include <mach/usb.h>
+#include <mach/control.h>
#include "omap_udc.h"
@@ -2310,10 +2311,10 @@ static int proc_otg_show(struct seq_file *s)
u32 trans;
char *ctrl_name;
- tmp = OTG_REV_REG;
+ tmp = omap_readl(OTG_REV);
if (cpu_is_omap24xx()) {
ctrl_name = "control_devconf";
- trans = CONTROL_DEVCONF_REG;
+ trans = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
} else {
ctrl_name = "tranceiver_ctrl";
trans = omap_readw(USB_TRANSCEIVER_CTRL);
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index 7e6725d89976..da6e93c201d2 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -61,7 +61,7 @@
* This driver is PXA25x only. Grab the right register definitions.
*/
#ifdef CONFIG_ARCH_PXA
-#include <asm/arch/pxa25x-udc.h>
+#include <mach/pxa25x-udc.h>
#endif
#include <asm/mach/udc_pxa2xx.h>
diff --git a/drivers/usb/gadget/pxa25x_udc.h b/drivers/usb/gadget/pxa25x_udc.h
index c8a13215e02c..1d51aa21e6eb 100644
--- a/drivers/usb/gadget/pxa25x_udc.h
+++ b/drivers/usb/gadget/pxa25x_udc.h
@@ -139,7 +139,7 @@ struct pxa25x_udc {
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_ARCH_LUBBOCK
-#include <asm/arch/lubbock.h>
+#include <mach/lubbock.h>
/* lubbock can also report usb connect/disconnect irqs */
#endif
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 9d447d8cfc0c..7cbc78a6853d 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -33,13 +33,13 @@
#include <linux/irq.h>
#include <asm/byteorder.h>
-#include <asm/hardware.h>
+#include <mach/hardware.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
-#include <asm/arch/pxa2xx-regs.h> /* FIXME: for PSSR */
-#include <asm/arch/udc.h>
+#include <mach/pxa2xx-regs.h> /* FIXME: for PSSR */
+#include <mach/udc.h>
#include "pxa27x_udc.h"
@@ -1622,7 +1622,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
struct pxa_udc *udc = the_controller;
int retval;
- if (!driver || driver->speed != USB_SPEED_FULL || !driver->bind
+ if (!driver || driver->speed < USB_SPEED_FULL || !driver->bind
|| !driver->disconnect || !driver->setup)
return -EINVAL;
if (!udc)
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 6b1ef488043b..29d13ebe7500 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -35,7 +35,6 @@
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
-#include <linux/version.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
@@ -49,15 +48,14 @@
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>
-#include <asm/arch/irqs.h>
+#include <mach/irqs.h>
-#include <asm/arch/hardware.h>
-#include <asm/arch/regs-gpio.h>
+#include <mach/hardware.h>
+#include <mach/regs-gpio.h>
#include <asm/plat-s3c24xx/regs-udc.h>
#include <asm/plat-s3c24xx/udc.h>
-#include <asm/mach-types.h>
#include "s3c2410_udc.h"
@@ -888,7 +886,7 @@ static void s3c2410_udc_handle_ep(struct s3c2410_ep *ep)
}
}
-#include <asm/arch/regs-irq.h>
+#include <mach/regs-irq.h>
/*
* s3c2410_udc_irq - interrupt handler
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index abf9505d3a75..53d59287f2bc 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -52,13 +52,16 @@
* is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
*/
+#define PREFIX "ttyGS"
+
/*
* gserial is the lifecycle interface, used by USB functions
* gs_port is the I/O nexus, used by the tty driver
* tty_struct links to the tty/filesystem framework
*
* gserial <---> gs_port ... links will be null when the USB link is
- * inactive; managed by gserial_{connect,disconnect}().
+ * inactive; managed by gserial_{connect,disconnect}(). each gserial
+ * instance can wrap its own USB control protocol.
* gserial->ioport == usb_ep->driver_data ... gs_port
* gs_port->port_usb ... gserial
*
@@ -100,6 +103,8 @@ struct gs_port {
wait_queue_head_t close_wait; /* wait for last close */
struct list_head read_pool;
+ struct list_head read_queue;
+ unsigned n_read;
struct tasklet_struct push;
struct list_head write_pool;
@@ -177,7 +182,7 @@ static void gs_buf_clear(struct gs_buf *gb)
/*
* gs_buf_data_avail
*
- * Return the number of bytes of data available in the circular
+ * Return the number of bytes of data written into the circular
* buffer.
*/
static unsigned gs_buf_data_avail(struct gs_buf *gb)
@@ -278,7 +283,7 @@ gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
* Allocate a usb_request and its buffer. Returns a pointer to the
* usb_request or NULL if there is an error.
*/
-static struct usb_request *
+struct usb_request *
gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
{
struct usb_request *req;
@@ -302,7 +307,7 @@ gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
*
* Free a usb_request and its buffer.
*/
-static void gs_free_req(struct usb_ep *ep, struct usb_request *req)
+void gs_free_req(struct usb_ep *ep, struct usb_request *req)
{
kfree(req->buf);
usb_ep_free_request(ep, req);
@@ -367,11 +372,9 @@ __acquires(&port->port_lock)
req->length = len;
list_del(&req->list);
-#ifdef VERBOSE_DEBUG
- pr_debug("%s: %s, len=%d, 0x%02x 0x%02x 0x%02x ...\n",
- __func__, in->name, len, *((u8 *)req->buf),
+ pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
+ port->port_num, len, *((u8 *)req->buf),
*((u8 *)req->buf+1), *((u8 *)req->buf+2));
-#endif
/* Drop lock while we call out of driver; completions
* could be issued while we do so. Disconnection may
@@ -401,56 +404,6 @@ __acquires(&port->port_lock)
return status;
}
-static void gs_rx_push(unsigned long _port)
-{
- struct gs_port *port = (void *)_port;
- struct tty_struct *tty = port->port_tty;
-
- /* With low_latency, tty_flip_buffer_push() doesn't put its
- * real work through a workqueue, so the ldisc has a better
- * chance to keep up with peak USB data rates.
- */
- if (tty) {
- tty_flip_buffer_push(tty);
- wake_up_interruptible(&tty->read_wait);
- }
-}
-
-/*
- * gs_recv_packet
- *
- * Called for each USB packet received. Reads the packet
- * header and stuffs the data in the appropriate tty buffer.
- * Returns 0 if successful, or a negative error number.
- *
- * Called during USB completion routine, on interrupt time.
- * With port_lock.
- */
-static int gs_recv_packet(struct gs_port *port, char *packet, unsigned size)
-{
- unsigned len;
- struct tty_struct *tty;
-
- /* I/O completions can continue for a while after close(), until the
- * request queue empties. Just discard any data we receive, until
- * something reopens this TTY ... as if there were no HW flow control.
- */
- tty = port->port_tty;
- if (tty == NULL) {
- pr_vdebug("%s: ttyGS%d, after close\n",
- __func__, port->port_num);
- return -EIO;
- }
-
- len = tty_insert_flip_string(tty, packet, size);
- if (len > 0)
- tasklet_schedule(&port->push);
- if (len < size)
- pr_debug("%s: ttyGS%d, drop %d bytes\n",
- __func__, port->port_num, size - len);
- return 0;
-}
-
/*
* Context: caller owns port_lock, and port_usb is set
*/
@@ -469,9 +422,9 @@ __acquires(&port->port_lock)
int status;
struct tty_struct *tty;
- /* no more rx if closed or throttled */
+ /* no more rx if closed */
tty = port->port_tty;
- if (!tty || test_bit(TTY_THROTTLED, &tty->flags))
+ if (!tty)
break;
req = list_entry(pool->next, struct usb_request, list);
@@ -500,36 +453,134 @@ __acquires(&port->port_lock)
return started;
}
-static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
+/*
+ * RX tasklet takes data out of the RX queue and hands it up to the TTY
+ * layer until it refuses to take any more data (or is throttled back).
+ * Then it issues reads for any further data.
+ *
+ * If the RX queue becomes full enough that no usb_request is queued,
+ * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
+ * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
+ * can be buffered before the TTY layer's buffers (currently 64 KB).
+ */
+static void gs_rx_push(unsigned long _port)
{
- int status;
- struct gs_port *port = ep->driver_data;
+ struct gs_port *port = (void *)_port;
+ struct tty_struct *tty;
+ struct list_head *queue = &port->read_queue;
+ bool disconnect = false;
+ bool do_push = false;
- spin_lock(&port->port_lock);
- list_add(&req->list, &port->read_pool);
+ /* hand any queued data to the tty */
+ spin_lock_irq(&port->port_lock);
+ tty = port->port_tty;
+ while (!list_empty(queue)) {
+ struct usb_request *req;
- switch (req->status) {
- case 0:
- /* normal completion */
- status = gs_recv_packet(port, req->buf, req->actual);
- if (status && status != -EIO)
- pr_debug("%s: %s %s err %d\n",
- __func__, "recv", ep->name, status);
- gs_start_rx(port);
- break;
+ req = list_first_entry(queue, struct usb_request, list);
- case -ESHUTDOWN:
- /* disconnect */
- pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
- break;
+ /* discard data if tty was closed */
+ if (!tty)
+ goto recycle;
- default:
- /* presumably a transient fault */
- pr_warning("%s: unexpected %s status %d\n",
- __func__, ep->name, req->status);
- gs_start_rx(port);
- break;
+ /* leave data queued if tty was rx throttled */
+ if (test_bit(TTY_THROTTLED, &tty->flags))
+ break;
+
+ switch (req->status) {
+ case -ESHUTDOWN:
+ disconnect = true;
+ pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
+ break;
+
+ default:
+ /* presumably a transient fault */
+ pr_warning(PREFIX "%d: unexpected RX status %d\n",
+ port->port_num, req->status);
+ /* FALLTHROUGH */
+ case 0:
+ /* normal completion */
+ break;
+ }
+
+ /* push data to (open) tty */
+ if (req->actual) {
+ char *packet = req->buf;
+ unsigned size = req->actual;
+ unsigned n;
+ int count;
+
+ /* we may have pushed part of this packet already... */
+ n = port->n_read;
+ if (n) {
+ packet += n;
+ size -= n;
+ }
+
+ count = tty_insert_flip_string(tty, packet, size);
+ if (count)
+ do_push = true;
+ if (count != size) {
+ /* stop pushing; TTY layer can't handle more */
+ port->n_read += count;
+ pr_vdebug(PREFIX "%d: rx block %d/%d\n",
+ port->port_num,
+ count, req->actual);
+ break;
+ }
+ port->n_read = 0;
+ }
+recycle:
+ list_move(&req->list, &port->read_pool);
}
+
+ /* Push from tty to ldisc; this is immediate with low_latency, and
+ * may trigger callbacks to this driver ... so drop the spinlock.
+ */
+ if (tty && do_push) {
+ spin_unlock_irq(&port->port_lock);
+ tty_flip_buffer_push(tty);
+ wake_up_interruptible(&tty->read_wait);
+ spin_lock_irq(&port->port_lock);
+
+ /* tty may have been closed */
+ tty = port->port_tty;
+ }
+
+
+ /* We want our data queue to become empty ASAP, keeping data
+ * in the tty and ldisc (not here). If we couldn't push any
+ * this time around, there may be trouble unless there's an
+ * implicit tty_unthrottle() call on its way...
+ *
+ * REVISIT we should probably add a timer to keep the tasklet
+ * from starving ... but it's not clear that case ever happens.
+ */
+ if (!list_empty(queue) && tty) {
+ if (!test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (do_push)
+ tasklet_schedule(&port->push);
+ else
+ pr_warning(PREFIX "%d: RX not scheduled?\n",
+ port->port_num);
+ }
+ }
+
+ /* If we're still connected, refill the USB RX queue. */
+ if (!disconnect && port->port_usb)
+ gs_start_rx(port);
+
+ spin_unlock_irq(&port->port_lock);
+}
+
+static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct gs_port *port = ep->driver_data;
+
+ /* Queue all received data until the tty layer is ready for it. */
+ spin_lock(&port->port_lock);
+ list_add_tail(&req->list, &port->read_queue);
+ tasklet_schedule(&port->push);
spin_unlock(&port->port_lock);
}
@@ -625,6 +676,7 @@ static int gs_start_io(struct gs_port *port)
}
/* queue read requests */
+ port->n_read = 0;
started = gs_start_rx(port);
/* unblock any pending writes into our circular buffer */
@@ -633,9 +685,10 @@ static int gs_start_io(struct gs_port *port)
} else {
gs_free_requests(ep, head);
gs_free_requests(port->port_usb->in, &port->write_pool);
+ status = -EIO;
}
- return started ? 0 : status;
+ return status;
}
/*-------------------------------------------------------------------------*/
@@ -736,10 +789,13 @@ static int gs_open(struct tty_struct *tty, struct file *file)
/* if connected, start the I/O stream */
if (port->port_usb) {
+ struct gserial *gser = port->port_usb;
+
pr_debug("gs_open: start ttyGS%d\n", port->port_num);
gs_start_io(port);
- /* REVISIT for ACM, issue "network connected" event */
+ if (gser->connect)
+ gser->connect(gser);
}
pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
@@ -766,6 +822,7 @@ static int gs_writes_finished(struct gs_port *p)
static void gs_close(struct tty_struct *tty, struct file *file)
{
struct gs_port *port = tty->driver_data;
+ struct gserial *gser;
spin_lock_irq(&port->port_lock);
@@ -785,32 +842,31 @@ static void gs_close(struct tty_struct *tty, struct file *file)
port->openclose = true;
port->open_count = 0;
- if (port->port_usb)
- /* REVISIT for ACM, issue "network disconnected" event */;
+ gser = port->port_usb;
+ if (gser && gser->disconnect)
+ gser->disconnect(gser);
/* wait for circular write buffer to drain, disconnect, or at
* most GS_CLOSE_TIMEOUT seconds; then discard the rest
*/
- if (gs_buf_data_avail(&port->port_write_buf) > 0
- && port->port_usb) {
+ if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
spin_unlock_irq(&port->port_lock);
wait_event_interruptible_timeout(port->drain_wait,
gs_writes_finished(port),
GS_CLOSE_TIMEOUT * HZ);
spin_lock_irq(&port->port_lock);
+ gser = port->port_usb;
}
/* Iff we're disconnected, there can be no I/O in flight so it's
* ok to free the circular buffer; else just scrub it. And don't
* let the push tasklet fire again until we're re-opened.
*/
- if (port->port_usb == NULL)
+ if (gser == NULL)
gs_buf_free(&port->port_write_buf);
else
gs_buf_clear(&port->port_write_buf);
- tasklet_kill(&port->push);
-
tty->driver_data = NULL;
port->port_tty = NULL;
@@ -911,15 +967,35 @@ static void gs_unthrottle(struct tty_struct *tty)
{
struct gs_port *port = tty->driver_data;
unsigned long flags;
- unsigned started = 0;
spin_lock_irqsave(&port->port_lock, flags);
- if (port->port_usb)
- started = gs_start_rx(port);
+ if (port->port_usb) {
+ /* Kickstart read queue processing. We don't do xon/xoff,
+ * rts/cts, or other handshaking with the host, but if the
+ * read queue backs up enough we'll be NAKing OUT packets.
+ */
+ tasklet_schedule(&port->push);
+ pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
+ }
spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static int gs_break_ctl(struct tty_struct *tty, int duration)
+{
+ struct gs_port *port = tty->driver_data;
+ int status = 0;
+ struct gserial *gser;
+
+ pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n",
+ port->port_num, duration);
- pr_vdebug("gs_unthrottle: ttyGS%d, %d packets\n",
- port->port_num, started);
+ spin_lock_irq(&port->port_lock);
+ gser = port->port_usb;
+ if (gser && gser->send_break)
+ status = gser->send_break(gser, duration);
+ spin_unlock_irq(&port->port_lock);
+
+ return status;
}
static const struct tty_operations gs_tty_ops = {
@@ -931,6 +1007,7 @@ static const struct tty_operations gs_tty_ops = {
.write_room = gs_write_room,
.chars_in_buffer = gs_chars_in_buffer,
.unthrottle = gs_unthrottle,
+ .break_ctl = gs_break_ctl,
};
/*-------------------------------------------------------------------------*/
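
With .break_ctl wired into gs_tty_ops above, a break requested on the gadget-side tty is forwarded to the function driver through gser->send_break. A hypothetical gadget-side test (the /dev/ttyGS0 node name assumes the usual naming for this driver):

#include <fcntl.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical check: open the gadget tty and request a break */
	int fd = open("/dev/ttyGS0", O_RDWR | O_NOCTTY);

	if (fd < 0)
		return 1;
	tcsendbreak(fd, 0);	/* reaches gs_break_ctl() above */
	close(fd);
	return 0;
}
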
@@ -953,6 +1030,7 @@ gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
tasklet_init(&port->push, gs_rx_push, (unsigned long) port);
INIT_LIST_HEAD(&port->read_pool);
+ INIT_LIST_HEAD(&port->read_queue);
INIT_LIST_HEAD(&port->write_pool);
port->port_num = port_num;
@@ -997,7 +1075,7 @@ int __init gserial_setup(struct usb_gadget *g, unsigned count)
gs_tty_driver->owner = THIS_MODULE;
gs_tty_driver->driver_name = "g_serial";
- gs_tty_driver->name = "ttyGS";
+ gs_tty_driver->name = PREFIX;
/* uses dynamically assigned dev_t values */
gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
@@ -1104,6 +1182,8 @@ void gserial_cleanup(void)
ports[i].port = NULL;
mutex_unlock(&ports[i].lock);
+ tasklet_kill(&port->push);
+
/* wait for old opens to finish */
wait_event(port->close_wait, gs_closed(port));
@@ -1175,14 +1255,17 @@ int gserial_connect(struct gserial *gser, u8 port_num)
/* REVISIT if waiting on "carrier detect", signal. */
- /* REVISIT for ACM, issue "network connection" status notification:
- * connected if open_count, else disconnected.
+ /* if it's already open, start I/O ... and notify the serial
+ * protocol about open/close status (connect/disconnect).
*/
-
- /* if it's already open, start I/O */
if (port->open_count) {
pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
gs_start_io(port);
+ if (gser->connect)
+ gser->connect(gser);
+ } else {
+ if (gser->disconnect)
+ gser->disconnect(gser);
}
spin_unlock_irqrestore(&port->port_lock, flags);
@@ -1241,6 +1324,7 @@ void gserial_disconnect(struct gserial *gser)
if (port->open_count == 0 && !port->openclose)
gs_buf_free(&port->port_write_buf);
gs_free_requests(gser->out, &port->read_pool);
+ gs_free_requests(gser->out, &port->read_queue);
gs_free_requests(gser->in, &port->write_pool);
spin_unlock_irqrestore(&port->port_lock, flags);
}
diff --git a/drivers/usb/gadget/u_serial.h b/drivers/usb/gadget/u_serial.h
index 7b561138f90e..af3910d01aea 100644
--- a/drivers/usb/gadget/u_serial.h
+++ b/drivers/usb/gadget/u_serial.h
@@ -23,8 +23,7 @@
* style I/O using the USB peripheral endpoints listed here, including
* hookups to sysfs and /dev for each logical "tty" device.
*
- * REVISIT need TTY --> USB event flow too, so ACM can report open/close
- * as carrier detect events. Model after ECM. There's more ACM state too.
+ * REVISIT at least ACM could support tiocmget() if needed.
*
* REVISIT someday, allow multiplexing several TTYs over these endpoints.
*/
@@ -41,8 +40,17 @@ struct gserial {
/* REVISIT avoid this CDC-ACM support harder ... */
struct usb_cdc_line_coding port_line_coding; /* 9600-8-N-1 etc */
+
+ /* notification callbacks */
+ void (*connect)(struct gserial *p);
+ void (*disconnect)(struct gserial *p);
+ int (*send_break)(struct gserial *p, int duration);
};
+/* utilities to allocate/free request and buffer */
+struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags);
+void gs_free_req(struct usb_ep *, struct usb_request *req);
+
/* port setup/teardown is handled by gadget driver */
int gserial_setup(struct usb_gadget *g, unsigned n_ports);
void gserial_cleanup(void);
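
The three callbacks added to struct gserial are how a function driver learns about tty-side opens, closes and break requests (the CDC-ACM function driver is the intended consumer). The hookup has this shape; the my_acm_* names below are hypothetical stand-ins, not code from this series:

static void my_acm_connect(struct gserial *port)
{
	/* e.g. raise DCD/DSR toward the USB host */
}

static void my_acm_disconnect(struct gserial *port)
{
	/* e.g. drop DCD/DSR */
}

static int my_acm_send_break(struct gserial *port, int duration)
{
	/* e.g. tell the host a break was requested; return 0 on success */
	return 0;
}

static void my_acm_wire_up(struct gserial *gser)
{
	gser->connect = my_acm_connect;
	gser->disconnect = my_acm_disconnect;
	gser->send_break = my_acm_send_break;
}
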
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 5fbdc14e63b3..5416cf969005 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -12,7 +12,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mbus.h>
-#include <asm/plat-orion/ehci-orion.h>
+#include <plat/ehci-orion.h>
#define rdl(off) __raw_readl(hcd->regs + (off))
#define wrl(off, val) __raw_writel((val), hcd->regs + (off))
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 2622b6596d7c..3712b925b315 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -932,7 +932,7 @@ static struct ehci_qh *qh_append_tds (
list_del (&qtd->qtd_list);
list_add (&dummy->qtd_list, qtd_list);
- __list_splice (qtd_list, qh->qtd_list.prev);
+ list_splice_tail(qtd_list, &qh->qtd_list);
ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
qh->dummy = qtd;
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index c858f2adb929..8017f1cf78e2 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -126,9 +126,8 @@ static void isp1760_writel(const unsigned int val, __u32 __iomem *regs)
* doesn't quite work because some people have to enforce 32-bit access
*/
static void priv_read_copy(struct isp1760_hcd *priv, u32 *src,
- __u32 __iomem *dst, u32 offset, u32 len)
+ __u32 __iomem *dst, u32 len)
{
- struct usb_hcd *hcd = priv_to_hcd(priv);
u32 val;
u8 *buff8;
@@ -136,11 +135,6 @@ static void priv_read_copy(struct isp1760_hcd *priv, u32 *src,
printk(KERN_ERR "ERROR: buffer: %p len: %d\n", src, len);
return;
}
- isp1760_writel(offset, hcd->regs + HC_MEMORY_REG);
- /* XXX
- * 90nsec delay, the spec says something how this could be avoided.
- */
- mdelay(1);
while (len >= 4) {
*src = __raw_readl(dst);
@@ -987,8 +981,20 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
printk(KERN_ERR "qh is 0\n");
continue;
}
- priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs,
- atl_regs, sizeof(ptd));
+ isp1760_writel(atl_regs + ISP_BANK(0), usb_hcd->regs +
+ HC_MEMORY_REG);
+ isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
+ HC_MEMORY_REG);
+ /*
+ * write bank1 address twice to ensure the 90ns delay (time
+ * between BANK0 write and the priv_read_copy() call is at
+ * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 109ns)
+ */
+ isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
+ HC_MEMORY_REG);
+
+ priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs +
+ ISP_BANK(0), sizeof(ptd));
dw1 = le32_to_cpu(ptd.dw1);
dw2 = le32_to_cpu(ptd.dw2);
@@ -1091,7 +1097,7 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
case IN_PID:
priv_read_copy(priv,
priv->atl_ints[queue_entry].data_buffer,
- usb_hcd->regs + payload, payload,
+ usb_hcd->regs + payload + ISP_BANK(1),
length);
case OUT_PID:
@@ -1122,11 +1128,11 @@ static void do_atl_int(struct usb_hcd *usb_hcd)
} else if (usb_pipebulk(urb->pipe) && (length < qtd->length)) {
/* short BULK received */
- printk(KERN_ERR "short bulk, %d instead %zu\n", length,
- qtd->length);
if (urb->transfer_flags & URB_SHORT_NOT_OK) {
urb->status = -EREMOTEIO;
- printk(KERN_ERR "not okey\n");
+ isp1760_dbg(priv, "short bulk, %d instead %zu "
+ "with URB_SHORT_NOT_OK flag.\n",
+ length, qtd->length);
}
if (urb->status == -EINPROGRESS)
@@ -1206,8 +1212,20 @@ static void do_intl_int(struct usb_hcd *usb_hcd)
continue;
}
- priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs,
- int_regs, sizeof(ptd));
+ isp1760_writel(int_regs + ISP_BANK(0), usb_hcd->regs +
+ HC_MEMORY_REG);
+ isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
+ HC_MEMORY_REG);
+ /*
+ * write bank1 address twice to ensure the 90ns delay (time
+ * between BANK0 write and the priv_read_copy() call is at
+ * least 3*t_WHWL + 2*t_w11 = 3*25ns + 2*17ns = 109ns)
+ */
+ isp1760_writel(payload + ISP_BANK(1), usb_hcd->regs +
+ HC_MEMORY_REG);
+
+ priv_read_copy(priv, (u32 *)&ptd, usb_hcd->regs + int_regs +
+ ISP_BANK(0), sizeof(ptd));
dw1 = le32_to_cpu(ptd.dw1);
dw3 = le32_to_cpu(ptd.dw3);
check_int_err_status(le32_to_cpu(ptd.dw4));
@@ -1242,7 +1260,7 @@ static void do_intl_int(struct usb_hcd *usb_hcd)
case IN_PID:
priv_read_copy(priv,
priv->int_ints[queue_entry].data_buffer,
- usb_hcd->regs + payload , payload,
+ usb_hcd->regs + payload + ISP_BANK(1),
length);
case OUT_PID:
@@ -1615,8 +1633,7 @@ static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
return -EPIPE;
}
- isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe);
- return 0;
+ return isp1760_prepare_enqueue(priv, urb, &qtd_list, mem_flags, pe);
}
static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h
index 6473dd86993c..4377277667d9 100644
--- a/drivers/usb/host/isp1760-hcd.h
+++ b/drivers/usb/host/isp1760-hcd.h
@@ -54,6 +54,8 @@ void deinit_kmem_cache(void);
#define BUFFER_MAP 0x7
#define HC_MEMORY_REG 0x33c
+#define ISP_BANK(x) ((x) << 16)
+
#define HC_PORT1_CTRL 0x374
#define PORT1_POWER (3 << 3)
#define PORT1_INIT1 (1 << 7)
@@ -119,6 +121,9 @@ struct inter_packet_info {
typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh,
struct isp1760_qtd *qtd);
+#define isp1760_dbg(priv, fmt, args...) \
+ dev_dbg(priv_to_hcd(priv)->self.controller, fmt, ##args)
+
#define isp1760_info(priv, fmt, args...) \
dev_info(priv_to_hcd(priv)->self.controller, fmt, ##args)
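
ISP_BANK() above just shifts a bank number into bits 16 and up of the value written to HC_MEMORY_REG, which is how the isp1760-hcd.c hunks expose the PTD and its payload through two memory windows at once. A condensed sketch of that access pattern (ptd_offset/payload_offset stand in for atl_regs/int_regs and payload; error handling omitted):

/* select the source addresses for both bank windows */
isp1760_writel(ptd_offset + ISP_BANK(0), usb_hcd->regs + HC_MEMORY_REG);
isp1760_writel(payload_offset + ISP_BANK(1), usb_hcd->regs + HC_MEMORY_REG);
/* the duplicated bank-1 write only buys the required ~90ns settling time */
isp1760_writel(payload_offset + ISP_BANK(1), usb_hcd->regs + HC_MEMORY_REG);

/* the PTD is now readable through bank 0, its payload through bank 1 */
priv_read_copy(priv, (u32 *)&ptd,
		usb_hcd->regs + ptd_offset + ISP_BANK(0), sizeof(ptd));
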
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index a5d8e550d897..4ed228a89943 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -15,12 +15,11 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
-#include <asm/mach-types.h>
-#include <asm/hardware.h>
+#include <mach/hardware.h>
#include <asm/gpio.h>
-#include <asm/arch/board.h>
-#include <asm/arch/cpu.h>
+#include <mach/board.h>
+#include <mach/cpu.h>
#ifndef CONFIG_ARCH_AT91
#error "CONFIG_ARCH_AT91 must be defined."
@@ -261,7 +260,6 @@ static const struct hc_driver ohci_at91_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
index c0948008fe3d..2ac4e022a13f 100644
--- a/drivers/usb/host/ohci-au1xxx.c
+++ b/drivers/usb/host/ohci-au1xxx.c
@@ -163,7 +163,6 @@ static const struct hc_driver ohci_au1xxx_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-ep93xx.c b/drivers/usb/host/ohci-ep93xx.c
index 5adaf36e47d0..fb3055f084b5 100644
--- a/drivers/usb/host/ohci-ep93xx.c
+++ b/drivers/usb/host/ohci-ep93xx.c
@@ -28,8 +28,7 @@
#include <linux/signal.h>
#include <linux/platform_device.h>
-#include <asm/mach-types.h>
-#include <asm/hardware.h>
+#include <mach/hardware.h>
static struct clk *usb_host_clock;
@@ -135,7 +134,6 @@ static struct hc_driver ohci_ep93xx_hc_driver = {
.get_frame_number = ohci_get_frame,
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 26bc47941d01..89901962cbfd 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -86,6 +86,21 @@ static void ohci_stop (struct usb_hcd *hcd);
static int ohci_restart (struct ohci_hcd *ohci);
#endif
+#ifdef CONFIG_PCI
+static void quirk_amd_pll(int state);
+static void amd_iso_dev_put(void);
+#else
+static inline void quirk_amd_pll(int state)
+{
+ return;
+}
+static inline void amd_iso_dev_put(void)
+{
+ return;
+}
+#endif
+
+
#include "ohci-hub.c"
#include "ohci-dbg.c"
#include "ohci-mem.c"
@@ -483,6 +498,9 @@ static int ohci_init (struct ohci_hcd *ohci)
int ret;
struct usb_hcd *hcd = ohci_to_hcd(ohci);
+ if (distrust_firmware)
+ ohci->flags |= OHCI_QUIRK_HUB_POWER;
+
disable (ohci);
ohci->regs = hcd->regs;
@@ -689,7 +707,8 @@ retry:
temp |= RH_A_NOCP;
temp &= ~(RH_A_POTPGT | RH_A_NPS);
ohci_writel (ohci, temp, &ohci->regs->roothub.a);
- } else if ((ohci->flags & OHCI_QUIRK_AMD756) || distrust_firmware) {
+ } else if ((ohci->flags & OHCI_QUIRK_AMD756) ||
+ (ohci->flags & OHCI_QUIRK_HUB_POWER)) {
/* hub power always on; required for AMD-756 and some
* Mac platforms. ganged overcurrent reporting, if any.
*/
@@ -882,6 +901,8 @@ static void ohci_stop (struct usb_hcd *hcd)
if (quirk_zfmicro(ohci))
del_timer(&ohci->unlink_watchdog);
+ if (quirk_amdiso(ohci))
+ amd_iso_dev_put();
remove_debug_files (ohci);
ohci_mem_cleanup (ohci);
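
Folding distrust_firmware into ohci->flags as OHCI_QUIRK_HUB_POWER makes the decision per controller, so board glue can override it after ohci_init() has run. A hypothetical reset hook doing that (mirroring what the ohci-omap.c hunk below does for boards with known-good hub power wiring):

static int my_board_ohci_reset(struct usb_hcd *hcd)
{
	struct ohci_hcd *ohci = hcd_to_ohci(hcd);
	int ret = ohci_init(ohci);	/* may set OHCI_QUIRK_HUB_POWER */

	if (ret < 0)
		return ret;
	/* this board's hub power/overcurrent wiring is trustworthy */
	ohci->flags &= ~OHCI_QUIRK_HUB_POWER;
	return 0;
}
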
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index b56739221d11..7ea9a7b31155 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -36,18 +36,6 @@
/*-------------------------------------------------------------------------*/
-/* hcd->hub_irq_enable() */
-static void ohci_rhsc_enable (struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci (hcd);
-
- spin_lock_irq(&ohci->lock);
- if (!ohci->autostop)
- del_timer(&hcd->rh_timer); /* Prevent next poll */
- ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrenable);
- spin_unlock_irq(&ohci->lock);
-}
-
#define OHCI_SCHED_ENABLES \
(OHCI_CTRL_CLE|OHCI_CTRL_BLE|OHCI_CTRL_PLE|OHCI_CTRL_IE)
@@ -374,18 +362,28 @@ static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
int any_connected)
{
int poll_rh = 1;
+ int rhsc;
+ rhsc = ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC;
switch (ohci->hc_control & OHCI_CTRL_HCFS) {
case OHCI_USB_OPER:
- /* keep on polling until we know a device is connected
- * and RHSC is enabled */
+ /* If no status changes are pending, enable status-change
+ * interrupts.
+ */
+ if (!rhsc && !changed) {
+ rhsc = OHCI_INTR_RHSC;
+ ohci_writel(ohci, rhsc, &ohci->regs->intrenable);
+ }
+
+ /* Keep on polling until we know a device is connected
+ * and RHSC is enabled, or until we autostop.
+ */
if (!ohci->autostop) {
if (any_connected ||
!device_may_wakeup(&ohci_to_hcd(ohci)
->self.root_hub->dev)) {
- if (ohci_readl(ohci, &ohci->regs->intrenable) &
- OHCI_INTR_RHSC)
+ if (rhsc)
poll_rh = 0;
} else {
ohci->autostop = 1;
@@ -398,12 +396,13 @@ static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
ohci->autostop = 0;
ohci->next_statechange = jiffies +
STATECHANGE_DELAY;
- } else if (time_after_eq(jiffies,
+ } else if (rhsc && time_after_eq(jiffies,
ohci->next_statechange)
&& !ohci->ed_rm_list
&& !(ohci->hc_control &
OHCI_SCHED_ENABLES)) {
ohci_rh_suspend(ohci, 1);
+ poll_rh = 0;
}
}
break;
@@ -417,6 +416,12 @@ static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
else
usb_hcd_resume_root_hub(ohci_to_hcd(ohci));
} else {
+ if (!rhsc && (ohci->autostop ||
+ ohci_to_hcd(ohci)->self.root_hub->
+ do_remote_wakeup))
+ ohci_writel(ohci, OHCI_INTR_RHSC,
+ &ohci->regs->intrenable);
+
/* everything is idle, no need for polling */
poll_rh = 0;
}
@@ -438,12 +443,16 @@ static inline int ohci_rh_resume(struct ohci_hcd *ohci)
static int ohci_root_hub_state_changes(struct ohci_hcd *ohci, int changed,
int any_connected)
{
- int poll_rh = 1;
-
- /* keep on polling until RHSC is enabled */
+ /* If RHSC is enabled, don't poll */
if (ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC)
- poll_rh = 0;
- return poll_rh;
+ return 0;
+
+ /* If no status changes are pending, enable status-change interrupts */
+ if (!changed) {
+ ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrenable);
+ return 0;
+ }
+ return 1;
}
#endif /* CONFIG_PM */
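
Both versions of ohci_root_hub_state_changes() now converge on the same rule: stop polling once RHSC interrupts can take over, and enable them as soon as nothing is pending. A simplified sketch of that decision (the CONFIG_PM variant above additionally handles autostop and remote wakeup):

static int root_hub_keep_polling(struct ohci_hcd *ohci, int changed)
{
	if (ohci_readl(ohci, &ohci->regs->intrenable) & OHCI_INTR_RHSC)
		return 0;		/* RHSC interrupts report changes */

	if (!changed) {
		/* idle: hand off from polling to RHSC interrupts */
		ohci_writel(ohci, OHCI_INTR_RHSC, &ohci->regs->intrenable);
		return 0;
	}
	return 1;			/* report this change, poll again */
}
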
@@ -483,6 +492,13 @@ ohci_hub_status_data (struct usb_hcd *hcd, char *buf)
length++;
}
+ /* Some broken controllers never turn off RHSC in the interrupt
+ * status register. For their sake we won't re-enable RHSC
+ * interrupts if the flag is already set.
+ */
+ if (ohci_readl(ohci, &ohci->regs->intrstatus) & OHCI_INTR_RHSC)
+ changed = 1;
+
/* look at each port */
for (i = 0; i < ohci->num_ports; i++) {
u32 status = roothub_portstatus (ohci, i);
@@ -572,8 +588,6 @@ static int ohci_start_port_reset (struct usb_hcd *hcd, unsigned port)
return 0;
}
-static void start_hnp(struct ohci_hcd *ohci);
-
#else
#define ohci_start_port_reset NULL
@@ -760,7 +774,7 @@ static int ohci_hub_control (
#ifdef CONFIG_USB_OTG
if (hcd->self.otg_port == (wIndex + 1)
&& hcd->self.b_hnp_enable)
- start_hnp(ohci);
+ ohci->start_hnp(ohci);
else
#endif
ohci_writel (ohci, RH_PS_PSS,
diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c
index 1ef5d482c145..de42283149c7 100644
--- a/drivers/usb/host/ohci-lh7a404.c
+++ b/drivers/usb/host/ohci-lh7a404.c
@@ -19,7 +19,7 @@
#include <linux/platform_device.h>
#include <linux/signal.h>
-#include <asm/hardware.h>
+#include <mach/hardware.h>
extern int usb_disabled(void);
@@ -193,7 +193,6 @@ static const struct hc_driver ohci_lh7a404_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 6e5e5f81ac90..1eb64d08b60a 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -19,15 +19,15 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <asm/hardware.h>
+#include <mach/hardware.h>
#include <asm/io.h>
#include <asm/mach-types.h>
-#include <asm/arch/mux.h>
-#include <asm/arch/irqs.h>
-#include <asm/arch/gpio.h>
-#include <asm/arch/fpga.h>
-#include <asm/arch/usb.h>
+#include <mach/mux.h>
+#include <mach/irqs.h>
+#include <mach/gpio.h>
+#include <mach/fpga.h>
+#include <mach/usb.h>
/* OMAP-1510 OHCI has its own MMU for DMA */
@@ -225,6 +225,7 @@ static int ohci_omap_init(struct usb_hcd *hcd)
dev_err(hcd->self.controller, "can't find transceiver\n");
return -ENODEV;
}
+ ohci->start_hnp = start_hnp;
}
#endif
@@ -260,7 +261,7 @@ static int ohci_omap_init(struct usb_hcd *hcd)
omap_cfg_reg(W4_USB_HIGHZ);
}
ohci_writel(ohci, rh, &ohci->regs->roothub.a);
- distrust_firmware = 0;
+ ohci->flags &= ~OHCI_QUIRK_HUB_POWER;
} else if (machine_is_nokia770()) {
/* We require a self-powered hub, which should have
* plenty of power. */
@@ -469,7 +470,6 @@ static const struct hc_driver ohci_omap_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 4696cc912e16..a9c2ae36c7ad 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -18,6 +18,28 @@
#error "This file is PCI bus glue. CONFIG_PCI must be defined."
#endif
+#include <linux/pci.h>
+#include <linux/io.h>
+
+
+/* constants used to work around PM-related transfer
+ * glitches in some AMD 700 series southbridges
+ */
+#define AB_REG_BAR 0xf0
+#define AB_INDX(addr) ((addr) + 0x00)
+#define AB_DATA(addr) ((addr) + 0x04)
+#define AX_INDXC 0x30
+#define AX_DATAC 0x34
+
+#define NB_PCIE_INDX_ADDR 0xe0
+#define NB_PCIE_INDX_DATA 0xe4
+#define PCIE_P_CNTL 0x10040
+#define BIF_NB 0x10002
+
+static struct pci_dev *amd_smbus_dev;
+static struct pci_dev *amd_hb_dev;
+static int amd_ohci_iso_count;
+
/*-------------------------------------------------------------------------*/
static int broken_suspend(struct usb_hcd *hcd)
@@ -143,6 +165,103 @@ static int ohci_quirk_nec(struct usb_hcd *hcd)
return 0;
}
+static int ohci_quirk_amd700(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ u8 rev = 0;
+
+ if (!amd_smbus_dev)
+ amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI,
+ PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
+ if (!amd_smbus_dev)
+ return 0;
+
+ pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
+ if ((rev > 0x3b) || (rev < 0x30)) {
+ pci_dev_put(amd_smbus_dev);
+ amd_smbus_dev = NULL;
+ return 0;
+ }
+
+ amd_ohci_iso_count++;
+
+ if (!amd_hb_dev)
+ amd_hb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9600, NULL);
+
+ ohci->flags |= OHCI_QUIRK_AMD_ISO;
+ ohci_dbg(ohci, "enabled AMD ISO transfers quirk\n");
+
+ return 0;
+}
+
+/*
+ * The hardware normally enables the A-link power management feature, which
+ * lets the system lower the power consumption in idle states.
+ *
+ * Assume the system is configured to have USB 1.1 ISO transfers going
+ * to or from a USB device. Without this quirk, that stream may stutter
+ * or have breaks occasionally. For transfers going to speakers, this
+ * makes a very audible mess...
+ *
+ * That audio playback corruption is due to the audio stream getting
+ * interrupted occasionally when the link goes into a lower power state.
+ * This USB quirk prevents the link from entering that lower power state
+ * during audio playback or other ISO operations.
+ */
+static void quirk_amd_pll(int on)
+{
+ u32 addr;
+ u32 val;
+ u32 bit = (on > 0) ? 1 : 0;
+
+ pci_read_config_dword(amd_smbus_dev, AB_REG_BAR, &addr);
+
+ /* BIT names/meanings are NDA-protected, sorry ... */
+
+ outl(AX_INDXC, AB_INDX(addr));
+ outl(0x40, AB_DATA(addr));
+ outl(AX_DATAC, AB_INDX(addr));
+ val = inl(AB_DATA(addr));
+ val &= ~((1 << 3) | (1 << 4) | (1 << 9));
+ val |= (bit << 3) | ((!bit) << 4) | ((!bit) << 9);
+ outl(val, AB_DATA(addr));
+
+ if (amd_hb_dev) {
+ addr = PCIE_P_CNTL;
+ pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr);
+
+ pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val);
+ val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
+ val |= bit | (bit << 3) | (bit << 12);
+ val |= ((!bit) << 4) | ((!bit) << 9);
+ pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val);
+
+ addr = BIF_NB;
+ pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_ADDR, addr);
+
+ pci_read_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, &val);
+ val &= ~(1 << 8);
+ val |= bit << 8;
+ pci_write_config_dword(amd_hb_dev, NB_PCIE_INDX_DATA, val);
+ }
+}
+
+static void amd_iso_dev_put(void)
+{
+ amd_ohci_iso_count--;
+ if (amd_ohci_iso_count == 0) {
+ if (amd_smbus_dev) {
+ pci_dev_put(amd_smbus_dev);
+ amd_smbus_dev = NULL;
+ }
+ if (amd_hb_dev) {
+ pci_dev_put(amd_hb_dev);
+ amd_hb_dev = NULL;
+ }
+ }
+}
+
/* List of quirks for OHCI */
static const struct pci_device_id ohci_pci_quirks[] = {
{
@@ -181,6 +300,19 @@ static const struct pci_device_id ohci_pci_quirks[] = {
PCI_DEVICE(PCI_VENDOR_ID_ITE, 0x8152),
.driver_data = (unsigned long) broken_suspend,
},
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4397),
+ .driver_data = (unsigned long)ohci_quirk_amd700,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4398),
+ .driver_data = (unsigned long)ohci_quirk_amd700,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
+ .driver_data = (unsigned long)ohci_quirk_amd700,
+ },
+
/* FIXME for some of the early AMD 760 southbridges, OHCI
* won't work at all. blacklist them.
*/
@@ -327,7 +459,6 @@ static const struct hc_driver ohci_pci_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index 6ad8f2fc57b9..658a2a978c32 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -21,13 +21,12 @@
#include <linux/platform_device.h>
#include <linux/i2c.h>
-#include <asm/hardware.h>
+#include <mach/hardware.h>
#include <asm/io.h>
-#include <asm/mach-types.h>
-#include <asm/arch/platform.h>
-#include <asm/arch/irqs.h>
-#include <asm/arch/gpio.h>
+#include <mach/platform.h>
+#include <mach/irqs.h>
+#include <mach/gpio.h>
#define USB_CTRL IO_ADDRESS(PNX4008_PWRMAN_BASE + 0x64)
@@ -278,7 +277,6 @@ static const struct hc_driver ohci_pnx4008_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-pnx8550.c b/drivers/usb/host/ohci-pnx8550.c
index 605d59cba28e..28467e288a93 100644
--- a/drivers/usb/host/ohci-pnx8550.c
+++ b/drivers/usb/host/ohci-pnx8550.c
@@ -201,7 +201,6 @@ static const struct hc_driver ohci_pnx8550_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 91e6e101a4cc..7ac53264ead3 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -72,7 +72,6 @@ static const struct hc_driver ohci_ppc_of_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-ppc-soc.c b/drivers/usb/host/ohci-ppc-soc.c
index 523c30125577..cd3398b675b2 100644
--- a/drivers/usb/host/ohci-ppc-soc.c
+++ b/drivers/usb/host/ohci-ppc-soc.c
@@ -172,7 +172,6 @@ static const struct hc_driver ohci_ppc_soc_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c
index 55c95647f008..2089d8a46c4b 100644
--- a/drivers/usb/host/ohci-ps3.c
+++ b/drivers/usb/host/ohci-ps3.c
@@ -68,7 +68,6 @@ static const struct hc_driver ps3_ohci_hc_driver = {
.get_frame_number = ohci_get_frame,
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
.start_port_reset = ohci_start_port_reset,
#if defined(CONFIG_PM)
.bus_suspend = ohci_bus_suspend,
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 127b15799024..7f0f35c78185 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -24,11 +24,10 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <asm/mach-types.h>
-#include <asm/hardware.h>
-#include <asm/arch/pxa-regs.h>
-#include <asm/arch/pxa2xx-regs.h> /* FIXME: for PSSR */
-#include <asm/arch/ohci.h>
+#include <mach/hardware.h>
+#include <mach/pxa-regs.h>
+#include <mach/pxa2xx-regs.h> /* FIXME: for PSSR */
+#include <mach/ohci.h>
#define PXA_UHC_MAX_PORTNUM 3
@@ -299,7 +298,6 @@ static const struct hc_driver ohci_pxa27x_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 6a9b4c557953..c2d80f80448b 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -49,6 +49,9 @@ __acquires(ohci->lock)
switch (usb_pipetype (urb->pipe)) {
case PIPE_ISOCHRONOUS:
ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
+ if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
+ && quirk_amdiso(ohci))
+ quirk_amd_pll(1);
break;
case PIPE_INTERRUPT:
ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
@@ -677,6 +680,9 @@ static void td_submit_urb (
data + urb->iso_frame_desc [cnt].offset,
urb->iso_frame_desc [cnt].length, urb, cnt);
}
+ if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
+ && quirk_amdiso(ohci))
+ quirk_amd_pll(0);
periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
&& ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
break;
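
Together, the ohci-pci.c and ohci-q.c changes gate the AMD A-link power management only around isochronous traffic. An illustrative edge-trigger helper showing the intent (not code from the patch; the real calls sit in td_submit_urb() and in the URB completion path shown in the first ohci-q.c hunk above):

static void amd_iso_edge(struct ohci_hcd *ohci, int isoc_reqs_now)
{
	if (!quirk_amdiso(ohci))
		return;
	if (isoc_reqs_now == 1)		/* first ISO stream just started */
		quirk_amd_pll(0);	/* hold the A-link out of low power */
	else if (isoc_reqs_now == 0)	/* last ISO stream just finished */
		quirk_amd_pll(1);	/* allow A-link power management again */
}
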
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 3c7a740cfe0c..f46af7a718d4 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -22,8 +22,8 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <asm/hardware.h>
-#include <asm/arch/usb-control.h>
+#include <mach/hardware.h>
+#include <mach/usb-control.h>
#define valid_port(idx) ((idx) == 1 || (idx) == 2)
@@ -466,7 +466,6 @@ static const struct hc_driver ohci_s3c2410_hc_driver = {
*/
.hub_status_data = ohci_s3c2410_hub_status_data,
.hub_control = ohci_s3c2410_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index 2e9dceb9bb99..e4bbe8e188e4 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -13,10 +13,10 @@
* This file is licenced under the GPL.
*/
-#include <asm/hardware.h>
+#include <mach/hardware.h>
#include <asm/mach-types.h>
-#include <asm/arch/assabet.h>
-#include <asm/arch/badge4.h>
+#include <mach/assabet.h>
+#include <mach/badge4.h>
#include <asm/hardware/sa1111.h>
#ifndef CONFIG_SA1111
@@ -231,7 +231,6 @@ static const struct hc_driver ohci_sa1111_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-sh.c b/drivers/usb/host/ohci-sh.c
index e7ee607278fe..60f03cc7ec4f 100644
--- a/drivers/usb/host/ohci-sh.c
+++ b/drivers/usb/host/ohci-sh.c
@@ -68,7 +68,6 @@ static const struct hc_driver ohci_sh_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index 21b164e4abeb..cff23637cfcc 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -75,7 +75,6 @@ static const struct hc_driver ohci_sm501_hc_driver = {
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci-ssb.c b/drivers/usb/host/ohci-ssb.c
index 3660c83d80af..23fd6a886bdd 100644
--- a/drivers/usb/host/ohci-ssb.c
+++ b/drivers/usb/host/ohci-ssb.c
@@ -81,7 +81,6 @@ static const struct hc_driver ssb_ohci_hc_driver = {
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
- .hub_irq_enable = ohci_rhsc_enable,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index dc544ddc7849..faf622eafce7 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -371,6 +371,7 @@ struct ohci_hcd {
* other external transceivers should be software-transparent
*/
struct otg_transceiver *transceiver;
+ void (*start_hnp)(struct ohci_hcd *ohci);
/*
* memory management for queue data structures
@@ -399,6 +400,8 @@ struct ohci_hcd {
#define OHCI_QUIRK_ZFMICRO 0x20 /* Compaq ZFMicro chipset*/
#define OHCI_QUIRK_NEC 0x40 /* lost interrupts */
#define OHCI_QUIRK_FRAME_NO 0x80 /* no big endian frame_no shift */
+#define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */
+#define OHCI_QUIRK_AMD_ISO 0x200 /* ISO transfers */
// there are also chip quirks/bugs in init logic
struct work_struct nec_work; /* Worker for NEC quirk */
@@ -426,6 +429,10 @@ static inline int quirk_zfmicro(struct ohci_hcd *ohci)
{
return ohci->flags & OHCI_QUIRK_ZFMICRO;
}
+static inline int quirk_amdiso(struct ohci_hcd *ohci)
+{
+ return ohci->flags & OHCI_QUIRK_AMD_ISO;
+}
#else
static inline int quirk_nec(struct ohci_hcd *ohci)
{
@@ -435,6 +442,10 @@ static inline int quirk_zfmicro(struct ohci_hcd *ohci)
{
return 0;
}
+static inline int quirk_amdiso(struct ohci_hcd *ohci)
+{
+ return 0;
+}
#endif
/* convert between an hcd pointer and the corresponding ohci_hcd */
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index d5f02dddb120..ea7126f99cab 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -964,11 +964,34 @@ static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum)
disable_irq_nrdy(r8a66597, pipenum);
}
+static void r8a66597_root_hub_start_polling(struct r8a66597 *r8a66597)
+{
+ mod_timer(&r8a66597->rh_timer,
+ jiffies + msecs_to_jiffies(R8A66597_RH_POLL_TIME));
+}
+
+static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port,
+ int connect)
+{
+ struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
+
+ rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST;
+ rh->scount = R8A66597_MAX_SAMPLING;
+ if (connect)
+ rh->port |= 1 << USB_PORT_FEAT_CONNECTION;
+ else
+ rh->port &= ~(1 << USB_PORT_FEAT_CONNECTION);
+ rh->port |= 1 << USB_PORT_FEAT_C_CONNECTION;
+
+ r8a66597_root_hub_start_polling(r8a66597);
+}
+
/* this function must be called with interrupt disabled */
static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
u16 syssts)
{
if (syssts == SE0) {
+ r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port));
return;
}
@@ -1002,13 +1025,10 @@ static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597, int port)
{
struct r8a66597_device *dev = r8a66597->root_hub[port].dev;
- r8a66597->root_hub[port].port &= ~(1 << USB_PORT_FEAT_CONNECTION);
- r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_C_CONNECTION);
-
disable_r8a66597_pipe_all(r8a66597, dev);
free_usb_address(r8a66597, dev);
- r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port));
+ start_root_hub_sampling(r8a66597, port, 0);
}
/* this function must be called with interrupt disabled */
@@ -1551,23 +1571,6 @@ static void irq_pipe_nrdy(struct r8a66597 *r8a66597)
}
}
-static void r8a66597_root_hub_start_polling(struct r8a66597 *r8a66597)
-{
- mod_timer(&r8a66597->rh_timer,
- jiffies + msecs_to_jiffies(R8A66597_RH_POLL_TIME));
-}
-
-static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port)
-{
- struct r8a66597_root_hub *rh = &r8a66597->root_hub[port];
-
- rh->old_syssts = r8a66597_read(r8a66597, get_syssts_reg(port)) & LNST;
- rh->scount = R8A66597_MAX_SAMPLING;
- r8a66597->root_hub[port].port |= (1 << USB_PORT_FEAT_CONNECTION)
- | (1 << USB_PORT_FEAT_C_CONNECTION);
- r8a66597_root_hub_start_polling(r8a66597);
-}
-
static irqreturn_t r8a66597_irq(struct usb_hcd *hcd)
{
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
@@ -1594,7 +1597,7 @@ static irqreturn_t r8a66597_irq(struct usb_hcd *hcd)
r8a66597_bclr(r8a66597, ATTCHE, INTENB2);
/* start usb bus sampling */
- start_root_hub_sampling(r8a66597, 1);
+ start_root_hub_sampling(r8a66597, 1, 1);
}
if (mask2 & DTCH) {
r8a66597_write(r8a66597, ~DTCH, INTSTS2);
@@ -1609,7 +1612,7 @@ static irqreturn_t r8a66597_irq(struct usb_hcd *hcd)
r8a66597_bclr(r8a66597, ATTCHE, INTENB1);
/* start usb bus sampling */
- start_root_hub_sampling(r8a66597, 0);
+ start_root_hub_sampling(r8a66597, 0, 1);
}
if (mask1 & DTCH) {
r8a66597_write(r8a66597, ~DTCH, INTSTS1);
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 20ad3c48fcb2..228f2b070f2b 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -2934,16 +2934,6 @@ static int u132_start_port_reset(struct usb_hcd *hcd, unsigned port_num)
return 0;
}
-static void u132_hub_irq_enable(struct usb_hcd *hcd)
-{
- struct u132 *u132 = hcd_to_u132(hcd);
- if (u132->going > 1) {
- dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
- , u132->going);
- } else if (u132->going > 0)
- dev_err(&u132->platform_dev->dev, "device is being removed\n");
-}
-
#ifdef CONFIG_PM
static int u132_bus_suspend(struct usb_hcd *hcd)
@@ -2995,7 +2985,6 @@ static struct hc_driver u132_hc_driver = {
.bus_suspend = u132_bus_suspend,
.bus_resume = u132_bus_resume,
.start_port_reset = u132_start_port_reset,
- .hub_irq_enable = u132_hub_irq_enable,
};
/*
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index 001789c9a11a..4ea50e0abcbb 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -42,16 +42,6 @@ config USB_ADUTUX
To compile this driver as a module, choose M here. The module
will be called adutux.
-config USB_AUERSWALD
- tristate "USB Auerswald ISDN support"
- depends on USB
- help
- Say Y here if you want to connect an Auerswald USB ISDN Device
- to your computer's USB port.
-
- To compile this driver as a module, choose M here: the
- module will be called auerswald.
-
config USB_RIO500
tristate "USB Diamond Rio500 support"
depends on USB
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index aba091cb5ec0..45b4e12afb08 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -5,7 +5,6 @@
obj-$(CONFIG_USB_ADUTUX) += adutux.o
obj-$(CONFIG_USB_APPLEDISPLAY) += appledisplay.o
-obj-$(CONFIG_USB_AUERSWALD) += auerswald.o
obj-$(CONFIG_USB_BERRY_CHARGE) += berry_charge.o
obj-$(CONFIG_USB_CYPRESS_CY7C63)+= cypress_cy7c63.o
obj-$(CONFIG_USB_CYTHERM) += cytherm.o
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c
deleted file mode 100644
index d2f61d5510e7..000000000000
--- a/drivers/usb/misc/auerswald.c
+++ /dev/null
@@ -1,2152 +0,0 @@
-/*****************************************************************************/
-/*
- * auerswald.c -- Auerswald PBX/System Telephone usb driver.
- *
- * Copyright (C) 2001 Wolfgang Mües (wolfgang@iksw-muees.de)
- *
- * Very much code of this driver is borrowed from dabusb.c (Deti Fliegl)
- * and from the USB Skeleton driver (Greg Kroah-Hartman). Thank you.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
- /*****************************************************************************/
-
-/* Standard Linux module include files */
-#include <asm/uaccess.h>
-#include <asm/byteorder.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/usb.h>
-#include <linux/mutex.h>
-
-/*-------------------------------------------------------------------*/
-/* Debug support */
-#ifdef DEBUG
-#define dump( adr, len) \
-do { \
- unsigned int u; \
- printk (KERN_DEBUG); \
- for (u = 0; u < len; u++) \
- printk (" %02X", adr[u] & 0xFF); \
- printk ("\n"); \
-} while (0)
-#else
-#define dump( adr, len)
-#endif
-
-/*-------------------------------------------------------------------*/
-/* Version Information */
-#define DRIVER_VERSION "0.9.11"
-#define DRIVER_AUTHOR "Wolfgang Mües <wolfgang@iksw-muees.de>"
-#define DRIVER_DESC "Auerswald PBX/System Telephone usb driver"
-
-/*-------------------------------------------------------------------*/
-/* Private declarations for Auerswald USB driver */
-
-/* Auerswald Vendor ID */
-#define ID_AUERSWALD 0x09BF
-
-#define AUER_MINOR_BASE 112 /* auerswald driver minor number */
-
-/* we can have up to this number of device plugged in at once */
-#define AUER_MAX_DEVICES 16
-
-
-/* Number of read buffers for each device */
-#define AU_RBUFFERS 10
-
-/* Number of chain elements for each control chain */
-#define AUCH_ELEMENTS 20
-
-/* Number of retries in communication */
-#define AU_RETRIES 10
-
-/*-------------------------------------------------------------------*/
-/* vendor specific protocol */
-/* Header Byte */
-#define AUH_INDIRMASK 0x80 /* mask for direct/indirect bit */
-#define AUH_DIRECT 0x00 /* data is for USB device */
-#define AUH_INDIRECT 0x80 /* USB device is relay */
-
-#define AUH_SPLITMASK 0x40 /* mask for split bit */
-#define AUH_UNSPLIT 0x00 /* data block is full-size */
-#define AUH_SPLIT 0x40 /* data block is part of a larger one,
- split-byte follows */
-
-#define AUH_TYPEMASK 0x3F /* mask for type of data transfer */
-#define AUH_TYPESIZE 0x40 /* different types */
-#define AUH_DCHANNEL 0x00 /* D channel data */
-#define AUH_B1CHANNEL 0x01 /* B1 channel transparent */
-#define AUH_B2CHANNEL 0x02 /* B2 channel transparent */
-/* 0x03..0x0F reserved for driver internal use */
-#define AUH_COMMAND 0x10 /* Command channel */
-#define AUH_BPROT 0x11 /* Configuration block protocol */
-#define AUH_DPROTANA 0x12 /* D channel protocol analyzer */
-#define AUH_TAPI 0x13 /* telephone api data (ATD) */
-/* 0x14..0x3F reserved for other protocols */
-#define AUH_UNASSIGNED 0xFF /* if char device has no assigned service */
-#define AUH_FIRSTUSERCH 0x11 /* first channel which is available for driver users */
-
-#define AUH_SIZE 1 /* Size of Header Byte */
-
-/* Split Byte. Only present if split bit in header byte set.*/
-#define AUS_STARTMASK 0x80 /* mask for first block of splitted frame */
-#define AUS_FIRST 0x80 /* first block */
-#define AUS_FOLLOW 0x00 /* following block */
-
-#define AUS_ENDMASK 0x40 /* mask for last block of splitted frame */
-#define AUS_END 0x40 /* last block */
-#define AUS_NOEND 0x00 /* not the last block */
-
-#define AUS_LENMASK 0x3F /* mask for block length information */
-
-/* Request types */
-#define AUT_RREQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER) /* Read Request */
-#define AUT_WREQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER) /* Write Request */
-
-/* Vendor Requests */
-#define AUV_GETINFO 0x00 /* GetDeviceInfo */
-#define AUV_WBLOCK 0x01 /* Write Block */
-#define AUV_RBLOCK 0x02 /* Read Block */
-#define AUV_CHANNELCTL 0x03 /* Channel Control */
-#define AUV_DUMMY 0x04 /* Dummy Out for retry */
-
-/* Device Info Types */
-#define AUDI_NUMBCH 0x0000 /* Number of supported B channels */
-#define AUDI_OUTFSIZE 0x0001 /* Size of OUT B channel fifos */
-#define AUDI_MBCTRANS 0x0002 /* max. Blocklength of control transfer */
-
-/* Interrupt endpoint definitions */
-#define AU_IRQENDP 1 /* Endpoint number */
-#define AU_IRQCMDID 16 /* Command-block ID */
-#define AU_BLOCKRDY 0 /* Command: Block data ready on ctl endpoint */
-#define AU_IRQMINSIZE 5 /* Nr. of bytes decoded in this driver */
-
-/* Device String Descriptors */
-#define AUSI_VENDOR 1 /* "Auerswald GmbH & Co. KG" */
-#define AUSI_DEVICE 2 /* Name of the Device */
-#define AUSI_SERIALNR 3 /* Serial Number */
-#define AUSI_MSN 4 /* "MSN ..." (first) Multiple Subscriber Number */
-
-#define AUSI_DLEN 100 /* Max. Length of Device Description */
-
-#define AUV_RETRY 0x101 /* First Firmware version which can do control retries */
-
-/*-------------------------------------------------------------------*/
-/* External data structures / Interface */
-typedef struct
-{
- char __user *buf; /* return buffer for string contents */
- unsigned int bsize; /* size of return buffer */
-} audevinfo_t,*paudevinfo_t;
-
-/* IO controls */
-#define IOCTL_AU_SLEN _IOR( 'U', 0xF0, int) /* return the max. string descriptor length */
-#define IOCTL_AU_DEVINFO _IOWR('U', 0xF1, audevinfo_t) /* get name of a specific device */
-#define IOCTL_AU_SERVREQ _IOW( 'U', 0xF2, int) /* request a service channel */
-#define IOCTL_AU_BUFLEN _IOR( 'U', 0xF3, int) /* return the max. buffer length for the device */
-#define IOCTL_AU_RXAVAIL _IOR( 'U', 0xF4, int) /* return != 0 if Receive Data available */
-#define IOCTL_AU_CONNECT _IOR( 'U', 0xF5, int) /* return != 0 if connected to a service channel */
-#define IOCTL_AU_TXREADY _IOR( 'U', 0xF6, int) /* return != 0 if Transmitt channel ready to send */
-/* 'U' 0xF7..0xFF reseved */
-
-/*-------------------------------------------------------------------*/
-/* Internal data structures */
-
-/* ..................................................................*/
-/* urb chain element */
-struct auerchain; /* forward for circular reference */
-typedef struct
-{
- struct auerchain *chain; /* pointer to the chain to which this element belongs */
- struct urb * urbp; /* pointer to attached urb */
- void *context; /* saved URB context */
- usb_complete_t complete; /* saved URB completion function */
- struct list_head list; /* to include element into a list */
-} auerchainelement_t,*pauerchainelement_t;
-
-/* urb chain */
-typedef struct auerchain
-{
- pauerchainelement_t active; /* element which is submitted to urb */
- spinlock_t lock; /* protection agains interrupts */
- struct list_head waiting_list; /* list of waiting elements */
- struct list_head free_list; /* list of available elements */
-} auerchain_t,*pauerchain_t;
-
-/* urb blocking completion helper struct */
-typedef struct
-{
- wait_queue_head_t wqh; /* wait for completion */
- unsigned int done; /* completion flag */
-} auerchain_chs_t,*pauerchain_chs_t;
-
-/* ...................................................................*/
-/* buffer element */
-struct auerbufctl; /* forward */
-typedef struct
-{
- char *bufp; /* reference to allocated data buffer */
- unsigned int len; /* number of characters in data buffer */
- unsigned int retries; /* for urb retries */
- struct usb_ctrlrequest *dr; /* for setup data in control messages */
- struct urb * urbp; /* USB urb */
- struct auerbufctl *list; /* pointer to list */
- struct list_head buff_list; /* reference to next buffer in list */
-} auerbuf_t,*pauerbuf_t;
-
-/* buffer list control block */
-typedef struct auerbufctl
-{
- spinlock_t lock; /* protection in interrupt */
- struct list_head free_buff_list;/* free buffers */
- struct list_head rec_buff_list; /* buffers with receive data */
-} auerbufctl_t,*pauerbufctl_t;
-
-/* ...................................................................*/
-/* service context */
-struct auerscon; /* forward */
-typedef void (*auer_dispatch_t)(struct auerscon*, pauerbuf_t);
-typedef void (*auer_disconn_t) (struct auerscon*);
-typedef struct auerscon
-{
- unsigned int id; /* protocol service id AUH_xxxx */
- auer_dispatch_t dispatch; /* dispatch read buffer */
- auer_disconn_t disconnect; /* disconnect from device, wake up all char readers */
-} auerscon_t,*pauerscon_t;
-
-/* ...................................................................*/
-/* USB device context */
-typedef struct
-{
- struct mutex mutex; /* protection in user context */
- char name[20]; /* name of the /dev/usb entry */
- unsigned int dtindex; /* index in the device table */
- struct usb_device * usbdev; /* USB device handle */
- int open_count; /* count the number of open character channels */
- char dev_desc[AUSI_DLEN];/* for storing a textual description */
- unsigned int maxControlLength; /* max. Length of control paket (without header) */
- struct urb * inturbp; /* interrupt urb */
- char * intbufp; /* data buffer for interrupt urb */
- unsigned int irqsize; /* size of interrupt endpoint 1 */
- struct auerchain controlchain; /* for chaining of control messages */
- auerbufctl_t bufctl; /* Buffer control for control transfers */
- pauerscon_t services[AUH_TYPESIZE];/* context pointers for each service */
- unsigned int version; /* Version of the device */
- wait_queue_head_t bufferwait; /* wait for a control buffer */
-} auerswald_t,*pauerswald_t;
-
-/* ................................................................... */
-/* character device context */
-typedef struct
-{
- struct mutex mutex; /* protection in user context */
- pauerswald_t auerdev; /* context pointer of assigned device */
- auerbufctl_t bufctl; /* controls the buffer chain */
- auerscon_t scontext; /* service context */
- wait_queue_head_t readwait; /* for synchronous reading */
- struct mutex readmutex; /* protection against multiple reads */
- pauerbuf_t readbuf; /* buffer held for partial reading */
- unsigned int readoffset; /* current offset in readbuf */
- unsigned int removed; /* is != 0 if device is removed */
-} auerchar_t,*pauerchar_t;
-
-
-/*-------------------------------------------------------------------*/
-/* Forwards */
-static void auerswald_ctrlread_complete (struct urb * urb);
-static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp);
-static struct usb_driver auerswald_driver;
-
-
-/*-------------------------------------------------------------------*/
-/* USB chain helper functions */
-/* -------------------------- */
-
-/* completion function for chained urbs */
-static void auerchain_complete (struct urb * urb)
-{
- unsigned long flags;
- int result;
-
- /* get pointer to element and to chain */
- pauerchainelement_t acep = urb->context;
- pauerchain_t acp = acep->chain;
-
- /* restore original entries in urb */
- urb->context = acep->context;
- urb->complete = acep->complete;
-
- dbg ("auerchain_complete called");
-
- /* call original completion function
- NOTE: this function may lead to more urbs submitted into the chain.
- (no chain lock at calling complete()!)
- acp->active != NULL is protecting us against recursion.*/
- urb->complete (urb);
-
- /* detach element from chain data structure */
- spin_lock_irqsave (&acp->lock, flags);
- if (acp->active != acep) /* paranoia debug check */
- dbg ("auerchain_complete: completion on non-active element called!");
- else
- acp->active = NULL;
-
- /* add the used chain element to the list of free elements */
- list_add_tail (&acep->list, &acp->free_list);
- acep = NULL;
-
- /* is there a new element waiting in the chain? */
- if (!acp->active && !list_empty (&acp->waiting_list)) {
- /* yes: get the entry */
- struct list_head *tmp = acp->waiting_list.next;
- list_del (tmp);
- acep = list_entry (tmp, auerchainelement_t, list);
- acp->active = acep;
- }
- spin_unlock_irqrestore (&acp->lock, flags);
-
- /* submit the new urb */
- if (acep) {
- urb = acep->urbp;
- dbg ("auerchain_complete: submitting next urb from chain");
- urb->status = 0; /* needed! */
- result = usb_submit_urb(urb, GFP_ATOMIC);
-
- /* check for submit errors */
- if (result) {
- urb->status = result;
- dbg("auerchain_complete: usb_submit_urb with error code %d", result);
- /* and do error handling via *this* completion function (recursive) */
- auerchain_complete( urb);
- }
- } else {
- /* simple return without submitting a new urb.
- The empty chain is detected with acp->active == NULL. */
- };
-}
-
-
-/* submit function for chained urbs
- this function may be called from completion context or from user space!
- early = 1 -> submit in front of chain
-*/
-static int auerchain_submit_urb_list (pauerchain_t acp, struct urb * urb, int early)
-{
- int result;
- unsigned long flags;
- pauerchainelement_t acep = NULL;
-
- dbg ("auerchain_submit_urb called");
-
- /* try to get a chain element */
- spin_lock_irqsave (&acp->lock, flags);
- if (!list_empty (&acp->free_list)) {
- /* yes: get the entry */
- struct list_head *tmp = acp->free_list.next;
- list_del (tmp);
- acep = list_entry (tmp, auerchainelement_t, list);
- }
- spin_unlock_irqrestore (&acp->lock, flags);
-
- /* if no chain element available: return with error */
- if (!acep) {
- return -ENOMEM;
- }
-
- /* fill in the new chain element values */
- acep->chain = acp;
- acep->context = urb->context;
- acep->complete = urb->complete;
- acep->urbp = urb;
- INIT_LIST_HEAD (&acep->list);
-
- /* modify urb */
- urb->context = acep;
- urb->complete = auerchain_complete;
- urb->status = -EINPROGRESS; /* usb_submit_urb does this, too */
-
- /* add element to chain - or start it immediately */
- spin_lock_irqsave (&acp->lock, flags);
- if (acp->active) {
- /* there is traffic in the chain, simple add element to chain */
- if (early) {
- dbg ("adding new urb to head of chain");
- list_add (&acep->list, &acp->waiting_list);
- } else {
- dbg ("adding new urb to end of chain");
- list_add_tail (&acep->list, &acp->waiting_list);
- }
- acep = NULL;
- } else {
- /* the chain is empty. Prepare restart */
- acp->active = acep;
- }
- /* Spin has to be removed before usb_submit_urb! */
- spin_unlock_irqrestore (&acp->lock, flags);
-
- /* Submit urb if immediate restart */
- if (acep) {
- dbg("submitting urb immediate");
- urb->status = 0; /* needed! */
- result = usb_submit_urb(urb, GFP_ATOMIC);
- /* check for submit errors */
- if (result) {
- urb->status = result;
- dbg("auerchain_submit_urb: usb_submit_urb with error code %d", result);
- /* and do error handling via completion function */
- auerchain_complete( urb);
- }
- }
-
- return 0;
-}
-
-/* submit function for chained urbs
- this function may be called from completion context or from user space!
-*/
-static int auerchain_submit_urb (pauerchain_t acp, struct urb * urb)
-{
- return auerchain_submit_urb_list (acp, urb, 0);
-}
-
-/* cancel an urb which is submitted to the chain
- the result is 0 if the urb is cancelled, or -EINPROGRESS if
- the function is successfully started.
-*/
-static int auerchain_unlink_urb (pauerchain_t acp, struct urb * urb)
-{
- unsigned long flags;
- struct urb * urbp;
- pauerchainelement_t acep;
- struct list_head *tmp;
-
- dbg ("auerchain_unlink_urb called");
-
- /* search the chain of waiting elements */
- spin_lock_irqsave (&acp->lock, flags);
- list_for_each (tmp, &acp->waiting_list) {
- acep = list_entry (tmp, auerchainelement_t, list);
- if (acep->urbp == urb) {
- list_del (tmp);
- urb->context = acep->context;
- urb->complete = acep->complete;
- list_add_tail (&acep->list, &acp->free_list);
- spin_unlock_irqrestore (&acp->lock, flags);
- dbg ("unlink waiting urb");
- urb->status = -ENOENT;
- urb->complete (urb);
- return 0;
- }
- }
- /* not found. */
- spin_unlock_irqrestore (&acp->lock, flags);
-
- /* get the active urb */
- acep = acp->active;
- if (acep) {
- urbp = acep->urbp;
-
- /* check if we have to cancel the active urb */
- if (urbp == urb) {
- /* note that there is a race condition between the check above
- and the unlink() call because of no lock. This race is harmless,
- because the usb module will detect the unlink() after completion.
- We can't use the acp->lock here because the completion function
- wants to grab it.
- */
- dbg ("unlink active urb");
- return usb_unlink_urb (urbp);
- }
- }
-
- /* not found anyway
- ... is some kind of success
- */
- dbg ("urb to unlink not found in chain");
- return 0;
-}
-
-/* cancel all urbs which are in the chain.
- this function must not be called from interrupt or completion handler.
-*/
-static void auerchain_unlink_all (pauerchain_t acp)
-{
- unsigned long flags;
- struct urb * urbp;
- pauerchainelement_t acep;
-
- dbg ("auerchain_unlink_all called");
-
- /* clear the chain of waiting elements */
- spin_lock_irqsave (&acp->lock, flags);
- while (!list_empty (&acp->waiting_list)) {
- /* get the next entry */
- struct list_head *tmp = acp->waiting_list.next;
- list_del (tmp);
- acep = list_entry (tmp, auerchainelement_t, list);
- urbp = acep->urbp;
- urbp->context = acep->context;
- urbp->complete = acep->complete;
- list_add_tail (&acep->list, &acp->free_list);
- spin_unlock_irqrestore (&acp->lock, flags);
- dbg ("unlink waiting urb");
- urbp->status = -ENOENT;
- urbp->complete (urbp);
- spin_lock_irqsave (&acp->lock, flags);
- }
- spin_unlock_irqrestore (&acp->lock, flags);
-
- /* clear the active urb */
- acep = acp->active;
- if (acep) {
- urbp = acep->urbp;
- dbg ("unlink active urb");
- usb_kill_urb (urbp);
- }
-}
-
-
-/* free the chain.
- this function must not be called from interrupt or completion handler.
-*/
-static void auerchain_free (pauerchain_t acp)
-{
- unsigned long flags;
- pauerchainelement_t acep;
-
- dbg ("auerchain_free called");
-
- /* first, cancel all pending urbs */
- auerchain_unlink_all (acp);
-
- /* free the elements */
- spin_lock_irqsave (&acp->lock, flags);
- while (!list_empty (&acp->free_list)) {
- /* get the next entry */
- struct list_head *tmp = acp->free_list.next;
- list_del (tmp);
- spin_unlock_irqrestore (&acp->lock, flags);
- acep = list_entry (tmp, auerchainelement_t, list);
- kfree (acep);
- spin_lock_irqsave (&acp->lock, flags);
- }
- spin_unlock_irqrestore (&acp->lock, flags);
-}
-
-
-/* Init the chain control structure */
-static void auerchain_init (pauerchain_t acp)
-{
- /* init the chain data structure */
- acp->active = NULL;
- spin_lock_init (&acp->lock);
- INIT_LIST_HEAD (&acp->waiting_list);
- INIT_LIST_HEAD (&acp->free_list);
-}
-
-/* setup a chain.
- It is assumed that there is no concurrency while setting up the chain.
- Requirement: auerchain_init() must have been called before.
-*/
-static int auerchain_setup (pauerchain_t acp, unsigned int numElements)
-{
- pauerchainelement_t acep;
-
- dbg ("auerchain_setup called with %d elements", numElements);
-
- /* fill the list of free elements */
- for (;numElements; numElements--) {
- acep = kzalloc(sizeof(auerchainelement_t), GFP_KERNEL);
- if (!acep)
- goto ac_fail;
- INIT_LIST_HEAD (&acep->list);
- list_add_tail (&acep->list, &acp->free_list);
- }
- return 0;
-
-ac_fail:/* free the elements */
- while (!list_empty (&acp->free_list)) {
- /* get the next entry */
- struct list_head *tmp = acp->free_list.next;
- list_del (tmp);
- acep = list_entry (tmp, auerchainelement_t, list);
- kfree (acep);
- }
- return -ENOMEM;
-}
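/*
 * Illustrative sketch, not part of the original driver: the chain helpers
 * above are used in a fixed order -- auerchain_init() prepares the lock and
 * lists, auerchain_setup() preallocates the chain elements, urbs are then
 * serialized through auerchain_submit_urb(), and auerchain_free() cancels and
 * releases everything. The names "demo_chain_io" and "chain" are hypothetical,
 * an auerchain_t typedef matching pauerchain_t is assumed, and error handling
 * is reduced to the minimum.
 */
static int demo_chain_io(struct urb *urb)
{
	auerchain_t chain;
	int ret;

	auerchain_init(&chain);			/* lock, waiting_list, free_list */
	ret = auerchain_setup(&chain, 8);	/* preallocate 8 chain elements */
	if (ret)
		return ret;

	ret = auerchain_submit_urb(&chain, urb);	/* queued or submitted now */
	if (ret)
		dbg("chain submit failed: %d", ret);

	/* a real caller would wait for the urb to complete before this */
	auerchain_free(&chain);		/* unlinks pending urbs, frees elements */
	return ret;
}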
-
-
-/* completion handler for synchronous chained URBs */
-static void auerchain_blocking_completion (struct urb *urb)
-{
- pauerchain_chs_t pchs = urb->context;
- pchs->done = 1;
- wmb();
- wake_up (&pchs->wqh);
-}
-
-
-/* Starts chained urb and waits for completion or timeout */
-static int auerchain_start_wait_urb (pauerchain_t acp, struct urb *urb, int timeout, int* actual_length)
-{
- auerchain_chs_t chs;
- int status;
-
- dbg ("auerchain_start_wait_urb called");
- init_waitqueue_head (&chs.wqh);
- chs.done = 0;
-
- urb->context = &chs;
- status = auerchain_submit_urb (acp, urb);
- if (status)
- /* something went wrong */
- return status;
-
- timeout = wait_event_timeout(chs.wqh, chs.done, timeout);
-
- if (!timeout && !chs.done) {
- if (urb->status != -EINPROGRESS) { /* No callback?!! */
- dbg ("auerchain_start_wait_urb: raced timeout");
- status = urb->status;
- } else {
- dbg ("auerchain_start_wait_urb: timeout");
- auerchain_unlink_urb (acp, urb); /* remove urb safely */
- status = -ETIMEDOUT;
- }
- } else
- status = urb->status;
-
- if (status >= 0)
- *actual_length = urb->actual_length;
-
- return status;
-}
-
-
-/* auerchain_control_msg - Builds a control urb, sends it off and waits for completion
- acp: pointer to the auerchain
- dev: pointer to the usb device to send the message to
- pipe: endpoint "pipe" to send the message to
- request: USB message request value
- requesttype: USB message request type value
- value: USB message value
- index: USB message index value
- data: pointer to the data to send
- size: length in bytes of the data to send
- timeout: time, in jiffies, to wait for the message to complete before timing out
-
- This function sends a simple control message to a specified endpoint
- and waits for the message to complete, or timeout.
-
- If successful, it returns the transferred length, otherwise a negative error number.
-
- Don't use this function from within an interrupt context, like a
- bottom half handler. If you need an asynchronous message, or need to send
- a message from within interrupt context, use auerchain_submit_urb()
-*/
-static int auerchain_control_msg (pauerchain_t acp, struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype,
- __u16 value, __u16 index, void *data, __u16 size, int timeout)
-{
- int ret;
- struct usb_ctrlrequest *dr;
- struct urb *urb;
- int uninitialized_var(length);
-
- dbg ("auerchain_control_msg");
- dr = kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL);
- if (!dr)
- return -ENOMEM;
- urb = usb_alloc_urb (0, GFP_KERNEL);
- if (!urb) {
- kfree (dr);
- return -ENOMEM;
- }
-
- dr->bRequestType = requesttype;
- dr->bRequest = request;
- dr->wValue = cpu_to_le16 (value);
- dr->wIndex = cpu_to_le16 (index);
- dr->wLength = cpu_to_le16 (size);
-
- usb_fill_control_urb (urb, dev, pipe, (unsigned char*)dr, data, size, /* build urb */
- auerchain_blocking_completion, NULL);
- ret = auerchain_start_wait_urb (acp, urb, timeout, &length);
-
- usb_free_urb (urb);
- kfree (dr);
-
- if (ret < 0)
- return ret;
- else
- return length;
-}
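/*
 * Hypothetical usage sketch for auerchain_control_msg() above; the request,
 * requesttype, value and index constants are placeholders, not real Auerswald
 * protocol values. It reads two bytes from control endpoint 0 through the
 * chain and returns them as a little-endian value, or a negative error code.
 */
static int demo_read_word(pauerchain_t acp, struct usb_device *dev)
{
	u8 *buf;
	int ret;

	buf = kmalloc(2, GFP_KERNEL);	/* urb transfers need kmalloc'd memory */
	if (!buf)
		return -ENOMEM;

	ret = auerchain_control_msg(acp, dev,
			usb_rcvctrlpipe(dev, 0),	/* control endpoint, IN */
			0x01,				/* request (placeholder) */
			USB_TYPE_VENDOR | USB_DIR_IN,	/* requesttype (placeholder) */
			0,				/* value */
			0,				/* index */
			buf, 2,				/* data, size */
			2 * HZ);			/* timeout: two seconds */

	if (ret == 2)				/* success: transferred length */
		ret = buf[0] | (buf[1] << 8);
	else if (ret >= 0)
		ret = -EIO;			/* short transfer */

	kfree(buf);
	return ret;
}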
-
-
-/*-------------------------------------------------------------------*/
-/* Buffer List helper functions */
-
-/* free a single auerbuf */
-static void auerbuf_free (pauerbuf_t bp)
-{
- kfree(bp->bufp);
- kfree(bp->dr);
- usb_free_urb(bp->urbp);
- kfree(bp);
-}
-
-/* free the buffers from an auerbuf list */
-static void auerbuf_free_list (struct list_head *q)
-{
- struct list_head *tmp;
- struct list_head *p;
- pauerbuf_t bp;
-
- dbg ("auerbuf_free_list");
- for (p = q->next; p != q;) {
- bp = list_entry (p, auerbuf_t, buff_list);
- tmp = p->next;
- list_del (p);
- p = tmp;
- auerbuf_free (bp);
- }
-}
-
-/* init the members of a list control block */
-static void auerbuf_init (pauerbufctl_t bcp)
-{
- dbg ("auerbuf_init");
- spin_lock_init (&bcp->lock);
- INIT_LIST_HEAD (&bcp->free_buff_list);
- INIT_LIST_HEAD (&bcp->rec_buff_list);
-}
-
-/* free all buffers from an auerbuf chain */
-static void auerbuf_free_buffers (pauerbufctl_t bcp)
-{
- unsigned long flags;
- dbg ("auerbuf_free_buffers");
-
- spin_lock_irqsave (&bcp->lock, flags);
-
- auerbuf_free_list (&bcp->free_buff_list);
- auerbuf_free_list (&bcp->rec_buff_list);
-
- spin_unlock_irqrestore (&bcp->lock, flags);
-}
-
-/* setup a list of buffers */
-/* requirement: auerbuf_init() */
-static int auerbuf_setup (pauerbufctl_t bcp, unsigned int numElements, unsigned int bufsize)
-{
- pauerbuf_t bep = NULL;
-
- dbg ("auerbuf_setup called with %d elements of %d bytes", numElements, bufsize);
-
- /* fill the list of free elements */
- for (;numElements; numElements--) {
- bep = kzalloc(sizeof(auerbuf_t), GFP_KERNEL);
- if (!bep)
- goto bl_fail;
- bep->list = bcp;
- INIT_LIST_HEAD (&bep->buff_list);
- bep->bufp = kmalloc (bufsize, GFP_KERNEL);
- if (!bep->bufp)
- goto bl_fail;
- bep->dr = kmalloc(sizeof (struct usb_ctrlrequest), GFP_KERNEL);
- if (!bep->dr)
- goto bl_fail;
- bep->urbp = usb_alloc_urb (0, GFP_KERNEL);
- if (!bep->urbp)
- goto bl_fail;
- list_add_tail (&bep->buff_list, &bcp->free_buff_list);
- }
- return 0;
-
-bl_fail:/* not enough memory. Free allocated elements */
- dbg ("auerbuf_setup: no more memory");
- auerbuf_free(bep);
- auerbuf_free_buffers (bcp);
- return -ENOMEM;
-}
-
-/* insert a used buffer into the free list */
-static void auerbuf_releasebuf( pauerbuf_t bp)
-{
- unsigned long flags;
- pauerbufctl_t bcp = bp->list;
- bp->retries = 0;
-
- dbg ("auerbuf_releasebuf called");
- spin_lock_irqsave (&bcp->lock, flags);
- list_add_tail (&bp->buff_list, &bcp->free_buff_list);
- spin_unlock_irqrestore (&bcp->lock, flags);
-}
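/*
 * Sketch of the pool usage pattern that the completion handlers and the char
 * device code below repeat inline: pop an auerbuf from the free list under
 * the pool lock, or get NULL when the pool is exhausted. "auerbuf_get" is a
 * hypothetical helper, not part of the original driver; buffers obtained this
 * way go back to the pool via auerbuf_releasebuf().
 */
static pauerbuf_t auerbuf_get(pauerbufctl_t bcp)
{
	unsigned long flags;
	pauerbuf_t bp = NULL;

	spin_lock_irqsave(&bcp->lock, flags);
	if (!list_empty(&bcp->free_buff_list)) {
		struct list_head *tmp = bcp->free_buff_list.next;

		list_del(tmp);
		bp = list_entry(tmp, auerbuf_t, buff_list);
	}
	spin_unlock_irqrestore(&bcp->lock, flags);

	return bp;
}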
-
-
-/*-------------------------------------------------------------------*/
-/* Completion handlers */
-
-/* Values of urb->status or results of usb_submit_urb():
-0 Initial, OK
--EINPROGRESS during submission until end
--ENOENT if urb is unlinked
--ETIME Device did not respond
--ENOMEM Memory Overflow
--ENODEV Specified USB-device or bus doesn't exist
--ENXIO URB already queued
--EINVAL a) Invalid transfer type specified (or not supported)
- b) Invalid interrupt interval (0 < n < 256)
--EAGAIN a) Specified ISO start frame too early
- b) (using ISO-ASAP) Too much scheduled for the future; wait some time and try again.
--EFBIG Too many ISO frames requested (currently about 900 with uhci)
--EPIPE Specified pipe handle/endpoint is already stalled
--EMSGSIZE Endpoint message size is zero, do interface/alternate setting
--EPROTO a) Bitstuff error
- b) Unknown USB error
--EILSEQ CRC mismatch
--ENOSR Buffer error
--EREMOTEIO Short packet detected
--EXDEV ISO transfer only partially completed; look at individual frame status for details
--EINVAL ISO madness, if this happens: Log off and go home
--EOVERFLOW babble
-*/
-
-/* check if a status code allows a retry */
-static int auerswald_status_retry (int status)
-{
- switch (status) {
- case 0:
- case -ETIME:
- case -EOVERFLOW:
- case -EAGAIN:
- case -EPIPE:
- case -EPROTO:
- case -EILSEQ:
- case -ENOSR:
- case -EREMOTEIO:
- return 1; /* do a retry */
- }
- return 0; /* no retry possible */
-}
-
-/* Completion of asynchronous write block */
-static void auerchar_ctrlwrite_complete (struct urb * urb)
-{
- pauerbuf_t bp = urb->context;
- pauerswald_t cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));
- dbg ("auerchar_ctrlwrite_complete called");
-
- /* reuse the buffer */
- auerbuf_releasebuf (bp);
- /* Wake up all processes waiting for a buffer */
- wake_up (&cp->bufferwait);
-}
-
-/* Completion handler for dummy retry packet */
-static void auerswald_ctrlread_wretcomplete (struct urb * urb)
-{
- pauerbuf_t bp = urb->context;
- pauerswald_t cp;
- int ret;
- int status = urb->status;
-
- dbg ("auerswald_ctrlread_wretcomplete called");
- dbg ("complete with status: %d", status);
- cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));
-
- /* check if it is possible to advance */
- if (!auerswald_status_retry(status) || !cp->usbdev) {
- /* reuse the buffer */
- err ("control dummy: transmission error %d, can not retry", status);
- auerbuf_releasebuf (bp);
- /* Wake up all processes waiting for a buffer */
- wake_up (&cp->bufferwait);
- return;
- }
-
- /* fill the control message */
- bp->dr->bRequestType = AUT_RREQ;
- bp->dr->bRequest = AUV_RBLOCK;
- bp->dr->wLength = bp->dr->wValue; /* temporarily stored */
- bp->dr->wValue = cpu_to_le16 (1); /* Retry Flag */
- /* bp->dr->index = channel id; remains */
- usb_fill_control_urb (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0),
- (unsigned char*)bp->dr, bp->bufp, le16_to_cpu (bp->dr->wLength),
- auerswald_ctrlread_complete,bp);
-
- /* submit the control msg as next packet */
- ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1);
- if (ret) {
- dbg ("auerswald_ctrlread_complete: nonzero result of auerchain_submit_urb_list %d", ret);
- bp->urbp->status = ret;
- auerswald_ctrlread_complete (bp->urbp);
- }
-}
-
-/* completion handler for receiving of control messages */
-static void auerswald_ctrlread_complete (struct urb * urb)
-{
- unsigned int serviceid;
- pauerswald_t cp;
- pauerscon_t scp;
- pauerbuf_t bp = urb->context;
- int status = urb->status;
- int ret;
-
- dbg ("auerswald_ctrlread_complete called");
-
- cp = ((pauerswald_t)((char *)(bp->list)-(unsigned long)(&((pauerswald_t)0)->bufctl)));
-
- /* check if there is valid data in this urb */
- if (status) {
- dbg ("complete with non-zero status: %d", status);
- /* should we do a retry? */
- if (!auerswald_status_retry(status)
- || !cp->usbdev
- || (cp->version < AUV_RETRY)
- || (bp->retries >= AU_RETRIES)) {
- /* reuse the buffer */
- err ("control read: transmission error %d, can not retry", status);
- auerbuf_releasebuf (bp);
- /* Wake up all processes waiting for a buffer */
- wake_up (&cp->bufferwait);
- return;
- }
- bp->retries++;
- dbg ("Retry count = %d", bp->retries);
- /* send a long dummy control-write-message to allow device firmware to react */
- bp->dr->bRequestType = AUT_WREQ;
- bp->dr->bRequest = AUV_DUMMY;
- bp->dr->wValue = bp->dr->wLength; /* temporary storage */
- // bp->dr->wIndex channel ID remains
- bp->dr->wLength = cpu_to_le16 (32); /* >= 8 bytes */
- usb_fill_control_urb (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0),
- (unsigned char*)bp->dr, bp->bufp, 32,
- auerswald_ctrlread_wretcomplete,bp);
-
- /* submit the control msg as next packet */
- ret = auerchain_submit_urb_list (&cp->controlchain, bp->urbp, 1);
- if (ret) {
- dbg ("auerswald_ctrlread_complete: nonzero result of auerchain_submit_urb_list %d", ret);
- bp->urbp->status = ret;
- auerswald_ctrlread_wretcomplete (bp->urbp);
- }
- return;
- }
-
- /* get the actual bytecount (incl. headerbyte) */
- bp->len = urb->actual_length;
- serviceid = bp->bufp[0] & AUH_TYPEMASK;
- dbg ("Paket with serviceid %d and %d bytes received", serviceid, bp->len);
-
- /* dispatch the packet */
- scp = cp->services[serviceid];
- if (scp) {
- /* look, Ma, a listener! */
- scp->dispatch (scp, bp);
- }
-
- /* release the packet */
- auerbuf_releasebuf (bp);
- /* Wake up all processes waiting for a buffer */
- wake_up (&cp->bufferwait);
-}
-
-/*-------------------------------------------------------------------*/
-/* Handling of Interrupt Endpoint */
-/* This interrupt endpoint is used to inform the host about waiting
- messages from the USB device.
-*/
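/*
 * Layout of the interrupt message that auerswald_int_complete() below decodes
 * from cp->intbufp, shown as an illustrative struct. The driver itself reads
 * the raw bytes, and AU_IRQMINSIZE guards the minimum length; this struct is
 * hypothetical and only documents the byte positions used below.
 */
struct au_int_msg_layout {
	u8 cmd;			/* byte 0: must be AU_IRQCMDID */
	u8 subcmd;		/* byte 1: must be AU_BLOCKRDY */
	u8 channel;		/* byte 2: service channel id */
	u8 count_lo;		/* byte 3: byte count, low part */
	u8 count_hi;		/* byte 4: byte count, high part (little endian) */
} __attribute__((packed));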
-/* int completion handler. */
-static void auerswald_int_complete (struct urb * urb)
-{
- unsigned long flags;
- unsigned int channelid;
- unsigned int bytecount;
- int ret;
- int status = urb->status;
- pauerbuf_t bp = NULL;
- pauerswald_t cp = urb->context;
-
- dbg ("%s called", __func__);
-
- switch (status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dbg("%s - urb shutting down with status: %d", __func__, status);
- return;
- default:
- dbg("%s - nonzero urb status received: %d", __func__, status);
- goto exit;
- }
-
- /* check if all needed data was received */
- if (urb->actual_length < AU_IRQMINSIZE) {
- dbg ("invalid data length received: %d bytes", urb->actual_length);
- goto exit;
- }
-
- /* check the command code */
- if (cp->intbufp[0] != AU_IRQCMDID) {
- dbg ("invalid command received: %d", cp->intbufp[0]);
- goto exit;
- }
-
- /* check the command type */
- if (cp->intbufp[1] != AU_BLOCKRDY) {
- dbg ("invalid command type received: %d", cp->intbufp[1]);
- goto exit;
- }
-
- /* now extract the information */
- channelid = cp->intbufp[2];
- bytecount = (unsigned char)cp->intbufp[3];
- bytecount |= (unsigned char)cp->intbufp[4] << 8;
-
- /* check the channel id */
- if (channelid >= AUH_TYPESIZE) {
- dbg ("invalid channel id received: %d", channelid);
- goto exit;
- }
-
- /* check the byte count */
- if (bytecount > (cp->maxControlLength+AUH_SIZE)) {
- dbg ("invalid byte count received: %d", bytecount);
- goto exit;
- }
- dbg ("Service Channel = %d", channelid);
- dbg ("Byte Count = %d", bytecount);
-
- /* get a buffer for the next data packet */
- spin_lock_irqsave (&cp->bufctl.lock, flags);
- if (!list_empty (&cp->bufctl.free_buff_list)) {
- /* yes: get the entry */
- struct list_head *tmp = cp->bufctl.free_buff_list.next;
- list_del (tmp);
- bp = list_entry (tmp, auerbuf_t, buff_list);
- }
- spin_unlock_irqrestore (&cp->bufctl.lock, flags);
-
- /* if no buffer available: skip it */
- if (!bp) {
- dbg ("auerswald_int_complete: no data buffer available");
- /* can we do something more?
- This is a big problem: if this int packet is ignored, the
- device will wait forever and not signal any more data.
- The only real solution is: having enough buffers!
- Or perhaps temporarily disabling the int endpoint?
- */
- goto exit;
- }
-
- /* fill the control message */
- bp->dr->bRequestType = AUT_RREQ;
- bp->dr->bRequest = AUV_RBLOCK;
- bp->dr->wValue = cpu_to_le16 (0);
- bp->dr->wIndex = cpu_to_le16 (channelid | AUH_DIRECT | AUH_UNSPLIT);
- bp->dr->wLength = cpu_to_le16 (bytecount);
- usb_fill_control_urb (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0),
- (unsigned char*)bp->dr, bp->bufp, bytecount,
- auerswald_ctrlread_complete,bp);
-
- /* submit the control msg */
- ret = auerchain_submit_urb (&cp->controlchain, bp->urbp);
- if (ret) {
- dbg ("auerswald_int_complete: nonzero result of auerchain_submit_urb %d", ret);
- bp->urbp->status = ret;
- auerswald_ctrlread_complete( bp->urbp);
- /* here applies the same problem as above: device locking! */
- }
-exit:
- ret = usb_submit_urb (urb, GFP_ATOMIC);
- if (ret)
- err ("%s - usb_submit_urb failed with result %d",
- __func__, ret);
-}
-
-/* int memory deallocation
- NOTE: no mutex please!
-*/
-static void auerswald_int_free (pauerswald_t cp)
-{
- if (cp->inturbp) {
- usb_free_urb(cp->inturbp);
- cp->inturbp = NULL;
- }
- kfree(cp->intbufp);
- cp->intbufp = NULL;
-}
-
-/* This function is called to activate the interrupt
- endpoint. This function returns 0 if successful or an error code.
- NOTE: no mutex please!
-*/
-static int auerswald_int_open (pauerswald_t cp)
-{
- int ret;
- struct usb_host_endpoint *ep;
- int irqsize;
- dbg ("auerswald_int_open");
-
- ep = cp->usbdev->ep_in[AU_IRQENDP];
- if (!ep) {
- ret = -EFAULT;
- goto intoend;
- }
- irqsize = le16_to_cpu(ep->desc.wMaxPacketSize);
- cp->irqsize = irqsize;
-
- /* allocate the urb and data buffer */
- if (!cp->inturbp) {
- cp->inturbp = usb_alloc_urb (0, GFP_KERNEL);
- if (!cp->inturbp) {
- ret = -ENOMEM;
- goto intoend;
- }
- }
- if (!cp->intbufp) {
- cp->intbufp = kmalloc (irqsize, GFP_KERNEL);
- if (!cp->intbufp) {
- ret = -ENOMEM;
- goto intoend;
- }
- }
- /* setup urb */
- usb_fill_int_urb (cp->inturbp, cp->usbdev,
- usb_rcvintpipe (cp->usbdev,AU_IRQENDP), cp->intbufp,
- irqsize, auerswald_int_complete, cp, ep->desc.bInterval);
- /* start the urb */
- cp->inturbp->status = 0; /* needed! */
- ret = usb_submit_urb (cp->inturbp, GFP_KERNEL);
-
-intoend:
- if (ret < 0) {
- /* activation of interrupt endpoint has failed. Now clean up. */
- dbg ("auerswald_int_open: activation of int endpoint failed");
-
- /* deallocate memory */
- auerswald_int_free (cp);
- }
- return ret;
-}
-
-/* This function is called to deactivate the interrupt
- endpoint.
- NOTE: no mutex please!
-*/
-static void auerswald_int_release (pauerswald_t cp)
-{
- dbg ("auerswald_int_release");
-
- /* stop the int endpoint */
- usb_kill_urb (cp->inturbp);
-
- /* deallocate memory */
- auerswald_int_free (cp);
-}
-
-/* --------------------------------------------------------------------- */
-/* Helper functions */
-
-/* wake up waiting readers */
-static void auerchar_disconnect (pauerscon_t scp)
-{
- pauerchar_t ccp = ((pauerchar_t)((char *)(scp)-(unsigned long)(&((pauerchar_t)0)->scontext)));
- dbg ("auerchar_disconnect called");
- ccp->removed = 1;
- wake_up (&ccp->readwait);
-}
-
-
-/* dispatch a read packet to a waiting character device */
-static void auerchar_ctrlread_dispatch (pauerscon_t scp, pauerbuf_t bp)
-{
- unsigned long flags;
- pauerchar_t ccp;
- pauerbuf_t newbp = NULL;
- char * charp;
- dbg ("auerchar_ctrlread_dispatch called");
- ccp = ((pauerchar_t)((char *)(scp)-(unsigned long)(&((pauerchar_t)0)->scontext)));
-
- /* get a read buffer from character device context */
- spin_lock_irqsave (&ccp->bufctl.lock, flags);
- if (!list_empty (&ccp->bufctl.free_buff_list)) {
- /* yes: get the entry */
- struct list_head *tmp = ccp->bufctl.free_buff_list.next;
- list_del (tmp);
- newbp = list_entry (tmp, auerbuf_t, buff_list);
- }
- spin_unlock_irqrestore (&ccp->bufctl.lock, flags);
-
- if (!newbp) {
- dbg ("No read buffer available, discard paket!");
- return; /* no buffer, no dispatch */
- }
-
- /* hand the data over to the new buffer element by swapping the
- buffer pointers (all buffers have the same length), which avoids copying */
- charp = newbp->bufp;
- newbp->bufp = bp->bufp;
- bp->bufp = charp;
- newbp->len = bp->len;
-
- /* insert new buffer in read list */
- spin_lock_irqsave (&ccp->bufctl.lock, flags);
- list_add_tail (&newbp->buff_list, &ccp->bufctl.rec_buff_list);
- spin_unlock_irqrestore (&ccp->bufctl.lock, flags);
- dbg ("read buffer appended to rec_list");
-
- /* wake up pending synchronous reads */
- wake_up (&ccp->readwait);
-}
-
-
-/* Delete an auerswald driver context */
-static void auerswald_delete( pauerswald_t cp)
-{
- dbg( "auerswald_delete");
- if (cp == NULL)
- return;
-
- /* Wake up all processes waiting for a buffer */
- wake_up (&cp->bufferwait);
-
- /* Cleaning up */
- auerswald_int_release (cp);
- auerchain_free (&cp->controlchain);
- auerbuf_free_buffers (&cp->bufctl);
-
- /* release the memory */
- kfree( cp);
-}
-
-
-/* Delete an auerswald character context */
-static void auerchar_delete( pauerchar_t ccp)
-{
- dbg ("auerchar_delete");
- if (ccp == NULL)
- return;
-
- /* wake up pending synchronous reads */
- ccp->removed = 1;
- wake_up (&ccp->readwait);
-
- /* remove the read buffer */
- if (ccp->readbuf) {
- auerbuf_releasebuf (ccp->readbuf);
- ccp->readbuf = NULL;
- }
-
- /* remove the character buffers */
- auerbuf_free_buffers (&ccp->bufctl);
-
- /* release the memory */
- kfree( ccp);
-}
-
-
-/* add a new service to the device
- scp->id must be set!
- return: 0 if OK, else error code
-*/
-static int auerswald_addservice (pauerswald_t cp, pauerscon_t scp)
-{
- int ret;
-
- /* is the device available? */
- if (!cp->usbdev) {
- dbg ("usbdev == NULL");
- return -EIO; /*no: can not add a service, sorry*/
- }
-
- /* is the service available? */
- if (cp->services[scp->id]) {
- dbg ("service is busy");
- return -EBUSY;
- }
-
- /* device is available, service is free */
- cp->services[scp->id] = scp;
-
- /* register service in device */
- ret = auerchain_control_msg(
- &cp->controlchain, /* pointer to control chain */
- cp->usbdev, /* pointer to device */
- usb_sndctrlpipe (cp->usbdev, 0), /* pipe to control endpoint */
- AUV_CHANNELCTL, /* USB message request value */
- AUT_WREQ, /* USB message request type value */
- 0x01, /* open USB message value */
- scp->id, /* USB message index value */
- NULL, /* pointer to the data to send */
- 0, /* length in bytes of the data to send */
- HZ * 2); /* time to wait for the message to complete before timing out */
- if (ret < 0) {
- dbg ("auerswald_addservice: auerchain_control_msg returned error code %d", ret);
- /* undo above actions */
- cp->services[scp->id] = NULL;
- return ret;
- }
-
- dbg ("auerswald_addservice: channel open OK");
- return 0;
-}
-
-
-/* remove a service from the device
- scp->id must be set! */
-static void auerswald_removeservice (pauerswald_t cp, pauerscon_t scp)
-{
- dbg ("auerswald_removeservice called");
-
- /* check if we have a service allocated */
- if (scp->id == AUH_UNASSIGNED)
- return;
-
- /* If there is a device: close the channel */
- if (cp->usbdev) {
- /* Close the service channel inside the device */
- int ret = auerchain_control_msg(
- &cp->controlchain, /* pointer to control chain */
- cp->usbdev, /* pointer to device */
- usb_sndctrlpipe (cp->usbdev, 0), /* pipe to control endpoint */
- AUV_CHANNELCTL, /* USB message request value */
- AUT_WREQ, /* USB message request type value */
- 0x00, // close /* USB message value */
- scp->id, /* USB message index value */
- NULL, /* pointer to the data to send */
- 0, /* length in bytes of the data to send */
- HZ * 2); /* time to wait for the message to complete before timing out */
- if (ret < 0) {
- dbg ("auerswald_removeservice: auerchain_control_msg returned error code %d", ret);
- }
- else {
- dbg ("auerswald_removeservice: channel close OK");
- }
- }
-
- /* remove the service from the device */
- cp->services[scp->id] = NULL;
- scp->id = AUH_UNASSIGNED;
-}
-
-
-/* --------------------------------------------------------------------- */
-/* Char device functions */
-
-/* Open a new character device */
-static int auerchar_open (struct inode *inode, struct file *file)
-{
- int dtindex = iminor(inode);
- pauerswald_t cp = NULL;
- pauerchar_t ccp = NULL;
- struct usb_interface *intf;
- int ret;
-
- /* minor number in range? */
- if (dtindex < 0) {
- return -ENODEV;
- }
- intf = usb_find_interface(&auerswald_driver, dtindex);
- if (!intf) {
- return -ENODEV;
- }
-
- /* usb device available? */
- cp = usb_get_intfdata (intf);
- if (cp == NULL) {
- return -ENODEV;
- }
- if (mutex_lock_interruptible(&cp->mutex)) {
- return -ERESTARTSYS;
- }
-
- /* we have access to the device. Now let's allocate memory */
- ccp = kzalloc(sizeof(auerchar_t), GFP_KERNEL);
- if (ccp == NULL) {
- err ("out of memory");
- ret = -ENOMEM;
- goto ofail;
- }
-
- /* Initialize device descriptor */
- mutex_init(&ccp->mutex);
- mutex_init(&ccp->readmutex);
- auerbuf_init (&ccp->bufctl);
- ccp->scontext.id = AUH_UNASSIGNED;
- ccp->scontext.dispatch = auerchar_ctrlread_dispatch;
- ccp->scontext.disconnect = auerchar_disconnect;
- init_waitqueue_head (&ccp->readwait);
-
- ret = auerbuf_setup (&ccp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE);
- if (ret) {
- goto ofail;
- }
-
- cp->open_count++;
- ccp->auerdev = cp;
- dbg("open %s as /dev/%s", cp->dev_desc, cp->name);
- mutex_unlock(&cp->mutex);
-
- /* file IO stuff */
- file->f_pos = 0;
- file->private_data = ccp;
- return nonseekable_open(inode, file);
-
- /* Error exit */
-ofail: mutex_unlock(&cp->mutex);
- auerchar_delete (ccp);
- return ret;
-}
-
-
-/* IOCTL functions */
-static long auerchar_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- pauerchar_t ccp = (pauerchar_t) file->private_data;
- int ret = 0;
- audevinfo_t devinfo;
- pauerswald_t cp = NULL;
- unsigned int u;
- unsigned int __user *user_arg = (unsigned int __user *)arg;
-
- dbg ("ioctl");
-
- /* get the mutexes */
- if (mutex_lock_interruptible(&ccp->mutex)) {
- return -ERESTARTSYS;
- }
- cp = ccp->auerdev;
- if (!cp) {
- mutex_unlock(&ccp->mutex);
- return -ENODEV;
- }
- if (mutex_lock_interruptible(&cp->mutex)) {
- mutex_unlock(&ccp->mutex);
- return -ERESTARTSYS;
- }
-
- /* Check for removal */
- if (!cp->usbdev) {
- mutex_unlock(&cp->mutex);
- mutex_unlock(&ccp->mutex);
- return -ENODEV;
- }
- lock_kernel();
- switch (cmd) {
-
- /* return != 0 if Transmit channel ready to send */
- case IOCTL_AU_TXREADY:
- dbg ("IOCTL_AU_TXREADY");
- u = ccp->auerdev
- && (ccp->scontext.id != AUH_UNASSIGNED)
- && !list_empty (&cp->bufctl.free_buff_list);
- ret = put_user (u, user_arg);
- break;
-
- /* return != 0 if connected to a service channel */
- case IOCTL_AU_CONNECT:
- dbg ("IOCTL_AU_CONNECT");
- u = (ccp->scontext.id != AUH_UNASSIGNED);
- ret = put_user (u, user_arg);
- break;
-
- /* return != 0 if Receive Data available */
- case IOCTL_AU_RXAVAIL:
- dbg ("IOCTL_AU_RXAVAIL");
- if (ccp->scontext.id == AUH_UNASSIGNED) {
- ret = -EIO;
- break;
- }
- u = 0; /* no data */
- if (ccp->readbuf) {
- int restlen = ccp->readbuf->len - ccp->readoffset;
- if (restlen > 0)
- u = 1;
- }
- if (!u) {
- if (!list_empty (&ccp->bufctl.rec_buff_list)) {
- u = 1;
- }
- }
- ret = put_user (u, user_arg);
- break;
-
- /* return the max. buffer length for the device */
- case IOCTL_AU_BUFLEN:
- dbg ("IOCTL_AU_BUFLEN");
- u = cp->maxControlLength;
- ret = put_user (u, user_arg);
- break;
-
- /* requesting a service channel */
- case IOCTL_AU_SERVREQ:
- dbg ("IOCTL_AU_SERVREQ");
- /* requesting a service means: release the previous one first */
- auerswald_removeservice (cp, &ccp->scontext);
- /* get the channel number */
- ret = get_user (u, user_arg);
- if (ret) {
- break;
- }
- if ((u < AUH_FIRSTUSERCH) || (u >= AUH_TYPESIZE)) {
- ret = -EIO;
- break;
- }
- dbg ("auerchar service request parameters are ok");
- ccp->scontext.id = u;
-
- /* request the service now */
- ret = auerswald_addservice (cp, &ccp->scontext);
- if (ret) {
- /* no: revert service entry */
- ccp->scontext.id = AUH_UNASSIGNED;
- }
- break;
-
- /* get a string descriptor for the device */
- case IOCTL_AU_DEVINFO:
- dbg ("IOCTL_AU_DEVINFO");
- if (copy_from_user (&devinfo, (void __user *) arg, sizeof (audevinfo_t))) {
- ret = -EFAULT;
- break;
- }
- u = strlen(cp->dev_desc)+1;
- if (u > devinfo.bsize) {
- u = devinfo.bsize;
- }
- ret = copy_to_user(devinfo.buf, cp->dev_desc, u) ? -EFAULT : 0;
- break;
-
- /* get the max. string descriptor length */
- case IOCTL_AU_SLEN:
- dbg ("IOCTL_AU_SLEN");
- u = AUSI_DLEN;
- ret = put_user (u, user_arg);
- break;
-
- default:
- dbg ("IOCTL_AU_UNKNOWN");
- ret = -ENOTTY;
- break;
- }
- unlock_kernel();
- /* release the mutexes */
- mutex_unlock(&cp->mutex);
- mutex_unlock(&ccp->mutex);
- return ret;
-}
-
-/* Read data from the device */
-static ssize_t auerchar_read (struct file *file, char __user *buf, size_t count, loff_t * ppos)
-{
- unsigned long flags;
- pauerchar_t ccp = (pauerchar_t) file->private_data;
- pauerbuf_t bp = NULL;
- wait_queue_t wait;
-
- dbg ("auerchar_read");
-
- /* Error checking */
- if (!ccp)
- return -EIO;
- if (*ppos)
- return -ESPIPE;
- if (count == 0)
- return 0;
-
- /* get the mutex */
- if (mutex_lock_interruptible(&ccp->mutex))
- return -ERESTARTSYS;
-
- /* Can we expect to read something? */
- if (ccp->scontext.id == AUH_UNASSIGNED) {
- mutex_unlock(&ccp->mutex);
- return -EIO;
- }
-
- /* only one reader per device allowed */
- if (mutex_lock_interruptible(&ccp->readmutex)) {
- mutex_unlock(&ccp->mutex);
- return -ERESTARTSYS;
- }
-
- /* read data from readbuf, if available */
-doreadbuf:
- bp = ccp->readbuf;
- if (bp) {
- /* read the maximum bytes */
- int restlen = bp->len - ccp->readoffset;
- if (restlen < 0)
- restlen = 0;
- if (count > restlen)
- count = restlen;
- if (count) {
- if (copy_to_user (buf, bp->bufp+ccp->readoffset, count)) {
- dbg ("auerswald_read: copy_to_user failed");
- mutex_unlock(&ccp->readmutex);
- mutex_unlock(&ccp->mutex);
- return -EFAULT;
- }
- }
- /* advance the read offset */
- ccp->readoffset += count;
- restlen -= count;
- // reuse the read buffer
- if (restlen <= 0) {
- auerbuf_releasebuf (bp);
- ccp->readbuf = NULL;
- }
- /* return with number of bytes read */
- if (count) {
- mutex_unlock(&ccp->readmutex);
- mutex_unlock(&ccp->mutex);
- return count;
- }
- }
-
- /* a read buffer is not available. Try to get the next data block. */
-doreadlist:
- /* Preparing for sleep */
- init_waitqueue_entry (&wait, current);
- set_current_state (TASK_INTERRUPTIBLE);
- add_wait_queue (&ccp->readwait, &wait);
-
- bp = NULL;
- spin_lock_irqsave (&ccp->bufctl.lock, flags);
- if (!list_empty (&ccp->bufctl.rec_buff_list)) {
- /* yes: get the entry */
- struct list_head *tmp = ccp->bufctl.rec_buff_list.next;
- list_del (tmp);
- bp = list_entry (tmp, auerbuf_t, buff_list);
- }
- spin_unlock_irqrestore (&ccp->bufctl.lock, flags);
-
- /* have we got data? */
- if (bp) {
- ccp->readbuf = bp;
- ccp->readoffset = AUH_SIZE; /* for headerbyte */
- set_current_state (TASK_RUNNING);
- remove_wait_queue (&ccp->readwait, &wait);
- goto doreadbuf; /* now we can read! */
- }
-
- /* no data available. Should we wait? */
- if (file->f_flags & O_NONBLOCK) {
- dbg ("No read buffer available, returning -EAGAIN");
- set_current_state (TASK_RUNNING);
- remove_wait_queue (&ccp->readwait, &wait);
- mutex_unlock(&ccp->readmutex);
- mutex_unlock(&ccp->mutex);
- return -EAGAIN; /* nonblocking, no data available */
- }
-
- /* yes, we should wait! */
- mutex_unlock(&ccp->mutex); /* allow other operations while we wait */
- schedule();
- remove_wait_queue (&ccp->readwait, &wait);
- if (signal_pending (current)) {
- /* woken up by a signal */
- mutex_unlock(&ccp->readmutex);
- return -ERESTARTSYS;
- }
-
- /* Anything left to read? */
- if ((ccp->scontext.id == AUH_UNASSIGNED) || ccp->removed) {
- mutex_unlock(&ccp->readmutex);
- return -EIO;
- }
-
- if (mutex_lock_interruptible(&ccp->mutex)) {
- mutex_unlock(&ccp->readmutex);
- return -ERESTARTSYS;
- }
-
- /* try to read the incoming data again */
- goto doreadlist;
-}
-
-
-/* Write a data block into the right service channel of the device */
-static ssize_t auerchar_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
-{
- pauerchar_t ccp = (pauerchar_t) file->private_data;
- pauerswald_t cp = NULL;
- pauerbuf_t bp;
- unsigned long flags;
- int ret;
- wait_queue_t wait;
-
- dbg ("auerchar_write %zd bytes", len);
-
- /* Error checking */
- if (!ccp)
- return -EIO;
- if (*ppos)
- return -ESPIPE;
- if (len == 0)
- return 0;
-
-write_again:
- /* get the mutex */
- if (mutex_lock_interruptible(&ccp->mutex))
- return -ERESTARTSYS;
-
- /* Can we expect to write something? */
- if (ccp->scontext.id == AUH_UNASSIGNED) {
- mutex_unlock(&ccp->mutex);
- return -EIO;
- }
-
- cp = ccp->auerdev;
- if (!cp) {
- mutex_unlock(&ccp->mutex);
- return -ERESTARTSYS;
- }
- if (mutex_lock_interruptible(&cp->mutex)) {
- mutex_unlock(&ccp->mutex);
- return -ERESTARTSYS;
- }
- if (!cp->usbdev) {
- mutex_unlock(&cp->mutex);
- mutex_unlock(&ccp->mutex);
- return -EIO;
- }
- /* Prepare for sleep */
- init_waitqueue_entry (&wait, current);
- set_current_state (TASK_INTERRUPTIBLE);
- add_wait_queue (&cp->bufferwait, &wait);
-
- /* Try to get a buffer from the device pool.
- We can't use a buffer from ccp->bufctl because the write
- command will last beyond a release() */
- bp = NULL;
- spin_lock_irqsave (&cp->bufctl.lock, flags);
- if (!list_empty (&cp->bufctl.free_buff_list)) {
- /* yes: get the entry */
- struct list_head *tmp = cp->bufctl.free_buff_list.next;
- list_del (tmp);
- bp = list_entry (tmp, auerbuf_t, buff_list);
- }
- spin_unlock_irqrestore (&cp->bufctl.lock, flags);
-
- /* are there any buffers left? */
- if (!bp) {
- mutex_unlock(&cp->mutex);
- mutex_unlock(&ccp->mutex);
-
- /* NONBLOCK: don't wait */
- if (file->f_flags & O_NONBLOCK) {
- set_current_state (TASK_RUNNING);
- remove_wait_queue (&cp->bufferwait, &wait);
- return -EAGAIN;
- }
-
- /* BLOCKING: wait */
- schedule();
- remove_wait_queue (&cp->bufferwait, &wait);
- if (signal_pending (current)) {
- /* woken up by a signal */
- return -ERESTARTSYS;
- }
- goto write_again;
- } else {
- set_current_state (TASK_RUNNING);
- remove_wait_queue (&cp->bufferwait, &wait);
- }
-
- /* protect against too big write requests */
- if (len > cp->maxControlLength)
- len = cp->maxControlLength;
-
- /* Fill the buffer */
- if (copy_from_user ( bp->bufp+AUH_SIZE, buf, len)) {
- dbg ("copy_from_user failed");
- auerbuf_releasebuf (bp);
- /* Wake up all processes waiting for a buffer */
- wake_up (&cp->bufferwait);
- mutex_unlock(&cp->mutex);
- mutex_unlock(&ccp->mutex);
- return -EFAULT;
- }
-
- /* set the header byte */
- *(bp->bufp) = ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT;
-
- /* Set the transfer Parameters */
- bp->len = len+AUH_SIZE;
- bp->dr->bRequestType = AUT_WREQ;
- bp->dr->bRequest = AUV_WBLOCK;
- bp->dr->wValue = cpu_to_le16 (0);
- bp->dr->wIndex = cpu_to_le16 (ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT);
- bp->dr->wLength = cpu_to_le16 (len+AUH_SIZE);
- usb_fill_control_urb (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0),
- (unsigned char*)bp->dr, bp->bufp, len+AUH_SIZE,
- auerchar_ctrlwrite_complete, bp);
- /* up we go */
- ret = auerchain_submit_urb (&cp->controlchain, bp->urbp);
- mutex_unlock(&cp->mutex);
- if (ret) {
- dbg ("auerchar_write: nonzero result of auerchain_submit_urb %d", ret);
- auerbuf_releasebuf (bp);
- /* Wake up all processes waiting for a buffer */
- wake_up (&cp->bufferwait);
- mutex_unlock(&ccp->mutex);
- return -EIO;
- }
- else {
- dbg ("auerchar_write: Write OK");
- mutex_unlock(&ccp->mutex);
- return len;
- }
-}
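/*
 * Shape of the control block that auerchar_write() above assembles and that
 * the read path strips again (readoffset starts at AUH_SIZE): one header byte
 * carrying the service channel id plus the AUH_DIRECT and AUH_UNSPLIT flags,
 * followed by the user payload. Illustrative only, assuming AUH_SIZE covers
 * exactly that single header byte, as the "headerbyte" comments suggest.
 */
struct au_block_layout {
	u8 header;		/* scontext.id | AUH_DIRECT | AUH_UNSPLIT */
	u8 payload[];		/* up to maxControlLength data bytes */
} __attribute__((packed));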
-
-
-/* Close a character device */
-static int auerchar_release (struct inode *inode, struct file *file)
-{
- pauerchar_t ccp = (pauerchar_t) file->private_data;
- pauerswald_t cp;
- dbg("release");
-
- mutex_lock(&ccp->mutex);
- cp = ccp->auerdev;
- if (cp) {
- mutex_lock(&cp->mutex);
- /* remove an open service */
- auerswald_removeservice (cp, &ccp->scontext);
- /* detach from device */
- if ((--cp->open_count <= 0) && (cp->usbdev == NULL)) {
- /* usb device waits for removal */
- mutex_unlock(&cp->mutex);
- auerswald_delete (cp);
- } else {
- mutex_unlock(&cp->mutex);
- }
- cp = NULL;
- ccp->auerdev = NULL;
- }
- mutex_unlock(&ccp->mutex);
- auerchar_delete (ccp);
-
- return 0;
-}
-
-
-/*----------------------------------------------------------------------*/
-/* File operation structure */
-static const struct file_operations auerswald_fops =
-{
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = auerchar_read,
- .write = auerchar_write,
- .unlocked_ioctl = auerchar_ioctl,
- .open = auerchar_open,
- .release = auerchar_release,
-};
-
-static struct usb_class_driver auerswald_class = {
- .name = "auer%d",
- .fops = &auerswald_fops,
- .minor_base = AUER_MINOR_BASE,
-};
-
-
-/* --------------------------------------------------------------------- */
-/* Special USB driver functions */
-
-/* Probe if this driver wants to serve a USB device
-
- This entry point is called whenever a new device is attached to the bus.
- Then the device driver has to create a new instance of its internal data
- structures for the new device.
-
- The intf argument specifies the interface; the corresponding usb_device
- (with pointers to all USB descriptors) is obtained via interface_to_usbdev().
- The id argument identifies the matching entry of the driver's id table.
- If the driver wants to bind to this device and interface it returns 0 and
- stores its context structure with usb_set_intfdata().
-
- Probing is normally done by checking the vendor and product identifications
- or the class and subclass definitions. If they match, the interface number
- is compared with the ones supported by the driver. When probing is done
- class-based, it may be necessary to parse further USB descriptors because
- the device properties can differ over a wide range.
-*/
-static int auerswald_probe (struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct usb_device *usbdev = interface_to_usbdev(intf);
- pauerswald_t cp = NULL;
- unsigned int u = 0;
- __le16 *pbuf;
- int ret;
-
- dbg ("probe: vendor id 0x%x, device id 0x%x",
- le16_to_cpu(usbdev->descriptor.idVendor),
- le16_to_cpu(usbdev->descriptor.idProduct));
-
- /* we use only the first -and only- interface */
- if (intf->altsetting->desc.bInterfaceNumber != 0)
- return -ENODEV;
-
- /* allocate memory for our device and initialize it */
- cp = kzalloc (sizeof(auerswald_t), GFP_KERNEL);
- if (cp == NULL) {
- err ("out of memory");
- goto pfail;
- }
-
- /* Initialize device descriptor */
- mutex_init(&cp->mutex);
- cp->usbdev = usbdev;
- auerchain_init (&cp->controlchain);
- auerbuf_init (&cp->bufctl);
- init_waitqueue_head (&cp->bufferwait);
-
- ret = usb_register_dev(intf, &auerswald_class);
- if (ret) {
- err ("Not able to get a minor for this device.");
- goto pfail;
- }
-
- /* Give the device a name */
- sprintf (cp->name, "usb/auer%d", intf->minor);
-
- /* Store the index */
- cp->dtindex = intf->minor;
-
- /* Get the usb version of the device */
- cp->version = le16_to_cpu(cp->usbdev->descriptor.bcdDevice);
- dbg ("Version is %X", cp->version);
-
- /* allow the device some time to settle */
- msleep(334);
-
- /* Try to get a suitable textual description of the device */
- /* Device name:*/
- ret = usb_string( cp->usbdev, AUSI_DEVICE, cp->dev_desc, AUSI_DLEN-1);
- if (ret >= 0) {
- u += ret;
- /* Append Serial Number */
- memcpy(&cp->dev_desc[u], ",Ser# ", 6);
- u += 6;
- ret = usb_string( cp->usbdev, AUSI_SERIALNR, &cp->dev_desc[u], AUSI_DLEN-u-1);
- if (ret >= 0) {
- u += ret;
- /* Append subscriber number */
- memcpy(&cp->dev_desc[u], ", ", 2);
- u += 2;
- ret = usb_string( cp->usbdev, AUSI_MSN, &cp->dev_desc[u], AUSI_DLEN-u-1);
- if (ret >= 0) {
- u += ret;
- }
- }
- }
- cp->dev_desc[u] = '\0';
- info("device is a %s", cp->dev_desc);
-
- /* get the maximum allowed control transfer length */
- pbuf = kmalloc(2, GFP_KERNEL); /* use an allocated buffer because of urb target */
- if (!pbuf) {
- err( "out of memory");
- goto pfail;
- }
- ret = usb_control_msg(cp->usbdev, /* pointer to device */
- usb_rcvctrlpipe( cp->usbdev, 0 ), /* pipe to control endpoint */
- AUV_GETINFO, /* USB message request value */
- AUT_RREQ, /* USB message request type value */
- 0, /* USB message value */
- AUDI_MBCTRANS, /* USB message index value */
- pbuf, /* pointer to the receive buffer */
- 2, /* length of the buffer */
- 2000); /* time to wait for the message to complete before timing out */
- if (ret == 2) {
- cp->maxControlLength = le16_to_cpup(pbuf);
- kfree(pbuf);
- dbg("setup: max. allowed control transfersize is %d bytes", cp->maxControlLength);
- } else {
- kfree(pbuf);
- err("setup: getting max. allowed control transfer length failed with error %d", ret);
- goto pfail;
- }
-
- /* allocate a chain for the control messages */
- if (auerchain_setup (&cp->controlchain, AUCH_ELEMENTS)) {
- err ("out of memory");
- goto pfail;
- }
-
- /* allocate buffers for control messages */
- if (auerbuf_setup (&cp->bufctl, AU_RBUFFERS, cp->maxControlLength+AUH_SIZE)) {
- err ("out of memory");
- goto pfail;
- }
-
- /* start the interrupt endpoint */
- if (auerswald_int_open (cp)) {
- err ("int endpoint failed");
- goto pfail;
- }
-
- /* all OK */
- usb_set_intfdata (intf, cp);
- return 0;
-
- /* Error exit: clean up the memory */
-pfail: auerswald_delete (cp);
- return -EIO;
-}
-
-
-/* Disconnect driver from a served device
-
- This function is called whenever a device which was served by this driver
- is disconnected.
-
- The intf argument specifies the interface; the context registered by the
- probe function is retrieved with usb_get_intfdata(). After the disconnect
- function returns, the USB framework completely deallocates all data
- structures associated with this device, so in particular the usb_device
- structure must no longer be used by the driver.
-*/
-static void auerswald_disconnect (struct usb_interface *intf)
-{
- pauerswald_t cp = usb_get_intfdata (intf);
- unsigned int u;
-
- usb_set_intfdata (intf, NULL);
- if (!cp)
- return;
-
- /* give back our USB minor number */
- usb_deregister_dev(intf, &auerswald_class);
-
- mutex_lock(&cp->mutex);
- info ("device /dev/%s now disconnecting", cp->name);
-
- /* Stop the interrupt endpoint */
- auerswald_int_release (cp);
-
- /* remove the control chain allocated in auerswald_probe
- This has the benefit of
- a) all pending (a)synchronous urbs are unlinked
- b) all buffers dealing with urbs are reclaimed
- */
- auerchain_free (&cp->controlchain);
-
- if (cp->open_count == 0) {
- /* nobody is using this device. So we can clean up now */
- mutex_unlock(&cp->mutex);
- /* mutex_unlock() is possible here because no other task
- can open the device (see above). I don't want
- to kfree() a locked mutex. */
-
- auerswald_delete (cp);
- } else {
- /* device is used. Remove the pointer to the
- usb device (it's not valid any more). The last
- release() will do the clean up */
- cp->usbdev = NULL;
- mutex_unlock(&cp->mutex);
- /* Terminate waiting writers */
- wake_up (&cp->bufferwait);
- /* Inform all waiting readers */
- for ( u = 0; u < AUH_TYPESIZE; u++) {
- pauerscon_t scp = cp->services[u];
- if (scp)
- scp->disconnect( scp);
- }
- }
-}
-
-/* Descriptor for the devices which are served by this driver.
- NOTE: this struct is parsed by the usbmanager install scripts.
- Don't change without caution!
-*/
-static struct usb_device_id auerswald_ids [] = {
- { USB_DEVICE (ID_AUERSWALD, 0x00C0) }, /* COMpact 2104 USB */
- { USB_DEVICE (ID_AUERSWALD, 0x00DB) }, /* COMpact 4410/2206 USB */
- { USB_DEVICE (ID_AUERSWALD, 0x00DC) }, /* COMpact 4406 DSL */
- { USB_DEVICE (ID_AUERSWALD, 0x00DD) }, /* COMpact 2204 USB */
- { USB_DEVICE (ID_AUERSWALD, 0x00F1) }, /* Comfort 2000 System Telephone */
- { USB_DEVICE (ID_AUERSWALD, 0x00F2) }, /* Comfort 1200 System Telephone */
- { } /* Terminating entry */
-};
-
-/* Standard module device table */
-MODULE_DEVICE_TABLE (usb, auerswald_ids);
-
-/* Standard usb driver struct */
-static struct usb_driver auerswald_driver = {
- .name = "auerswald",
- .probe = auerswald_probe,
- .disconnect = auerswald_disconnect,
- .id_table = auerswald_ids,
-};
-
-
-/* --------------------------------------------------------------------- */
-/* Module loading/unloading */
-
-/* Driver initialisation. Called after module loading.
- NOTE: there is no concurrency at _init
-*/
-static int __init auerswald_init (void)
-{
- int result;
- dbg ("init");
-
- /* register driver at the USB subsystem */
- result = usb_register (&auerswald_driver);
- if (result < 0) {
- err ("driver could not be registered");
- return -1;
- }
- return 0;
-}
-
-/* Driver deinit. Called before module removal.
- NOTE: there is no concurrency at _cleanup
-*/
-static void __exit auerswald_cleanup (void)
-{
- dbg ("cleanup");
- usb_deregister (&auerswald_driver);
-}
-
-/* --------------------------------------------------------------------- */
-/* Linux device driver module description */
-
-MODULE_AUTHOR (DRIVER_AUTHOR);
-MODULE_DESCRIPTION (DRIVER_DESC);
-MODULE_LICENSE ("GPL");
-
-module_init (auerswald_init);
-module_exit (auerswald_cleanup);
-
-/* --------------------------------------------------------------------- */
-
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index e6ca9979e3ae..a4ef77ef917d 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -19,7 +19,6 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/poll.h>
-#include <linux/version.h>
#include <linux/usb/iowarrior.h>
/* Version Information */
diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c
index d94aa7387608..b897f6554ecd 100644
--- a/drivers/usb/misc/isight_firmware.c
+++ b/drivers/usb/misc/isight_firmware.c
@@ -48,7 +48,8 @@ static int isight_firmware_load(struct usb_interface *intf,
if (request_firmware(&firmware, "isight.fw", &dev->dev) != 0) {
printk(KERN_ERR "Unable to load isight firmware\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out;
}
ptr = firmware->data;
@@ -91,7 +92,6 @@ static int isight_firmware_load(struct usb_interface *intf,
buf, llen, 300) != llen) {
printk(KERN_ERR
"Failed to load isight firmware\n");
- kfree(buf);
ret = -ENODEV;
goto out;
}
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index fbace41a7cba..69c34a58e205 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3270,6 +3270,7 @@ static struct usb_device_id sisusb_table [] = {
{ USB_DEVICE(0x0711, 0x0900) },
{ USB_DEVICE(0x0711, 0x0901) },
{ USB_DEVICE(0x0711, 0x0902) },
+ { USB_DEVICE(0x0711, 0x0918) },
{ USB_DEVICE(0x182d, 0x021c) },
{ USB_DEVICE(0x182d, 0x0269) },
{ }
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
new file mode 100644
index 000000000000..a0017486ad4e
--- /dev/null
+++ b/drivers/usb/musb/Kconfig
@@ -0,0 +1,175 @@
+#
+# USB Dual Role (OTG-ready) Controller Drivers
+# for silicon based on Mentor Graphics INVENTRA designs
+#
+
+comment "Enable Host or Gadget support to see Inventra options"
+ depends on !USB && USB_GADGET=n
+
+# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
+config USB_MUSB_HDRC
+ depends on (USB || USB_GADGET) && HAVE_CLK
+ select TWL4030_USB if MACH_OMAP_3430SDP
+ tristate 'Inventra Highspeed Dual Role Controller (TI, ...)'
+ help
+ Say Y here if your system has a dual role high speed USB
+ controller based on the Mentor Graphics silicon IP. Then
+ configure options to match your silicon and the board
+ it's being used with, including the USB peripheral role,
+ or the USB host role, or both.
+
+ Texas Instruments parts using this IP include DaVinci 644x,
+ OMAP 243x, OMAP 343x, and TUSB 6010.
+
+ If you do not know what this is, please say N.
+
+ To compile this driver as a module, choose M here; the
+ module will be called "musb_hdrc".
+
+config USB_MUSB_SOC
+ boolean
+ depends on USB_MUSB_HDRC
+ default y if ARCH_DAVINCI
+ default y if ARCH_OMAP2430
+ default y if ARCH_OMAP34XX
+ help
+ Use a static <asm/arch/hdrc_cnf.h> file to describe how the
+ controller is configured (endpoints, mechanisms, etc) on the
+ current iteration of a given system-on-chip.
+
+comment "DaVinci 644x USB support"
+ depends on USB_MUSB_HDRC && ARCH_DAVINCI
+
+comment "OMAP 243x high speed USB support"
+ depends on USB_MUSB_HDRC && ARCH_OMAP2430
+
+comment "OMAP 343x high speed USB support"
+ depends on USB_MUSB_HDRC && ARCH_OMAP34XX
+
+config USB_TUSB6010
+ boolean "TUSB 6010 support"
+ depends on USB_MUSB_HDRC && !USB_MUSB_SOC
+ default y
+ help
+ The TUSB 6010 chip, from Texas Instruments, connects a discrete
+ HDRC core using a 16-bit parallel bus (NOR flash style) or VLYNQ
+ (a high speed serial link). It can use system-specific external
+ DMA controllers.
+
+choice
+ prompt "Driver Mode"
+ depends on USB_MUSB_HDRC
+ help
+ Dual-Role devices can support both host and peripheral roles,
+ as well as the special "OTG Device" role, which can switch
+ between both roles as needed.
+
+# use USB_MUSB_HDRC_HCD not USB_MUSB_HOST to #ifdef host side support;
+# OTG needs both roles, not just USB_MUSB_HOST.
+config USB_MUSB_HOST
+ depends on USB
+ bool "USB Host"
+ help
+ Say Y here if your system supports the USB host role.
+ If it has a USB "A" (rectangular), "Mini-A" (uncommon),
+ or "Mini-AB" connector, it supports the host role.
+ (With a "Mini-AB" connector, you should enable USB OTG.)
+
+# use USB_GADGET_MUSB_HDRC not USB_MUSB_PERIPHERAL to #ifdef peripheral
+# side support ... OTG needs both roles
+config USB_MUSB_PERIPHERAL
+ depends on USB_GADGET
+ bool "USB Peripheral (gadget stack)"
+ select USB_GADGET_MUSB_HDRC
+ help
+ Say Y here if your system supports the USB peripheral role.
+ If it has a USB "B" (squarish), "Mini-B", or "Mini-AB"
+ connector, it supports the peripheral role.
+ (With a "Mini-AB" connector, you should enable USB OTG.)
+
+config USB_MUSB_OTG
+ depends on USB && USB_GADGET && PM && EXPERIMENTAL
+ bool "Both host and peripheral: USB OTG (On The Go) Device"
+ select USB_GADGET_MUSB_HDRC
+ select USB_OTG
+ help
+ The most notable feature of USB OTG is support for a
+ "Dual-Role" device, which can act as either a device
+ or a host. The initial role choice can be changed
+ later, when two dual-role devices talk to each other.
+
+ At this writing, the OTG support in this driver is incomplete,
+ omitting the mandatory HNP or SRP protocols. However, some
+ of the cable based role switching works. (That is, grounding
+ the ID pin switches the controller to host mode, while leaving
+ it floating leaves it in peripheral mode.)
+
+ Select this if your system has a Mini-AB connector, or
+ to simplify certain kinds of configuration.
+
+ To implement your OTG Targeted Peripherals List (TPL), enable
+ USB_OTG_WHITELIST and update "drivers/usb/core/otg_whitelist.h"
+ to match your requirements.
+
+endchoice
+
+# enable peripheral support (including with OTG)
+config USB_GADGET_MUSB_HDRC
+ bool
+ depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG)
+# default y
+# select USB_GADGET_DUALSPEED
+# select USB_GADGET_SELECTED
+
+# enables host support (including with OTG)
+config USB_MUSB_HDRC_HCD
+ bool
+ depends on USB_MUSB_HDRC && (USB_MUSB_HOST || USB_MUSB_OTG)
+ select USB_OTG if USB_GADGET_MUSB_HDRC
+ default y
+
+
+config MUSB_PIO_ONLY
+ bool 'Disable DMA (always use PIO)'
+ depends on USB_MUSB_HDRC
+ default y if USB_TUSB6010
+ help
+ All data is copied between memory and FIFO by the CPU.
+ DMA controllers are ignored.
+
+ Do not select 'y' here unless DMA support for your SOC or board
+ is unavailable (or unstable). When DMA is enabled at compile time,
+ you can still disable it at run time using the "use_dma=n" module
+ parameter.
+
+config USB_INVENTRA_DMA
+ bool
+ depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+ default ARCH_OMAP2430 || ARCH_OMAP34XX
+ help
+ Enable DMA transfers using Mentor's engine.
+
+config USB_TI_CPPI_DMA
+ bool
+ depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+ default ARCH_DAVINCI
+ help
+ Enable DMA transfers when TI CPPI DMA is available.
+
+config USB_TUSB_OMAP_DMA
+ bool
+ depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+ depends on USB_TUSB6010
+ depends on ARCH_OMAP
+ default y
+ help
+ Enable DMA transfers on TUSB 6010 when OMAP DMA is available.
+
+config USB_MUSB_DEBUG
+ depends on USB_MUSB_HDRC
+ bool "Enable debugging messages"
+ default n
+ help
+ This enables musb debugging. To set the logging level use the debug
+ module parameter. Starting at level 3, per-transfer (urb, usb_request,
+ packet, or dma transfer) tracing may kick in.
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
new file mode 100644
index 000000000000..b6af0d687a73
--- /dev/null
+++ b/drivers/usb/musb/Makefile
@@ -0,0 +1,69 @@
+#
+# for USB OTG silicon based on Mentor Graphics INVENTRA designs
+#
+
+musb_hdrc-objs := musb_core.o
+
+obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o
+
+ifeq ($(CONFIG_ARCH_DAVINCI),y)
+ musb_hdrc-objs += davinci.o
+endif
+
+ifeq ($(CONFIG_USB_TUSB6010),y)
+ musb_hdrc-objs += tusb6010.o
+endif
+
+ifeq ($(CONFIG_ARCH_OMAP2430),y)
+ musb_hdrc-objs += omap2430.o
+endif
+
+ifeq ($(CONFIG_ARCH_OMAP3430),y)
+ musb_hdrc-objs += omap2430.o
+endif
+
+ifeq ($(CONFIG_USB_GADGET_MUSB_HDRC),y)
+ musb_hdrc-objs += musb_gadget_ep0.o musb_gadget.o
+endif
+
+ifeq ($(CONFIG_USB_MUSB_HDRC_HCD),y)
+ musb_hdrc-objs += musb_virthub.o musb_host.o
+endif
+
+# the kconfig must guarantee that only one of the
+# possible I/O schemes will be enabled at a time ...
+# PIO only, or DMA (several potential schemes).
+# though PIO is always there to back up DMA, and for ep0
+
+ifneq ($(CONFIG_MUSB_PIO_ONLY),y)
+
+ ifeq ($(CONFIG_USB_INVENTRA_DMA),y)
+ musb_hdrc-objs += musbhsdma.o
+
+ else
+ ifeq ($(CONFIG_USB_TI_CPPI_DMA),y)
+ musb_hdrc-objs += cppi_dma.o
+
+ else
+ ifeq ($(CONFIG_USB_TUSB_OMAP_DMA),y)
+ musb_hdrc-objs += tusb6010_omap.o
+
+ endif
+ endif
+ endif
+endif
+
+
+################################################################################
+
+# FIXME remove all these extra "-DMUSB_*" defines, stick to CONFIG_*
+
+ifeq ($(CONFIG_USB_INVENTRA_MUSB_HAS_AHB_ID),y)
+ EXTRA_CFLAGS += -DMUSB_AHB_ID
+endif
+
+# Debugging
+
+ifeq ($(CONFIG_USB_MUSB_DEBUG),y)
+ EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
new file mode 100644
index 000000000000..5ad6d0893cbe
--- /dev/null
+++ b/drivers/usb/musb/cppi_dma.c
@@ -0,0 +1,1540 @@
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file implements a DMA interface using TI's CPPI DMA.
+ * For now it's DaVinci-only, but CPPI isn't specific to DaVinci or USB.
+ * The TUSB6020, using VLYNQ, has CPPI that looks much like DaVinci.
+ */
+
+#include <linux/usb.h>
+
+#include "musb_core.h"
+#include "cppi_dma.h"
+
+
+/* CPPI DMA status 7-mar-2006:
+ *
+ * - See musb_{host,gadget}.c for more info
+ *
+ * - Correct RX DMA generally forces the engine into irq-per-packet mode,
+ * which can easily saturate the CPU under non-mass-storage loads.
+ *
+ * NOTES 24-aug-2006 (2.6.18-rc4):
+ *
+ * - peripheral RXDMA wedged in a test with packets of length 512/512/1.
+ * evidently after the 1 byte packet was received and acked, the queue
+ * of BDs got garbaged so it wouldn't empty the fifo. (rxcsr 0x2003,
+ * and RX DMA0: 4 left, 80000000 8feff880, 8feff860 8feff860; 8f321401
+ * 004001ff 00000001 .. 8feff860) Host was just getting NAKed on tx
+ * of its next (512 byte) packet. IRQ issues?
+ *
+ * REVISIT: the "transfer DMA" glue between CPPI and USB fifos will
+ * evidently also directly update the RX and TX CSRs ... so audit all
+ * host and peripheral side DMA code to avoid CSR access after DMA has
+ * been started.
+ */
+
+/* REVISIT now we can avoid preallocating these descriptors; or
+ * more simply, switch to a global freelist not per-channel ones.
+ * Note: at full speed, 64 descriptors == 4K bulk data.
+ */
+#define NUM_TXCHAN_BD 64
+#define NUM_RXCHAN_BD 64
+
+static inline void cpu_drain_writebuffer(void)
+{
+ wmb();
+#ifdef CONFIG_CPU_ARM926T
+ /* REVISIT this "should not be needed",
+ * but lack of it sure seemed to hurt ...
+ */
+ asm("mcr p15, 0, r0, c7, c10, 4 @ drain write buffer\n");
+#endif
+}
+
+static inline struct cppi_descriptor *cppi_bd_alloc(struct cppi_channel *c)
+{
+ struct cppi_descriptor *bd = c->freelist;
+
+ if (bd)
+ c->freelist = bd->next;
+ return bd;
+}
+
+static inline void
+cppi_bd_free(struct cppi_channel *c, struct cppi_descriptor *bd)
+{
+ if (!bd)
+ return;
+ bd->next = c->freelist;
+ c->freelist = bd;
+}
+
+/*
+ * Start DMA controller
+ *
+ * Initialize the DMA controller as necessary.
+ */
+
+/* zero out entire rx state RAM entry for the channel */
+static void cppi_reset_rx(struct cppi_rx_stateram __iomem *rx)
+{
+ musb_writel(&rx->rx_skipbytes, 0, 0);
+ musb_writel(&rx->rx_head, 0, 0);
+ musb_writel(&rx->rx_sop, 0, 0);
+ musb_writel(&rx->rx_current, 0, 0);
+ musb_writel(&rx->rx_buf_current, 0, 0);
+ musb_writel(&rx->rx_len_len, 0, 0);
+ musb_writel(&rx->rx_cnt_cnt, 0, 0);
+}
+
+/* zero out entire tx state RAM entry for the channel */
+static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr)
+{
+ musb_writel(&tx->tx_head, 0, 0);
+ musb_writel(&tx->tx_buf, 0, 0);
+ musb_writel(&tx->tx_current, 0, 0);
+ musb_writel(&tx->tx_buf_current, 0, 0);
+ musb_writel(&tx->tx_info, 0, 0);
+ musb_writel(&tx->tx_rem_len, 0, 0);
+ /* musb_writel(&tx->tx_dummy, 0, 0); */
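+	/* controller start passes ptr == 0; the abort path passes 1 to put
+	 * the completion pointer into write-back mode (see cppi_channel_abort)
+	 */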
+ musb_writel(&tx->tx_complete, 0, ptr);
+}
+
+static void __init cppi_pool_init(struct cppi *cppi, struct cppi_channel *c)
+{
+ int j;
+
+ /* initialize channel fields */
+ c->head = NULL;
+ c->tail = NULL;
+ c->last_processed = NULL;
+ c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
+ c->controller = cppi;
+ c->is_rndis = 0;
+ c->freelist = NULL;
+
+ /* build the BD Free list for the channel */
+ for (j = 0; j < NUM_TXCHAN_BD + 1; j++) {
+ struct cppi_descriptor *bd;
+ dma_addr_t dma;
+
+ bd = dma_pool_alloc(cppi->pool, GFP_KERNEL, &dma);
+ bd->dma = dma;
+ cppi_bd_free(c, bd);
+ }
+}
+
+static int cppi_channel_abort(struct dma_channel *);
+
+static void cppi_pool_free(struct cppi_channel *c)
+{
+ struct cppi *cppi = c->controller;
+ struct cppi_descriptor *bd;
+
+ (void) cppi_channel_abort(&c->channel);
+ c->channel.status = MUSB_DMA_STATUS_UNKNOWN;
+ c->controller = NULL;
+
+ /* free all its bds */
+ bd = c->last_processed;
+ do {
+ if (bd)
+ dma_pool_free(cppi->pool, bd, bd->dma);
+ bd = cppi_bd_alloc(c);
+ } while (bd);
+ c->last_processed = NULL;
+}
+
+static int __init cppi_controller_start(struct dma_controller *c)
+{
+ struct cppi *controller;
+ void __iomem *tibase;
+ int i;
+
+ controller = container_of(c, struct cppi, controller);
+
+ /* do whatever is necessary to start controller */
+ for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
+ controller->tx[i].transmit = true;
+ controller->tx[i].index = i;
+ }
+ for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
+ controller->rx[i].transmit = false;
+ controller->rx[i].index = i;
+ }
+
+ /* setup BD list on a per channel basis */
+ for (i = 0; i < ARRAY_SIZE(controller->tx); i++)
+ cppi_pool_init(controller, controller->tx + i);
+ for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
+ cppi_pool_init(controller, controller->rx + i);
+
+ tibase = controller->tibase;
+ INIT_LIST_HEAD(&controller->tx_complete);
+
+ /* initialise tx/rx channel head pointers to zero */
+ for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
+ struct cppi_channel *tx_ch = controller->tx + i;
+ struct cppi_tx_stateram __iomem *tx;
+
+ INIT_LIST_HEAD(&tx_ch->tx_complete);
+
+ tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i);
+ tx_ch->state_ram = tx;
+ cppi_reset_tx(tx, 0);
+ }
+ for (i = 0; i < ARRAY_SIZE(controller->rx); i++) {
+ struct cppi_channel *rx_ch = controller->rx + i;
+ struct cppi_rx_stateram __iomem *rx;
+
+ INIT_LIST_HEAD(&rx_ch->tx_complete);
+
+ rx = tibase + DAVINCI_RXCPPI_STATERAM_OFFSET(i);
+ rx_ch->state_ram = rx;
+ cppi_reset_rx(rx);
+ }
+
+ /* enable individual cppi channels */
+ musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
+ DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+ musb_writel(tibase, DAVINCI_RXCPPI_INTENAB_REG,
+ DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+
+ /* enable tx/rx CPPI control */
+ musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
+ musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_ENABLE);
+
+ /* disable RNDIS mode, also host rx RNDIS autorequest */
+ musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
+ musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);
+
+ return 0;
+}
+
+/*
+ * Stop DMA controller
+ *
+ * De-Init the DMA controller as necessary.
+ */
+
+static int cppi_controller_stop(struct dma_controller *c)
+{
+ struct cppi *controller;
+ void __iomem *tibase;
+ int i;
+
+ controller = container_of(c, struct cppi, controller);
+
+ tibase = controller->tibase;
+ /* DISABLE INDIVIDUAL CHANNEL Interrupts */
+ musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
+ DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+ musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
+ DAVINCI_DMA_ALL_CHANNELS_ENABLE);
+
+ DBG(1, "Tearing down RX and TX Channels\n");
+ for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
+ /* FIXME restructure of txdma to use bds like rxdma */
+ controller->tx[i].last_processed = NULL;
+ cppi_pool_free(controller->tx + i);
+ }
+ for (i = 0; i < ARRAY_SIZE(controller->rx); i++)
+ cppi_pool_free(controller->rx + i);
+
+	/* Only TX teardown is properly supported; we disable TX/RX CPPI
+	 * only after the TX channels are cleaned up, since TX CPPI cannot
+	 * be disabled until TX teardown has completed.
+	 */
+ /*disable tx/rx cppi */
+ musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
+ musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
+
+ return 0;
+}
+
+/* While dma channel is allocated, we only want the core irqs active
+ * for fault reports, otherwise we'd get irqs that we don't care about.
+ * Except for TX irqs, where dma done != fifo empty and reusable ...
+ *
+ * NOTE: docs don't say either way, but irq masking **enables** irqs.
+ *
+ * REVISIT same issue applies to pure PIO usage too, and non-cppi dma...
+ */
+static inline void core_rxirq_disable(void __iomem *tibase, unsigned epnum)
+{
+ musb_writel(tibase, DAVINCI_USB_INT_MASK_CLR_REG, 1 << (epnum + 8));
+}
+
+static inline void core_rxirq_enable(void __iomem *tibase, unsigned epnum)
+{
+ musb_writel(tibase, DAVINCI_USB_INT_MASK_SET_REG, 1 << (epnum + 8));
+}
+
+
+/*
+ * Allocate a CPPI Channel for DMA. With CPPI, channels are bound to
+ * each transfer direction of a non-control endpoint, so allocating
+ * (and deallocating) is mostly a way to notice bad housekeeping on
+ * the software side. We assume the irqs are always active.
+ */
+static struct dma_channel *
+cppi_channel_allocate(struct dma_controller *c,
+ struct musb_hw_ep *ep, u8 transmit)
+{
+ struct cppi *controller;
+ u8 index;
+ struct cppi_channel *cppi_ch;
+ void __iomem *tibase;
+
+ controller = container_of(c, struct cppi, controller);
+ tibase = controller->tibase;
+
+ /* ep0 doesn't use DMA; remember cppi indices are 0..N-1 */
+ index = ep->epnum - 1;
+
+ /* return the corresponding CPPI Channel Handle, and
+ * probably disable the non-CPPI irq until we need it.
+ */
+ if (transmit) {
+ if (index >= ARRAY_SIZE(controller->tx)) {
+ DBG(1, "no %cX%d CPPI channel\n", 'T', index);
+ return NULL;
+ }
+ cppi_ch = controller->tx + index;
+ } else {
+ if (index >= ARRAY_SIZE(controller->rx)) {
+ DBG(1, "no %cX%d CPPI channel\n", 'R', index);
+ return NULL;
+ }
+ cppi_ch = controller->rx + index;
+ core_rxirq_disable(tibase, ep->epnum);
+ }
+
+ /* REVISIT make this an error later once the same driver code works
+ * with the other DMA engine too
+ */
+ if (cppi_ch->hw_ep)
+ DBG(1, "re-allocating DMA%d %cX channel %p\n",
+ index, transmit ? 'T' : 'R', cppi_ch);
+ cppi_ch->hw_ep = ep;
+ cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
+
+ DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
+ return &cppi_ch->channel;
+}
+
+/* Release a CPPI Channel. */
+static void cppi_channel_release(struct dma_channel *channel)
+{
+ struct cppi_channel *c;
+ void __iomem *tibase;
+
+ /* REVISIT: for paranoia, check state and abort if needed... */
+
+ c = container_of(channel, struct cppi_channel, channel);
+ tibase = c->controller->tibase;
+ if (!c->hw_ep)
+ DBG(1, "releasing idle DMA channel %p\n", c);
+ else if (!c->transmit)
+ core_rxirq_enable(tibase, c->index + 1);
+
+ /* for now, leave its cppi IRQ enabled (we won't trigger it) */
+ c->hw_ep = NULL;
+ channel->status = MUSB_DMA_STATUS_UNKNOWN;
+}
+
+/* Context: controller irqlocked */
+static void
+cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
+{
+ void __iomem *base = c->controller->mregs;
+ struct cppi_rx_stateram __iomem *rx = c->state_ram;
+
+ musb_ep_select(base, c->index + 1);
+
+ DBG(level, "RX DMA%d%s: %d left, csr %04x, "
+ "%08x H%08x S%08x C%08x, "
+ "B%08x L%08x %08x .. %08x"
+ "\n",
+ c->index, tag,
+ musb_readl(c->controller->tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
+ musb_readw(c->hw_ep->regs, MUSB_RXCSR),
+
+ musb_readl(&rx->rx_skipbytes, 0),
+ musb_readl(&rx->rx_head, 0),
+ musb_readl(&rx->rx_sop, 0),
+ musb_readl(&rx->rx_current, 0),
+
+ musb_readl(&rx->rx_buf_current, 0),
+ musb_readl(&rx->rx_len_len, 0),
+ musb_readl(&rx->rx_cnt_cnt, 0),
+ musb_readl(&rx->rx_complete, 0)
+ );
+}
+
+/* Context: controller irqlocked */
+static void
+cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
+{
+ void __iomem *base = c->controller->mregs;
+ struct cppi_tx_stateram __iomem *tx = c->state_ram;
+
+ musb_ep_select(base, c->index + 1);
+
+ DBG(level, "TX DMA%d%s: csr %04x, "
+ "H%08x S%08x C%08x %08x, "
+ "F%08x L%08x .. %08x"
+ "\n",
+ c->index, tag,
+ musb_readw(c->hw_ep->regs, MUSB_TXCSR),
+
+ musb_readl(&tx->tx_head, 0),
+ musb_readl(&tx->tx_buf, 0),
+ musb_readl(&tx->tx_current, 0),
+ musb_readl(&tx->tx_buf_current, 0),
+
+ musb_readl(&tx->tx_info, 0),
+ musb_readl(&tx->tx_rem_len, 0),
+ /* dummy/unused word 6 */
+ musb_readl(&tx->tx_complete, 0)
+ );
+}
+
+/* Context: controller irqlocked */
+static inline void
+cppi_rndis_update(struct cppi_channel *c, int is_rx,
+ void __iomem *tibase, int is_rndis)
+{
+ /* we may need to change the rndis flag for this cppi channel */
+ if (c->is_rndis != is_rndis) {
+ u32 value = musb_readl(tibase, DAVINCI_RNDIS_REG);
+ u32 temp = 1 << (c->index);
+
+ if (is_rx)
+ temp <<= 16;
+ if (is_rndis)
+ value |= temp;
+ else
+ value &= ~temp;
+ musb_writel(tibase, DAVINCI_RNDIS_REG, value);
+ c->is_rndis = is_rndis;
+ }
+}
+
+static void cppi_dump_rxbd(const char *tag, struct cppi_descriptor *bd)
+{
+ pr_debug("RXBD/%s %08x: "
+ "nxt %08x buf %08x off.blen %08x opt.plen %08x\n",
+ tag, bd->dma,
+ bd->hw_next, bd->hw_bufp, bd->hw_off_len,
+ bd->hw_options);
+}
+
+static void cppi_dump_rxq(int level, const char *tag, struct cppi_channel *rx)
+{
+#if MUSB_DEBUG > 0
+ struct cppi_descriptor *bd;
+
+ if (!_dbg_level(level))
+ return;
+ cppi_dump_rx(level, rx, tag);
+ if (rx->last_processed)
+ cppi_dump_rxbd("last", rx->last_processed);
+ for (bd = rx->head; bd; bd = bd->next)
+ cppi_dump_rxbd("active", bd);
+#endif
+}
+
+
+/* NOTE: DaVinci autoreq is ignored except for host side "RNDIS" mode RX;
+ * so we won't ever use it (see "CPPI RX Woes" below).
+ */
+static inline int cppi_autoreq_update(struct cppi_channel *rx,
+ void __iomem *tibase, int onepacket, unsigned n_bds)
+{
+ u32 val;
+
+#ifdef RNDIS_RX_IS_USABLE
+ u32 tmp;
+ /* assert(is_host_active(musb)) */
+
+ /* start from "AutoReq never" */
+ tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
+ val = tmp & ~((0x3) << (rx->index * 2));
+
+ /* HCD arranged reqpkt for packet #1. we arrange int
+ * for all but the last one, maybe in two segments.
+ */
+ if (!onepacket) {
+#if 0
+ /* use two segments, autoreq "all" then the last "never" */
+ val |= ((0x3) << (rx->index * 2));
+ n_bds--;
+#else
+ /* one segment, autoreq "all-but-last" */
+ val |= ((0x1) << (rx->index * 2));
+#endif
+ }
+
+ if (val != tmp) {
+ int n = 100;
+
+ /* make sure that autoreq is updated before continuing */
+ musb_writel(tibase, DAVINCI_AUTOREQ_REG, val);
+ do {
+ tmp = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
+ if (tmp == val)
+ break;
+ cpu_relax();
+ } while (n-- > 0);
+ }
+#endif
+
+ /* REQPKT is turned off after each segment */
+ if (n_bds && rx->channel.actual_len) {
+ void __iomem *regs = rx->hw_ep->regs;
+
+ val = musb_readw(regs, MUSB_RXCSR);
+ if (!(val & MUSB_RXCSR_H_REQPKT)) {
+ val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
+ musb_writew(regs, MUSB_RXCSR, val);
+			/* flush writebuffer */
+ val = musb_readw(regs, MUSB_RXCSR);
+ }
+ }
+ return n_bds;
+}
+
+
+/* Buffer enqueuing Logic:
+ *
+ * - RX builds new queues each time, to help handle routine "early
+ * termination" cases (faults, including errors and short reads)
+ * more correctly.
+ *
+ * - for now, TX reuses the same queue of BDs every time
+ *
+ * REVISIT long term, we want a normal dynamic model.
+ * ... the goal will be to append to the
+ * existing queue, processing completed "dma buffers" (segments) on the fly.
+ *
+ * Otherwise we force an IRQ latency between requests, which slows us a lot
+ * (especially in "transparent" dma). Unfortunately that model seems to be
+ * inherent in the DMA model from the Mentor code, except in the rare case
+ * of transfers big enough (~128+ KB) that we could append "middle" segments
+ * in the TX paths. (RX can't do this, see below.)
+ *
+ * That's true even in the CPPI-friendly iso case, where most urbs have
+ * several small segments provided in a group and where the "packet at a time"
+ * "transparent" DMA model is always correct, even on the RX side.
+ */
+
+/*
+ * CPPI TX:
+ * ========
+ * TX is a lot more reasonable than RX; it doesn't need to run in
+ * irq-per-packet mode very often.  RNDIS mode seems to behave well too
+ * (except how it handles the exactly-N-packets case). Building a
+ * txdma queue with multiple requests (urb or usb_request) looks
+ * like it would work ... but fault handling would need much testing.
+ *
+ * The main issue with TX mode RNDIS relates to transfer lengths that
+ * are an exact multiple of the packet length. It appears that there's
+ * a hiccup in that case (maybe the DMA completes before the ZLP gets
+ * written?) boiling down to not being able to rely on CPPI writing any
+ * terminating zero length packet before the next transfer is written.
+ * So that's punted to PIO; better yet, gadget drivers can avoid it.
+ *
+ * Plus, there's allegedly an undocumented constraint that rndis transfer
+ * length be a multiple of 64 bytes ... but the chip doesn't act that
+ * way, and we really don't _want_ that behavior anyway.
+ *
+ * On TX, "transparent" mode works ... although experiments have shown
+ * problems trying to use the SOP/EOP bits in different USB packets.
+ *
+ * REVISIT try to handle terminating zero length packets using CPPI
+ * instead of doing it by PIO after an IRQ. (Meanwhile, make Ethernet
+ * links avoid that issue by forcing them to avoid zlps.)
+ */
+static void
+cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
+{
+ unsigned maxpacket = tx->maxpacket;
+ dma_addr_t addr = tx->buf_dma + tx->offset;
+ size_t length = tx->buf_len - tx->offset;
+ struct cppi_descriptor *bd;
+ unsigned n_bds;
+ unsigned i;
+ struct cppi_tx_stateram __iomem *tx_ram = tx->state_ram;
+ int rndis;
+
+ /* TX can use the CPPI "rndis" mode, where we can probably fit this
+ * transfer in one BD and one IRQ. The only time we would NOT want
+ * to use it is when hardware constraints prevent it, or if we'd
+ * trigger the "send a ZLP?" confusion.
+ */
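+	/* i.e. maxpacket is a multiple of 64, the length fits the 16-bit
+	 * descriptor length field, and the last packet will be short so
+	 * no terminating ZLP is needed.
+	 */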
+ rndis = (maxpacket & 0x3f) == 0
+ && length < 0xffff
+ && (length % maxpacket) != 0;
+
+ if (rndis) {
+ maxpacket = length;
+ n_bds = 1;
+ } else {
+ n_bds = length / maxpacket;
+ if (!length || (length % maxpacket))
+ n_bds++;
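+		/* clamp to this channel's BD pool; cppi_completion() queues
+		 * the rest of the buffer as later segments
+		 */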
+ n_bds = min(n_bds, (unsigned) NUM_TXCHAN_BD);
+ length = min(n_bds * maxpacket, length);
+ }
+
+ DBG(4, "TX DMA%d, pktSz %d %s bds %d dma 0x%x len %u\n",
+ tx->index,
+ maxpacket,
+ rndis ? "rndis" : "transparent",
+ n_bds,
+ addr, length);
+
+ cppi_rndis_update(tx, 0, musb->ctrl_base, rndis);
+
+ /* assuming here that channel_program is called during
+ * transfer initiation ... current code maintains state
+ * for one outstanding request only (no queues, not even
+ * the implicit ones of an iso urb).
+ */
+
+ bd = tx->freelist;
+ tx->head = bd;
+ tx->last_processed = NULL;
+
+ /* FIXME use BD pool like RX side does, and just queue
+ * the minimum number for this request.
+ */
+
+ /* Prepare queue of BDs first, then hand it to hardware.
+ * All BDs except maybe the last should be of full packet
+ * size; for RNDIS there _is_ only that last packet.
+ */
+ for (i = 0; i < n_bds; ) {
+ if (++i < n_bds && bd->next)
+ bd->hw_next = bd->next->dma;
+ else
+ bd->hw_next = 0;
+
+ bd->hw_bufp = tx->buf_dma + tx->offset;
+
+ /* FIXME set EOP only on the last packet,
+ * SOP only on the first ... avoid IRQs
+ */
+ if ((tx->offset + maxpacket) <= tx->buf_len) {
+ tx->offset += maxpacket;
+ bd->hw_off_len = maxpacket;
+ bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
+ | CPPI_OWN_SET | maxpacket;
+ } else {
+ /* only this one may be a partial USB Packet */
+ u32 partial_len;
+
+ partial_len = tx->buf_len - tx->offset;
+ tx->offset = tx->buf_len;
+ bd->hw_off_len = partial_len;
+
+ bd->hw_options = CPPI_SOP_SET | CPPI_EOP_SET
+ | CPPI_OWN_SET | partial_len;
+ if (partial_len == 0)
+ bd->hw_options |= CPPI_ZERO_SET;
+ }
+
+ DBG(5, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
+ bd, bd->hw_next, bd->hw_bufp,
+ bd->hw_off_len, bd->hw_options);
+
+ /* update the last BD enqueued to the list */
+ tx->tail = bd;
+ bd = bd->next;
+ }
+
+ /* BDs live in DMA-coherent memory, but writes might be pending */
+ cpu_drain_writebuffer();
+
+ /* Write to the HeadPtr in state RAM to trigger */
+ musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma);
+
+ cppi_dump_tx(5, tx, "/S");
+}
+
+/*
+ * CPPI RX Woes:
+ * =============
+ * Consider a 1KB bulk RX buffer in two scenarios: (a) it's fed two 300 byte
+ * packets back-to-back, and (b) it's fed two 512 byte packets back-to-back.
+ * (Full speed transfers have similar scenarios.)
+ *
+ * The correct behavior for Linux is that (a) fills the buffer with 300 bytes,
+ * and the next packet goes into a buffer that's queued later; while (b) fills
+ * the buffer with 1024 bytes. How to do that with CPPI?
+ *
+ * - RX queues in "rndis" mode -- one single BD -- handle (a) correctly, but
+ * (b) loses **BADLY** because nothing (!) happens when that second packet
+ * fills the buffer, much less when a third one arrives. (Which makes this
+ * not a "true" RNDIS mode. In the RNDIS protocol short-packet termination
+ * is optional, and it's fine if peripherals -- not hosts! -- pad messages
+ * out to end-of-buffer. Standard PCI host controller DMA descriptors
+ * implement that mode by default ... which is no accident.)
+ *
+ * - RX queues in "transparent" mode -- two BDs with 512 bytes each -- have
+ * converse problems: (b) is handled right, but (a) loses badly. CPPI RX
+ * ignores SOP/EOP markings and processes both of those BDs; so both packets
+ * are loaded into the buffer (with a 212 byte gap between them), and the next
+ * buffer queued will NOT get its 300 bytes of data. (It seems like SOP/EOP
+ * are intended as outputs for RX queues, not inputs...)
+ *
+ * - A variant of "transparent" mode -- one BD at a time -- is the only way to
+ * reliably make both cases work, with software handling both cases correctly
+ * and at the significant penalty of needing an IRQ per packet. (The lack of
+ * I/O overlap can be slightly ameliorated by enabling double buffering.)
+ *
+ * So how to get rid of IRQ-per-packet? The transparent multi-BD case could
+ * be used in special cases like mass storage, which sets URB_SHORT_NOT_OK
+ * (or maybe its peripheral side counterpart) to flag (a) scenarios as errors
+ * with guaranteed driver level fault recovery and scrubbing out what's left
+ * of that garbaged datastream.
+ *
+ * But there seems to be no way to identify the cases where CPPI RNDIS mode
+ * is appropriate -- which do NOT include RNDIS host drivers, but do include
+ * the CDC Ethernet driver! -- and the documentation is incomplete/wrong.
+ * So we can't _ever_ use RX RNDIS mode ... except by using a heuristic
+ * that applies best on the peripheral side (and which could fail rudely).
+ *
+ * Leaving only "transparent" mode; we avoid multi-bd modes in almost all
+ * cases other than mass storage class. Otherwise we're correct but slow,
+ * since CPPI penalizes our need for a "true RNDIS" default mode.
+ */
+
+
+/* Heuristic, intended to kick in for ethernet/rndis peripheral ONLY
+ *
+ * IFF
+ * (a) peripheral mode ... since rndis peripherals could pad their
+ * writes to hosts, causing i/o failure; or we'd have to cope with
+ * a largely unknowable variety of host side protocol variants
+ * (b) and short reads are NOT errors ... since full reads would
+ * cause those same i/o failures
+ * (c) and read length is
+ * - less than 64KB (max per cppi descriptor)
+ * - not a multiple of 4096 (g_zero default, full reads typical)
+ * - N (>1) packets long, ditto (full reads not EXPECTED)
+ * THEN
+ * try rx rndis mode
+ *
+ * Cost of heuristic failing: RXDMA wedges at the end of transfers that
+ * fill out the whole buffer. Buggy host side usb network drivers could
+ * trigger that, but "in the field" such bugs seem to be all but unknown.
+ *
+ * So this module parameter lets the heuristic be disabled. When using
+ * gadgetfs, the heuristic will probably need to be disabled.
+ */
+static bool cppi_rx_rndis = true;
+
+module_param(cppi_rx_rndis, bool, 0);
+MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
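+/* For example -- assuming the driver is built as the musb_hdrc module --
+ * passing "cppi_rx_rndis=0" at modprobe time disables the heuristic.
+ */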
+
+
+/**
+ * cppi_next_rx_segment - dma read for the next chunk of a buffer
+ * @musb: the controller
+ * @rx: dma channel
+ * @onepacket: true unless caller treats short reads as errors, and
+ * performs fault recovery above usbcore.
+ * Context: controller irqlocked
+ *
+ * See above notes about why we can't use multi-BD RX queues except in
+ * rare cases (mass storage class), and can never use the hardware "rndis"
+ * mode (since it's not a "true" RNDIS mode) with complete safety.
+ *
+ * It's ESSENTIAL that callers specify "onepacket" mode unless they kick in
+ * code to recover from corrupted datastreams after each short transfer.
+ */
+static void
+cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
+{
+ unsigned maxpacket = rx->maxpacket;
+ dma_addr_t addr = rx->buf_dma + rx->offset;
+ size_t length = rx->buf_len - rx->offset;
+ struct cppi_descriptor *bd, *tail;
+ unsigned n_bds;
+ unsigned i;
+ void __iomem *tibase = musb->ctrl_base;
+ int is_rndis = 0;
+ struct cppi_rx_stateram __iomem *rx_ram = rx->state_ram;
+
+ if (onepacket) {
+ /* almost every USB driver, host or peripheral side */
+ n_bds = 1;
+
+ /* maybe apply the heuristic above */
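+		/* i.e. peripheral side, more than one packet, under 64KB,
+		 * not a multiple of 4096, and a whole number of packets
+		 */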
+ if (cppi_rx_rndis
+ && is_peripheral_active(musb)
+ && length > maxpacket
+ && (length & ~0xffff) == 0
+ && (length & 0x0fff) != 0
+ && (length & (maxpacket - 1)) == 0) {
+ maxpacket = length;
+ is_rndis = 1;
+ }
+ } else {
+ /* virtually nothing except mass storage class */
+ if (length > 0xffff) {
+ n_bds = 0xffff / maxpacket;
+ length = n_bds * maxpacket;
+ } else {
+ n_bds = length / maxpacket;
+ if (length % maxpacket)
+ n_bds++;
+ }
+ if (n_bds == 1)
+ onepacket = 1;
+ else
+ n_bds = min(n_bds, (unsigned) NUM_RXCHAN_BD);
+ }
+
+ /* In host mode, autorequest logic can generate some IN tokens; it's
+ * tricky since we can't leave REQPKT set in RXCSR after the transfer
+ * finishes. So: multipacket transfers involve two or more segments.
+ * And always at least two IRQs ... RNDIS mode is not an option.
+ */
+ if (is_host_active(musb))
+ n_bds = cppi_autoreq_update(rx, tibase, onepacket, n_bds);
+
+ cppi_rndis_update(rx, 1, musb->ctrl_base, is_rndis);
+
+ length = min(n_bds * maxpacket, length);
+
+ DBG(4, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
+ "dma 0x%x len %u %u/%u\n",
+ rx->index, maxpacket,
+ onepacket
+ ? (is_rndis ? "rndis" : "onepacket")
+ : "multipacket",
+ n_bds,
+ musb_readl(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
+ & 0xffff,
+ addr, length, rx->channel.actual_len, rx->buf_len);
+
+ /* only queue one segment at a time, since the hardware prevents
+ * correct queue shutdown after unexpected short packets
+ */
+ bd = cppi_bd_alloc(rx);
+ rx->head = bd;
+
+ /* Build BDs for all packets in this segment */
+ for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) {
+ u32 bd_len;
+
+ if (i) {
+ bd = cppi_bd_alloc(rx);
+ if (!bd)
+ break;
+ tail->next = bd;
+ tail->hw_next = bd->dma;
+ }
+ bd->hw_next = 0;
+
+ /* all but the last packet will be maxpacket size */
+ if (maxpacket < length)
+ bd_len = maxpacket;
+ else
+ bd_len = length;
+
+ bd->hw_bufp = addr;
+ addr += bd_len;
+ rx->offset += bd_len;
+
+ bd->hw_off_len = (0 /*offset*/ << 16) + bd_len;
+ bd->buflen = bd_len;
+
+ bd->hw_options = CPPI_OWN_SET | (i == 0 ? length : 0);
+ length -= bd_len;
+ }
+
+ /* we always expect at least one reusable BD! */
+ if (!tail) {
+ WARNING("rx dma%d -- no BDs? need %d\n", rx->index, n_bds);
+ return;
+ } else if (i < n_bds)
+ WARNING("rx dma%d -- only %d of %d BDs\n", rx->index, i, n_bds);
+
+ tail->next = NULL;
+ tail->hw_next = 0;
+
+ bd = rx->head;
+ rx->tail = tail;
+
+ /* short reads and other faults should terminate this entire
+ * dma segment. we want one "dma packet" per dma segment, not
+ * one per USB packet, terminating the whole queue at once...
+ * NOTE that current hardware seems to ignore SOP and EOP.
+ */
+ bd->hw_options |= CPPI_SOP_SET;
+ tail->hw_options |= CPPI_EOP_SET;
+
+ if (debug >= 5) {
+ struct cppi_descriptor *d;
+
+ for (d = rx->head; d; d = d->next)
+ cppi_dump_rxbd("S", d);
+ }
+
+ /* in case the preceding transfer left some state... */
+ tail = rx->last_processed;
+ if (tail) {
+ tail->next = bd;
+ tail->hw_next = bd->dma;
+ }
+
+ core_rxirq_enable(tibase, rx->index + 1);
+
+ /* BDs live in DMA-coherent memory, but writes might be pending */
+ cpu_drain_writebuffer();
+
+ /* REVISIT specs say to write this AFTER the BUFCNT register
+ * below ... but that loses badly.
+ */
+ musb_writel(&rx_ram->rx_head, 0, bd->dma);
+
+ /* bufferCount must be at least 3, and zeroes on completion
+ * unless it underflows below zero, or stops at two, or keeps
+ * growing ... grr.
+ */
+ i = musb_readl(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
+ & 0xffff;
+
+ if (!i)
+ musb_writel(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
+ n_bds + 2);
+ else if (n_bds > (i - 3))
+ musb_writel(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
+ n_bds - (i - 3));
+
+ i = musb_readl(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
+ & 0xffff;
+ if (i < (2 + n_bds)) {
+ DBG(2, "bufcnt%d underrun - %d (for %d)\n",
+ rx->index, i, n_bds);
+ musb_writel(tibase,
+ DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
+ n_bds + 2);
+ }
+
+ cppi_dump_rx(4, rx, "/S");
+}
+
+/**
+ * cppi_channel_program - program channel for data transfer
+ * @ch: the channel
+ * @maxpacket: max packet size
+ * @mode: For RX, 1 unless the usb protocol driver promised to treat
+ * all short reads as errors and kick in high level fault recovery.
+ * For TX, ignored because of RNDIS mode races/glitches.
+ * @dma_addr: dma address of buffer
+ * @len: length of buffer
+ * Context: controller irqlocked
+ */
+static int cppi_channel_program(struct dma_channel *ch,
+ u16 maxpacket, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ struct cppi_channel *cppi_ch;
+ struct cppi *controller;
+ struct musb *musb;
+
+ cppi_ch = container_of(ch, struct cppi_channel, channel);
+ controller = cppi_ch->controller;
+ musb = controller->musb;
+
+ switch (ch->status) {
+ case MUSB_DMA_STATUS_BUS_ABORT:
+ case MUSB_DMA_STATUS_CORE_ABORT:
+ /* fault irq handler should have handled cleanup */
+ WARNING("%cX DMA%d not cleaned up after abort!\n",
+ cppi_ch->transmit ? 'T' : 'R',
+ cppi_ch->index);
+ /* WARN_ON(1); */
+ break;
+ case MUSB_DMA_STATUS_BUSY:
+ WARNING("program active channel? %cX DMA%d\n",
+ cppi_ch->transmit ? 'T' : 'R',
+ cppi_ch->index);
+ /* WARN_ON(1); */
+ break;
+ case MUSB_DMA_STATUS_UNKNOWN:
+ DBG(1, "%cX DMA%d not allocated!\n",
+ cppi_ch->transmit ? 'T' : 'R',
+ cppi_ch->index);
+ /* FALLTHROUGH */
+ case MUSB_DMA_STATUS_FREE:
+ break;
+ }
+
+ ch->status = MUSB_DMA_STATUS_BUSY;
+
+ /* set transfer parameters, then queue up its first segment */
+ cppi_ch->buf_dma = dma_addr;
+ cppi_ch->offset = 0;
+ cppi_ch->maxpacket = maxpacket;
+ cppi_ch->buf_len = len;
+
+ /* TX channel? or RX? */
+ if (cppi_ch->transmit)
+ cppi_next_tx_segment(musb, cppi_ch);
+ else
+ cppi_next_rx_segment(musb, cppi_ch, mode);
+
+ return true;
+}
+
+static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
+{
+ struct cppi_channel *rx = &cppi->rx[ch];
+ struct cppi_rx_stateram __iomem *state = rx->state_ram;
+ struct cppi_descriptor *bd;
+ struct cppi_descriptor *last = rx->last_processed;
+ bool completed = false;
+ bool acked = false;
+ int i;
+ dma_addr_t safe2ack;
+ void __iomem *regs = rx->hw_ep->regs;
+
+ cppi_dump_rx(6, rx, "/K");
+
+ bd = last ? last->next : rx->head;
+ if (!bd)
+ return false;
+
+ /* run through all completed BDs */
+ for (i = 0, safe2ack = musb_readl(&state->rx_complete, 0);
+ (safe2ack || completed) && bd && i < NUM_RXCHAN_BD;
+ i++, bd = bd->next) {
+ u16 len;
+
+ /* catch latest BD writes from CPPI */
+ rmb();
+ if (!completed && (bd->hw_options & CPPI_OWN_SET))
+ break;
+
+ DBG(5, "C/RXBD %08x: nxt %08x buf %08x "
+ "off.len %08x opt.len %08x (%d)\n",
+ bd->dma, bd->hw_next, bd->hw_bufp,
+ bd->hw_off_len, bd->hw_options,
+ rx->channel.actual_len);
+
+ /* actual packet received length */
+ if ((bd->hw_options & CPPI_SOP_SET) && !completed)
+ len = bd->hw_off_len & CPPI_RECV_PKTLEN_MASK;
+ else
+ len = 0;
+
+ if (bd->hw_options & CPPI_EOQ_MASK)
+ completed = true;
+
+ if (!completed && len < bd->buflen) {
+ /* NOTE: when we get a short packet, RXCSR_H_REQPKT
+			 * must have been cleared, and no more DMA packets may
+			 * be active in the queue... TI docs didn't say, but
+ * CPPI ignores those BDs even though OWN is still set.
+ */
+ completed = true;
+ DBG(3, "rx short %d/%d (%d)\n",
+ len, bd->buflen,
+ rx->channel.actual_len);
+ }
+
+ /* If we got here, we expect to ack at least one BD; meanwhile
+		 * CPPI may be completing other BDs while we scan this list...
+ *
+ * RACE: we can notice OWN cleared before CPPI raises the
+ * matching irq by writing that BD as the completion pointer.
+ * In such cases, stop scanning and wait for the irq, avoiding
+ * lost acks and states where BD ownership is unclear.
+ */
+ if (bd->dma == safe2ack) {
+ musb_writel(&state->rx_complete, 0, safe2ack);
+ safe2ack = musb_readl(&state->rx_complete, 0);
+ acked = true;
+ if (bd->dma == safe2ack)
+ safe2ack = 0;
+ }
+
+ rx->channel.actual_len += len;
+
+ cppi_bd_free(rx, last);
+ last = bd;
+
+ /* stop scanning on end-of-segment */
+ if (bd->hw_next == 0)
+ completed = true;
+ }
+ rx->last_processed = last;
+
+ /* dma abort, lost ack, or ... */
+ if (!acked && last) {
+ int csr;
+
+ if (safe2ack == 0 || safe2ack == rx->last_processed->dma)
+ musb_writel(&state->rx_complete, 0, safe2ack);
+ if (safe2ack == 0) {
+ cppi_bd_free(rx, last);
+ rx->last_processed = NULL;
+
+ /* if we land here on the host side, H_REQPKT will
+ * be clear and we need to restart the queue...
+ */
+ WARN_ON(rx->head);
+ }
+ musb_ep_select(cppi->mregs, rx->index + 1);
+ csr = musb_readw(regs, MUSB_RXCSR);
+ if (csr & MUSB_RXCSR_DMAENAB) {
+ DBG(4, "list%d %p/%p, last %08x%s, csr %04x\n",
+ rx->index,
+ rx->head, rx->tail,
+ rx->last_processed
+ ? rx->last_processed->dma
+ : 0,
+ completed ? ", completed" : "",
+ csr);
+ cppi_dump_rxq(4, "/what?", rx);
+ }
+ }
+ if (!completed) {
+ int csr;
+
+ rx->head = bd;
+
+ /* REVISIT seems like "autoreq all but EOP" doesn't...
+		 * setting it here "should" be racy, but seems to work
+ */
+ csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
+ if (is_host_active(cppi->musb)
+ && bd
+ && !(csr & MUSB_RXCSR_H_REQPKT)) {
+ csr |= MUSB_RXCSR_H_REQPKT;
+ musb_writew(regs, MUSB_RXCSR,
+ MUSB_RXCSR_H_WZC_BITS | csr);
+ csr = musb_readw(rx->hw_ep->regs, MUSB_RXCSR);
+ }
+ } else {
+ rx->head = NULL;
+ rx->tail = NULL;
+ }
+
+ cppi_dump_rx(6, rx, completed ? "/completed" : "/cleaned");
+ return completed;
+}
+
+void cppi_completion(struct musb *musb, u32 rx, u32 tx)
+{
+ void __iomem *tibase;
+ int i, index;
+ struct cppi *cppi;
+ struct musb_hw_ep *hw_ep = NULL;
+
+ cppi = container_of(musb->dma_controller, struct cppi, controller);
+
+ tibase = musb->ctrl_base;
+
+ /* process TX channels */
+ for (index = 0; tx; tx = tx >> 1, index++) {
+ struct cppi_channel *tx_ch;
+ struct cppi_tx_stateram __iomem *tx_ram;
+ bool completed = false;
+ struct cppi_descriptor *bd;
+
+ if (!(tx & 1))
+ continue;
+
+ tx_ch = cppi->tx + index;
+ tx_ram = tx_ch->state_ram;
+
+ /* FIXME need a cppi_tx_scan() routine, which
+ * can also be called from abort code
+ */
+
+ cppi_dump_tx(5, tx_ch, "/E");
+
+ bd = tx_ch->head;
+
+ if (NULL == bd) {
+ DBG(1, "null BD\n");
+ continue;
+ }
+
+ /* run through all completed BDs */
+ for (i = 0; !completed && bd && i < NUM_TXCHAN_BD;
+ i++, bd = bd->next) {
+ u16 len;
+
+ /* catch latest BD writes from CPPI */
+ rmb();
+ if (bd->hw_options & CPPI_OWN_SET)
+ break;
+
+ DBG(5, "C/TXBD %p n %x b %x off %x opt %x\n",
+ bd, bd->hw_next, bd->hw_bufp,
+ bd->hw_off_len, bd->hw_options);
+
+ len = bd->hw_off_len & CPPI_BUFFER_LEN_MASK;
+ tx_ch->channel.actual_len += len;
+
+ tx_ch->last_processed = bd;
+
+ /* write completion register to acknowledge
+ * processing of completed BDs, and possibly
+ * release the IRQ; EOQ might not be set ...
+ *
+ * REVISIT use the same ack strategy as rx
+ *
+ * REVISIT have observed bit 18 set; huh??
+ */
+ /* if ((bd->hw_options & CPPI_EOQ_MASK)) */
+ musb_writel(&tx_ram->tx_complete, 0, bd->dma);
+
+ /* stop scanning on end-of-segment */
+ if (bd->hw_next == 0)
+ completed = true;
+ }
+
+ /* on end of segment, maybe go to next one */
+ if (completed) {
+ /* cppi_dump_tx(4, tx_ch, "/complete"); */
+
+ /* transfer more, or report completion */
+ if (tx_ch->offset >= tx_ch->buf_len) {
+ tx_ch->head = NULL;
+ tx_ch->tail = NULL;
+ tx_ch->channel.status = MUSB_DMA_STATUS_FREE;
+
+ hw_ep = tx_ch->hw_ep;
+
+ /* Peripheral role never repurposes the
+ * endpoint, so immediate completion is
+ * safe. Host role waits for the fifo
+ * to empty (TXPKTRDY irq) before going
+ * to the next queued bulk transfer.
+ */
+ if (is_host_active(cppi->musb)) {
+#if 0
+ /* WORKAROUND because we may
+					 * not always get TXPKTRDY ...
+ */
+ int csr;
+
+ csr = musb_readw(hw_ep->regs,
+ MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_TXPKTRDY)
+#endif
+ completed = false;
+ }
+ if (completed)
+ musb_dma_completion(musb, index + 1, 1);
+
+ } else {
+ /* Bigger transfer than we could fit in
+ * that first batch of descriptors...
+ */
+ cppi_next_tx_segment(musb, tx_ch);
+ }
+ } else
+ tx_ch->head = bd;
+ }
+
+ /* Start processing the RX block */
+ for (index = 0; rx; rx = rx >> 1, index++) {
+
+ if (rx & 1) {
+ struct cppi_channel *rx_ch;
+
+ rx_ch = cppi->rx + index;
+
+ /* let incomplete dma segments finish */
+ if (!cppi_rx_scan(cppi, index))
+ continue;
+
+ /* start another dma segment if needed */
+ if (rx_ch->channel.actual_len != rx_ch->buf_len
+ && rx_ch->channel.actual_len
+ == rx_ch->offset) {
+ cppi_next_rx_segment(musb, rx_ch, 1);
+ continue;
+ }
+
+ /* all segments completed! */
+ rx_ch->channel.status = MUSB_DMA_STATUS_FREE;
+
+ hw_ep = rx_ch->hw_ep;
+
+ core_rxirq_disable(tibase, index + 1);
+ musb_dma_completion(musb, index + 1, 0);
+ }
+ }
+
+ /* write to CPPI EOI register to re-enable interrupts */
+ musb_writel(tibase, DAVINCI_CPPI_EOI_REG, 0);
+}
+
+/* Instantiate a software object representing a DMA controller. */
+struct dma_controller *__init
+dma_controller_create(struct musb *musb, void __iomem *mregs)
+{
+ struct cppi *controller;
+
+ controller = kzalloc(sizeof *controller, GFP_KERNEL);
+ if (!controller)
+ return NULL;
+
+ controller->mregs = mregs;
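+	/* the TI/CPPI registers sit DAVINCI_BASE_OFFSET below the Mentor
+	 * core registers (musb_platform_init adds the same offset to mregs)
+	 */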
+ controller->tibase = mregs - DAVINCI_BASE_OFFSET;
+
+ controller->musb = musb;
+ controller->controller.start = cppi_controller_start;
+ controller->controller.stop = cppi_controller_stop;
+ controller->controller.channel_alloc = cppi_channel_allocate;
+ controller->controller.channel_release = cppi_channel_release;
+ controller->controller.channel_program = cppi_channel_program;
+ controller->controller.channel_abort = cppi_channel_abort;
+
+ /* NOTE: allocating from on-chip SRAM would give the least
+ * contention for memory access, if that ever matters here.
+ */
+
+ /* setup BufferPool */
+ controller->pool = dma_pool_create("cppi",
+ controller->musb->controller,
+ sizeof(struct cppi_descriptor),
+ CPPI_DESCRIPTOR_ALIGN, 0);
+ if (!controller->pool) {
+ kfree(controller);
+ return NULL;
+ }
+
+ return &controller->controller;
+}
+
+/*
+ * Destroy a previously-instantiated DMA controller.
+ */
+void dma_controller_destroy(struct dma_controller *c)
+{
+ struct cppi *cppi;
+
+ cppi = container_of(c, struct cppi, controller);
+
+ /* assert: caller stopped the controller first */
+ dma_pool_destroy(cppi->pool);
+
+ kfree(cppi);
+}
+
+/*
+ * Context: controller irqlocked, endpoint selected
+ */
+static int cppi_channel_abort(struct dma_channel *channel)
+{
+ struct cppi_channel *cppi_ch;
+ struct cppi *controller;
+ void __iomem *mbase;
+ void __iomem *tibase;
+ void __iomem *regs;
+ u32 value;
+ struct cppi_descriptor *queue;
+
+ cppi_ch = container_of(channel, struct cppi_channel, channel);
+
+ controller = cppi_ch->controller;
+
+ switch (channel->status) {
+ case MUSB_DMA_STATUS_BUS_ABORT:
+ case MUSB_DMA_STATUS_CORE_ABORT:
+ /* from RX or TX fault irq handler */
+ case MUSB_DMA_STATUS_BUSY:
+ /* the hardware needs shutting down */
+ regs = cppi_ch->hw_ep->regs;
+ break;
+ case MUSB_DMA_STATUS_UNKNOWN:
+ case MUSB_DMA_STATUS_FREE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ if (!cppi_ch->transmit && cppi_ch->head)
+ cppi_dump_rxq(3, "/abort", cppi_ch);
+
+ mbase = controller->mregs;
+ tibase = controller->tibase;
+
+ queue = cppi_ch->head;
+ cppi_ch->head = NULL;
+ cppi_ch->tail = NULL;
+
+ /* REVISIT should rely on caller having done this,
+ * and caller should rely on us not changing it.
+ * peripheral code is safe ... check host too.
+ */
+ musb_ep_select(mbase, cppi_ch->index + 1);
+
+ if (cppi_ch->transmit) {
+ struct cppi_tx_stateram __iomem *tx_ram;
+ int enabled;
+
+ /* mask interrupts raised to signal teardown complete. */
+ enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG)
+ & (1 << cppi_ch->index);
+ if (enabled)
+ musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
+ (1 << cppi_ch->index));
+
+ /* REVISIT put timeouts on these controller handshakes */
+
+ cppi_dump_tx(6, cppi_ch, " (teardown)");
+
+ /* teardown DMA engine then usb core */
+ do {
+ value = musb_readl(tibase, DAVINCI_TXCPPI_TEAR_REG);
+ } while (!(value & CPPI_TEAR_READY));
+ musb_writel(tibase, DAVINCI_TXCPPI_TEAR_REG, cppi_ch->index);
+
+ tx_ram = cppi_ch->state_ram;
+ do {
+ value = musb_readl(&tx_ram->tx_complete, 0);
+ } while (0xFFFFFFFC != value);
+ musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC);
+
+ /* FIXME clean up the transfer state ... here?
+ * the completion routine should get called with
+ * an appropriate status code.
+ */
+
+ value = musb_readw(regs, MUSB_TXCSR);
+ value &= ~MUSB_TXCSR_DMAENAB;
+ value |= MUSB_TXCSR_FLUSHFIFO;
+ musb_writew(regs, MUSB_TXCSR, value);
+ musb_writew(regs, MUSB_TXCSR, value);
+
+ /* re-enable interrupt */
+ if (enabled)
+ musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
+ (1 << cppi_ch->index));
+
+ /* While we scrub the TX state RAM, ensure that we clean
+ * up any interrupt that's currently asserted:
+ * 1. Write to completion Ptr value 0x1(bit 0 set)
+ * (write back mode)
+ * 2. Write to completion Ptr value 0x0(bit 0 cleared)
+ * (compare mode)
+ * Value written is compared(for bits 31:2) and when
+ * equal, interrupt is deasserted.
+ */
+ cppi_reset_tx(tx_ram, 1);
+ musb_writel(&tx_ram->tx_complete, 0, 0);
+
+ cppi_dump_tx(5, cppi_ch, " (done teardown)");
+
+ /* REVISIT tx side _should_ clean up the same way
+ * as the RX side ... this does no cleanup at all!
+ */
+
+ } else /* RX */ {
+ u16 csr;
+
+ /* NOTE: docs don't guarantee any of this works ... we
+ * expect that if the usb core stops telling the cppi core
+ * to pull more data from it, then it'll be safe to flush
+ * current RX DMA state iff any pending fifo transfer is done.
+ */
+
+ core_rxirq_disable(tibase, cppi_ch->index + 1);
+
+ /* for host, ensure ReqPkt is never set again */
+ if (is_host_active(cppi_ch->controller->musb)) {
+ value = musb_readl(tibase, DAVINCI_AUTOREQ_REG);
+ value &= ~((0x3) << (cppi_ch->index * 2));
+ musb_writel(tibase, DAVINCI_AUTOREQ_REG, value);
+ }
+
+ csr = musb_readw(regs, MUSB_RXCSR);
+
+ /* for host, clear (just) ReqPkt at end of current packet(s) */
+ if (is_host_active(cppi_ch->controller->musb)) {
+ csr |= MUSB_RXCSR_H_WZC_BITS;
+ csr &= ~MUSB_RXCSR_H_REQPKT;
+ } else
+ csr |= MUSB_RXCSR_P_WZC_BITS;
+
+ /* clear dma enable */
+ csr &= ~(MUSB_RXCSR_DMAENAB);
+ musb_writew(regs, MUSB_RXCSR, csr);
+ csr = musb_readw(regs, MUSB_RXCSR);
+
+ /* Quiesce: wait for current dma to finish (if not cleanup).
+ * We can't use bit zero of stateram->rx_sop, since that
+ * refers to an entire "DMA packet" not just emptying the
+ * current fifo. Most segments need multiple usb packets.
+ */
+ if (channel->status == MUSB_DMA_STATUS_BUSY)
+ udelay(50);
+
+ /* scan the current list, reporting any data that was
+ * transferred and acking any IRQ
+ */
+ cppi_rx_scan(controller, cppi_ch->index);
+
+ /* clobber the existing state once it's idle
+ *
+ * NOTE: arguably, we should also wait for all the other
+ * RX channels to quiesce (how??) and then temporarily
+ * disable RXCPPI_CTRL_REG ... but it seems that we can
+ * rely on the controller restarting from state ram, with
+ * only RXCPPI_BUFCNT state being bogus. BUFCNT will
+ * correct itself after the next DMA transfer though.
+ *
+ * REVISIT does using rndis mode change that?
+ */
+ cppi_reset_rx(cppi_ch->state_ram);
+
+ /* next DMA request _should_ load cppi head ptr */
+
+ /* ... we don't "free" that list, only mutate it in place. */
+ cppi_dump_rx(5, cppi_ch, " (done abort)");
+
+ /* clean up previously pending bds */
+ cppi_bd_free(cppi_ch, cppi_ch->last_processed);
+ cppi_ch->last_processed = NULL;
+
+ while (queue) {
+ struct cppi_descriptor *tmp = queue->next;
+
+ cppi_bd_free(cppi_ch, queue);
+ queue = tmp;
+ }
+ }
+
+ channel->status = MUSB_DMA_STATUS_FREE;
+ cppi_ch->buf_dma = 0;
+ cppi_ch->offset = 0;
+ cppi_ch->buf_len = 0;
+ cppi_ch->maxpacket = 0;
+ return 0;
+}
+
+/* TBD Queries:
+ *
+ * Power Management ... probably turn off cppi during suspend, restart;
+ * check state ram? Clocking is presumably shared with usb core.
+ */
diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h
new file mode 100644
index 000000000000..fc5216b5d2c5
--- /dev/null
+++ b/drivers/usb/musb/cppi_dma.h
@@ -0,0 +1,133 @@
+/* Copyright (C) 2005-2006 by Texas Instruments */
+
+#ifndef _CPPI_DMA_H_
+#define _CPPI_DMA_H_
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/dmapool.h>
+
+#include "musb_dma.h"
+#include "musb_core.h"
+
+
+/* FIXME fully isolate CPPI from DaVinci ... the "CPPI generic" registers
+ * would seem to be shared with the TUSB6020 (over VLYNQ).
+ */
+
+#include "davinci.h"
+
+
+/* CPPI RX/TX state RAM */
+
+struct cppi_tx_stateram {
+ u32 tx_head; /* "DMA packet" head descriptor */
+ u32 tx_buf;
+ u32 tx_current; /* current descriptor */
+ u32 tx_buf_current;
+ u32 tx_info; /* flags, remaining buflen */
+ u32 tx_rem_len;
+ u32 tx_dummy; /* unused */
+ u32 tx_complete;
+};
+
+struct cppi_rx_stateram {
+ u32 rx_skipbytes;
+ u32 rx_head;
+ u32 rx_sop; /* "DMA packet" head descriptor */
+ u32 rx_current; /* current descriptor */
+ u32 rx_buf_current;
+ u32 rx_len_len;
+ u32 rx_cnt_cnt;
+ u32 rx_complete;
+};
+
+/* hw_options bits in CPPI buffer descriptors */
+#define CPPI_SOP_SET ((u32)(1 << 31))
+#define CPPI_EOP_SET ((u32)(1 << 30))
+#define CPPI_OWN_SET ((u32)(1 << 29)) /* owned by cppi */
+#define CPPI_EOQ_MASK ((u32)(1 << 28))
+#define CPPI_ZERO_SET ((u32)(1 << 23)) /* rx saw zlp; tx issues one */
+#define CPPI_RXABT_MASK ((u32)(1 << 19)) /* need more rx buffers */
+
+#define CPPI_RECV_PKTLEN_MASK 0xFFFF
+#define CPPI_BUFFER_LEN_MASK 0xFFFF
+
+#define CPPI_TEAR_READY ((u32)(1 << 31))
+
+/* CPPI data structure definitions */
+
+#define CPPI_DESCRIPTOR_ALIGN 16 /* bytes; 5-dec docs say 4-byte align */
+
+struct cppi_descriptor {
+ /* hardware overlay */
+ u32 hw_next; /* next buffer descriptor Pointer */
+ u32 hw_bufp; /* i/o buffer pointer */
+ u32 hw_off_len; /* buffer_offset16, buffer_length16 */
+ u32 hw_options; /* flags: SOP, EOP etc*/
+
+ struct cppi_descriptor *next;
+ dma_addr_t dma; /* address of this descriptor */
+ u32 buflen; /* for RX: original buffer length */
+} __attribute__ ((aligned(CPPI_DESCRIPTOR_ALIGN)));
+
+
+struct cppi;
+
+/* CPPI Channel Control structure */
+struct cppi_channel {
+ struct dma_channel channel;
+
+ /* back pointer to the DMA controller structure */
+ struct cppi *controller;
+
+ /* which direction of which endpoint? */
+ struct musb_hw_ep *hw_ep;
+ bool transmit;
+ u8 index;
+
+ /* DMA modes: RNDIS or "transparent" */
+ u8 is_rndis;
+
+ /* book keeping for current transfer request */
+ dma_addr_t buf_dma;
+ u32 buf_len;
+ u32 maxpacket;
+ u32 offset; /* dma requested */
+
+ void __iomem *state_ram; /* CPPI state */
+
+ struct cppi_descriptor *freelist;
+
+ /* BD management fields */
+ struct cppi_descriptor *head;
+ struct cppi_descriptor *tail;
+ struct cppi_descriptor *last_processed;
+
+ /* use tx_complete in host role to track endpoints waiting for
+ * FIFONOTEMPTY to clear.
+ */
+ struct list_head tx_complete;
+};
+
+/* CPPI DMA controller object */
+struct cppi {
+ struct dma_controller controller;
+ struct musb *musb;
+ void __iomem *mregs; /* Mentor regs */
+ void __iomem *tibase; /* TI/CPPI regs */
+
+ struct cppi_channel tx[MUSB_C_NUM_EPT - 1];
+ struct cppi_channel rx[MUSB_C_NUM_EPR - 1];
+
+ struct dma_pool *pool;
+
+ struct list_head tx_complete;
+};
+
+/* irq handling hook */
+extern void cppi_completion(struct musb *, u32 rx, u32 tx);
+
+#endif /* end of ifndef _CPPI_DMA_H_ */
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
new file mode 100644
index 000000000000..75baf181a8cd
--- /dev/null
+++ b/drivers/usb/musb/davinci.c
@@ -0,0 +1,462 @@
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/arch/memory.h>
+#include <asm/arch/gpio.h>
+#include <asm/mach-types.h>
+
+#include "musb_core.h"
+
+#ifdef CONFIG_MACH_DAVINCI_EVM
+#include <asm/arch/i2c-client.h>
+#endif
+
+#include "davinci.h"
+#include "cppi_dma.h"
+
+
+/* REVISIT (PM) we should be able to keep the PHY in low power mode most
+ * of the time (24 MHZ oscillator and PLL off, etc) by setting POWER.D0
+ * and, when in host mode, autosuspending idle root ports... PHYPLLON
+ * (overriding SUSPENDM?) then likely needs to stay off.
+ */
+
+static inline void phy_on(void)
+{
+ /* start the on-chip PHY and its PLL */
+ __raw_writel(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON,
+ (void __force __iomem *) IO_ADDRESS(USBPHY_CTL_PADDR));
+ while ((__raw_readl((void __force __iomem *)
+ IO_ADDRESS(USBPHY_CTL_PADDR))
+ & USBPHY_PHYCLKGD) == 0)
+ cpu_relax();
+}
+
+static inline void phy_off(void)
+{
+ /* powerdown the on-chip PHY and its oscillator */
+ __raw_writel(USBPHY_OSCPDWN | USBPHY_PHYPDWN, (void __force __iomem *)
+ IO_ADDRESS(USBPHY_CTL_PADDR));
+}
+
+static int dma_off = 1;
+
+void musb_platform_enable(struct musb *musb)
+{
+ u32 tmp, old, val;
+
+ /* workaround: setup irqs through both register sets */
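+	/* enable TX endpoint irqs, then RX endpoint irqs (excluding ep0),
+	 * then all core USB irqs except SOF, accumulating the mask
+	 */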
+ tmp = (musb->epmask & DAVINCI_USB_TX_ENDPTS_MASK)
+ << DAVINCI_USB_TXINT_SHIFT;
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
+ old = tmp;
+ tmp = (musb->epmask & (0xfffe & DAVINCI_USB_RX_ENDPTS_MASK))
+ << DAVINCI_USB_RXINT_SHIFT;
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
+ tmp |= old;
+
+ val = ~MUSB_INTR_SOF;
+ tmp |= ((val & 0x01ff) << DAVINCI_USB_USBINT_SHIFT);
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
+
+ if (is_dma_capable() && !dma_off)
+ printk(KERN_WARNING "%s %s: dma not reactivated\n",
+ __FILE__, __func__);
+ else
+ dma_off = 0;
+
+ /* force a DRVVBUS irq so we can start polling for ID change */
+ if (is_otg_enabled(musb))
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
+ DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT);
+}
+
+/*
+ * Disable the HDRC and flush interrupts
+ */
+void musb_platform_disable(struct musb *musb)
+{
+	/* because we don't set CTRLR.UINT, it's important to:
+ * - not read/write INTRUSB/INTRUSBE
+ * - (except during initial setup, as workaround)
+ * - use INTSETR/INTCLRR instead
+ */
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_CLR_REG,
+ DAVINCI_USB_USBINT_MASK
+ | DAVINCI_USB_TXINT_MASK
+ | DAVINCI_USB_RXINT_MASK);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ musb_writel(musb->ctrl_base, DAVINCI_USB_EOI_REG, 0);
+
+ if (is_dma_capable() && !dma_off)
+ WARNING("dma still active\n");
+}
+
+
+/* REVISIT it's not clear whether DaVinci can support full OTG. */
+
+static int vbus_state = -1;
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+#define portstate(stmt) stmt
+#else
+#define portstate(stmt)
+#endif
+
+
+/* VBUS SWITCHING IS BOARD-SPECIFIC */
+
+#ifdef CONFIG_MACH_DAVINCI_EVM
+#ifndef CONFIG_MACH_DAVINCI_EVM_OTG
+
+/* I2C operations are always synchronous, and require a task context.
+ * With unloaded systems, using the shared workqueue seems to suffice
+ * to satisfy the 100msec A_WAIT_VRISE timeout...
+ */
+static void evm_deferred_drvvbus(struct work_struct *ignored)
+{
+ davinci_i2c_expander_op(0x3a, USB_DRVVBUS, vbus_state);
+ vbus_state = !vbus_state;
+}
+static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus);
+
+#endif /* modified board */
+#endif /* EVM */
+
+static void davinci_source_power(struct musb *musb, int is_on, int immediate)
+{
+ if (is_on)
+ is_on = 1;
+
+ if (vbus_state == is_on)
+ return;
+ vbus_state = !is_on; /* 0/1 vs "-1 == unknown/init" */
+
+#ifdef CONFIG_MACH_DAVINCI_EVM
+ if (machine_is_davinci_evm()) {
+#ifdef CONFIG_MACH_DAVINCI_EVM_OTG
+ /* modified EVM board switching VBUS with GPIO(6) not I2C
+ * NOTE: PINMUX0.RGB888 (bit23) must be clear
+ */
+ if (is_on)
+ gpio_set(GPIO(6));
+ else
+ gpio_clear(GPIO(6));
+ immediate = 1;
+#else
+ if (immediate)
+ davinci_i2c_expander_op(0x3a, USB_DRVVBUS, !is_on);
+ else
+ schedule_work(&evm_vbus_work);
+#endif
+ }
+#endif
+ if (immediate)
+ vbus_state = is_on;
+}
+
+static void davinci_set_vbus(struct musb *musb, int is_on)
+{
+ WARN_ON(is_on && is_peripheral_active(musb));
+ davinci_source_power(musb, is_on, 0);
+}
+
+
+#define POLL_SECONDS 2
+
+static struct timer_list otg_workaround;
+
+static void otg_timer(unsigned long _musb)
+{
+ struct musb *musb = (void *)_musb;
+ void __iomem *mregs = musb->mregs;
+ u8 devctl;
+ unsigned long flags;
+
+	/* We poll because DaVinci won't expose several OTG-critical
+ * status change events (from the transceiver) otherwise.
+ */
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ DBG(7, "poll devctl %02x (%s)\n", devctl, otg_state_string(musb));
+
+ spin_lock_irqsave(&musb->lock, flags);
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_WAIT_VFALL:
+ /* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL
+ * seems to mis-handle session "start" otherwise (or in our
+ * case "recover"), in routine "VBUS was valid by the time
+ * VBUSERR got reported during enumeration" cases.
+ */
+ if (devctl & MUSB_DEVCTL_VBUS) {
+ mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ break;
+ }
+ musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+ musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
+ MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT);
+ break;
+ case OTG_STATE_B_IDLE:
+ if (!is_peripheral_enabled(musb))
+ break;
+
+ /* There's no ID-changed IRQ, so we have no good way to tell
+ * when to switch to the A-Default state machine (by setting
+ * the DEVCTL.SESSION flag).
+ *
+ * Workaround: whenever we're in B_IDLE, try setting the
+ * session flag every few seconds. If it works, ID was
+ * grounded and we're now in the A-Default state machine.
+ *
+ * NOTE setting the session flag is _supposed_ to trigger
+ * SRP, but clearly it doesn't.
+ */
+ musb_writeb(mregs, MUSB_DEVCTL,
+ devctl | MUSB_DEVCTL_SESSION);
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE)
+ mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ else
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static irqreturn_t davinci_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
+ void __iomem *tibase = musb->ctrl_base;
+ u32 tmp;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ /* NOTE: DaVinci shadows the Mentor IRQs. Don't manage them through
+ * the Mentor registers (except for setup), use the TI ones and EOI.
+ *
+	 * Docs describe irq "vector" registers associated with the CPPI and
+ * USB EOI registers. These hold a bitmask corresponding to the
+ * current IRQ, not an irq handler address. Would using those bits
+ * resolve some of the races observed in this dispatch code??
+ */
+
+ /* CPPI interrupts share the same IRQ line, but have their own
+ * mask, state, "vector", and EOI registers.
+ */
+ if (is_cppi_enabled()) {
+ u32 cppi_tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG);
+ u32 cppi_rx = musb_readl(tibase, DAVINCI_RXCPPI_MASKED_REG);
+
+ if (cppi_tx || cppi_rx) {
+ DBG(4, "CPPI IRQ t%x r%x\n", cppi_tx, cppi_rx);
+ cppi_completion(musb, cppi_rx, cppi_tx);
+ retval = IRQ_HANDLED;
+ }
+ }
+
+ /* ack and handle non-CPPI interrupts */
+ tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG);
+ musb_writel(tibase, DAVINCI_USB_INT_SRC_CLR_REG, tmp);
+ DBG(4, "IRQ %08x\n", tmp);
+
+ musb->int_rx = (tmp & DAVINCI_USB_RXINT_MASK)
+ >> DAVINCI_USB_RXINT_SHIFT;
+ musb->int_tx = (tmp & DAVINCI_USB_TXINT_MASK)
+ >> DAVINCI_USB_TXINT_SHIFT;
+ musb->int_usb = (tmp & DAVINCI_USB_USBINT_MASK)
+ >> DAVINCI_USB_USBINT_SHIFT;
+
+ /* DRVVBUS irqs are the only proxy we have (a very poor one!) for
+ * DaVinci's missing ID change IRQ. We need an ID change IRQ to
+ * switch appropriately between halves of the OTG state machine.
+ * Managing DEVCTL.SESSION per Mentor docs requires we know its
+ * value, but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set.
+ * Also, DRVVBUS pulses for SRP (but not at 5V) ...
+ */
+ if (tmp & (DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT)) {
+ int drvvbus = musb_readl(tibase, DAVINCI_USB_STAT_REG);
+ void __iomem *mregs = musb->mregs;
+ u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
+		int err;
+
+		err = is_host_enabled(musb)
+			&& (musb->int_usb & MUSB_INTR_VBUSERROR);
+ if (err) {
+ /* The Mentor core doesn't debounce VBUS as needed
+ * to cope with device connect current spikes. This
+ * means it's not uncommon for bus-powered devices
+ * to get VBUS errors during enumeration.
+ *
+ * This is a workaround, but newer RTL from Mentor
+ * seems to allow a better one: "re"starting sessions
+ * without waiting (on EVM, a **long** time) for VBUS
+ * to stop registering in devctl.
+ */
+ musb->int_usb &= ~MUSB_INTR_VBUSERROR;
+ musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+ mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+ WARNING("VBUS error workaround (delay coming)\n");
+ } else if (is_host_enabled(musb) && drvvbus) {
+ musb->is_active = 1;
+ MUSB_HST_MODE(musb);
+ musb->xceiv.default_a = 1;
+ musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+ portstate(musb->port1_status |= USB_PORT_STAT_POWER);
+ del_timer(&otg_workaround);
+ } else {
+ musb->is_active = 0;
+ MUSB_DEV_MODE(musb);
+ musb->xceiv.default_a = 0;
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
+ }
+
+ /* NOTE: this must complete poweron within 100 msec */
+ davinci_source_power(musb, drvvbus, 0);
+ DBG(2, "VBUS %s (%s)%s, devctl %02x\n",
+ drvvbus ? "on" : "off",
+ otg_state_string(musb),
+ err ? " ERROR" : "",
+ devctl);
+ retval = IRQ_HANDLED;
+ }
+
+ if (musb->int_tx || musb->int_rx || musb->int_usb)
+ retval |= musb_interrupt(musb);
+
+ /* irq stays asserted until EOI is written */
+ musb_writel(tibase, DAVINCI_USB_EOI_REG, 0);
+
+ /* poll for ID change */
+ if (is_otg_enabled(musb)
+ && musb->xceiv.state == OTG_STATE_B_IDLE)
+ mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ /* REVISIT we sometimes get unhandled IRQs
+	 * (e.g. ep0); not clear why...
+ */
+ if (retval != IRQ_HANDLED)
+ DBG(5, "unhandled? %08x\n", tmp);
+ return IRQ_HANDLED;
+}
+
+int __init musb_platform_init(struct musb *musb)
+{
+ void __iomem *tibase = musb->ctrl_base;
+ u32 revision;
+
+ musb->mregs += DAVINCI_BASE_OFFSET;
+#if 0
+ /* REVISIT there's something odd about clocking, this
+	 * didn't appear to do the job ...
+ */
+ musb->clock = clk_get(pDevice, "usb");
+ if (IS_ERR(musb->clock))
+ return PTR_ERR(musb->clock);
+
+ status = clk_enable(musb->clock);
+ if (status < 0)
+ return -ENODEV;
+#endif
+
+ /* returns zero if e.g. not clocked */
+ revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG);
+ if (revision == 0)
+ return -ENODEV;
+
+ if (is_host_enabled(musb))
+ setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
+
+ musb->board_set_vbus = davinci_set_vbus;
+ davinci_source_power(musb, 0, 1);
+
+ /* reset the controller */
+ musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1);
+
+ /* start the on-chip PHY and its PLL */
+ phy_on();
+
+ msleep(5);
+
+ /* NOTE: irqs are in mixed mode, not bypass to pure-musb */
+ pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n",
+ revision, __raw_readl((void __force __iomem *)
+ IO_ADDRESS(USBPHY_CTL_PADDR)),
+ musb_readb(tibase, DAVINCI_USB_CTRL_REG));
+
+ musb->isr = davinci_interrupt;
+ return 0;
+}
+
+int musb_platform_exit(struct musb *musb)
+{
+ if (is_host_enabled(musb))
+ del_timer_sync(&otg_workaround);
+
+ davinci_source_power(musb, 0 /*off*/, 1);
+
+ /* delay, to avoid problems with module reload */
+ if (is_host_enabled(musb) && musb->xceiv.default_a) {
+ int maxdelay = 30;
+ u8 devctl, warn = 0;
+
+ /* if there's no peripheral connected, this can take a
+ * long time to fall, especially on EVM with huge C133.
+ */
+ do {
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if (!(devctl & MUSB_DEVCTL_VBUS))
+ break;
+ if ((devctl & MUSB_DEVCTL_VBUS) != warn) {
+ warn = devctl & MUSB_DEVCTL_VBUS;
+ DBG(1, "VBUS %d\n",
+ warn >> MUSB_DEVCTL_VBUS_SHIFT);
+ }
+ msleep(1000);
+ maxdelay--;
+ } while (maxdelay > 0);
+
+ /* in OTG mode, another host might be connected */
+ if (devctl & MUSB_DEVCTL_VBUS)
+ DBG(1, "VBUS off timeout (devctl %02x)\n", devctl);
+ }
+
+ phy_off();
+ return 0;
+}
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h
new file mode 100644
index 000000000000..7fb6238e270f
--- /dev/null
+++ b/drivers/usb/musb/davinci.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ */
+
+#ifndef __MUSB_HDRDF_H__
+#define __MUSB_HDRDF_H__
+
+/*
+ * DaVinci-specific definitions
+ */
+
+/* Integrated highspeed/otg PHY */
+#define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34)
+#define USBPHY_PHYCLKGD (1 << 8)
+#define USBPHY_SESNDEN (1 << 7) /* v(sess_end) comparator */
+#define USBPHY_VBDTCTEN (1 << 6) /* v(bus) comparator */
+#define USBPHY_PHYPLLON (1 << 4) /* override pll suspend */
+#define USBPHY_CLKO1SEL (1 << 3)
+#define USBPHY_OSCPDWN (1 << 2)
+#define USBPHY_PHYPDWN (1 << 0)
+
+/* For now include usb OTG module registers here */
+#define DAVINCI_USB_VERSION_REG 0x00
+#define DAVINCI_USB_CTRL_REG 0x04
+#define DAVINCI_USB_STAT_REG 0x08
+#define DAVINCI_RNDIS_REG 0x10
+#define DAVINCI_AUTOREQ_REG 0x14
+#define DAVINCI_USB_INT_SOURCE_REG 0x20
+#define DAVINCI_USB_INT_SET_REG 0x24
+#define DAVINCI_USB_INT_SRC_CLR_REG 0x28
+#define DAVINCI_USB_INT_MASK_REG 0x2c
+#define DAVINCI_USB_INT_MASK_SET_REG 0x30
+#define DAVINCI_USB_INT_MASK_CLR_REG 0x34
+#define DAVINCI_USB_INT_SRC_MASKED_REG 0x38
+#define DAVINCI_USB_EOI_REG 0x3c
+#define DAVINCI_USB_EOI_INTVEC 0x40
+
+/* BEGIN CPPI-generic (?) */
+
+/* CPPI related registers */
+#define DAVINCI_TXCPPI_CTRL_REG 0x80
+#define DAVINCI_TXCPPI_TEAR_REG 0x84
+#define DAVINCI_CPPI_EOI_REG 0x88
+#define DAVINCI_CPPI_INTVEC_REG 0x8c
+#define DAVINCI_TXCPPI_MASKED_REG 0x90
+#define DAVINCI_TXCPPI_RAW_REG 0x94
+#define DAVINCI_TXCPPI_INTENAB_REG 0x98
+#define DAVINCI_TXCPPI_INTCLR_REG 0x9c
+
+#define DAVINCI_RXCPPI_CTRL_REG 0xC0
+#define DAVINCI_RXCPPI_MASKED_REG 0xD0
+#define DAVINCI_RXCPPI_RAW_REG 0xD4
+#define DAVINCI_RXCPPI_INTENAB_REG 0xD8
+#define DAVINCI_RXCPPI_INTCLR_REG 0xDC
+
+#define DAVINCI_RXCPPI_BUFCNT0_REG 0xE0
+#define DAVINCI_RXCPPI_BUFCNT1_REG 0xE4
+#define DAVINCI_RXCPPI_BUFCNT2_REG 0xE8
+#define DAVINCI_RXCPPI_BUFCNT3_REG 0xEC
+
+/* CPPI state RAM entries */
+#define DAVINCI_CPPI_STATERAM_BASE_OFFSET 0x100
+
+#define DAVINCI_TXCPPI_STATERAM_OFFSET(chnum) \
+ (DAVINCI_CPPI_STATERAM_BASE_OFFSET + ((chnum) * 0x40))
+#define DAVINCI_RXCPPI_STATERAM_OFFSET(chnum) \
+ (DAVINCI_CPPI_STATERAM_BASE_OFFSET + 0x20 + ((chnum) * 0x40))
+
+/* CPPI masks */
+#define DAVINCI_DMA_CTRL_ENABLE 1
+#define DAVINCI_DMA_CTRL_DISABLE 0
+
+#define DAVINCI_DMA_ALL_CHANNELS_ENABLE 0xF
+#define DAVINCI_DMA_ALL_CHANNELS_DISABLE 0xF
+
+/* END CPPI-generic (?) */
+
+#define DAVINCI_USB_TX_ENDPTS_MASK 0x1f /* ep0 + 4 tx */
+#define DAVINCI_USB_RX_ENDPTS_MASK 0x1e /* 4 rx */
+
+#define DAVINCI_USB_USBINT_SHIFT 16
+#define DAVINCI_USB_TXINT_SHIFT 0
+#define DAVINCI_USB_RXINT_SHIFT 8
+
+#define DAVINCI_INTR_DRVVBUS 0x0100
+
+#define DAVINCI_USB_USBINT_MASK 0x01ff0000 /* 8 Mentor, DRVVBUS */
+#define DAVINCI_USB_TXINT_MASK \
+ (DAVINCI_USB_TX_ENDPTS_MASK << DAVINCI_USB_TXINT_SHIFT)
+#define DAVINCI_USB_RXINT_MASK \
+ (DAVINCI_USB_RX_ENDPTS_MASK << DAVINCI_USB_RXINT_SHIFT)
+
+#define DAVINCI_BASE_OFFSET 0x400
+
+#endif /* __MUSB_HDRDF_H__ */
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
new file mode 100644
index 000000000000..c5b8f0296fcf
--- /dev/null
+++ b/drivers/usb/musb/musb_core.c
@@ -0,0 +1,2253 @@
+/*
+ * MUSB OTG driver core code
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
+ *
+ * This consists of a Host Controller Driver (HCD) and a peripheral
+ * controller driver implementing the "Gadget" API; OTG support is
+ * in the works. These are normal Linux-USB controller drivers which
+ * use IRQs and have no dedicated thread.
+ *
+ * This version of the driver has only been used with products from
+ * Texas Instruments. Those products integrate the Inventra logic
+ * with other DMA, IRQ, and bus modules, as well as other logic that
+ * needs to be reflected in this driver.
+ *
+ *
+ * NOTE: the original Mentor code here was pretty much a collection
+ * of mechanisms that don't seem to have been fully integrated/working
+ * for *any* Linux kernel version. This version aims at Linux 2.6.now.
+ * Key open issues include:
+ *
+ * - Lack of host-side transaction scheduling, for all transfer types.
+ * The hardware doesn't do it; instead, software must.
+ *
+ * This is not an issue for OTG devices that don't support external
+ * hubs, but for more "normal" USB hosts it's a user issue that the
+ * "multipoint" support doesn't scale in the expected ways. That
+ * includes DaVinci EVM in a common non-OTG mode.
+ *
+ * * Control and bulk use dedicated endpoints, and there's as
+ * yet no mechanism to either (a) reclaim the hardware when
+ * peripherals are NAKing, which gets complicated with bulk
+ * endpoints, or (b) use more than a single bulk endpoint in
+ * each direction.
+ *
+ * RESULT: one device may be perceived as blocking another one.
+ *
+ * * Interrupt and isochronous will dynamically allocate endpoint
+ * hardware, but (a) there's no record keeping for bandwidth;
+ * (b) in the common case that few endpoints are available, there
+ * is no mechanism to reuse endpoints to talk to multiple devices.
+ *
+ *      RESULT:  At one extreme, bandwidth can be overcommitted in
+ *      some hardware configurations, and no faults will be reported.
+ * At the other extreme, the bandwidth capabilities which do
+ * exist tend to be severely undercommitted. You can't yet hook
+ * up both a keyboard and a mouse to an external USB hub.
+ */
+
+/*
+ * This gets many kinds of configuration information:
+ * - Kconfig for everything user-configurable
+ * - <asm/arch/hdrc_cnf.h> for SOC or family details
+ * - platform_device for addressing, irq, and platform_data
+ *	- platform_data is mostly for board-specific information
+ *
+ * Most of the conditional compilation will (someday) vanish.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#ifdef CONFIG_ARM
+#include <asm/arch/hardware.h>
+#include <asm/arch/memory.h>
+#include <asm/mach-types.h>
+#endif
+
+#include "musb_core.h"
+
+
+#ifdef CONFIG_ARCH_DAVINCI
+#include "davinci.h"
+#endif
+
+
+
+unsigned debug;
+module_param(debug, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug message level. Default = 0");
+
+#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
+#define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"
+
+#define MUSB_VERSION "6.0"
+
+#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION
+
+#define MUSB_DRIVER_NAME "musb_hdrc"
+const char musb_driver_name[] = MUSB_DRIVER_NAME;
+
+MODULE_DESCRIPTION(DRIVER_INFO);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
+
+
+/*-------------------------------------------------------------------------*/
+
+static inline struct musb *dev_to_musb(struct device *dev)
+{
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* usbcore insists dev->driver_data is a "struct hcd *" */
+ return hcd_to_musb(dev_get_drvdata(dev));
+#else
+ return dev_get_drvdata(dev);
+#endif
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifndef CONFIG_USB_TUSB6010
+/*
+ * Load an endpoint's FIFO
+ */
+void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
+{
+ void __iomem *fifo = hw_ep->fifo;
+
+ prefetch((u8 *)src);
+
+ DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+ 'T', hw_ep->epnum, fifo, len, src);
+
+ /* we can't assume unaligned reads work */
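+	/* Illustration: a 2-byte-aligned 7-byte buffer takes the halfword
+	 * path below: one writesw() of three halfwords, then one
+	 * musb_writeb() for the trailing byte.
+	 */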
+ if (likely((0x01 & (unsigned long) src) == 0)) {
+ u16 index = 0;
+
+ /* best case is 32bit-aligned source address */
+ if ((0x02 & (unsigned long) src) == 0) {
+ if (len >= 4) {
+ writesl(fifo, src + index, len >> 2);
+ index += len & ~0x03;
+ }
+ if (len & 0x02) {
+ musb_writew(fifo, 0, *(u16 *)&src[index]);
+ index += 2;
+ }
+ } else {
+ if (len >= 2) {
+ writesw(fifo, src + index, len >> 1);
+ index += len & ~0x01;
+ }
+ }
+ if (len & 0x01)
+ musb_writeb(fifo, 0, src[index]);
+ } else {
+ /* byte aligned */
+ writesb(fifo, src, len);
+ }
+}
+
+/*
+ * Unload an endpoint's FIFO
+ */
+void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
+{
+ void __iomem *fifo = hw_ep->fifo;
+
+ DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+ 'R', hw_ep->epnum, fifo, len, dst);
+
+ /* we can't assume unaligned writes work */
+ if (likely((0x01 & (unsigned long) dst) == 0)) {
+ u16 index = 0;
+
+ /* best case is 32bit-aligned destination address */
+ if ((0x02 & (unsigned long) dst) == 0) {
+ if (len >= 4) {
+ readsl(fifo, dst, len >> 2);
+ index = len & ~0x03;
+ }
+ if (len & 0x02) {
+ *(u16 *)&dst[index] = musb_readw(fifo, 0);
+ index += 2;
+ }
+ } else {
+ if (len >= 2) {
+ readsw(fifo, dst, len >> 1);
+ index = len & ~0x01;
+ }
+ }
+ if (len & 0x01)
+ dst[index] = musb_readb(fifo, 0);
+ } else {
+ /* byte aligned */
+ readsb(fifo, dst, len);
+ }
+}
+
+#endif /* normal PIO */
+
+
+/*-------------------------------------------------------------------------*/
+
+/* for high speed test mode; see USB 2.0 spec 7.1.20 */
+static const u8 musb_test_packet[53] = {
+ /* implicit SYNC then DATA0 to start */
+
+ /* JKJKJKJK x9 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* JJKKJJKK x8 */
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ /* JJJJKKKK x8 */
+ 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
+ /* JJJJJJJKKKKKKK x8 */
+ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ /* JJJJJJJK x8 */
+ 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
+ /* JKKKKKKK x10, JK */
+ 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e
+
+ /* implicit CRC16 then EOP to end */
+};
+
+void musb_load_testpacket(struct musb *musb)
+{
+ void __iomem *regs = musb->endpoints[0].regs;
+
+ musb_ep_select(musb->mregs, 0);
+ musb_write_fifo(musb->control_ep,
+ sizeof(musb_test_packet), musb_test_packet);
+ musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
+}
+
+/*-------------------------------------------------------------------------*/
+
+const char *otg_state_string(struct musb *musb)
+{
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_IDLE: return "a_idle";
+ case OTG_STATE_A_WAIT_VRISE: return "a_wait_vrise";
+ case OTG_STATE_A_WAIT_BCON: return "a_wait_bcon";
+ case OTG_STATE_A_HOST: return "a_host";
+ case OTG_STATE_A_SUSPEND: return "a_suspend";
+ case OTG_STATE_A_PERIPHERAL: return "a_peripheral";
+ case OTG_STATE_A_WAIT_VFALL: return "a_wait_vfall";
+ case OTG_STATE_A_VBUS_ERR: return "a_vbus_err";
+ case OTG_STATE_B_IDLE: return "b_idle";
+ case OTG_STATE_B_SRP_INIT: return "b_srp_init";
+ case OTG_STATE_B_PERIPHERAL: return "b_peripheral";
+ case OTG_STATE_B_WAIT_ACON: return "b_wait_acon";
+ case OTG_STATE_B_HOST: return "b_host";
+ default: return "UNDEFINED";
+ }
+}
+
+#ifdef CONFIG_USB_MUSB_OTG
+
+/*
+ * See also USB_OTG_1-3.pdf 6.6.5 Timers
+ * REVISIT: Are the other timers done in the hardware?
+ */
+#define TB_ASE0_BRST 100 /* Min 3.125 ms */
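+/* TB_ASE0_BRST is in milliseconds; it is passed to msecs_to_jiffies()
+ * when arming musb_otg_timer for the b_ase0_brst timeout below.
+ */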
+
+/*
+ * Handles OTG hnp timeouts, such as b_ase0_brst
+ */
+void musb_otg_timer_func(unsigned long data)
+{
+ struct musb *musb = (struct musb *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ switch (musb->xceiv.state) {
+ case OTG_STATE_B_WAIT_ACON:
+ DBG(1, "HNP: b_wait_acon timeout; back to b_peripheral\n");
+ musb_g_disconnect(musb);
+ musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb->is_active = 0;
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ DBG(1, "HNP: a_wait_bcon timeout; back to a_host\n");
+ musb_hnp_stop(musb);
+ break;
+ default:
+ DBG(1, "HNP: Unhandled mode %s\n", otg_state_string(musb));
+ }
+ musb->ignore_disconnect = 0;
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static DEFINE_TIMER(musb_otg_timer, musb_otg_timer_func, 0, 0);
+
+/*
+ * Stops the B-device HNP state. Caller must take care of locking.
+ */
+void musb_hnp_stop(struct musb *musb)
+{
+ struct usb_hcd *hcd = musb_to_hcd(musb);
+ void __iomem *mbase = musb->mregs;
+ u8 reg;
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_PERIPHERAL:
+ case OTG_STATE_A_WAIT_VFALL:
+ case OTG_STATE_A_WAIT_BCON:
+ DBG(1, "HNP: Switching back to A-host\n");
+ musb_g_disconnect(musb);
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ musb->is_active = 0;
+ break;
+ case OTG_STATE_B_HOST:
+ DBG(1, "HNP: Disabling HR\n");
+ hcd->self.is_b_host = 0;
+ musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ MUSB_DEV_MODE(musb);
+ reg = musb_readb(mbase, MUSB_POWER);
+ reg |= MUSB_POWER_SUSPENDM;
+ musb_writeb(mbase, MUSB_POWER, reg);
+ /* REVISIT: Start SESSION_REQUEST here? */
+ break;
+ default:
+ DBG(1, "HNP: Stopping in unknown state %s\n",
+ otg_state_string(musb));
+ }
+
+ /*
+ * When returning to A state after HNP, avoid hub_port_rebounce(),
+	 * which causes occasional OPT A "Did not receive reset after connect"
+ * errors.
+ */
+ musb->port1_status &=
+ ~(1 << USB_PORT_FEAT_C_CONNECTION);
+}
+
+#endif
+
+/*
+ * Interrupt Service Routine to record USB "global" interrupts.
+ * Since these do not happen often and signify things of
+ * paramount importance, it seems OK to check them individually;
+ * the order of the tests is specified in the manual
+ *
+ * @param musb instance pointer
+ * @param int_usb register contents
+ * @param devctl
+ * @param power
+ */
+
+#define STAGE0_MASK (MUSB_INTR_RESUME | MUSB_INTR_SESSREQ \
+ | MUSB_INTR_VBUSERROR | MUSB_INTR_CONNECT \
+ | MUSB_INTR_RESET)
+
+static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ u8 devctl, u8 power)
+{
+ irqreturn_t handled = IRQ_NONE;
+ void __iomem *mbase = musb->mregs;
+
+ DBG(3, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl,
+ int_usb);
+
+ /* in host mode, the peripheral may issue remote wakeup.
+ * in peripheral mode, the host may resume the link.
+ * spurious RESUME irqs happen too, paired with SUSPEND.
+ */
+ if (int_usb & MUSB_INTR_RESUME) {
+ handled = IRQ_HANDLED;
+ DBG(3, "RESUME (%s)\n", otg_state_string(musb));
+
+ if (devctl & MUSB_DEVCTL_HM) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_SUSPEND:
+ /* remote wakeup? later, GetPortStatus
+ * will stop RESUME signaling
+ */
+
+ if (power & MUSB_POWER_SUSPENDM) {
+ /* spurious */
+ musb->int_usb &= ~MUSB_INTR_SUSPEND;
+ DBG(2, "Spurious SUSPENDM\n");
+ break;
+ }
+
+ power &= ~MUSB_POWER_SUSPENDM;
+ musb_writeb(mbase, MUSB_POWER,
+ power | MUSB_POWER_RESUME);
+
+ musb->port1_status |=
+ (USB_PORT_STAT_C_SUSPEND << 16)
+ | MUSB_PORT_STAT_RESUME;
+ musb->rh_timer = jiffies
+ + msecs_to_jiffies(20);
+
+ musb->xceiv.state = OTG_STATE_A_HOST;
+ musb->is_active = 1;
+ usb_hcd_resume_root_hub(musb_to_hcd(musb));
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb->is_active = 1;
+ MUSB_DEV_MODE(musb);
+ break;
+ default:
+ WARNING("bogus %s RESUME (%s)\n",
+ "host",
+ otg_state_string(musb));
+ }
+#endif
+ } else {
+ switch (musb->xceiv.state) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ case OTG_STATE_A_SUSPEND:
+ /* possibly DISCONNECT is upcoming */
+ musb->xceiv.state = OTG_STATE_A_HOST;
+ usb_hcd_resume_root_hub(musb_to_hcd(musb));
+ break;
+#endif
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ case OTG_STATE_B_WAIT_ACON:
+ case OTG_STATE_B_PERIPHERAL:
+ /* disconnect while suspended? we may
+ * not get a disconnect irq...
+ */
+ if ((devctl & MUSB_DEVCTL_VBUS)
+ != (3 << MUSB_DEVCTL_VBUS_SHIFT)
+ ) {
+ musb->int_usb |= MUSB_INTR_DISCONNECT;
+ musb->int_usb &= ~MUSB_INTR_SUSPEND;
+ break;
+ }
+ musb_g_resume(musb);
+ break;
+ case OTG_STATE_B_IDLE:
+ musb->int_usb &= ~MUSB_INTR_SUSPEND;
+ break;
+#endif
+ default:
+ WARNING("bogus %s RESUME (%s)\n",
+ "peripheral",
+ otg_state_string(musb));
+ }
+ }
+ }
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* see manual for the order of the tests */
+ if (int_usb & MUSB_INTR_SESSREQ) {
+ DBG(1, "SESSION_REQUEST (%s)\n", otg_state_string(musb));
+
+ /* IRQ arrives from ID pin sense or (later, if VBUS power
+ * is removed) SRP. responses are time critical:
+ * - turn on VBUS (with silicon-specific mechanism)
+ * - go through A_WAIT_VRISE
+ * - ... to A_WAIT_BCON.
+ * a_wait_vrise_tmout triggers VBUS_ERROR transitions
+ */
+ musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
+ musb->ep0_stage = MUSB_EP0_START;
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ musb_set_vbus(musb, 1);
+
+ handled = IRQ_HANDLED;
+ }
+
+ if (int_usb & MUSB_INTR_VBUSERROR) {
+ int ignore = 0;
+
+		/* During connection as an A-Device, we may see short
+		 * current spikes causing voltage drops, because of cable
+ * and peripheral capacitance combined with vbus draw.
+ * (So: less common with truly self-powered devices, where
+ * vbus doesn't act like a power supply.)
+ *
+ * Such spikes are short; usually less than ~500 usec, max
+ * of ~2 msec. That is, they're not sustained overcurrent
+ * errors, though they're reported using VBUSERROR irqs.
+ *
+ * Workarounds: (a) hardware: use self powered devices.
+ * (b) software: ignore non-repeated VBUS errors.
+ *
+ * REVISIT: do delays from lots of DEBUG_KERNEL checks
+ * make trouble here, keeping VBUS < 4.4V ?
+ */
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_HOST:
+ /* recovery is dicey once we've gotten past the
+ * initial stages of enumeration, but if VBUS
+ * stayed ok at the other end of the link, and
+ * another reset is due (at least for high speed,
+ * to redo the chirp etc), it might work OK...
+ */
+ case OTG_STATE_A_WAIT_BCON:
+ case OTG_STATE_A_WAIT_VRISE:
+ if (musb->vbuserr_retry) {
+ musb->vbuserr_retry--;
+ ignore = 1;
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(mbase, MUSB_DEVCTL, devctl);
+ } else {
+ musb->port1_status |=
+ (1 << USB_PORT_FEAT_OVER_CURRENT)
+ | (1 << USB_PORT_FEAT_C_OVER_CURRENT);
+ }
+ break;
+ default:
+ break;
+ }
+
+ DBG(1, "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
+ otg_state_string(musb),
+ devctl,
+ ({ char *s;
+ switch (devctl & MUSB_DEVCTL_VBUS) {
+ case 0 << MUSB_DEVCTL_VBUS_SHIFT:
+ s = "<SessEnd"; break;
+ case 1 << MUSB_DEVCTL_VBUS_SHIFT:
+ s = "<AValid"; break;
+ case 2 << MUSB_DEVCTL_VBUS_SHIFT:
+ s = "<VBusValid"; break;
+ /* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
+ default:
+ s = "VALID"; break;
+ }; s; }),
+ VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
+ musb->port1_status);
+
+ /* go through A_WAIT_VFALL then start a new session */
+ if (!ignore)
+ musb_set_vbus(musb, 0);
+ handled = IRQ_HANDLED;
+ }
+
+ if (int_usb & MUSB_INTR_CONNECT) {
+ struct usb_hcd *hcd = musb_to_hcd(musb);
+
+ handled = IRQ_HANDLED;
+ musb->is_active = 1;
+ set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
+
+ musb->ep0_stage = MUSB_EP0_START;
+
+#ifdef CONFIG_USB_MUSB_OTG
+ /* flush endpoints when transitioning from Device Mode */
+ if (is_peripheral_active(musb)) {
+ /* REVISIT HNP; just force disconnect */
+ }
+ musb_writew(mbase, MUSB_INTRTXE, musb->epmask);
+ musb_writew(mbase, MUSB_INTRRXE, musb->epmask & 0xfffe);
+ musb_writeb(mbase, MUSB_INTRUSBE, 0xf7);
+#endif
+ musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
+ |USB_PORT_STAT_HIGH_SPEED
+ |USB_PORT_STAT_ENABLE
+ );
+ musb->port1_status |= USB_PORT_STAT_CONNECTION
+ |(USB_PORT_STAT_C_CONNECTION << 16);
+
+ /* high vs full speed is just a guess until after reset */
+ if (devctl & MUSB_DEVCTL_LSDEV)
+ musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
+
+ if (hcd->status_urb)
+ usb_hcd_poll_rh_status(hcd);
+ else
+ usb_hcd_resume_root_hub(hcd);
+
+ MUSB_HST_MODE(musb);
+
+ /* indicate new connection to OTG machine */
+ switch (musb->xceiv.state) {
+ case OTG_STATE_B_PERIPHERAL:
+ if (int_usb & MUSB_INTR_SUSPEND) {
+ DBG(1, "HNP: SUSPEND+CONNECT, now b_host\n");
+ musb->xceiv.state = OTG_STATE_B_HOST;
+ hcd->self.is_b_host = 1;
+ int_usb &= ~MUSB_INTR_SUSPEND;
+ } else
+ DBG(1, "CONNECT as b_peripheral???\n");
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ DBG(1, "HNP: Waiting to switch to b_host state\n");
+ musb->xceiv.state = OTG_STATE_B_HOST;
+ hcd->self.is_b_host = 1;
+ break;
+ default:
+ if ((devctl & MUSB_DEVCTL_VBUS)
+ == (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
+ musb->xceiv.state = OTG_STATE_A_HOST;
+ hcd->self.is_b_host = 0;
+ }
+ break;
+ }
+ DBG(1, "CONNECT (%s) devctl %02x\n",
+ otg_state_string(musb), devctl);
+ }
+#endif /* CONFIG_USB_MUSB_HDRC_HCD */
+
+ /* mentor saves a bit: bus reset and babble share the same irq.
+ * only host sees babble; only peripheral sees bus reset.
+ */
+ if (int_usb & MUSB_INTR_RESET) {
+ if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) {
+ /*
+ * Looks like non-HS BABBLE can be ignored, but
+ * HS BABBLE is an error condition. For HS the solution
+ * is to avoid babble in the first place and fix what
+ * caused BABBLE. When HS BABBLE happens we can only
+ * stop the session.
+ */
+ if (devctl & (MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV))
+ DBG(1, "BABBLE devctl: %02x\n", devctl);
+ else {
+ ERR("Stopping host session -- babble\n");
+ musb_writeb(mbase, MUSB_DEVCTL, 0);
+ }
+ } else if (is_peripheral_capable()) {
+ DBG(1, "BUS RESET as %s\n", otg_state_string(musb));
+ switch (musb->xceiv.state) {
+#ifdef CONFIG_USB_OTG
+ case OTG_STATE_A_SUSPEND:
+ /* We need to ignore disconnect on suspend
+ * otherwise tusb 2.0 won't reconnect after a
+ * power cycle, which breaks otg compliance.
+ */
+ musb->ignore_disconnect = 1;
+ musb_g_reset(musb);
+ /* FALLTHROUGH */
+ case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */
+ DBG(1, "HNP: Setting timer as %s\n",
+ otg_state_string(musb));
+ musb_otg_timer.data = (unsigned long)musb;
+ mod_timer(&musb_otg_timer, jiffies
+ + msecs_to_jiffies(100));
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ musb_hnp_stop(musb);
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ DBG(1, "HNP: RESET (%s), to b_peripheral\n",
+ otg_state_string(musb));
+ musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb_g_reset(musb);
+ break;
+#endif
+ case OTG_STATE_B_IDLE:
+ musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ /* FALLTHROUGH */
+ case OTG_STATE_B_PERIPHERAL:
+ musb_g_reset(musb);
+ break;
+ default:
+ DBG(1, "Unhandled BUS RESET as %s\n",
+ otg_state_string(musb));
+ }
+ }
+
+ handled = IRQ_HANDLED;
+ }
+ schedule_work(&musb->irq_work);
+
+ return handled;
+}
+
+/*
+ * Handle the "stage 2" global interrupts (DISCONNECT, SUSPEND); these
+ * are processed after the endpoint interrupts, so any pending FIFO
+ * traffic has been handled first.
+ *
+ * @param musb instance pointer
+ * @param int_usb register contents
+ * @param devctl
+ * @param power
+ */
+static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
+ u8 devctl, u8 power)
+{
+ irqreturn_t handled = IRQ_NONE;
+
+#if 0
+/* REVISIT ... this would be for multiplexing periodic endpoints, or
+ * supporting transfer phasing to prevent exceeding ISO bandwidth
+ * limits of a given frame or microframe.
+ *
+ * It's not needed for peripheral side, which dedicates endpoints;
+ * though it _might_ use SOF irqs for other purposes.
+ *
+ * And it's not currently needed for host side, which also dedicates
+ * endpoints, relies on TX/RX interval registers, and isn't claimed
+ * to support ISO transfers yet.
+ */
+ if (int_usb & MUSB_INTR_SOF) {
+ void __iomem *mbase = musb->mregs;
+ struct musb_hw_ep *ep;
+ u8 epnum;
+ u16 frame;
+
+ DBG(6, "START_OF_FRAME\n");
+ handled = IRQ_HANDLED;
+
+ /* start any periodic Tx transfers waiting for current frame */
+ frame = musb_readw(mbase, MUSB_FRAME);
+ ep = musb->endpoints;
+ for (epnum = 1; (epnum < musb->nr_endpoints)
+ && (musb->epmask >= (1 << epnum));
+ epnum++, ep++) {
+ /*
+ * FIXME handle framecounter wraps (12 bits)
+ * eliminate duplicated StartUrb logic
+ */
+ if (ep->dwWaitFrame >= frame) {
+ ep->dwWaitFrame = 0;
+ pr_debug("SOF --> periodic TX%s on %d\n",
+ ep->tx_channel ? " DMA" : "",
+ epnum);
+ if (!ep->tx_channel)
+ musb_h_tx_start(musb, epnum);
+ else
+ cppi_hostdma_start(musb, epnum);
+ }
+ } /* end of for loop */
+ }
+#endif
+
+ if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) {
+ DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n",
+ otg_state_string(musb),
+ MUSB_MODE(musb), devctl);
+ handled = IRQ_HANDLED;
+
+ switch (musb->xceiv.state) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ case OTG_STATE_A_HOST:
+ case OTG_STATE_A_SUSPEND:
+ musb_root_disconnect(musb);
+ if (musb->a_wait_bcon != 0)
+ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon));
+ break;
+#endif /* HOST */
+#ifdef CONFIG_USB_MUSB_OTG
+ case OTG_STATE_B_HOST:
+ musb_hnp_stop(musb);
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ musb_hnp_stop(musb);
+ musb_root_disconnect(musb);
+ /* FALLTHROUGH */
+ case OTG_STATE_B_WAIT_ACON:
+ /* FALLTHROUGH */
+#endif /* OTG */
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ case OTG_STATE_B_PERIPHERAL:
+ case OTG_STATE_B_IDLE:
+ musb_g_disconnect(musb);
+ break;
+#endif /* GADGET */
+ default:
+ WARNING("unhandled DISCONNECT transition (%s)\n",
+ otg_state_string(musb));
+ break;
+ }
+
+ schedule_work(&musb->irq_work);
+ }
+
+ if (int_usb & MUSB_INTR_SUSPEND) {
+ DBG(1, "SUSPEND (%s) devctl %02x power %02x\n",
+ otg_state_string(musb), devctl, power);
+ handled = IRQ_HANDLED;
+
+ switch (musb->xceiv.state) {
+#ifdef CONFIG_USB_MUSB_OTG
+ case OTG_STATE_A_PERIPHERAL:
+ /*
+ * We cannot stop HNP here, devctl BDEVICE might be
+ * still set.
+ */
+ break;
+#endif
+ case OTG_STATE_B_PERIPHERAL:
+ musb_g_suspend(musb);
+ musb->is_active = is_otg_enabled(musb)
+ && musb->xceiv.gadget->b_hnp_enable;
+ if (musb->is_active) {
+#ifdef CONFIG_USB_MUSB_OTG
+ musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
+ DBG(1, "HNP: Setting timer for b_ase0_brst\n");
+ musb_otg_timer.data = (unsigned long)musb;
+ mod_timer(&musb_otg_timer, jiffies
+ + msecs_to_jiffies(TB_ASE0_BRST));
+#endif
+ }
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ if (musb->a_wait_bcon != 0)
+ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon));
+ break;
+ case OTG_STATE_A_HOST:
+ musb->xceiv.state = OTG_STATE_A_SUSPEND;
+ musb->is_active = is_otg_enabled(musb)
+ && musb->xceiv.host->b_hnp_enable;
+ break;
+ case OTG_STATE_B_HOST:
+ /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
+ DBG(1, "REVISIT: SUSPEND as B_HOST\n");
+ break;
+ default:
+ /* "should not happen" */
+ musb->is_active = 0;
+ break;
+ }
+ schedule_work(&musb->irq_work);
+ }
+
+
+ return handled;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Program the HDRC to start (enable interrupts, dma, etc.).
+ */
+void musb_start(struct musb *musb)
+{
+ void __iomem *regs = musb->mregs;
+ u8 devctl = musb_readb(regs, MUSB_DEVCTL);
+
+ DBG(2, "<== devctl %02x\n", devctl);
+
+ /* Set INT enable registers, enable interrupts */
+ musb_writew(regs, MUSB_INTRTXE, musb->epmask);
+ musb_writew(regs, MUSB_INTRRXE, musb->epmask & 0xfffe);
+ musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
+
+ musb_writeb(regs, MUSB_TESTMODE, 0);
+
+ /* put into basic highspeed mode and start session */
+ musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
+ | MUSB_POWER_SOFTCONN
+ | MUSB_POWER_HSENAB
+ /* ENSUSPEND wedges tusb */
+ /* | MUSB_POWER_ENSUSPEND */
+ );
+
+ musb->is_active = 0;
+ devctl = musb_readb(regs, MUSB_DEVCTL);
+ devctl &= ~MUSB_DEVCTL_SESSION;
+
+ if (is_otg_enabled(musb)) {
+ /* session started after:
+ * (a) ID-grounded irq, host mode;
+ * (b) vbus present/connect IRQ, peripheral mode;
+ * (c) peripheral initiates, using SRP
+ */
+ if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
+ musb->is_active = 1;
+ else
+ devctl |= MUSB_DEVCTL_SESSION;
+
+ } else if (is_host_enabled(musb)) {
+ /* assume ID pin is hard-wired to ground */
+ devctl |= MUSB_DEVCTL_SESSION;
+
+ } else /* peripheral is enabled */ {
+ if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
+ musb->is_active = 1;
+ }
+ musb_platform_enable(musb);
+ musb_writeb(regs, MUSB_DEVCTL, devctl);
+}
+
+
+static void musb_generic_disable(struct musb *musb)
+{
+ void __iomem *mbase = musb->mregs;
+ u16 temp;
+
+ /* disable interrupts */
+ musb_writeb(mbase, MUSB_INTRUSBE, 0);
+ musb_writew(mbase, MUSB_INTRTXE, 0);
+ musb_writew(mbase, MUSB_INTRRXE, 0);
+
+ /* off */
+ musb_writeb(mbase, MUSB_DEVCTL, 0);
+
+ /* flush pending interrupts */
+ temp = musb_readb(mbase, MUSB_INTRUSB);
+ temp = musb_readw(mbase, MUSB_INTRTX);
+ temp = musb_readw(mbase, MUSB_INTRRX);
+
+}
+
+/*
+ * Make the HDRC stop (disable interrupts, etc.);
+ * reversible by musb_start
+ * called on gadget driver unregister
+ * with controller locked, irqs blocked
+ * acts as a NOP unless some role activated the hardware
+ */
+void musb_stop(struct musb *musb)
+{
+ /* stop IRQs, timers, ... */
+ musb_platform_disable(musb);
+ musb_generic_disable(musb);
+ DBG(3, "HDRC disabled\n");
+
+ /* FIXME
+ * - mark host and/or peripheral drivers unusable/inactive
+ * - disable DMA (and enable it in HdrcStart)
+	 *  - make sure we can musb_start() after musb_stop(); OTG mode
+	 *    needs that for gadget driver module rmmod/modprobe cycles
+ * - ...
+ */
+ musb_platform_try_idle(musb, 0);
+}
+
+static void musb_shutdown(struct platform_device *pdev)
+{
+ struct musb *musb = dev_to_musb(&pdev->dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ musb_platform_disable(musb);
+ musb_generic_disable(musb);
+ if (musb->clock) {
+ clk_put(musb->clock);
+ musb->clock = NULL;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ /* FIXME power down */
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * The silicon either has hard-wired endpoint configurations, or else
+ * "dynamic fifo" sizing. The driver has support for both, though at this
+ * writing only the dynamic sizing is very well tested. We use normal
+ * idioms so that both modes are compile-tested, but dead code elimination
+ * leaves only the relevant one in the object file.
+ *
+ * We don't currently use dynamic fifo setup capability to do anything
+ * more than selecting one of a bunch of predefined configurations.
+ */
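+/* Whichever mode is chosen, fifo_setup() checks every allocation against
+ * the 1 << (ram_bits + 2) bytes of FIFO RAM, so an oversized table is
+ * rejected ("mem overrun") instead of silently overlapping fifos.
+ */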
+#if defined(CONFIG_USB_TUSB6010) || \
+ defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
+static ushort __initdata fifo_mode = 4;
+#else
+static ushort __initdata fifo_mode = 2;
+#endif
+
+/* "modprobe ... fifo_mode=1" etc */
+module_param(fifo_mode, ushort, 0);
+MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
+
+
+enum fifo_style { FIFO_RXTX, FIFO_TX, FIFO_RX } __attribute__ ((packed));
+enum buf_mode { BUF_SINGLE, BUF_DOUBLE } __attribute__ ((packed));
+
+struct fifo_cfg {
+ u8 hw_ep_num;
+ enum fifo_style style;
+ enum buf_mode mode;
+ u16 maxpacket;
+};
+
+/*
+ * tables defining fifo_mode values. define more if you like.
+ * for host side, make sure both halves of ep1 are set up.
+ */
+
+/* mode 0 - fits in 2KB */
+static struct fifo_cfg __initdata mode_0_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 1 - fits in 4KB */
+static struct fifo_cfg __initdata mode_1_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 2 - fits in 4KB */
+static struct fifo_cfg __initdata mode_2_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 3 - fits in 4KB */
+static struct fifo_cfg __initdata mode_3_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
+{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
+};
+
+/* mode 4 - fits in 16KB */
+static struct fifo_cfg __initdata mode_4_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 13, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 13, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
+{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
+};
+
+
+/*
+ * configure a fifo; for non-shared endpoints, this may be called
+ * once for a tx fifo and once for an rx fifo.
+ *
+ * returns negative errno or offset for next fifo.
+ */
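+/* Worked example (illustrative): a 512-byte double-buffered fifo gives
+ * size = 9 and c_size = 6 | MUSB_FIFOSZ_DPB, consumes 1024 bytes of the
+ * 1 << (ram_bits + 2) bytes of FIFO RAM, and records its start address
+ * in c_off as an offset in 8-byte units.
+ */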
+static int __init
+fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
+ const struct fifo_cfg *cfg, u16 offset)
+{
+ void __iomem *mbase = musb->mregs;
+ int size = 0;
+ u16 maxpacket = cfg->maxpacket;
+ u16 c_off = offset >> 3;
+ u8 c_size;
+
+ /* expect hw_ep has already been zero-initialized */
+
+ size = ffs(max(maxpacket, (u16) 8)) - 1;
+ maxpacket = 1 << size;
+
+ c_size = size - 3;
+ if (cfg->mode == BUF_DOUBLE) {
+ if ((offset + (maxpacket << 1)) >
+ (1 << (musb->config->ram_bits + 2)))
+ return -EMSGSIZE;
+ c_size |= MUSB_FIFOSZ_DPB;
+ } else {
+ if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2)))
+ return -EMSGSIZE;
+ }
+
+ /* configure the FIFO */
+ musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* EP0 reserved endpoint for control, bidirectional;
+ * EP1 reserved for bulk, two unidirection halves.
+ */
+ if (hw_ep->epnum == 1)
+ musb->bulk_ep = hw_ep;
+ /* REVISIT error check: be sure ep0 can both rx and tx ... */
+#endif
+ switch (cfg->style) {
+ case FIFO_TX:
+ musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
+ musb_writew(mbase, MUSB_TXFIFOADD, c_off);
+ hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
+ hw_ep->max_packet_sz_tx = maxpacket;
+ break;
+ case FIFO_RX:
+ musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
+ musb_writew(mbase, MUSB_RXFIFOADD, c_off);
+ hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
+ hw_ep->max_packet_sz_rx = maxpacket;
+ break;
+ case FIFO_RXTX:
+ musb_writeb(mbase, MUSB_TXFIFOSZ, c_size);
+ musb_writew(mbase, MUSB_TXFIFOADD, c_off);
+ hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
+ hw_ep->max_packet_sz_rx = maxpacket;
+
+ musb_writeb(mbase, MUSB_RXFIFOSZ, c_size);
+ musb_writew(mbase, MUSB_RXFIFOADD, c_off);
+ hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
+ hw_ep->max_packet_sz_tx = maxpacket;
+
+ hw_ep->is_shared_fifo = true;
+ break;
+ }
+
+ /* NOTE rx and tx endpoint irqs aren't managed separately,
+ * which happens to be ok
+ */
+ musb->epmask |= (1 << hw_ep->epnum);
+
+ return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
+}
+
+static struct fifo_cfg __initdata ep0_cfg = {
+ .style = FIFO_RXTX, .maxpacket = 64,
+};
+
+static int __init ep_config_from_table(struct musb *musb)
+{
+ const struct fifo_cfg *cfg;
+ unsigned i, n;
+ int offset;
+ struct musb_hw_ep *hw_ep = musb->endpoints;
+
+ switch (fifo_mode) {
+ default:
+ fifo_mode = 0;
+ /* FALLTHROUGH */
+ case 0:
+ cfg = mode_0_cfg;
+ n = ARRAY_SIZE(mode_0_cfg);
+ break;
+ case 1:
+ cfg = mode_1_cfg;
+ n = ARRAY_SIZE(mode_1_cfg);
+ break;
+ case 2:
+ cfg = mode_2_cfg;
+ n = ARRAY_SIZE(mode_2_cfg);
+ break;
+ case 3:
+ cfg = mode_3_cfg;
+ n = ARRAY_SIZE(mode_3_cfg);
+ break;
+ case 4:
+ cfg = mode_4_cfg;
+ n = ARRAY_SIZE(mode_4_cfg);
+ break;
+ }
+
+ printk(KERN_DEBUG "%s: setup fifo_mode %d\n",
+ musb_driver_name, fifo_mode);
+
+
+ offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
+ /* assert(offset > 0) */
+
+ /* NOTE: for RTL versions >= 1.400 EPINFO and RAMINFO would
+ * be better than static musb->config->num_eps and DYN_FIFO_SIZE...
+ */
+
+ for (i = 0; i < n; i++) {
+ u8 epn = cfg->hw_ep_num;
+
+ if (epn >= musb->config->num_eps) {
+ pr_debug("%s: invalid ep %d\n",
+ musb_driver_name, epn);
+ continue;
+ }
+ offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
+ if (offset < 0) {
+ pr_debug("%s: mem overrun, ep %d\n",
+ musb_driver_name, epn);
+ return -EINVAL;
+ }
+ epn++;
+ musb->nr_endpoints = max(epn, musb->nr_endpoints);
+ }
+
+ printk(KERN_DEBUG "%s: %d/%d max ep, %d/%d memory\n",
+ musb_driver_name,
+ n + 1, musb->config->num_eps * 2 - 1,
+ offset, (1 << (musb->config->ram_bits + 2)));
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ if (!musb->bulk_ep) {
+ pr_debug("%s: missing bulk\n", musb_driver_name);
+ return -EINVAL;
+ }
+#endif
+
+ return 0;
+}
+
+
+/*
+ * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
+ * @param musb the controller
+ */
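+/* The per-endpoint FIFOSIZE register read below packs log2(maxpacket):
+ * TX in the low nibble, RX in the high nibble; a high nibble of 0xf
+ * marks a shared TX/RX fifo.
+ */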
+static int __init ep_config_from_hw(struct musb *musb)
+{
+ u8 epnum = 0, reg;
+ struct musb_hw_ep *hw_ep;
+ void *mbase = musb->mregs;
+
+ DBG(2, "<== static silicon ep config\n");
+
+ /* FIXME pick up ep0 maxpacket size */
+
+ for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
+ musb_ep_select(mbase, epnum);
+ hw_ep = musb->endpoints + epnum;
+
+ /* read from core using indexed model */
+ reg = musb_readb(hw_ep->regs, 0x10 + MUSB_FIFOSIZE);
+ if (!reg) {
+ /* 0's returned when no more endpoints */
+ break;
+ }
+ musb->nr_endpoints++;
+ musb->epmask |= (1 << epnum);
+
+ hw_ep->max_packet_sz_tx = 1 << (reg & 0x0f);
+
+ /* shared TX/RX FIFO? */
+ if ((reg & 0xf0) == 0xf0) {
+ hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx;
+ hw_ep->is_shared_fifo = true;
+ continue;
+ } else {
+ hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4);
+ hw_ep->is_shared_fifo = false;
+ }
+
+ /* FIXME set up hw_ep->{rx,tx}_double_buffered */
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* pick an RX/TX endpoint for bulk */
+ if (hw_ep->max_packet_sz_tx < 512
+ || hw_ep->max_packet_sz_rx < 512)
+ continue;
+
+ /* REVISIT: this algorithm is lazy, we should at least
+ * try to pick a double buffered endpoint.
+ */
+ if (musb->bulk_ep)
+ continue;
+ musb->bulk_ep = hw_ep;
+#endif
+ }
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ if (!musb->bulk_ep) {
+ pr_debug("%s: missing bulk\n", musb_driver_name);
+ return -EINVAL;
+ }
+#endif
+
+ return 0;
+}
+
+enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
+
+/* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
+ * configure endpoints, or take their config from silicon
+ */
+static int __init musb_core_init(u16 musb_type, struct musb *musb)
+{
+#ifdef MUSB_AHB_ID
+ u32 data;
+#endif
+ u8 reg;
+ char *type;
+ u16 hwvers, rev_major, rev_minor;
+ char aInfo[78], aRevision[32], aDate[12];
+ void __iomem *mbase = musb->mregs;
+ int status = 0;
+ int i;
+
+ /* log core options (read using indexed model) */
+ musb_ep_select(mbase, 0);
+ reg = musb_readb(mbase, 0x10 + MUSB_CONFIGDATA);
+
+ strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
+ if (reg & MUSB_CONFIGDATA_DYNFIFO)
+ strcat(aInfo, ", dyn FIFOs");
+ if (reg & MUSB_CONFIGDATA_MPRXE) {
+ strcat(aInfo, ", bulk combine");
+#ifdef C_MP_RX
+ musb->bulk_combine = true;
+#else
+ strcat(aInfo, " (X)"); /* no driver support */
+#endif
+ }
+ if (reg & MUSB_CONFIGDATA_MPTXE) {
+ strcat(aInfo, ", bulk split");
+#ifdef C_MP_TX
+ musb->bulk_split = true;
+#else
+ strcat(aInfo, " (X)"); /* no driver support */
+#endif
+ }
+ if (reg & MUSB_CONFIGDATA_HBRXE) {
+ strcat(aInfo, ", HB-ISO Rx");
+ strcat(aInfo, " (X)"); /* no driver support */
+ }
+ if (reg & MUSB_CONFIGDATA_HBTXE) {
+ strcat(aInfo, ", HB-ISO Tx");
+ strcat(aInfo, " (X)"); /* no driver support */
+ }
+ if (reg & MUSB_CONFIGDATA_SOFTCONE)
+ strcat(aInfo, ", SoftConn");
+
+ printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n",
+ musb_driver_name, reg, aInfo);
+
+#ifdef MUSB_AHB_ID
+ data = musb_readl(mbase, 0x404);
+ sprintf(aDate, "%04d-%02x-%02x", (data & 0xffff),
+ (data >> 16) & 0xff, (data >> 24) & 0xff);
+ /* FIXME ID2 and ID3 are unused */
+ data = musb_readl(mbase, 0x408);
+ printk(KERN_DEBUG "ID2=%lx\n", (long unsigned)data);
+ data = musb_readl(mbase, 0x40c);
+ printk(KERN_DEBUG "ID3=%lx\n", (long unsigned)data);
+ reg = musb_readb(mbase, 0x400);
+ musb_type = ('M' == reg) ? MUSB_CONTROLLER_MHDRC : MUSB_CONTROLLER_HDRC;
+#else
+ aDate[0] = 0;
+#endif
+ if (MUSB_CONTROLLER_MHDRC == musb_type) {
+ musb->is_multipoint = 1;
+ type = "M";
+ } else {
+ musb->is_multipoint = 0;
+ type = "";
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+#ifndef CONFIG_USB_OTG_BLACKLIST_HUB
+ printk(KERN_ERR
+ "%s: kernel must blacklist external hubs\n",
+ musb_driver_name);
+#endif
+#endif
+ }
+
+ /* log release info */
+ hwvers = musb_readw(mbase, MUSB_HWVERS);
+ rev_major = (hwvers >> 10) & 0x1f;
+ rev_minor = hwvers & 0x3ff;
+ snprintf(aRevision, 32, "%d.%d%s", rev_major,
+ rev_minor, (hwvers & 0x8000) ? "RC" : "");
+ printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n",
+ musb_driver_name, type, aRevision, aDate);
+
+ /* configure ep0 */
+ musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE;
+ musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE;
+
+ /* discover endpoint configuration */
+ musb->nr_endpoints = 1;
+ musb->epmask = 1;
+
+ if (reg & MUSB_CONFIGDATA_DYNFIFO) {
+ if (musb->config->dyn_fifo)
+ status = ep_config_from_table(musb);
+ else {
+ ERR("reconfigure software for Dynamic FIFOs\n");
+ status = -ENODEV;
+ }
+ } else {
+ if (!musb->config->dyn_fifo)
+ status = ep_config_from_hw(musb);
+ else {
+ ERR("reconfigure software for static FIFOs\n");
+ return -ENODEV;
+ }
+ }
+
+ if (status < 0)
+ return status;
+
+ /* finish init, and print endpoint config */
+ for (i = 0; i < musb->nr_endpoints; i++) {
+ struct musb_hw_ep *hw_ep = musb->endpoints + i;
+
+ hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase;
+#ifdef CONFIG_USB_TUSB6010
+ hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i);
+ hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i);
+ hw_ep->fifo_sync_va =
+ musb->sync_va + 0x400 + MUSB_FIFO_OFFSET(i);
+
+ if (i == 0)
+ hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
+ else
+ hw_ep->conf = mbase + 0x400 + (((i - 1) & 0xf) << 2);
+#endif
+
+ hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase;
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ hw_ep->target_regs = MUSB_BUSCTL_OFFSET(i, 0) + mbase;
+ hw_ep->rx_reinit = 1;
+ hw_ep->tx_reinit = 1;
+#endif
+
+ if (hw_ep->max_packet_sz_tx) {
+ printk(KERN_DEBUG
+ "%s: hw_ep %d%s, %smax %d\n",
+ musb_driver_name, i,
+ hw_ep->is_shared_fifo ? "shared" : "tx",
+ hw_ep->tx_double_buffered
+ ? "doublebuffer, " : "",
+ hw_ep->max_packet_sz_tx);
+ }
+ if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
+ printk(KERN_DEBUG
+ "%s: hw_ep %d%s, %smax %d\n",
+ musb_driver_name, i,
+ "rx",
+ hw_ep->rx_double_buffered
+ ? "doublebuffer, " : "",
+ hw_ep->max_packet_sz_rx);
+ }
+ if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
+ DBG(1, "hw_ep %d not configured\n", i);
+ }
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+
+static irqreturn_t generic_interrupt(int irq, void *__hci)
+{
+ unsigned long flags;
+ irqreturn_t retval = IRQ_NONE;
+ struct musb *musb = __hci;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
+ musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
+ musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
+
+ if (musb->int_usb || musb->int_tx || musb->int_rx)
+ retval = musb_interrupt(musb);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+	/* REVISIT we sometimes get spurious IRQs on g_ep0;
+	 * not clear why...
+ */
+ if (retval != IRQ_HANDLED)
+ DBG(5, "spurious?\n");
+
+ return IRQ_HANDLED;
+}
+
+#else
+#define generic_interrupt NULL
+#endif
+
+/*
+ * handle all the irqs defined by the HDRC core. for now we expect: other
+ * irq sources (phy, dma, etc) will be handled first, musb->int_* values
+ * will be assigned, and the irq will already have been acked.
+ *
+ * called in irq context with spinlock held, irqs blocked
+ */
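+/* Bit 0 of int_tx is endpoint 0 (handled separately below); bits 1..15
+ * of int_tx and int_rx map to endpoints 1..15 and are scanned LSB-first.
+ */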
+irqreturn_t musb_interrupt(struct musb *musb)
+{
+ irqreturn_t retval = IRQ_NONE;
+ u8 devctl, power;
+ int ep_num;
+ u32 reg;
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ power = musb_readb(musb->mregs, MUSB_POWER);
+
+ DBG(4, "** IRQ %s usb%04x tx%04x rx%04x\n",
+ (devctl & MUSB_DEVCTL_HM) ? "host" : "peripheral",
+ musb->int_usb, musb->int_tx, musb->int_rx);
+
+ /* the core can interrupt us for multiple reasons; docs have
+ * a generic interrupt flowchart to follow
+ */
+ if (musb->int_usb & STAGE0_MASK)
+ retval |= musb_stage0_irq(musb, musb->int_usb,
+ devctl, power);
+
+ /* "stage 1" is handling endpoint irqs */
+
+ /* handle endpoint 0 first */
+ if (musb->int_tx & 1) {
+ if (devctl & MUSB_DEVCTL_HM)
+ retval |= musb_h_ep0_irq(musb);
+ else
+ retval |= musb_g_ep0_irq(musb);
+ }
+
+ /* RX on endpoints 1-15 */
+ reg = musb->int_rx >> 1;
+ ep_num = 1;
+ while (reg) {
+ if (reg & 1) {
+ /* musb_ep_select(musb->mregs, ep_num); */
+ /* REVISIT just retval = ep->rx_irq(...) */
+ retval = IRQ_HANDLED;
+ if (devctl & MUSB_DEVCTL_HM) {
+ if (is_host_capable())
+ musb_host_rx(musb, ep_num);
+ } else {
+ if (is_peripheral_capable())
+ musb_g_rx(musb, ep_num);
+ }
+ }
+
+ reg >>= 1;
+ ep_num++;
+ }
+
+ /* TX on endpoints 1-15 */
+ reg = musb->int_tx >> 1;
+ ep_num = 1;
+ while (reg) {
+ if (reg & 1) {
+ /* musb_ep_select(musb->mregs, ep_num); */
+ /* REVISIT just retval |= ep->tx_irq(...) */
+ retval = IRQ_HANDLED;
+ if (devctl & MUSB_DEVCTL_HM) {
+ if (is_host_capable())
+ musb_host_tx(musb, ep_num);
+ } else {
+ if (is_peripheral_capable())
+ musb_g_tx(musb, ep_num);
+ }
+ }
+ reg >>= 1;
+ ep_num++;
+ }
+
+ /* finish handling "global" interrupts after handling fifos */
+ if (musb->int_usb)
+ retval |= musb_stage2_irq(musb,
+ musb->int_usb, devctl, power);
+
+ return retval;
+}
+
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+static int __initdata use_dma = 1;
+
+/* "modprobe ... use_dma=0" etc */
+module_param(use_dma, bool, 0);
+MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
+
+void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
+{
+ u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ /* called with controller lock already held */
+
+ if (!epnum) {
+#ifndef CONFIG_USB_TUSB_OMAP_DMA
+ if (!is_cppi_enabled()) {
+ /* endpoint 0 */
+ if (devctl & MUSB_DEVCTL_HM)
+ musb_h_ep0_irq(musb);
+ else
+ musb_g_ep0_irq(musb);
+ }
+#endif
+ } else {
+ /* endpoints 1..15 */
+ if (transmit) {
+ if (devctl & MUSB_DEVCTL_HM) {
+ if (is_host_capable())
+ musb_host_tx(musb, epnum);
+ } else {
+ if (is_peripheral_capable())
+ musb_g_tx(musb, epnum);
+ }
+ } else {
+ /* receive */
+ if (devctl & MUSB_DEVCTL_HM) {
+ if (is_host_capable())
+ musb_host_rx(musb, epnum);
+ } else {
+ if (is_peripheral_capable())
+ musb_g_rx(musb, epnum);
+ }
+ }
+ }
+}
+
+#else
+#define use_dma 0
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_SYSFS
+
+static ssize_t
+musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ ret = sprintf(buf, "%s\n", otg_state_string(musb));
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return ret;
+}
+
+static ssize_t
+musb_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ if (!strncmp(buf, "host", 4))
+ musb_platform_set_mode(musb, MUSB_HOST);
+ if (!strncmp(buf, "peripheral", 10))
+ musb_platform_set_mode(musb, MUSB_PERIPHERAL);
+ if (!strncmp(buf, "otg", 3))
+ musb_platform_set_mode(musb, MUSB_OTG);
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return n;
+}
+static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store);
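+/* e.g. "echo host > mode" on this sysfs attribute (its path is
+ * platform-specific) requests a role change via musb_platform_set_mode();
+ * "peripheral" and "otg" are accepted too.
+ */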
+
+static ssize_t
+musb_vbus_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+ unsigned long val;
+
+ if (sscanf(buf, "%lu", &val) < 1) {
+ printk(KERN_ERR "Invalid VBUS timeout ms value\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&musb->lock, flags);
+ musb->a_wait_bcon = val;
+ if (musb->xceiv.state == OTG_STATE_A_WAIT_BCON)
+ musb->is_active = 0;
+ musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return n;
+}
+
+static ssize_t
+musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+ unsigned long val;
+ int vbus;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ val = musb->a_wait_bcon;
+ vbus = musb_platform_get_vbus_status(musb);
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return sprintf(buf, "Vbus %s, timeout %lu\n",
+ vbus ? "on" : "off", val);
+}
+static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+
+/* Gadget drivers can't know that a host is connected, so they can't tell
+ * when to start SRP; but users can. This attribute lets userspace trigger
+ * SRP explicitly.
+ */
+static ssize_t
+musb_srp_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct musb *musb = dev_to_musb(dev);
+ unsigned short srp;
+
+ if (sscanf(buf, "%hu", &srp) != 1
+ || (srp != 1)) {
+ printk(KERN_ERR "SRP: Value must be 1\n");
+ return -EINVAL;
+ }
+
+ if (srp == 1)
+ musb_g_wakeup(musb);
+
+ return n;
+}
+static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
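+/* e.g. "echo 1 > srp" on this sysfs attribute asks the peripheral side
+ * to initiate SRP; any value other than 1 is rejected.
+ */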
+
+#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
+
+#endif /* sysfs */
+
+/* Only used to provide driver mode change events */
+static void musb_irq_work(struct work_struct *data)
+{
+ struct musb *musb = container_of(data, struct musb, irq_work);
+ static int old_state;
+
+ if (musb->xceiv.state != old_state) {
+ old_state = musb->xceiv.state;
+ sysfs_notify(&musb->controller->kobj, NULL, "mode");
+ }
+}
+
+/* --------------------------------------------------------------------------
+ * Init support
+ */
+
+static struct musb *__init
+allocate_instance(struct device *dev,
+ struct musb_hdrc_config *config, void __iomem *mbase)
+{
+ struct musb *musb;
+ struct musb_hw_ep *ep;
+ int epnum;
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ struct usb_hcd *hcd;
+
+ hcd = usb_create_hcd(&musb_hc_driver, dev, dev->bus_id);
+ if (!hcd)
+ return NULL;
+ /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
+
+ musb = hcd_to_musb(hcd);
+ INIT_LIST_HEAD(&musb->control);
+ INIT_LIST_HEAD(&musb->in_bulk);
+ INIT_LIST_HEAD(&musb->out_bulk);
+
+ hcd->uses_new_polling = 1;
+
+ musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
+#else
+ musb = kzalloc(sizeof *musb, GFP_KERNEL);
+ if (!musb)
+ return NULL;
+ dev_set_drvdata(dev, musb);
+
+#endif
+
+ musb->mregs = mbase;
+ musb->ctrl_base = mbase;
+ musb->nIrq = -ENODEV;
+ musb->config = config;
+ for (epnum = 0, ep = musb->endpoints;
+ epnum < musb->config->num_eps;
+ epnum++, ep++) {
+
+ ep->musb = musb;
+ ep->epnum = epnum;
+ }
+
+ musb->controller = dev;
+ return musb;
+}
+
+static void musb_free(struct musb *musb)
+{
+ /* this has multiple entry modes. it handles fault cleanup after
+ * probe(), where things may be partially set up, as well as rmmod
+ * cleanup after everything's been de-activated.
+ */
+
+#ifdef CONFIG_SYSFS
+ device_remove_file(musb->controller, &dev_attr_mode);
+ device_remove_file(musb->controller, &dev_attr_vbus);
+#ifdef CONFIG_USB_MUSB_OTG
+ device_remove_file(musb->controller, &dev_attr_srp);
+#endif
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ musb_gadget_cleanup(musb);
+#endif
+
+ if (musb->nIrq >= 0) {
+ disable_irq_wake(musb->nIrq);
+ free_irq(musb->nIrq, musb);
+ }
+ if (is_dma_capable() && musb->dma_controller) {
+ struct dma_controller *c = musb->dma_controller;
+
+ (void) c->stop(c);
+ dma_controller_destroy(c);
+ }
+
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ musb_platform_exit(musb);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+
+ if (musb->clock) {
+ clk_disable(musb->clock);
+ clk_put(musb->clock);
+ }
+
+#ifdef CONFIG_USB_MUSB_OTG
+ put_device(musb->xceiv.dev);
+#endif
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ usb_put_hcd(musb_to_hcd(musb));
+#else
+ kfree(musb);
+#endif
+}
+
+/*
+ * Perform generic per-controller initialization.
+ *
+ * @pDevice: the controller (already clocked, etc)
+ * @nIrq: irq
+ * @mregs: virtual address of controller registers,
+ * not yet corrected for platform-specific offsets
+ */
+static int __init
+musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
+{
+ int status;
+ struct musb *musb;
+ struct musb_hdrc_platform_data *plat = dev->platform_data;
+
+ /* The driver might handle more features than the board; OK.
+ * Fail when the board needs a feature that's not enabled.
+ */
+ if (!plat) {
+ dev_dbg(dev, "no platform_data?\n");
+ return -ENODEV;
+ }
+ switch (plat->mode) {
+ case MUSB_HOST:
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ break;
+#else
+ goto bad_config;
+#endif
+ case MUSB_PERIPHERAL:
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ break;
+#else
+ goto bad_config;
+#endif
+ case MUSB_OTG:
+#ifdef CONFIG_USB_MUSB_OTG
+ break;
+#else
+bad_config:
+#endif
+ default:
+ dev_err(dev, "incompatible Kconfig role setting\n");
+ return -EINVAL;
+ }
+
+ /* allocate */
+ musb = allocate_instance(dev, plat->config, ctrl);
+ if (!musb)
+ return -ENOMEM;
+
+ spin_lock_init(&musb->lock);
+ musb->board_mode = plat->mode;
+ musb->board_set_power = plat->set_power;
+ musb->set_clock = plat->set_clock;
+ musb->min_power = plat->min_power;
+
+ /* Clock usage is chip-specific ... functional clock (DaVinci,
+ * OMAP2430), or PHY ref (some TUSB6010 boards). All this core
+ * code does is make sure a clock handle is available; platform
+ * code manages it during start/stop and suspend/resume.
+ */
+ if (plat->clock) {
+ musb->clock = clk_get(dev, plat->clock);
+ if (IS_ERR(musb->clock)) {
+ status = PTR_ERR(musb->clock);
+ musb->clock = NULL;
+ goto fail;
+ }
+ }
+
+ /* assume vbus is off */
+
+ /* platform adjusts musb->mregs and musb->isr if needed,
+ * and activates clocks
+ */
+ musb->isr = generic_interrupt;
+ status = musb_platform_init(musb);
+
+ if (status < 0)
+ goto fail;
+ if (!musb->isr) {
+ status = -ENODEV;
+ goto fail2;
+ }
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+ if (use_dma && dev->dma_mask) {
+ struct dma_controller *c;
+
+ c = dma_controller_create(musb, musb->mregs);
+ musb->dma_controller = c;
+ if (c)
+ (void) c->start(c);
+ }
+#endif
+ /* ideally this would be abstracted in platform setup */
+ if (!is_dma_capable() || !musb->dma_controller)
+ dev->dma_mask = NULL;
+
+ /* be sure interrupts are disabled before connecting ISR */
+ musb_platform_disable(musb);
+ musb_generic_disable(musb);
+
+ /* setup musb parts of the core (especially endpoints) */
+ status = musb_core_init(plat->config->multipoint
+ ? MUSB_CONTROLLER_MHDRC
+ : MUSB_CONTROLLER_HDRC, musb);
+ if (status < 0)
+ goto fail2;
+
+ /* Init IRQ workqueue before request_irq */
+ INIT_WORK(&musb->irq_work, musb_irq_work);
+
+ /* attach to the IRQ */
+ if (request_irq(nIrq, musb->isr, 0, dev->bus_id, musb)) {
+ dev_err(dev, "request_irq %d failed!\n", nIrq);
+ status = -ENODEV;
+ goto fail2;
+ }
+ musb->nIrq = nIrq;
+/* FIXME this handles wakeup irqs wrong */
+ if (enable_irq_wake(nIrq) == 0)
+ device_init_wakeup(dev, 1);
+
+ pr_info("%s: USB %s mode controller at %p using %s, IRQ %d\n",
+ musb_driver_name,
+ ({char *s;
+ switch (musb->board_mode) {
+ case MUSB_HOST: s = "Host"; break;
+ case MUSB_PERIPHERAL: s = "Peripheral"; break;
+ default: s = "OTG"; break;
+ }; s; }),
+ ctrl,
+ (is_dma_capable() && musb->dma_controller)
+ ? "DMA" : "PIO",
+ musb->nIrq);
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* host side needs more setup, except for no-host modes */
+ if (musb->board_mode != MUSB_PERIPHERAL) {
+ struct usb_hcd *hcd = musb_to_hcd(musb);
+
+ if (musb->board_mode == MUSB_OTG)
+ hcd->self.otg_port = 1;
+ musb->xceiv.host = &hcd->self;
+ hcd->power_budget = 2 * (plat->power ? : 250);
+ }
+#endif /* CONFIG_USB_MUSB_HDRC_HCD */
+
+ /* For the host-only role, we can activate right away.
+ * (We expect the ID pin to be forcibly grounded!!)
+ * Otherwise, wait till the gadget driver hooks up.
+ */
+ if (!is_otg_enabled(musb) && is_host_enabled(musb)) {
+ MUSB_HST_MODE(musb);
+ musb->xceiv.default_a = 1;
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+
+ status = usb_add_hcd(musb_to_hcd(musb), -1, 0);
+ if (status)
+ goto fail;
+
+ DBG(1, "%s mode, status %d, devctl %02x %c\n",
+ "HOST", status,
+ musb_readb(musb->mregs, MUSB_DEVCTL),
+ (musb_readb(musb->mregs, MUSB_DEVCTL)
+ & MUSB_DEVCTL_BDEVICE
+ ? 'B' : 'A'));
+
+ } else /* peripheral is enabled */ {
+ MUSB_DEV_MODE(musb);
+ musb->xceiv.default_a = 0;
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+
+ status = musb_gadget_setup(musb);
+ if (status)
+ goto fail;
+
+ DBG(1, "%s mode, status %d, dev%02x\n",
+ is_otg_enabled(musb) ? "OTG" : "PERIPHERAL",
+ status,
+ musb_readb(musb->mregs, MUSB_DEVCTL));
+
+ }
+
+#ifdef CONFIG_SYSFS
+ status = device_create_file(dev, &dev_attr_mode);
+ status = device_create_file(dev, &dev_attr_vbus);
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ status = device_create_file(dev, &dev_attr_srp);
+#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
+ status = 0;
+#endif
+
+ return 0;
+
+fail:
+ if (musb->clock)
+ clk_put(musb->clock);
+ device_init_wakeup(dev, 0);
+ musb_free(musb);
+ return status;
+
+fail2:
+ musb_platform_exit(musb);
+ goto fail;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
+ * bridge to a platform device; this driver then suffices.
+ */
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+static u64 *orig_dma_mask;
+#endif
+
+static int __init musb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int irq = platform_get_irq(pdev, 0);
+ struct resource *iomem;
+ void __iomem *base;
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem || irq == 0)
+ return -ENODEV;
+
+ base = ioremap(iomem->start, iomem->end - iomem->start + 1);
+ if (!base) {
+ dev_err(dev, "ioremap failed\n");
+ return -ENOMEM;
+ }
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+ /* clobbered by use_dma=n */
+ orig_dma_mask = dev->dma_mask;
+#endif
+ return musb_init_controller(dev, irq, base);
+}
+
+static int __devexit musb_remove(struct platform_device *pdev)
+{
+ struct musb *musb = dev_to_musb(&pdev->dev);
+ void __iomem *ctrl_base = musb->ctrl_base;
+
+ /* this gets called on rmmod.
+ * - Host mode: host may still be active
+ * - Peripheral mode: peripheral is deactivated (or never-activated)
+ * - OTG mode: both roles are deactivated (or never-activated)
+ */
+ musb_shutdown(pdev);
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ if (musb->board_mode == MUSB_HOST)
+ usb_remove_hcd(musb_to_hcd(musb));
+#endif
+ musb_free(musb);
+ iounmap(ctrl_base);
+ device_init_wakeup(&pdev->dev, 0);
+#ifndef CONFIG_MUSB_PIO_ONLY
+ pdev->dev.dma_mask = orig_dma_mask;
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int musb_suspend(struct platform_device *pdev, pm_message_t message)
+{
+ unsigned long flags;
+ struct musb *musb = dev_to_musb(&pdev->dev);
+
+ if (!musb->clock)
+ return 0;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (is_peripheral_active(musb)) {
+ /* FIXME force disconnect unless we know USB will wake
+ * the system up quickly enough to respond ...
+ */
+ } else if (is_host_active(musb)) {
+ /* we know all the children are suspended; sometimes
+ * they will even be wakeup-enabled.
+ */
+ }
+
+ if (musb->set_clock)
+ musb->set_clock(musb->clock, 0);
+ else
+ clk_disable(musb->clock);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return 0;
+}
+
+static int musb_resume(struct platform_device *pdev)
+{
+ unsigned long flags;
+ struct musb *musb = dev_to_musb(&pdev->dev);
+
+ if (!musb->clock)
+ return 0;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (musb->set_clock)
+ musb->set_clock(musb->clock, 1);
+ else
+ clk_enable(musb->clock);
+
+ /* For static CMOS like DaVinci, register values were preserved
+ * unless the whole SoC powered down for some reason and we're
+ * not treating that as a whole-system restart (e.g. swsusp)
+ */
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return 0;
+}
+
+#else
+#define musb_suspend NULL
+#define musb_resume NULL
+#endif
+
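+/* Note that .probe is not set here: musb_probe() is handed to
+ * platform_driver_probe() in musb_init() below, which allows the __init
+ * probe code to be discarded once booting is done.
+ */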
+static struct platform_driver musb_driver = {
+ .driver = {
+ .name = (char *)musb_driver_name,
+ .bus = &platform_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .remove = __devexit_p(musb_remove),
+ .shutdown = musb_shutdown,
+ .suspend = musb_suspend,
+ .resume = musb_resume,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init musb_init(void)
+{
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ if (usb_disabled())
+ return 0;
+#endif
+
+ pr_info("%s: version " MUSB_VERSION ", "
+#ifdef CONFIG_MUSB_PIO_ONLY
+ "pio"
+#elif defined(CONFIG_USB_TI_CPPI_DMA)
+ "cppi-dma"
+#elif defined(CONFIG_USB_INVENTRA_DMA)
+ "musb-dma"
+#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
+ "tusb-omap-dma"
+#else
+ "?dma?"
+#endif
+ ", "
+#ifdef CONFIG_USB_MUSB_OTG
+ "otg (peripheral+host)"
+#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
+ "peripheral"
+#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
+ "host"
+#endif
+ ", debug=%d\n",
+ musb_driver_name, debug);
+ return platform_driver_probe(&musb_driver, musb_probe);
+}
+
+/* make us init after usbcore and before usb
+ * gadget and host-side drivers start to register
+ */
+subsys_initcall(musb_init);
+
+static void __exit musb_cleanup(void)
+{
+ platform_driver_unregister(&musb_driver);
+}
+module_exit(musb_cleanup);
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
new file mode 100644
index 000000000000..82227251931b
--- /dev/null
+++ b/drivers/usb/musb/musb_core.h
@@ -0,0 +1,488 @@
+/*
+ * MUSB OTG driver defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_CORE_H__
+#define __MUSB_CORE_H__
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/musb.h>
+
+struct musb;
+struct musb_hw_ep;
+struct musb_ep;
+
+
+#include "musb_debug.h"
+#include "musb_dma.h"
+
+#include "musb_io.h"
+#include "musb_regs.h"
+
+#include "musb_gadget.h"
+#include "../core/hcd.h"
+#include "musb_host.h"
+
+
+
+#ifdef CONFIG_USB_MUSB_OTG
+
+#define is_peripheral_enabled(musb) ((musb)->board_mode != MUSB_HOST)
+#define is_host_enabled(musb) ((musb)->board_mode != MUSB_PERIPHERAL)
+#define is_otg_enabled(musb) ((musb)->board_mode == MUSB_OTG)
+
+/* NOTE: otg and peripheral-only state machines start at B_IDLE.
+ * OTG or host-only go to A_IDLE when ID is sensed.
+ */
+#define is_peripheral_active(m) (!(m)->is_host)
+#define is_host_active(m) ((m)->is_host)
+
+#else
+#define is_peripheral_enabled(musb) is_peripheral_capable()
+#define is_host_enabled(musb) is_host_capable()
+#define is_otg_enabled(musb) 0
+
+#define is_peripheral_active(musb) is_peripheral_capable()
+#define is_host_active(musb) is_host_capable()
+#endif
+
+#if defined(CONFIG_USB_MUSB_OTG) || defined(CONFIG_USB_MUSB_PERIPHERAL)
+/* for some reason, the "select USB_GADGET_MUSB_HDRC" doesn't always
+ * override that choice selection (often USB_GADGET_DUMMY_HCD).
+ */
+#ifndef CONFIG_USB_GADGET_MUSB_HDRC
+#error bogus Kconfig output ... select CONFIG_USB_GADGET_MUSB_HDRC
+#endif
+#endif /* need MUSB gadget selection */
+
+
+#ifdef CONFIG_PROC_FS
+#include <linux/fs.h>
+#define MUSB_CONFIG_PROC_FS
+#endif
+
+/****************************** PERIPHERAL ROLE *****************************/
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+
+#define is_peripheral_capable() (1)
+
+extern irqreturn_t musb_g_ep0_irq(struct musb *);
+extern void musb_g_tx(struct musb *, u8);
+extern void musb_g_rx(struct musb *, u8);
+extern void musb_g_reset(struct musb *);
+extern void musb_g_suspend(struct musb *);
+extern void musb_g_resume(struct musb *);
+extern void musb_g_wakeup(struct musb *);
+extern void musb_g_disconnect(struct musb *);
+
+#else
+
+#define is_peripheral_capable() (0)
+
+static inline irqreturn_t musb_g_ep0_irq(struct musb *m) { return IRQ_NONE; }
+static inline void musb_g_reset(struct musb *m) {}
+static inline void musb_g_suspend(struct musb *m) {}
+static inline void musb_g_resume(struct musb *m) {}
+static inline void musb_g_wakeup(struct musb *m) {}
+static inline void musb_g_disconnect(struct musb *m) {}
+
+#endif
+
+/****************************** HOST ROLE ***********************************/
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+
+#define is_host_capable() (1)
+
+extern irqreturn_t musb_h_ep0_irq(struct musb *);
+extern void musb_host_tx(struct musb *, u8);
+extern void musb_host_rx(struct musb *, u8);
+
+#else
+
+#define is_host_capable() (0)
+
+static inline irqreturn_t musb_h_ep0_irq(struct musb *m) { return IRQ_NONE; }
+static inline void musb_host_tx(struct musb *m, u8 e) {}
+static inline void musb_host_rx(struct musb *m, u8 e) {}
+
+#endif
+
+
+/****************************** CONSTANTS ********************************/
+
+#ifndef MUSB_C_NUM_EPS
+#define MUSB_C_NUM_EPS ((u8)16)
+#endif
+
+#ifndef MUSB_MAX_END0_PACKET
+#define MUSB_MAX_END0_PACKET ((u16)MUSB_EP0_FIFOSIZE)
+#endif
+
+/* host side ep0 states */
+enum musb_h_ep0_state {
+ MUSB_EP0_IDLE,
+ MUSB_EP0_START, /* expect ack of setup */
+ MUSB_EP0_IN, /* expect IN DATA */
+ MUSB_EP0_OUT, /* expect ack of OUT DATA */
+ MUSB_EP0_STATUS, /* expect ack of STATUS */
+} __attribute__ ((packed));
+
+/* peripheral side ep0 states */
+enum musb_g_ep0_state {
+ MUSB_EP0_STAGE_SETUP, /* idle, waiting for setup */
+ MUSB_EP0_STAGE_TX, /* IN data */
+ MUSB_EP0_STAGE_RX, /* OUT data */
+ MUSB_EP0_STAGE_STATUSIN, /* (after OUT data) */
+ MUSB_EP0_STAGE_STATUSOUT, /* (after IN data) */
+ MUSB_EP0_STAGE_ACKWAIT, /* after zlp, before statusin */
+} __attribute__ ((packed));
+
+/* OTG protocol constants */
+#define OTG_TIME_A_WAIT_VRISE 100 /* msec (max) */
+#define OTG_TIME_A_WAIT_BCON 0 /* 0=infinite; min 1000 msec */
+#define OTG_TIME_A_IDLE_BDIS 200 /* msec (min) */
+
+/*************************** REGISTER ACCESS ********************************/
+
+/* Endpoint registers (other than dynfifo setup) can be accessed either
+ * directly with the "flat" model, or after setting up an index register.
+ */
+
+#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \
+ || defined(CONFIG_ARCH_OMAP3430)
+/* REVISIT indexed access seemed to
+ * misbehave (on DaVinci) for at least peripheral IN ...
+ */
+#define MUSB_FLAT_REG
+#endif
+
+/* TUSB mapping: "flat" plus ep0 special cases */
+#if defined(CONFIG_USB_TUSB6010)
+#define musb_ep_select(_mbase, _epnum) \
+ musb_writeb((_mbase), MUSB_INDEX, (_epnum))
+#define MUSB_EP_OFFSET MUSB_TUSB_OFFSET
+
+/* "flat" mapping: each endpoint has its own i/o address */
+#elif defined(MUSB_FLAT_REG)
+#define musb_ep_select(_mbase, _epnum) (((void)(_mbase)), ((void)(_epnum)))
+#define MUSB_EP_OFFSET MUSB_FLAT_OFFSET
+
+/* "indexed" mapping: INDEX register controls register bank select */
+#else
+#define musb_ep_select(_mbase, _epnum) \
+ musb_writeb((_mbase), MUSB_INDEX, (_epnum))
+#define MUSB_EP_OFFSET MUSB_INDEXED_OFFSET
+#endif
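+
+/* Whichever mapping is selected above, endpoint register access follows
+ * the same pattern throughout the driver: select the endpoint (a no-op for
+ * the flat model), then read or write its CSRs, e.g.
+ *
+ *   musb_ep_select(mbase, epnum);
+ *   csr = musb_readw(epio, MUSB_TXCSR);
+ */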
+
+/****************************** FUNCTIONS ********************************/
+
+#define MUSB_HST_MODE(_musb)\
+ { (_musb)->is_host = true; }
+#define MUSB_DEV_MODE(_musb) \
+ { (_musb)->is_host = false; }
+
+#define test_devctl_hst_mode(_x) \
+ (musb_readb((_x)->mregs, MUSB_DEVCTL)&MUSB_DEVCTL_HM)
+
+#define MUSB_MODE(musb) ((musb)->is_host ? "Host" : "Peripheral")
+
+/******************************** TYPES *************************************/
+
+/*
+ * struct musb_hw_ep - endpoint hardware (bidirectional)
+ *
+ * Ordered slightly for better cacheline locality.
+ */
+struct musb_hw_ep {
+ struct musb *musb;
+ void __iomem *fifo;
+ void __iomem *regs;
+
+#ifdef CONFIG_USB_TUSB6010
+ void __iomem *conf;
+#endif
+
+ /* index in musb->endpoints[] */
+ u8 epnum;
+
+ /* hardware configuration, possibly dynamic */
+ bool is_shared_fifo;
+ bool tx_double_buffered;
+ bool rx_double_buffered;
+ u16 max_packet_sz_tx;
+ u16 max_packet_sz_rx;
+
+ struct dma_channel *tx_channel;
+ struct dma_channel *rx_channel;
+
+#ifdef CONFIG_USB_TUSB6010
+ /* TUSB has "asynchronous" and "synchronous" dma modes */
+ dma_addr_t fifo_async;
+ dma_addr_t fifo_sync;
+ void __iomem *fifo_sync_va;
+#endif
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ void __iomem *target_regs;
+
+ /* currently scheduled peripheral endpoint */
+ struct musb_qh *in_qh;
+ struct musb_qh *out_qh;
+
+ u8 rx_reinit;
+ u8 tx_reinit;
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ /* peripheral side */
+ struct musb_ep ep_in; /* TX */
+ struct musb_ep ep_out; /* RX */
+#endif
+};
+
+static inline struct usb_request *next_in_request(struct musb_hw_ep *hw_ep)
+{
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ return next_request(&hw_ep->ep_in);
+#else
+ return NULL;
+#endif
+}
+
+static inline struct usb_request *next_out_request(struct musb_hw_ep *hw_ep)
+{
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ return next_request(&hw_ep->ep_out);
+#else
+ return NULL;
+#endif
+}
+
+/*
+ * struct musb - Driver instance data.
+ */
+struct musb {
+ /* device lock */
+ spinlock_t lock;
+ struct clk *clock;
+ irqreturn_t (*isr)(int, void *);
+ struct work_struct irq_work;
+
+/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
+#define MUSB_PORT_STAT_RESUME (1 << 31)
+
+ u32 port1_status;
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ unsigned long rh_timer;
+
+ enum musb_h_ep0_state ep0_stage;
+
+ /* bulk traffic normally dedicates endpoint hardware, and each
+ * direction has its own ring of host side endpoints.
+ * we try to progress the transfer at the head of each endpoint's
+ * queue until it completes or NAKs too much; then we try the next
+ * endpoint.
+ */
+ struct musb_hw_ep *bulk_ep;
+
+ struct list_head control; /* of musb_qh */
+ struct list_head in_bulk; /* of musb_qh */
+ struct list_head out_bulk; /* of musb_qh */
+ struct musb_qh *periodic[32]; /* tree of interrupt+iso */
+#endif
+
+ /* called with IRQs blocked; ON/nonzero implies starting a session,
+ * and waiting at least a_wait_vrise_tmout.
+ */
+ void (*board_set_vbus)(struct musb *, int is_on);
+
+ struct dma_controller *dma_controller;
+
+ struct device *controller;
+ void __iomem *ctrl_base;
+ void __iomem *mregs;
+
+#ifdef CONFIG_USB_TUSB6010
+ dma_addr_t async;
+ dma_addr_t sync;
+ void __iomem *sync_va;
+#endif
+
+ /* passed down from chip/board specific irq handlers */
+ u8 int_usb;
+ u16 int_rx;
+ u16 int_tx;
+
+ struct otg_transceiver xceiv;
+
+ int nIrq;
+
+ struct musb_hw_ep endpoints[MUSB_C_NUM_EPS];
+#define control_ep endpoints
+
+#define VBUSERR_RETRY_COUNT 3
+ u16 vbuserr_retry;
+ u16 epmask;
+ u8 nr_endpoints;
+
+ u8 board_mode; /* enum musb_mode */
+ int (*board_set_power)(int state);
+
+ int (*set_clock)(struct clk *clk, int is_active);
+
+ u8 min_power; /* vbus for periph, in mA/2 */
+
+ bool is_host;
+
+ int a_wait_bcon; /* VBUS timeout in msecs */
+ unsigned long idle_timeout; /* Next timeout in jiffies */
+
+ /* active means connected and not suspended */
+ unsigned is_active:1;
+
+ unsigned is_multipoint:1;
+ unsigned ignore_disconnect:1; /* during bus resets */
+
+#ifdef C_MP_TX
+ unsigned bulk_split:1;
+#define can_bulk_split(musb,type) \
+ (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)
+#else
+#define can_bulk_split(musb, type) 0
+#endif
+
+#ifdef C_MP_RX
+ unsigned bulk_combine:1;
+#define can_bulk_combine(musb,type) \
+ (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
+#else
+#define can_bulk_combine(musb, type) 0
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ /* is_suspended means USB B_PERIPHERAL suspend */
+ unsigned is_suspended:1;
+
+ /* may_wakeup means remote wakeup is enabled */
+ unsigned may_wakeup:1;
+
+ /* is_self_powered is reported in device status and the
+ * config descriptor. is_bus_powered means B_PERIPHERAL
+ * draws some VBUS current; both can be true.
+ */
+ unsigned is_self_powered:1;
+ unsigned is_bus_powered:1;
+
+ unsigned set_address:1;
+ unsigned test_mode:1;
+ unsigned softconnect:1;
+
+ u8 address;
+ u8 test_mode_nr;
+ u16 ackpend; /* ep0 */
+ enum musb_g_ep0_state ep0_state;
+ struct usb_gadget g; /* the gadget */
+ struct usb_gadget_driver *gadget_driver; /* its driver */
+#endif
+
+ struct musb_hdrc_config *config;
+
+#ifdef MUSB_CONFIG_PROC_FS
+ struct proc_dir_entry *proc_entry;
+#endif
+};
+
+static inline void musb_set_vbus(struct musb *musb, int is_on)
+{
+ musb->board_set_vbus(musb, is_on);
+}
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+static inline struct musb *gadget_to_musb(struct usb_gadget *g)
+{
+ return container_of(g, struct musb, g);
+}
+#endif
+
+
+/***************************** Glue it together *****************************/
+
+extern const char musb_driver_name[];
+
+extern void musb_start(struct musb *musb);
+extern void musb_stop(struct musb *musb);
+
+extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
+extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);
+
+extern void musb_load_testpacket(struct musb *);
+
+extern irqreturn_t musb_interrupt(struct musb *);
+
+extern void musb_platform_enable(struct musb *musb);
+extern void musb_platform_disable(struct musb *musb);
+
+extern void musb_hnp_stop(struct musb *musb);
+
+extern void musb_platform_set_mode(struct musb *musb, u8 musb_mode);
+
+#if defined(CONFIG_USB_TUSB6010) || \
+ defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
+extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout);
+#else
+#define musb_platform_try_idle(x, y) do {} while (0)
+#endif
+
+#ifdef CONFIG_USB_TUSB6010
+extern int musb_platform_get_vbus_status(struct musb *musb);
+#else
+#define musb_platform_get_vbus_status(x) 0
+#endif
+
+extern int __init musb_platform_init(struct musb *musb);
+extern int musb_platform_exit(struct musb *musb);
+
+#endif /* __MUSB_CORE_H__ */
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
new file mode 100644
index 000000000000..4d2794441b15
--- /dev/null
+++ b/drivers/usb/musb/musb_debug.h
@@ -0,0 +1,62 @@
+/*
+ * MUSB OTG driver debug defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_LINUX_DEBUG_H__
+#define __MUSB_LINUX_DEBUG_H__
+
+#define yprintk(facility, format, args...) \
+ do { printk(facility "%s %d: " format , \
+ __func__, __LINE__ , ## args); } while (0)
+#define WARNING(fmt, args...) yprintk(KERN_WARNING, fmt, ## args)
+#define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args)
+#define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args)
+
+#define xprintk(level, facility, format, args...) do { \
+ if (_dbg_level(level)) { \
+ printk(facility "%s %d: " format , \
+ __func__, __LINE__ , ## args); \
+ } } while (0)
+
+extern unsigned debug;
+
+static inline int _dbg_level(unsigned l)
+{
+ return debug >= l;
+}
+
+#define DBG(level, fmt, args...) xprintk(level, KERN_DEBUG, fmt, ## args)
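+
+/* DBG() is gated at runtime by the driver's "debug" level rather than at
+ * compile time; a typical call from the gadget code looks like:
+ *
+ *   DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
+ */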
+
+extern const char *otg_state_string(struct musb *);
+
+#endif /* __MUSB_LINUX_DEBUG_H__ */
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
new file mode 100644
index 000000000000..0a2c4e3602c1
--- /dev/null
+++ b/drivers/usb/musb/musb_dma.h
@@ -0,0 +1,172 @@
+/*
+ * MUSB OTG driver DMA controller abstraction
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_DMA_H__
+#define __MUSB_DMA_H__
+
+struct musb_hw_ep;
+
+/*
+ * DMA Controller Abstraction
+ *
+ * DMA Controllers are abstracted to allow use of a variety of different
+ * implementations of DMA, as allowed by the Inventra USB cores. On the
+ * host side, usbcore sets up the DMA mappings and flushes caches; on the
+ * peripheral side, the gadget controller driver does. Responsibilities
+ * of a DMA controller driver include:
+ *
+ * - Handling the details of moving multiple USB packets
+ * in cooperation with the Inventra USB core, including especially
+ * the correct RX side treatment of short packets and buffer-full
+ * states (both of which terminate transfers).
+ *
+ * - Knowing the correlation between dma channels and the
+ * Inventra core's local endpoint resources and data direction.
+ *
+ * - Maintaining a list of allocated/available channels.
+ *
+ * - Updating channel status on interrupts,
+ * whether shared with the Inventra core or separate.
+ */
+
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+#define is_dma_capable() (1)
+#else
+#define is_dma_capable() (0)
+#endif
+
+#ifdef CONFIG_USB_TI_CPPI_DMA
+#define is_cppi_enabled() 1
+#else
+#define is_cppi_enabled() 0
+#endif
+
+#ifdef CONFIG_USB_TUSB_OMAP_DMA
+#define tusb_dma_omap() 1
+#else
+#define tusb_dma_omap() 0
+#endif
+
+/*
+ * DMA channel status ... updated by the dma controller driver whenever that
+ * status changes, and protected by the overall controller spinlock.
+ */
+enum dma_channel_status {
+ /* unallocated */
+ MUSB_DMA_STATUS_UNKNOWN,
+ /* allocated ... but not busy, no errors */
+ MUSB_DMA_STATUS_FREE,
+ /* busy ... transactions are active */
+ MUSB_DMA_STATUS_BUSY,
+ /* transaction(s) aborted due to ... dma or memory bus error */
+ MUSB_DMA_STATUS_BUS_ABORT,
+ /* transaction(s) aborted due to ... core error or USB fault */
+ MUSB_DMA_STATUS_CORE_ABORT
+};
+
+struct dma_controller;
+
+/**
+ * struct dma_channel - A DMA channel.
+ * @private_data: channel-private data
+ * @max_len: the maximum number of bytes the channel can move in one
+ * transaction (typically representing many USB maximum-sized packets)
+ * @actual_len: how many bytes have been transferred
+ * @status: current channel status (updated e.g. on interrupt)
+ * @desired_mode: true if mode 1 is desired; false if mode 0 is desired
+ *
+ * channels are associated with an endpoint for the duration of at least
+ * one usb transfer.
+ */
+struct dma_channel {
+ void *private_data;
+ /* FIXME not void* private_data, but a dma_controller * */
+ size_t max_len;
+ size_t actual_len;
+ enum dma_channel_status status;
+ bool desired_mode;
+};
+
+/*
+ * dma_channel_status - return status of dma channel
+ * @c: the channel
+ *
+ * Returns the software's view of the channel status. If that status is BUSY
+ * then it's possible that the hardware has completed (or aborted) a transfer,
+ * so the driver needs to update that status.
+ */
+static inline enum dma_channel_status
+dma_channel_status(struct dma_channel *c)
+{
+ return (is_dma_capable() && c) ? c->status : MUSB_DMA_STATUS_UNKNOWN;
+}
+
+/**
+ * struct dma_controller - A DMA Controller.
+ * @start: call this to start a DMA controller;
+ * return 0 on success, else negative errno
+ * @stop: call this to stop a DMA controller;
+ * return 0 on success, else negative errno
+ * @channel_alloc: call this to allocate a DMA channel
+ * @channel_program: call this to program a DMA channel to move data at
+ * the given dma_addr; returns nonzero if the channel accepted the
+ * transfer
+ * @channel_release: call this to release a DMA channel
+ * @channel_abort: call this to abort a pending DMA transaction,
+ * returning it to FREE (but allocated) state
+ *
+ * Controllers manage dma channels.
+ */
+struct dma_controller {
+ int (*start)(struct dma_controller *);
+ int (*stop)(struct dma_controller *);
+ struct dma_channel *(*channel_alloc)(struct dma_controller *,
+ struct musb_hw_ep *, u8 is_tx);
+ void (*channel_release)(struct dma_channel *);
+ int (*channel_program)(struct dma_channel *channel,
+ u16 maxpacket, u8 mode,
+ dma_addr_t dma_addr,
+ u32 length);
+ int (*channel_abort)(struct dma_channel *);
+};
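+
+/* A rough sketch of how these hooks are used elsewhere in the driver
+ * (error handling omitted): a channel is bound to a hardware endpoint,
+ * programmed per transfer, and aborted only when a transfer must be
+ * cancelled before completion.
+ *
+ *   ch = c->channel_alloc(c, hw_ep, is_tx);
+ *   ok = c->channel_program(ch, maxpacket, mode, dma_addr, length);
+ *   ... musb_dma_completion() runs when the transfer finishes ...
+ *   c->channel_release(ch);
+ */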
+
+/* called after channel_program(), may indicate a fault */
+extern void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit);
+
+
+extern struct dma_controller *__init
+dma_controller_create(struct musb *, void __iomem *);
+
+extern void dma_controller_destroy(struct dma_controller *);
+
+#endif /* __MUSB_DMA_H__ */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
new file mode 100644
index 000000000000..d6a802c224fa
--- /dev/null
+++ b/drivers/usb/musb/musb_gadget.c
@@ -0,0 +1,2031 @@
+/*
+ * MUSB OTG driver peripheral support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/moduleparam.h>
+#include <linux/stat.h>
+#include <linux/dma-mapping.h>
+
+#include "musb_core.h"
+
+
+/* MUSB PERIPHERAL status 3-mar-2006:
+ *
+ * - EP0 seems solid. It passes both USBCV and usbtest control cases.
+ * Minor glitches:
+ *
+ * + remote wakeup to Linux hosts works, but USBCV failures were
+ * seen in one test run (operator error?)
+ * + endpoint halt tests -- in both usbtest and usbcv -- seem
+ * to break when dma is enabled ... is something wrongly
+ * clearing SENDSTALL?
+ *
+ * - Mass storage behaved ok when last tested. Network traffic patterns
+ * (with lots of short transfers etc) need retesting; they turn up the
+ * worst cases of the DMA, since short packets are typical but are not
+ * required.
+ *
+ * - TX/IN
+ * + both pio and dma behave fine with network and g_zero tests
+ * + no cppi throughput issues other than no-hw-queueing
+ * + failed with FLAT_REG (DaVinci)
+ * + seems to behave with double buffering, PIO -and- CPPI
+ * + with gadgetfs + AIO, requests got lost?
+ *
+ * - RX/OUT
+ * + both pio and dma behave fine with network and g_zero tests
+ * + dma is slow in typical case (short_not_ok is clear)
+ * + double buffering ok with PIO
+ * + double buffering *FAILS* with CPPI, wrong data bytes sometimes
+ * + request lossage observed with gadgetfs
+ *
+ * - ISO not tested ... might work, but only weakly isochronous
+ *
+ * - Gadget driver disabling of softconnect during bind() is ignored; so
+ * drivers can't hold off host requests until userspace is ready.
+ * (Workaround: they can turn it off later.)
+ *
+ * - PORTABILITY (assumes PIO works):
+ * + DaVinci, basically works with cppi dma
+ * + OMAP 2430, ditto with mentor dma
+ * + TUSB 6010, platform-specific dma in the works
+ */
+
+/* ----------------------------------------------------------------------- */
+
+/*
+ * Immediately complete a request.
+ *
+ * @ep: the endpoint owning the request
+ * @request: the request to complete
+ * @status: the status to complete the request with
+ *
+ * Context: controller locked, IRQs blocked.
+ */
+void musb_g_giveback(
+ struct musb_ep *ep,
+ struct usb_request *request,
+ int status)
+__releases(ep->musb->lock)
+__acquires(ep->musb->lock)
+{
+ struct musb_request *req;
+ struct musb *musb;
+ int busy = ep->busy;
+
+ req = to_musb_request(request);
+
+ list_del(&request->list);
+ if (req->request.status == -EINPROGRESS)
+ req->request.status = status;
+ musb = req->musb;
+
+ ep->busy = 1;
+ spin_unlock(&musb->lock);
+ if (is_dma_capable()) {
+ if (req->mapped) {
+ dma_unmap_single(musb->controller,
+ req->request.dma,
+ req->request.length,
+ req->tx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ req->request.dma = DMA_ADDR_INVALID;
+ req->mapped = 0;
+ } else if (req->request.dma != DMA_ADDR_INVALID)
+ dma_sync_single_for_cpu(musb->controller,
+ req->request.dma,
+ req->request.length,
+ req->tx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ }
+ if (request->status == 0)
+ DBG(5, "%s done request %p, %d/%d\n",
+ ep->end_point.name, request,
+ req->request.actual, req->request.length);
+ else
+ DBG(2, "%s request %p, %d/%d fault %d\n",
+ ep->end_point.name, request,
+ req->request.actual, req->request.length,
+ request->status);
+ req->request.complete(&req->ep->end_point, &req->request);
+ spin_lock(&musb->lock);
+ ep->busy = busy;
+}
+
+/* ----------------------------------------------------------------------- */
+
+/*
+ * Abort requests queued to an endpoint, using the given status. Synchronous.
+ * The caller has locked the controller, blocked IRQs, and selected this ep.
+ */
+static void nuke(struct musb_ep *ep, const int status)
+{
+ struct musb_request *req = NULL;
+ void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
+
+ ep->busy = 1;
+
+ if (is_dma_capable() && ep->dma) {
+ struct dma_controller *c = ep->musb->dma_controller;
+ int value;
+ if (ep->is_in) {
+ musb_writew(epio, MUSB_TXCSR,
+ 0 | MUSB_TXCSR_FLUSHFIFO);
+ musb_writew(epio, MUSB_TXCSR,
+ 0 | MUSB_TXCSR_FLUSHFIFO);
+ } else {
+ musb_writew(epio, MUSB_RXCSR,
+ 0 | MUSB_RXCSR_FLUSHFIFO);
+ musb_writew(epio, MUSB_RXCSR,
+ 0 | MUSB_RXCSR_FLUSHFIFO);
+ }
+
+ value = c->channel_abort(ep->dma);
+ DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value);
+ c->channel_release(ep->dma);
+ ep->dma = NULL;
+ }
+
+ while (!list_empty(&(ep->req_list))) {
+ req = container_of(ep->req_list.next, struct musb_request,
+ request.list);
+ musb_g_giveback(ep, &req->request, status);
+ }
+}
+
+/* ----------------------------------------------------------------------- */
+
+/* Data transfers - pure PIO, pure DMA, or mixed mode */
+
+/*
+ * This assumes the separate CPPI engine is responding to DMA requests
+ * from the usb core ... sequenced a bit differently from mentor dma.
+ */
+
+static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
+{
+ if (can_bulk_split(musb, ep->type))
+ return ep->hw_ep->max_packet_sz_tx;
+ else
+ return ep->packet_sz;
+}
+
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Peripheral tx (IN) using Mentor DMA works as follows:
+ Only mode 0 is used for transfers <= wPktSize,
+ mode 1 is used for larger transfers,
+
+ One of the following happens:
+ - Host sends IN token which causes an endpoint interrupt
+ -> TxAvail
+ -> if DMA is currently busy, exit.
+ -> if queue is non-empty, txstate().
+
+ - Request is queued by the gadget driver.
+ -> if queue was previously empty, txstate()
+
+ txstate()
+ -> start
+ /\ -> setup DMA
+ | (data is transferred to the FIFO, then sent out when
+ | IN token(s) are received from the Host.)
+ | -> DMA interrupt on completion
+ | calls TxAvail.
+ | -> stop DMA, ~DmaEenab,
+ | -> set TxPktRdy for last short pkt or zlp
+ | -> Complete Request
+ | -> Continue next request (call txstate)
+ |___________________________________|
+
+ * Non-Mentor DMA engines can of course work differently, such as by
+ * upleveling from irq-per-packet to irq-per-buffer.
+ */
+
+#endif
+
+/*
+ * An endpoint is transmitting data. This can be called either from
+ * the IRQ routine or from ep.queue() to kickstart a request on an
+ * endpoint.
+ *
+ * Context: controller locked, IRQs blocked, endpoint selected
+ */
+static void txstate(struct musb *musb, struct musb_request *req)
+{
+ u8 epnum = req->epnum;
+ struct musb_ep *musb_ep;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ struct usb_request *request;
+ u16 fifo_count = 0, csr;
+ int use_dma = 0;
+
+ musb_ep = req->ep;
+
+ /* we shouldn't get here while DMA is active ... but we do ... */
+ if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
+ DBG(4, "dma pending...\n");
+ return;
+ }
+
+ /* read TXCSR before */
+ csr = musb_readw(epio, MUSB_TXCSR);
+
+ request = &req->request;
+ fifo_count = min(max_ep_writesize(musb, musb_ep),
+ (int)(request->length - request->actual));
+
+ if (csr & MUSB_TXCSR_TXPKTRDY) {
+ DBG(5, "%s old packet still ready , txcsr %03x\n",
+ musb_ep->end_point.name, csr);
+ return;
+ }
+
+ if (csr & MUSB_TXCSR_P_SENDSTALL) {
+ DBG(5, "%s stalling, txcsr %03x\n",
+ musb_ep->end_point.name, csr);
+ return;
+ }
+
+ DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
+ epnum, musb_ep->packet_sz, fifo_count,
+ csr);
+
+#ifndef CONFIG_MUSB_PIO_ONLY
+ if (is_dma_capable() && musb_ep->dma) {
+ struct dma_controller *c = musb->dma_controller;
+
+ use_dma = (request->dma != DMA_ADDR_INVALID);
+
+ /* MUSB_TXCSR_P_ISO is still set correctly */
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+ {
+ size_t request_size;
+
+ /* setup DMA, then program endpoint CSR */
+ request_size = min(request->length,
+ musb_ep->dma->max_len);
+ if (request_size <= musb_ep->packet_sz)
+ musb_ep->dma->desired_mode = 0;
+ else
+ musb_ep->dma->desired_mode = 1;
+
+ use_dma = use_dma && c->channel_program(
+ musb_ep->dma, musb_ep->packet_sz,
+ musb_ep->dma->desired_mode,
+ request->dma, request_size);
+ if (use_dma) {
+ if (musb_ep->dma->desired_mode == 0) {
+ /* ASSERT: DMAENAB is clear */
+ csr &= ~(MUSB_TXCSR_AUTOSET |
+ MUSB_TXCSR_DMAMODE);
+ csr |= (MUSB_TXCSR_DMAENAB |
+ MUSB_TXCSR_MODE);
+ /* against programming guide */
+ } else
+ csr |= (MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_MODE);
+
+ csr &= ~MUSB_TXCSR_P_UNDERRUN;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
+ }
+
+#elif defined(CONFIG_USB_TI_CPPI_DMA)
+ /* program endpoint CSR first, then setup DMA */
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_P_UNDERRUN
+ | MUSB_TXCSR_TXPKTRDY);
+ csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_DMAENAB;
+ musb_writew(epio, MUSB_TXCSR,
+ (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
+ | csr);
+
+ /* ensure writebuffer is empty */
+ csr = musb_readw(epio, MUSB_TXCSR);
+
+ /* NOTE host side sets DMAENAB later than this; both are
+ * OK since the transfer dma glue (between CPPI and Mentor
+ * fifos) just tells CPPI it could start. Data only moves
+ * to the USB TX fifo when both fifos are ready.
+ */
+
+ /* "mode" is irrelevant here; handle terminating ZLPs like
+ * PIO does, since the hardware RNDIS mode seems unreliable
+ * except for the last-packet-is-already-short case.
+ */
+ use_dma = use_dma && c->channel_program(
+ musb_ep->dma, musb_ep->packet_sz,
+ 0,
+ request->dma,
+ request->length);
+ if (!use_dma) {
+ c->channel_release(musb_ep->dma);
+ musb_ep->dma = NULL;
+ /* ASSERT: DMAENAB clear */
+ csr &= ~(MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
+ /* invariant: prequest->buf is non-null */
+ }
+#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
+ use_dma = use_dma && c->channel_program(
+ musb_ep->dma, musb_ep->packet_sz,
+ request->zero,
+ request->dma,
+ request->length);
+#endif
+ }
+#endif
+
+ if (!use_dma) {
+ musb_write_fifo(musb_ep->hw_ep, fifo_count,
+ (u8 *) (request->buf + request->actual));
+ request->actual += fifo_count;
+ csr |= MUSB_TXCSR_TXPKTRDY;
+ csr &= ~MUSB_TXCSR_P_UNDERRUN;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
+
+ /* host may already have the data when this message shows... */
+ DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
+ musb_ep->end_point.name, use_dma ? "dma" : "pio",
+ request->actual, request->length,
+ musb_readw(epio, MUSB_TXCSR),
+ fifo_count,
+ musb_readw(epio, MUSB_TXMAXP));
+}
+
+/*
+ * FIFO state update (e.g. data ready).
+ * Called from IRQ, with controller locked.
+ */
+void musb_g_tx(struct musb *musb, u8 epnum)
+{
+ u16 csr;
+ struct usb_request *request;
+ u8 __iomem *mbase = musb->mregs;
+ struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ struct dma_channel *dma;
+
+ musb_ep_select(mbase, epnum);
+ request = next_request(musb_ep);
+
+ csr = musb_readw(epio, MUSB_TXCSR);
+ DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
+
+ dma = is_dma_capable() ? musb_ep->dma : NULL;
+ do {
+ /* REVISIT for high bandwidth, MUSB_TXCSR_P_INCOMPTX
+ * probably rates reporting as a host error
+ */
+ if (csr & MUSB_TXCSR_P_SENTSTALL) {
+ csr |= MUSB_TXCSR_P_WZC_BITS;
+ csr &= ~MUSB_TXCSR_P_SENTSTALL;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ musb->dma_controller->channel_abort(dma);
+ }
+
+ if (request)
+ musb_g_giveback(musb_ep, request, -EPIPE);
+
+ break;
+ }
+
+ if (csr & MUSB_TXCSR_P_UNDERRUN) {
+ /* we NAKed, no big deal ... little reason to care */
+ csr |= MUSB_TXCSR_P_WZC_BITS;
+ csr &= ~(MUSB_TXCSR_P_UNDERRUN
+ | MUSB_TXCSR_TXPKTRDY);
+ musb_writew(epio, MUSB_TXCSR, csr);
+ DBG(20, "underrun on ep%d, req %p\n", epnum, request);
+ }
+
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ /* SHOULD NOT HAPPEN ... has with cppi though, after
+ * changing SENDSTALL (and other cases); harmless?
+ */
+ DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
+ break;
+ }
+
+ if (request) {
+ u8 is_dma = 0;
+
+ if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
+ is_dma = 1;
+ csr |= MUSB_TXCSR_P_WZC_BITS;
+ csr &= ~(MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_P_UNDERRUN
+ | MUSB_TXCSR_TXPKTRDY);
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* ensure writebuffer is empty */
+ csr = musb_readw(epio, MUSB_TXCSR);
+ request->actual += musb_ep->dma->actual_len;
+ DBG(4, "TXCSR%d %04x, dma off, "
+ "len %zu, req %p\n",
+ epnum, csr,
+ musb_ep->dma->actual_len,
+ request);
+ }
+
+ if (is_dma || request->actual == request->length) {
+
+ /* First, maybe a terminating short packet.
+ * Some DMA engines might handle this by
+ * themselves.
+ */
+ if ((request->zero
+ && request->length
+ && (request->length
+ % musb_ep->packet_sz)
+ == 0)
+#ifdef CONFIG_USB_INVENTRA_DMA
+ || (is_dma &&
+ ((!dma->desired_mode) ||
+ (request->actual &
+ (musb_ep->packet_sz - 1))))
+#endif
+ ) {
+ /* on dma completion, fifo may not
+ * be available yet ...
+ */
+ if (csr & MUSB_TXCSR_TXPKTRDY)
+ break;
+
+ DBG(4, "sending zero pkt\n");
+ musb_writew(epio, MUSB_TXCSR,
+ MUSB_TXCSR_MODE
+ | MUSB_TXCSR_TXPKTRDY);
+ request->zero = 0;
+ }
+
+ /* ... or if not, then complete it */
+ musb_g_giveback(musb_ep, request, 0);
+
+ /* kickstart next transfer if appropriate;
+ * the packet that just completed might not
+ * be transmitted for hours or days.
+ * REVISIT for double buffering...
+ * FIXME revisit for stalls too...
+ */
+ musb_ep_select(mbase, epnum);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_FIFONOTEMPTY)
+ break;
+ request = musb_ep->desc
+ ? next_request(musb_ep)
+ : NULL;
+ if (!request) {
+ DBG(4, "%s idle now\n",
+ musb_ep->end_point.name);
+ break;
+ }
+ }
+
+ txstate(musb, to_musb_request(request));
+ }
+
+ } while (0);
+}
+
+/* ------------------------------------------------------------ */
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Peripheral rx (OUT) using Mentor DMA works as follows:
+ - Only mode 0 is used.
+
+ - Request is queued by the gadget class driver.
+ -> if queue was previously empty, rxstate()
+
+ - Host sends OUT token which causes an endpoint interrupt
+ /\ -> RxReady
+ | -> if request queued, call rxstate
+ | /\ -> setup DMA
+ | | -> DMA interrupt on completion
+ | | -> RxReady
+ | | -> stop DMA
+ | | -> ack the read
+ | | -> if data recd = max expected
+ | | by the request, or host
+ | | sent a short packet,
+ | | complete the request,
+ | | and start the next one.
+ | |_____________________________________|
+ | else just wait for the host
+ | to send the next OUT token.
+ |__________________________________________________|
+
+ * Non-Mentor DMA engines can of course work differently.
+ */
+
+#endif
+
+/*
+ * Context: controller locked, IRQs blocked, endpoint selected
+ */
+static void rxstate(struct musb *musb, struct musb_request *req)
+{
+ u16 csr = 0;
+ const u8 epnum = req->epnum;
+ struct usb_request *request = &req->request;
+ struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ u16 fifo_count = 0;
+ u16 len = musb_ep->packet_sz;
+
+ csr = musb_readw(epio, MUSB_RXCSR);
+
+ if (is_cppi_enabled() && musb_ep->dma) {
+ struct dma_controller *c = musb->dma_controller;
+ struct dma_channel *channel = musb_ep->dma;
+
+ /* NOTE: CPPI won't actually stop advancing the DMA
+ * queue after short packet transfers, so this is almost
+ * always going to run as IRQ-per-packet DMA so that
+ * faults will be handled correctly.
+ */
+ if (c->channel_program(channel,
+ musb_ep->packet_sz,
+ !request->short_not_ok,
+ request->dma + request->actual,
+ request->length - request->actual)) {
+
+ /* make sure that if an rxpkt arrived after the irq,
+ * the cppi engine will be ready to take it as soon
+ * as DMA is enabled
+ */
+ csr &= ~(MUSB_RXCSR_AUTOCLEAR
+ | MUSB_RXCSR_DMAMODE);
+ csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ return;
+ }
+ }
+
+ if (csr & MUSB_RXCSR_RXPKTRDY) {
+ len = musb_readw(epio, MUSB_RXCOUNT);
+ if (request->actual < request->length) {
+#ifdef CONFIG_USB_INVENTRA_DMA
+ if (is_dma_capable() && musb_ep->dma) {
+ struct dma_controller *c;
+ struct dma_channel *channel;
+ int use_dma = 0;
+
+ c = musb->dma_controller;
+ channel = musb_ep->dma;
+
+ /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
+ * mode 0 only. So we do not get endpoint interrupts due to DMA
+ * completion. We only get interrupts from DMA controller.
+ *
+ * We could operate in DMA mode 1 if we knew the size of the transfer
+ * in advance. For mass storage class, request->length = what the host
+ * sends, so that'd work. But for pretty much everything else,
+ * request->length is routinely more than what the host sends. For
+ * most of these gadgets, the end of the transfer is signified either by
+ * a short packet, or by filling the last byte of the buffer. (Sending
+ * extra data in that last packet should trigger an overflow fault.)
+ * But in mode 1, we don't get a DMA completion interrupt for short
+ * packets.
+ *
+ * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
+ * to get endpoint interrupt on every DMA req, but that didn't seem
+ * to work reliably.
+ *
+ * REVISIT an updated g_file_storage can set req->short_not_ok, which
+ * then becomes usable as a runtime "use mode 1" hint...
+ */
+
+ csr |= MUSB_RXCSR_DMAENAB;
+#ifdef USE_MODE1
+ csr |= MUSB_RXCSR_AUTOCLEAR;
+ /* csr |= MUSB_RXCSR_DMAMODE; */
+
+ /* this special sequence (enabling and then
+ * disabling MUSB_RXCSR_DMAMODE) is required
+ * to get DMAReq to activate
+ */
+ musb_writew(epio, MUSB_RXCSR,
+ csr | MUSB_RXCSR_DMAMODE);
+#endif
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ if (request->actual < request->length) {
+ int transfer_size = 0;
+#ifdef USE_MODE1
+ transfer_size = min(request->length,
+ channel->max_len);
+#else
+ transfer_size = len;
+#endif
+ if (transfer_size <= musb_ep->packet_sz)
+ musb_ep->dma->desired_mode = 0;
+ else
+ musb_ep->dma->desired_mode = 1;
+
+ use_dma = c->channel_program(
+ channel,
+ musb_ep->packet_sz,
+ channel->desired_mode,
+ request->dma
+ + request->actual,
+ transfer_size);
+ }
+
+ if (use_dma)
+ return;
+ }
+#endif /* Mentor's DMA */
+
+ fifo_count = request->length - request->actual;
+ DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
+ musb_ep->end_point.name,
+ len, fifo_count,
+ musb_ep->packet_sz);
+
+ fifo_count = min(len, fifo_count);
+
+#ifdef CONFIG_USB_TUSB_OMAP_DMA
+ if (tusb_dma_omap() && musb_ep->dma) {
+ struct dma_controller *c = musb->dma_controller;
+ struct dma_channel *channel = musb_ep->dma;
+ u32 dma_addr = request->dma + request->actual;
+ int ret;
+
+ ret = c->channel_program(channel,
+ musb_ep->packet_sz,
+ channel->desired_mode,
+ dma_addr,
+ fifo_count);
+ if (ret)
+ return;
+ }
+#endif
+
+ musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
+ (request->buf + request->actual));
+ request->actual += fifo_count;
+
+ /* REVISIT if we left anything in the fifo, flush
+ * it and report -EOVERFLOW
+ */
+
+ /* ack the read! */
+ csr |= MUSB_RXCSR_P_WZC_BITS;
+ csr &= ~MUSB_RXCSR_RXPKTRDY;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+ }
+
+ /* reached the end, or a short packet was detected */
+ if (request->actual == request->length || len < musb_ep->packet_sz)
+ musb_g_giveback(musb_ep, request, 0);
+}
+
+/*
+ * Data ready for a request; called from IRQ
+ */
+void musb_g_rx(struct musb *musb, u8 epnum)
+{
+ u16 csr;
+ struct usb_request *request;
+ void __iomem *mbase = musb->mregs;
+ struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ struct dma_channel *dma;
+
+ musb_ep_select(mbase, epnum);
+
+ request = next_request(musb_ep);
+
+ csr = musb_readw(epio, MUSB_RXCSR);
+ dma = is_dma_capable() ? musb_ep->dma : NULL;
+
+ DBG(4, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
+ csr, dma ? " (dma)" : "", request);
+
+ if (csr & MUSB_RXCSR_P_SENTSTALL) {
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ (void) musb->dma_controller->channel_abort(dma);
+ request->actual += musb_ep->dma->actual_len;
+ }
+
+ csr |= MUSB_RXCSR_P_WZC_BITS;
+ csr &= ~MUSB_RXCSR_P_SENTSTALL;
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ if (request)
+ musb_g_giveback(musb_ep, request, -EPIPE);
+ goto done;
+ }
+
+ if (csr & MUSB_RXCSR_P_OVERRUN) {
+ /* csr |= MUSB_RXCSR_P_WZC_BITS; */
+ csr &= ~MUSB_RXCSR_P_OVERRUN;
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ DBG(3, "%s iso overrun on %p\n", musb_ep->name, request);
+ if (request && request->status == -EINPROGRESS)
+ request->status = -EOVERFLOW;
+ }
+ if (csr & MUSB_RXCSR_INCOMPRX) {
+ /* REVISIT not necessarily an error */
+ DBG(4, "%s, incomprx\n", musb_ep->end_point.name);
+ }
+
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ /* "should not happen"; likely RXPKTRDY pending for DMA */
+ DBG((csr & MUSB_RXCSR_DMAENAB) ? 4 : 1,
+ "%s busy, csr %04x\n",
+ musb_ep->end_point.name, csr);
+ goto done;
+ }
+
+ if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
+ csr &= ~(MUSB_RXCSR_AUTOCLEAR
+ | MUSB_RXCSR_DMAENAB
+ | MUSB_RXCSR_DMAMODE);
+ musb_writew(epio, MUSB_RXCSR,
+ MUSB_RXCSR_P_WZC_BITS | csr);
+
+ request->actual += musb_ep->dma->actual_len;
+
+ DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
+ epnum, csr,
+ musb_readw(epio, MUSB_RXCSR),
+ musb_ep->dma->actual_len, request);
+
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
+ /* Autoclear doesn't clear RxPktRdy for short packets */
+ if ((dma->desired_mode == 0)
+ || (dma->actual_len
+ & (musb_ep->packet_sz - 1))) {
+ /* ack the read! */
+ csr &= ~MUSB_RXCSR_RXPKTRDY;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+ /* incomplete, and not short? wait for the next OUT packet */
+ if ((request->actual < request->length)
+ && (musb_ep->dma->actual_len
+ == musb_ep->packet_sz))
+ goto done;
+#endif
+ musb_g_giveback(musb_ep, request, 0);
+
+ request = next_request(musb_ep);
+ if (!request)
+ goto done;
+
+ /* don't start more i/o till the stall clears */
+ musb_ep_select(mbase, epnum);
+ csr = musb_readw(epio, MUSB_RXCSR);
+ if (csr & MUSB_RXCSR_P_SENDSTALL)
+ goto done;
+ }
+
+
+ /* analyze request if the ep is hot */
+ if (request)
+ rxstate(musb, to_musb_request(request));
+ else
+ DBG(3, "packet waiting for %s%s request\n",
+ musb_ep->desc ? "" : "inactive ",
+ musb_ep->end_point.name);
+
+done:
+ return;
+}
+
+/* ------------------------------------------------------------ */
+
+static int musb_gadget_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ unsigned long flags;
+ struct musb_ep *musb_ep;
+ struct musb_hw_ep *hw_ep;
+ void __iomem *regs;
+ struct musb *musb;
+ void __iomem *mbase;
+ u8 epnum;
+ u16 csr;
+ unsigned tmp;
+ int status = -EINVAL;
+
+ if (!ep || !desc)
+ return -EINVAL;
+
+ musb_ep = to_musb_ep(ep);
+ hw_ep = musb_ep->hw_ep;
+ regs = hw_ep->regs;
+ musb = musb_ep->musb;
+ mbase = musb->mregs;
+ epnum = musb_ep->current_epnum;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (musb_ep->desc) {
+ status = -EBUSY;
+ goto fail;
+ }
+ musb_ep->type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+ /* check direction and (later) maxpacket size against endpoint */
+ if ((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != epnum)
+ goto fail;
+
+ /* REVISIT this rules out high bandwidth periodic transfers */
+ tmp = le16_to_cpu(desc->wMaxPacketSize);
+ if (tmp & ~0x07ff)
+ goto fail;
+ musb_ep->packet_sz = tmp;
+
+ /* enable the interrupts for the endpoint, set the endpoint
+ * packet size (or fail), set the mode, clear the fifo
+ */
+ musb_ep_select(mbase, epnum);
+ if (desc->bEndpointAddress & USB_DIR_IN) {
+ u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);
+
+ if (hw_ep->is_shared_fifo)
+ musb_ep->is_in = 1;
+ if (!musb_ep->is_in)
+ goto fail;
+ if (tmp > hw_ep->max_packet_sz_tx)
+ goto fail;
+
+ int_txe |= (1 << epnum);
+ musb_writew(mbase, MUSB_INTRTXE, int_txe);
+
+ /* REVISIT if can_bulk_split(), use it by updating "tmp";
+ * likewise high bandwidth periodic tx
+ */
+ musb_writew(regs, MUSB_TXMAXP, tmp);
+
+ csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
+ if (musb_readw(regs, MUSB_TXCSR)
+ & MUSB_TXCSR_FIFONOTEMPTY)
+ csr |= MUSB_TXCSR_FLUSHFIFO;
+ if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
+ csr |= MUSB_TXCSR_P_ISO;
+
+ /* set twice in case of double buffering */
+ musb_writew(regs, MUSB_TXCSR, csr);
+ /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
+ musb_writew(regs, MUSB_TXCSR, csr);
+
+ } else {
+ u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);
+
+ if (hw_ep->is_shared_fifo)
+ musb_ep->is_in = 0;
+ if (musb_ep->is_in)
+ goto fail;
+ if (tmp > hw_ep->max_packet_sz_rx)
+ goto fail;
+
+ int_rxe |= (1 << epnum);
+ musb_writew(mbase, MUSB_INTRRXE, int_rxe);
+
+ /* REVISIT if can_bulk_combine(), use it by updating "tmp";
+ * likewise high bandwidth periodic rx
+ */
+ musb_writew(regs, MUSB_RXMAXP, tmp);
+
+ /* force shared fifo to OUT-only mode */
+ if (hw_ep->is_shared_fifo) {
+ csr = musb_readw(regs, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
+ musb_writew(regs, MUSB_TXCSR, csr);
+ }
+
+ csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
+ if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
+ csr |= MUSB_RXCSR_P_ISO;
+ else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
+ csr |= MUSB_RXCSR_DISNYET;
+
+ /* set twice in case of double buffering */
+ musb_writew(regs, MUSB_RXCSR, csr);
+ musb_writew(regs, MUSB_RXCSR, csr);
+ }
+
+ /* NOTE: all the I/O code _should_ work fine without DMA, in case
+ * for some reason you run out of channels here.
+ */
+ if (is_dma_capable() && musb->dma_controller) {
+ struct dma_controller *c = musb->dma_controller;
+
+ musb_ep->dma = c->channel_alloc(c, hw_ep,
+ (desc->bEndpointAddress & USB_DIR_IN));
+ } else
+ musb_ep->dma = NULL;
+
+ musb_ep->desc = desc;
+ musb_ep->busy = 0;
+ status = 0;
+
+ pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
+ musb_driver_name, musb_ep->end_point.name,
+ ({ char *s; switch (musb_ep->type) {
+ case USB_ENDPOINT_XFER_BULK: s = "bulk"; break;
+ case USB_ENDPOINT_XFER_INT: s = "int"; break;
+ default: s = "iso"; break;
+ }; s; }),
+ musb_ep->is_in ? "IN" : "OUT",
+ musb_ep->dma ? "dma, " : "",
+ musb_ep->packet_sz);
+
+ schedule_work(&musb->irq_work);
+
+fail:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return status;
+}
+
+/*
+ * Disable an endpoint, flushing all queued requests.
+ */
+static int musb_gadget_disable(struct usb_ep *ep)
+{
+ unsigned long flags;
+ struct musb *musb;
+ u8 epnum;
+ struct musb_ep *musb_ep;
+ void __iomem *epio;
+ int status = 0;
+
+ musb_ep = to_musb_ep(ep);
+ musb = musb_ep->musb;
+ epnum = musb_ep->current_epnum;
+ epio = musb->endpoints[epnum].regs;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ musb_ep_select(musb->mregs, epnum);
+
+ /* zero the endpoint sizes */
+ if (musb_ep->is_in) {
+ u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
+ int_txe &= ~(1 << epnum);
+ musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
+ musb_writew(epio, MUSB_TXMAXP, 0);
+ } else {
+ u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
+ int_rxe &= ~(1 << epnum);
+ musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
+ musb_writew(epio, MUSB_RXMAXP, 0);
+ }
+
+ musb_ep->desc = NULL;
+
+ /* abort all pending DMA and requests */
+ nuke(musb_ep, -ESHUTDOWN);
+
+ schedule_work(&musb->irq_work);
+
+ spin_unlock_irqrestore(&(musb->lock), flags);
+
+ DBG(2, "%s\n", musb_ep->end_point.name);
+
+ return status;
+}
+
+/*
+ * Allocate a request for an endpoint.
+ * Reused by ep0 code.
+ */
+struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ struct musb_request *request = NULL;
+
+ request = kzalloc(sizeof *request, gfp_flags);
+ if (request) {
+ INIT_LIST_HEAD(&request->request.list);
+ request->request.dma = DMA_ADDR_INVALID;
+ request->epnum = musb_ep->current_epnum;
+ request->ep = musb_ep;
+ }
+
+ return &request->request;
+}
+
+/*
+ * Free a request
+ * Reused by ep0 code.
+ */
+void musb_free_request(struct usb_ep *ep, struct usb_request *req)
+{
+ kfree(to_musb_request(req));
+}
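+
+/* Illustrative sketch, not part of this driver: function drivers are
+ * expected to reach the two helpers above only through the generic
+ * gadget wrappers, roughly as below ("my_complete" is a hypothetical
+ * completion handler):
+ *
+ *	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+ *	if (req) {
+ *		req->buf = buf;
+ *		req->length = len;
+ *		req->complete = my_complete;
+ *	}
+ *	...
+ *	usb_ep_free_request(ep, req);
+ */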
+
+static LIST_HEAD(buffers);
+
+struct free_record {
+ struct list_head list;
+ struct device *dev;
+ unsigned bytes;
+ dma_addr_t dma;
+};
+
+/*
+ * Context: controller locked, IRQs blocked.
+ */
+static void musb_ep_restart(struct musb *musb, struct musb_request *req)
+{
+ DBG(3, "<== %s request %p len %u on hw_ep%d\n",
+ req->tx ? "TX/IN" : "RX/OUT",
+ &req->request, req->request.length, req->epnum);
+
+ musb_ep_select(musb->mregs, req->epnum);
+ if (req->tx)
+ txstate(musb, req);
+ else
+ rxstate(musb, req);
+}
+
+static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
+ gfp_t gfp_flags)
+{
+ struct musb_ep *musb_ep;
+ struct musb_request *request;
+ struct musb *musb;
+ int status = 0;
+ unsigned long lockflags;
+
+ if (!ep || !req)
+ return -EINVAL;
+ if (!req->buf)
+ return -ENODATA;
+
+ musb_ep = to_musb_ep(ep);
+ musb = musb_ep->musb;
+
+ request = to_musb_request(req);
+ request->musb = musb;
+
+ if (request->ep != musb_ep)
+ return -EINVAL;
+
+ DBG(4, "<== to %s request=%p\n", ep->name, req);
+
+ /* request is mine now... */
+ request->request.actual = 0;
+ request->request.status = -EINPROGRESS;
+ request->epnum = musb_ep->current_epnum;
+ request->tx = musb_ep->is_in;
+
+ if (is_dma_capable() && musb_ep->dma) {
+ if (request->request.dma == DMA_ADDR_INVALID) {
+ request->request.dma = dma_map_single(
+ musb->controller,
+ request->request.buf,
+ request->request.length,
+ request->tx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ request->mapped = 1;
+ } else {
+ dma_sync_single_for_device(musb->controller,
+ request->request.dma,
+ request->request.length,
+ request->tx
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ request->mapped = 0;
+ }
+ } else if (!req->buf) {
+ return -ENODATA;
+ } else
+ request->mapped = 0;
+
+ spin_lock_irqsave(&musb->lock, lockflags);
+
+ /* don't queue if the ep is down */
+ if (!musb_ep->desc) {
+ DBG(4, "req %p queued to %s while ep %s\n",
+ req, ep->name, "disabled");
+ status = -ESHUTDOWN;
+ goto cleanup;
+ }
+
+ /* add request to the list */
+ list_add_tail(&(request->request.list), &(musb_ep->req_list));
+
+ /* if this is the head of the queue, start i/o ... */
+ if (!musb_ep->busy && &request->request.list == musb_ep->req_list.next)
+ musb_ep_restart(musb, request);
+
+cleanup:
+ spin_unlock_irqrestore(&musb->lock, lockflags);
+ return status;
+}
+
+static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ struct usb_request *r;
+ unsigned long flags;
+ int status = 0;
+ struct musb *musb = musb_ep->musb;
+
+ if (!ep || !request || to_musb_request(request)->ep != musb_ep)
+ return -EINVAL;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ list_for_each_entry(r, &musb_ep->req_list, list) {
+ if (r == request)
+ break;
+ }
+ if (r != request) {
+ DBG(3, "request %p not queued to %s\n", request, ep->name);
+ status = -EINVAL;
+ goto done;
+ }
+
+ /* if the hardware doesn't have the request, easy ... */
+ if (musb_ep->req_list.next != &request->list || musb_ep->busy)
+ musb_g_giveback(musb_ep, request, -ECONNRESET);
+
+ /* ... else abort the dma transfer ... */
+ else if (is_dma_capable() && musb_ep->dma) {
+ struct dma_controller *c = musb->dma_controller;
+
+ musb_ep_select(musb->mregs, musb_ep->current_epnum);
+ if (c->channel_abort)
+ status = c->channel_abort(musb_ep->dma);
+ else
+ status = -EBUSY;
+ if (status == 0)
+ musb_g_giveback(musb_ep, request, -ECONNRESET);
+ } else {
+ /* NOTE: by sticking to easily tested hardware/driver states,
+ * we leave counting of in-flight packets imprecise.
+ */
+ musb_g_giveback(musb_ep, request, -ECONNRESET);
+ }
+
+done:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return status;
+}
+
+/*
+ * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
+ * data, but will still queue requests.
+ *
+ * exported to ep0 code
+ */
+int musb_gadget_set_halt(struct usb_ep *ep, int value)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ u8 epnum = musb_ep->current_epnum;
+ struct musb *musb = musb_ep->musb;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ void __iomem *mbase;
+ unsigned long flags;
+ u16 csr;
+ struct musb_request *request = NULL;
+ int status = 0;
+
+ if (!ep)
+ return -EINVAL;
+ mbase = musb->mregs;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) {
+ status = -EINVAL;
+ goto done;
+ }
+
+ musb_ep_select(mbase, epnum);
+
+ /* cannot portably stall with non-empty FIFO */
+ request = to_musb_request(next_request(musb_ep));
+ if (value && musb_ep->is_in) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
+ DBG(3, "%s fifo busy, cannot halt\n", ep->name);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return -EAGAIN;
+ }
+
+ }
+
+ /* set/clear the stall and toggle bits */
+ DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
+ if (musb_ep->is_in) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_FIFONOTEMPTY)
+ csr |= MUSB_TXCSR_FLUSHFIFO;
+ csr |= MUSB_TXCSR_P_WZC_BITS
+ | MUSB_TXCSR_CLRDATATOG;
+ if (value)
+ csr |= MUSB_TXCSR_P_SENDSTALL;
+ else
+ csr &= ~(MUSB_TXCSR_P_SENDSTALL
+ | MUSB_TXCSR_P_SENTSTALL);
+ csr &= ~MUSB_TXCSR_TXPKTRDY;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ } else {
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_P_WZC_BITS
+ | MUSB_RXCSR_FLUSHFIFO
+ | MUSB_RXCSR_CLRDATATOG;
+ if (value)
+ csr |= MUSB_RXCSR_P_SENDSTALL;
+ else
+ csr &= ~(MUSB_RXCSR_P_SENDSTALL
+ | MUSB_RXCSR_P_SENTSTALL);
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+done:
+
+ /* maybe start the first request in the queue */
+ if (!musb_ep->busy && !value && request) {
+ DBG(3, "restarting the request\n");
+ musb_ep_restart(musb, request);
+ }
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return status;
+}
+
+static int musb_gadget_fifo_status(struct usb_ep *ep)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ void __iomem *epio = musb_ep->hw_ep->regs;
+ int retval = -EINVAL;
+
+ if (musb_ep->desc && !musb_ep->is_in) {
+ struct musb *musb = musb_ep->musb;
+ int epnum = musb_ep->current_epnum;
+ void __iomem *mbase = musb->mregs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb_ep_select(mbase, epnum);
+ /* FIXME return zero unless RXPKTRDY is set */
+ retval = musb_readw(epio, MUSB_RXCOUNT);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+ }
+ return retval;
+}
+
+static void musb_gadget_fifo_flush(struct usb_ep *ep)
+{
+ struct musb_ep *musb_ep = to_musb_ep(ep);
+ struct musb *musb = musb_ep->musb;
+ u8 epnum = musb_ep->current_epnum;
+ void __iomem *epio = musb->endpoints[epnum].regs;
+ void __iomem *mbase;
+ unsigned long flags;
+ u16 csr, int_txe;
+
+ mbase = musb->mregs;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ musb_ep_select(mbase, (u8) epnum);
+
+ /* disable interrupts */
+ int_txe = musb_readw(mbase, MUSB_INTRTXE);
+ musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
+
+ if (musb_ep->is_in) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
+ csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
+ } else {
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+ /* re-enable interrupt */
+ musb_writew(mbase, MUSB_INTRTXE, int_txe);
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static const struct usb_ep_ops musb_ep_ops = {
+ .enable = musb_gadget_enable,
+ .disable = musb_gadget_disable,
+ .alloc_request = musb_alloc_request,
+ .free_request = musb_free_request,
+ .queue = musb_gadget_queue,
+ .dequeue = musb_gadget_dequeue,
+ .set_halt = musb_gadget_set_halt,
+ .fifo_status = musb_gadget_fifo_status,
+ .fifo_flush = musb_gadget_fifo_flush
+};
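+
+/* Illustrative sketch (assumption, not musb-specific code): the gadget
+ * core routes the generic usb_ep_*() calls to the ops above, so a
+ * typical function-driver sequence maps as follows:
+ *
+ *	usb_ep_enable(ep, desc);		--> musb_gadget_enable()
+ *	usb_ep_queue(ep, req, GFP_ATOMIC);	--> musb_gadget_queue()
+ *	usb_ep_dequeue(ep, req);		--> musb_gadget_dequeue()
+ *	usb_ep_set_halt(ep);			--> musb_gadget_set_halt(ep, 1)
+ *	usb_ep_disable(ep);			--> musb_gadget_disable()
+ */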
+
+/* ----------------------------------------------------------------------- */
+
+static int musb_gadget_get_frame(struct usb_gadget *gadget)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+
+ return (int)musb_readw(musb->mregs, MUSB_FRAME);
+}
+
+static int musb_gadget_wakeup(struct usb_gadget *gadget)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+ void __iomem *mregs = musb->mregs;
+ unsigned long flags;
+ int status = -EINVAL;
+ u8 power, devctl;
+ int retries;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_B_PERIPHERAL:
+ /* NOTE: OTG state machine doesn't include B_SUSPENDED;
+ * that's part of the standard usb 1.1 state machine, and
+ * doesn't affect OTG transitions.
+ */
+ if (musb->may_wakeup && musb->is_suspended)
+ break;
+ goto done;
+ case OTG_STATE_B_IDLE:
+ /* Start SRP ... OTG not required. */
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ DBG(2, "Sending SRP: devctl: %02x\n", devctl);
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(mregs, MUSB_DEVCTL, devctl);
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ retries = 100;
+ while (!(devctl & MUSB_DEVCTL_SESSION)) {
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ if (retries-- < 1)
+ break;
+ }
+ retries = 10000;
+ while (devctl & MUSB_DEVCTL_SESSION) {
+ devctl = musb_readb(mregs, MUSB_DEVCTL);
+ if (retries-- < 1)
+ break;
+ }
+
+ /* Block idling for at least 1s */
+ musb_platform_try_idle(musb,
+ jiffies + msecs_to_jiffies(1000));
+
+ status = 0;
+ goto done;
+ default:
+ DBG(2, "Unhandled wake: %s\n", otg_state_string(musb));
+ goto done;
+ }
+
+ status = 0;
+
+ power = musb_readb(mregs, MUSB_POWER);
+ power |= MUSB_POWER_RESUME;
+ musb_writeb(mregs, MUSB_POWER, power);
+ DBG(2, "issue wakeup\n");
+
+ /* FIXME do this next chunk in a timer callback, no udelay */
+ mdelay(2);
+
+ power = musb_readb(mregs, MUSB_POWER);
+ power &= ~MUSB_POWER_RESUME;
+ musb_writeb(mregs, MUSB_POWER, power);
+done:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return status;
+}
+
+static int
+musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+
+ musb->is_self_powered = !!is_selfpowered;
+ return 0;
+}
+
+static void musb_pullup(struct musb *musb, int is_on)
+{
+ u8 power;
+
+ power = musb_readb(musb->mregs, MUSB_POWER);
+ if (is_on)
+ power |= MUSB_POWER_SOFTCONN;
+ else
+ power &= ~MUSB_POWER_SOFTCONN;
+
+ /* FIXME if on, HdrcStart; if off, HdrcStop */
+
+ DBG(3, "gadget %s D+ pullup %s\n",
+ musb->gadget_driver->function, is_on ? "on" : "off");
+ musb_writeb(musb->mregs, MUSB_POWER, power);
+}
+
+#if 0
+static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+ DBG(2, "<= %s =>\n", __func__);
+
+ /*
+ * FIXME iff driver's softconnect flag is set (as it is during probe,
+ * though that can clear it), just musb_pullup().
+ */
+
+ return -EINVAL;
+}
+#endif
+
+static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+
+ if (!musb->xceiv.set_power)
+ return -EOPNOTSUPP;
+ return otg_set_power(&musb->xceiv, mA);
+}
+
+static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct musb *musb = gadget_to_musb(gadget);
+ unsigned long flags;
+
+ is_on = !!is_on;
+
+ /* NOTE: this assumes we are sensing vbus; we'd rather
+ * not pullup unless the B-session is active.
+ */
+ spin_lock_irqsave(&musb->lock, flags);
+ if (is_on != musb->softconnect) {
+ musb->softconnect = is_on;
+ musb_pullup(musb, is_on);
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return 0;
+}
+
+static const struct usb_gadget_ops musb_gadget_operations = {
+ .get_frame = musb_gadget_get_frame,
+ .wakeup = musb_gadget_wakeup,
+ .set_selfpowered = musb_gadget_set_self_powered,
+ /* .vbus_session = musb_gadget_vbus_session, */
+ .vbus_draw = musb_gadget_vbus_draw,
+ .pullup = musb_gadget_pullup,
+};
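+
+/* Illustrative sketch (assumption): these controller-level ops are
+ * reached through the inline wrappers in <linux/usb/gadget.h>, e.g.
+ *
+ *	usb_gadget_frame_number(&musb->g);	--> musb_gadget_get_frame()
+ *	usb_gadget_wakeup(&musb->g);		--> musb_gadget_wakeup()
+ *	usb_gadget_vbus_draw(&musb->g, 100);	--> musb_gadget_vbus_draw()
+ *	usb_gadget_connect(&musb->g);		--> musb_gadget_pullup(g, 1)
+ */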
+
+/* ----------------------------------------------------------------------- */
+
+/* Registration */
+
+/* Only this registration code "knows" the rule (from USB standards)
+ * about there being only one external upstream port. It assumes
+ * all peripheral ports are external...
+ */
+static struct musb *the_gadget;
+
+static void musb_gadget_release(struct device *dev)
+{
+ /* kref_put(WHAT) */
+ dev_dbg(dev, "%s\n", __func__);
+}
+
+
+static void __init
+init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
+{
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+
+ memset(ep, 0, sizeof *ep);
+
+ ep->current_epnum = epnum;
+ ep->musb = musb;
+ ep->hw_ep = hw_ep;
+ ep->is_in = is_in;
+
+ INIT_LIST_HEAD(&ep->req_list);
+
+ sprintf(ep->name, "ep%d%s", epnum,
+ (!epnum || hw_ep->is_shared_fifo) ? "" : (
+ is_in ? "in" : "out"));
+ ep->end_point.name = ep->name;
+ INIT_LIST_HEAD(&ep->end_point.ep_list);
+ if (!epnum) {
+ ep->end_point.maxpacket = 64;
+ ep->end_point.ops = &musb_g_ep0_ops;
+ musb->g.ep0 = &ep->end_point;
+ } else {
+ if (is_in)
+ ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
+ else
+ ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
+ ep->end_point.ops = &musb_ep_ops;
+ list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
+ }
+}
+
+/*
+ * Initialize the endpoints exposed to peripheral drivers, with backlinks
+ * to the rest of the driver state.
+ */
+static inline void __init musb_g_init_endpoints(struct musb *musb)
+{
+ u8 epnum;
+ struct musb_hw_ep *hw_ep;
+ unsigned count = 0;
+
+ /* initialize endpoint list just once */
+ INIT_LIST_HEAD(&(musb->g.ep_list));
+
+ for (epnum = 0, hw_ep = musb->endpoints;
+ epnum < musb->nr_endpoints;
+ epnum++, hw_ep++) {
+ if (hw_ep->is_shared_fifo /* || !epnum */) {
+ init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
+ count++;
+ } else {
+ if (hw_ep->max_packet_sz_tx) {
+ init_peripheral_ep(musb, &hw_ep->ep_in,
+ epnum, 1);
+ count++;
+ }
+ if (hw_ep->max_packet_sz_rx) {
+ init_peripheral_ep(musb, &hw_ep->ep_out,
+ epnum, 0);
+ count++;
+ }
+ }
+ }
+}
+
+/* called once during driver setup to initialize and link into
+ * the driver model; memory is zeroed.
+ */
+int __init musb_gadget_setup(struct musb *musb)
+{
+ int status;
+
+ /* REVISIT minor race: if (erroneously) setting up two
+ * musb peripherals at the same time, only the bus lock
+ * is probably held.
+ */
+ if (the_gadget)
+ return -EBUSY;
+ the_gadget = musb;
+
+ musb->g.ops = &musb_gadget_operations;
+ musb->g.is_dualspeed = 1;
+ musb->g.speed = USB_SPEED_UNKNOWN;
+
+ /* this "gadget" abstracts/virtualizes the controller */
+ strcpy(musb->g.dev.bus_id, "gadget");
+ musb->g.dev.parent = musb->controller;
+ musb->g.dev.dma_mask = musb->controller->dma_mask;
+ musb->g.dev.release = musb_gadget_release;
+ musb->g.name = musb_driver_name;
+
+ if (is_otg_enabled(musb))
+ musb->g.is_otg = 1;
+
+ musb_g_init_endpoints(musb);
+
+ musb->is_active = 0;
+ musb_platform_try_idle(musb, 0);
+
+ status = device_register(&musb->g.dev);
+ if (status != 0)
+ the_gadget = NULL;
+ return status;
+}
+
+void musb_gadget_cleanup(struct musb *musb)
+{
+ if (musb != the_gadget)
+ return;
+
+ device_unregister(&musb->g.dev);
+ the_gadget = NULL;
+}
+
+/*
+ * Register the gadget driver. Used by gadget drivers when
+ * registering themselves with the controller.
+ *
+ * -EINVAL something went wrong (not driver)
+ * -EBUSY another gadget is already using the controller
+ * -ENOMEM no memory to perform the operation
+ *
+ * @param driver the gadget driver
+ * @return <0 if error, 0 if everything is fine
+ */
+int usb_gadget_register_driver(struct usb_gadget_driver *driver)
+{
+ int retval;
+ unsigned long flags;
+ struct musb *musb = the_gadget;
+
+ if (!driver
+ || driver->speed != USB_SPEED_HIGH
+ || !driver->bind
+ || !driver->setup)
+ return -EINVAL;
+
+ /* driver must be initialized to support peripheral mode */
+ if (!musb || !(musb->board_mode == MUSB_OTG
+ || musb->board_mode == MUSB_PERIPHERAL)) {
+ DBG(1, "%s, no dev??\n", __func__);
+ return -ENODEV;
+ }
+
+ DBG(3, "registering driver %s\n", driver->function);
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (musb->gadget_driver) {
+ DBG(1, "%s is already bound to %s\n",
+ musb_driver_name,
+ musb->gadget_driver->driver.name);
+ retval = -EBUSY;
+ } else {
+ musb->gadget_driver = driver;
+ musb->g.dev.driver = &driver->driver;
+ driver->driver.bus = NULL;
+ musb->softconnect = 1;
+ retval = 0;
+ }
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ if (retval == 0) {
+ retval = driver->bind(&musb->g);
+ if (retval != 0) {
+ DBG(3, "bind to driver %s failed --> %d\n",
+ driver->driver.name, retval);
+ musb->gadget_driver = NULL;
+ musb->g.dev.driver = NULL;
+ }
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ /* REVISIT always use otg_set_peripheral(), handling
+ * issues including the root hub one below ...
+ */
+ musb->xceiv.gadget = &musb->g;
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ musb->is_active = 1;
+
+ /* FIXME this ignores the softconnect flag. Drivers are
+ * allowed to hold the peripheral inactive until, for example,
+ * userspace hooks up printer hardware or DSP codecs, so
+ * hosts only see fully functional devices.
+ */
+
+ if (!is_otg_enabled(musb))
+ musb_start(musb);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ if (is_otg_enabled(musb)) {
+ DBG(3, "OTG startup...\n");
+
+ /* REVISIT: funcall to other code, which also
+ * handles power budgeting ... this way also
+ * ensures HdrcStart is indirectly called.
+ */
+ retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
+ if (retval < 0) {
+ DBG(1, "add_hcd failed, %d\n", retval);
+ spin_lock_irqsave(&musb->lock, flags);
+ musb->xceiv.gadget = NULL;
+ musb->xceiv.state = OTG_STATE_UNDEFINED;
+ musb->gadget_driver = NULL;
+ musb->g.dev.driver = NULL;
+ spin_unlock_irqrestore(&musb->lock, flags);
+ }
+ }
+ }
+
+ return retval;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
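+
+/* Illustrative sketch, not part of this driver: a peripheral function
+ * driver binds to this controller roughly as below; the "demo_*" names
+ * are hypothetical callbacks.
+ *
+ *	static struct usb_gadget_driver demo_driver = {
+ *		.function	= "demo",
+ *		.speed		= USB_SPEED_HIGH,
+ *		.bind		= demo_bind,
+ *		.unbind		= demo_unbind,
+ *		.setup		= demo_setup,
+ *		.disconnect	= demo_disconnect,
+ *		.driver	= {
+ *			.name	= "demo",
+ *			.owner	= THIS_MODULE,
+ *		},
+ *	};
+ *
+ *	status = usb_gadget_register_driver(&demo_driver);
+ */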
+
+static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
+{
+ int i;
+ struct musb_hw_ep *hw_ep;
+
+ /* don't disconnect if it's not connected */
+ if (musb->g.speed == USB_SPEED_UNKNOWN)
+ driver = NULL;
+ else
+ musb->g.speed = USB_SPEED_UNKNOWN;
+
+ /* deactivate the hardware */
+ if (musb->softconnect) {
+ musb->softconnect = 0;
+ musb_pullup(musb, 0);
+ }
+ musb_stop(musb);
+
+ /* killing any outstanding requests will quiesce the driver;
+ * then report disconnect
+ */
+ if (driver) {
+ for (i = 0, hw_ep = musb->endpoints;
+ i < musb->nr_endpoints;
+ i++, hw_ep++) {
+ musb_ep_select(musb->mregs, i);
+ if (hw_ep->is_shared_fifo /* || !epnum */) {
+ nuke(&hw_ep->ep_in, -ESHUTDOWN);
+ } else {
+ if (hw_ep->max_packet_sz_tx)
+ nuke(&hw_ep->ep_in, -ESHUTDOWN);
+ if (hw_ep->max_packet_sz_rx)
+ nuke(&hw_ep->ep_out, -ESHUTDOWN);
+ }
+ }
+
+ spin_unlock(&musb->lock);
+ driver->disconnect(&musb->g);
+ spin_lock(&musb->lock);
+ }
+}
+
+/*
+ * Unregister the gadget driver. Used by gadget drivers when
+ * unregistering themselves from the controller.
+ *
+ * @param driver the gadget driver to unregister
+ */
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+ unsigned long flags;
+ int retval = 0;
+ struct musb *musb = the_gadget;
+
+ if (!driver || !driver->unbind || !musb)
+ return -EINVAL;
+
+ /* REVISIT always use otg_set_peripheral() here too;
+ * this needs to shut down the OTG engine.
+ */
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+#ifdef CONFIG_USB_MUSB_OTG
+ musb_hnp_stop(musb);
+#endif
+
+ if (musb->gadget_driver == driver) {
+
+ (void) musb_gadget_vbus_draw(&musb->g, 0);
+
+ musb->xceiv.state = OTG_STATE_UNDEFINED;
+ stop_activity(musb, driver);
+
+ DBG(3, "unregistering driver %s\n", driver->function);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ driver->unbind(&musb->g);
+ spin_lock_irqsave(&musb->lock, flags);
+
+ musb->gadget_driver = NULL;
+ musb->g.dev.driver = NULL;
+
+ musb->is_active = 0;
+ musb_platform_try_idle(musb, 0);
+ } else
+ retval = -EINVAL;
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ if (is_otg_enabled(musb) && retval == 0) {
+ usb_remove_hcd(musb_to_hcd(musb));
+ /* FIXME we need to be able to register another
+ * gadget driver here and have everything work;
+ * that currently misbehaves.
+ */
+ }
+
+ return retval;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+
+/* ----------------------------------------------------------------------- */
+
+/* lifecycle operations called through plat_uds.c */
+
+void musb_g_resume(struct musb *musb)
+{
+ musb->is_suspended = 0;
+ switch (musb->xceiv.state) {
+ case OTG_STATE_B_IDLE:
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ case OTG_STATE_B_PERIPHERAL:
+ musb->is_active = 1;
+ if (musb->gadget_driver && musb->gadget_driver->resume) {
+ spin_unlock(&musb->lock);
+ musb->gadget_driver->resume(&musb->g);
+ spin_lock(&musb->lock);
+ }
+ break;
+ default:
+ WARNING("unhandled RESUME transition (%s)\n",
+ otg_state_string(musb));
+ }
+}
+
+/* called when SOF packets stop for 3+ msec */
+void musb_g_suspend(struct musb *musb)
+{
+ u8 devctl;
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ DBG(3, "devctl %02x\n", devctl);
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_B_IDLE:
+ if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
+ musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ musb->is_suspended = 1;
+ if (musb->gadget_driver && musb->gadget_driver->suspend) {
+ spin_unlock(&musb->lock);
+ musb->gadget_driver->suspend(&musb->g);
+ spin_lock(&musb->lock);
+ }
+ break;
+ default:
+ /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
+ * A_PERIPHERAL may need care too
+ */
+ WARNING("unhandled SUSPEND transition (%s)\n",
+ otg_state_string(musb));
+ }
+}
+
+/* Called during SRP */
+void musb_g_wakeup(struct musb *musb)
+{
+ musb_gadget_wakeup(&musb->g);
+}
+
+/* called when VBUS drops below session threshold, and in other cases */
+void musb_g_disconnect(struct musb *musb)
+{
+ void __iomem *mregs = musb->mregs;
+ u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
+
+ DBG(3, "devctl %02x\n", devctl);
+
+ /* clear HR */
+ musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
+
+ /* don't draw vbus until new b-default session */
+ (void) musb_gadget_vbus_draw(&musb->g, 0);
+
+ musb->g.speed = USB_SPEED_UNKNOWN;
+ if (musb->gadget_driver && musb->gadget_driver->disconnect) {
+ spin_unlock(&musb->lock);
+ musb->gadget_driver->disconnect(&musb->g);
+ spin_lock(&musb->lock);
+ }
+
+ switch (musb->xceiv.state) {
+ default:
+#ifdef CONFIG_USB_MUSB_OTG
+ DBG(2, "Unhandled disconnect %s, setting a_idle\n",
+ otg_state_string(musb));
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ case OTG_STATE_B_HOST:
+#endif
+ case OTG_STATE_B_PERIPHERAL:
+ case OTG_STATE_B_IDLE:
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ break;
+ case OTG_STATE_B_SRP_INIT:
+ break;
+ }
+
+ musb->is_active = 0;
+}
+
+void musb_g_reset(struct musb *musb)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+ void __iomem *mbase = musb->mregs;
+ u8 devctl = musb_readb(mbase, MUSB_DEVCTL);
+ u8 power;
+
+ DBG(3, "<== %s addr=%x driver '%s'\n",
+ (devctl & MUSB_DEVCTL_BDEVICE)
+ ? "B-Device" : "A-Device",
+ musb_readb(mbase, MUSB_FADDR),
+ musb->gadget_driver
+ ? musb->gadget_driver->driver.name
+ : NULL
+ );
+
+ /* report disconnect, if we didn't already (flushing EP state) */
+ if (musb->g.speed != USB_SPEED_UNKNOWN)
+ musb_g_disconnect(musb);
+
+ /* clear HR */
+ else if (devctl & MUSB_DEVCTL_HR)
+ musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
+
+
+ /* what speed did we negotiate? */
+ power = musb_readb(mbase, MUSB_POWER);
+ musb->g.speed = (power & MUSB_POWER_HSMODE)
+ ? USB_SPEED_HIGH : USB_SPEED_FULL;
+
+ /* start in USB_STATE_DEFAULT */
+ musb->is_active = 1;
+ musb->is_suspended = 0;
+ MUSB_DEV_MODE(musb);
+ musb->address = 0;
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+
+ musb->may_wakeup = 0;
+ musb->g.b_hnp_enable = 0;
+ musb->g.a_alt_hnp_support = 0;
+ musb->g.a_hnp_support = 0;
+
+ /* Normal reset, as B-Device;
+ * or else after HNP, as A-Device
+ */
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ musb->xceiv.state = OTG_STATE_B_PERIPHERAL;
+ musb->g.is_a_peripheral = 0;
+ } else if (is_otg_enabled(musb)) {
+ musb->xceiv.state = OTG_STATE_A_PERIPHERAL;
+ musb->g.is_a_peripheral = 1;
+ } else
+ WARN_ON(1);
+
+ /* start with default limits on VBUS power draw */
+ (void) musb_gadget_vbus_draw(&musb->g,
+ is_otg_enabled(musb) ? 8 : 100);
+}
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
new file mode 100644
index 000000000000..59502da9f739
--- /dev/null
+++ b/drivers/usb/musb/musb_gadget.h
@@ -0,0 +1,108 @@
+/*
+ * MUSB OTG driver peripheral defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_GADGET_H
+#define __MUSB_GADGET_H
+
+struct musb_request {
+ struct usb_request request;
+ struct musb_ep *ep;
+ struct musb *musb;
+ u8 tx; /* endpoint direction */
+ u8 epnum;
+ u8 mapped;
+};
+
+static inline struct musb_request *to_musb_request(struct usb_request *req)
+{
+ return req ? container_of(req, struct musb_request, request) : NULL;
+}
+
+extern struct usb_request *
+musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags);
+extern void musb_free_request(struct usb_ep *ep, struct usb_request *req);
+
+
+/*
+ * struct musb_ep - peripheral side view of endpoint rx or tx side
+ */
+struct musb_ep {
+ /* stuff towards the head is basically write-once. */
+ struct usb_ep end_point;
+ char name[12];
+ struct musb_hw_ep *hw_ep;
+ struct musb *musb;
+ u8 current_epnum;
+
+ /* ... when enabled/disabled ... */
+ u8 type;
+ u8 is_in;
+ u16 packet_sz;
+ const struct usb_endpoint_descriptor *desc;
+ struct dma_channel *dma;
+
+ /* later things are modified based on usage */
+ struct list_head req_list;
+
+ /* true if lock must be dropped but req_list may not be advanced */
+ u8 busy;
+};
+
+static inline struct musb_ep *to_musb_ep(struct usb_ep *ep)
+{
+ return ep ? container_of(ep, struct musb_ep, end_point) : NULL;
+}
+
+static inline struct usb_request *next_request(struct musb_ep *ep)
+{
+ struct list_head *queue = &ep->req_list;
+
+ if (list_empty(queue))
+ return NULL;
+ return container_of(queue->next, struct usb_request, list);
+}
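+
+/* Illustrative sketch (assumption): the inline helpers above give the
+ * usual container_of() conversions used throughout musb_gadget.c:
+ *
+ *	struct musb_ep *musb_ep = to_musb_ep(ep);
+ *	struct musb_request *request = to_musb_request(req);
+ *	struct usb_request *next = next_request(musb_ep);
+ */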
+
+extern void musb_g_tx(struct musb *musb, u8 epnum);
+extern void musb_g_rx(struct musb *musb, u8 epnum);
+
+extern const struct usb_ep_ops musb_g_ep0_ops;
+
+extern int musb_gadget_setup(struct musb *);
+extern void musb_gadget_cleanup(struct musb *);
+
+extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
+
+extern int musb_gadget_set_halt(struct usb_ep *ep, int value);
+
+#endif /* __MUSB_GADGET_H */
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
new file mode 100644
index 000000000000..a57652fff39c
--- /dev/null
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -0,0 +1,983 @@
+/*
+ * MUSB OTG peripheral driver ep0 handling
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+
+#include "musb_core.h"
+
+/* ep0 is always musb->endpoints[0].ep_in */
+#define next_ep0_request(musb) next_in_request(&(musb)->endpoints[0])
+
+/*
+ * locking note: we use only the controller lock, for simpler correctness.
+ * It's always held with IRQs blocked.
+ *
+ * It protects the ep0 request queue as well as ep0_state, not just the
+ * controller and indexed registers. And that lock stays held unless it
+ * needs to be dropped to allow reentering this driver ... like upcalls to
+ * the gadget driver, or adjusting endpoint halt status.
+ */
+
+static char *decode_ep0stage(u8 stage)
+{
+ switch (stage) {
+ case MUSB_EP0_STAGE_SETUP: return "idle";
+ case MUSB_EP0_STAGE_TX: return "in";
+ case MUSB_EP0_STAGE_RX: return "out";
+ case MUSB_EP0_STAGE_ACKWAIT: return "wait";
+ case MUSB_EP0_STAGE_STATUSIN: return "in/status";
+ case MUSB_EP0_STAGE_STATUSOUT: return "out/status";
+ default: return "?";
+ }
+}
+
+/* handle a standard GET_STATUS request
+ * Context: caller holds controller lock
+ */
+static int service_tx_status_request(
+ struct musb *musb,
+ const struct usb_ctrlrequest *ctrlrequest)
+{
+ void __iomem *mbase = musb->mregs;
+ int handled = 1;
+ u8 result[2], epnum = 0;
+ const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;
+
+ result[1] = 0;
+
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ result[0] = musb->is_self_powered << USB_DEVICE_SELF_POWERED;
+ result[0] |= musb->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+#ifdef CONFIG_USB_MUSB_OTG
+ if (musb->g.is_otg) {
+ result[0] |= musb->g.b_hnp_enable
+ << USB_DEVICE_B_HNP_ENABLE;
+ result[0] |= musb->g.a_alt_hnp_support
+ << USB_DEVICE_A_ALT_HNP_SUPPORT;
+ result[0] |= musb->g.a_hnp_support
+ << USB_DEVICE_A_HNP_SUPPORT;
+ }
+#endif
+ break;
+
+ case USB_RECIP_INTERFACE:
+ result[0] = 0;
+ break;
+
+ case USB_RECIP_ENDPOINT: {
+ int is_in;
+ struct musb_ep *ep;
+ u16 tmp;
+ void __iomem *regs;
+
+ epnum = (u8) ctrlrequest->wIndex;
+ if (!epnum) {
+ result[0] = 0;
+ break;
+ }
+
+ is_in = epnum & USB_DIR_IN;
+ if (is_in) {
+ epnum &= 0x0f;
+ ep = &musb->endpoints[epnum].ep_in;
+ } else {
+ ep = &musb->endpoints[epnum].ep_out;
+ }
+ regs = musb->endpoints[epnum].regs;
+
+ if (epnum >= MUSB_C_NUM_EPS || !ep->desc) {
+ handled = -EINVAL;
+ break;
+ }
+
+ musb_ep_select(mbase, epnum);
+ if (is_in)
+ tmp = musb_readw(regs, MUSB_TXCSR)
+ & MUSB_TXCSR_P_SENDSTALL;
+ else
+ tmp = musb_readw(regs, MUSB_RXCSR)
+ & MUSB_RXCSR_P_SENDSTALL;
+ musb_ep_select(mbase, 0);
+
+ result[0] = tmp ? 1 : 0;
+ } break;
+
+ default:
+ /* class, vendor, etc ... delegate */
+ handled = 0;
+ break;
+ }
+
+ /* fill up the fifo; caller updates csr0 */
+ if (handled > 0) {
+ u16 len = le16_to_cpu(ctrlrequest->wLength);
+
+ if (len > 2)
+ len = 2;
+ musb_write_fifo(&musb->endpoints[0], len, result);
+ }
+
+ return handled;
+}
+
+/*
+ * handle a control-IN request: the ep0 buffer contains the current request,
+ * which is expected to be a standard control request. Assumes the fifo is
+ * at least 2 bytes long.
+ *
+ * @return 0 if the request was NOT HANDLED,
+ * < 0 when error
+ * > 0 when the request is processed
+ *
+ * Context: caller holds controller lock
+ */
+static int
+service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
+{
+ int handled = 0; /* not handled */
+
+ if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
+ == USB_TYPE_STANDARD) {
+ switch (ctrlrequest->bRequest) {
+ case USB_REQ_GET_STATUS:
+ handled = service_tx_status_request(musb,
+ ctrlrequest);
+ break;
+
+ /* case USB_REQ_SYNC_FRAME: */
+
+ default:
+ break;
+ }
+ }
+ return handled;
+}
+
+/*
+ * Context: caller holds controller lock
+ */
+static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req)
+{
+ musb_g_giveback(&musb->endpoints[0].ep_in, req, 0);
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+}
+
+/*
+ * Tries to start B-device HNP negotiation if enabled via sysfs
+ */
+static inline void musb_try_b_hnp_enable(struct musb *musb)
+{
+ void __iomem *mbase = musb->mregs;
+ u8 devctl;
+
+ DBG(1, "HNP: Setting HR\n");
+ devctl = musb_readb(mbase, MUSB_DEVCTL);
+ musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR);
+}
+
+/*
+ * Handle all control requests with no DATA stage, including standard
+ * requests such as:
+ * USB_REQ_SET_CONFIGURATION, USB_REQ_SET_INTERFACE, unrecognized
+ * always delegated to the gadget driver
+ * USB_REQ_SET_ADDRESS, USB_REQ_CLEAR_FEATURE, USB_REQ_SET_FEATURE
+ * always handled here, except for class/vendor/... features
+ *
+ * Context: caller holds controller lock
+ */
+static int
+service_zero_data_request(struct musb *musb,
+ struct usb_ctrlrequest *ctrlrequest)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+ int handled = -EINVAL;
+ void __iomem *mbase = musb->mregs;
+ const u8 recip = ctrlrequest->bRequestType & USB_RECIP_MASK;
+
+ /* the gadget driver handles everything except what we MUST handle */
+ if ((ctrlrequest->bRequestType & USB_TYPE_MASK)
+ == USB_TYPE_STANDARD) {
+ switch (ctrlrequest->bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ /* change it after the status stage */
+ musb->set_address = true;
+ musb->address = (u8) (ctrlrequest->wValue & 0x7f);
+ handled = 1;
+ break;
+
+ case USB_REQ_CLEAR_FEATURE:
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ if (ctrlrequest->wValue
+ != USB_DEVICE_REMOTE_WAKEUP)
+ break;
+ musb->may_wakeup = 0;
+ handled = 1;
+ break;
+ case USB_RECIP_INTERFACE:
+ break;
+ case USB_RECIP_ENDPOINT:{
+ const u8 num = ctrlrequest->wIndex & 0x0f;
+ struct musb_ep *musb_ep;
+
+ if (num == 0
+ || num >= MUSB_C_NUM_EPS
+ || ctrlrequest->wValue
+ != USB_ENDPOINT_HALT)
+ break;
+
+ if (ctrlrequest->wIndex & USB_DIR_IN)
+ musb_ep = &musb->endpoints[num].ep_in;
+ else
+ musb_ep = &musb->endpoints[num].ep_out;
+ if (!musb_ep->desc)
+ break;
+
+ /* REVISIT do it directly, no locking games */
+ spin_unlock(&musb->lock);
+ musb_gadget_set_halt(&musb_ep->end_point, 0);
+ spin_lock(&musb->lock);
+
+ /* select ep0 again */
+ musb_ep_select(mbase, 0);
+ handled = 1;
+ } break;
+ default:
+ /* class, vendor, etc ... delegate */
+ handled = 0;
+ break;
+ }
+ break;
+
+ case USB_REQ_SET_FEATURE:
+ switch (recip) {
+ case USB_RECIP_DEVICE:
+ handled = 1;
+ switch (ctrlrequest->wValue) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ musb->may_wakeup = 1;
+ break;
+ case USB_DEVICE_TEST_MODE:
+ if (musb->g.speed != USB_SPEED_HIGH)
+ goto stall;
+ if (ctrlrequest->wIndex & 0xff)
+ goto stall;
+
+ switch (ctrlrequest->wIndex >> 8) {
+ case 1:
+ pr_debug("TEST_J\n");
+ /* TEST_J */
+ musb->test_mode_nr =
+ MUSB_TEST_J;
+ break;
+ case 2:
+ /* TEST_K */
+ pr_debug("TEST_K\n");
+ musb->test_mode_nr =
+ MUSB_TEST_K;
+ break;
+ case 3:
+ /* TEST_SE0_NAK */
+ pr_debug("TEST_SE0_NAK\n");
+ musb->test_mode_nr =
+ MUSB_TEST_SE0_NAK;
+ break;
+ case 4:
+ /* TEST_PACKET */
+ pr_debug("TEST_PACKET\n");
+ musb->test_mode_nr =
+ MUSB_TEST_PACKET;
+ break;
+ default:
+ goto stall;
+ }
+
+ /* enter test mode after irq */
+ if (handled > 0)
+ musb->test_mode = true;
+ break;
+#ifdef CONFIG_USB_MUSB_OTG
+ case USB_DEVICE_B_HNP_ENABLE:
+ if (!musb->g.is_otg)
+ goto stall;
+ musb->g.b_hnp_enable = 1;
+ musb_try_b_hnp_enable(musb);
+ break;
+ case USB_DEVICE_A_HNP_SUPPORT:
+ if (!musb->g.is_otg)
+ goto stall;
+ musb->g.a_hnp_support = 1;
+ break;
+ case USB_DEVICE_A_ALT_HNP_SUPPORT:
+ if (!musb->g.is_otg)
+ goto stall;
+ musb->g.a_alt_hnp_support = 1;
+ break;
+#endif
+stall:
+ default:
+ handled = -EINVAL;
+ break;
+ }
+ break;
+
+ case USB_RECIP_INTERFACE:
+ break;
+
+ case USB_RECIP_ENDPOINT:{
+ const u8 epnum =
+ ctrlrequest->wIndex & 0x0f;
+ struct musb_ep *musb_ep;
+ struct musb_hw_ep *ep;
+ void __iomem *regs;
+ int is_in;
+ u16 csr;
+
+ if (epnum == 0
+ || epnum >= MUSB_C_NUM_EPS
+ || ctrlrequest->wValue
+ != USB_ENDPOINT_HALT)
+ break;
+
+ ep = musb->endpoints + epnum;
+ regs = ep->regs;
+ is_in = ctrlrequest->wIndex & USB_DIR_IN;
+ if (is_in)
+ musb_ep = &ep->ep_in;
+ else
+ musb_ep = &ep->ep_out;
+ if (!musb_ep->desc)
+ break;
+
+ musb_ep_select(mbase, epnum);
+ if (is_in) {
+ csr = musb_readw(regs,
+ MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_FIFONOTEMPTY)
+ csr |= MUSB_TXCSR_FLUSHFIFO;
+ csr |= MUSB_TXCSR_P_SENDSTALL
+ | MUSB_TXCSR_CLRDATATOG
+ | MUSB_TXCSR_P_WZC_BITS;
+ musb_writew(regs, MUSB_TXCSR,
+ csr);
+ } else {
+ csr = musb_readw(regs,
+ MUSB_RXCSR);
+ csr |= MUSB_RXCSR_P_SENDSTALL
+ | MUSB_RXCSR_FLUSHFIFO
+ | MUSB_RXCSR_CLRDATATOG
+ | MUSB_RXCSR_P_WZC_BITS;
+ musb_writew(regs, MUSB_RXCSR,
+ csr);
+ }
+
+ /* select ep0 again */
+ musb_ep_select(mbase, 0);
+ handled = 1;
+ } break;
+
+ default:
+ /* class, vendor, etc ... delegate */
+ handled = 0;
+ break;
+ }
+ break;
+ default:
+ /* delegate SET_CONFIGURATION, etc */
+ handled = 0;
+ }
+ } else
+ handled = 0;
+ return handled;
+}
+
+/* we have an ep0out data packet
+ * Context: caller holds controller lock
+ */
+static void ep0_rxstate(struct musb *musb)
+{
+ void __iomem *regs = musb->control_ep->regs;
+ struct usb_request *req;
+ u16 tmp;
+
+ req = next_ep0_request(musb);
+
+ /* read packet and ack; or stall because of gadget driver bug:
+ * should have provided the rx buffer before setup() returned.
+ */
+ if (req) {
+ void *buf = req->buf + req->actual;
+ unsigned len = req->length - req->actual;
+
+ /* read the buffer */
+ tmp = musb_readb(regs, MUSB_COUNT0);
+ if (tmp > len) {
+ req->status = -EOVERFLOW;
+ tmp = len;
+ }
+ musb_read_fifo(&musb->endpoints[0], tmp, buf);
+ req->actual += tmp;
+
+ /* a short packet or a full buffer ends the OUT data stage */
+ if (tmp < 64 || req->actual == req->length) {
+ musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
+ tmp = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_DATAEND;
+ } else {
+ tmp = MUSB_CSR0_P_SVDRXPKTRDY;
+ req = NULL;
+ }
+ } else
+ tmp = MUSB_CSR0_P_SVDRXPKTRDY | MUSB_CSR0_P_SENDSTALL;
+
+
+ /* Completion handler may choose to stall, e.g. because the
+ * message just received holds invalid data.
+ */
+ if (req) {
+ musb->ackpend = tmp;
+ musb_g_ep0_giveback(musb, req);
+ if (!musb->ackpend)
+ return;
+ musb->ackpend = 0;
+ }
+ musb_ep_select(musb->mregs, 0);
+ musb_writew(regs, MUSB_CSR0, tmp);
+}
+
+/*
+ * transmitting to the host (IN), this code might be called from IRQ
+ * and from kernel thread.
+ *
+ * Context: caller holds controller lock
+ */
+static void ep0_txstate(struct musb *musb)
+{
+ void __iomem *regs = musb->control_ep->regs;
+ struct usb_request *request = next_ep0_request(musb);
+ u16 csr = MUSB_CSR0_TXPKTRDY;
+ u8 *fifo_src;
+ u8 fifo_count;
+
+ if (!request) {
+ /* WARN_ON(1); */
+ DBG(2, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0));
+ return;
+ }
+
+ /* load the data */
+ fifo_src = (u8 *) request->buf + request->actual;
+ fifo_count = min((unsigned) MUSB_EP0_FIFOSIZE,
+ request->length - request->actual);
+ musb_write_fifo(&musb->endpoints[0], fifo_count, fifo_src);
+ request->actual += fifo_count;
+
+ /* update the flags */
+ if (fifo_count < MUSB_MAX_END0_PACKET
+ || request->actual == request->length) {
+ musb->ep0_state = MUSB_EP0_STAGE_STATUSOUT;
+ csr |= MUSB_CSR0_P_DATAEND;
+ } else
+ request = NULL;
+
+ /* report completions as soon as the fifo's loaded; there's no
+ * win in waiting till this last packet gets acked. (other than
+ * very precise fault reporting, needed by USB TMC; possible with
+ * this hardware, but not usable from portable gadget drivers.)
+ */
+ if (request) {
+ musb->ackpend = csr;
+ musb_g_ep0_giveback(musb, request);
+ if (!musb->ackpend)
+ return;
+ musb->ackpend = 0;
+ }
+
+ /* send it out, triggering a "txpktrdy cleared" irq */
+ musb_ep_select(musb->mregs, 0);
+ musb_writew(regs, MUSB_CSR0, csr);
+}
+
+/*
+ * Read a SETUP packet (struct usb_ctrlrequest) from the hardware.
+ * Fields are left in USB byte-order.
+ *
+ * Context: caller holds controller lock.
+ */
+static void
+musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
+{
+ struct usb_request *r;
+ void __iomem *regs = musb->control_ep->regs;
+
+ musb_read_fifo(&musb->endpoints[0], sizeof *req, (u8 *)req);
+
+ /* NOTE: earlier 2.6 versions changed setup packets to host
+ * order, but now USB packets always stay in USB byte order.
+ */
+ DBG(3, "SETUP req%02x.%02x v%04x i%04x l%d\n",
+ req->bRequestType,
+ req->bRequest,
+ le16_to_cpu(req->wValue),
+ le16_to_cpu(req->wIndex),
+ le16_to_cpu(req->wLength));
+
+ /* clean up any leftover transfers */
+ r = next_ep0_request(musb);
+ if (r)
+ musb_g_ep0_giveback(musb, r);
+
+ /* For zero-data requests we want to delay the STATUS stage to
+ * avoid SETUPEND errors. If we read data (OUT), delay accepting
+ * packets until there's a buffer to store them in.
+ *
+ * If we write data, the controller acts happier if we enable
+ * the TX FIFO right away, and give the controller a moment
+ * to switch modes...
+ */
+ musb->set_address = false;
+ musb->ackpend = MUSB_CSR0_P_SVDRXPKTRDY;
+ if (req->wLength == 0) {
+ if (req->bRequestType & USB_DIR_IN)
+ musb->ackpend |= MUSB_CSR0_TXPKTRDY;
+ musb->ep0_state = MUSB_EP0_STAGE_ACKWAIT;
+ } else if (req->bRequestType & USB_DIR_IN) {
+ musb->ep0_state = MUSB_EP0_STAGE_TX;
+ musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDRXPKTRDY);
+ while ((musb_readw(regs, MUSB_CSR0)
+ & MUSB_CSR0_RXPKTRDY) != 0)
+ cpu_relax();
+ musb->ackpend = 0;
+ } else
+ musb->ep0_state = MUSB_EP0_STAGE_RX;
+}
+
+static int
+forward_to_driver(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+ int retval;
+ if (!musb->gadget_driver)
+ return -EOPNOTSUPP;
+ spin_unlock(&musb->lock);
+ retval = musb->gadget_driver->setup(&musb->g, ctrlrequest);
+ spin_lock(&musb->lock);
+ return retval;
+}
+
+/*
+ * Handle peripheral ep0 interrupt
+ *
+ * Context: irq handler; we won't re-enter the driver that way.
+ */
+irqreturn_t musb_g_ep0_irq(struct musb *musb)
+{
+ u16 csr;
+ u16 len;
+ void __iomem *mbase = musb->mregs;
+ void __iomem *regs = musb->endpoints[0].regs;
+ irqreturn_t retval = IRQ_NONE;
+
+ musb_ep_select(mbase, 0); /* select ep0 */
+ csr = musb_readw(regs, MUSB_CSR0);
+ len = musb_readb(regs, MUSB_COUNT0);
+
+ DBG(4, "csr %04x, count %d, myaddr %d, ep0stage %s\n",
+ csr, len,
+ musb_readb(mbase, MUSB_FADDR),
+ decode_ep0stage(musb->ep0_state));
+
+ /* we sent a stall ... acknowledge it now */
+ if (csr & MUSB_CSR0_P_SENTSTALL) {
+ musb_writew(regs, MUSB_CSR0,
+ csr & ~MUSB_CSR0_P_SENTSTALL);
+ retval = IRQ_HANDLED;
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ csr = musb_readw(regs, MUSB_CSR0);
+ }
+
+ /* request ended "early" */
+ if (csr & MUSB_CSR0_P_SETUPEND) {
+ musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SVDSETUPEND);
+ retval = IRQ_HANDLED;
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ csr = musb_readw(regs, MUSB_CSR0);
+ /* NOTE: request may need completion */
+ }
+
+ /* docs from Mentor only describe tx, rx, and idle/setup states.
+ * we need to handle nuances around status stages, and also the
+ * case where status and setup stages come back-to-back ...
+ */
+ switch (musb->ep0_state) {
+
+ case MUSB_EP0_STAGE_TX:
+ /* irq on clearing txpktrdy */
+ if ((csr & MUSB_CSR0_TXPKTRDY) == 0) {
+ ep0_txstate(musb);
+ retval = IRQ_HANDLED;
+ }
+ break;
+
+ case MUSB_EP0_STAGE_RX:
+ /* irq on set rxpktrdy */
+ if (csr & MUSB_CSR0_RXPKTRDY) {
+ ep0_rxstate(musb);
+ retval = IRQ_HANDLED;
+ }
+ break;
+
+ case MUSB_EP0_STAGE_STATUSIN:
+ /* end of sequence #2 (OUT/RX state) or #3 (no data) */
+
+ /* update address (if needed) only @ the end of the
+ * status phase per usb spec, which also guarantees
+ * we get 10 msec to receive this irq... until this
+ * is done we won't see the next packet.
+ */
+ if (musb->set_address) {
+ musb->set_address = false;
+ musb_writeb(mbase, MUSB_FADDR, musb->address);
+ }
+
+ /* enter test mode if needed (exit by reset) */
+ else if (musb->test_mode) {
+ DBG(1, "entering TESTMODE\n");
+
+ if (MUSB_TEST_PACKET == musb->test_mode_nr)
+ musb_load_testpacket(musb);
+
+ musb_writeb(mbase, MUSB_TESTMODE,
+ musb->test_mode_nr);
+ }
+ /* FALLTHROUGH */
+
+ case MUSB_EP0_STAGE_STATUSOUT:
+ /* end of sequence #1: write to host (TX state) */
+ {
+ struct usb_request *req;
+
+ req = next_ep0_request(musb);
+ if (req)
+ musb_g_ep0_giveback(musb, req);
+ }
+ retval = IRQ_HANDLED;
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ /* FALLTHROUGH */
+
+ case MUSB_EP0_STAGE_SETUP:
+ if (csr & MUSB_CSR0_RXPKTRDY) {
+ struct usb_ctrlrequest setup;
+ int handled = 0;
+
+ if (len != 8) {
+ ERR("SETUP packet len %d != 8 ?\n", len);
+ break;
+ }
+ musb_read_setup(musb, &setup);
+ retval = IRQ_HANDLED;
+
+ /* sometimes the RESET won't be reported */
+ if (unlikely(musb->g.speed == USB_SPEED_UNKNOWN)) {
+ u8 power;
+
+ printk(KERN_NOTICE "%s: peripheral reset "
+ "irq lost!\n",
+ musb_driver_name);
+ power = musb_readb(mbase, MUSB_POWER);
+ musb->g.speed = (power & MUSB_POWER_HSMODE)
+ ? USB_SPEED_HIGH : USB_SPEED_FULL;
+
+ }
+
+ switch (musb->ep0_state) {
+
+ /* sequence #3 (no data stage), includes requests
+ * we can't forward (notably SET_ADDRESS and the
+ * device/endpoint feature set/clear operations)
+ * plus SET_CONFIGURATION and others we must
+ */
+ case MUSB_EP0_STAGE_ACKWAIT:
+ handled = service_zero_data_request(
+ musb, &setup);
+
+ /* status stage might be immediate */
+ if (handled > 0) {
+ musb->ackpend |= MUSB_CSR0_P_DATAEND;
+ musb->ep0_state =
+ MUSB_EP0_STAGE_STATUSIN;
+ }
+ break;
+
+ /* sequence #1 (IN to host), includes GET_STATUS
+ * requests that we can't forward, GET_DESCRIPTOR
+ * and others that we must
+ */
+ case MUSB_EP0_STAGE_TX:
+ handled = service_in_request(musb, &setup);
+ if (handled > 0) {
+ musb->ackpend = MUSB_CSR0_TXPKTRDY
+ | MUSB_CSR0_P_DATAEND;
+ musb->ep0_state =
+ MUSB_EP0_STAGE_STATUSOUT;
+ }
+ break;
+
+ /* sequence #2 (OUT from host), always forward */
+ default: /* MUSB_EP0_STAGE_RX */
+ break;
+ }
+
+ DBG(3, "handled %d, csr %04x, ep0stage %s\n",
+ handled, csr,
+ decode_ep0stage(musb->ep0_state));
+
+ /* unless we need to delegate this to the gadget
+ * driver, we know how to wrap this up: csr0 has
+ * not yet been written.
+ */
+ if (handled < 0)
+ goto stall;
+ else if (handled > 0)
+ goto finish;
+
+ handled = forward_to_driver(musb, &setup);
+ if (handled < 0) {
+ musb_ep_select(mbase, 0);
+stall:
+ DBG(3, "stall (%d)\n", handled);
+ musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+finish:
+ musb_writew(regs, MUSB_CSR0,
+ musb->ackpend);
+ musb->ackpend = 0;
+ }
+ }
+ break;
+
+ case MUSB_EP0_STAGE_ACKWAIT:
+ /* This should not happen, but it does with tusb6010 running
+ * g_file_storage at high speed. Do nothing.
+ */
+ retval = IRQ_HANDLED;
+ break;
+
+ default:
+ /* "can't happen" */
+ WARN_ON(1);
+ musb_writew(regs, MUSB_CSR0, MUSB_CSR0_P_SENDSTALL);
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ break;
+ }
+
+ return retval;
+}
+
+
+static int
+musb_g_ep0_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *desc)
+{
+ /* always enabled */
+ return -EINVAL;
+}
+
+static int musb_g_ep0_disable(struct usb_ep *e)
+{
+ /* always enabled */
+ return -EINVAL;
+}
+
+static int
+musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
+{
+ struct musb_ep *ep;
+ struct musb_request *req;
+ struct musb *musb;
+ int status;
+ unsigned long lockflags;
+ void __iomem *regs;
+
+ if (!e || !r)
+ return -EINVAL;
+
+ ep = to_musb_ep(e);
+ musb = ep->musb;
+ regs = musb->control_ep->regs;
+
+ req = to_musb_request(r);
+ req->musb = musb;
+ req->request.actual = 0;
+ req->request.status = -EINPROGRESS;
+ req->tx = ep->is_in;
+
+ spin_lock_irqsave(&musb->lock, lockflags);
+
+ if (!list_empty(&ep->req_list)) {
+ status = -EBUSY;
+ goto cleanup;
+ }
+
+ switch (musb->ep0_state) {
+ case MUSB_EP0_STAGE_RX: /* control-OUT data */
+ case MUSB_EP0_STAGE_TX: /* control-IN data */
+ case MUSB_EP0_STAGE_ACKWAIT: /* zero-length data */
+ status = 0;
+ break;
+ default:
+ DBG(1, "ep0 request queued in state %d\n",
+ musb->ep0_state);
+ status = -EINVAL;
+ goto cleanup;
+ }
+
+ /* add request to the list */
+ list_add_tail(&(req->request.list), &(ep->req_list));
+
+ DBG(3, "queue to %s (%s), length=%d\n",
+ ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
+ req->request.length);
+
+ musb_ep_select(musb->mregs, 0);
+
+ /* sequence #1, IN ... start writing the data */
+ if (musb->ep0_state == MUSB_EP0_STAGE_TX)
+ ep0_txstate(musb);
+
+ /* sequence #3, no-data ... issue IN status */
+ else if (musb->ep0_state == MUSB_EP0_STAGE_ACKWAIT) {
+ if (req->request.length)
+ status = -EINVAL;
+ else {
+ musb->ep0_state = MUSB_EP0_STAGE_STATUSIN;
+ musb_writew(regs, MUSB_CSR0,
+ musb->ackpend | MUSB_CSR0_P_DATAEND);
+ musb->ackpend = 0;
+ musb_g_ep0_giveback(ep->musb, r);
+ }
+
+ /* else for sequence #2 (OUT), caller provides a buffer
+ * before the next packet arrives. deferred responses
+ * (after SETUP is acked) are racy.
+ */
+ } else if (musb->ackpend) {
+ musb_writew(regs, MUSB_CSR0, musb->ackpend);
+ musb->ackpend = 0;
+ }
+
+cleanup:
+ spin_unlock_irqrestore(&musb->lock, lockflags);
+ return status;
+}
+
+static int musb_g_ep0_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+ /* we just won't support this */
+ return -EINVAL;
+}
+
+static int musb_g_ep0_halt(struct usb_ep *e, int value)
+{
+ struct musb_ep *ep;
+ struct musb *musb;
+ void __iomem *base, *regs;
+ unsigned long flags;
+ int status;
+ u16 csr;
+
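+ /* Only setting the halt is supported here; an ep0 protocol stall
+ * clears itself when the next SETUP packet arrives.
+ */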
+ if (!e || !value)
+ return -EINVAL;
+
+ ep = to_musb_ep(e);
+ musb = ep->musb;
+ base = musb->mregs;
+ regs = musb->control_ep->regs;
+ status = 0;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (!list_empty(&ep->req_list)) {
+ status = -EBUSY;
+ goto cleanup;
+ }
+
+ musb_ep_select(base, 0);
+ csr = musb->ackpend;
+
+ switch (musb->ep0_state) {
+
+ /* Stalls are usually issued after parsing SETUP packet, either
+ * directly in irq context from setup() or else later.
+ */
+ case MUSB_EP0_STAGE_TX: /* control-IN data */
+ case MUSB_EP0_STAGE_ACKWAIT: /* STALL for zero-length data */
+ case MUSB_EP0_STAGE_RX: /* control-OUT data */
+ csr = musb_readw(regs, MUSB_CSR0);
+ /* FALLTHROUGH */
+
+ /* It's also OK to issue stalls during callbacks when a non-empty
+ * DATA stage buffer has been read (or even written).
+ */
+ case MUSB_EP0_STAGE_STATUSIN: /* control-OUT status */
+ case MUSB_EP0_STAGE_STATUSOUT: /* control-IN status */
+
+ csr |= MUSB_CSR0_P_SENDSTALL;
+ musb_writew(regs, MUSB_CSR0, csr);
+ musb->ep0_state = MUSB_EP0_STAGE_SETUP;
+ musb->ackpend = 0;
+ break;
+ default:
+ DBG(1, "ep0 can't halt in state %d\n", musb->ep0_state);
+ status = -EINVAL;
+ }
+
+cleanup:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return status;
+}
+
+const struct usb_ep_ops musb_g_ep0_ops = {
+ .enable = musb_g_ep0_enable,
+ .disable = musb_g_ep0_disable,
+ .alloc_request = musb_alloc_request,
+ .free_request = musb_free_request,
+ .queue = musb_g_ep0_queue,
+ .dequeue = musb_g_ep0_dequeue,
+ .set_halt = musb_g_ep0_halt,
+};
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
new file mode 100644
index 000000000000..8b4be012669a
--- /dev/null
+++ b/drivers/usb/musb/musb_host.c
@@ -0,0 +1,2170 @@
+/*
+ * MUSB OTG driver host support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/list.h>
+
+#include "musb_core.h"
+#include "musb_host.h"
+
+
+/* MUSB HOST status 22-mar-2006
+ *
+ * - There's still lots of partial code duplication for fault paths, so
+ * they aren't handled as consistently as they need to be.
+ *
+ * - PIO mostly behaved when last tested.
+ * + including ep0, with all usbtest cases 9, 10
+ * + usbtest 14 (ep0out) doesn't seem to run at all
+ * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
+ * configurations, but otherwise double buffering passes basic tests.
+ * + for 2.6.N, for N > ~10, needs API changes for hcd framework.
+ *
+ * - DMA (CPPI) ... partially behaves, not currently recommended
+ * + about 1/15 the speed of typical EHCI implementations (PCI)
+ * + RX, all too often reqpkt seems to misbehave after tx
+ * + TX, no known issues (other than evident silicon issue)
+ *
+ * - DMA (Mentor/OMAP) ...has at least toggle update problems
+ *
+ * - Still no traffic scheduling code to keep NAKing bulk or control
+ * transfers from starving other requests, or to make efficient use
+ * of the hardware for periodic transfers. (Note that network drivers
+ * commonly post bulk reads that stay pending for a long time; these
+ * would make very visible trouble.)
+ *
+ * - Not tested with HNP, but some SRP paths seem to behave.
+ *
+ * NOTE 24-August-2006:
+ *
+ * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
+ * extra endpoint for periodic use enabling hub + keybd + mouse. That
+ * mostly works, except that with "usbnet" it's easy to trigger cases
+ * with "ping" where RX loses. (a) ping to davinci, even "ping -f",
+ * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
+ * although ARP RX wins. (That test was done with a full speed link.)
+ */
+
+
+/*
+ * NOTE on endpoint usage:
+ *
+ * CONTROL transfers all go through ep0. BULK ones go through dedicated IN
+ * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
+ *
+ * (Yes, bulk _could_ use more of the endpoints than that, and would even
+ * benefit from it ... one remote device may easily be NAKing while others
+ * need to perform transfers in that same direction. The same thing could
+ * be done in software though, assuming dma cooperates.)
+ *
+ * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
+ * So far that scheduling is both dumb and optimistic: the endpoint will be
+ * "claimed" until its software queue is no longer refilled. No multiplexing
+ * of transfers between endpoints, or anything clever.
+ */
+
+
+static void musb_ep_program(struct musb *musb, u8 epnum,
+ struct urb *urb, unsigned int nOut,
+ u8 *buf, u32 len);
+
+/*
+ * Clear TX fifo. Needed to avoid BABBLE errors.
+ */
+static inline void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
+{
+ void __iomem *epio = ep->regs;
+ u16 csr;
+ int retries = 1000;
+
+ csr = musb_readw(epio, MUSB_TXCSR);
+ while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
+ DBG(5, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
+ csr |= MUSB_TXCSR_FLUSHFIFO;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (retries-- < 1) {
+ ERR("Could not flush host TX fifo: csr: %04x\n", csr);
+ return;
+ }
+ mdelay(1);
+ }
+}
+
+/*
+ * Start transmit. Caller is responsible for locking shared resources.
+ * musb must be locked.
+ */
+static inline void musb_h_tx_start(struct musb_hw_ep *ep)
+{
+ u16 txcsr;
+
+ /* NOTE: no locks here; caller should lock and select EP */
+ if (ep->epnum) {
+ txcsr = musb_readw(ep->regs, MUSB_TXCSR);
+ txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
+ musb_writew(ep->regs, MUSB_TXCSR, txcsr);
+ } else {
+ txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
+ musb_writew(ep->regs, MUSB_CSR0, txcsr);
+ }
+
+}
+
+static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
+{
+ u16 txcsr;
+
+ /* NOTE: no locks here; caller should lock and select EP */
+ txcsr = musb_readw(ep->regs, MUSB_TXCSR);
+ txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
+ musb_writew(ep->regs, MUSB_TXCSR, txcsr);
+}
+
+/*
+ * Start the URB at the front of an endpoint's queue
+ * end must be claimed from the caller.
+ *
+ * Context: controller locked, irqs blocked
+ */
+static void
+musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
+{
+ u16 frame;
+ u32 len;
+ void *buf;
+ void __iomem *mbase = musb->mregs;
+ struct urb *urb = next_urb(qh);
+ struct musb_hw_ep *hw_ep = qh->hw_ep;
+ unsigned pipe = urb->pipe;
+ u8 address = usb_pipedevice(pipe);
+ int epnum = hw_ep->epnum;
+
+ /* initialize software qh state */
+ qh->offset = 0;
+ qh->segsize = 0;
+
+ /* gather right source of data */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ /* control transfers always start with SETUP */
+ is_in = 0;
+ hw_ep->out_qh = qh;
+ musb->ep0_stage = MUSB_EP0_START;
+ buf = urb->setup_packet;
+ len = 8;
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ qh->iso_idx = 0;
+ qh->frame = 0;
+ buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
+ len = urb->iso_frame_desc[0].length;
+ break;
+ default: /* bulk, interrupt */
+ buf = urb->transfer_buffer;
+ len = urb->transfer_buffer_length;
+ }
+
+ DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
+ qh, urb, address, qh->epnum,
+ is_in ? "in" : "out",
+ ({char *s; switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL: s = ""; break;
+ case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break;
+ case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break;
+ default: s = "-intr"; break;
+ }; s; }),
+ epnum, buf, len);
+
+ /* Configure endpoint */
+ if (is_in || hw_ep->is_shared_fifo)
+ hw_ep->in_qh = qh;
+ else
+ hw_ep->out_qh = qh;
+ musb_ep_program(musb, epnum, urb, !is_in, buf, len);
+
+ /* transmit may have more work: start it when it is time */
+ if (is_in)
+ return;
+
+ /* determine if the time is right for a periodic transfer */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_ISOC:
+ case USB_ENDPOINT_XFER_INT:
+ DBG(3, "check whether there's still time for periodic Tx\n");
+ qh->iso_idx = 0;
+ frame = musb_readw(mbase, MUSB_FRAME);
+ /* FIXME this doesn't implement that scheduling policy ...
+ * or handle framecounter wrapping
+ */
+ if ((urb->transfer_flags & URB_ISO_ASAP)
+ || (frame >= urb->start_frame)) {
+ /* REVISIT the SOF irq handler shouldn't duplicate
+ * this code; and we don't init urb->start_frame...
+ */
+ qh->frame = 0;
+ goto start;
+ } else {
+ qh->frame = urb->start_frame;
+ /* enable SOF interrupt so we can count down */
+ DBG(1, "SOF for %d\n", epnum);
+#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
+ musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
+#endif
+ }
+ break;
+ default:
+start:
+ DBG(4, "Start TX%d %s\n", epnum,
+ hw_ep->tx_channel ? "dma" : "pio");
+
+ if (!hw_ep->tx_channel)
+ musb_h_tx_start(hw_ep);
+ else if (is_cppi_enabled() || tusb_dma_omap())
+ cppi_host_txdma_start(hw_ep);
+ }
+}
+
+/* caller owns controller lock, irqs are blocked */
+static void
+__musb_giveback(struct musb *musb, struct urb *urb, int status)
+__releases(musb->lock)
+__acquires(musb->lock)
+{
+ DBG(({ int level; switch (urb->status) {
+ case 0:
+ level = 4;
+ break;
+ /* common/boring faults */
+ case -EREMOTEIO:
+ case -ESHUTDOWN:
+ case -ECONNRESET:
+ case -EPIPE:
+ level = 3;
+ break;
+ default:
+ level = 2;
+ break;
+ }; level; }),
+ "complete %p (%d), dev%d ep%d%s, %d/%d\n",
+ urb, urb->status,
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ urb->actual_length, urb->transfer_buffer_length
+ );
+
+ spin_unlock(&musb->lock);
+ usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
+ spin_lock(&musb->lock);
+}
+
+/* for bulk/interrupt endpoints only */
+static inline void
+musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
+{
+ struct usb_device *udev = urb->dev;
+ u16 csr;
+ void __iomem *epio = ep->regs;
+ struct musb_qh *qh;
+
+ /* FIXME: the current Mentor DMA code seems to have
+ * problems getting toggle correct.
+ */
+
+ if (is_in || ep->is_shared_fifo)
+ qh = ep->in_qh;
+ else
+ qh = ep->out_qh;
+
+ if (!is_in) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ usb_settoggle(udev, qh->epnum, 1,
+ (csr & MUSB_TXCSR_H_DATATOGGLE)
+ ? 1 : 0);
+ } else {
+ csr = musb_readw(epio, MUSB_RXCSR);
+ usb_settoggle(udev, qh->epnum, 0,
+ (csr & MUSB_RXCSR_H_DATATOGGLE)
+ ? 1 : 0);
+ }
+}
+
+/* caller owns controller lock, irqs are blocked */
+static struct musb_qh *
+musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
+{
+ int is_in;
+ struct musb_hw_ep *ep = qh->hw_ep;
+ struct musb *musb = ep->musb;
+ int ready = qh->is_ready;
+
+ if (ep->is_shared_fifo)
+ is_in = 1;
+ else
+ is_in = usb_pipein(urb->pipe);
+
+ /* save toggle eagerly, for paranoia */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_INT:
+ musb_save_toggle(ep, is_in, urb);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (status == 0 && urb->error_count)
+ status = -EXDEV;
+ break;
+ }
+
+ usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
+
+ qh->is_ready = 0;
+ __musb_giveback(musb, urb, status);
+ qh->is_ready = ready;
+
+ /* reclaim resources (and bandwidth) ASAP; deschedule it, and
+ * invalidate qh as soon as list_empty(&hep->urb_list)
+ */
+ if (list_empty(&qh->hep->urb_list)) {
+ struct list_head *head;
+
+ if (is_in)
+ ep->rx_reinit = 1;
+ else
+ ep->tx_reinit = 1;
+
+ /* clobber old pointers to this qh */
+ if (is_in || ep->is_shared_fifo)
+ ep->in_qh = NULL;
+ else
+ ep->out_qh = NULL;
+ qh->hep->hcpriv = NULL;
+
+ switch (qh->type) {
+
+ case USB_ENDPOINT_XFER_ISOC:
+ case USB_ENDPOINT_XFER_INT:
+ /* this is where periodic bandwidth should be
+ * de-allocated if it's tracked and allocated;
+ * and where we'd update the schedule tree...
+ */
+ musb->periodic[ep->epnum] = NULL;
+ kfree(qh);
+ qh = NULL;
+ break;
+
+ case USB_ENDPOINT_XFER_CONTROL:
+ case USB_ENDPOINT_XFER_BULK:
+ /* fifo policy for these lists, except that NAKing
+ * should rotate a qh to the end (for fairness).
+ */
+ head = qh->ring.prev;
+ list_del(&qh->ring);
+ kfree(qh);
+ qh = first_qh(head);
+ break;
+ }
+ }
+ return qh;
+}
+
+/*
+ * Advance this hardware endpoint's queue, completing the specified urb and
+ * advancing to either the next urb queued to that qh, or else invalidating
+ * that qh and advancing to the next qh scheduled after the current one.
+ *
+ * Context: caller owns controller lock, irqs are blocked
+ */
+static void
+musb_advance_schedule(struct musb *musb, struct urb *urb,
+ struct musb_hw_ep *hw_ep, int is_in)
+{
+ struct musb_qh *qh;
+
+ if (is_in || hw_ep->is_shared_fifo)
+ qh = hw_ep->in_qh;
+ else
+ qh = hw_ep->out_qh;
+
+ if (urb->status == -EINPROGRESS)
+ qh = musb_giveback(qh, urb, 0);
+ else
+ qh = musb_giveback(qh, urb, urb->status);
+
+ if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) {
+ DBG(4, "... next ep%d %cX urb %p\n",
+ hw_ep->epnum, is_in ? 'R' : 'T',
+ next_urb(qh));
+ musb_start_urb(musb, is_in, qh);
+ }
+}
+
+static inline u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
+{
+ /* we don't want fifo to fill itself again;
+ * ignore dma (various models),
+ * leave toggle alone (may not have been saved yet)
+ */
+ csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
+ csr &= ~(MUSB_RXCSR_H_REQPKT
+ | MUSB_RXCSR_H_AUTOREQ
+ | MUSB_RXCSR_AUTOCLEAR);
+
+ /* write 2x to allow double buffering */
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+
+ /* flush writebuffer */
+ return musb_readw(hw_ep->regs, MUSB_RXCSR);
+}
+
+/*
+ * PIO RX for a packet (or part of it).
+ */
+static bool
+musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
+{
+ u16 rx_count;
+ u8 *buf;
+ u16 csr;
+ bool done = false;
+ u32 length;
+ int do_flush = 0;
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh = hw_ep->in_qh;
+ int pipe = urb->pipe;
+ void *buffer = urb->transfer_buffer;
+
+ /* musb_ep_select(mbase, epnum); */
+ rx_count = musb_readw(epio, MUSB_RXCOUNT);
+ DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
+ urb->transfer_buffer, qh->offset,
+ urb->transfer_buffer_length);
+
+ /* unload FIFO */
+ if (usb_pipeisoc(pipe)) {
+ int status = 0;
+ struct usb_iso_packet_descriptor *d;
+
+ if (iso_err) {
+ status = -EILSEQ;
+ urb->error_count++;
+ }
+
+ d = urb->iso_frame_desc + qh->iso_idx;
+ buf = buffer + d->offset;
+ length = d->length;
+ if (rx_count > length) {
+ if (status == 0) {
+ status = -EOVERFLOW;
+ urb->error_count++;
+ }
+ DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
+ do_flush = 1;
+ } else
+ length = rx_count;
+ urb->actual_length += length;
+ d->actual_length = length;
+
+ d->status = status;
+
+ /* see if we are done */
+ done = (++qh->iso_idx >= urb->number_of_packets);
+ } else {
+ /* non-isoch */
+ buf = buffer + qh->offset;
+ length = urb->transfer_buffer_length - qh->offset;
+ if (rx_count > length) {
+ if (urb->status == -EINPROGRESS)
+ urb->status = -EOVERFLOW;
+ DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
+ do_flush = 1;
+ } else
+ length = rx_count;
+ urb->actual_length += length;
+ qh->offset += length;
+
+ /* see if we are done */
+ done = (urb->actual_length == urb->transfer_buffer_length)
+ || (rx_count < qh->maxpacket)
+ || (urb->status != -EINPROGRESS);
+ if (done
+ && (urb->status == -EINPROGRESS)
+ && (urb->transfer_flags & URB_SHORT_NOT_OK)
+ && (urb->actual_length
+ < urb->transfer_buffer_length))
+ urb->status = -EREMOTEIO;
+ }
+
+ musb_read_fifo(hw_ep, length, buf);
+
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_H_WZC_BITS;
+ if (unlikely(do_flush))
+ musb_h_flush_rxfifo(hw_ep, csr);
+ else {
+ /* REVISIT this assumes AUTOCLEAR is never set */
+ csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
+ if (!done)
+ csr |= MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+
+ return done;
+}
+
+/* we don't always need to reinit a given side of an endpoint...
+ * when we do, use tx/rx reinit routine and then construct a new CSR
+ * to address data toggle, NYET, and DMA or PIO.
+ *
+ * it's possible that driver bugs (especially for DMA) or aborting a
+ * transfer might have left the endpoint busier than it should be.
+ * the busy/not-empty tests are basically paranoia.
+ */
+static void
+musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
+{
+ u16 csr;
+
+ /* NOTE: we know the "rx" fifo reinit never triggers for ep0.
+ * That always uses tx_reinit since ep0 repurposes TX register
+ * offsets; the initial SETUP packet is also a kind of OUT.
+ */
+
+ /* if programmed for Tx, put it in RX mode */
+ if (ep->is_shared_fifo) {
+ csr = musb_readw(ep->regs, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_MODE) {
+ musb_h_tx_flush_fifo(ep);
+ musb_writew(ep->regs, MUSB_TXCSR,
+ MUSB_TXCSR_FRCDATATOG);
+ }
+ /* clear mode (and everything else) to enable Rx */
+ musb_writew(ep->regs, MUSB_TXCSR, 0);
+
+ /* scrub all previous state, clearing toggle */
+ } else {
+ csr = musb_readw(ep->regs, MUSB_RXCSR);
+ if (csr & MUSB_RXCSR_RXPKTRDY)
+ WARNING("rx%d, packet/%d ready?\n", ep->epnum,
+ musb_readw(ep->regs, MUSB_RXCOUNT));
+
+ musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
+ }
+
+ /* target addr and (for multipoint) hub addr/port */
+ if (musb->is_multipoint) {
+ musb_writeb(ep->target_regs, MUSB_RXFUNCADDR,
+ qh->addr_reg);
+ musb_writeb(ep->target_regs, MUSB_RXHUBADDR,
+ qh->h_addr_reg);
+ musb_writeb(ep->target_regs, MUSB_RXHUBPORT,
+ qh->h_port_reg);
+ } else
+ musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
+
+ /* protocol/endpoint, interval/NAKlimit, i/o size */
+ musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
+ musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
+ /* NOTE: bulk combining rewrites high bits of maxpacket */
+ musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);
+
+ ep->rx_reinit = 0;
+}
+
+
+/*
+ * Program an HDRC endpoint as per the given URB
+ * Context: irqs blocked, controller lock held
+ */
+static void musb_ep_program(struct musb *musb, u8 epnum,
+ struct urb *urb, unsigned int is_out,
+ u8 *buf, u32 len)
+{
+ struct dma_controller *dma_controller;
+ struct dma_channel *dma_channel;
+ u8 dma_ok;
+ void __iomem *mbase = musb->mregs;
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh;
+ u16 packet_sz;
+
+ if (!is_out || hw_ep->is_shared_fifo)
+ qh = hw_ep->in_qh;
+ else
+ qh = hw_ep->out_qh;
+
+ packet_sz = qh->maxpacket;
+
+ DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
+ "h_addr%02x h_port%02x bytes %d\n",
+ is_out ? "-->" : "<--",
+ epnum, urb, urb->dev->speed,
+ qh->addr_reg, qh->epnum, is_out ? "out" : "in",
+ qh->h_addr_reg, qh->h_port_reg,
+ len);
+
+ musb_ep_select(mbase, epnum);
+
+ /* candidate for DMA? */
+ dma_controller = musb->dma_controller;
+ if (is_dma_capable() && epnum && dma_controller) {
+ dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
+ if (!dma_channel) {
+ dma_channel = dma_controller->channel_alloc(
+ dma_controller, hw_ep, is_out);
+ if (is_out)
+ hw_ep->tx_channel = dma_channel;
+ else
+ hw_ep->rx_channel = dma_channel;
+ }
+ } else
+ dma_channel = NULL;
+
+ /* make sure we clear DMAEnab, autoSet bits from previous run */
+
+ /* OUT/transmit/EP0 or IN/receive? */
+ if (is_out) {
+ u16 csr;
+ u16 int_txe;
+ u16 load_count;
+
+ csr = musb_readw(epio, MUSB_TXCSR);
+
+ /* disable interrupt in case we flush */
+ int_txe = musb_readw(mbase, MUSB_INTRTXE);
+ musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
+
+ /* general endpoint setup */
+ if (epnum) {
+ /* ASSERT: TXCSR_DMAENAB was already cleared */
+
+ /* flush all old state, set default */
+ musb_h_tx_flush_fifo(hw_ep);
+ csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_FRCDATATOG
+ | MUSB_TXCSR_H_RXSTALL
+ | MUSB_TXCSR_H_ERROR
+ | MUSB_TXCSR_TXPKTRDY
+ );
+ csr |= MUSB_TXCSR_MODE;
+
+ if (usb_gettoggle(urb->dev,
+ qh->epnum, 1))
+ csr |= MUSB_TXCSR_H_WR_DATATOGGLE
+ | MUSB_TXCSR_H_DATATOGGLE;
+ else
+ csr |= MUSB_TXCSR_CLRDATATOG;
+
+ /* twice in case of double packet buffering */
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* REVISIT may need to clear FLUSHFIFO ... */
+ musb_writew(epio, MUSB_TXCSR, csr);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ } else {
+ /* endpoint 0: just flush */
+ musb_writew(epio, MUSB_CSR0,
+ csr | MUSB_CSR0_FLUSHFIFO);
+ musb_writew(epio, MUSB_CSR0,
+ csr | MUSB_CSR0_FLUSHFIFO);
+ }
+
+ /* target addr and (for multipoint) hub addr/port */
+ if (musb->is_multipoint) {
+ musb_writeb(mbase,
+ MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR),
+ qh->addr_reg);
+ musb_writeb(mbase,
+ MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR),
+ qh->h_addr_reg);
+ musb_writeb(mbase,
+ MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT),
+ qh->h_port_reg);
+/* FIXME if !epnum, do the same for RX ... */
+ } else
+ musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
+
+ /* protocol/endpoint/interval/NAKlimit */
+ if (epnum) {
+ musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
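+ /* can_bulk_split() packs a packet count into the high bits of
+ * TXMAXP: (fifo size / maxpacket) - 1, shifted left by 11; for
+ * example, assuming a 4096 byte TX fifo and 512 byte bulk
+ * packets, that field would be 4096/512 - 1 = 7.
+ */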
+ if (can_bulk_split(musb, qh->type))
+ musb_writew(epio, MUSB_TXMAXP,
+ packet_sz
+ | ((hw_ep->max_packet_sz_tx /
+ packet_sz) - 1) << 11);
+ else
+ musb_writew(epio, MUSB_TXMAXP,
+ packet_sz);
+ musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
+ } else {
+ musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
+ if (musb->is_multipoint)
+ musb_writeb(epio, MUSB_TYPE0,
+ qh->type_reg);
+ }
+
+ if (can_bulk_split(musb, qh->type))
+ load_count = min((u32) hw_ep->max_packet_sz_tx,
+ len);
+ else
+ load_count = min((u32) packet_sz, len);
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+ if (dma_channel) {
+
+ /* clear previous state */
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_DMAENAB);
+ csr |= MUSB_TXCSR_MODE;
+ musb_writew(epio, MUSB_TXCSR,
+ csr | MUSB_TXCSR_MODE);
+
+ qh->segsize = min(len, dma_channel->max_len);
+
+ if (qh->segsize <= packet_sz)
+ dma_channel->desired_mode = 0;
+ else
+ dma_channel->desired_mode = 1;
+
+
+ if (dma_channel->desired_mode == 0) {
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAMODE);
+ csr |= (MUSB_TXCSR_DMAENAB);
+ /* against programming guide */
+ } else
+ csr |= (MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_DMAMODE);
+
+ musb_writew(epio, MUSB_TXCSR, csr);
+
+ dma_ok = dma_controller->channel_program(
+ dma_channel, packet_sz,
+ dma_channel->desired_mode,
+ urb->transfer_dma,
+ qh->segsize);
+ if (dma_ok) {
+ load_count = 0;
+ } else {
+ dma_controller->channel_release(dma_channel);
+ if (is_out)
+ hw_ep->tx_channel = NULL;
+ else
+ hw_ep->rx_channel = NULL;
+ dma_channel = NULL;
+ }
+ }
+#endif
+
+ /* candidate for DMA */
+ if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
+
+ /* program endpoint CSRs first, then setup DMA.
+ * assume CPPI setup succeeds.
+ * defer enabling dma.
+ */
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_DMAENAB);
+ csr |= MUSB_TXCSR_MODE;
+ musb_writew(epio, MUSB_TXCSR,
+ csr | MUSB_TXCSR_MODE);
+
+ dma_channel->actual_len = 0L;
+ qh->segsize = len;
+
+ /* TX uses "rndis" mode automatically, but needs help
+ * to identify the zero-length-final-packet case.
+ */
+ dma_ok = dma_controller->channel_program(
+ dma_channel, packet_sz,
+ (urb->transfer_flags
+ & URB_ZERO_PACKET)
+ == URB_ZERO_PACKET,
+ urb->transfer_dma,
+ qh->segsize);
+ if (dma_ok) {
+ load_count = 0;
+ } else {
+ dma_controller->channel_release(dma_channel);
+ hw_ep->tx_channel = NULL;
+ dma_channel = NULL;
+
+ /* REVISIT there's an error path here that
+ * needs handling: can't do dma, but
+ * there's no pio buffer address...
+ */
+ }
+ }
+
+ if (load_count) {
+ /* ASSERT: TXCSR_DMAENAB was already cleared */
+
+ /* PIO to load FIFO */
+ qh->segsize = load_count;
+ musb_write_fifo(hw_ep, load_count, buf);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_DMAMODE
+ | MUSB_TXCSR_AUTOSET);
+ /* write CSR */
+ csr |= MUSB_TXCSR_MODE;
+
+ if (epnum)
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
+
+ /* re-enable interrupt */
+ musb_writew(mbase, MUSB_INTRTXE, int_txe);
+
+ /* IN/receive */
+ } else {
+ u16 csr;
+
+ if (hw_ep->rx_reinit) {
+ musb_rx_reinit(musb, qh, hw_ep);
+
+ /* init new state: toggle and NYET, maybe DMA later */
+ if (usb_gettoggle(urb->dev, qh->epnum, 0))
+ csr = MUSB_RXCSR_H_WR_DATATOGGLE
+ | MUSB_RXCSR_H_DATATOGGLE;
+ else
+ csr = 0;
+ if (qh->type == USB_ENDPOINT_XFER_INT)
+ csr |= MUSB_RXCSR_DISNYET;
+
+ } else {
+ csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+
+ if (csr & (MUSB_RXCSR_RXPKTRDY
+ | MUSB_RXCSR_DMAENAB
+ | MUSB_RXCSR_H_REQPKT))
+ ERR("broken !rx_reinit, ep%d csr %04x\n",
+ hw_ep->epnum, csr);
+
+ /* scrub any stale state, leaving toggle alone */
+ csr &= MUSB_RXCSR_DISNYET;
+ }
+
+ /* kick things off */
+
+ if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
+ /* candidate for DMA */
+ if (dma_channel) {
+ dma_channel->actual_len = 0L;
+ qh->segsize = len;
+
+ /* AUTOREQ is in a DMA register */
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+ csr = musb_readw(hw_ep->regs,
+ MUSB_RXCSR);
+
+ /* unless caller treats short rx transfers as
+ * errors, we dare not queue multiple transfers.
+ */
+ dma_ok = dma_controller->channel_program(
+ dma_channel, packet_sz,
+ !(urb->transfer_flags
+ & URB_SHORT_NOT_OK),
+ urb->transfer_dma,
+ qh->segsize);
+ if (!dma_ok) {
+ dma_controller->channel_release(
+ dma_channel);
+ hw_ep->rx_channel = NULL;
+ dma_channel = NULL;
+ } else
+ csr |= MUSB_RXCSR_DMAENAB;
+ }
+ }
+
+ csr |= MUSB_RXCSR_H_REQPKT;
+ DBG(7, "RXCSR%d := %04x\n", epnum, csr);
+ musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
+ csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+ }
+}
+
+
+/*
+ * Service the default endpoint (ep0) as host.
+ * Return true until it's time to start the status stage.
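+ * (Stage sequence, as driven by musb_h_ep0_irq(): MUSB_EP0_START, then
+ * MUSB_EP0_IN or MUSB_EP0_OUT for the data stage, then MUSB_EP0_STATUS;
+ * requests with wLength == 0 skip the data stage.)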
+ */
+static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
+{
+ bool more = false;
+ u8 *fifo_dest = NULL;
+ u16 fifo_count = 0;
+ struct musb_hw_ep *hw_ep = musb->control_ep;
+ struct musb_qh *qh = hw_ep->in_qh;
+ struct usb_ctrlrequest *request;
+
+ switch (musb->ep0_stage) {
+ case MUSB_EP0_IN:
+ fifo_dest = urb->transfer_buffer + urb->actual_length;
+ fifo_count = min(len, ((u16) (urb->transfer_buffer_length
+ - urb->actual_length)));
+ if (fifo_count < len)
+ urb->status = -EOVERFLOW;
+
+ musb_read_fifo(hw_ep, fifo_count, fifo_dest);
+
+ urb->actual_length += fifo_count;
+ if (len < qh->maxpacket) {
+ /* always terminate on short read; it's
+ * rarely reported as an error.
+ */
+ } else if (urb->actual_length <
+ urb->transfer_buffer_length)
+ more = true;
+ break;
+ case MUSB_EP0_START:
+ request = (struct usb_ctrlrequest *) urb->setup_packet;
+
+ if (!request->wLength) {
+ DBG(4, "start no-DATA\n");
+ break;
+ } else if (request->bRequestType & USB_DIR_IN) {
+ DBG(4, "start IN-DATA\n");
+ musb->ep0_stage = MUSB_EP0_IN;
+ more = true;
+ break;
+ } else {
+ DBG(4, "start OUT-DATA\n");
+ musb->ep0_stage = MUSB_EP0_OUT;
+ more = true;
+ }
+ /* FALLTHROUGH */
+ case MUSB_EP0_OUT:
+ fifo_count = min(qh->maxpacket, ((u16)
+ (urb->transfer_buffer_length
+ - urb->actual_length)));
+
+ if (fifo_count) {
+ fifo_dest = (u8 *) (urb->transfer_buffer
+ + urb->actual_length);
+ DBG(3, "Sending %d bytes to %p\n",
+ fifo_count, fifo_dest);
+ musb_write_fifo(hw_ep, fifo_count, fifo_dest);
+
+ urb->actual_length += fifo_count;
+ more = true;
+ }
+ break;
+ default:
+ ERR("bogus ep0 stage %d\n", musb->ep0_stage);
+ break;
+ }
+
+ return more;
+}
+
+/*
+ * Handle default endpoint interrupt as host. Only called at IRQ time
+ * from the controller's interrupt service routine.
+ *
+ * called with controller irqlocked
+ */
+irqreturn_t musb_h_ep0_irq(struct musb *musb)
+{
+ struct urb *urb;
+ u16 csr, len;
+ int status = 0;
+ void __iomem *mbase = musb->mregs;
+ struct musb_hw_ep *hw_ep = musb->control_ep;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh = hw_ep->in_qh;
+ bool complete = false;
+ irqreturn_t retval = IRQ_NONE;
+
+ /* ep0 only has one queue, "in" */
+ urb = next_urb(qh);
+
+ musb_ep_select(mbase, 0);
+ csr = musb_readw(epio, MUSB_CSR0);
+ len = (csr & MUSB_CSR0_RXPKTRDY)
+ ? musb_readb(epio, MUSB_COUNT0)
+ : 0;
+
+ DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
+ csr, qh, len, urb, musb->ep0_stage);
+
+ /* if we just did status stage, we are done */
+ if (MUSB_EP0_STATUS == musb->ep0_stage) {
+ retval = IRQ_HANDLED;
+ complete = true;
+ }
+
+ /* prepare status */
+ if (csr & MUSB_CSR0_H_RXSTALL) {
+ DBG(6, "STALLING ENDPOINT\n");
+ status = -EPIPE;
+
+ } else if (csr & MUSB_CSR0_H_ERROR) {
+ DBG(2, "no response, csr0 %04x\n", csr);
+ status = -EPROTO;
+
+ } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
+ DBG(2, "control NAK timeout\n");
+
+ /* NOTE: this code path would be a good place to PAUSE a
+ * control transfer, if another one is queued, so that
+ * ep0 is more likely to stay busy.
+ *
+ * if (qh->ring.next != &musb->control), then
+ * we have a candidate... NAKing is *NOT* an error
+ */
+ musb_writew(epio, MUSB_CSR0, 0);
+ retval = IRQ_HANDLED;
+ }
+
+ if (status) {
+ DBG(6, "aborting\n");
+ retval = IRQ_HANDLED;
+ if (urb)
+ urb->status = status;
+ complete = true;
+
+ /* use the proper sequence to abort the transfer */
+ if (csr & MUSB_CSR0_H_REQPKT) {
+ csr &= ~MUSB_CSR0_H_REQPKT;
+ musb_writew(epio, MUSB_CSR0, csr);
+ csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
+ musb_writew(epio, MUSB_CSR0, csr);
+ } else {
+ csr |= MUSB_CSR0_FLUSHFIFO;
+ musb_writew(epio, MUSB_CSR0, csr);
+ musb_writew(epio, MUSB_CSR0, csr);
+ csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
+ musb_writew(epio, MUSB_CSR0, csr);
+ }
+
+ musb_writeb(epio, MUSB_NAKLIMIT0, 0);
+
+ /* clear it */
+ musb_writew(epio, MUSB_CSR0, 0);
+ }
+
+ if (unlikely(!urb)) {
+ /* stop endpoint since we have no place for its data, this
+ * SHOULD NEVER HAPPEN! */
+ ERR("no URB for end 0\n");
+
+ musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
+ musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
+ musb_writew(epio, MUSB_CSR0, 0);
+
+ goto done;
+ }
+
+ if (!complete) {
+ /* call common logic and prepare response */
+ if (musb_h_ep0_continue(musb, len, urb)) {
+ /* more packets required */
+ csr = (MUSB_EP0_IN == musb->ep0_stage)
+ ? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
+ } else {
+ /* data transfer complete; perform status phase */
+ if (usb_pipeout(urb->pipe)
+ || !urb->transfer_buffer_length)
+ csr = MUSB_CSR0_H_STATUSPKT
+ | MUSB_CSR0_H_REQPKT;
+ else
+ csr = MUSB_CSR0_H_STATUSPKT
+ | MUSB_CSR0_TXPKTRDY;
+
+ /* flag status stage */
+ musb->ep0_stage = MUSB_EP0_STATUS;
+
+ DBG(5, "ep0 STATUS, csr %04x\n", csr);
+
+ }
+ musb_writew(epio, MUSB_CSR0, csr);
+ retval = IRQ_HANDLED;
+ } else
+ musb->ep0_stage = MUSB_EP0_IDLE;
+
+ /* call completion handler if done */
+ if (complete)
+ musb_advance_schedule(musb, urb, hw_ep, 1);
+done:
+ return retval;
+}
+
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Host side TX (OUT) using Mentor DMA works as follows:
+ submit_urb ->
+ - if queue was empty, Program Endpoint
+ - ... which starts DMA to fifo in mode 1 or 0
+
+ DMA Isr (transfer complete) -> TxAvail()
+ - Stop DMA (~DmaEnab) (<--- Alert ... currently happens
+ only in musb_cleanup_urb)
+ - TxPktRdy has to be set in mode 0 or for
+ short packets in mode 1.
+*/
+
+#endif
+
+/* Service a Tx-Available or dma completion irq for the endpoint */
+void musb_host_tx(struct musb *musb, u8 epnum)
+{
+ int pipe;
+ bool done = false;
+ u16 tx_csr;
+ size_t wLength = 0;
+ u8 *buf = NULL;
+ struct urb *urb;
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh = hw_ep->out_qh;
+ u32 status = 0;
+ void __iomem *mbase = musb->mregs;
+ struct dma_channel *dma;
+
+ urb = next_urb(qh);
+
+ musb_ep_select(mbase, epnum);
+ tx_csr = musb_readw(epio, MUSB_TXCSR);
+
+ /* with CPPI, DMA sometimes triggers "extra" irqs */
+ if (!urb) {
+ DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
+ goto finish;
+ }
+
+ pipe = urb->pipe;
+ dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
+ DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
+ dma ? ", dma" : "");
+
+ /* check for errors */
+ if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
+ /* dma was disabled, fifo flushed */
+ DBG(3, "TX end %d stall\n", epnum);
+
+ /* stall; record URB status */
+ status = -EPIPE;
+
+ } else if (tx_csr & MUSB_TXCSR_H_ERROR) {
+ /* (NON-ISO) dma was disabled, fifo flushed */
+ DBG(3, "TX 3strikes on ep=%d\n", epnum);
+
+ status = -ETIMEDOUT;
+
+ } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
+ DBG(6, "TX end=%d device not responding\n", epnum);
+
+ /* NOTE: this code path would be a good place to PAUSE a
+ * transfer, if there's some other (nonperiodic) tx urb
+ * that could use this fifo. (dma complicates it...)
+ *
+ * if (bulk && qh->ring.next != &musb->out_bulk), then
+ * we have a candidate... NAKing is *NOT* an error
+ */
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_TXCSR,
+ MUSB_TXCSR_H_WZC_BITS
+ | MUSB_TXCSR_TXPKTRDY);
+ goto finish;
+ }
+
+ if (status) {
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ (void) musb->dma_controller->channel_abort(dma);
+ }
+
+ /* do the proper sequence to abort the transfer in the
+ * usb core; the dma engine should already be stopped.
+ */
+ musb_h_tx_flush_fifo(hw_ep);
+ tx_csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_H_ERROR
+ | MUSB_TXCSR_H_RXSTALL
+ | MUSB_TXCSR_H_NAKTIMEOUT
+ );
+
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_TXCSR, tx_csr);
+ /* REVISIT may need to clear FLUSHFIFO ... */
+ musb_writew(epio, MUSB_TXCSR, tx_csr);
+ musb_writeb(epio, MUSB_TXINTERVAL, 0);
+
+ done = true;
+ }
+
+ /* second cppi case */
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
+ goto finish;
+
+ }
+
+ /* REVISIT this looks wrong... */
+ if (!status || dma || usb_pipeisoc(pipe)) {
+ if (dma)
+ wLength = dma->actual_len;
+ else
+ wLength = qh->segsize;
+ qh->offset += wLength;
+
+ if (usb_pipeisoc(pipe)) {
+ struct usb_iso_packet_descriptor *d;
+
+ d = urb->iso_frame_desc + qh->iso_idx;
+ d->actual_length = qh->segsize;
+ if (++qh->iso_idx >= urb->number_of_packets) {
+ done = true;
+ } else {
+ d++;
+ buf = urb->transfer_buffer + d->offset;
+ wLength = d->length;
+ }
+ } else if (dma) {
+ done = true;
+ } else {
+ /* see if we need to send more data, or ZLP */
+ if (qh->segsize < qh->maxpacket)
+ done = true;
+ else if (qh->offset == urb->transfer_buffer_length
+ && !(urb->transfer_flags
+ & URB_ZERO_PACKET))
+ done = true;
+ if (!done) {
+ buf = urb->transfer_buffer
+ + qh->offset;
+ wLength = urb->transfer_buffer_length
+ - qh->offset;
+ }
+ }
+ }
+
+ /* urb->status != -EINPROGRESS means request has been faulted,
+ * so we must abort this transfer after cleanup
+ */
+ if (urb->status != -EINPROGRESS) {
+ done = true;
+ if (status == 0)
+ status = urb->status;
+ }
+
+ if (done) {
+ /* set status */
+ urb->status = status;
+ urb->actual_length = qh->offset;
+ musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
+
+ } else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) {
+ /* WARN_ON(!buf); */
+
+ /* REVISIT: some docs say that when hw_ep->tx_double_buffered,
+ * (and presumably, fifo is not half-full) we should write TWO
+ * packets before updating TXCSR ... other docs disagree ...
+ */
+ /* PIO: start next packet in this URB */
+ wLength = min(qh->maxpacket, (u16) wLength);
+ musb_write_fifo(hw_ep, wLength, buf);
+ qh->segsize = wLength;
+
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_TXCSR,
+ MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
+ } else
+ DBG(1, "not complete, but dma enabled?\n");
+
+finish:
+ return;
+}
+
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+
+/* Host side RX (IN) using Mentor DMA works as follows:
+ submit_urb ->
+ - if queue was empty, ProgramEndpoint
+ - first IN token is sent out (by setting ReqPkt)
+ LinuxIsr -> RxReady()
+ /\ => first packet is received
+ | - Set in mode 0 (DmaEnab, ~ReqPkt)
+ | -> DMA Isr (transfer complete) -> RxReady()
+ | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
+ | - if urb not complete, send next IN token (ReqPkt)
+ | | else complete urb.
+ | |
+ ---------------------------
+ *
+ * Nuances of mode 1:
+ * For short packets, no ack (+RxPktRdy) is sent automatically
+ * (even if AutoClear is ON)
+ * For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
+ * automatically => major problem, as collecting the next packet becomes
+ * difficult. Hence mode 1 is not used.
+ *
+ * REVISIT
+ * All we care about at this driver level is that
+ * (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
+ * (b) termination conditions are: short RX, or buffer full;
+ * (c) fault modes include
+ * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
+ * (and that endpoint's dma queue stops immediately)
+ * - overflow (full, PLUS more bytes in the terminal packet)
+ *
+ * So for example, usb-storage sets URB_SHORT_NOT_OK, and would
+ * thus be a great candidate for using mode 1 ... for all but the
+ * last packet of one URB's transfer.
+ */
+
+#endif
+
+/*
+ * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
+ * and high-bandwidth IN transfer cases.
+ */
+void musb_host_rx(struct musb *musb, u8 epnum)
+{
+ struct urb *urb;
+ struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
+ void __iomem *epio = hw_ep->regs;
+ struct musb_qh *qh = hw_ep->in_qh;
+ size_t xfer_len;
+ void __iomem *mbase = musb->mregs;
+ int pipe;
+ u16 rx_csr, val;
+ bool iso_err = false;
+ bool done = false;
+ u32 status;
+ struct dma_channel *dma;
+
+ musb_ep_select(mbase, epnum);
+
+ urb = next_urb(qh);
+ dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
+ status = 0;
+ xfer_len = 0;
+
+ rx_csr = musb_readw(epio, MUSB_RXCSR);
+ val = rx_csr;
+
+ if (unlikely(!urb)) {
+ /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
+ * usbtest #11 (unlinks) triggers it regularly, sometimes
+ * with fifo full. (Only with DMA??)
+ */
+ DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
+ musb_readw(epio, MUSB_RXCOUNT));
+ musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
+ return;
+ }
+
+ pipe = urb->pipe;
+
+ DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
+ epnum, rx_csr, urb->actual_length,
+ dma ? dma->actual_len : 0);
+
+ /* check for errors, concurrent stall & unlink is not really
+ * handled yet! */
+ if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
+ DBG(3, "RX end %d STALL\n", epnum);
+
+ /* stall; record URB status */
+ status = -EPIPE;
+
+ } else if (rx_csr & MUSB_RXCSR_H_ERROR) {
+ DBG(3, "end %d RX proto error\n", epnum);
+
+ status = -EPROTO;
+ musb_writeb(epio, MUSB_RXINTERVAL, 0);
+
+ } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
+
+ if (USB_ENDPOINT_XFER_ISOC != qh->type) {
+ /* NOTE this code path would be a good place to PAUSE a
+ * transfer, if there's some other (nonperiodic) rx urb
+ * that could use this fifo. (dma complicates it...)
+ *
+ * if (bulk && qh->ring.next != &musb->in_bulk), then
+ * we have a candidate... NAKing is *NOT* an error
+ */
+ DBG(6, "RX end %d NAK timeout\n", epnum);
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_RXCSR,
+ MUSB_RXCSR_H_WZC_BITS
+ | MUSB_RXCSR_H_REQPKT);
+
+ goto finish;
+ } else {
+ DBG(4, "RX end %d ISO data error\n", epnum);
+ /* packet error reported later */
+ iso_err = true;
+ }
+ }
+
+ /* faults abort the transfer */
+ if (status) {
+ /* clean up dma and collect transfer count */
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ (void) musb->dma_controller->channel_abort(dma);
+ xfer_len = dma->actual_len;
+ }
+ musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
+ musb_writeb(epio, MUSB_RXINTERVAL, 0);
+ done = true;
+ goto finish;
+ }
+
+ if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
+ /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
+ ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
+ goto finish;
+ }
+
+ /* thorough shutdown for now ... given more precise fault handling
+ * and better queueing support, we might keep a DMA pipeline going
+ * while processing this irq for earlier completions.
+ */
+
+ /* FIXME this is _way_ too much in-line logic for Mentor DMA */
+
+#ifndef CONFIG_USB_INVENTRA_DMA
+ if (rx_csr & MUSB_RXCSR_H_REQPKT) {
+ /* REVISIT this happened for a while on some short reads...
+ * the cleanup still needs investigation... looks bad...
+ * and also duplicates dma cleanup code above ... plus,
+ * shouldn't this be the "half full" double buffer case?
+ */
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ (void) musb->dma_controller->channel_abort(dma);
+ xfer_len = dma->actual_len;
+ done = true;
+ }
+
+ DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
+ xfer_len, dma ? ", dma" : "");
+ rx_csr &= ~MUSB_RXCSR_H_REQPKT;
+
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_RXCSR,
+ MUSB_RXCSR_H_WZC_BITS | rx_csr);
+ }
+#endif
+ if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
+ xfer_len = dma->actual_len;
+
+ val &= ~(MUSB_RXCSR_DMAENAB
+ | MUSB_RXCSR_H_AUTOREQ
+ | MUSB_RXCSR_AUTOCLEAR
+ | MUSB_RXCSR_RXPKTRDY);
+ musb_writew(hw_ep->regs, MUSB_RXCSR, val);
+
+#ifdef CONFIG_USB_INVENTRA_DMA
+ /* done if urb buffer is full or short packet is recd */
+ done = (urb->actual_length + xfer_len >=
+ urb->transfer_buffer_length
+ || dma->actual_len < qh->maxpacket);
+
+ /* send IN token for next packet, without AUTOREQ */
+ if (!done) {
+ val |= MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR,
+ MUSB_RXCSR_H_WZC_BITS | val);
+ }
+
+ DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
+ done ? "off" : "reset",
+ musb_readw(epio, MUSB_RXCSR),
+ musb_readw(epio, MUSB_RXCOUNT));
+#else
+ done = true;
+#endif
+ } else if (urb->status == -EINPROGRESS) {
+ /* if no errors, be sure a packet is ready for unloading */
+ if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
+ status = -EPROTO;
+ ERR("Rx interrupt with no errors or packet!\n");
+
+ /* FIXME this is another "SHOULD NEVER HAPPEN" */
+
+/* SCRUB (RX) */
+ /* do the proper sequence to abort the transfer */
+ musb_ep_select(mbase, epnum);
+ val &= ~MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR, val);
+ goto finish;
+ }
+
+ /* we are expecting IN packets */
+#ifdef CONFIG_USB_INVENTRA_DMA
+ if (dma) {
+ struct dma_controller *c;
+ u16 rx_count;
+ int ret;
+
+ rx_count = musb_readw(epio, MUSB_RXCOUNT);
+
+ DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
+ epnum, rx_count,
+ urb->transfer_dma
+ + urb->actual_length,
+ qh->offset,
+ urb->transfer_buffer_length);
+
+ c = musb->dma_controller;
+
+ dma->desired_mode = 0;
+#ifdef USE_MODE1
+ /* because of the issue below, mode 1 will
+ * only rarely behave with correct semantics.
+ */
+ if ((urb->transfer_flags &
+ URB_SHORT_NOT_OK)
+ && (urb->transfer_buffer_length -
+ urb->actual_length)
+ > qh->maxpacket)
+ dma->desired_mode = 1;
+#endif
+
+/* Disadvantage of using mode 1:
+ * It's basically usable only for mass storage class; essentially all
+ * other protocols also terminate transfers on short packets.
+ *
+ * Details:
+ * An extra IN token is sent at the end of the transfer (due to AUTOREQ)
+ * If you try to use mode 1 for (transfer_buffer_length - 512), and try
+ * to use the extra IN token to grab the last packet using mode 0, then
+ * the problem is that you cannot be sure when the device will send the
+ * last packet and set RxPktRdy. Sometimes the packet is recd too soon
+ * such that it gets lost when RxCSR is re-set at the end of the mode 1
+ * transfer, while sometimes it is recd just a little late so that if you
+ * try to configure for mode 0 soon after the mode 1 transfer is
+ * completed, you will find rxcount 0. Okay, so you might think why not
+ * wait for an interrupt when the pkt is recd. Well, you won't get any!
+ */
+
+ val = musb_readw(epio, MUSB_RXCSR);
+ val &= ~MUSB_RXCSR_H_REQPKT;
+
+ if (dma->desired_mode == 0)
+ val &= ~MUSB_RXCSR_H_AUTOREQ;
+ else
+ val |= MUSB_RXCSR_H_AUTOREQ;
+ val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;
+
+ musb_writew(epio, MUSB_RXCSR,
+ MUSB_RXCSR_H_WZC_BITS | val);
+
+ /* REVISIT: when actual_length != 0,
+ * transfer_buffer_length may need to be
+ * adjusted first...
+ */
+ ret = c->channel_program(
+ dma, qh->maxpacket,
+ dma->desired_mode,
+ urb->transfer_dma
+ + urb->actual_length,
+ (dma->desired_mode == 0)
+ ? rx_count
+ : urb->transfer_buffer_length);
+
+ if (!ret) {
+ c->channel_release(dma);
+ hw_ep->rx_channel = NULL;
+ dma = NULL;
+ /* REVISIT reset CSR */
+ }
+ }
+#endif /* Mentor DMA */
+
+ if (!dma) {
+ done = musb_host_packet_rx(musb, urb,
+ epnum, iso_err);
+ DBG(6, "read %spacket\n", done ? "last " : "");
+ }
+ }
+
+ if (dma && usb_pipeisoc(pipe)) {
+ struct usb_iso_packet_descriptor *d;
+ int iso_stat = status;
+
+ d = urb->iso_frame_desc + qh->iso_idx;
+ d->actual_length += xfer_len;
+ if (iso_err) {
+ iso_stat = -EILSEQ;
+ urb->error_count++;
+ }
+ d->status = iso_stat;
+ }
+
+finish:
+ urb->actual_length += xfer_len;
+ qh->offset += xfer_len;
+ if (done) {
+ if (urb->status == -EINPROGRESS)
+ urb->status = status;
+ musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
+ }
+}
+
+/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
+ * the software schedule associates multiple such nodes with a given
+ * host side hardware endpoint + direction; scheduling may activate
+ * that hardware endpoint.
+ */
+static int musb_schedule(
+ struct musb *musb,
+ struct musb_qh *qh,
+ int is_in)
+{
+ int idle;
+ int best_diff;
+ int best_end, epnum;
+ struct musb_hw_ep *hw_ep = NULL;
+ struct list_head *head = NULL;
+
+ /* use fixed hardware for control and bulk */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ head = &musb->control;
+ hw_ep = musb->control_ep;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ hw_ep = musb->bulk_ep;
+ if (is_in)
+ head = &musb->in_bulk;
+ else
+ head = &musb->out_bulk;
+ break;
+ }
+ if (head) {
+ idle = list_empty(head);
+ list_add_tail(&qh->ring, head);
+ goto success;
+ }
+
+ /* else, periodic transfers get muxed to other endpoints */
+
+ /* FIXME this doesn't consider direction, so it can only
+ * work for one half of the endpoint hardware, and assumes
+ * the previous cases handled all non-shared endpoints...
+ */
+
+ /* we know this qh hasn't been scheduled, so all we need to do
+ * is choose which hardware endpoint to put it on ...
+ *
+ * REVISIT what we really want here is a regular schedule tree
+ * like e.g. OHCI uses, but for now musb->periodic is just an
+ * array of the _single_ logical endpoint associated with a
+ * given physical one (identity mapping logical->physical).
+ *
+ * that simplistic approach makes TT scheduling a lot simpler;
+ * there is none, and thus none of its complexity...
+ */
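+ /* best fit: pick the unused endpoint whose fifo exceeds this
+ * transfer's maxpacket by the smallest margin.
+ */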
+ best_diff = 4096;
+ best_end = -1;
+
+ for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
+ int diff;
+
+ if (musb->periodic[epnum])
+ continue;
+ hw_ep = &musb->endpoints[epnum];
+ if (hw_ep == musb->bulk_ep)
+ continue;
+
+ if (is_in)
+ diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
+ else
+ diff = hw_ep->max_packet_sz_tx - qh->maxpacket;
+
+ if (diff > 0 && best_diff > diff) {
+ best_diff = diff;
+ best_end = epnum;
+ }
+ }
+ if (best_end < 0)
+ return -ENOSPC;
+
+ idle = 1;
+ hw_ep = musb->endpoints + best_end;
+ musb->periodic[best_end] = qh;
+ DBG(4, "qh %p periodic slot %d\n", qh, best_end);
+success:
+ qh->hw_ep = hw_ep;
+ qh->hep->hcpriv = qh;
+ if (idle)
+ musb_start_urb(musb, is_in, qh);
+ return 0;
+}
+
+static int musb_urb_enqueue(
+ struct usb_hcd *hcd,
+ struct urb *urb,
+ gfp_t mem_flags)
+{
+ unsigned long flags;
+ struct musb *musb = hcd_to_musb(hcd);
+ struct usb_host_endpoint *hep = urb->ep;
+ struct musb_qh *qh = hep->hcpriv;
+ struct usb_endpoint_descriptor *epd = &hep->desc;
+ int ret;
+ unsigned type_reg;
+ unsigned interval;
+
+ /* host role must be active */
+ if (!is_host_active(musb) || !musb->is_active)
+ return -ENODEV;
+
+ spin_lock_irqsave(&musb->lock, flags);
+ ret = usb_hcd_link_urb_to_ep(hcd, urb);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ if (ret)
+ return ret;
+
+ /* DMA mapping was already done, if needed, and this urb is on
+ * hep->urb_list ... so there's little to do unless hep wasn't
+ * yet scheduled onto a live qh.
+ *
+ * REVISIT best to keep hep->hcpriv valid until the endpoint gets
+ * disabled, testing for empty qh->ring and avoiding qh setup costs
+ * except for the first urb queued after a config change.
+ */
+ if (qh) {
+ urb->hcpriv = qh;
+ return 0;
+ }
+
+ /* Allocate and initialize qh, minimizing the work done each time
+ * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it.
+ *
+ * REVISIT consider a dedicated qh kmem_cache, so it's harder
+ * for bugs in other kernel code to break this driver...
+ */
+ qh = kzalloc(sizeof *qh, mem_flags);
+ if (!qh) {
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ return -ENOMEM;
+ }
+
+ qh->hep = hep;
+ qh->dev = urb->dev;
+ INIT_LIST_HEAD(&qh->ring);
+ qh->is_ready = 1;
+
+ qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
+
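+ /* wMaxPacketSize bits 10:0 give the packet size; bits 12:11 encode
+ * extra (high bandwidth) transactions per microframe.
+ */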
+ /* no high bandwidth support yet */
+ if (qh->maxpacket & ~0x7ff) {
+ ret = -EMSGSIZE;
+ goto done;
+ }
+
+ qh->epnum = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+ qh->type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+ /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
+ qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
+
+ /* precompute rxtype/txtype/type0 register */
+ type_reg = (qh->type << 4) | qh->epnum;
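+ /* bits 3:0 hold the target endpoint number, bits 5:4 the transfer
+ * type; the switch below ORs the speed code into bits 7:6.
+ */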
+ switch (urb->dev->speed) {
+ case USB_SPEED_LOW:
+ type_reg |= 0xc0;
+ break;
+ case USB_SPEED_FULL:
+ type_reg |= 0x80;
+ break;
+ default:
+ type_reg |= 0x40;
+ }
+ qh->type_reg = type_reg;
+
+ /* precompute rxinterval/txinterval register */
+ interval = min((u8)16, epd->bInterval); /* log encoding */
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_INT:
+ /* fullspeed uses linear encoding */
+ if (USB_SPEED_FULL == urb->dev->speed) {
+ interval = epd->bInterval;
+ if (!interval)
+ interval = 1;
+ }
+ /* FALLTHROUGH */
+ case USB_ENDPOINT_XFER_ISOC:
+ /* iso always uses log encoding */
+ break;
+ default:
+ /* REVISIT we actually want to use NAK limits, hinting to the
+ * transfer scheduling logic to try some other qh, e.g. try
+ * for 2 msec first:
+ *
+ * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
+ *
+ * The downside of disabling this is that transfer scheduling
+ * gets VERY unfair for nonperiodic transfers; a misbehaving
+ * peripheral could make that hurt. Or for reads, one that's
+ * perfectly normal: network and other drivers keep reads
+ * posted at all times; having one pending for a week should
+ * be perfectly safe.
+ *
+ * The upside of disabling it is that it lets us put the transfer
+ * scheduling code aside for a while.
+ */
+ interval = 0;
+ }
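+ /* with the log encoding above, the effective polling period is
+ * 2^(interval - 1) (micro)frames; e.g. a high speed interrupt
+ * endpoint with bInterval 4 polls every 8 microframes (1 ms).
+ */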
+ qh->intv_reg = interval;
+
+ /* precompute addressing for external hub/tt ports */
+ if (musb->is_multipoint) {
+ struct usb_device *parent = urb->dev->parent;
+
+ if (parent != hcd->self.root_hub) {
+ qh->h_addr_reg = (u8) parent->devnum;
+
+ /* set up tt info if needed */
+ if (urb->dev->tt) {
+ qh->h_port_reg = (u8) urb->dev->ttport;
+ qh->h_addr_reg |= 0x80;
+ }
+ }
+ }
+
+ /* invariant: hep->hcpriv is null OR the qh that's already scheduled.
+ * until we get real dma queues (with an entry for each urb/buffer),
+ * we only have work to do in the former case.
+ */
+ spin_lock_irqsave(&musb->lock, flags);
+ if (hep->hcpriv) {
+ /* some concurrent activity submitted another urb to hep...
+ * odd, rare, error prone, but legal.
+ */
+ kfree(qh);
+ ret = 0;
+ } else
+ ret = musb_schedule(musb, qh,
+ epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
+
+ if (ret == 0) {
+ urb->hcpriv = qh;
+ /* FIXME set urb->start_frame for iso/intr, it's tested in
+ * musb_start_urb(), but otherwise only konicawc cares ...
+ */
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+done:
+ if (ret != 0) {
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ kfree(qh);
+ }
+ return ret;
+}
+
+
+/*
+ * abort a transfer that's at the head of a hardware queue.
+ * called with controller locked, irqs blocked
+ * that hardware queue advances to the next transfer, unless prevented
+ */
+static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
+{
+ struct musb_hw_ep *ep = qh->hw_ep;
+ void __iomem *epio = ep->regs;
+ unsigned hw_end = ep->epnum;
+ void __iomem *regs = ep->musb->mregs;
+ u16 csr;
+ int status = 0;
+
+ musb_ep_select(regs, hw_end);
+
+ if (is_dma_capable()) {
+ struct dma_channel *dma;
+
+ dma = is_in ? ep->rx_channel : ep->tx_channel;
+ if (dma) {
+ status = ep->musb->dma_controller->channel_abort(dma);
+ DBG(status ? 1 : 3,
+ "abort %cX%d DMA for urb %p --> %d\n",
+ is_in ? 'R' : 'T', ep->epnum,
+ urb, status);
+ urb->actual_length += dma->actual_len;
+ }
+ }
+
+ /* turn off DMA requests, discard state, stop polling ... */
+ if (is_in) {
+ /* giveback saves bulk toggle */
+ csr = musb_h_flush_rxfifo(ep, 0);
+
+ /* REVISIT we still get an irq; should likely clear the
+ * endpoint's irq status here to avoid bogus irqs.
+ * clearing that status is platform-specific...
+ */
+ } else {
+ musb_h_tx_flush_fifo(ep);
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~(MUSB_TXCSR_AUTOSET
+ | MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_H_RXSTALL
+ | MUSB_TXCSR_H_NAKTIMEOUT
+ | MUSB_TXCSR_H_ERROR
+ | MUSB_TXCSR_TXPKTRDY);
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* REVISIT may need to clear FLUSHFIFO ... */
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* flush cpu writebuffer */
+ csr = musb_readw(epio, MUSB_TXCSR);
+ }
+ if (status == 0)
+ musb_advance_schedule(ep->musb, urb, ep, is_in);
+ return status;
+}
+
+static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+ struct musb_qh *qh;
+ struct list_head *sched;
+ unsigned long flags;
+ int ret;
+
+ DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out");
+
+ spin_lock_irqsave(&musb->lock, flags);
+ ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (ret)
+ goto done;
+
+ qh = urb->hcpriv;
+ if (!qh)
+ goto done;
+
+ /* Any URB not actively programmed into endpoint hardware can be
+ * immediately given back. Such an URB must be at the head of its
+ * endpoint queue, unless someday we get real DMA queues. And even
+ * then, it might not be known to the hardware...
+ *
+ * Otherwise abort current transfer, pending dma, etc.; urb->status
+ * has already been updated. This is a synchronous abort; it'd be
+ * OK to hold off until after some IRQ, though.
+ */
+ if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
+ ret = -EINPROGRESS;
+ else {
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ sched = &musb->control;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ if (usb_pipein(urb->pipe))
+ sched = &musb->in_bulk;
+ else
+ sched = &musb->out_bulk;
+ break;
+ default:
+ /* REVISIT when we get a schedule tree, periodic
+ * transfers won't always be at the head of a
+ * singleton queue...
+ */
+ sched = NULL;
+ break;
+ }
+ }
+
+ /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
+ if (ret < 0 || (sched && qh != first_qh(sched))) {
+ int ready = qh->is_ready;
+
+ ret = 0;
+ qh->is_ready = 0;
+ __musb_giveback(musb, urb, 0);
+ qh->is_ready = ready;
+ } else
+ ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+done:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return ret;
+}
+
+/* disable an endpoint */
+static void
+musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
+{
+ u8 epnum = hep->desc.bEndpointAddress;
+ unsigned long flags;
+ struct musb *musb = hcd_to_musb(hcd);
+ u8 is_in = epnum & USB_DIR_IN;
+ struct musb_qh *qh = hep->hcpriv;
+ struct urb *urb, *tmp;
+ struct list_head *sched;
+
+ if (!qh)
+ return;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ switch (qh->type) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ sched = &musb->control;
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ if (is_in)
+ sched = &musb->in_bulk;
+ else
+ sched = &musb->out_bulk;
+ break;
+ default:
+ /* REVISIT when we get a schedule tree, periodic transfers
+ * won't always be at the head of a singleton queue...
+ */
+ sched = NULL;
+ break;
+ }
+
+ /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
+
+ /* kick first urb off the hardware, if needed */
+ qh->is_ready = 0;
+ if (!sched || qh == first_qh(sched)) {
+ urb = next_urb(qh);
+
+ /* make software (then hardware) stop ASAP */
+ if (!urb->unlinked)
+ urb->status = -ESHUTDOWN;
+
+ /* cleanup */
+ musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
+ } else
+ urb = NULL;
+
+ /* then just nuke all the others */
+ list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list)
+ musb_giveback(qh, urb, -ESHUTDOWN);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static int musb_h_get_frame_number(struct usb_hcd *hcd)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+
+ return musb_readw(musb->mregs, MUSB_FRAME);
+}
+
+static int musb_h_start(struct usb_hcd *hcd)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+
+ /* NOTE: musb_start() is called when the hub driver turns
+ * on port power, or when (OTG) peripheral starts.
+ */
+ hcd->state = HC_STATE_RUNNING;
+ musb->port1_status = 0;
+ return 0;
+}
+
+static void musb_h_stop(struct usb_hcd *hcd)
+{
+ musb_stop(hcd_to_musb(hcd));
+ hcd->state = HC_STATE_HALT;
+}
+
+static int musb_bus_suspend(struct usb_hcd *hcd)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+
+ if (musb->xceiv.state == OTG_STATE_A_SUSPEND)
+ return 0;
+
+ if (is_host_active(musb) && musb->is_active) {
+		WARNING("trying to suspend as %s while is_active=%i\n",
+ otg_state_string(musb), musb->is_active);
+ return -EBUSY;
+ } else
+ return 0;
+}
+
+static int musb_bus_resume(struct usb_hcd *hcd)
+{
+ /* resuming child port does the work */
+ return 0;
+}
+
+const struct hc_driver musb_hc_driver = {
+ .description = "musb-hcd",
+ .product_desc = "MUSB HDRC host driver",
+ .hcd_priv_size = sizeof(struct musb),
+ .flags = HCD_USB2 | HCD_MEMORY,
+
+ /* not using irq handler or reset hooks from usbcore, since
+ * those must be shared with peripheral code for OTG configs
+ */
+
+ .start = musb_h_start,
+ .stop = musb_h_stop,
+
+ .get_frame_number = musb_h_get_frame_number,
+
+ .urb_enqueue = musb_urb_enqueue,
+ .urb_dequeue = musb_urb_dequeue,
+ .endpoint_disable = musb_h_disable,
+
+ .hub_status_data = musb_hub_status_data,
+ .hub_control = musb_hub_control,
+ .bus_suspend = musb_bus_suspend,
+ .bus_resume = musb_bus_resume,
+ /* .start_port_reset = NULL, */
+ /* .hub_irq_enable = NULL, */
+};
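As a hedged sketch only, a glue layer could hand musb_hc_driver to usbcore roughly as below; the driver's real registration lives in musb_core.c earlier in this series, and musb_register_hcd plus its dev argument are illustrative names, not code from this patch. Because .hcd_priv_size is sizeof(struct musb), usb_create_hcd() allocates the musb state together with the hcd, which is what makes hcd_to_musb()/musb_to_hcd() work.

/* Hedged sketch, not the driver's actual registration path.  Assumes the
 * same headers musb_host.c already pulls in (usbcore hcd declarations and
 * musb_core.h) are in scope.
 */
static int __init musb_register_hcd(struct device *dev)
{
	struct usb_hcd	*hcd;
	int		ret;

	hcd = usb_create_hcd(&musb_hc_driver, dev, "musb-hdrc");
	if (!hcd)
		return -ENOMEM;

	/* MUSB dispatches its own interrupt (see the comment in the
	 * hc_driver above), so no IRQ is handed to usbcore here.
	 */
	ret = usb_add_hcd(hcd, 0, 0);
	if (ret)
		usb_put_hcd(hcd);
	return ret;
}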
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
new file mode 100644
index 000000000000..77bcdb9d5b32
--- /dev/null
+++ b/drivers/usb/musb/musb_host.h
@@ -0,0 +1,110 @@
+/*
+ * MUSB OTG driver host defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _MUSB_HOST_H
+#define _MUSB_HOST_H
+
+static inline struct usb_hcd *musb_to_hcd(struct musb *musb)
+{
+ return container_of((void *) musb, struct usb_hcd, hcd_priv);
+}
+
+static inline struct musb *hcd_to_musb(struct usb_hcd *hcd)
+{
+ return (struct musb *) (hcd->hcd_priv);
+}
+
+/* stored in "usb_host_endpoint.hcpriv" for scheduled endpoints */
+struct musb_qh {
+ struct usb_host_endpoint *hep; /* usbcore info */
+ struct usb_device *dev;
+ struct musb_hw_ep *hw_ep; /* current binding */
+
+ struct list_head ring; /* of musb_qh */
+ /* struct musb_qh *next; */ /* for periodic tree */
+
+ unsigned offset; /* in urb->transfer_buffer */
+ unsigned segsize; /* current xfer fragment */
+
+ u8 type_reg; /* {rx,tx} type register */
+ u8 intv_reg; /* {rx,tx} interval register */
+ u8 addr_reg; /* device address register */
+ u8 h_addr_reg; /* hub address register */
+ u8 h_port_reg; /* hub port register */
+
+ u8 is_ready; /* safe to modify hw_ep */
+ u8 type; /* XFERTYPE_* */
+ u8 epnum;
+ u16 maxpacket;
+ u16 frame; /* for periodic schedule */
+ unsigned iso_idx; /* in urb->iso_frame_desc[] */
+};
+
+/* map from control or bulk queue head to the first qh on that ring */
+static inline struct musb_qh *first_qh(struct list_head *q)
+{
+ if (list_empty(q))
+ return NULL;
+ return list_entry(q->next, struct musb_qh, ring);
+}
+
+
+extern void musb_root_disconnect(struct musb *musb);
+
+struct usb_hcd;
+
+extern int musb_hub_status_data(struct usb_hcd *hcd, char *buf);
+extern int musb_hub_control(struct usb_hcd *hcd,
+ u16 typeReq, u16 wValue, u16 wIndex,
+ char *buf, u16 wLength);
+
+extern const struct hc_driver musb_hc_driver;
+
+static inline struct urb *next_urb(struct musb_qh *qh)
+{
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ struct list_head *queue;
+
+ if (!qh)
+ return NULL;
+ queue = &qh->hep->urb_list;
+ if (list_empty(queue))
+ return NULL;
+ return list_entry(queue->next, struct urb, urb_list);
+#else
+ return NULL;
+#endif
+}
+
+#endif /* _MUSB_HOST_H */
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
new file mode 100644
index 000000000000..6bbedae83af8
--- /dev/null
+++ b/drivers/usb/musb/musb_io.h
@@ -0,0 +1,115 @@
+/*
+ * MUSB OTG driver register I/O
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_LINUX_PLATFORM_ARCH_H__
+#define __MUSB_LINUX_PLATFORM_ARCH_H__
+
+#include <linux/io.h>
+
+#ifndef CONFIG_ARM
+static inline void readsl(const void __iomem *addr, void *buf, int len)
+ { insl((unsigned long)addr, buf, len); }
+static inline void readsw(const void __iomem *addr, void *buf, int len)
+ { insw((unsigned long)addr, buf, len); }
+static inline void readsb(const void __iomem *addr, void *buf, int len)
+ { insb((unsigned long)addr, buf, len); }
+
+static inline void writesl(const void __iomem *addr, const void *buf, int len)
+ { outsl((unsigned long)addr, buf, len); }
+static inline void writesw(const void __iomem *addr, const void *buf, int len)
+ { outsw((unsigned long)addr, buf, len); }
+static inline void writesb(const void __iomem *addr, const void *buf, int len)
+ { outsb((unsigned long)addr, buf, len); }
+
+#endif
+
+/* NOTE: these offsets are all in bytes */
+
+static inline u16 musb_readw(const void __iomem *addr, unsigned offset)
+ { return __raw_readw(addr + offset); }
+
+static inline u32 musb_readl(const void __iomem *addr, unsigned offset)
+ { return __raw_readl(addr + offset); }
+
+
+static inline void musb_writew(void __iomem *addr, unsigned offset, u16 data)
+ { __raw_writew(data, addr + offset); }
+
+static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data)
+ { __raw_writel(data, addr + offset); }
+
+
+#ifdef CONFIG_USB_TUSB6010
+
+/*
+ * TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
+ */
+static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
+{
+ u16 tmp;
+ u8 val;
+
+ tmp = __raw_readw(addr + (offset & ~1));
+ if (offset & 1)
+ val = (tmp >> 8);
+ else
+ val = tmp & 0xff;
+
+ return val;
+}
+
+static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
+{
+ u16 tmp;
+
+ tmp = __raw_readw(addr + (offset & ~1));
+ if (offset & 1)
+ tmp = (data << 8) | (tmp & 0xff);
+ else
+ tmp = (tmp & 0xff00) | data;
+
+ __raw_writew(tmp, addr + (offset & ~1));
+}
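The two helpers above emulate byte-wide access with 16-bit read-modify-write cycles. A minimal, self-contained user-space model of the same byte-lane math follows (a plain array stands in for the memory-mapped register file; this is illustration, not driver code):

#include <stdio.h>
#include <stdint.h>

static uint16_t regs[4];	/* fake 16-bit register file */

static uint8_t model_readb(unsigned offset)
{
	uint16_t tmp = regs[offset / 2];

	return (offset & 1) ? (tmp >> 8) : (tmp & 0xff);
}

static void model_writeb(unsigned offset, uint8_t data)
{
	uint16_t tmp = regs[offset / 2];

	if (offset & 1)
		tmp = (data << 8) | (tmp & 0xff);	/* high byte lane */
	else
		tmp = (tmp & 0xff00) | data;		/* low byte lane */
	regs[offset / 2] = tmp;
}

int main(void)
{
	model_writeb(2, 0xaa);	/* even offset -> low byte of word 1 */
	model_writeb(3, 0x55);	/* odd offset  -> high byte of word 1 */
	printf("word1=%04x b2=%02x b3=%02x\n",
	       regs[1], model_readb(2), model_readb(3));
	/* prints: word1=55aa b2=aa b3=55 */
	return 0;
}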
+
+#else
+
+static inline u8 musb_readb(const void __iomem *addr, unsigned offset)
+ { return __raw_readb(addr + offset); }
+
+static inline void musb_writeb(void __iomem *addr, unsigned offset, u8 data)
+ { __raw_writeb(data, addr + offset); }
+
+#endif /* CONFIG_USB_TUSB6010 */
+
+#endif
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
new file mode 100644
index 000000000000..9c228661aa5a
--- /dev/null
+++ b/drivers/usb/musb/musb_regs.h
@@ -0,0 +1,300 @@
+/*
+ * MUSB OTG driver register defines
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MUSB_REGS_H__
+#define __MUSB_REGS_H__
+
+#define MUSB_EP0_FIFOSIZE 64 /* This is non-configurable */
+
+/*
+ * Common USB registers
+ */
+
+#define MUSB_FADDR 0x00 /* 8-bit */
+#define MUSB_POWER 0x01 /* 8-bit */
+
+#define MUSB_INTRTX 0x02 /* 16-bit */
+#define MUSB_INTRRX 0x04
+#define MUSB_INTRTXE 0x06
+#define MUSB_INTRRXE 0x08
+#define MUSB_INTRUSB 0x0A /* 8 bit */
+#define MUSB_INTRUSBE 0x0B /* 8 bit */
+#define MUSB_FRAME 0x0C
+#define MUSB_INDEX 0x0E /* 8 bit */
+#define MUSB_TESTMODE 0x0F /* 8 bit */
+
+/* Get offset for a given FIFO from musb->mregs */
+#ifdef CONFIG_USB_TUSB6010
+#define MUSB_FIFO_OFFSET(epnum) (0x200 + ((epnum) * 0x20))
+#else
+#define MUSB_FIFO_OFFSET(epnum) (0x20 + ((epnum) * 4))
+#endif
+
+/*
+ * Additional Control Registers
+ */
+
+#define MUSB_DEVCTL 0x60 /* 8 bit */
+
+/* These are always controlled through the INDEX register */
+#define MUSB_TXFIFOSZ 0x62 /* 8-bit (see masks) */
+#define MUSB_RXFIFOSZ 0x63 /* 8-bit (see masks) */
+#define MUSB_TXFIFOADD 0x64 /* 16-bit offset shifted right 3 */
+#define MUSB_RXFIFOADD 0x66 /* 16-bit offset shifted right 3 */
+
+/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */
+#define MUSB_HWVERS 0x6C /* 8 bit */
+
+#define MUSB_EPINFO 0x78 /* 8 bit */
+#define MUSB_RAMINFO 0x79 /* 8 bit */
+#define MUSB_LINKINFO 0x7a /* 8 bit */
+#define MUSB_VPLEN 0x7b /* 8 bit */
+#define MUSB_HS_EOF1 0x7c /* 8 bit */
+#define MUSB_FS_EOF1 0x7d /* 8 bit */
+#define MUSB_LS_EOF1 0x7e /* 8 bit */
+
+/* Offsets to endpoint registers */
+#define MUSB_TXMAXP 0x00
+#define MUSB_TXCSR 0x02
+#define MUSB_CSR0 MUSB_TXCSR /* Re-used for EP0 */
+#define MUSB_RXMAXP 0x04
+#define MUSB_RXCSR 0x06
+#define MUSB_RXCOUNT 0x08
+#define MUSB_COUNT0 MUSB_RXCOUNT /* Re-used for EP0 */
+#define MUSB_TXTYPE 0x0A
+#define MUSB_TYPE0 MUSB_TXTYPE /* Re-used for EP0 */
+#define MUSB_TXINTERVAL 0x0B
+#define MUSB_NAKLIMIT0 MUSB_TXINTERVAL /* Re-used for EP0 */
+#define MUSB_RXTYPE 0x0C
+#define MUSB_RXINTERVAL 0x0D
+#define MUSB_FIFOSIZE 0x0F
+#define MUSB_CONFIGDATA MUSB_FIFOSIZE /* Re-used for EP0 */
+
+/* Offsets to endpoint registers in indexed model (using INDEX register) */
+#define MUSB_INDEXED_OFFSET(_epnum, _offset) \
+ (0x10 + (_offset))
+
+/* Offsets to endpoint registers in flat models */
+#define MUSB_FLAT_OFFSET(_epnum, _offset) \
+ (0x100 + (0x10*(_epnum)) + (_offset))
+
+#ifdef CONFIG_USB_TUSB6010
+/* TUSB6010 EP0 configuration register is special */
+#define MUSB_TUSB_OFFSET(_epnum, _offset) \
+	(0x10 + (_offset))
+#include "tusb6010.h" /* Needed "only" for TUSB_EP0_CONF */
+#endif
+
+/* "bus control"/target registers, for host side multipoint (external hubs) */
+#define MUSB_TXFUNCADDR 0x00
+#define MUSB_TXHUBADDR 0x02
+#define MUSB_TXHUBPORT 0x03
+
+#define MUSB_RXFUNCADDR 0x04
+#define MUSB_RXHUBADDR 0x06
+#define MUSB_RXHUBPORT 0x07
+
+#define MUSB_BUSCTL_OFFSET(_epnum, _offset) \
+ (0x80 + (8*(_epnum)) + (_offset))
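The offset macros above encode three small pieces of address arithmetic: per-endpoint FIFO windows, the flat (non-indexed) endpoint register block at 0x100 with a 0x10 stride, and the per-endpoint bus-control block at 0x80 with an 8-byte stride. A standalone sketch that simply reproduces that arithmetic for endpoint 3 (non-TUSB FIFO layout assumed; the macro definitions above remain authoritative):

#include <stdio.h>

#define FLAT_OFFSET(ep, off)	(0x100 + (0x10 * (ep)) + (off))
#define FIFO_OFFSET(ep)		(0x20 + ((ep) * 4))	/* non-TUSB layout */
#define BUSCTL_OFFSET(ep, off)	(0x80 + (8 * (ep)) + (off))

int main(void)
{
	/* TXCSR of endpoint 3 in the flat model: 0x100 + 0x30 + 0x02 */
	printf("ep3 TXCSR      @ 0x%03x\n", FLAT_OFFSET(3, 0x02));	/* 0x132 */
	/* FIFO window of endpoint 3: 0x20 + 12 */
	printf("ep3 FIFO       @ 0x%03x\n", FIFO_OFFSET(3));		/* 0x02c */
	/* RXFUNCADDR of endpoint 3: 0x80 + 24 + 4 */
	printf("ep3 RXFUNCADDR @ 0x%03x\n", BUSCTL_OFFSET(3, 0x04));	/* 0x09c */
	return 0;
}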
+
+/*
+ * MUSB Register bits
+ */
+
+/* POWER */
+#define MUSB_POWER_ISOUPDATE 0x80
+#define MUSB_POWER_SOFTCONN 0x40
+#define MUSB_POWER_HSENAB 0x20
+#define MUSB_POWER_HSMODE 0x10
+#define MUSB_POWER_RESET 0x08
+#define MUSB_POWER_RESUME 0x04
+#define MUSB_POWER_SUSPENDM 0x02
+#define MUSB_POWER_ENSUSPEND 0x01
+
+/* INTRUSB */
+#define MUSB_INTR_SUSPEND 0x01
+#define MUSB_INTR_RESUME 0x02
+#define MUSB_INTR_RESET		0x04	/* peripheral mode */
+#define MUSB_INTR_BABBLE	0x04	/* host mode; shares the RESET bit */
+#define MUSB_INTR_SOF 0x08
+#define MUSB_INTR_CONNECT 0x10
+#define MUSB_INTR_DISCONNECT 0x20
+#define MUSB_INTR_SESSREQ 0x40
+#define MUSB_INTR_VBUSERROR 0x80 /* For SESSION end */
+
+/* DEVCTL */
+#define MUSB_DEVCTL_BDEVICE 0x80
+#define MUSB_DEVCTL_FSDEV 0x40
+#define MUSB_DEVCTL_LSDEV 0x20
+#define MUSB_DEVCTL_VBUS 0x18
+#define MUSB_DEVCTL_VBUS_SHIFT 3
+#define MUSB_DEVCTL_HM 0x04
+#define MUSB_DEVCTL_HR 0x02
+#define MUSB_DEVCTL_SESSION 0x01
+
+/* TESTMODE */
+#define MUSB_TEST_FORCE_HOST 0x80
+#define MUSB_TEST_FIFO_ACCESS 0x40
+#define MUSB_TEST_FORCE_FS 0x20
+#define MUSB_TEST_FORCE_HS 0x10
+#define MUSB_TEST_PACKET 0x08
+#define MUSB_TEST_K 0x04
+#define MUSB_TEST_J 0x02
+#define MUSB_TEST_SE0_NAK 0x01
+
+/* Allocate for double-packet buffering (effectively doubles assigned _SIZE) */
+#define MUSB_FIFOSZ_DPB 0x10
+/* Allocation size (8, 16, 32, ... 4096) */
+#define MUSB_FIFOSZ_SIZE 0x0f
+
+/* CSR0 */
+#define MUSB_CSR0_FLUSHFIFO 0x0100
+#define MUSB_CSR0_TXPKTRDY 0x0002
+#define MUSB_CSR0_RXPKTRDY 0x0001
+
+/* CSR0 in Peripheral mode */
+#define MUSB_CSR0_P_SVDSETUPEND 0x0080
+#define MUSB_CSR0_P_SVDRXPKTRDY 0x0040
+#define MUSB_CSR0_P_SENDSTALL 0x0020
+#define MUSB_CSR0_P_SETUPEND 0x0010
+#define MUSB_CSR0_P_DATAEND 0x0008
+#define MUSB_CSR0_P_SENTSTALL 0x0004
+
+/* CSR0 in Host mode */
+#define MUSB_CSR0_H_DIS_PING 0x0800
+#define MUSB_CSR0_H_WR_DATATOGGLE 0x0400 /* Set to allow setting: */
+#define MUSB_CSR0_H_DATATOGGLE 0x0200 /* Data toggle control */
+#define MUSB_CSR0_H_NAKTIMEOUT 0x0080
+#define MUSB_CSR0_H_STATUSPKT 0x0040
+#define MUSB_CSR0_H_REQPKT 0x0020
+#define MUSB_CSR0_H_ERROR 0x0010
+#define MUSB_CSR0_H_SETUPPKT 0x0008
+#define MUSB_CSR0_H_RXSTALL 0x0004
+
+/* CSR0 bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MUSB_CSR0_P_WZC_BITS \
+ (MUSB_CSR0_P_SENTSTALL)
+#define MUSB_CSR0_H_WZC_BITS \
+ (MUSB_CSR0_H_NAKTIMEOUT | MUSB_CSR0_H_RXSTALL \
+ | MUSB_CSR0_RXPKTRDY)
+
+/* TxType/RxType */
+#define MUSB_TYPE_SPEED 0xc0
+#define MUSB_TYPE_SPEED_SHIFT 6
+#define MUSB_TYPE_PROTO 0x30 /* Implicitly zero for ep0 */
+#define MUSB_TYPE_PROTO_SHIFT 4
+#define MUSB_TYPE_REMOTE_END 0xf /* Implicitly zero for ep0 */
+
+/* CONFIGDATA */
+#define MUSB_CONFIGDATA_MPRXE 0x80 /* Auto bulk pkt combining */
+#define MUSB_CONFIGDATA_MPTXE 0x40 /* Auto bulk pkt splitting */
+#define MUSB_CONFIGDATA_BIGENDIAN 0x20
+#define MUSB_CONFIGDATA_HBRXE 0x10 /* HB-ISO for RX */
+#define MUSB_CONFIGDATA_HBTXE 0x08 /* HB-ISO for TX */
+#define MUSB_CONFIGDATA_DYNFIFO 0x04 /* Dynamic FIFO sizing */
+#define MUSB_CONFIGDATA_SOFTCONE 0x02 /* SoftConnect */
+#define MUSB_CONFIGDATA_UTMIDW 0x01 /* Data width 0/1 => 8/16bits */
+
+/* TXCSR in Peripheral and Host mode */
+#define MUSB_TXCSR_AUTOSET 0x8000
+#define MUSB_TXCSR_MODE 0x2000
+#define MUSB_TXCSR_DMAENAB 0x1000
+#define MUSB_TXCSR_FRCDATATOG 0x0800
+#define MUSB_TXCSR_DMAMODE 0x0400
+#define MUSB_TXCSR_CLRDATATOG 0x0040
+#define MUSB_TXCSR_FLUSHFIFO 0x0008
+#define MUSB_TXCSR_FIFONOTEMPTY 0x0002
+#define MUSB_TXCSR_TXPKTRDY 0x0001
+
+/* TXCSR in Peripheral mode */
+#define MUSB_TXCSR_P_ISO 0x4000
+#define MUSB_TXCSR_P_INCOMPTX 0x0080
+#define MUSB_TXCSR_P_SENTSTALL 0x0020
+#define MUSB_TXCSR_P_SENDSTALL 0x0010
+#define MUSB_TXCSR_P_UNDERRUN 0x0004
+
+/* TXCSR in Host mode */
+#define MUSB_TXCSR_H_WR_DATATOGGLE 0x0200
+#define MUSB_TXCSR_H_DATATOGGLE 0x0100
+#define MUSB_TXCSR_H_NAKTIMEOUT 0x0080
+#define MUSB_TXCSR_H_RXSTALL 0x0020
+#define MUSB_TXCSR_H_ERROR 0x0004
+
+/* TXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MUSB_TXCSR_P_WZC_BITS \
+ (MUSB_TXCSR_P_INCOMPTX | MUSB_TXCSR_P_SENTSTALL \
+ | MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_FIFONOTEMPTY)
+#define MUSB_TXCSR_H_WZC_BITS \
+ (MUSB_TXCSR_H_NAKTIMEOUT | MUSB_TXCSR_H_RXSTALL \
+ | MUSB_TXCSR_H_ERROR | MUSB_TXCSR_FIFONOTEMPTY)
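The *_WZC_BITS masks group status bits with write-zero-clears semantics: writing 0 clears them, writing 1 leaves them alone. A driver doing a read-modify-write that is not meant to acknowledge such status therefore typically ORs the mask into the value it writes back. A standalone sketch of why that matters when a status bit latches between the read and the write (constants chosen to match MUSB_TXCSR_H_WZC_BITS; the hardware rule is simulated by hand):

#include <stdio.h>
#include <stdint.h>

#define WZC_BITS	0x00a6	/* NAKTIMEOUT | RXSTALL | ERROR | FIFONOTEMPTY */
#define AUTOSET		0x8000	/* unrelated control bit being changed */

/* register content after a write, per the write-zero-clears rule */
static uint16_t hw_after_write(uint16_t before, uint16_t written)
{
	uint16_t wzc  = (before & written) & WZC_BITS;	/* 1 written: keep */
	uint16_t rest = written & ~WZC_BITS;		/* normal bits     */

	return rest | wzc;
}

int main(void)
{
	uint16_t csr = 0x0000;		/* value read: nothing pending yet */
	uint16_t hw  = csr | 0x0020;	/* RXSTALL latches after the read  */

	uint16_t naive   = csr | AUTOSET;		/* 0s in WZC field */
	uint16_t careful = csr | WZC_BITS | AUTOSET;	/* 1s in WZC field */

	printf("naive write   -> %04x (stall cleared by accident)\n",
	       hw_after_write(hw, naive));		/* 8000 */
	printf("careful write -> %04x (stall still pending)\n",
	       hw_after_write(hw, careful));		/* 8020 */
	return 0;
}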
+
+/* RXCSR in Peripheral and Host mode */
+#define MUSB_RXCSR_AUTOCLEAR 0x8000
+#define MUSB_RXCSR_DMAENAB 0x2000
+#define MUSB_RXCSR_DISNYET 0x1000
+#define MUSB_RXCSR_PID_ERR 0x1000
+#define MUSB_RXCSR_DMAMODE 0x0800
+#define MUSB_RXCSR_INCOMPRX 0x0100
+#define MUSB_RXCSR_CLRDATATOG 0x0080
+#define MUSB_RXCSR_FLUSHFIFO 0x0010
+#define MUSB_RXCSR_DATAERROR 0x0008
+#define MUSB_RXCSR_FIFOFULL 0x0002
+#define MUSB_RXCSR_RXPKTRDY 0x0001
+
+/* RXCSR in Peripheral mode */
+#define MUSB_RXCSR_P_ISO 0x4000
+#define MUSB_RXCSR_P_SENTSTALL 0x0040
+#define MUSB_RXCSR_P_SENDSTALL 0x0020
+#define MUSB_RXCSR_P_OVERRUN 0x0004
+
+/* RXCSR in Host mode */
+#define MUSB_RXCSR_H_AUTOREQ 0x4000
+#define MUSB_RXCSR_H_WR_DATATOGGLE 0x0400
+#define MUSB_RXCSR_H_DATATOGGLE 0x0200
+#define MUSB_RXCSR_H_RXSTALL 0x0040
+#define MUSB_RXCSR_H_REQPKT 0x0020
+#define MUSB_RXCSR_H_ERROR 0x0004
+
+/* RXCSR bits to avoid zeroing (write zero clears, write 1 ignored) */
+#define MUSB_RXCSR_P_WZC_BITS \
+ (MUSB_RXCSR_P_SENTSTALL | MUSB_RXCSR_P_OVERRUN \
+ | MUSB_RXCSR_RXPKTRDY)
+#define MUSB_RXCSR_H_WZC_BITS \
+ (MUSB_RXCSR_H_RXSTALL | MUSB_RXCSR_H_ERROR \
+ | MUSB_RXCSR_DATAERROR | MUSB_RXCSR_RXPKTRDY)
+
+/* HUBADDR */
+#define MUSB_HUBADDR_MULTI_TT 0x80
+
+#endif /* __MUSB_REGS_H__ */
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
new file mode 100644
index 000000000000..e0e9ce584175
--- /dev/null
+++ b/drivers/usb/musb/musb_virthub.c
@@ -0,0 +1,425 @@
+/*
+ * MUSB OTG driver virtual root hub support
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2006 by Texas Instruments
+ * Copyright (C) 2006-2007 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+
+#include <asm/unaligned.h>
+
+#include "musb_core.h"
+
+
+static void musb_port_suspend(struct musb *musb, bool do_suspend)
+{
+ u8 power;
+ void __iomem *mbase = musb->mregs;
+
+ if (!is_host_active(musb))
+ return;
+
+ /* NOTE: this doesn't necessarily put PHY into low power mode,
+ * turning off its clock; that's a function of PHY integration and
+ * MUSB_POWER_ENSUSPEND. PHY may need a clock (sigh) to detect
+ * SE0 changing to connect (J) or wakeup (K) states.
+ */
+ power = musb_readb(mbase, MUSB_POWER);
+ if (do_suspend) {
+ int retries = 10000;
+
+ power &= ~MUSB_POWER_RESUME;
+ power |= MUSB_POWER_SUSPENDM;
+ musb_writeb(mbase, MUSB_POWER, power);
+
+ /* Needed for OPT A tests */
+ power = musb_readb(mbase, MUSB_POWER);
+ while (power & MUSB_POWER_SUSPENDM) {
+ power = musb_readb(mbase, MUSB_POWER);
+ if (retries-- < 1)
+ break;
+ }
+
+ DBG(3, "Root port suspended, power %02x\n", power);
+
+ musb->port1_status |= USB_PORT_STAT_SUSPEND;
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_HOST:
+ musb->xceiv.state = OTG_STATE_A_SUSPEND;
+ musb->is_active = is_otg_enabled(musb)
+ && musb->xceiv.host->b_hnp_enable;
+ musb_platform_try_idle(musb, 0);
+ break;
+#ifdef CONFIG_USB_MUSB_OTG
+ case OTG_STATE_B_HOST:
+ musb->xceiv.state = OTG_STATE_B_WAIT_ACON;
+ musb->is_active = is_otg_enabled(musb)
+ && musb->xceiv.host->b_hnp_enable;
+ musb_platform_try_idle(musb, 0);
+ break;
+#endif
+ default:
+ DBG(1, "bogus rh suspend? %s\n",
+ otg_state_string(musb));
+ }
+ } else if (power & MUSB_POWER_SUSPENDM) {
+ power &= ~MUSB_POWER_SUSPENDM;
+ power |= MUSB_POWER_RESUME;
+ musb_writeb(mbase, MUSB_POWER, power);
+
+ DBG(3, "Root port resuming, power %02x\n", power);
+
+ /* later, GetPortStatus will stop RESUME signaling */
+ musb->port1_status |= MUSB_PORT_STAT_RESUME;
+ musb->rh_timer = jiffies + msecs_to_jiffies(20);
+ }
+}
+
+static void musb_port_reset(struct musb *musb, bool do_reset)
+{
+ u8 power;
+ void __iomem *mbase = musb->mregs;
+
+#ifdef CONFIG_USB_MUSB_OTG
+ if (musb->xceiv.state == OTG_STATE_B_IDLE) {
+ DBG(2, "HNP: Returning from HNP; no hub reset from b_idle\n");
+ musb->port1_status &= ~USB_PORT_STAT_RESET;
+ return;
+ }
+#endif
+
+ if (!is_host_active(musb))
+ return;
+
+ /* NOTE: caller guarantees it will turn off the reset when
+ * the appropriate amount of time has passed
+ */
+ power = musb_readb(mbase, MUSB_POWER);
+ if (do_reset) {
+
+ /*
+		 * If RESUME is set, we must make sure it stays set for at
+		 * least 20 ms.
+ * Then we must clear RESUME and wait a bit to let musb start
+ * generating SOFs. If we don't do this, OPT HS A 6.8 tests
+ * fail with "Error! Did not receive an SOF before suspend
+ * detected".
+ */
+ if (power & MUSB_POWER_RESUME) {
+ while (time_before(jiffies, musb->rh_timer))
+ msleep(1);
+ musb_writeb(mbase, MUSB_POWER,
+ power & ~MUSB_POWER_RESUME);
+ msleep(1);
+ }
+
+ musb->ignore_disconnect = true;
+ power &= 0xf0;
+ musb_writeb(mbase, MUSB_POWER,
+ power | MUSB_POWER_RESET);
+
+ musb->port1_status |= USB_PORT_STAT_RESET;
+ musb->port1_status &= ~USB_PORT_STAT_ENABLE;
+ musb->rh_timer = jiffies + msecs_to_jiffies(50);
+ } else {
+ DBG(4, "root port reset stopped\n");
+ musb_writeb(mbase, MUSB_POWER,
+ power & ~MUSB_POWER_RESET);
+
+ musb->ignore_disconnect = false;
+
+ power = musb_readb(mbase, MUSB_POWER);
+ if (power & MUSB_POWER_HSMODE) {
+ DBG(4, "high-speed device connected\n");
+ musb->port1_status |= USB_PORT_STAT_HIGH_SPEED;
+ }
+
+ musb->port1_status &= ~USB_PORT_STAT_RESET;
+ musb->port1_status |= USB_PORT_STAT_ENABLE
+ | (USB_PORT_STAT_C_RESET << 16)
+ | (USB_PORT_STAT_C_ENABLE << 16);
+ usb_hcd_poll_rh_status(musb_to_hcd(musb));
+
+ musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
+ }
+}
+
+void musb_root_disconnect(struct musb *musb)
+{
+ musb->port1_status = (1 << USB_PORT_FEAT_POWER)
+ | (1 << USB_PORT_FEAT_C_CONNECTION);
+
+ usb_hcd_poll_rh_status(musb_to_hcd(musb));
+ musb->is_active = 0;
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_HOST:
+ case OTG_STATE_A_SUSPEND:
+ musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+ musb->is_active = 0;
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ break;
+ default:
+ DBG(1, "host disconnect (%s)\n", otg_state_string(musb));
+ }
+}
+
+
+/*---------------------------------------------------------------------*/
+
+/* Caller may or may not hold musb->lock */
+int musb_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+ int retval = 0;
+
+ /* called in_irq() via usb_hcd_poll_rh_status() */
+ if (musb->port1_status & 0xffff0000) {
+ *buf = 0x02;
+ retval = 1;
+ }
+ return retval;
+}
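port1_status packs wPortStatus into its low 16 bits and the wPortChange bits into the high 16, so any pending change makes the poll above report 0x02: bit 1 of the hub status-change bitmap, i.e. port 1 (bit 0 being the hub itself). A standalone sketch of that packing (locally defined constants mirror the usual USB_PORT_STAT_* values):

#include <stdio.h>
#include <stdint.h>

#define PORT_STAT_ENABLE	0x0002	/* lives in the low half  */
#define PORT_STAT_C_CONNECTION	0x0001	/* goes into the high half */

int main(void)
{
	uint32_t port1_status = PORT_STAT_ENABLE
			| ((uint32_t)PORT_STAT_C_CONNECTION << 16);
	uint8_t buf = 0;

	if (port1_status & 0xffff0000)
		buf = 0x02;			/* "port 1 changed" */

	printf("port1_status=%08x hub bitmap=%02x\n", port1_status, buf);
	return 0;
}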
+
+int musb_hub_control(
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength)
+{
+ struct musb *musb = hcd_to_musb(hcd);
+ u32 temp;
+ int retval = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags))) {
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return -ESHUTDOWN;
+ }
+
+ /* hub features: always zero, setting is a NOP
+ * port features: reported, sometimes updated when host is active
+ * no indicators
+ */
+ switch (typeReq) {
+ case ClearHubFeature:
+ case SetHubFeature:
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+ case C_HUB_LOCAL_POWER:
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case ClearPortFeature:
+ if ((wIndex & 0xff) != 1)
+ goto error;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ musb_port_suspend(musb, false);
+ break;
+ case USB_PORT_FEAT_POWER:
+ if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
+ musb_set_vbus(musb, 0);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ case USB_PORT_FEAT_C_RESET:
+ case USB_PORT_FEAT_C_SUSPEND:
+ break;
+ default:
+ goto error;
+ }
+ DBG(5, "clear feature %d\n", wValue);
+ musb->port1_status &= ~(1 << wValue);
+ break;
+ case GetHubDescriptor:
+ {
+ struct usb_hub_descriptor *desc = (void *)buf;
+
+ desc->bDescLength = 9;
+ desc->bDescriptorType = 0x29;
+ desc->bNbrPorts = 1;
+ desc->wHubCharacteristics = __constant_cpu_to_le16(
+ 0x0001 /* per-port power switching */
+			| 0x0010 /* no over-current protection */
+ );
+ desc->bPwrOn2PwrGood = 5; /* msec/2 */
+ desc->bHubContrCurrent = 0;
+
+ /* workaround bogus struct definition */
+ desc->DeviceRemovable[0] = 0x02; /* port 1 */
+ desc->DeviceRemovable[1] = 0xff;
+ }
+ break;
+ case GetHubStatus:
+ temp = 0;
+ *(__le32 *) buf = cpu_to_le32(temp);
+ break;
+ case GetPortStatus:
+ if (wIndex != 1)
+ goto error;
+
+ /* finish RESET signaling? */
+ if ((musb->port1_status & USB_PORT_STAT_RESET)
+ && time_after_eq(jiffies, musb->rh_timer))
+ musb_port_reset(musb, false);
+
+ /* finish RESUME signaling? */
+ if ((musb->port1_status & MUSB_PORT_STAT_RESUME)
+ && time_after_eq(jiffies, musb->rh_timer)) {
+ u8 power;
+
+ power = musb_readb(musb->mregs, MUSB_POWER);
+ power &= ~MUSB_POWER_RESUME;
+ DBG(4, "root port resume stopped, power %02x\n",
+ power);
+ musb_writeb(musb->mregs, MUSB_POWER, power);
+
+ /* ISSUE: DaVinci (RTL 1.300) disconnects after
+ * resume of high speed peripherals (but not full
+ * speed ones).
+ */
+
+ musb->is_active = 1;
+ musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
+ | MUSB_PORT_STAT_RESUME);
+ musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
+ usb_hcd_poll_rh_status(musb_to_hcd(musb));
+ /* NOTE: it might really be A_WAIT_BCON ... */
+ musb->xceiv.state = OTG_STATE_A_HOST;
+ }
+
+ put_unaligned(cpu_to_le32(musb->port1_status
+ & ~MUSB_PORT_STAT_RESUME),
+ (__le32 *) buf);
+
+ /* port change status is more interesting */
+ DBG(get_unaligned((u16 *)(buf+2)) ? 2 : 5, "port status %08x\n",
+ musb->port1_status);
+ break;
+ case SetPortFeature:
+ if ((wIndex & 0xff) != 1)
+ goto error;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_POWER:
+ /* NOTE: this controller has a strange state machine
+ * that involves "requesting sessions" according to
+ * magic side effects from incompletely-described
+ * rules about startup...
+ *
+ * This call is what really starts the host mode; be
+ * very careful about side effects if you reorder any
+ * initialization logic, e.g. for OTG, or change any
+ * logic relating to VBUS power-up.
+ */
+ if (!(is_otg_enabled(musb) && hcd->self.is_b_host))
+ musb_start(musb);
+ break;
+ case USB_PORT_FEAT_RESET:
+ musb_port_reset(musb, true);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ musb_port_suspend(musb, true);
+ break;
+ case USB_PORT_FEAT_TEST:
+ if (unlikely(is_host_active(musb)))
+ goto error;
+
+ wIndex >>= 8;
+ switch (wIndex) {
+ case 1:
+ pr_debug("TEST_J\n");
+ temp = MUSB_TEST_J;
+ break;
+ case 2:
+ pr_debug("TEST_K\n");
+ temp = MUSB_TEST_K;
+ break;
+ case 3:
+ pr_debug("TEST_SE0_NAK\n");
+ temp = MUSB_TEST_SE0_NAK;
+ break;
+ case 4:
+ pr_debug("TEST_PACKET\n");
+ temp = MUSB_TEST_PACKET;
+ musb_load_testpacket(musb);
+ break;
+ case 5:
+ pr_debug("TEST_FORCE_ENABLE\n");
+ temp = MUSB_TEST_FORCE_HOST
+ | MUSB_TEST_FORCE_HS;
+
+ musb_writeb(musb->mregs, MUSB_DEVCTL,
+ MUSB_DEVCTL_SESSION);
+ break;
+ case 6:
+ pr_debug("TEST_FIFO_ACCESS\n");
+ temp = MUSB_TEST_FIFO_ACCESS;
+ break;
+ default:
+ goto error;
+ }
+ musb_writeb(musb->mregs, MUSB_TESTMODE, temp);
+ break;
+ default:
+ goto error;
+ }
+ DBG(5, "set feature %d\n", wValue);
+ musb->port1_status |= 1 << wValue;
+ break;
+
+ default:
+error:
+ /* "protocol stall" on error */
+ retval = -EPIPE;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return retval;
+}
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
new file mode 100644
index 000000000000..9ba8fb7fcd24
--- /dev/null
+++ b/drivers/usb/musb/musbhsdma.c
@@ -0,0 +1,433 @@
+/*
+ * MUSB OTG driver - support for Mentor's DMA controller
+ *
+ * Copyright 2005 Mentor Graphics Corporation
+ * Copyright (C) 2005-2007 by Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include "musb_core.h"
+
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+#include "omap2430.h"
+#endif
+
+#define MUSB_HSDMA_BASE 0x200
+#define MUSB_HSDMA_INTR (MUSB_HSDMA_BASE + 0)
+#define MUSB_HSDMA_CONTROL 0x4
+#define MUSB_HSDMA_ADDRESS 0x8
+#define MUSB_HSDMA_COUNT 0xc
+
+#define MUSB_HSDMA_CHANNEL_OFFSET(_bChannel, _offset) \
+		(MUSB_HSDMA_BASE + ((_bChannel) << 4) + (_offset))
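Each DMA channel thus gets a 0x10-byte register window starting at MUSB_HSDMA_BASE. A quick standalone check of the resulting addresses for channel 2 (arithmetic only, no driver types):

#include <stdio.h>

#define HSDMA_BASE		0x200
#define CHAN_OFF(ch, reg)	(HSDMA_BASE + ((ch) << 4) + (reg))

int main(void)
{
	/* channel 2: CONTROL 0x224, ADDRESS 0x228, COUNT 0x22c */
	printf("ch2: ctrl=0x%03x addr=0x%03x count=0x%03x\n",
	       CHAN_OFF(2, 0x4), CHAN_OFF(2, 0x8), CHAN_OFF(2, 0xc));
	return 0;
}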
+
+/* control register (16-bit): */
+#define MUSB_HSDMA_ENABLE_SHIFT 0
+#define MUSB_HSDMA_TRANSMIT_SHIFT 1
+#define MUSB_HSDMA_MODE1_SHIFT 2
+#define MUSB_HSDMA_IRQENABLE_SHIFT 3
+#define MUSB_HSDMA_ENDPOINT_SHIFT 4
+#define MUSB_HSDMA_BUSERROR_SHIFT 8
+#define MUSB_HSDMA_BURSTMODE_SHIFT 9
+#define MUSB_HSDMA_BURSTMODE (3 << MUSB_HSDMA_BURSTMODE_SHIFT)
+#define MUSB_HSDMA_BURSTMODE_UNSPEC 0
+#define MUSB_HSDMA_BURSTMODE_INCR4 1
+#define MUSB_HSDMA_BURSTMODE_INCR8 2
+#define MUSB_HSDMA_BURSTMODE_INCR16 3
+
+#define MUSB_HSDMA_CHANNELS 8
+
+struct musb_dma_controller;
+
+struct musb_dma_channel {
+ struct dma_channel Channel;
+ struct musb_dma_controller *controller;
+ u32 dwStartAddress;
+ u32 len;
+ u16 wMaxPacketSize;
+ u8 bIndex;
+ u8 epnum;
+ u8 transmit;
+};
+
+struct musb_dma_controller {
+ struct dma_controller Controller;
+ struct musb_dma_channel aChannel[MUSB_HSDMA_CHANNELS];
+ void *pDmaPrivate;
+ void __iomem *pCoreBase;
+ u8 bChannelCount;
+ u8 bmUsedChannels;
+ u8 irq;
+};
+
+static int dma_controller_start(struct dma_controller *c)
+{
+ /* nothing to do */
+ return 0;
+}
+
+static void dma_channel_release(struct dma_channel *pChannel);
+
+static int dma_controller_stop(struct dma_controller *c)
+{
+ struct musb_dma_controller *controller =
+ container_of(c, struct musb_dma_controller, Controller);
+ struct musb *musb = (struct musb *) controller->pDmaPrivate;
+ struct dma_channel *pChannel;
+ u8 bBit;
+
+ if (controller->bmUsedChannels != 0) {
+ dev_err(musb->controller,
+ "Stopping DMA controller while channel active\n");
+
+ for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) {
+ if (controller->bmUsedChannels & (1 << bBit)) {
+ pChannel = &controller->aChannel[bBit].Channel;
+ dma_channel_release(pChannel);
+
+ if (!controller->bmUsedChannels)
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+static struct dma_channel *dma_channel_allocate(struct dma_controller *c,
+ struct musb_hw_ep *hw_ep, u8 transmit)
+{
+ u8 bBit;
+ struct dma_channel *pChannel = NULL;
+ struct musb_dma_channel *pImplChannel = NULL;
+ struct musb_dma_controller *controller =
+ container_of(c, struct musb_dma_controller, Controller);
+
+ for (bBit = 0; bBit < MUSB_HSDMA_CHANNELS; bBit++) {
+ if (!(controller->bmUsedChannels & (1 << bBit))) {
+ controller->bmUsedChannels |= (1 << bBit);
+ pImplChannel = &(controller->aChannel[bBit]);
+ pImplChannel->controller = controller;
+ pImplChannel->bIndex = bBit;
+ pImplChannel->epnum = hw_ep->epnum;
+ pImplChannel->transmit = transmit;
+ pChannel = &(pImplChannel->Channel);
+ pChannel->private_data = pImplChannel;
+ pChannel->status = MUSB_DMA_STATUS_FREE;
+ pChannel->max_len = 0x10000;
+ /* Tx => mode 1; Rx => mode 0 */
+ pChannel->desired_mode = transmit;
+ pChannel->actual_len = 0;
+ break;
+ }
+ }
+ return pChannel;
+}
+
+static void dma_channel_release(struct dma_channel *pChannel)
+{
+ struct musb_dma_channel *pImplChannel =
+ (struct musb_dma_channel *) pChannel->private_data;
+
+ pChannel->actual_len = 0;
+ pImplChannel->dwStartAddress = 0;
+ pImplChannel->len = 0;
+
+ pImplChannel->controller->bmUsedChannels &=
+ ~(1 << pImplChannel->bIndex);
+
+ pChannel->status = MUSB_DMA_STATUS_UNKNOWN;
+}
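Channel bookkeeping is a one-byte bitmap (bmUsedChannels): allocate scans for the first clear bit and sets it, release clears it again, and dma_controller_stop walks the same bitmap to force-release anything still marked busy. A standalone sketch of just that bookkeeping:

#include <stdio.h>
#include <stdint.h>

#define NUM_CHANNELS	8

static uint8_t used;		/* stands in for bmUsedChannels */

static int chan_alloc(void)
{
	int bit;

	for (bit = 0; bit < NUM_CHANNELS; bit++) {
		if (!(used & (1 << bit))) {
			used |= 1 << bit;
			return bit;	/* the channel's bIndex */
		}
	}
	return -1;			/* all channels busy */
}

static void chan_release(int bit)
{
	used &= ~(1 << bit);
}

int main(void)
{
	int a = chan_alloc();		/* 0 */
	int b = chan_alloc();		/* 1 */

	chan_release(a);
	printf("a=%d b=%d next=%d used=%02x\n", a, b, chan_alloc(), used);
	/* prints: a=0 b=1 next=0 used=03 */
	return 0;
}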
+
+static void configure_channel(struct dma_channel *pChannel,
+ u16 packet_sz, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ struct musb_dma_channel *pImplChannel =
+ (struct musb_dma_channel *) pChannel->private_data;
+ struct musb_dma_controller *controller = pImplChannel->controller;
+ void __iomem *mbase = controller->pCoreBase;
+ u8 bChannel = pImplChannel->bIndex;
+ u16 csr = 0;
+
+ DBG(4, "%p, pkt_sz %d, addr 0x%x, len %d, mode %d\n",
+ pChannel, packet_sz, dma_addr, len, mode);
+
+ if (mode) {
+ csr |= 1 << MUSB_HSDMA_MODE1_SHIFT;
+ BUG_ON(len < packet_sz);
+
+ if (packet_sz >= 64) {
+ csr |= MUSB_HSDMA_BURSTMODE_INCR16
+ << MUSB_HSDMA_BURSTMODE_SHIFT;
+ } else if (packet_sz >= 32) {
+ csr |= MUSB_HSDMA_BURSTMODE_INCR8
+ << MUSB_HSDMA_BURSTMODE_SHIFT;
+ } else if (packet_sz >= 16) {
+ csr |= MUSB_HSDMA_BURSTMODE_INCR4
+ << MUSB_HSDMA_BURSTMODE_SHIFT;
+ }
+ }
+
+ csr |= (pImplChannel->epnum << MUSB_HSDMA_ENDPOINT_SHIFT)
+ | (1 << MUSB_HSDMA_ENABLE_SHIFT)
+ | (1 << MUSB_HSDMA_IRQENABLE_SHIFT)
+ | (pImplChannel->transmit
+ ? (1 << MUSB_HSDMA_TRANSMIT_SHIFT)
+ : 0);
+
+ /* address/count */
+ musb_writel(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS),
+ dma_addr);
+ musb_writel(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT),
+ len);
+
+ /* control (this should start things) */
+ musb_writew(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL),
+ csr);
+}
+
+static int dma_channel_program(struct dma_channel *pChannel,
+ u16 packet_sz, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ struct musb_dma_channel *pImplChannel =
+ (struct musb_dma_channel *) pChannel->private_data;
+
+ DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n",
+ pImplChannel->epnum,
+ pImplChannel->transmit ? "Tx" : "Rx",
+ packet_sz, dma_addr, len, mode);
+
+ BUG_ON(pChannel->status == MUSB_DMA_STATUS_UNKNOWN ||
+ pChannel->status == MUSB_DMA_STATUS_BUSY);
+
+ pChannel->actual_len = 0;
+ pImplChannel->dwStartAddress = dma_addr;
+ pImplChannel->len = len;
+ pImplChannel->wMaxPacketSize = packet_sz;
+ pChannel->status = MUSB_DMA_STATUS_BUSY;
+
+ if ((mode == 1) && (len >= packet_sz))
+ configure_channel(pChannel, packet_sz, 1, dma_addr, len);
+ else
+ configure_channel(pChannel, packet_sz, 0, dma_addr, len);
+
+ return true;
+}
+
+static int dma_channel_abort(struct dma_channel *pChannel)
+{
+ struct musb_dma_channel *pImplChannel =
+ (struct musb_dma_channel *) pChannel->private_data;
+ u8 bChannel = pImplChannel->bIndex;
+ void __iomem *mbase = pImplChannel->controller->pCoreBase;
+ u16 csr;
+
+ if (pChannel->status == MUSB_DMA_STATUS_BUSY) {
+ if (pImplChannel->transmit) {
+
+ csr = musb_readw(mbase,
+ MUSB_EP_OFFSET(pImplChannel->epnum,
+ MUSB_TXCSR));
+ csr &= ~(MUSB_TXCSR_AUTOSET |
+ MUSB_TXCSR_DMAENAB |
+ MUSB_TXCSR_DMAMODE);
+ musb_writew(mbase,
+ MUSB_EP_OFFSET(pImplChannel->epnum,
+ MUSB_TXCSR),
+ csr);
+ } else {
+ csr = musb_readw(mbase,
+ MUSB_EP_OFFSET(pImplChannel->epnum,
+ MUSB_RXCSR));
+ csr &= ~(MUSB_RXCSR_AUTOCLEAR |
+ MUSB_RXCSR_DMAENAB |
+ MUSB_RXCSR_DMAMODE);
+ musb_writew(mbase,
+ MUSB_EP_OFFSET(pImplChannel->epnum,
+ MUSB_RXCSR),
+ csr);
+ }
+
+ musb_writew(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_CONTROL),
+ 0);
+ musb_writel(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_ADDRESS),
+ 0);
+ musb_writel(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel, MUSB_HSDMA_COUNT),
+ 0);
+
+ pChannel->status = MUSB_DMA_STATUS_FREE;
+ }
+ return 0;
+}
+
+static irqreturn_t dma_controller_irq(int irq, void *private_data)
+{
+ struct musb_dma_controller *controller =
+ (struct musb_dma_controller *)private_data;
+ struct musb_dma_channel *pImplChannel;
+ struct musb *musb = controller->pDmaPrivate;
+ void __iomem *mbase = controller->pCoreBase;
+ struct dma_channel *pChannel;
+ u8 bChannel;
+ u16 csr;
+ u32 dwAddress;
+ u8 int_hsdma;
+ irqreturn_t retval = IRQ_NONE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
+ if (!int_hsdma)
+ goto done;
+
+ for (bChannel = 0; bChannel < MUSB_HSDMA_CHANNELS; bChannel++) {
+ if (int_hsdma & (1 << bChannel)) {
+ pImplChannel = (struct musb_dma_channel *)
+ &(controller->aChannel[bChannel]);
+ pChannel = &pImplChannel->Channel;
+
+ csr = musb_readw(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bChannel,
+ MUSB_HSDMA_CONTROL));
+
+ if (csr & (1 << MUSB_HSDMA_BUSERROR_SHIFT))
+ pImplChannel->Channel.status =
+ MUSB_DMA_STATUS_BUS_ABORT;
+ else {
+ u8 devctl;
+
+ dwAddress = musb_readl(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(
+ bChannel,
+ MUSB_HSDMA_ADDRESS));
+ pChannel->actual_len = dwAddress
+ - pImplChannel->dwStartAddress;
+
+ DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n",
+ pChannel, pImplChannel->dwStartAddress,
+ dwAddress, pChannel->actual_len,
+ pImplChannel->len,
+ (pChannel->actual_len
+ < pImplChannel->len) ?
+ "=> reconfig 0" : "=> complete");
+
+ devctl = musb_readb(mbase, MUSB_DEVCTL);
+
+ pChannel->status = MUSB_DMA_STATUS_FREE;
+
+ /* completed */
+ if ((devctl & MUSB_DEVCTL_HM)
+ && (pImplChannel->transmit)
+ && ((pChannel->desired_mode == 0)
+ || (pChannel->actual_len &
+ (pImplChannel->wMaxPacketSize - 1)))
+ ) {
+ /* Send out the packet */
+ musb_ep_select(mbase,
+ pImplChannel->epnum);
+ musb_writew(mbase, MUSB_EP_OFFSET(
+ pImplChannel->epnum,
+ MUSB_TXCSR),
+ MUSB_TXCSR_TXPKTRDY);
+ } else
+ musb_dma_completion(
+ musb,
+ pImplChannel->epnum,
+ pImplChannel->transmit);
+ }
+ }
+ }
+ retval = IRQ_HANDLED;
+done:
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return retval;
+}
+
+void dma_controller_destroy(struct dma_controller *c)
+{
+ struct musb_dma_controller *controller;
+
+ controller = container_of(c, struct musb_dma_controller, Controller);
+ if (!controller)
+ return;
+
+ if (controller->irq)
+ free_irq(controller->irq, c);
+
+ kfree(controller);
+}
+
+struct dma_controller *__init
+dma_controller_create(struct musb *musb, void __iomem *pCoreBase)
+{
+ struct musb_dma_controller *controller;
+ struct device *dev = musb->controller;
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq = platform_get_irq(pdev, 1);
+
+	if (irq <= 0) {
+ dev_err(dev, "No DMA interrupt line!\n");
+ return NULL;
+ }
+
+ controller = kzalloc(sizeof(struct musb_dma_controller), GFP_KERNEL);
+ if (!controller)
+ return NULL;
+
+ controller->bChannelCount = MUSB_HSDMA_CHANNELS;
+ controller->pDmaPrivate = musb;
+ controller->pCoreBase = pCoreBase;
+
+ controller->Controller.start = dma_controller_start;
+ controller->Controller.stop = dma_controller_stop;
+ controller->Controller.channel_alloc = dma_channel_allocate;
+ controller->Controller.channel_release = dma_channel_release;
+ controller->Controller.channel_program = dma_channel_program;
+ controller->Controller.channel_abort = dma_channel_abort;
+
+ if (request_irq(irq, dma_controller_irq, IRQF_DISABLED,
+ musb->controller->bus_id, &controller->Controller)) {
+ dev_err(dev, "request_irq %d failed!\n", irq);
+ dma_controller_destroy(&controller->Controller);
+ return NULL;
+ }
+
+ controller->irq = irq;
+
+ return &controller->Controller;
+}
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
new file mode 100644
index 000000000000..298b22e6ad0d
--- /dev/null
+++ b/drivers/usb/musb/omap2430.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) 2005-2007 by Texas Instruments
+ * Some code has been taken from tusb6010.c
+ * Copyrights for that are attributable to:
+ * Copyright (C) 2006 Nokia Corporation
+ * Jarkko Nikula <jarkko.nikula@nokia.com>
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This file is part of the Inventra Controller Driver for Linux.
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * The Inventra Controller Driver for Linux is distributed in
+ * the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ * License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with The Inventra Controller Driver for Linux ; if not,
+ * write to the Free Software Foundation, Inc., 59 Temple Place,
+ * Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <asm/mach-types.h>
+#include <asm/arch/hardware.h>
+#include <asm/arch/mux.h>
+
+#include "musb_core.h"
+#include "omap2430.h"
+
+#ifdef CONFIG_ARCH_OMAP3430
+#define get_cpu_rev() 2
+#endif
+
+#define MUSB_TIMEOUT_A_WAIT_BCON 1100
+
+static struct timer_list musb_idle_timer;
+
+static void musb_do_idle(unsigned long _musb)
+{
+ struct musb *musb = (void *)_musb;
+ unsigned long flags;
+ u8 power;
+ u8 devctl;
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_WAIT_BCON:
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE) {
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+ } else {
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+ MUSB_HST_MODE(musb);
+ }
+ break;
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ case OTG_STATE_A_SUSPEND:
+ /* finish RESUME signaling? */
+ if (musb->port1_status & MUSB_PORT_STAT_RESUME) {
+ power = musb_readb(musb->mregs, MUSB_POWER);
+ power &= ~MUSB_POWER_RESUME;
+ DBG(1, "root port resume stopped, power %02x\n", power);
+ musb_writeb(musb->mregs, MUSB_POWER, power);
+ musb->is_active = 1;
+ musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
+ | MUSB_PORT_STAT_RESUME);
+ musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
+ usb_hcd_poll_rh_status(musb_to_hcd(musb));
+ /* NOTE: it might really be A_WAIT_BCON ... */
+ musb->xceiv.state = OTG_STATE_A_HOST;
+ }
+ break;
+#endif
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ case OTG_STATE_A_HOST:
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if (devctl & MUSB_DEVCTL_BDEVICE)
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ else
+ musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+#endif
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+
+void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
+{
+ unsigned long default_timeout = jiffies + msecs_to_jiffies(3);
+ static unsigned long last_timer;
+
+ if (timeout == 0)
+ timeout = default_timeout;
+
+ /* Never idle if active, or when VBUS timeout is not set as host */
+ if (musb->is_active || ((musb->a_wait_bcon == 0)
+ && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
+ DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
+ del_timer(&musb_idle_timer);
+ last_timer = jiffies;
+ return;
+ }
+
+ if (time_after(last_timer, timeout)) {
+ if (!timer_pending(&musb_idle_timer))
+ last_timer = timeout;
+ else {
+ DBG(4, "Longer idle timer already pending, ignoring\n");
+ return;
+ }
+ }
+ last_timer = timeout;
+
+	DBG(4, "%s inactive, starting idle timer for %lu ms\n",
+ otg_state_string(musb),
+ (unsigned long)jiffies_to_msecs(timeout - jiffies));
+ mod_timer(&musb_idle_timer, timeout);
+}
+
+void musb_platform_enable(struct musb *musb)
+{
+}
+void musb_platform_disable(struct musb *musb)
+{
+}
+static void omap_vbus_power(struct musb *musb, int is_on, int sleeping)
+{
+}
+
+static void omap_set_vbus(struct musb *musb, int is_on)
+{
+ u8 devctl;
+ /* HDRC controls CPEN, but beware current surges during device
+ * connect. They can trigger transient overcurrent conditions
+ * that must be ignored.
+ */
+
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ if (is_on) {
+ musb->is_active = 1;
+ musb->xceiv.default_a = 1;
+ musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+ devctl |= MUSB_DEVCTL_SESSION;
+
+ MUSB_HST_MODE(musb);
+ } else {
+ musb->is_active = 0;
+
+ /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and
+ * jumping right to B_IDLE...
+ */
+
+ musb->xceiv.default_a = 0;
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ devctl &= ~MUSB_DEVCTL_SESSION;
+
+ MUSB_DEV_MODE(musb);
+ }
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ DBG(1, "VBUS %s, devctl %02x "
+ /* otg %3x conf %08x prcm %08x */ "\n",
+ otg_state_string(musb),
+ musb_readb(musb->mregs, MUSB_DEVCTL));
+}
+static int omap_set_power(struct otg_transceiver *x, unsigned mA)
+{
+ return 0;
+}
+
+static int musb_platform_resume(struct musb *musb);
+
+void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+{
+ u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ switch (musb_mode) {
+ case MUSB_HOST:
+ otg_set_host(&musb->xceiv, musb->xceiv.host);
+ break;
+ case MUSB_PERIPHERAL:
+ otg_set_peripheral(&musb->xceiv, musb->xceiv.gadget);
+ break;
+ case MUSB_OTG:
+ break;
+ }
+}
+
+int __init musb_platform_init(struct musb *musb)
+{
+ u32 l;
+
+#if defined(CONFIG_ARCH_OMAP2430)
+ omap_cfg_reg(AE5_2430_USB0HS_STP);
+#endif
+
+ musb_platform_resume(musb);
+
+ l = omap_readl(OTG_SYSCONFIG);
+ l &= ~ENABLEWAKEUP; /* disable wakeup */
+ l &= ~NOSTDBY; /* remove possible nostdby */
+ l |= SMARTSTDBY; /* enable smart standby */
+ l &= ~AUTOIDLE; /* disable auto idle */
+ l &= ~NOIDLE; /* remove possible noidle */
+ l |= SMARTIDLE; /* enable smart idle */
+ l |= AUTOIDLE; /* enable auto idle */
+ omap_writel(l, OTG_SYSCONFIG);
+
+ l = omap_readl(OTG_INTERFSEL);
+ l |= ULPI_12PIN;
+ omap_writel(l, OTG_INTERFSEL);
+
+ pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
+ "sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n",
+ omap_readl(OTG_REVISION), omap_readl(OTG_SYSCONFIG),
+ omap_readl(OTG_SYSSTATUS), omap_readl(OTG_INTERFSEL),
+ omap_readl(OTG_SIMENABLE));
+
+ omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1);
+
+ if (is_host_enabled(musb))
+ musb->board_set_vbus = omap_set_vbus;
+ if (is_peripheral_enabled(musb))
+ musb->xceiv.set_power = omap_set_power;
+ musb->a_wait_bcon = MUSB_TIMEOUT_A_WAIT_BCON;
+
+ setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
+
+ return 0;
+}
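The OTG_SYSCONFIG sequence above selects smart standby and smart idle and re-enables autoidle, using the field encodings from omap2430.h later in this patch (MIDLEMODE at bit 12, SIDLEMODE at bit 3, AUTOIDLE at bit 0). A standalone sketch of the resulting value when the register happens to read back as zero:

#include <stdio.h>
#include <stdint.h>

#define MIDLEMODE	12
#define NOSTDBY		(1 << MIDLEMODE)
#define SMARTSTDBY	(2 << MIDLEMODE)
#define SIDLEMODE	3
#define NOIDLE		(1 << SIDLEMODE)
#define SMARTIDLE	(2 << SIDLEMODE)
#define ENABLEWAKEUP	(1 << 2)
#define AUTOIDLE	(1 << 0)

/* same read-modify-write steps as musb_platform_init() above */
static uint32_t init_sysconfig(uint32_t l)
{
	l &= ~ENABLEWAKEUP;
	l &= ~NOSTDBY;
	l |= SMARTSTDBY;
	l &= ~AUTOIDLE;
	l &= ~NOIDLE;
	l |= SMARTIDLE;
	l |= AUTOIDLE;
	return l;
}

int main(void)
{
	/* 0x2011 = SMARTSTDBY | SMARTIDLE | AUTOIDLE */
	printf("sysconfig = 0x%04x\n", init_sysconfig(0));
	return 0;
}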
+
+int musb_platform_suspend(struct musb *musb)
+{
+ u32 l;
+
+ if (!musb->clock)
+ return 0;
+
+ /* in any role */
+ l = omap_readl(OTG_FORCESTDBY);
+ l |= ENABLEFORCE; /* enable MSTANDBY */
+ omap_writel(l, OTG_FORCESTDBY);
+
+ l = omap_readl(OTG_SYSCONFIG);
+ l |= ENABLEWAKEUP; /* enable wakeup */
+ omap_writel(l, OTG_SYSCONFIG);
+
+ if (musb->xceiv.set_suspend)
+ musb->xceiv.set_suspend(&musb->xceiv, 1);
+
+ if (musb->set_clock)
+ musb->set_clock(musb->clock, 0);
+ else
+ clk_disable(musb->clock);
+
+ return 0;
+}
+
+static int musb_platform_resume(struct musb *musb)
+{
+ u32 l;
+
+ if (!musb->clock)
+ return 0;
+
+ if (musb->xceiv.set_suspend)
+ musb->xceiv.set_suspend(&musb->xceiv, 0);
+
+ if (musb->set_clock)
+ musb->set_clock(musb->clock, 1);
+ else
+ clk_enable(musb->clock);
+
+ l = omap_readl(OTG_SYSCONFIG);
+ l &= ~ENABLEWAKEUP; /* disable wakeup */
+ omap_writel(l, OTG_SYSCONFIG);
+
+ l = omap_readl(OTG_FORCESTDBY);
+ l &= ~ENABLEFORCE; /* disable MSTANDBY */
+ omap_writel(l, OTG_FORCESTDBY);
+
+ return 0;
+}
+
+
+int musb_platform_exit(struct musb *musb)
+{
+
+ omap_vbus_power(musb, 0 /*off*/, 1);
+
+ musb_platform_suspend(musb);
+
+ clk_put(musb->clock);
+ musb->clock = 0;
+
+ return 0;
+}
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h
new file mode 100644
index 000000000000..786a62071f72
--- /dev/null
+++ b/drivers/usb/musb/omap2430.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2005-2006 by Texas Instruments
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ */
+
+#ifndef __MUSB_OMAP243X_H__
+#define __MUSB_OMAP243X_H__
+
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
+#include <asm/arch/hardware.h>
+#include <asm/arch/usb.h>
+
+/*
+ * OMAP2430-specific definitions
+ */
+
+#define MENTOR_BASE_OFFSET 0
+#if defined(CONFIG_ARCH_OMAP2430)
+#define OMAP_HSOTG_BASE (OMAP243X_HS_BASE)
+#elif defined(CONFIG_ARCH_OMAP3430)
+#define OMAP_HSOTG_BASE (OMAP34XX_HSUSB_OTG_BASE)
+#endif
+#define OMAP_HSOTG(offset) (OMAP_HSOTG_BASE + 0x400 + (offset))
+#define OTG_REVISION OMAP_HSOTG(0x0)
+#define OTG_SYSCONFIG OMAP_HSOTG(0x4)
+# define MIDLEMODE 12 /* bit position */
+# define FORCESTDBY (0 << MIDLEMODE)
+# define NOSTDBY (1 << MIDLEMODE)
+# define SMARTSTDBY (2 << MIDLEMODE)
+# define SIDLEMODE 3 /* bit position */
+# define FORCEIDLE (0 << SIDLEMODE)
+# define NOIDLE (1 << SIDLEMODE)
+# define SMARTIDLE (2 << SIDLEMODE)
+# define ENABLEWAKEUP (1 << 2)
+# define SOFTRST (1 << 1)
+# define AUTOIDLE (1 << 0)
+#define OTG_SYSSTATUS OMAP_HSOTG(0x8)
+# define RESETDONE (1 << 0)
+#define OTG_INTERFSEL OMAP_HSOTG(0xc)
+# define EXTCP (1 << 2)
+# define PHYSEL 0 /* bit position */
+# define UTMI_8BIT (0 << PHYSEL)
+# define ULPI_12PIN (1 << PHYSEL)
+# define ULPI_8PIN (2 << PHYSEL)
+#define OTG_SIMENABLE OMAP_HSOTG(0x10)
+# define TM1 (1 << 0)
+#define OTG_FORCESTDBY OMAP_HSOTG(0x14)
+# define ENABLEFORCE (1 << 0)
+
+#endif	/* CONFIG_ARCH_OMAP2430 || CONFIG_ARCH_OMAP3430 */
+
+#endif /* __MUSB_OMAP243X_H__ */
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
new file mode 100644
index 000000000000..b73b036f3d77
--- /dev/null
+++ b/drivers/usb/musb/tusb6010.c
@@ -0,0 +1,1151 @@
+/*
+ * TUSB6010 USB 2.0 OTG Dual Role controller
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Jarkko Nikula <jarkko.nikula@nokia.com>
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Notes:
+ * - Driver assumes that interface to external host (main CPU) is
+ * configured for NOR FLASH interface instead of VLYNQ serial
+ * interface.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/usb.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+
+#include "musb_core.h"
+
+static void tusb_source_power(struct musb *musb, int is_on);
+
+#define TUSB_REV_MAJOR(reg_val)	(((reg_val) >> 4) & 0xf)
+#define TUSB_REV_MINOR(reg_val)	((reg_val) & 0xf)
+
+/*
+ * Checks the revision. We need to use the DMA register as 3.0 does not
+ * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV.
+ */
+u8 tusb_get_revision(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 die_id;
+ u8 rev;
+
+ rev = musb_readl(tbase, TUSB_DMA_CTRL_REV) & 0xff;
+ if (TUSB_REV_MAJOR(rev) == 3) {
+ die_id = TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase,
+ TUSB_DIDR1_HI));
+ if (die_id >= TUSB_DIDR1_HI_REV_31)
+ rev |= 1;
+ }
+
+ return rev;
+}
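
The returned revision byte keeps the major number in bits 7:4 and the minor number in bits 3:0, and a 3.0 part is promoted to 3.1 when the die ID allows it. A stand-alone illustration of the same decode, with the two macros re-declared locally and example values chosen for demonstration:

#include <stdio.h>

#define TUSB_REV_MAJOR(reg_val)	(((reg_val) >> 4) & 0xf)
#define TUSB_REV_MINOR(reg_val)	((reg_val) & 0xf)

int main(void)
{
	unsigned char rev = 0x30;	/* as read from TUSB_DMA_CTRL_REV */
	int die_rev = 2;		/* TUSB_DIDR1_HI_REV_31 */

	if (TUSB_REV_MAJOR(rev) == 3 && die_rev >= 2)
		rev |= 1;		/* promote 3.0 to 3.1 */

	printf("TUSB revision %d.%d (0x%02x)\n",
	       TUSB_REV_MAJOR(rev), TUSB_REV_MINOR(rev), rev);
	return 0;
}
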
+
+static int __init tusb_print_revision(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u8 rev;
+
+ rev = tusb_get_revision(musb);
+
+ pr_info("tusb: %s%i.%i %s%i.%i %s%i.%i %s%i.%i %s%i %s%i.%i\n",
+ "prcm",
+ TUSB_REV_MAJOR(musb_readl(tbase, TUSB_PRCM_REV)),
+ TUSB_REV_MINOR(musb_readl(tbase, TUSB_PRCM_REV)),
+ "int",
+ TUSB_REV_MAJOR(musb_readl(tbase, TUSB_INT_CTRL_REV)),
+ TUSB_REV_MINOR(musb_readl(tbase, TUSB_INT_CTRL_REV)),
+ "gpio",
+ TUSB_REV_MAJOR(musb_readl(tbase, TUSB_GPIO_REV)),
+ TUSB_REV_MINOR(musb_readl(tbase, TUSB_GPIO_REV)),
+ "dma",
+ TUSB_REV_MAJOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)),
+ TUSB_REV_MINOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)),
+ "dieid",
+ TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase, TUSB_DIDR1_HI)),
+ "rev",
+ TUSB_REV_MAJOR(rev), TUSB_REV_MINOR(rev));
+
+ return tusb_get_revision(musb);
+}
+
+#define WBUS_QUIRK_MASK (TUSB_PHY_OTG_CTRL_TESTM2 | TUSB_PHY_OTG_CTRL_TESTM1 \
+ | TUSB_PHY_OTG_CTRL_TESTM0)
+
+/*
+ * Workaround for spontaneous WBUS wake-up issue #2 for tusb3.0.
+ * Disables power detection in PHY for the duration of idle.
+ */
+static void tusb_wbus_quirk(struct musb *musb, int enabled)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ static u32 phy_otg_ctrl, phy_otg_ena;
+ u32 tmp;
+
+ if (enabled) {
+ phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
+ phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
+ tmp = TUSB_PHY_OTG_CTRL_WRPROTECT
+ | phy_otg_ena | WBUS_QUIRK_MASK;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp);
+ tmp = phy_otg_ena & ~WBUS_QUIRK_MASK;
+ tmp |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_TESTM2;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
+ DBG(2, "Enabled tusb wbus quirk ctrl %08x ena %08x\n",
+ musb_readl(tbase, TUSB_PHY_OTG_CTRL),
+ musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
+ } else if (musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)
+ & TUSB_PHY_OTG_CTRL_TESTM2) {
+ tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp);
+ tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
+ DBG(2, "Disabled tusb wbus quirk ctrl %08x ena %08x\n",
+ musb_readl(tbase, TUSB_PHY_OTG_CTRL),
+ musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
+ phy_otg_ctrl = 0;
+ phy_otg_ena = 0;
+ }
+}
+
+/*
+ * TUSB 6010 may use a parallel bus that doesn't support byte ops;
+ * so both loading and unloading FIFOs need explicit byte counts.
+ */
+
+static inline void
+tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len)
+{
+ u32 val;
+ int i;
+
+ if (len > 4) {
+ for (i = 0; i < (len >> 2); i++) {
+ memcpy(&val, buf, 4);
+ musb_writel(fifo, 0, val);
+ buf += 4;
+ }
+ len %= 4;
+ }
+ if (len > 0) {
+		/* Write the remaining 1 - 3 bytes to FIFO */
+ memcpy(&val, buf, len);
+ musb_writel(fifo, 0, val);
+ }
+}
+
+static inline void tusb_fifo_read_unaligned(void __iomem *fifo,
+ void __iomem *buf, u16 len)
+{
+ u32 val;
+ int i;
+
+ if (len > 4) {
+ for (i = 0; i < (len >> 2); i++) {
+ val = musb_readl(fifo, 0);
+ memcpy(buf, &val, 4);
+ buf += 4;
+ }
+ len %= 4;
+ }
+ if (len > 0) {
+		/* Read the remaining 1 - 3 bytes from FIFO */
+ val = musb_readl(fifo, 0);
+ memcpy(buf, &val, len);
+ }
+}
+
+void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf)
+{
+ void __iomem *ep_conf = hw_ep->conf;
+ void __iomem *fifo = hw_ep->fifo;
+ u8 epnum = hw_ep->epnum;
+
+ prefetch(buf);
+
+ DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+ 'T', epnum, fifo, len, buf);
+
+ if (epnum)
+ musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
+ TUSB_EP_CONFIG_XFR_SIZE(len));
+ else
+ musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_DIR_TX |
+ TUSB_EP0_CONFIG_XFR_SIZE(len));
+
+ if (likely((0x01 & (unsigned long) buf) == 0)) {
+
+ /* Best case is 32bit-aligned destination address */
+ if ((0x02 & (unsigned long) buf) == 0) {
+ if (len >= 4) {
+ writesl(fifo, buf, len >> 2);
+ buf += (len & ~0x03);
+ len &= 0x03;
+ }
+ } else {
+ if (len >= 2) {
+ u32 val;
+ int i;
+
+ /* Cannot use writesw, fifo is 32-bit */
+ for (i = 0; i < (len >> 2); i++) {
+ val = (u32)(*(u16 *)buf);
+ buf += 2;
+ val |= (*(u16 *)buf) << 16;
+ buf += 2;
+ musb_writel(fifo, 0, val);
+ }
+ len &= 0x03;
+ }
+ }
+ }
+
+ if (len > 0)
+ tusb_fifo_write_unaligned(fifo, buf, len);
+}
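
Because the FIFO only takes 32-bit accesses, the half-word-aligned path above packs two 16-bit loads into each word before writing. The fragment below shows just that packing on an ordinary memory buffer (little-endian, as on OMAP); it is an illustration of the technique, not driver code.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Pretend buf is a 2-byte-aligned payload of 8 bytes. */
	uint16_t buf[] = { 0x2211, 0x4433, 0x6655, 0x8877 };
	size_t i;

	for (i = 0; i < sizeof(buf) / sizeof(buf[0]); i += 2) {
		uint32_t val = (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 16);

		/* In the driver this would be musb_writel(fifo, 0, val). */
		printf("FIFO word %zu: 0x%08x\n", i / 2, (unsigned int)val);
	}
	return 0;
}
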
+
+void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
+{
+ void __iomem *ep_conf = hw_ep->conf;
+ void __iomem *fifo = hw_ep->fifo;
+ u8 epnum = hw_ep->epnum;
+
+ DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
+ 'R', epnum, fifo, len, buf);
+
+ if (epnum)
+ musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
+ TUSB_EP_CONFIG_XFR_SIZE(len));
+ else
+ musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_XFR_SIZE(len));
+
+ if (likely((0x01 & (unsigned long) buf) == 0)) {
+
+ /* Best case is 32bit-aligned destination address */
+ if ((0x02 & (unsigned long) buf) == 0) {
+ if (len >= 4) {
+ readsl(fifo, buf, len >> 2);
+ buf += (len & ~0x03);
+ len &= 0x03;
+ }
+ } else {
+ if (len >= 2) {
+ u32 val;
+ int i;
+
+ /* Cannot use readsw, fifo is 32-bit */
+ for (i = 0; i < (len >> 2); i++) {
+ val = musb_readl(fifo, 0);
+ *(u16 *)buf = (u16)(val & 0xffff);
+ buf += 2;
+ *(u16 *)buf = (u16)(val >> 16);
+ buf += 2;
+ }
+ len &= 0x03;
+ }
+ }
+ }
+
+ if (len > 0)
+ tusb_fifo_read_unaligned(fifo, buf, len);
+}
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+
+/* This is used by gadget drivers and OTG transceiver logic, allowing
+ * at most mA of current to be drawn from VBUS during a Default-B session
+ * (that is, while VBUS exceeds 4.4V). In Default-A (including pure host
+ * mode), or low power Default-B sessions, something else supplies power.
+ * Caller must take care of locking.
+ */
+static int tusb_draw_power(struct otg_transceiver *x, unsigned mA)
+{
+ struct musb *musb = container_of(x, struct musb, xceiv);
+ void __iomem *tbase = musb->ctrl_base;
+ u32 reg;
+
+ /*
+ * Keep clock active when enabled. Note that this is not tied to
+ * drawing VBUS, as with OTG mA can be less than musb->min_power.
+ */
+ if (musb->set_clock) {
+ if (mA)
+ musb->set_clock(musb->clock, 1);
+ else
+ musb->set_clock(musb->clock, 0);
+ }
+
+ /* tps65030 seems to consume max 100mA, with maybe 60mA available
+ * (measured on one board) for things other than tps and tusb.
+ *
+ * Boards sharing the CPU clock with CLKIN will need to prevent
+ * certain idle sleep states while the USB link is active.
+ *
+ * REVISIT we could use VBUS to supply only _one_ of { 1.5V, 3.3V }.
+ * The actual current usage would be very board-specific. For now,
+ * it's simpler to just use an aggregate (also board-specific).
+ */
+ if (x->default_a || mA < (musb->min_power << 1))
+ mA = 0;
+
+ reg = musb_readl(tbase, TUSB_PRCM_MNGMT);
+ if (mA) {
+ musb->is_bus_powered = 1;
+ reg |= TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN;
+ } else {
+ musb->is_bus_powered = 0;
+ reg &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
+ }
+ musb_writel(tbase, TUSB_PRCM_MNGMT, reg);
+
+ DBG(2, "draw max %d mA VBUS\n", mA);
+ return 0;
+}
+
+#else
+#define tusb_draw_power NULL
+#endif
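
tusb_draw_power() is installed later as musb->xceiv.set_power (see musb_platform_init() at the end of this file), so a request to draw bus current reaches it through the transceiver hook. A hedged sketch of such a call path; the wrapper function below is illustrative only:

/* Illustrative only: how a bus-current request reaches tusb_draw_power()
 * once musb->xceiv.set_power has been pointed at it.
 */
static int example_request_vbus_current(struct musb *musb, unsigned mA)
{
	if (musb->xceiv.set_power)
		return musb->xceiv.set_power(&musb->xceiv, mA);
	return 0;
}
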
+
+/* workaround for issue 13: change clock during chip idle
+ * (to be fixed in rev3 silicon) ... symptoms include disconnect
+ * or looping suspend/resume cycles
+ */
+static void tusb_set_clock_source(struct musb *musb, unsigned mode)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 reg;
+
+ reg = musb_readl(tbase, TUSB_PRCM_CONF);
+ reg &= ~TUSB_PRCM_CONF_SYS_CLKSEL(0x3);
+
+ /* 0 = refclk (clkin, XI)
+ * 1 = PHY 60 MHz (internal PLL)
+ * 2 = not supported
+ * 3 = what?
+ */
+ if (mode > 0)
+ reg |= TUSB_PRCM_CONF_SYS_CLKSEL(mode & 0x3);
+
+ musb_writel(tbase, TUSB_PRCM_CONF, reg);
+
+ /* FIXME tusb6010_platform_retime(mode == 0); */
+}
+
+/*
+ * Idle TUSB6010 until next wake-up event; NOR access always wakes.
+ * Other code ensures that we idle unless we're connected _and_ the
+ * USB link is not suspended ... and tells us the relevant wakeup
+ * events. SW_EN for voltage is handled separately.
+ */
+void tusb_allow_idle(struct musb *musb, u32 wakeup_enables)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 reg;
+
+ if ((wakeup_enables & TUSB_PRCM_WBUS)
+ && (tusb_get_revision(musb) == TUSB_REV_30))
+ tusb_wbus_quirk(musb, 1);
+
+ tusb_set_clock_source(musb, 0);
+
+ wakeup_enables |= TUSB_PRCM_WNORCS;
+ musb_writel(tbase, TUSB_PRCM_WAKEUP_MASK, ~wakeup_enables);
+
+ /* REVISIT writeup of WID implies that if WID set and ID is grounded,
+ * TUSB_PHY_OTG_CTRL.TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP must be cleared.
+ * Presumably that's mostly to save power, hence WID is immaterial ...
+ */
+
+ reg = musb_readl(tbase, TUSB_PRCM_MNGMT);
+ /* issue 4: when driving vbus, use hipower (vbus_det) comparator */
+ if (is_host_active(musb)) {
+ reg |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
+ reg &= ~TUSB_PRCM_MNGMT_OTG_SESS_END_EN;
+ } else {
+ reg |= TUSB_PRCM_MNGMT_OTG_SESS_END_EN;
+ reg &= ~TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
+ }
+ reg |= TUSB_PRCM_MNGMT_PM_IDLE | TUSB_PRCM_MNGMT_DEV_IDLE;
+ musb_writel(tbase, TUSB_PRCM_MNGMT, reg);
+
+ DBG(6, "idle, wake on %02x\n", wakeup_enables);
+}
+
+/*
+ * Updates cable VBUS status. Caller must take care of locking.
+ */
+int musb_platform_get_vbus_status(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 otg_stat, prcm_mngmt;
+ int ret = 0;
+
+ otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ prcm_mngmt = musb_readl(tbase, TUSB_PRCM_MNGMT);
+
+ /* Temporarily enable VBUS detection if it was disabled for
+	 * suspend mode. Unless it is enabled, otg_stat and devctl will
+	 * not show the correct VBUS state.
+ */
+ if (!(prcm_mngmt & TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN)) {
+ u32 tmp = prcm_mngmt;
+ tmp |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
+ musb_writel(tbase, TUSB_PRCM_MNGMT, tmp);
+ otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ musb_writel(tbase, TUSB_PRCM_MNGMT, prcm_mngmt);
+ }
+
+ if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID)
+ ret = 1;
+
+ return ret;
+}
+
+static struct timer_list musb_idle_timer;
+
+static void musb_do_idle(unsigned long _musb)
+{
+ struct musb *musb = (void *)_musb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_WAIT_BCON:
+ if ((musb->a_wait_bcon != 0)
+ && (musb->idle_timeout == 0
+ || time_after(jiffies, musb->idle_timeout))) {
+ DBG(4, "Nothing connected %s, turning off VBUS\n",
+ otg_state_string(musb));
+ }
+ /* FALLTHROUGH */
+ case OTG_STATE_A_IDLE:
+ tusb_source_power(musb, 0);
+ default:
+ break;
+ }
+
+ if (!musb->is_active) {
+ u32 wakeups;
+
+ /* wait until khubd handles port change status */
+ if (is_host_active(musb) && (musb->port1_status >> 16))
+ goto done;
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ if (is_peripheral_enabled(musb) && !musb->gadget_driver)
+ wakeups = 0;
+ else {
+ wakeups = TUSB_PRCM_WHOSTDISCON
+ | TUSB_PRCM_WBUS
+ | TUSB_PRCM_WVBUS;
+ if (is_otg_enabled(musb))
+ wakeups |= TUSB_PRCM_WID;
+ }
+#else
+ wakeups = TUSB_PRCM_WHOSTDISCON | TUSB_PRCM_WBUS;
+#endif
+ tusb_allow_idle(musb, wakeups);
+ }
+done:
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+/*
+ * Maybe put TUSB6010 into idle mode depending on the USB link status,
+ * like "disconnected" or "suspended". We'll be woken out of it by
+ * connect, resume, or disconnect.
+ *
+ * Needs to be called as the last function wherever there is register
+ * access to TUSB6010, because of the NOR flash wake-up.
+ * Caller should own controller spinlock.
+ *
+ * Delay because peripheral enables D+ pullup 3msec after SE0, and
+ * we don't want to treat that full speed J as a wakeup event.
+ * ... peripherals must draw only suspend current after 10 msec.
+ */
+void musb_platform_try_idle(struct musb *musb, unsigned long timeout)
+{
+ unsigned long default_timeout = jiffies + msecs_to_jiffies(3);
+ static unsigned long last_timer;
+
+ if (timeout == 0)
+ timeout = default_timeout;
+
+ /* Never idle if active, or when VBUS timeout is not set as host */
+ if (musb->is_active || ((musb->a_wait_bcon == 0)
+ && (musb->xceiv.state == OTG_STATE_A_WAIT_BCON))) {
+ DBG(4, "%s active, deleting timer\n", otg_state_string(musb));
+ del_timer(&musb_idle_timer);
+ last_timer = jiffies;
+ return;
+ }
+
+ if (time_after(last_timer, timeout)) {
+ if (!timer_pending(&musb_idle_timer))
+ last_timer = timeout;
+ else {
+ DBG(4, "Longer idle timer already pending, ignoring\n");
+ return;
+ }
+ }
+ last_timer = timeout;
+
+	DBG(4, "%s inactive, starting idle timer for %lu ms\n",
+ otg_state_string(musb),
+ (unsigned long)jiffies_to_msecs(timeout - jiffies));
+ mod_timer(&musb_idle_timer, timeout);
+}
+
+/* ticks of 60 MHz clock */
+#define DEVCLOCK 60000000
+#define OTG_TIMER_MS(msecs) ((msecs) \
+ ? (TUSB_DEV_OTG_TIMER_VAL((DEVCLOCK/1000)*(msecs)) \
+ | TUSB_DEV_OTG_TIMER_ENABLE) \
+ : 0)
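
OTG_TIMER_MS() converts a millisecond count into ticks of the 60 MHz device clock and sets the timer enable bit. A stand-alone worked example with the macros re-declared locally, purely for illustration:

#include <stdio.h>

#define DEVCLOCK			60000000
#define TUSB_DEV_OTG_TIMER_ENABLE	(1u << 31)
#define TUSB_DEV_OTG_TIMER_VAL(v)	((v) & 0x07ffffff)
#define OTG_TIMER_MS(msecs)	((msecs) \
	? (TUSB_DEV_OTG_TIMER_VAL((DEVCLOCK / 1000) * (msecs)) \
		| TUSB_DEV_OTG_TIMER_ENABLE) \
	: 0)

int main(void)
{
	/* 100 ms -> 6,000,000 ticks (0x005b8d80) plus the enable bit */
	unsigned int timer = OTG_TIMER_MS(100);

	printf("TUSB_DEV_OTG_TIMER value for 100 ms: 0x%08x\n", timer);
	return 0;
}
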
+
+static void tusb_source_power(struct musb *musb, int is_on)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 conf, prcm, timer;
+ u8 devctl;
+
+ /* HDRC controls CPEN, but beware current surges during device
+ * connect. They can trigger transient overcurrent conditions
+ * that must be ignored.
+ */
+
+ prcm = musb_readl(tbase, TUSB_PRCM_MNGMT);
+ conf = musb_readl(tbase, TUSB_DEV_CONF);
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+
+ if (is_on) {
+ if (musb->set_clock)
+ musb->set_clock(musb->clock, 1);
+ timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE);
+ musb->xceiv.default_a = 1;
+ musb->xceiv.state = OTG_STATE_A_WAIT_VRISE;
+ devctl |= MUSB_DEVCTL_SESSION;
+
+ conf |= TUSB_DEV_CONF_USB_HOST_MODE;
+ MUSB_HST_MODE(musb);
+ } else {
+ u32 otg_stat;
+
+ timer = 0;
+
+ /* If ID pin is grounded, we want to be a_idle */
+ otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) {
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_WAIT_VRISE:
+ case OTG_STATE_A_WAIT_BCON:
+ musb->xceiv.state = OTG_STATE_A_WAIT_VFALL;
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+ break;
+ default:
+ musb->xceiv.state = OTG_STATE_A_IDLE;
+ }
+ musb->is_active = 0;
+ musb->xceiv.default_a = 1;
+ MUSB_HST_MODE(musb);
+ } else {
+ musb->is_active = 0;
+ musb->xceiv.default_a = 0;
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ MUSB_DEV_MODE(musb);
+ }
+
+ devctl &= ~MUSB_DEVCTL_SESSION;
+ conf &= ~TUSB_DEV_CONF_USB_HOST_MODE;
+ if (musb->set_clock)
+ musb->set_clock(musb->clock, 0);
+ }
+ prcm &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
+
+ musb_writel(tbase, TUSB_PRCM_MNGMT, prcm);
+ musb_writel(tbase, TUSB_DEV_OTG_TIMER, timer);
+ musb_writel(tbase, TUSB_DEV_CONF, conf);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+
+ DBG(1, "VBUS %s, devctl %02x otg %3x conf %08x prcm %08x\n",
+ otg_state_string(musb),
+ musb_readb(musb->mregs, MUSB_DEVCTL),
+ musb_readl(tbase, TUSB_DEV_OTG_STAT),
+ conf, prcm);
+}
+
+/*
+ * Sets the mode to OTG, peripheral or host by changing the ID detection.
+ * Caller must take care of locking.
+ *
+ * Note that if a mini-A cable is plugged in the ID line will stay down as
+ * the weak ID pull-up is not able to pull the ID up.
+ *
+ * REVISIT: It would be possible to add support for changing between host
+ * and peripheral modes in non-OTG configurations by reconfiguring hardware
+ * and then setting musb->board_mode. For now, only support OTG mode.
+ */
+void musb_platform_set_mode(struct musb *musb, u8 musb_mode)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ u32 otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf;
+
+ if (musb->board_mode != MUSB_OTG) {
+ ERR("Changing mode currently only supported in OTG mode\n");
+ return;
+ }
+
+ otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
+ phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
+ dev_conf = musb_readl(tbase, TUSB_DEV_CONF);
+
+ switch (musb_mode) {
+
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ case MUSB_HOST: /* Disable PHY ID detect, ground ID */
+ phy_otg_ctrl &= ~TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ dev_conf |= TUSB_DEV_CONF_ID_SEL;
+ dev_conf &= ~TUSB_DEV_CONF_SOFT_ID;
+ break;
+#endif
+
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ case MUSB_PERIPHERAL: /* Disable PHY ID detect, keep ID pull-up on */
+ phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ dev_conf |= (TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
+ break;
+#endif
+
+#ifdef CONFIG_USB_MUSB_OTG
+ case MUSB_OTG: /* Use PHY ID detection */
+ phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ dev_conf &= ~(TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
+ break;
+#endif
+
+ default:
+ DBG(2, "Trying to set unknown mode %i\n", musb_mode);
+ }
+
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL,
+ TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl);
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE,
+ TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena);
+ musb_writel(tbase, TUSB_DEV_CONF, dev_conf);
+
+ otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ if ((musb_mode == MUSB_PERIPHERAL) &&
+ !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS))
+ INFO("Cannot be peripheral with mini-A cable "
+ "otg_stat: %08x\n", otg_stat);
+}
+
+static inline unsigned long
+tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
+{
+ u32 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
+ unsigned long idle_timeout = 0;
+
+ /* ID pin */
+ if ((int_src & TUSB_INT_SRC_ID_STATUS_CHNG)) {
+ int default_a;
+
+ if (is_otg_enabled(musb))
+ default_a = !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS);
+ else
+ default_a = is_host_enabled(musb);
+ DBG(2, "Default-%c\n", default_a ? 'A' : 'B');
+ musb->xceiv.default_a = default_a;
+ tusb_source_power(musb, default_a);
+
+ /* Don't allow idling immediately */
+ if (default_a)
+ idle_timeout = jiffies + (HZ * 3);
+ }
+
+ /* VBUS state change */
+ if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) {
+
+ /* B-dev state machine: no vbus ~= disconnect */
+ if ((is_otg_enabled(musb) && !musb->xceiv.default_a)
+ || !is_host_enabled(musb)) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ /* ? musb_root_disconnect(musb); */
+ musb->port1_status &=
+ ~(USB_PORT_STAT_CONNECTION
+ | USB_PORT_STAT_ENABLE
+ | USB_PORT_STAT_LOW_SPEED
+ | USB_PORT_STAT_HIGH_SPEED
+ | USB_PORT_STAT_TEST
+ );
+#endif
+
+ if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) {
+ DBG(1, "Forcing disconnect (no interrupt)\n");
+ if (musb->xceiv.state != OTG_STATE_B_IDLE) {
+ /* INTR_DISCONNECT can hide... */
+ musb->xceiv.state = OTG_STATE_B_IDLE;
+ musb->int_usb |= MUSB_INTR_DISCONNECT;
+ }
+ musb->is_active = 0;
+ }
+ DBG(2, "vbus change, %s, otg %03x\n",
+ otg_state_string(musb), otg_stat);
+ idle_timeout = jiffies + (1 * HZ);
+ schedule_work(&musb->irq_work);
+
+ } else /* A-dev state machine */ {
+ DBG(2, "vbus change, %s, otg %03x\n",
+ otg_state_string(musb), otg_stat);
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_IDLE:
+ DBG(2, "Got SRP, turning on VBUS\n");
+ musb_set_vbus(musb, 1);
+
+ /* CONNECT can wake if a_wait_bcon is set */
+ if (musb->a_wait_bcon != 0)
+ musb->is_active = 0;
+ else
+ musb->is_active = 1;
+
+ /*
+				 * OPT FS A TD.4.6 needs a few seconds for
+ * A_WAIT_VRISE
+ */
+ idle_timeout = jiffies + (2 * HZ);
+
+ break;
+ case OTG_STATE_A_WAIT_VRISE:
+ /* ignore; A-session-valid < VBUS_VALID/2,
+ * we monitor this with the timer
+ */
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ /* REVISIT this irq triggers during short
+ * spikes caused by enumeration ...
+ */
+ if (musb->vbuserr_retry) {
+ musb->vbuserr_retry--;
+ tusb_source_power(musb, 1);
+ } else {
+ musb->vbuserr_retry
+ = VBUSERR_RETRY_COUNT;
+ tusb_source_power(musb, 0);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ /* OTG timer expiration */
+ if (int_src & TUSB_INT_SRC_OTG_TIMEOUT) {
+ u8 devctl;
+
+ DBG(4, "%s timer, %03x\n", otg_state_string(musb), otg_stat);
+
+ switch (musb->xceiv.state) {
+ case OTG_STATE_A_WAIT_VRISE:
+ /* VBUS has probably been valid for a while now,
+ * but may well have bounced out of range a bit
+ */
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) {
+ if ((devctl & MUSB_DEVCTL_VBUS)
+ != MUSB_DEVCTL_VBUS) {
+ DBG(2, "devctl %02x\n", devctl);
+ break;
+ }
+ musb->xceiv.state = OTG_STATE_A_WAIT_BCON;
+ musb->is_active = 0;
+ idle_timeout = jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon);
+ } else {
+ /* REVISIT report overcurrent to hub? */
+ ERR("vbus too slow, devctl %02x\n", devctl);
+ tusb_source_power(musb, 0);
+ }
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ if (musb->a_wait_bcon != 0)
+ idle_timeout = jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon);
+ break;
+ case OTG_STATE_A_SUSPEND:
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ break;
+ default:
+ break;
+ }
+ }
+ schedule_work(&musb->irq_work);
+
+ return idle_timeout;
+}
+
+static irqreturn_t tusb_interrupt(int irq, void *__hci)
+{
+ struct musb *musb = __hci;
+ void __iomem *tbase = musb->ctrl_base;
+ unsigned long flags, idle_timeout = 0;
+ u32 int_mask, int_src;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ /* Mask all interrupts to allow using both edge and level GPIO irq */
+ int_mask = musb_readl(tbase, TUSB_INT_MASK);
+ musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);
+
+ int_src = musb_readl(tbase, TUSB_INT_SRC) & ~TUSB_INT_SRC_RESERVED_BITS;
+ DBG(3, "TUSB IRQ %08x\n", int_src);
+
+ musb->int_usb = (u8) int_src;
+
+ /* Acknowledge wake-up source interrupts */
+ if (int_src & TUSB_INT_SRC_DEV_WAKEUP) {
+ u32 reg;
+ u32 i;
+
+ if (tusb_get_revision(musb) == TUSB_REV_30)
+ tusb_wbus_quirk(musb, 0);
+
+ /* there are issues re-locking the PLL on wakeup ... */
+
+ /* work around issue 8 */
+ for (i = 0xf7f7f7; i > 0xf7f7f7 - 1000; i--) {
+ musb_writel(tbase, TUSB_SCRATCH_PAD, 0);
+ musb_writel(tbase, TUSB_SCRATCH_PAD, i);
+ reg = musb_readl(tbase, TUSB_SCRATCH_PAD);
+ if (reg == i)
+ break;
+ DBG(6, "TUSB NOR not ready\n");
+ }
+
+ /* work around issue 13 (2nd half) */
+ tusb_set_clock_source(musb, 1);
+
+ reg = musb_readl(tbase, TUSB_PRCM_WAKEUP_SOURCE);
+ musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg);
+ if (reg & ~TUSB_PRCM_WNORCS) {
+ musb->is_active = 1;
+ schedule_work(&musb->irq_work);
+ }
+ DBG(3, "wake %sactive %02x\n",
+ musb->is_active ? "" : "in", reg);
+
+ /* REVISIT host side TUSB_PRCM_WHOSTDISCON, TUSB_PRCM_WBUS */
+ }
+
+ if (int_src & TUSB_INT_SRC_USB_IP_CONN)
+ del_timer(&musb_idle_timer);
+
+ /* OTG state change reports (annoyingly) not issued by Mentor core */
+ if (int_src & (TUSB_INT_SRC_VBUS_SENSE_CHNG
+ | TUSB_INT_SRC_OTG_TIMEOUT
+ | TUSB_INT_SRC_ID_STATUS_CHNG))
+ idle_timeout = tusb_otg_ints(musb, int_src, tbase);
+
+ /* TX dma callback must be handled here, RX dma callback is
+ * handled in tusb_omap_dma_cb.
+ */
+ if ((int_src & TUSB_INT_SRC_TXRX_DMA_DONE)) {
+ u32 dma_src = musb_readl(tbase, TUSB_DMA_INT_SRC);
+ u32 real_dma_src = musb_readl(tbase, TUSB_DMA_INT_MASK);
+
+ DBG(3, "DMA IRQ %08x\n", dma_src);
+ real_dma_src = ~real_dma_src & dma_src;
+ if (tusb_dma_omap() && real_dma_src) {
+ int tx_source = (real_dma_src & 0xffff);
+ int i;
+
+ for (i = 1; i <= 15; i++) {
+ if (tx_source & (1 << i)) {
+ DBG(3, "completing ep%i %s\n", i, "tx");
+ musb_dma_completion(musb, i, 1);
+ }
+ }
+ }
+ musb_writel(tbase, TUSB_DMA_INT_CLEAR, dma_src);
+ }
+
+ /* EP interrupts. In OCP mode tusb6010 mirrors the MUSB interrupts */
+ if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX)) {
+ u32 musb_src = musb_readl(tbase, TUSB_USBIP_INT_SRC);
+
+ musb_writel(tbase, TUSB_USBIP_INT_CLEAR, musb_src);
+ musb->int_rx = (((musb_src >> 16) & 0xffff) << 1);
+ musb->int_tx = (musb_src & 0xffff);
+ } else {
+ musb->int_rx = 0;
+ musb->int_tx = 0;
+ }
+
+ if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX | 0xff))
+ musb_interrupt(musb);
+
+ /* Acknowledge TUSB interrupts. Clear only non-reserved bits */
+ musb_writel(tbase, TUSB_INT_SRC_CLEAR,
+ int_src & ~TUSB_INT_MASK_RESERVED_BITS);
+
+ musb_platform_try_idle(musb, idle_timeout);
+
+ musb_writel(tbase, TUSB_INT_MASK, int_mask);
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return IRQ_HANDLED;
+}
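
The handler above masks every TUSB interrupt on entry and restores the saved mask on exit, which is what lets the single interrupt output be used with either edge- or level-triggered GPIO IRQs. A hedged sketch of that save/mask/handle/restore pattern in isolation; the helper name is illustrative, not part of the driver:

/* Illustrative pattern only: save the mask, mask everything, handle,
 * then restore, so both level- and edge-triggered GPIO lines work.
 */
static void example_masked_irq_section(struct musb *musb)
{
	void __iomem *tbase = musb->ctrl_base;
	u32 saved_mask;

	saved_mask = musb_readl(tbase, TUSB_INT_MASK);
	musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);

	/* ... read TUSB_INT_SRC, dispatch, ack via TUSB_INT_SRC_CLEAR ... */

	musb_writel(tbase, TUSB_INT_MASK, saved_mask);
}
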
+
+static int dma_off;
+
+/*
+ * Enables TUSB6010. Caller must take care of locking.
+ * REVISIT:
+ * - Check what is unnecessary in MGC_HdrcStart()
+ */
+void musb_platform_enable(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+
+ /* Setup TUSB6010 main interrupt mask. Enable all interrupts except SOF.
+ * REVISIT: Enable and deal with TUSB_INT_SRC_USB_IP_SOF */
+ musb_writel(tbase, TUSB_INT_MASK, TUSB_INT_SRC_USB_IP_SOF);
+
+ /* Setup TUSB interrupt, disable DMA and GPIO interrupts */
+ musb_writel(tbase, TUSB_USBIP_INT_MASK, 0);
+ musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
+ musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);
+
+	/* Clear all subsystem interrupts */
+ musb_writel(tbase, TUSB_USBIP_INT_CLEAR, 0x7fffffff);
+ musb_writel(tbase, TUSB_DMA_INT_CLEAR, 0x7fffffff);
+ musb_writel(tbase, TUSB_GPIO_INT_CLEAR, 0x1ff);
+
+ /* Acknowledge pending interrupt(s) */
+ musb_writel(tbase, TUSB_INT_SRC_CLEAR, ~TUSB_INT_MASK_RESERVED_BITS);
+
+	/* Only a minimum interrupt de-assertion time of 0 clock cycles and
+	 * active-low interrupt polarity seem to work reliably here */
+ musb_writel(tbase, TUSB_INT_CTRL_CONF,
+ TUSB_INT_CTRL_CONF_INT_RELCYC(0));
+
+ set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW);
+
+ /* maybe force into the Default-A OTG state machine */
+ if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT)
+ & TUSB_DEV_OTG_STAT_ID_STATUS))
+ musb_writel(tbase, TUSB_INT_SRC_SET,
+ TUSB_INT_SRC_ID_STATUS_CHNG);
+
+ if (is_dma_capable() && dma_off)
+ printk(KERN_WARNING "%s %s: dma not reactivated\n",
+ __FILE__, __func__);
+ else
+ dma_off = 1;
+}
+
+/*
+ * Disables TUSB6010. Caller must take care of locking.
+ */
+void musb_platform_disable(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+
+ /* FIXME stop DMA, IRQs, timers, ... */
+
+ /* disable all IRQs */
+ musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);
+ musb_writel(tbase, TUSB_USBIP_INT_MASK, 0x7fffffff);
+ musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
+ musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);
+
+ del_timer(&musb_idle_timer);
+
+ if (is_dma_capable() && !dma_off) {
+ printk(KERN_WARNING "%s %s: dma still active\n",
+ __FILE__, __func__);
+ dma_off = 1;
+ }
+}
+
+/*
+ * Sets up TUSB6010 CPU interface specific signals and registers
+ * Note: Settings optimized for OMAP24xx
+ */
+static void __init tusb_setup_cpu_interface(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+
+ /*
+ * Disable GPIO[5:0] pullups (used as output DMA requests)
+ * Don't disable GPIO[7:6] as they are needed for wake-up.
+ */
+ musb_writel(tbase, TUSB_PULLUP_1_CTRL, 0x0000003F);
+
+ /* Disable all pullups on NOR IF, DMAREQ0 and DMAREQ1 */
+ musb_writel(tbase, TUSB_PULLUP_2_CTRL, 0x01FFFFFF);
+
+ /* Turn GPIO[5:0] to DMAREQ[5:0] signals */
+ musb_writel(tbase, TUSB_GPIO_CONF, TUSB_GPIO_CONF_DMAREQ(0x3f));
+
+ /* Burst size 16x16 bits, all six DMA requests enabled, DMA request
+	 * de-assertion time 2 system clocks (see datasheet p. 62) */
+ musb_writel(tbase, TUSB_DMA_REQ_CONF,
+ TUSB_DMA_REQ_CONF_BURST_SIZE(2) |
+ TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) |
+ TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));
+
+ /* Set 0 wait count for synchronous burst access */
+ musb_writel(tbase, TUSB_WAIT_COUNT, 1);
+}
+
+static int __init tusb_start(struct musb *musb)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ int ret = 0;
+ unsigned long flags;
+ u32 reg;
+
+ if (musb->board_set_power)
+ ret = musb->board_set_power(1);
+ if (ret != 0) {
+ printk(KERN_ERR "tusb: Cannot enable TUSB6010\n");
+ return ret;
+ }
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (musb_readl(tbase, TUSB_PROD_TEST_RESET) !=
+ TUSB_PROD_TEST_RESET_VAL) {
+ printk(KERN_ERR "tusb: Unable to detect TUSB6010\n");
+ goto err;
+ }
+
+ ret = tusb_print_revision(musb);
+ if (ret < 2) {
+ printk(KERN_ERR "tusb: Unsupported TUSB6010 revision %i\n",
+ ret);
+ goto err;
+ }
+
+ /* The uint bit for "USB non-PDR interrupt enable" has to be 1 when
+ * NOR FLASH interface is used */
+ musb_writel(tbase, TUSB_VLYNQ_CTRL, 8);
+
+ /* Select PHY free running 60MHz as a system clock */
+ tusb_set_clock_source(musb, 1);
+
+ /* VBus valid timer 1us, disable DFT/Debug and VLYNQ clocks for
+ * power saving, enable VBus detect and session end comparators,
+ * enable IDpullup, enable VBus charging */
+ musb_writel(tbase, TUSB_PRCM_MNGMT,
+ TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(0xa) |
+ TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN |
+ TUSB_PRCM_MNGMT_OTG_SESS_END_EN |
+ TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN |
+ TUSB_PRCM_MNGMT_OTG_ID_PULLUP);
+ tusb_setup_cpu_interface(musb);
+
+ /* simplify: always sense/pullup ID pins, as if in OTG mode */
+ reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
+ reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, reg);
+
+ reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
+ reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
+ musb_writel(tbase, TUSB_PHY_OTG_CTRL, reg);
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ return 0;
+
+err:
+ spin_unlock_irqrestore(&musb->lock, flags);
+
+ if (musb->board_set_power)
+ musb->board_set_power(0);
+
+ return -ENODEV;
+}
+
+int __init musb_platform_init(struct musb *musb)
+{
+ struct platform_device *pdev;
+ struct resource *mem;
+ void __iomem *sync;
+ int ret;
+
+ pdev = to_platform_device(musb->controller);
+
+ /* dma address for async dma */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ musb->async = mem->start;
+
+ /* dma address for sync dma */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!mem) {
+ pr_debug("no sync dma resource?\n");
+ return -ENODEV;
+ }
+ musb->sync = mem->start;
+
+ sync = ioremap(mem->start, mem->end - mem->start + 1);
+ if (!sync) {
+ pr_debug("ioremap for sync failed\n");
+ return -ENOMEM;
+ }
+ musb->sync_va = sync;
+
+ /* Offsets from base: VLYNQ at 0x000, MUSB regs at 0x400,
+ * FIFOs at 0x600, TUSB at 0x800
+ */
+ musb->mregs += TUSB_BASE_OFFSET;
+
+ ret = tusb_start(musb);
+ if (ret) {
+ printk(KERN_ERR "Could not start tusb6010 (%d)\n",
+ ret);
+ return -ENODEV;
+ }
+ musb->isr = tusb_interrupt;
+
+ if (is_host_enabled(musb))
+ musb->board_set_vbus = tusb_source_power;
+ if (is_peripheral_enabled(musb))
+ musb->xceiv.set_power = tusb_draw_power;
+
+ setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
+
+ return ret;
+}
+
+int musb_platform_exit(struct musb *musb)
+{
+ del_timer_sync(&musb_idle_timer);
+
+ if (musb->board_set_power)
+ musb->board_set_power(0);
+
+ iounmap(musb->sync_va);
+
+ return 0;
+}
diff --git a/drivers/usb/musb/tusb6010.h b/drivers/usb/musb/tusb6010.h
new file mode 100644
index 000000000000..ab8c96286ce6
--- /dev/null
+++ b/drivers/usb/musb/tusb6010.h
@@ -0,0 +1,233 @@
+/*
+ * Definitions for TUSB6010 USB 2.0 OTG Dual Role controller
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Jarkko Nikula <jarkko.nikula@nokia.com>
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __TUSB6010_H__
+#define __TUSB6010_H__
+
+extern u8 tusb_get_revision(struct musb *musb);
+
+#ifdef CONFIG_USB_TUSB6010
+#define musb_in_tusb() 1
+#else
+#define musb_in_tusb() 0
+#endif
+
+#ifdef CONFIG_USB_TUSB_OMAP_DMA
+#define tusb_dma_omap() 1
+#else
+#define tusb_dma_omap() 0
+#endif
+
+/* VLYNQ control register. 32-bit at offset 0x000 */
+#define TUSB_VLYNQ_CTRL 0x004
+
+/* Mentor Graphics OTG core registers. 8-, 16- and 32-bit at offset 0x400 */
+#define TUSB_BASE_OFFSET 0x400
+
+/* FIFO registers 32-bit at offset 0x600 */
+#define TUSB_FIFO_BASE 0x600
+
+/* Device System & Control registers. 32-bit at offset 0x800 */
+#define TUSB_SYS_REG_BASE 0x800
+
+#define TUSB_DEV_CONF (TUSB_SYS_REG_BASE + 0x000)
+#define TUSB_DEV_CONF_USB_HOST_MODE (1 << 16)
+#define TUSB_DEV_CONF_PROD_TEST_MODE (1 << 15)
+#define TUSB_DEV_CONF_SOFT_ID (1 << 1)
+#define TUSB_DEV_CONF_ID_SEL (1 << 0)
+
+#define TUSB_PHY_OTG_CTRL_ENABLE (TUSB_SYS_REG_BASE + 0x004)
+#define TUSB_PHY_OTG_CTRL (TUSB_SYS_REG_BASE + 0x008)
+#define TUSB_PHY_OTG_CTRL_WRPROTECT (0xa5 << 24)
+#define TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP (1 << 23)
+#define TUSB_PHY_OTG_CTRL_OTG_VBUS_DET_EN (1 << 19)
+#define TUSB_PHY_OTG_CTRL_OTG_SESS_END_EN (1 << 18)
+#define TUSB_PHY_OTG_CTRL_TESTM2 (1 << 17)
+#define TUSB_PHY_OTG_CTRL_TESTM1 (1 << 16)
+#define TUSB_PHY_OTG_CTRL_TESTM0 (1 << 15)
+#define TUSB_PHY_OTG_CTRL_TX_DATA2 (1 << 14)
+#define TUSB_PHY_OTG_CTRL_TX_GZ2 (1 << 13)
+#define TUSB_PHY_OTG_CTRL_TX_ENABLE2 (1 << 12)
+#define TUSB_PHY_OTG_CTRL_DM_PULLDOWN (1 << 11)
+#define TUSB_PHY_OTG_CTRL_DP_PULLDOWN (1 << 10)
+#define TUSB_PHY_OTG_CTRL_OSC_EN (1 << 9)
+#define TUSB_PHY_OTG_CTRL_PHYREF_CLKSEL(v) (((v) & 3) << 7)
+#define TUSB_PHY_OTG_CTRL_PD (1 << 6)
+#define TUSB_PHY_OTG_CTRL_PLL_ON (1 << 5)
+#define TUSB_PHY_OTG_CTRL_EXT_RPU (1 << 4)
+#define TUSB_PHY_OTG_CTRL_PWR_GOOD (1 << 3)
+#define TUSB_PHY_OTG_CTRL_RESET (1 << 2)
+#define TUSB_PHY_OTG_CTRL_SUSPENDM (1 << 1)
+#define TUSB_PHY_OTG_CTRL_CLK_MODE (1 << 0)
+
+/* OTG status register */
+#define TUSB_DEV_OTG_STAT (TUSB_SYS_REG_BASE + 0x00c)
+#define TUSB_DEV_OTG_STAT_PWR_CLK_GOOD (1 << 8)
+#define TUSB_DEV_OTG_STAT_SESS_END (1 << 7)
+#define TUSB_DEV_OTG_STAT_SESS_VALID (1 << 6)
+#define TUSB_DEV_OTG_STAT_VBUS_VALID (1 << 5)
+#define TUSB_DEV_OTG_STAT_VBUS_SENSE (1 << 4)
+#define TUSB_DEV_OTG_STAT_ID_STATUS (1 << 3)
+#define TUSB_DEV_OTG_STAT_HOST_DISCON (1 << 2)
+#define TUSB_DEV_OTG_STAT_LINE_STATE (3 << 0)
+#define TUSB_DEV_OTG_STAT_DP_ENABLE (1 << 1)
+#define TUSB_DEV_OTG_STAT_DM_ENABLE (1 << 0)
+
+#define TUSB_DEV_OTG_TIMER (TUSB_SYS_REG_BASE + 0x010)
+# define TUSB_DEV_OTG_TIMER_ENABLE (1 << 31)
+# define TUSB_DEV_OTG_TIMER_VAL(v) ((v) & 0x07ffffff)
+#define TUSB_PRCM_REV (TUSB_SYS_REG_BASE + 0x014)
+
+/* PRCM configuration register */
+#define TUSB_PRCM_CONF (TUSB_SYS_REG_BASE + 0x018)
+#define TUSB_PRCM_CONF_SFW_CPEN (1 << 24)
+#define TUSB_PRCM_CONF_SYS_CLKSEL(v) (((v) & 3) << 16)
+
+/* PRCM management register */
+#define TUSB_PRCM_MNGMT (TUSB_SYS_REG_BASE + 0x01c)
+#define TUSB_PRCM_MNGMT_SRP_FIX_TIMER(v) (((v) & 0xf) << 25)
+#define TUSB_PRCM_MNGMT_SRP_FIX_EN (1 << 24)
+#define TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(v) (((v) & 0xf) << 20)
+#define TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN (1 << 19)
+#define TUSB_PRCM_MNGMT_DFT_CLK_DIS (1 << 18)
+#define TUSB_PRCM_MNGMT_VLYNQ_CLK_DIS (1 << 17)
+#define TUSB_PRCM_MNGMT_OTG_SESS_END_EN (1 << 10)
+#define TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN (1 << 9)
+#define TUSB_PRCM_MNGMT_OTG_ID_PULLUP (1 << 8)
+#define TUSB_PRCM_MNGMT_15_SW_EN (1 << 4)
+#define TUSB_PRCM_MNGMT_33_SW_EN (1 << 3)
+#define TUSB_PRCM_MNGMT_5V_CPEN (1 << 2)
+#define TUSB_PRCM_MNGMT_PM_IDLE (1 << 1)
+#define TUSB_PRCM_MNGMT_DEV_IDLE (1 << 0)
+
+/* Wake-up source clear and mask registers */
+#define TUSB_PRCM_WAKEUP_SOURCE (TUSB_SYS_REG_BASE + 0x020)
+#define TUSB_PRCM_WAKEUP_CLEAR (TUSB_SYS_REG_BASE + 0x028)
+#define TUSB_PRCM_WAKEUP_MASK (TUSB_SYS_REG_BASE + 0x02c)
+#define TUSB_PRCM_WAKEUP_RESERVED_BITS (0xffffe << 13)
+#define TUSB_PRCM_WGPIO_7 (1 << 12)
+#define TUSB_PRCM_WGPIO_6 (1 << 11)
+#define TUSB_PRCM_WGPIO_5 (1 << 10)
+#define TUSB_PRCM_WGPIO_4 (1 << 9)
+#define TUSB_PRCM_WGPIO_3 (1 << 8)
+#define TUSB_PRCM_WGPIO_2 (1 << 7)
+#define TUSB_PRCM_WGPIO_1 (1 << 6)
+#define TUSB_PRCM_WGPIO_0 (1 << 5)
+#define TUSB_PRCM_WHOSTDISCON (1 << 4) /* Host disconnect */
+#define TUSB_PRCM_WBUS (1 << 3) /* USB bus resume */
+#define TUSB_PRCM_WNORCS (1 << 2) /* NOR chip select */
+#define TUSB_PRCM_WVBUS (1 << 1) /* OTG PHY VBUS */
+#define TUSB_PRCM_WID (1 << 0) /* OTG PHY ID detect */
+
+#define TUSB_PULLUP_1_CTRL (TUSB_SYS_REG_BASE + 0x030)
+#define TUSB_PULLUP_2_CTRL (TUSB_SYS_REG_BASE + 0x034)
+#define TUSB_INT_CTRL_REV (TUSB_SYS_REG_BASE + 0x038)
+#define TUSB_INT_CTRL_CONF (TUSB_SYS_REG_BASE + 0x03c)
+#define TUSB_USBIP_INT_SRC (TUSB_SYS_REG_BASE + 0x040)
+#define TUSB_USBIP_INT_SET (TUSB_SYS_REG_BASE + 0x044)
+#define TUSB_USBIP_INT_CLEAR (TUSB_SYS_REG_BASE + 0x048)
+#define TUSB_USBIP_INT_MASK (TUSB_SYS_REG_BASE + 0x04c)
+#define TUSB_DMA_INT_SRC (TUSB_SYS_REG_BASE + 0x050)
+#define TUSB_DMA_INT_SET (TUSB_SYS_REG_BASE + 0x054)
+#define TUSB_DMA_INT_CLEAR (TUSB_SYS_REG_BASE + 0x058)
+#define TUSB_DMA_INT_MASK (TUSB_SYS_REG_BASE + 0x05c)
+#define TUSB_GPIO_INT_SRC (TUSB_SYS_REG_BASE + 0x060)
+#define TUSB_GPIO_INT_SET (TUSB_SYS_REG_BASE + 0x064)
+#define TUSB_GPIO_INT_CLEAR (TUSB_SYS_REG_BASE + 0x068)
+#define TUSB_GPIO_INT_MASK (TUSB_SYS_REG_BASE + 0x06c)
+
+/* NOR flash interrupt source registers */
+#define TUSB_INT_SRC (TUSB_SYS_REG_BASE + 0x070)
+#define TUSB_INT_SRC_SET (TUSB_SYS_REG_BASE + 0x074)
+#define TUSB_INT_SRC_CLEAR (TUSB_SYS_REG_BASE + 0x078)
+#define TUSB_INT_MASK (TUSB_SYS_REG_BASE + 0x07c)
+#define TUSB_INT_SRC_TXRX_DMA_DONE (1 << 24)
+#define TUSB_INT_SRC_USB_IP_CORE (1 << 17)
+#define TUSB_INT_SRC_OTG_TIMEOUT (1 << 16)
+#define TUSB_INT_SRC_VBUS_SENSE_CHNG (1 << 15)
+#define TUSB_INT_SRC_ID_STATUS_CHNG (1 << 14)
+#define TUSB_INT_SRC_DEV_WAKEUP (1 << 13)
+#define TUSB_INT_SRC_DEV_READY (1 << 12)
+#define TUSB_INT_SRC_USB_IP_TX (1 << 9)
+#define TUSB_INT_SRC_USB_IP_RX (1 << 8)
+#define TUSB_INT_SRC_USB_IP_VBUS_ERR (1 << 7)
+#define TUSB_INT_SRC_USB_IP_VBUS_REQ (1 << 6)
+#define TUSB_INT_SRC_USB_IP_DISCON (1 << 5)
+#define TUSB_INT_SRC_USB_IP_CONN (1 << 4)
+#define TUSB_INT_SRC_USB_IP_SOF (1 << 3)
+#define TUSB_INT_SRC_USB_IP_RST_BABBLE (1 << 2)
+#define TUSB_INT_SRC_USB_IP_RESUME (1 << 1)
+#define TUSB_INT_SRC_USB_IP_SUSPEND (1 << 0)
+
+/* NOR flash interrupt registers reserved bits. Must be written as 0 */
+#define TUSB_INT_MASK_RESERVED_17 (0x3fff << 17)
+#define TUSB_INT_MASK_RESERVED_13 (1 << 13)
+#define TUSB_INT_MASK_RESERVED_8 (0xf << 8)
+#define TUSB_INT_SRC_RESERVED_26 (0x1f << 26)
+#define TUSB_INT_SRC_RESERVED_18 (0x3f << 18)
+#define TUSB_INT_SRC_RESERVED_10 (0x03 << 10)
+
+/* Reserved bits for NOR flash interrupt mask and clear register */
+#define TUSB_INT_MASK_RESERVED_BITS (TUSB_INT_MASK_RESERVED_17 | \
+ TUSB_INT_MASK_RESERVED_13 | \
+ TUSB_INT_MASK_RESERVED_8)
+
+/* Reserved bits for NOR flash interrupt status register */
+#define TUSB_INT_SRC_RESERVED_BITS (TUSB_INT_SRC_RESERVED_26 | \
+ TUSB_INT_SRC_RESERVED_18 | \
+ TUSB_INT_SRC_RESERVED_10)
+
+#define TUSB_GPIO_REV (TUSB_SYS_REG_BASE + 0x080)
+#define TUSB_GPIO_CONF (TUSB_SYS_REG_BASE + 0x084)
+#define TUSB_DMA_CTRL_REV (TUSB_SYS_REG_BASE + 0x100)
+#define TUSB_DMA_REQ_CONF (TUSB_SYS_REG_BASE + 0x104)
+#define TUSB_EP0_CONF (TUSB_SYS_REG_BASE + 0x108)
+#define TUSB_DMA_EP_MAP (TUSB_SYS_REG_BASE + 0x148)
+
+/* Offsets from each ep base register */
+#define TUSB_EP_TX_OFFSET 0x10c /* EP_IN in docs */
+#define TUSB_EP_RX_OFFSET 0x14c /* EP_OUT in docs */
+#define TUSB_EP_MAX_PACKET_SIZE_OFFSET 0x188
+
+#define TUSB_WAIT_COUNT (TUSB_SYS_REG_BASE + 0x1c8)
+#define TUSB_SCRATCH_PAD (TUSB_SYS_REG_BASE + 0x1c4)
+#define TUSB_PROD_TEST_RESET (TUSB_SYS_REG_BASE + 0x1d8)
+
+/* Device System & Control register bitfields */
+#define TUSB_INT_CTRL_CONF_INT_RELCYC(v) (((v) & 0x7) << 18)
+#define TUSB_INT_CTRL_CONF_INT_POLARITY (1 << 17)
+#define TUSB_INT_CTRL_CONF_INT_MODE (1 << 16)
+#define TUSB_GPIO_CONF_DMAREQ(v) (((v) & 0x3f) << 24)
+#define TUSB_DMA_REQ_CONF_BURST_SIZE(v) (((v) & 3) << 26)
+#define TUSB_DMA_REQ_CONF_DMA_REQ_EN(v) (((v) & 0x3f) << 20)
+#define TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(v) (((v) & 0xf) << 16)
+#define TUSB_EP0_CONFIG_SW_EN (1 << 8)
+#define TUSB_EP0_CONFIG_DIR_TX (1 << 7)
+#define TUSB_EP0_CONFIG_XFR_SIZE(v) ((v) & 0x7f)
+#define TUSB_EP_CONFIG_SW_EN (1 << 31)
+#define TUSB_EP_CONFIG_XFR_SIZE(v) ((v) & 0x7fffffff)
+#define TUSB_PROD_TEST_RESET_VAL 0xa596
+#define TUSB_EP_FIFO(ep) (TUSB_FIFO_BASE + (ep) * 0x20)
+
+#define TUSB_DIDR1_LO (TUSB_SYS_REG_BASE + 0x1f8)
+#define TUSB_DIDR1_HI (TUSB_SYS_REG_BASE + 0x1fc)
+#define TUSB_DIDR1_HI_CHIP_REV(v) (((v) >> 17) & 0xf)
+#define TUSB_DIDR1_HI_REV_20 0
+#define TUSB_DIDR1_HI_REV_30 1
+#define TUSB_DIDR1_HI_REV_31 2
+
+#define TUSB_REV_10 0x10
+#define TUSB_REV_20 0x20
+#define TUSB_REV_30 0x30
+#define TUSB_REV_31 0x31
+
+#endif /* __TUSB6010_H__ */
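
TUSB_EP_FIFO() places each endpoint FIFO in its own 0x20-byte window above TUSB_FIFO_BASE. A stand-alone check of that arithmetic with the two macros re-declared locally, for illustration only:

#include <stdio.h>

#define TUSB_FIFO_BASE		0x600
#define TUSB_EP_FIFO(ep)	(TUSB_FIFO_BASE + (ep) * 0x20)

int main(void)
{
	int ep;

	/* ep0 -> 0x600, ep1 -> 0x620, ep2 -> 0x640, ep3 -> 0x660 */
	for (ep = 0; ep < 4; ep++)
		printf("ep%d FIFO offset: 0x%03x\n",
		       ep, (unsigned int)TUSB_EP_FIFO(ep));
	return 0;
}
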
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
new file mode 100644
index 000000000000..52f7f29cebda
--- /dev/null
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -0,0 +1,719 @@
+/*
+ * TUSB6010 USB 2.0 OTG Dual Role controller OMAP DMA interface
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/usb.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <asm/arch/dma.h>
+#include <asm/arch/mux.h>
+
+#include "musb_core.h"
+
+#define to_chdat(c) ((struct tusb_omap_dma_ch *)(c)->private_data)
+
+#define MAX_DMAREQ 5 /* REVISIT: Really 6, but req5 not OK */
+
+struct tusb_omap_dma_ch {
+ struct musb *musb;
+ void __iomem *tbase;
+ unsigned long phys_offset;
+ int epnum;
+ u8 tx;
+ struct musb_hw_ep *hw_ep;
+
+ int ch;
+ s8 dmareq;
+ s8 sync_dev;
+
+ struct tusb_omap_dma *tusb_dma;
+
+ void __iomem *dma_addr;
+
+ u32 len;
+ u16 packet_sz;
+ u16 transfer_packet_sz;
+ u32 transfer_len;
+ u32 completed_len;
+};
+
+struct tusb_omap_dma {
+ struct dma_controller controller;
+ struct musb *musb;
+ void __iomem *tbase;
+
+ int ch;
+ s8 dmareq;
+ s8 sync_dev;
+ unsigned multichannel:1;
+};
+
+static int tusb_omap_dma_start(struct dma_controller *c)
+{
+ struct tusb_omap_dma *tusb_dma;
+
+ tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+
+ /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */
+
+ return 0;
+}
+
+static int tusb_omap_dma_stop(struct dma_controller *c)
+{
+ struct tusb_omap_dma *tusb_dma;
+
+ tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+
+ /* DBG(3, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */
+
+ return 0;
+}
+
+/*
+ * Allocate dmareq0 to the current channel unless it's already taken
+ */
+static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+ u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+
+ if (reg != 0) {
+ DBG(3, "ep%i dmareq0 is busy for ep%i\n",
+ chdat->epnum, reg & 0xf);
+ return -EAGAIN;
+ }
+
+ if (chdat->tx)
+ reg = (1 << 4) | chdat->epnum;
+ else
+ reg = chdat->epnum;
+
+ musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
+
+ return 0;
+}
+
+static inline void tusb_omap_free_shared_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+ u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+
+ if ((reg & 0xf) != chdat->epnum) {
+ printk(KERN_ERR "ep%i trying to release dmareq0 for ep%i\n",
+ chdat->epnum, reg & 0xf);
+ return;
+ }
+ musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, 0);
+}
+
+/*
+ * See also musb_dma_completion() in musb_core.c and musb_g_[tx|rx]() in
+ * musb_gadget.c.
+ */
+static void tusb_omap_dma_cb(int lch, u16 ch_status, void *data)
+{
+ struct dma_channel *channel = (struct dma_channel *)data;
+ struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+ struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
+ struct musb *musb = chdat->musb;
+ struct musb_hw_ep *hw_ep = chdat->hw_ep;
+ void __iomem *ep_conf = hw_ep->conf;
+ void __iomem *mbase = musb->mregs;
+ unsigned long remaining, flags, pio;
+ int ch;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ if (tusb_dma->multichannel)
+ ch = chdat->ch;
+ else
+ ch = tusb_dma->ch;
+
+ if (ch_status != OMAP_DMA_BLOCK_IRQ)
+ printk(KERN_ERR "TUSB DMA error status: %i\n", ch_status);
+
+ DBG(3, "ep%i %s dma callback ch: %i status: %x\n",
+ chdat->epnum, chdat->tx ? "tx" : "rx",
+ ch, ch_status);
+
+ if (chdat->tx)
+ remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
+ else
+ remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);
+
+ remaining = TUSB_EP_CONFIG_XFR_SIZE(remaining);
+
+ /* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */
+ if (unlikely(remaining > chdat->transfer_len)) {
+ DBG(2, "Corrupt %s dma ch%i XFR_SIZE: 0x%08lx\n",
+ chdat->tx ? "tx" : "rx", chdat->ch,
+ remaining);
+ remaining = 0;
+ }
+
+ channel->actual_len = chdat->transfer_len - remaining;
+ pio = chdat->len - channel->actual_len;
+
+ DBG(3, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len);
+
+ /* Transfer remaining 1 - 31 bytes */
+ if (pio > 0 && pio < 32) {
+ u8 *buf;
+
+ DBG(3, "Using PIO for remaining %lu bytes\n", pio);
+ buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len;
+ if (chdat->tx) {
+ dma_cache_maint(phys_to_virt((u32)chdat->dma_addr),
+ chdat->transfer_len, DMA_TO_DEVICE);
+ musb_write_fifo(hw_ep, pio, buf);
+ } else {
+ musb_read_fifo(hw_ep, pio, buf);
+ dma_cache_maint(phys_to_virt((u32)chdat->dma_addr),
+ chdat->transfer_len, DMA_FROM_DEVICE);
+ }
+ channel->actual_len += pio;
+ }
+
+ if (!tusb_dma->multichannel)
+ tusb_omap_free_shared_dmareq(chdat);
+
+ channel->status = MUSB_DMA_STATUS_FREE;
+
+ /* Handle only RX callbacks here. TX callbacks must be handled based
+ * on the TUSB DMA status interrupt.
+ * REVISIT: Use both TUSB DMA status interrupt and OMAP DMA callback
+ * interrupt for RX and TX.
+ */
+ if (!chdat->tx)
+ musb_dma_completion(musb, chdat->epnum, chdat->tx);
+
+ /* We must terminate short tx transfers manually by setting TXPKTRDY.
+ * REVISIT: This same problem may occur with other MUSB dma as well.
+ * Easy to test with g_ether by pinging the MUSB board with ping -s54.
+ */
+ if ((chdat->transfer_len < chdat->packet_sz)
+ || (chdat->transfer_len % chdat->packet_sz != 0)) {
+ u16 csr;
+
+ if (chdat->tx) {
+ DBG(3, "terminating short tx packet\n");
+ musb_ep_select(mbase, chdat->epnum);
+ csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
+ csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY
+ | MUSB_TXCSR_P_WZC_BITS;
+ musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
+ }
+ }
+
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
+ u8 rndis_mode, dma_addr_t dma_addr, u32 len)
+{
+ struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+ struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
+ struct musb *musb = chdat->musb;
+ struct musb_hw_ep *hw_ep = chdat->hw_ep;
+ void __iomem *mbase = musb->mregs;
+ void __iomem *ep_conf = hw_ep->conf;
+ dma_addr_t fifo = hw_ep->fifo_sync;
+ struct omap_dma_channel_params dma_params;
+ u32 dma_remaining;
+ int src_burst, dst_burst;
+ u16 csr;
+ int ch;
+ s8 dmareq;
+ s8 sync_dev;
+
+ if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz))
+ return false;
+
+ /*
+ * HW issue #10: Async dma will eventually corrupt the XFR_SIZE
+ * register which will cause missed DMA interrupt. We could try to
+ * use a timer for the callback, but it is unsafe as the XFR_SIZE
+ * register is corrupt, and we won't know if the DMA worked.
+ */
+ if (dma_addr & 0x2)
+ return false;
+
+ /*
+ * Because of HW issue #10, it seems like mixing sync DMA and async
+ * PIO access can confuse the DMA. Make sure XFR_SIZE is reset before
+ * using the channel for DMA.
+ */
+ if (chdat->tx)
+ dma_remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
+ else
+ dma_remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);
+
+ dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining);
+ if (dma_remaining) {
+ DBG(2, "Busy %s dma ch%i, not using: %08x\n",
+ chdat->tx ? "tx" : "rx", chdat->ch,
+ dma_remaining);
+ return false;
+ }
+
+ chdat->transfer_len = len & ~0x1f;
+
+ if (len < packet_sz)
+ chdat->transfer_packet_sz = chdat->transfer_len;
+ else
+ chdat->transfer_packet_sz = packet_sz;
+
+ if (tusb_dma->multichannel) {
+ ch = chdat->ch;
+ dmareq = chdat->dmareq;
+ sync_dev = chdat->sync_dev;
+ } else {
+ if (tusb_omap_use_shared_dmareq(chdat) != 0) {
+ DBG(3, "could not get dma for ep%i\n", chdat->epnum);
+ return false;
+ }
+ if (tusb_dma->ch < 0) {
+ /* REVISIT: This should get blocked earlier, happens
+ * with MSC ErrorRecoveryTest
+ */
+ WARN_ON(1);
+ return false;
+ }
+
+ ch = tusb_dma->ch;
+ dmareq = tusb_dma->dmareq;
+ sync_dev = tusb_dma->sync_dev;
+ omap_set_dma_callback(ch, tusb_omap_dma_cb, channel);
+ }
+
+ chdat->packet_sz = packet_sz;
+ chdat->len = len;
+ channel->actual_len = 0;
+ chdat->dma_addr = (void __iomem *)dma_addr;
+ channel->status = MUSB_DMA_STATUS_BUSY;
+
+ /* Since we're recycling dma areas, we need to clean or invalidate */
+ if (chdat->tx)
+ dma_cache_maint(phys_to_virt(dma_addr), len, DMA_TO_DEVICE);
+ else
+ dma_cache_maint(phys_to_virt(dma_addr), len, DMA_FROM_DEVICE);
+
+ /* Use 16-bit transfer if dma_addr is not 32-bit aligned */
+ if ((dma_addr & 0x3) == 0) {
+ dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
+ dma_params.elem_count = 8; /* Elements in frame */
+ } else {
+ dma_params.data_type = OMAP_DMA_DATA_TYPE_S16;
+ dma_params.elem_count = 16; /* Elements in frame */
+ fifo = hw_ep->fifo_async;
+ }
+
+ dma_params.frame_count = chdat->transfer_len / 32; /* Burst sz frame */
+
+ DBG(3, "ep%i %s dma ch%i dma: %08x len: %u(%u) packet_sz: %i(%i)\n",
+ chdat->epnum, chdat->tx ? "tx" : "rx",
+ ch, dma_addr, chdat->transfer_len, len,
+ chdat->transfer_packet_sz, packet_sz);
+
+ /*
+ * Prepare omap DMA for transfer
+ */
+ if (chdat->tx) {
+ dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;
+ dma_params.src_start = (unsigned long)dma_addr;
+ dma_params.src_ei = 0;
+ dma_params.src_fi = 0;
+
+ dma_params.dst_amode = OMAP_DMA_AMODE_DOUBLE_IDX;
+ dma_params.dst_start = (unsigned long)fifo;
+ dma_params.dst_ei = 1;
+ dma_params.dst_fi = -31; /* Loop 32 byte window */
+
+ dma_params.trigger = sync_dev;
+ dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
+ dma_params.src_or_dst_synch = 0; /* Dest sync */
+
+ src_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 read */
+ dst_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 write */
+ } else {
+ dma_params.src_amode = OMAP_DMA_AMODE_DOUBLE_IDX;
+ dma_params.src_start = (unsigned long)fifo;
+ dma_params.src_ei = 1;
+ dma_params.src_fi = -31; /* Loop 32 byte window */
+
+ dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;
+ dma_params.dst_start = (unsigned long)dma_addr;
+ dma_params.dst_ei = 0;
+ dma_params.dst_fi = 0;
+
+ dma_params.trigger = sync_dev;
+ dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
+ dma_params.src_or_dst_synch = 1; /* Source sync */
+
+ src_burst = OMAP_DMA_DATA_BURST_8; /* 8x32 read */
+ dst_burst = OMAP_DMA_DATA_BURST_16; /* 16x32 write */
+ }
+
+ DBG(3, "ep%i %s using %i-bit %s dma from 0x%08lx to 0x%08lx\n",
+ chdat->epnum, chdat->tx ? "tx" : "rx",
+ (dma_params.data_type == OMAP_DMA_DATA_TYPE_S32) ? 32 : 16,
+ ((dma_addr & 0x3) == 0) ? "sync" : "async",
+ dma_params.src_start, dma_params.dst_start);
+
+ omap_set_dma_params(ch, &dma_params);
+ omap_set_dma_src_burst_mode(ch, src_burst);
+ omap_set_dma_dest_burst_mode(ch, dst_burst);
+ omap_set_dma_write_mode(ch, OMAP_DMA_WRITE_LAST_NON_POSTED);
+
+ /*
+ * Prepare MUSB for DMA transfer
+ */
+ if (chdat->tx) {
+ musb_ep_select(mbase, chdat->epnum);
+ csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
+ csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB
+ | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
+ csr &= ~MUSB_TXCSR_P_UNDERRUN;
+ musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
+ } else {
+ musb_ep_select(mbase, chdat->epnum);
+ csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_DMAENAB;
+ csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE);
+ musb_writew(hw_ep->regs, MUSB_RXCSR,
+ csr | MUSB_RXCSR_P_WZC_BITS);
+ }
+
+ /*
+ * Start DMA transfer
+ */
+ omap_start_dma(ch);
+
+ if (chdat->tx) {
+ /* Send transfer_packet_sz packets at a time */
+ musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
+ chdat->transfer_packet_sz);
+
+ musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
+ TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
+ } else {
+ /* Receive transfer_packet_sz packets at a time */
+ musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
+ chdat->transfer_packet_sz << 16);
+
+ musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
+ TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
+ }
+
+ return true;
+}
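
tusb_omap_dma_program() only DMAs whole 32-byte frames (transfer_len = len & ~0x1f) and leaves any 1-31 byte tail for the PIO path in the callback above. A stand-alone worked example of that split, with an arbitrary length chosen for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int len = 1000;			/* requested bytes */
	unsigned int transfer_len = len & ~0x1fu;	/* moved by DMA */
	unsigned int frames = transfer_len / 32;	/* 32-byte frames */
	unsigned int pio_tail = len - transfer_len;	/* finished by PIO */

	printf("len=%u: DMA %u bytes in %u frames, PIO tail %u bytes\n",
	       len, transfer_len, frames, pio_tail);
	return 0;
}
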
+
+static int tusb_omap_dma_abort(struct dma_channel *channel)
+{
+ struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+ struct tusb_omap_dma *tusb_dma = chdat->tusb_dma;
+
+ if (!tusb_dma->multichannel) {
+ if (tusb_dma->ch >= 0) {
+ omap_stop_dma(tusb_dma->ch);
+ omap_free_dma(tusb_dma->ch);
+ tusb_dma->ch = -1;
+ }
+
+ tusb_dma->dmareq = -1;
+ tusb_dma->sync_dev = -1;
+ }
+
+ channel->status = MUSB_DMA_STATUS_FREE;
+
+ return 0;
+}
+
+static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+ u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+ int i, dmareq_nr = -1;
+
+ const int sync_dev[6] = {
+ OMAP24XX_DMA_EXT_DMAREQ0,
+ OMAP24XX_DMA_EXT_DMAREQ1,
+ OMAP242X_DMA_EXT_DMAREQ2,
+ OMAP242X_DMA_EXT_DMAREQ3,
+ OMAP242X_DMA_EXT_DMAREQ4,
+ OMAP242X_DMA_EXT_DMAREQ5,
+ };
+
+ for (i = 0; i < MAX_DMAREQ; i++) {
+ int cur = (reg & (0xf << (i * 5))) >> (i * 5);
+ if (cur == 0) {
+ dmareq_nr = i;
+ break;
+ }
+ }
+
+ if (dmareq_nr == -1)
+ return -EAGAIN;
+
+ reg |= (chdat->epnum << (dmareq_nr * 5));
+ if (chdat->tx)
+ reg |= ((1 << 4) << (dmareq_nr * 5));
+ musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
+
+ chdat->dmareq = dmareq_nr;
+ chdat->sync_dev = sync_dev[chdat->dmareq];
+
+ return 0;
+}
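
tusb_omap_dma_allocate_dmareq() packs one 5-bit descriptor per DMA request line into TUSB_DMA_EP_MAP: the endpoint number in the low four bits of the slot and the TX flag in bit 4, so dmareq n lives at bit offset n * 5. A stand-alone encode/decode illustration; the example values are arbitrary:

#include <stdio.h>

int main(void)
{
	unsigned int ep_map = 0;
	int dmareq = 2, epnum = 3, tx = 1;

	/* encode: endpoint in bits 3:0 of the slot, TX flag in bit 4 */
	ep_map |= (unsigned int)epnum << (dmareq * 5);
	if (tx)
		ep_map |= (unsigned int)(1 << 4) << (dmareq * 5);

	/* decode the same slot back */
	printf("EP_MAP=0x%08x: dmareq%d -> ep%u %s\n", ep_map, dmareq,
	       (ep_map >> (dmareq * 5)) & 0xf,
	       ((ep_map >> (dmareq * 5)) & 0x10) ? "tx" : "rx");
	return 0;
}
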
+
+static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat)
+{
+ u32 reg;
+
+ if (!chdat || chdat->dmareq < 0)
+ return;
+
+ reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
+ reg &= ~(0x1f << (chdat->dmareq * 5));
+ musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);
+
+ chdat->dmareq = -1;
+ chdat->sync_dev = -1;
+}
+
+static struct dma_channel *dma_channel_pool[MAX_DMAREQ];
+
+static struct dma_channel *
+tusb_omap_dma_allocate(struct dma_controller *c,
+ struct musb_hw_ep *hw_ep,
+ u8 tx)
+{
+ int ret, i;
+ const char *dev_name;
+ struct tusb_omap_dma *tusb_dma;
+ struct musb *musb;
+ void __iomem *tbase;
+ struct dma_channel *channel = NULL;
+ struct tusb_omap_dma_ch *chdat = NULL;
+ u32 reg;
+
+ tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+ musb = tusb_dma->musb;
+ tbase = musb->ctrl_base;
+
+ reg = musb_readl(tbase, TUSB_DMA_INT_MASK);
+ if (tx)
+ reg &= ~(1 << hw_ep->epnum);
+ else
+ reg &= ~(1 << (hw_ep->epnum + 15));
+ musb_writel(tbase, TUSB_DMA_INT_MASK, reg);
+
+ /* REVISIT: Why does dmareq5 not work? */
+ if (hw_ep->epnum == 0) {
+ DBG(3, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx");
+ return NULL;
+ }
+
+ for (i = 0; i < MAX_DMAREQ; i++) {
+ struct dma_channel *ch = dma_channel_pool[i];
+ if (ch->status == MUSB_DMA_STATUS_UNKNOWN) {
+ ch->status = MUSB_DMA_STATUS_FREE;
+ channel = ch;
+ chdat = ch->private_data;
+ break;
+ }
+ }
+
+ if (!channel)
+ return NULL;
+
+ if (tx) {
+ chdat->tx = 1;
+ dev_name = "TUSB transmit";
+ } else {
+ chdat->tx = 0;
+ dev_name = "TUSB receive";
+ }
+
+ chdat->musb = tusb_dma->musb;
+ chdat->tbase = tusb_dma->tbase;
+ chdat->hw_ep = hw_ep;
+ chdat->epnum = hw_ep->epnum;
+ chdat->dmareq = -1;
+ chdat->completed_len = 0;
+ chdat->tusb_dma = tusb_dma;
+
+ channel->max_len = 0x7fffffff;
+ channel->desired_mode = 0;
+ channel->actual_len = 0;
+
+ if (tusb_dma->multichannel) {
+ ret = tusb_omap_dma_allocate_dmareq(chdat);
+ if (ret != 0)
+ goto free_dmareq;
+
+ ret = omap_request_dma(chdat->sync_dev, dev_name,
+ tusb_omap_dma_cb, channel, &chdat->ch);
+ if (ret != 0)
+ goto free_dmareq;
+ } else if (tusb_dma->ch == -1) {
+ tusb_dma->dmareq = 0;
+ tusb_dma->sync_dev = OMAP24XX_DMA_EXT_DMAREQ0;
+
+ /* Callback data gets set later in the shared dmareq case */
+ ret = omap_request_dma(tusb_dma->sync_dev, "TUSB shared",
+ tusb_omap_dma_cb, NULL, &tusb_dma->ch);
+ if (ret != 0)
+ goto free_dmareq;
+
+ chdat->dmareq = -1;
+ chdat->ch = -1;
+ }
+
+ DBG(3, "ep%i %s dma: %s dma%i dmareq%i sync%i\n",
+ chdat->epnum,
+ chdat->tx ? "tx" : "rx",
+ chdat->ch >= 0 ? "dedicated" : "shared",
+ chdat->ch >= 0 ? chdat->ch : tusb_dma->ch,
+ chdat->dmareq >= 0 ? chdat->dmareq : tusb_dma->dmareq,
+ chdat->sync_dev >= 0 ? chdat->sync_dev : tusb_dma->sync_dev);
+
+ return channel;
+
+free_dmareq:
+ tusb_omap_dma_free_dmareq(chdat);
+
+ DBG(3, "ep%i: Could not get a DMA channel\n", chdat->epnum);
+ channel->status = MUSB_DMA_STATUS_UNKNOWN;
+
+ return NULL;
+}
+
+static void tusb_omap_dma_release(struct dma_channel *channel)
+{
+ struct tusb_omap_dma_ch *chdat = to_chdat(channel);
+ struct musb *musb = chdat->musb;
+ void __iomem *tbase = musb->ctrl_base;
+ u32 reg;
+
+ DBG(3, "ep%i ch%i\n", chdat->epnum, chdat->ch);
+
+ reg = musb_readl(tbase, TUSB_DMA_INT_MASK);
+ if (chdat->tx)
+ reg |= (1 << chdat->epnum);
+ else
+ reg |= (1 << (chdat->epnum + 15));
+ musb_writel(tbase, TUSB_DMA_INT_MASK, reg);
+
+ reg = musb_readl(tbase, TUSB_DMA_INT_CLEAR);
+ if (chdat->tx)
+ reg |= (1 << chdat->epnum);
+ else
+ reg |= (1 << (chdat->epnum + 15));
+ musb_writel(tbase, TUSB_DMA_INT_CLEAR, reg);
+
+ channel->status = MUSB_DMA_STATUS_UNKNOWN;
+
+ if (chdat->ch >= 0) {
+ omap_stop_dma(chdat->ch);
+ omap_free_dma(chdat->ch);
+ chdat->ch = -1;
+ }
+
+ if (chdat->dmareq >= 0)
+ tusb_omap_dma_free_dmareq(chdat);
+
+ channel = NULL;
+}
+
+void dma_controller_destroy(struct dma_controller *c)
+{
+ struct tusb_omap_dma *tusb_dma;
+ int i;
+
+ tusb_dma = container_of(c, struct tusb_omap_dma, controller);
+ for (i = 0; i < MAX_DMAREQ; i++) {
+ struct dma_channel *ch = dma_channel_pool[i];
+ if (ch) {
+ kfree(ch->private_data);
+ kfree(ch);
+ }
+ }
+
+ if (tusb_dma && !tusb_dma->multichannel && tusb_dma->ch >= 0)
+ omap_free_dma(tusb_dma->ch);
+
+ kfree(tusb_dma);
+}
+
+struct dma_controller *__init
+dma_controller_create(struct musb *musb, void __iomem *base)
+{
+ void __iomem *tbase = musb->ctrl_base;
+ struct tusb_omap_dma *tusb_dma;
+ int i;
+
+ /* REVISIT: Get dmareq lines used from board-*.c */
+
+ musb_writel(musb->ctrl_base, TUSB_DMA_INT_MASK, 0x7fffffff);
+ musb_writel(musb->ctrl_base, TUSB_DMA_EP_MAP, 0);
+
+ musb_writel(tbase, TUSB_DMA_REQ_CONF,
+ TUSB_DMA_REQ_CONF_BURST_SIZE(2)
+ | TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f)
+ | TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));
+
+ tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL);
+ if (!tusb_dma)
+ goto cleanup;
+
+ tusb_dma->musb = musb;
+ tusb_dma->tbase = musb->ctrl_base;
+
+ tusb_dma->ch = -1;
+ tusb_dma->dmareq = -1;
+ tusb_dma->sync_dev = -1;
+
+ tusb_dma->controller.start = tusb_omap_dma_start;
+ tusb_dma->controller.stop = tusb_omap_dma_stop;
+ tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate;
+ tusb_dma->controller.channel_release = tusb_omap_dma_release;
+ tusb_dma->controller.channel_program = tusb_omap_dma_program;
+ tusb_dma->controller.channel_abort = tusb_omap_dma_abort;
+
+ if (tusb_get_revision(musb) >= TUSB_REV_30)
+ tusb_dma->multichannel = 1;
+
+ for (i = 0; i < MAX_DMAREQ; i++) {
+ struct dma_channel *ch;
+ struct tusb_omap_dma_ch *chdat;
+
+ ch = kzalloc(sizeof(struct dma_channel), GFP_KERNEL);
+ if (!ch)
+ goto cleanup;
+
+ dma_channel_pool[i] = ch;
+
+ chdat = kzalloc(sizeof(struct tusb_omap_dma_ch), GFP_KERNEL);
+ if (!chdat)
+ goto cleanup;
+
+ ch->status = MUSB_DMA_STATUS_UNKNOWN;
+ ch->private_data = chdat;
+ }
+
+ return &tusb_dma->controller;
+
+cleanup:
+ dma_controller_destroy(&tusb_dma->controller);
+
+ return NULL;
+}
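
The shift and mask arithmetic in tusb_omap_dma_allocate_dmareq() and tusb_omap_dma_free_dmareq() above relies on TUSB_DMA_EP_MAP packing one 5-bit field per DMA request line: a 4-bit endpoint number plus a direction bit (1 = TX). A minimal standalone sketch of that packing follows, assuming the same layout; the helper names are invented for illustration.

	#include <stdint.h>
	#include <stdio.h>

	#define EP_MAP_SLOT_BITS	5		/* bits per dmareq slot */
	#define EP_MAP_EP_MASK		0xf		/* bits [3:0]: endpoint number */
	#define EP_MAP_TX_BIT		(1 << 4)	/* bit 4: transmit direction */

	/* Program one slot: clear its 5 bits, then write epnum and direction. */
	static uint32_t ep_map_set(uint32_t reg, int slot, int epnum, int tx)
	{
		reg &= ~(0x1fu << (slot * EP_MAP_SLOT_BITS));
		reg |= (uint32_t)(epnum & EP_MAP_EP_MASK) << (slot * EP_MAP_SLOT_BITS);
		if (tx)
			reg |= (uint32_t)EP_MAP_TX_BIT << (slot * EP_MAP_SLOT_BITS);
		return reg;
	}

	/* A slot whose endpoint field reads back as 0 is unused. */
	static int ep_map_find_free(uint32_t reg, int max_slots)
	{
		int i;

		for (i = 0; i < max_slots; i++)
			if (((reg >> (i * EP_MAP_SLOT_BITS)) & EP_MAP_EP_MASK) == 0)
				return i;
		return -1;
	}

	int main(void)
	{
		uint32_t reg = 0;
		int slot = ep_map_find_free(reg, 6);	/* six external dmareq lines */

		reg = ep_map_set(reg, slot, 2, 1);	/* ep2 TX lands in slot 0 */
		printf("slot %d, reg %#x\n", slot, reg);	/* prints "slot 0, reg 0x12" */
		return 0;
	}
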
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 8878c1767fc8..70338f4ec918 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -499,9 +499,10 @@ config USB_SERIAL_SAFE_PADDED
config USB_SERIAL_SIERRAWIRELESS
tristate "USB Sierra Wireless Driver"
help
- Say M here if you want to use a Sierra Wireless device (if
- using an PC 5220 or AC580 please use the Airprime driver
- instead).
+ Say M here if you want to use Sierra Wireless devices.
+
+ Many devices have a feature known as TRU-Install. For those devices
+ to work properly, the USB Storage Sierra feature must be enabled.
To compile this driver as a module, choose M here: the
module will be called sierra.
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 838717250145..984f6eff4c47 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -563,6 +563,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) },
{ USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
{ USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
{ USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
@@ -637,6 +638,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(ELEKTOR_VID, ELEKTOR_FT323R_PID) },
{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
@@ -646,6 +648,10 @@ static struct usb_device_id id_table_combined [] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, LMI_LM3S_DEVEL_BOARD_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, LMI_LM3S_EVAL_BOARD_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
{ USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
{ }, /* Optional parameter entry */
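
The two Luminary Micro board IDs added above attach the existing ftdi_jtag_quirk through the .driver_info field, the usual way of carrying per-device data alongside a usb_device_id match. Below is a self-contained sketch of that pattern; the structure and function names are invented for illustration and are not the actual ftdi_sio types.

	#include <linux/module.h>
	#include <linux/usb.h>

	/* Hypothetical per-device quirk carried via .driver_info. */
	struct example_quirk {
		int (*setup)(struct usb_interface *intf);
	};

	static int example_jtag_setup(struct usb_interface *intf)
	{
		/* e.g. refuse to bind the FIFO/JTAG channel of a dual-channel chip */
		return 0;
	}

	static struct example_quirk example_jtag_quirk = {
		.setup = example_jtag_setup,
	};

	static struct usb_device_id example_ids[] = {
		{ USB_DEVICE(0x0403, 0xbcd8),		/* quirky device */
		  .driver_info = (kernel_ulong_t)&example_jtag_quirk },
		{ USB_DEVICE(0x0403, 0x6001) },		/* plain device, no quirk */
		{ }					/* terminating entry */
	};
	MODULE_DEVICE_TABLE(usb, example_ids);

	/* In probe, the matched entry hands the quirk pointer back. */
	static int example_probe(struct usb_interface *intf,
				 const struct usb_device_id *id)
	{
		struct example_quirk *quirk = (struct example_quirk *)id->driver_info;

		if (quirk && quirk->setup)
			return quirk->setup(intf);
		return 0;
	}
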
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index a577ea44dcf9..382265bba969 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -524,7 +524,9 @@
#define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */
#define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */
#define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */
+#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */
#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */
+#define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */
/*
* Definitions for ID TECH (www.idt-net.com) devices
@@ -815,6 +817,11 @@
#define OLIMEX_VID 0x15BA
#define OLIMEX_ARM_USB_OCD_PID 0x0003
+/* Luminary Micro Stellaris Boards, VID = FTDI_VID */
+/* FTDI 2232C Dual channel device, Side A=245 FIFO (JTAG), Side B=RS232 UART */
+#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8
+#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9
+
/* www.elsterelectricity.com Elster Unicom III Optical Probe */
#define FTDI_ELSTER_UNICOM_PID 0xE700 /* Product Id */
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 2e663f1afd5e..d95382088075 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -38,8 +38,6 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
-#include <linux/version.h>
-
/* the mode to be set when the port ist opened */
static int initial_mode = 1;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e4eca95f2b0f..9f9cd36455f4 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -173,6 +173,7 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
#define KYOCERA_PRODUCT_KPC680 0x180a
#define ANYDATA_VENDOR_ID 0x16d5
+#define ANYDATA_PRODUCT_ADU_620UW 0x6202
#define ANYDATA_PRODUCT_ADU_E100A 0x6501
#define ANYDATA_PRODUCT_ADU_500A 0x6502
@@ -186,6 +187,23 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
#define BANDRICH_VENDOR_ID 0x1A8D
#define BANDRICH_PRODUCT_C100_1 0x1002
#define BANDRICH_PRODUCT_C100_2 0x1003
+#define BANDRICH_PRODUCT_1004 0x1004
+#define BANDRICH_PRODUCT_1005 0x1005
+#define BANDRICH_PRODUCT_1006 0x1006
+#define BANDRICH_PRODUCT_1007 0x1007
+#define BANDRICH_PRODUCT_1008 0x1008
+#define BANDRICH_PRODUCT_1009 0x1009
+#define BANDRICH_PRODUCT_100A 0x100a
+
+#define BANDRICH_PRODUCT_100B 0x100b
+#define BANDRICH_PRODUCT_100C 0x100c
+#define BANDRICH_PRODUCT_100D 0x100d
+#define BANDRICH_PRODUCT_100E 0x100e
+
+#define BANDRICH_PRODUCT_100F 0x100f
+#define BANDRICH_PRODUCT_1010 0x1010
+#define BANDRICH_PRODUCT_1011 0x1011
+#define BANDRICH_PRODUCT_1012 0x1012
#define AMOI_VENDOR_ID 0x1614
#define AMOI_PRODUCT_9508 0x0800
@@ -197,6 +215,10 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po
#define TELIT_VENDOR_ID 0x1bc7
#define TELIT_PRODUCT_UC864E 0x1003
+/* ZTE PRODUCTS */
+#define ZTE_VENDOR_ID 0x19d2
+#define ZTE_PRODUCT_MF628 0x0015
+
static struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -297,17 +319,34 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(DELL_VENDOR_ID, 0x8138) }, /* Dell Wireless 5520 Voda I Mobile Broadband (3G HSDPA) Minicard */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
+ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
{ USB_DEVICE(AXESSTEL_VENDOR_ID, AXESSTEL_PRODUCT_MV110H) },
{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_MSA501HS) },
{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_PRODUCT_ET502HS) },
{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_1) },
{ USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_C100_2) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1004) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1005) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1006) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1007) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1008) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1009) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100A) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100B) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100C) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100D) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100E) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_100F) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1010) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1011) },
+ { USB_DEVICE(BANDRICH_VENDOR_ID, BANDRICH_PRODUCT_1012) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
@@ -346,11 +385,7 @@ static struct usb_serial_driver option_1port_device = {
.read_int_callback = option_instat_callback,
};
-#ifdef CONFIG_USB_DEBUG
static int debug;
-#else
-#define debug 0
-#endif
/* per port private data */
@@ -954,8 +989,5 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
-#ifdef CONFIG_USB_DEBUG
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug messages");
-#endif
-
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 2c9c446ad625..1ede1441cb1b 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -90,7 +90,6 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(ALCOR_VENDOR_ID, ALCOR_PRODUCT_ID) },
{ USB_DEVICE(WS002IN_VENDOR_ID, WS002IN_PRODUCT_ID) },
{ USB_DEVICE(COREGA_VENDOR_ID, COREGA_PRODUCT_ID) },
- { USB_DEVICE(HL340_VENDOR_ID, HL340_PRODUCT_ID) },
{ USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 6ac3bbcf7a22..a3bd039c78e9 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -107,10 +107,6 @@
#define COREGA_VENDOR_ID 0x07aa
#define COREGA_PRODUCT_ID 0x002a
-/* HL HL-340 (ID: 4348:5523) */
-#define HL340_VENDOR_ID 0x4348
-#define HL340_PRODUCT_ID 0x5523
-
/* Y.C. Cable U.S.A., Inc - USB to RS-232 */
#define YCCABLE_VENDOR_ID 0x05ad
#define YCCABLE_PRODUCT_ID 0x0fba
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 2f6f1523ec56..706033753adb 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -14,7 +14,7 @@
Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org>
*/
-#define DRIVER_VERSION "v.1.2.9c"
+#define DRIVER_VERSION "v.1.2.13a"
#define DRIVER_AUTHOR "Kevin Lloyd <klloyd@sierrawireless.com>"
#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems"
@@ -31,6 +31,7 @@
#define SWIMS_USB_REQUEST_SetPower 0x00
#define SWIMS_USB_REQUEST_SetNmea 0x07
#define SWIMS_USB_REQUEST_SetMode 0x0B
+#define SWIMS_USB_REQUEST_GetSwocInfo 0x0A
#define SWIMS_SET_MODE_Modem 0x0001
/* per port private data */
@@ -40,18 +41,11 @@
static int debug;
static int nmea;
-static int truinstall = 1;
-
-enum devicetype {
- DEVICE_3_PORT = 0,
- DEVICE_1_PORT = 1,
- DEVICE_INSTALLER = 2,
-};
static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
{
int result;
- dev_dbg(&udev->dev, "%s", "SET POWER STATE\n");
+ dev_dbg(&udev->dev, "%s", __func__);
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
SWIMS_USB_REQUEST_SetPower, /* __u8 request */
USB_TYPE_VENDOR, /* __u8 request type */
@@ -63,25 +57,10 @@ static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
return result;
}
-static int sierra_set_ms_mode(struct usb_device *udev, __u16 eSWocMode)
-{
- int result;
- dev_dbg(&udev->dev, "%s", "DEVICE MODE SWITCH\n");
- result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- SWIMS_USB_REQUEST_SetMode, /* __u8 request */
- USB_TYPE_VENDOR, /* __u8 request type */
- eSWocMode, /* __u16 value */
- 0x0000, /* __u16 index */
- NULL, /* void *data */
- 0, /* __u16 size */
- USB_CTRL_SET_TIMEOUT); /* int timeout */
- return result;
-}
-
static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable)
{
int result;
- dev_dbg(&udev->dev, "%s", "NMEA Enable sent\n");
+ dev_dbg(&udev->dev, "%s", __func__);
result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
SWIMS_USB_REQUEST_SetNmea, /* __u8 request */
USB_TYPE_VENDOR, /* __u8 request type */
@@ -97,6 +76,7 @@ static int sierra_calc_num_ports(struct usb_serial *serial)
{
int result;
int *num_ports = usb_get_serial_data(serial);
+ dev_dbg(&serial->dev->dev, "%s", __func__);
result = *num_ports;
@@ -110,22 +90,23 @@ static int sierra_calc_num_ports(struct usb_serial *serial)
static int sierra_calc_interface(struct usb_serial *serial)
{
- int interface;
- struct usb_interface *p_interface;
- struct usb_host_interface *p_host_interface;
+ int interface;
+ struct usb_interface *p_interface;
+ struct usb_host_interface *p_host_interface;
+ dev_dbg(&serial->dev->dev, "%s", __func__);
- /* Get the interface structure pointer from the serial struct */
- p_interface = serial->interface;
+ /* Get the interface structure pointer from the serial struct */
+ p_interface = serial->interface;
- /* Get a pointer to the host interface structure */
- p_host_interface = p_interface->cur_altsetting;
+ /* Get a pointer to the host interface structure */
+ p_host_interface = p_interface->cur_altsetting;
- /* read the interface descriptor for this active altsetting
- * to find out the interface number we are on
- */
- interface = p_host_interface->desc.bInterfaceNumber;
+ /* read the interface descriptor for this active altsetting
+ * to find out the interface number we are on
+ */
+ interface = p_host_interface->desc.bInterfaceNumber;
- return interface;
+ return interface;
}
static int sierra_probe(struct usb_serial *serial,
@@ -135,43 +116,40 @@ static int sierra_probe(struct usb_serial *serial,
struct usb_device *udev;
int *num_ports;
u8 ifnum;
+ u8 numendpoints;
+
+ dev_dbg(&serial->dev->dev, "%s", __func__);
num_ports = kmalloc(sizeof(*num_ports), GFP_KERNEL);
if (!num_ports)
return -ENOMEM;
ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
+ numendpoints = serial->interface->cur_altsetting->desc.bNumEndpoints;
udev = serial->dev;
- /* Figure out the interface number from the serial structure */
- ifnum = sierra_calc_interface(serial);
-
- /*
- * If this interface supports more than 1 alternate
- * select the 2nd one
- */
- if (serial->interface->num_altsetting == 2) {
- dev_dbg(&udev->dev,
- "Selecting alt setting for interface %d\n",
- ifnum);
+ /* Figure out the interface number from the serial structure */
+ ifnum = sierra_calc_interface(serial);
- /* We know the alternate setting is 1 for the MC8785 */
- usb_set_interface(udev, ifnum, 1);
- }
+ /*
+ * If this interface supports more than 1 alternate
+ * select the 2nd one
+ */
+ if (serial->interface->num_altsetting == 2) {
+ dev_dbg(&udev->dev, "Selecting alt setting for interface %d\n",
+ ifnum);
+ /* We know the alternate setting is 1 for the MC8785 */
+ usb_set_interface(udev, ifnum, 1);
+ }
- /* Check if in installer mode */
- if (truinstall && id->driver_info == DEVICE_INSTALLER) {
- dev_dbg(&udev->dev, "%s", "FOUND TRU-INSTALL DEVICE(SW)\n");
- result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem);
- /* Don't bind to the device when in installer mode */
- kfree(num_ports);
- return -EIO;
- } else if (id->driver_info == DEVICE_1_PORT)
- *num_ports = 1;
- else if (ifnum == 0x99)
+ /* Dummy interface present on some SKUs should be ignored */
+ if (ifnum == 0x99)
*num_ports = 0;
+ else if (numendpoints <= 3)
+ *num_ports = 1;
else
- *num_ports = 3;
+ *num_ports = (numendpoints-1)/2;
+
/*
* save off our num_ports info so that we can use it in the
* calc_num_ports callback
@@ -187,40 +165,50 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */
{ USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */
{ USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */
+ { USB_DEVICE(0x1199, 0x0024) }, /* Sierra Wireless MC5727 */
{ USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */
{ USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */
{ USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */
{ USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */
- { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) }, /* Sierra Wireless C597 */
+ /* Sierra Wireless C597 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) },
+ /* Sierra Wireless Device */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0025, 0xFF, 0xFF, 0xFF) },
+ { USB_DEVICE(0x1199, 0x0026) }, /* Sierra Wireless Device */
{ USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */
{ USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */
{ USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */
{ USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */
- { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Thinkpad internal) */
+ { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 (Lenovo) */
{ USB_DEVICE(0x1199, 0x6815) }, /* Sierra Wireless MC8775 */
{ USB_DEVICE(0x03f0, 0x1e1d) }, /* HP hs2300 a.k.a MC8775 */
{ USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */
{ USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */
- { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780*/
- { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781*/
- { USB_DEVICE(0x1199, 0x683B), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless MC8785 Composite*/
+ { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780 */
+ { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */
+ { USB_DEVICE(0x1199, 0x683B) }, /* Sierra Wireless MC8785 Composite */
+ { USB_DEVICE(0x1199, 0x683C) }, /* Sierra Wireless MC8790 */
+ { USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8790 */
+ { USB_DEVICE(0x1199, 0x683E) }, /* Sierra Wireless MC8790 */
{ USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */
{ USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */
{ USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880 E */
{ USB_DEVICE(0x1199, 0x6853) }, /* Sierra Wireless AirCard 881 E */
{ USB_DEVICE(0x1199, 0x6855) }, /* Sierra Wireless AirCard 880 U */
{ USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */
- { USB_DEVICE(0x1199, 0x6859), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 885 E */
- { USB_DEVICE(0x1199, 0x685A), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 885 E */
-
- { USB_DEVICE(0x1199, 0x6468) }, /* Sierra Wireless MP3G - EVDO */
- { USB_DEVICE(0x1199, 0x6469) }, /* Sierra Wireless MP3G - UMTS/HSPA */
-
- { USB_DEVICE(0x1199, 0x0112), .driver_info = DEVICE_1_PORT }, /* Sierra Wireless AirCard 580 */
- { USB_DEVICE(0x0F3D, 0x0112), .driver_info = DEVICE_1_PORT }, /* Airprime/Sierra PC 5220 */
+ { USB_DEVICE(0x1199, 0x6859) }, /* Sierra Wireless AirCard 885 E */
+ { USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */
+ /* Sierra Wireless C885 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)},
+ /* Sierra Wireless Device */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6890, 0xFF, 0xFF, 0xFF)},
+ /* Sierra Wireless Device */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
+
+ { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */
+ { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
- { USB_DEVICE(0x1199, 0x0FFF), .driver_info = DEVICE_INSTALLER},
{ }
};
MODULE_DEVICE_TABLE(usb, id_table);
@@ -268,13 +256,19 @@ static int sierra_send_setup(struct tty_struct *tty,
if (portdata->rts_state)
val |= 0x02;
- /* Determine which port is targeted */
- if (port->bulk_out_endpointAddress == 2)
- interface = 0;
- else if (port->bulk_out_endpointAddress == 4)
- interface = 1;
- else if (port->bulk_out_endpointAddress == 5)
- interface = 2;
+ /* If composite device then properly report interface */
+ if (serial->num_ports == 1)
+ interface = sierra_calc_interface(serial);
+
+ /* Otherwise we need to do the non-composite mapping */
+ else {
+ if (port->bulk_out_endpointAddress == 2)
+ interface = 0;
+ else if (port->bulk_out_endpointAddress == 4)
+ interface = 1;
+ else if (port->bulk_out_endpointAddress == 5)
+ interface = 2;
+ }
return usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
@@ -713,7 +707,7 @@ static void sierra_shutdown(struct usb_serial *serial)
static struct usb_serial_driver sierra_device = {
.driver = {
.owner = THIS_MODULE,
- .name = "sierra1",
+ .name = "sierra",
},
.description = "Sierra USB modem",
.id_table = id_table,
@@ -769,14 +763,8 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
-module_param(truinstall, bool, 0);
-MODULE_PARM_DESC(truinstall, "TRU-Install support");
-
-module_param(nmea, bool, 0);
+module_param(nmea, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nmea, "NMEA streaming");
-#ifdef CONFIG_USB_DEBUG
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug messages");
-#endif
-
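
The rewritten sierra_probe() above stops using per-ID driver_info to pick the port count and derives it from the interface itself: the dummy interface 0x99 gets no ports, an interface with at most three endpoints gets one port, and larger composite interfaces get one port per bulk-in/bulk-out pair, with the single interrupt endpoint left over. A plain restatement of that heuristic as a worked example; the helper name is invented.

	#include <stdio.h>

	static int sierra_ports_for(int ifnum, int numendpoints)
	{
		if (ifnum == 0x99)		/* dummy interface on some SKUs */
			return 0;
		if (numendpoints <= 3)		/* e.g. 1 interrupt + 1 bulk pair */
			return 1;
		return (numendpoints - 1) / 2;	/* drop the interrupt ep, pair the rest */
	}

	int main(void)
	{
		/* A 7-endpoint interface (1 interrupt + 3 IN + 3 OUT) maps to 3 ports. */
		printf("%d\n", sierra_ports_for(0, 7));
		return 0;
	}
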
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 8c2d531eedea..b157c48e8b78 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -122,9 +122,6 @@ static void return_serial(struct usb_serial *serial)
dbg("%s", __func__);
- if (serial == NULL)
- return;
-
for (i = 0; i < serial->num_ports; ++i)
serial_table[serial->minor + i] = NULL;
}
@@ -142,7 +139,8 @@ static void destroy_serial(struct kref *kref)
serial->type->shutdown(serial);
/* return the minor range that this device had */
- return_serial(serial);
+ if (serial->minor != SERIAL_TTY_NO_MINOR)
+ return_serial(serial);
for (i = 0; i < serial->num_ports; ++i)
serial->port[i]->port.count = 0;
@@ -575,6 +573,7 @@ static struct usb_serial *create_serial(struct usb_device *dev,
serial->interface = interface;
kref_init(&serial->kref);
mutex_init(&serial->disc_mutex);
+ serial->minor = SERIAL_TTY_NO_MINOR;
return serial;
}
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 3d9249632ae1..c76034672c18 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -146,6 +146,18 @@ config USB_STORAGE_KARMA
on the resulting scsi device node returns the Karma to normal
operation.
+config USB_STORAGE_SIERRA
+ bool "Sierra Wireless TRU-Install Feature Support"
+ depends on USB_STORAGE
+ help
+ Say Y here to include additional code to support Sierra Wireless
+ products with the TRU-Install feature (e.g., AC597E, AC881U).
+
+ This code switches the Sierra Wireless device from being in
+ Mass Storage mode to Modem mode. It also has the ability to
+ support host software upgrades should full Linux support be added
+ to TRU-Install.
+
config USB_STORAGE_CYPRESS_ATACB
bool "SAT emulation on Cypress USB/ATA Bridge with ATACB"
depends on USB_STORAGE
diff --git a/drivers/usb/storage/Makefile b/drivers/usb/storage/Makefile
index 4c596c766c53..bc3415b475c9 100644
--- a/drivers/usb/storage/Makefile
+++ b/drivers/usb/storage/Makefile
@@ -21,6 +21,7 @@ usb-storage-obj-$(CONFIG_USB_STORAGE_JUMPSHOT) += jumpshot.o
usb-storage-obj-$(CONFIG_USB_STORAGE_ALAUDA) += alauda.o
usb-storage-obj-$(CONFIG_USB_STORAGE_ONETOUCH) += onetouch.o
usb-storage-obj-$(CONFIG_USB_STORAGE_KARMA) += karma.o
+usb-storage-obj-$(CONFIG_USB_STORAGE_SIERRA) += sierra_ms.o
usb-storage-obj-$(CONFIG_USB_STORAGE_CYPRESS_ATACB) += cypress_atacb.o
usb-storage-objs := scsiglue.o protocol.o transport.o usb.o \
diff --git a/drivers/usb/storage/freecom.c b/drivers/usb/storage/freecom.c
index 7a4d45677227..73ac7262239e 100644
--- a/drivers/usb/storage/freecom.c
+++ b/drivers/usb/storage/freecom.c
@@ -26,8 +26,6 @@
* (http://www.freecom.de/)
*/
-#include <linux/hdreg.h>
-
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
new file mode 100644
index 000000000000..4359a2cb42df
--- /dev/null
+++ b/drivers/usb/storage/sierra_ms.c
@@ -0,0 +1,207 @@
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <linux/usb.h>
+
+#include "usb.h"
+#include "transport.h"
+#include "protocol.h"
+#include "scsiglue.h"
+#include "sierra_ms.h"
+#include "debug.h"
+
+#define SWIMS_USB_REQUEST_SetSwocMode 0x0B
+#define SWIMS_USB_REQUEST_GetSwocInfo 0x0A
+#define SWIMS_USB_INDEX_SetMode 0x0000
+#define SWIMS_SET_MODE_Modem 0x0001
+
+#define TRU_NORMAL 0x01
+#define TRU_FORCE_MS 0x02
+#define TRU_FORCE_MODEM 0x03
+
+static unsigned int swi_tru_install = 1;
+module_param(swi_tru_install, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(swi_tru_install, "TRU-Install mode (1=Full Logic (def),"
+ " 2=Force CD-Rom, 3=Force Modem)");
+
+struct swoc_info {
+ __u8 rev;
+ __u8 reserved[8];
+ __u16 LinuxSKU;
+ __u16 LinuxVer;
+ __u8 reserved2[47];
+} __attribute__((__packed__));
+
+static bool containsFullLinuxPackage(struct swoc_info *swocInfo)
+{
+ if ((swocInfo->LinuxSKU >= 0x2100 && swocInfo->LinuxSKU <= 0x2FFF) ||
+ (swocInfo->LinuxSKU >= 0x7100 && swocInfo->LinuxSKU <= 0x7FFF))
+ return true;
+ else
+ return false;
+}
+
+static int sierra_set_ms_mode(struct usb_device *udev, __u16 eSWocMode)
+{
+ int result;
+ US_DEBUGP("SWIMS: %s", "DEVICE MODE SWITCH\n");
+ result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ SWIMS_USB_REQUEST_SetSwocMode, /* __u8 request */
+ USB_TYPE_VENDOR | USB_DIR_OUT, /* __u8 request type */
+ eSWocMode, /* __u16 value */
+ 0x0000, /* __u16 index */
+ NULL, /* void *data */
+ 0, /* __u16 size */
+ USB_CTRL_SET_TIMEOUT); /* int timeout */
+ return result;
+}
+
+
+static int sierra_get_swoc_info(struct usb_device *udev,
+ struct swoc_info *swocInfo)
+{
+ int result;
+
+ US_DEBUGP("SWIMS: Attempting to get TRU-Install info.\n");
+
+ result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ SWIMS_USB_REQUEST_GetSwocInfo, /* __u8 request */
+ USB_TYPE_VENDOR | USB_DIR_IN, /* __u8 request type */
+ 0, /* __u16 value */
+ 0, /* __u16 index */
+ (void *) swocInfo, /* void *data */
+ sizeof(struct swoc_info), /* __u16 size */
+ USB_CTRL_SET_TIMEOUT); /* int timeout */
+
+ swocInfo->LinuxSKU = le16_to_cpu(swocInfo->LinuxSKU);
+ swocInfo->LinuxVer = le16_to_cpu(swocInfo->LinuxVer);
+ return result;
+}
+
+static void debug_swoc(struct swoc_info *swocInfo)
+{
+ US_DEBUGP("SWIMS: SWoC Rev: %02d \n", swocInfo->rev);
+ US_DEBUGP("SWIMS: Linux SKU: %04X \n", swocInfo->LinuxSKU);
+ US_DEBUGP("SWIMS: Linux Version: %04X \n", swocInfo->LinuxVer);
+}
+
+
+static ssize_t show_truinst(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct swoc_info *swocInfo;
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_device *udev = interface_to_usbdev(intf);
+ int result;
+ if (swi_tru_install == TRU_FORCE_MS) {
+ result = snprintf(buf, PAGE_SIZE, "Forced Mass Storage\n");
+ } else {
+ swocInfo = kmalloc(sizeof(struct swoc_info), GFP_KERNEL);
+ if (!swocInfo) {
+ US_DEBUGP("SWIMS: Allocation failure\n");
+ snprintf(buf, PAGE_SIZE, "Error\n");
+ return -ENOMEM;
+ }
+ result = sierra_get_swoc_info(udev, swocInfo);
+ if (result < 0) {
+ US_DEBUGP("SWIMS: failed SWoC query\n");
+ kfree(swocInfo);
+ snprintf(buf, PAGE_SIZE, "Error\n");
+ return -EIO;
+ }
+ debug_swoc(swocInfo);
+ result = snprintf(buf, PAGE_SIZE,
+ "REV=%02d SKU=%04X VER=%04X\n",
+ swocInfo->rev,
+ swocInfo->LinuxSKU,
+ swocInfo->LinuxVer);
+ kfree(swocInfo);
+ }
+ return result;
+}
+static DEVICE_ATTR(truinst, S_IRUGO, show_truinst, NULL);
+
+int sierra_ms_init(struct us_data *us)
+{
+ int result, retries;
+ signed long delay_t;
+ struct swoc_info *swocInfo;
+ struct usb_device *udev;
+ struct Scsi_Host *sh;
+ struct scsi_device *sd;
+
+ delay_t = 2;
+ retries = 3;
+ result = 0;
+ udev = us->pusb_dev;
+
+ sh = us_to_host(us);
+ sd = scsi_get_host_dev(sh);
+
+ US_DEBUGP("SWIMS: sierra_ms_init called\n");
+
+ /* Force Modem mode */
+ if (swi_tru_install == TRU_FORCE_MODEM) {
+ US_DEBUGP("SWIMS: %s", "Forcing Modem Mode\n");
+ result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem);
+ if (result < 0)
+ US_DEBUGP("SWIMS: Failed to switch to modem mode.\n");
+ return -EIO;
+ }
+ /* Force Mass Storage mode (keep CD-Rom) */
+ else if (swi_tru_install == TRU_FORCE_MS) {
+ US_DEBUGP("SWIMS: %s", "Forcing Mass Storage Mode\n");
+ goto complete;
+ }
+ /* Normal TRU-Install Logic */
+ else {
+ US_DEBUGP("SWIMS: %s", "Normal SWoC Logic\n");
+
+ swocInfo = kmalloc(sizeof(struct swoc_info),
+ GFP_KERNEL);
+ if (!swocInfo) {
+ US_DEBUGP("SWIMS: %s", "Allocation failure\n");
+ return -ENOMEM;
+ }
+
+ retries = 3;
+ do {
+ retries--;
+ result = sierra_get_swoc_info(udev, swocInfo);
+ if (result < 0) {
+ US_DEBUGP("SWIMS: %s", "Failed SWoC query\n");
+ schedule_timeout_uninterruptible(2*HZ);
+ }
+ } while (retries && result < 0);
+
+ if (result < 0) {
+ US_DEBUGP("SWIMS: %s",
+ "Completely failed SWoC query\n");
+ kfree(swocInfo);
+ return -EIO;
+ }
+
+ debug_swoc(swocInfo);
+
+ /* If there is no Linux software on the TRU-Install device
+ * then switch to modem mode
+ */
+ if (!containsFullLinuxPackage(swocInfo)) {
+ US_DEBUGP("SWIMS: %s",
+ "Switching to Modem Mode\n");
+ result = sierra_set_ms_mode(udev,
+ SWIMS_SET_MODE_Modem);
+ if (result < 0)
+ US_DEBUGP("SWIMS: Failed to switch modem\n");
+ kfree(swocInfo);
+ return -EIO;
+ }
+ kfree(swocInfo);
+ }
+complete:
+ result = device_create_file(&us->pusb_intf->dev, &dev_attr_truinst);
+
+ return USB_STOR_TRANSPORT_GOOD;
+}
+
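
The sysfs attribute created by sierra_ms_init() appears on the USB interface the storage driver bound to and reports the SWoC revision, SKU and version returned by sierra_get_swoc_info(). A small userspace sketch of reading it; the device path is only an example, since the actual <bus>-<port>:<config>.<interface> name depends on where the modem is attached.

	#include <stdio.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/sys/bus/usb/devices/1-2:1.0/truinst", "r");

		if (!f) {
			perror("truinst");
			return 1;
		}
		if (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* e.g. "REV=02 SKU=2100 VER=0001" */
		fclose(f);
		return 0;
	}

The swi_tru_install module parameter selects between the full SWoC logic (1), forcing mass-storage mode (2), or forcing modem mode (3), matching the TRU_* constants defined above.
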
diff --git a/drivers/usb/storage/sierra_ms.h b/drivers/usb/storage/sierra_ms.h
new file mode 100644
index 000000000000..bb48634ac1fc
--- /dev/null
+++ b/drivers/usb/storage/sierra_ms.h
@@ -0,0 +1,4 @@
+#ifndef _SIERRA_MS_H_
+#define _SIERRA_MS_H_
+extern int sierra_ms_init(struct us_data *us);
+#endif
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index fcbbfdb7b2b0..3523a0bfa0ff 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -1032,8 +1032,21 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
/* try to compute the actual residue, based on how much data
* was really transferred and what the device tells us */
- if (residue) {
- if (!(us->fflags & US_FL_IGNORE_RESIDUE)) {
+ if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) {
+
+ /* Heuristically detect devices that generate bogus residues
+ * by seeing what happens with INQUIRY and READ CAPACITY
+ * commands.
+ */
+ if (bcs->Status == US_BULK_STAT_OK &&
+ scsi_get_resid(srb) == 0 &&
+ ((srb->cmnd[0] == INQUIRY &&
+ transfer_length == 36) ||
+ (srb->cmnd[0] == READ_CAPACITY &&
+ transfer_length == 8))) {
+ us->fflags |= US_FL_IGNORE_RESIDUE;
+
+ } else {
residue = min(residue, transfer_length);
scsi_set_resid(srb, max(scsi_get_resid(srb),
(int) residue));
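
The new branch above latches US_FL_IGNORE_RESIDUE for devices that fabricate the CSW residue: if the command status is good, the SCSI midlayer saw no shortfall, and the command was a fixed-length INQUIRY (36 bytes) or READ CAPACITY (8 bytes), a nonzero residue cannot be genuine. A standalone restatement of that predicate, using simplified stand-in types rather than the driver's own structures:

	#include <stdbool.h>
	#include <stdint.h>

	#define OP_INQUIRY		0x12
	#define OP_READ_CAPACITY	0x25

	/* Simplified view of the command status wrapper fields used by the test. */
	struct csw_view {
		uint8_t  status;	/* 0 = command passed */
		uint32_t residue;	/* bytes the device claims were not transferred */
	};

	static bool residue_is_bogus(const struct csw_view *csw, uint8_t opcode,
				     unsigned int transfer_length,
				     unsigned int midlayer_resid)
	{
		if (csw->status != 0 || midlayer_resid != 0 || csw->residue == 0)
			return false;
		/* The full fixed-size reply already arrived, yet residue is nonzero. */
		return (opcode == OP_INQUIRY && transfer_length == 36) ||
		       (opcode == OP_READ_CAPACITY && transfer_length == 8);
	}

	int main(void)
	{
		struct csw_view csw = { .status = 0, .residue = 240 };

		/* A 36-byte INQUIRY that completed in full but still reports residue. */
		return residue_is_bogus(&csw, OP_INQUIRY, 36, 0) ? 0 : 1;
	}
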
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 7ae69f55aa96..ba412e68d474 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -225,6 +225,13 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_MAX_SECTORS_64 ),
+/* Reported by Cedric Godin <cedric@belbone.be> */
+UNUSUAL_DEV( 0x0421, 0x04b9, 0x0551, 0x0551,
+ "Nokia",
+ "5300",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY ),
+
/* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */
UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210,
"SMSC",
@@ -356,14 +363,14 @@ UNUSUAL_DEV( 0x04b0, 0x040f, 0x0100, 0x0200,
US_FL_FIX_CAPACITY),
/* Reported by Emil Larsson <emil@swip.net> */
-UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0110,
+UNUSUAL_DEV( 0x04b0, 0x0411, 0x0100, 0x0111,
"NIKON",
"NIKON DSC D80",
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY),
/* Reported by Ortwin Glueck <odi@odi.ch> */
-UNUSUAL_DEV( 0x04b0, 0x0413, 0x0110, 0x0110,
+UNUSUAL_DEV( 0x04b0, 0x0413, 0x0110, 0x0111,
"NIKON",
"NIKON DSC D40",
US_SC_DEVICE, US_PR_DEVICE, NULL,
@@ -1185,6 +1192,13 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY ),
+/* Reported by Rauch Wolke <rauchwolke@gmx.net> */
+UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff,
+ "Simple Tech/Datafab",
+ "CF+SM Reader",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
/* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant
* to the USB storage specification in two ways:
* - They tell us they are using transport protocol CBI. In reality they
@@ -1562,6 +1576,7 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100,
US_SC_DEVICE, US_PR_DEVICE, NULL,
0),
+#ifdef CONFIG_USB_STORAGE_SIERRA
/* Reported by Kevin Lloyd <linux@sierrawireless.com>
* Entry is needed for the initializer function override,
* which instructs the device to load as a modem
@@ -1570,8 +1585,9 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100,
UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999,
"Sierra Wireless",
"USB MMC Storage",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_IGNORE_DEVICE),
+ US_SC_DEVICE, US_PR_DEVICE, sierra_ms_init,
+ 0),
+#endif
/* Reported by Jaco Kroon <jaco@kroon.co.za>
* The usb-storage module found on the Digitech GNX4 (and supposedly other
@@ -1743,6 +1759,15 @@ UNUSUAL_DEV( 0x22b8, 0x4810, 0x0001, 0x0002,
US_FL_FIX_CAPACITY),
/*
+ * Patch by Jost Diederichs <jost@qdusa.com>
+ */
+UNUSUAL_DEV(0x22b8, 0x6410, 0x0001, 0x9999,
+ "Motorola Inc.",
+ "Motorola Phone (RAZRV3xx)",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY),
+
+/*
* Patch by Constantin Baranov <const@tltsu.ru>
* Report by Andreas Koenecke.
* Motorola ROKR Z6.
@@ -1767,6 +1792,13 @@ UNUSUAL_DEV( 0x2770, 0x915d, 0x0010, 0x0010,
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
+/* Reported by Andrey Rahmatullin <wrar@altlinux.org> */
+UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100,
+ "iRiver",
+ "MP3 T10",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
/*
* David Härdeman <david@2gen.com>
* The key makes the SCSI stack print confusing (but harmless) messages
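
For context on the entries above: each UNUSUAL_DEV() row matches on vendor ID, product ID and an inclusive bcdDevice (firmware revision) range, which is why the two Nikon updates only raise the upper bound from 0x0110 to 0x0111 to cover a newer firmware. A sketch of that range match with invented names; the real table is expanded into usb_device_id entries elsewhere in usb-storage.

	#include <stdbool.h>
	#include <stdint.h>

	struct unusual_entry {
		uint16_t vid, pid;
		uint16_t bcd_min, bcd_max;	/* inclusive firmware-revision range */
		unsigned int flags;		/* e.g. US_FL_FIX_CAPACITY */
	};

	static bool matches_unusual_dev(const struct unusual_entry *e,
					uint16_t vid, uint16_t pid, uint16_t bcd)
	{
		return vid == e->vid && pid == e->pid &&
		       bcd >= e->bcd_min && bcd <= e->bcd_max;
	}

	int main(void)
	{
		/* The widened NIKON D80 entry, flags elided for brevity. */
		struct unusual_entry nikon_d80 = { 0x04b0, 0x0411, 0x0100, 0x0111, 0 };

		/* Firmware bcdDevice 0x0111 matches only after the range bump above. */
		return matches_unusual_dev(&nikon_d80, 0x04b0, 0x0411, 0x0111) ? 0 : 1;
	}
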
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index bfea851be985..73679aa506de 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -102,6 +102,9 @@
#ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB
#include "cypress_atacb.h"
#endif
+#ifdef CONFIG_USB_STORAGE_SIERRA
+#include "sierra_ms.h"
+#endif
/* Some informational data */
MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>");