Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/ec.c | 2
-rw-r--r--  drivers/acpi/pci_irq.c | 1
-rw-r--r--  drivers/acpi/pci_link.c | 16
-rw-r--r--  drivers/base/power/opp.c | 17
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c | 5
-rw-r--r--  drivers/block/loop.c | 11
-rw-r--r--  drivers/block/null_blk.c | 2
-rw-r--r--  drivers/block/nvme-core.c | 52
-rw-r--r--  drivers/block/virtio_blk.c | 2
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 38
-rw-r--r--  drivers/block/xen-blkfront.c | 19
-rw-r--r--  drivers/bus/Kconfig | 1
-rw-r--r--  drivers/clk/samsung/clk-cpu.c | 10
-rw-r--r--  drivers/clk/ti/clk-3xxx.c | 2
-rw-r--r--  drivers/clk/ti/clk-7xx.c | 18
-rw-r--r--  drivers/clk/ti/clkt_dflt.c | 4
-rw-r--r--  drivers/clocksource/rockchip_timer.c | 2
-rw-r--r--  drivers/clocksource/timer-keystone.c | 2
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 3
-rw-r--r--  drivers/cpufreq/cpufreq.c | 4
-rw-r--r--  drivers/devfreq/devfreq.c | 7
-rw-r--r--  drivers/dma/at_xdmac.c | 15
-rw-r--r--  drivers/dma/dmaengine.c | 10
-rw-r--r--  drivers/dma/dw/core.c | 4
-rw-r--r--  drivers/dma/idma64.c | 16
-rw-r--r--  drivers/dma/pxa_dma.c | 31
-rw-r--r--  drivers/dma/sun4i-dma.c | 6
-rw-r--r--  drivers/dma/xgene-dma.c | 46
-rw-r--r--  drivers/dma/zx296702_dma.c | 2
-rw-r--r--  drivers/firmware/efi/libstub/arm-stub.c | 88
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/include/cgs_linux.h | 17
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 85
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 6
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 19
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.c | 23
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_core.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 15
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 36
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 14
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 94
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 39
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_dp_mst.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 32
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 8
-rw-r--r--  drivers/hwmon/abx500.c | 1
-rw-r--r--  drivers/hwmon/gpio-fan.c | 1
-rw-r--r--  drivers/hwmon/pwm-fan.c | 1
-rw-r--r--  drivers/idle/intel_idle.c | 12
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 67
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 18
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 26
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c | 5
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h | 1
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c | 18
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 21
-rw-r--r--  drivers/input/joystick/Kconfig | 1
-rw-r--r--  drivers/input/joystick/walkera0701.c | 4
-rw-r--r--  drivers/input/keyboard/omap4-keypad.c | 2
-rw-r--r--  drivers/input/misc/pm8941-pwrkey.c | 2
-rw-r--r--  drivers/input/misc/uinput.c | 2
-rw-r--r--  drivers/input/mouse/elan_i2c.h | 2
-rw-r--r--  drivers/input/mouse/elan_i2c_core.c | 26
-rw-r--r--  drivers/input/mouse/elan_i2c_i2c.c | 4
-rw-r--r--  drivers/input/mouse/elan_i2c_smbus.c | 4
-rw-r--r--  drivers/input/mouse/synaptics.c | 12
-rw-r--r--  drivers/input/serio/libps2.c | 22
-rw-r--r--  drivers/input/serio/parkbd.c | 1
-rw-r--r--  drivers/input/touchscreen/imx6ul_tsc.c | 34
-rw-r--r--  drivers/input/touchscreen/mms114.c | 4
-rw-r--r--  drivers/iommu/Kconfig | 2
-rw-r--r--  drivers/iommu/intel-iommu.c | 8
-rw-r--r--  drivers/iommu/iova.c | 120
-rw-r--r--  drivers/irqchip/irq-gic-v3-its-pci-msi.c | 2
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 3
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 12
-rw-r--r--  drivers/mcb/mcb-pci.c | 6
-rw-r--r--  drivers/md/bitmap.c | 3
-rw-r--r--  drivers/md/dm-cache-policy-cleaner.c | 2
-rw-r--r--  drivers/md/dm-exception-store.c | 6
-rw-r--r--  drivers/md/dm-exception-store.h | 5
-rw-r--r--  drivers/md/dm-raid.c | 3
-rw-r--r--  drivers/md/dm-snap-persistent.c | 17
-rw-r--r--  drivers/md/dm-snap-transient.c | 3
-rw-r--r--  drivers/md/dm-snap.c | 14
-rw-r--r--  drivers/md/dm.c | 11
-rw-r--r--  drivers/md/md.c | 5
-rw-r--r--  drivers/md/multipath.c | 3
-rw-r--r--  drivers/md/raid0.c | 12
-rw-r--r--  drivers/md/raid1.c | 15
-rw-r--r--  drivers/md/raid10.c | 13
-rw-r--r--  drivers/md/raid5.c | 11
-rw-r--r--  drivers/misc/mei/hbm.c | 2
-rw-r--r--  drivers/mmc/core/core.c | 6
-rw-r--r--  drivers/mmc/core/host.c | 4
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 14
-rw-r--r--  drivers/mmc/host/pxamci.c | 66
-rw-r--r--  drivers/mmc/host/sdhci-of-at91.c | 1
-rw-r--r--  drivers/mmc/host/sdhci-pxav3.c | 6
-rw-r--r--  drivers/mmc/host/sdhci.c | 2
-rw-r--r--  drivers/mmc/host/sdhci.h | 5
-rw-r--r--  drivers/mmc/host/sunxi-mmc.c | 53
-rw-r--r--  drivers/mtd/nand/mxc_nand.c | 2
-rw-r--r--  drivers/mtd/nand/sunxi_nand.c | 27
-rw-r--r--  drivers/mtd/ubi/io.c | 5
-rw-r--r--  drivers/mtd/ubi/vtbl.c | 1
-rw-r--r--  drivers/mtd/ubi/wl.c | 1
-rw-r--r--  drivers/net/dsa/mv88e6xxx.c | 2
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.c | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.h | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 9
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 3
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 22
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r--  drivers/nvmem/core.c | 8
-rw-r--r--  drivers/nvmem/sunxi_sid.c | 11
-rw-r--r--  drivers/pci/pci-driver.c | 7
-rw-r--r--  drivers/phy/Kconfig | 18
-rw-r--r--  drivers/phy/Makefile | 2
-rw-r--r--  drivers/phy/phy-bcm-cygnus-pcie.c | 213
-rw-r--r--  drivers/phy/phy-berlin-sata.c | 1
-rw-r--r--  drivers/phy/phy-mt65xx-usb3.c | 506
-rw-r--r--  drivers/phy/phy-qcom-ufs.c | 11
-rw-r--r--  drivers/phy/phy-rockchip-usb.c | 6
-rw-r--r--  drivers/phy/phy-samsung-usb2.c | 25
-rw-r--r--  drivers/phy/phy-samsung-usb2.h | 2
-rw-r--r--  drivers/phy/phy-sun4i-usb.c | 22
-rw-r--r--  drivers/regulator/axp20x-regulator.c | 4
-rw-r--r--  drivers/regulator/core.c | 4
-rw-r--r--  drivers/scsi/3w-9xxx.c | 28
-rw-r--r--  drivers/scsi/libiscsi.c | 17
-rw-r--r--  drivers/scsi/scsi_dh.c | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
-rw-r--r--  drivers/spi/spi-davinci.c | 7
-rw-r--r--  drivers/staging/speakup/fakekey.c | 1
-rw-r--r--  drivers/thermal/power_allocator.c | 10
-rw-r--r--  drivers/tty/n_tty.c | 15
-rw-r--r--  drivers/tty/serial/8250/8250_port.c | 8
-rw-r--r--  drivers/tty/serial/atmel_serial.c | 2
-rw-r--r--  drivers/tty/serial/imx.c | 20
-rw-r--r--  drivers/tty/tty_buffer.c | 22
-rw-r--r--  drivers/tty/tty_io.c | 40
-rw-r--r--  drivers/usb/Makefile | 1
-rw-r--r--  drivers/usb/core/config.c | 4
-rw-r--r--  drivers/usb/core/driver.c | 14
-rw-r--r--  drivers/usb/core/hcd.c | 110
-rw-r--r--  drivers/usb/core/hub.c | 87
-rw-r--r--  drivers/usb/core/message.c | 41
-rw-r--r--  drivers/usb/core/quirks.c | 13
-rw-r--r--  drivers/usb/core/sysfs.c | 36
-rw-r--r--  drivers/usb/core/usb.c | 2
-rw-r--r--  drivers/usb/core/usb.h | 2
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_ep.c | 4
-rw-r--r--  drivers/usb/host/Kconfig | 10
-rw-r--r--  drivers/usb/host/Makefile | 6
-rw-r--r--  drivers/usb/host/ehci-msm.c | 6
-rw-r--r--  drivers/usb/host/ehci-orion.c | 3
-rw-r--r--  drivers/usb/host/ehci-platform.c | 12
-rw-r--r--  drivers/usb/host/ehci-spear.c | 1
-rw-r--r--  drivers/usb/host/fotg210-hcd.c | 1960
-rw-r--r--  drivers/usb/host/fotg210.h | 36
-rw-r--r--  drivers/usb/host/fsl-mph-dr-of.c | 1
-rw-r--r--  drivers/usb/host/fusbh200-hcd.c | 5894
-rw-r--r--  drivers/usb/host/fusbh200.h | 675
-rw-r--r--  drivers/usb/host/ohci-spear.c | 1
-rw-r--r--  drivers/usb/host/u132-hcd.c | 3
-rw-r--r--  drivers/usb/host/uhci-platform.c | 1
-rw-r--r--  drivers/usb/host/whci/init.c | 3
-rw-r--r--  drivers/usb/host/xhci-dbg.c | 17
-rw-r--r--  drivers/usb/host/xhci-hub.c | 185
-rw-r--r--  drivers/usb/host/xhci-mem.c | 50
-rw-r--r--  drivers/usb/host/xhci-pci.c | 6
-rw-r--r--  drivers/usb/host/xhci-plat.c | 29
-rw-r--r--  drivers/usb/host/xhci-ring.c | 122
-rw-r--r--  drivers/usb/host/xhci.c | 27
-rw-r--r--  drivers/usb/host/xhci.h | 72
-rw-r--r--  drivers/usb/misc/chaoskey.c | 2
-rw-r--r--  drivers/usb/renesas_usbhs/common.c | 7
-rw-r--r--  drivers/usb/storage/isd200.c | 30
-rw-r--r--  drivers/usb/storage/uas.c | 16
-rw-r--r--  drivers/usb/usbip/vhci_hcd.c | 8
-rw-r--r--  drivers/video/fbdev/broadsheetfb.c | 8
-rw-r--r--  drivers/video/fbdev/fsl-diu-fb.c | 9
-rw-r--r--  drivers/video/fbdev/mb862xx/mb862xxfbdrv.c | 1
-rw-r--r--  drivers/video/fbdev/omap2/displays-new/connector-dvi.c | 2
-rw-r--r--  drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c | 1
-rw-r--r--  drivers/video/fbdev/tridentfb.c | 12
-rw-r--r--  drivers/video/of_display_timing.c | 1
-rw-r--r--  drivers/watchdog/Kconfig | 3
-rw-r--r--  drivers/watchdog/bcm2835_wdt.c | 10
-rw-r--r--  drivers/watchdog/gef_wdt.c | 1
-rw-r--r--  drivers/watchdog/mena21_wdt.c | 1
-rw-r--r--  drivers/watchdog/moxart_wdt.c | 1
215 files changed, 3492 insertions, 8873 deletions
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 2614a839c60d..42c66b64c12c 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1044,8 +1044,10 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
goto err_exit;
mutex_lock(&ec->mutex);
+ result = -ENODATA;
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
+ result = 0;
q->handler = acpi_ec_get_query_handler(handler);
ec_dbg_evt("Query(0x%02x) scheduled",
q->handler->query_bit);
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 6da0f9beab19..c9336751e5e3 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -372,6 +372,7 @@ static int acpi_isa_register_gsi(struct pci_dev *dev)
/* Interrupt Line values above 0xF are forbidden */
if (dev->irq > 0 && (dev->irq <= 0xF) &&
+ acpi_isa_irq_available(dev->irq) &&
(acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n",
pin_name(dev->pin), dev->irq);
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 3b4ea98e3ea0..7c8408b946ca 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -498,8 +498,7 @@ int __init acpi_irq_penalty_init(void)
PIRQ_PENALTY_PCI_POSSIBLE;
}
}
- /* Add a penalty for the SCI */
- acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING;
+
return 0;
}
@@ -553,6 +552,13 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
irq = link->irq.possible[i];
}
}
+ if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) {
+ printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. "
+ "Try pci=noacpi or acpi=off\n",
+ acpi_device_name(link->device),
+ acpi_device_bid(link->device));
+ return -ENODEV;
+ }
/* Attempt to enable the link device at this IRQ. */
if (acpi_pci_link_set(link, irq)) {
@@ -821,6 +827,12 @@ void acpi_penalize_isa_irq(int irq, int active)
}
}
+bool acpi_isa_irq_available(int irq)
+{
+ return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) ||
+ acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS);
+}
+
/*
* Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with
* PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 28cd75c535b0..7ae7cd990fbf 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -892,10 +892,17 @@ static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev)
u32 microvolt[3] = {0};
int count, ret;
- count = of_property_count_u32_elems(opp->np, "opp-microvolt");
- if (!count)
+ /* Missing property isn't a problem, but an invalid entry is */
+ if (!of_find_property(opp->np, "opp-microvolt", NULL))
return 0;
+ count = of_property_count_u32_elems(opp->np, "opp-microvolt");
+ if (count < 0) {
+ dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n",
+ __func__, count);
+ return count;
+ }
+
/* There can be one or three elements here */
if (count != 1 && count != 3) {
dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
@@ -1063,7 +1070,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add);
* share a common logic which is isolated here.
*
* Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
* successful.
*
* Locking: The internal device_opp and opp structures are RCU protected.
@@ -1151,7 +1158,7 @@ unlock:
* mutex locking or synchronize_rcu() blocking calls cannot be used.
*
* Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
* successful.
*/
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
@@ -1177,7 +1184,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
* mutex locking or synchronize_rcu() blocking calls cannot be used.
*
* Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
* successful.
*/
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index f42f2bac6466..4c55cfbad19e 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -32,8 +32,7 @@ static DEFINE_MUTEX(regmap_debugfs_early_lock);
/* Calculate the length of a fixed format */
static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
{
- snprintf(buf, buf_size, "%x", max_val);
- return strlen(buf);
+ return snprintf(NULL, 0, "%x", max_val);
}
static ssize_t regmap_name_read_file(struct file *file,
@@ -432,7 +431,7 @@ static ssize_t regmap_access_read_file(struct file *file,
/* If we're in the region the user is trying to read */
if (p >= *ppos) {
/* ...but not beyond it */
- if (buf_pos >= count - 1 - tot_len)
+ if (buf_pos + tot_len + 1 >= count)
break;
/* Format the register */
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f9889b6bc02c..674f800a3b57 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1486,17 +1486,16 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
{
const bool write = cmd->rq->cmd_flags & REQ_WRITE;
struct loop_device *lo = cmd->rq->q->queuedata;
- int ret = -EIO;
+ int ret = 0;
- if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY))
+ if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
+ ret = -EIO;
goto failed;
+ }
ret = do_req_filebacked(lo, cmd->rq);
-
failed:
- if (ret)
- cmd->rq->errors = -EIO;
- blk_mq_complete_request(cmd->rq);
+ blk_mq_complete_request(cmd->rq, ret ? -EIO : 0);
}
static void loop_queue_write_work(struct work_struct *work)
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index a295b98c6bae..1c9e4fe5aa44 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -289,7 +289,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd)
case NULL_IRQ_SOFTIRQ:
switch (queue_mode) {
case NULL_Q_MQ:
- blk_mq_complete_request(cmd->rq);
+ blk_mq_complete_request(cmd->rq, cmd->rq->errors);
break;
case NULL_Q_RQ:
blk_complete_request(cmd->rq);
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index b97fc3fe0916..6f04771f1019 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -618,16 +618,15 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
spin_unlock_irqrestore(req->q->queue_lock, flags);
return;
}
+
if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
if (cmd_rq->ctx == CMD_CTX_CANCELLED)
- req->errors = -EINTR;
- else
- req->errors = status;
+ status = -EINTR;
} else {
- req->errors = nvme_error_status(status);
+ status = nvme_error_status(status);
}
- } else
- req->errors = 0;
+ }
+
if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
u32 result = le32_to_cpup(&cqe->result);
req->special = (void *)(uintptr_t)result;
@@ -650,7 +649,7 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
}
nvme_free_iod(nvmeq->dev, iod);
- blk_mq_complete_request(req);
+ blk_mq_complete_request(req, status);
}
/* length is in bytes. gfp flags indicates whether we may sleep. */
@@ -863,8 +862,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ns && ns->ms && !blk_integrity_rq(req)) {
if (!(ns->pi_type && ns->ms == 8) &&
req->cmd_type != REQ_TYPE_DRV_PRIV) {
- req->errors = -EFAULT;
- blk_mq_complete_request(req);
+ blk_mq_complete_request(req, -EFAULT);
return BLK_MQ_RQ_QUEUE_OK;
}
}
@@ -2439,6 +2437,22 @@ static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn)
list_sort(NULL, &dev->namespaces, ns_cmp);
}
+static void nvme_set_irq_hints(struct nvme_dev *dev)
+{
+ struct nvme_queue *nvmeq;
+ int i;
+
+ for (i = 0; i < dev->online_queues; i++) {
+ nvmeq = dev->queues[i];
+
+ if (!nvmeq->tags || !(*nvmeq->tags))
+ continue;
+
+ irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+ blk_mq_tags_cpumask(*nvmeq->tags));
+ }
+}
+
static void nvme_dev_scan(struct work_struct *work)
{
struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
@@ -2450,6 +2464,7 @@ static void nvme_dev_scan(struct work_struct *work)
return;
nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn));
kfree(ctrl);
+ nvme_set_irq_hints(dev);
}
/*
@@ -2953,22 +2968,6 @@ static const struct file_operations nvme_dev_fops = {
.compat_ioctl = nvme_dev_ioctl,
};
-static void nvme_set_irq_hints(struct nvme_dev *dev)
-{
- struct nvme_queue *nvmeq;
- int i;
-
- for (i = 0; i < dev->online_queues; i++) {
- nvmeq = dev->queues[i];
-
- if (!nvmeq->tags || !(*nvmeq->tags))
- continue;
-
- irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
- blk_mq_tags_cpumask(*nvmeq->tags));
- }
-}
-
static int nvme_dev_start(struct nvme_dev *dev)
{
int result;
@@ -3010,8 +3009,6 @@ static int nvme_dev_start(struct nvme_dev *dev)
if (result)
goto free_tags;
- nvme_set_irq_hints(dev);
-
dev->event_limit = 1;
return result;
@@ -3062,7 +3059,6 @@ static int nvme_dev_resume(struct nvme_dev *dev)
} else {
nvme_unfreeze_queues(dev);
nvme_dev_add(dev);
- nvme_set_irq_hints(dev);
}
return 0;
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index e93899cc6f60..6ca35495a5be 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -144,7 +144,7 @@ static void virtblk_done(struct virtqueue *vq)
do {
virtqueue_disable_cb(vq);
while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
- blk_mq_complete_request(vbr->req);
+ blk_mq_complete_request(vbr->req, vbr->req->errors);
req_done = true;
}
if (unlikely(virtqueue_is_broken(vq)))
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index deb3f001791f..767657565de6 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -212,6 +212,9 @@ static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref,
static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
+ struct pending_req *req, *n;
+ int i = 0, j;
+
if (blkif->xenblkd) {
kthread_stop(blkif->xenblkd);
wake_up(&blkif->shutdown_wq);
@@ -238,13 +241,28 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
/* Remove all persistent grants and the cache of ballooned pages. */
xen_blkbk_free_caches(blkif);
+ /* Check that there is no request in use */
+ list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
+ list_del(&req->free_list);
+
+ for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
+ kfree(req->segments[j]);
+
+ for (j = 0; j < MAX_INDIRECT_PAGES; j++)
+ kfree(req->indirect_pages[j]);
+
+ kfree(req);
+ i++;
+ }
+
+ WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
+ blkif->nr_ring_pages = 0;
+
return 0;
}
static void xen_blkif_free(struct xen_blkif *blkif)
{
- struct pending_req *req, *n;
- int i = 0, j;
xen_blkif_disconnect(blkif);
xen_vbd_free(&blkif->vbd);
@@ -257,22 +275,6 @@ static void xen_blkif_free(struct xen_blkif *blkif)
BUG_ON(!list_empty(&blkif->free_pages));
BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
- /* Check that there is no request in use */
- list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
- list_del(&req->free_list);
-
- for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
- kfree(req->segments[j]);
-
- for (j = 0; j < MAX_INDIRECT_PAGES; j++)
- kfree(req->indirect_pages[j]);
-
- kfree(req);
- i++;
- }
-
- WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
-
kmem_cache_free(xen_blkif_cachep, blkif);
}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 0823a96902f8..611170896b8c 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1142,6 +1142,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
RING_IDX i, rp;
unsigned long flags;
struct blkfront_info *info = (struct blkfront_info *)dev_id;
+ int error;
spin_lock_irqsave(&info->io_lock, flags);
@@ -1182,37 +1183,37 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
continue;
}
- req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
+ error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
switch (bret->operation) {
case BLKIF_OP_DISCARD:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
struct request_queue *rq = info->rq;
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- req->errors = -EOPNOTSUPP;
+ error = -EOPNOTSUPP;
info->feature_discard = 0;
info->feature_secdiscard = 0;
queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
}
- blk_mq_complete_request(req);
+ blk_mq_complete_request(req, error);
break;
case BLKIF_OP_FLUSH_DISKCACHE:
case BLKIF_OP_WRITE_BARRIER:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- req->errors = -EOPNOTSUPP;
+ error = -EOPNOTSUPP;
}
if (unlikely(bret->status == BLKIF_RSP_ERROR &&
info->shadow[id].req.u.rw.nr_segments == 0)) {
printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- req->errors = -EOPNOTSUPP;
+ error = -EOPNOTSUPP;
}
- if (unlikely(req->errors)) {
- if (req->errors == -EOPNOTSUPP)
- req->errors = 0;
+ if (unlikely(error)) {
+ if (error == -EOPNOTSUPP)
+ error = 0;
info->feature_flush = 0;
xlvbd_flush(info);
}
@@ -1223,7 +1224,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
"request: %x\n", bret->status);
- blk_mq_complete_request(req);
+ blk_mq_complete_request(req, error);
break;
default:
BUG();
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 1a82f3a17681..0ebca8ba7bc4 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -36,7 +36,6 @@ config ARM_CCI400_PORT_CTRL
config ARM_CCI500_PMU
bool "ARM CCI500 PMU support"
- default y
depends on (ARM && CPU_V7) || ARM64
depends on PERF_EVENTS
select ARM_CCI_PMU
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index 7c1e1f58e2da..2fe37f708dc7 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -164,7 +164,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
* the values for DIV_COPY and DIV_HPM dividers need not be set.
*/
div0 = cfg_data->div0;
- if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) {
+ if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
div1 = cfg_data->div1;
if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK)
div1 = readl(base + E4210_DIV_CPU1) &
@@ -185,7 +185,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
WARN_ON(alt_div >= MAX_DIV);
- if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
+ if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
/*
* In Exynos4210, ATB clock parent is also mout_core. So
* ATB clock also needs to be mantained at safe speed.
@@ -206,7 +206,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
writel(div0, base + E4210_DIV_CPU0);
wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL);
- if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) {
+ if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
writel(div1, base + E4210_DIV_CPU1);
wait_until_divider_stable(base + E4210_DIV_STAT_CPU1,
DIV_MASK_ALL);
@@ -225,7 +225,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
unsigned long mux_reg;
/* find out the divider values to use for clock data */
- if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
+ if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
while ((cfg_data->prate * 1000) != ndata->new_rate) {
if (cfg_data->prate == 0)
return -EINVAL;
@@ -240,7 +240,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU);
wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1);
- if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
+ if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK);
div_mask |= E4210_DIV0_ATB_MASK;
}
diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
index 676ee8f6d813..8831e1a05367 100644
--- a/drivers/clk/ti/clk-3xxx.c
+++ b/drivers/clk/ti/clk-3xxx.c
@@ -374,7 +374,6 @@ static struct ti_dt_clk omap3xxx_clks[] = {
DT_CLK(NULL, "gpio2_ick", "gpio2_ick"),
DT_CLK(NULL, "wdt3_ick", "wdt3_ick"),
DT_CLK(NULL, "uart3_ick", "uart3_ick"),
- DT_CLK(NULL, "uart4_ick", "uart4_ick"),
DT_CLK(NULL, "gpt9_ick", "gpt9_ick"),
DT_CLK(NULL, "gpt8_ick", "gpt8_ick"),
DT_CLK(NULL, "gpt7_ick", "gpt7_ick"),
@@ -519,6 +518,7 @@ static struct ti_dt_clk am35xx_clks[] = {
static struct ti_dt_clk omap36xx_clks[] = {
DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"),
DT_CLK(NULL, "uart4_fck", "uart4_fck"),
+ DT_CLK(NULL, "uart4_ick", "uart4_ick"),
{ .node_name = NULL },
};
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index 9b5b289e6334..a911d7de3377 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -18,7 +18,6 @@
#include "clock.h"
-#define DRA7_DPLL_ABE_DEFFREQ 180633600
#define DRA7_DPLL_GMAC_DEFFREQ 1000000000
#define DRA7_DPLL_USB_DEFFREQ 960000000
@@ -313,27 +312,12 @@ static struct ti_dt_clk dra7xx_clks[] = {
int __init dra7xx_dt_clk_init(void)
{
int rc;
- struct clk *abe_dpll_mux, *sys_clkin2, *dpll_ck, *hdcp_ck;
+ struct clk *dpll_ck, *hdcp_ck;
ti_dt_clocks_register(dra7xx_clks);
omap2_clk_disable_autoidle_all();
- abe_dpll_mux = clk_get_sys(NULL, "abe_dpll_sys_clk_mux");
- sys_clkin2 = clk_get_sys(NULL, "sys_clkin2");
- dpll_ck = clk_get_sys(NULL, "dpll_abe_ck");
-
- rc = clk_set_parent(abe_dpll_mux, sys_clkin2);
- if (!rc)
- rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ);
- if (rc)
- pr_err("%s: failed to configure ABE DPLL!\n", __func__);
-
- dpll_ck = clk_get_sys(NULL, "dpll_abe_m2x2_ck");
- rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ * 2);
- if (rc)
- pr_err("%s: failed to configure ABE DPLL m2x2!\n", __func__);
-
dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck");
rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ);
if (rc)
diff --git a/drivers/clk/ti/clkt_dflt.c b/drivers/clk/ti/clkt_dflt.c
index 90d7d8a21c49..1ddc288fce4e 100644
--- a/drivers/clk/ti/clkt_dflt.c
+++ b/drivers/clk/ti/clkt_dflt.c
@@ -222,7 +222,7 @@ int omap2_dflt_clk_enable(struct clk_hw *hw)
}
}
- if (unlikely(!clk->enable_reg)) {
+ if (unlikely(IS_ERR(clk->enable_reg))) {
pr_err("%s: %s missing enable_reg\n", __func__,
clk_hw_get_name(hw));
ret = -EINVAL;
@@ -264,7 +264,7 @@ void omap2_dflt_clk_disable(struct clk_hw *hw)
u32 v;
clk = to_clk_hw_omap(hw);
- if (!clk->enable_reg) {
+ if (IS_ERR(clk->enable_reg)) {
/*
* 'independent' here refers to a clock which is not
* controlled by its parent.
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c
index bb2c2b050964..d3c1742ded1a 100644
--- a/drivers/clocksource/rockchip_timer.c
+++ b/drivers/clocksource/rockchip_timer.c
@@ -148,7 +148,7 @@ static void __init rk_timer_init(struct device_node *np)
bc_timer.freq = clk_get_rate(timer_clk);
irq = irq_of_parse_and_map(np, 0);
- if (irq == NO_IRQ) {
+ if (!irq) {
pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME);
return;
}
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c
index edacf3902e10..1cea08cf603e 100644
--- a/drivers/clocksource/timer-keystone.c
+++ b/drivers/clocksource/timer-keystone.c
@@ -152,7 +152,7 @@ static void __init keystone_timer_init(struct device_node *np)
int irq, error;
irq = irq_of_parse_and_map(np, 0);
- if (irq == NO_IRQ) {
+ if (!irq) {
pr_err("%s: failed to map interrupts\n", __func__);
return;
}
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 798277227de7..cec1ee2d2f74 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -149,6 +149,9 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
struct acpi_cpufreq_data *data = policy->driver_data;
+ if (unlikely(!data))
+ return -ENODEV;
+
return cpufreq_show_cpus(data->freqdomain_cpus, buf);
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index ef5ed9470de9..25c4c15103a0 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1436,8 +1436,10 @@ static void cpufreq_offline_finish(unsigned int cpu)
* since this is a core component, and is essential for the
* subsequent light-weight ->init() to succeed.
*/
- if (cpufreq_driver->exit)
+ if (cpufreq_driver->exit) {
cpufreq_driver->exit(policy);
+ policy->freq_table = NULL;
+ }
}
/**
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 3927ed9fdbd5..ca848cc6a8fd 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -492,7 +492,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
if (err) {
put_device(&devfreq->dev);
mutex_unlock(&devfreq->lock);
- goto err_dev;
+ goto err_out;
}
mutex_unlock(&devfreq->lock);
@@ -518,7 +518,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
err_init:
list_del(&devfreq->node);
device_unregister(&devfreq->dev);
-err_dev:
kfree(devfreq);
err_out:
return ERR_PTR(err);
@@ -795,8 +794,10 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
ret = PTR_ERR(governor);
goto out;
}
- if (df->governor == governor)
+ if (df->governor == governor) {
+ ret = 0;
goto out;
+ }
if (df->governor) {
ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index a165b4bfd330..dd24375b76dd 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -455,6 +455,15 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
return desc;
}
+void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
+{
+ memset(&desc->lld, 0, sizeof(desc->lld));
+ INIT_LIST_HEAD(&desc->descs_list);
+ desc->direction = DMA_TRANS_NONE;
+ desc->xfer_size = 0;
+ desc->active_xfer = false;
+}
+
/* Call must be protected by lock. */
static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
{
@@ -466,7 +475,7 @@ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
desc = list_first_entry(&atchan->free_descs_list,
struct at_xdmac_desc, desc_node);
list_del(&desc->desc_node);
- desc->active_xfer = false;
+ at_xdmac_init_used_desc(desc);
}
return desc;
@@ -875,14 +884,14 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
if (xt->src_inc) {
if (xt->src_sgl)
- chan_cc |= AT_XDMAC_CC_SAM_UBS_DS_AM;
+ chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
else
chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
}
if (xt->dst_inc) {
if (xt->dst_sgl)
- chan_cc |= AT_XDMAC_CC_DAM_UBS_DS_AM;
+ chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
else
chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
}
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 3ff284c8e3d5..09479d4be4db 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -554,10 +554,18 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
mutex_lock(&dma_list_mutex);
if (chan->client_count == 0) {
+ struct dma_device *device = chan->device;
+
+ dma_cap_set(DMA_PRIVATE, device->cap_mask);
+ device->privatecnt++;
err = dma_chan_get(chan);
- if (err)
+ if (err) {
pr_debug("%s: failed to get %s: (%d)\n",
__func__, dma_chan_name(chan), err);
+ chan = NULL;
+ if (--device->privatecnt == 0)
+ dma_cap_clear(DMA_PRIVATE, device->cap_mask);
+ }
} else
chan = NULL;
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index cf1c87fa1edd..bedce038c6e2 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
INIT_LIST_HEAD(&dw->dma.channels);
for (i = 0; i < nr_channels; i++) {
struct dw_dma_chan *dwc = &dw->chan[i];
- int r = nr_channels - i - 1;
dwc->chan.device = &dw->dma;
dma_cookie_init(&dwc->chan);
@@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* 7 is highest priority & 0 is lowest. */
if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
- dwc->priority = r;
+ dwc->priority = nr_channels - i - 1;
else
dwc->priority = i;
@@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* Hardware configuration */
if (autocfg) {
unsigned int dwc_params;
+ unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
void __iomem *addr = chip->regs + r * sizeof(u32);
dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 18c14e1f1414..48d6d9e94f67 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -355,23 +355,23 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
struct idma64_desc *desc = idma64c->desc;
struct idma64_hw_desc *hw;
size_t bytes = desc->length;
- u64 llp;
- u32 ctlhi;
+ u64 llp = channel_readq(idma64c, LLP);
+ u32 ctlhi = channel_readl(idma64c, CTL_HI);
unsigned int i = 0;
- llp = channel_readq(idma64c, LLP);
do {
hw = &desc->hw[i];
- } while ((hw->llp != llp) && (++i < desc->ndesc));
+ if (hw->llp == llp)
+ break;
+ bytes -= hw->len;
+ } while (++i < desc->ndesc);
if (!i)
return bytes;
- do {
- bytes -= desc->hw[--i].len;
- } while (i);
+ /* The current chunk is not fully transfered yet */
+ bytes += desc->hw[--i].len;
- ctlhi = channel_readl(idma64c, CTL_HI);
return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
}
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 5cb61ce01036..fc4156afa070 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -473,8 +473,10 @@ static void pxad_free_phy(struct pxad_chan *chan)
return;
/* clear the channel mapping in DRCMR */
- reg = pxad_drcmr(chan->drcmr);
- writel_relaxed(0, chan->phy->base + reg);
+ if (chan->drcmr <= DRCMR_CHLNUM) {
+ reg = pxad_drcmr(chan->drcmr);
+ writel_relaxed(0, chan->phy->base + reg);
+ }
spin_lock_irqsave(&pdev->phy_lock, flags);
for (i = 0; i < 32; i++)
@@ -516,8 +518,10 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
"%s(); phy=%p(%d) misaligned=%d\n", __func__,
phy, phy->idx, misaligned);
- reg = pxad_drcmr(phy->vchan->drcmr);
- writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+ if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
+ reg = pxad_drcmr(phy->vchan->drcmr);
+ writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+ }
dalgn = phy_readl_relaxed(phy, DALGN);
if (misaligned)
@@ -887,6 +891,7 @@ pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
struct dma_async_tx_descriptor *tx;
struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);
+ INIT_LIST_HEAD(&vd->node);
tx = vchan_tx_prep(vc, vd, tx_flags);
tx->tx_submit = pxad_tx_submit;
dev_dbg(&chan->vc.chan.dev->device,
@@ -910,14 +915,18 @@ static void pxad_get_config(struct pxad_chan *chan,
width = chan->cfg.src_addr_width;
dev_addr = chan->cfg.src_addr;
*dev_src = dev_addr;
- *dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC;
+ *dcmd |= PXA_DCMD_INCTRGADDR;
+ if (chan->drcmr <= DRCMR_CHLNUM)
+ *dcmd |= PXA_DCMD_FLOWSRC;
}
if (dir == DMA_MEM_TO_DEV) {
maxburst = chan->cfg.dst_maxburst;
width = chan->cfg.dst_addr_width;
dev_addr = chan->cfg.dst_addr;
*dev_dst = dev_addr;
- *dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG;
+ *dcmd |= PXA_DCMD_INCSRCADDR;
+ if (chan->drcmr <= DRCMR_CHLNUM)
+ *dcmd |= PXA_DCMD_FLOWTRG;
}
if (dir == DMA_MEM_TO_MEM)
*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
@@ -1177,6 +1186,16 @@ static unsigned int pxad_residue(struct pxad_chan *chan,
else
curr = phy_readl_relaxed(chan->phy, DTADR);
+ /*
+ * curr has to be actually read before checking descriptor
+ * completion, so that a curr inside a status updater
+ * descriptor implies the following test returns true, and
+ * preventing reordering of curr load and the test.
+ */
+ rmb();
+ if (is_desc_completed(vd))
+ goto out;
+
for (i = 0; i < sw_desc->nb_desc - 1; i++) {
hw_desc = sw_desc->hw_desc[i];
if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index a1a500d96ff2..1661d518224a 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -599,13 +599,13 @@ get_next_cyclic_promise(struct sun4i_dma_contract *contract)
static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
{
struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
- struct sun4i_dma_promise *promise;
+ struct sun4i_dma_promise *promise, *tmp;
/* Free all the demands and completed demands */
- list_for_each_entry(promise, &contract->demands, list)
+ list_for_each_entry_safe(promise, tmp, &contract->demands, list)
kfree(promise);
- list_for_each_entry(promise, &contract->completed_demands, list)
+ list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
kfree(promise);
kfree(contract);
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index b23e8d52d126..8d57b1b12e41 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -59,7 +59,6 @@
#define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070
#define XGENE_DMA_RING_BLK_MEM_RDY 0xD074
#define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF
-#define XGENE_DMA_RING_DESC_CNT(v) (((v) & 0x0001FFFE) >> 1)
#define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num))
#define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v))
#define XGENE_DMA_RING_CMD_OFFSET 0x2C
@@ -379,14 +378,6 @@ static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
return flyby_type[src_cnt];
}
-static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring)
-{
- u32 __iomem *cmd_base = ring->cmd_base;
- u32 ring_state = ioread32(&cmd_base[1]);
-
- return XGENE_DMA_RING_DESC_CNT(ring_state);
-}
-
static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
dma_addr_t *paddr)
{
@@ -659,15 +650,12 @@ static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
}
-static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
- struct xgene_dma_desc_sw *desc_sw)
+static void xgene_chan_xfer_request(struct xgene_dma_chan *chan,
+ struct xgene_dma_desc_sw *desc_sw)
{
+ struct xgene_dma_ring *ring = &chan->tx_ring;
struct xgene_dma_desc_hw *desc_hw;
- /* Check if can push more descriptor to hw for execution */
- if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2))
- return -EBUSY;
-
/* Get hw descriptor from DMA tx ring */
desc_hw = &ring->desc_hw[ring->head];
@@ -694,11 +682,13 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
}
+ /* Increment the pending transaction count */
+ chan->pending += ((desc_sw->flags &
+ XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
+
/* Notify the hw that we have descriptor ready for execution */
iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
2 : 1, ring->cmd);
-
- return 0;
}
/**
@@ -710,7 +700,6 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
{
struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
- int ret;
/*
* If the list of pending descriptors is empty, then we
@@ -735,18 +724,13 @@ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
if (chan->pending >= chan->max_outstanding)
return;
- ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw);
- if (ret)
- return;
+ xgene_chan_xfer_request(chan, desc_sw);
/*
* Delete this element from ld pending queue and append it to
* ld running queue
*/
list_move_tail(&desc_sw->node, &chan->ld_running);
-
- /* Increment the pending transaction count */
- chan->pending++;
}
}
@@ -821,7 +805,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
* Decrement the pending transaction count
* as we have processed one
*/
- chan->pending--;
+ chan->pending -= ((desc_sw->flags &
+ XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
/*
* Delete this node from ld running queue and append it to
@@ -1421,15 +1406,18 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
struct xgene_dma_ring *ring,
enum xgene_dma_ring_cfgsize cfgsize)
{
+ int ret;
+
/* Setup DMA ring descriptor variables */
ring->pdma = chan->pdma;
ring->cfgsize = cfgsize;
ring->num = chan->pdma->ring_num++;
ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
- ring->size = xgene_dma_get_ring_size(chan, cfgsize);
- if (ring->size <= 0)
- return ring->size;
+ ret = xgene_dma_get_ring_size(chan, cfgsize);
+ if (ret <= 0)
+ return ret;
+ ring->size = ret;
/* Allocate memory for DMA ring descriptor */
ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
@@ -1482,7 +1470,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
/* Set the max outstanding request possible to this channel */
- chan->max_outstanding = rx_ring->slots;
+ chan->max_outstanding = tx_ring->slots;
return ret;
}
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
index 39915a6b7986..c017fcd8e07c 100644
--- a/drivers/dma/zx296702_dma.c
+++ b/drivers/dma/zx296702_dma.c
@@ -739,7 +739,7 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
struct dma_chan *chan;
struct zx_dma_chan *c;
- if (request > d->dma_requests)
+ if (request >= d->dma_requests)
return NULL;
chan = dma_get_any_slave_channel(&d->slave);
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index e29560e6b40b..950c87f5d279 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -13,6 +13,7 @@
*/
#include <linux/efi.h>
+#include <linux/sort.h>
#include <asm/efi.h>
#include "efistub.h"
@@ -305,6 +306,44 @@ fail:
*/
#define EFI_RT_VIRTUAL_BASE 0x40000000
+static int cmp_mem_desc(const void *l, const void *r)
+{
+ const efi_memory_desc_t *left = l, *right = r;
+
+ return (left->phys_addr > right->phys_addr) ? 1 : -1;
+}
+
+/*
+ * Returns whether region @left ends exactly where region @right starts,
+ * or false if either argument is NULL.
+ */
+static bool regions_are_adjacent(efi_memory_desc_t *left,
+ efi_memory_desc_t *right)
+{
+ u64 left_end;
+
+ if (left == NULL || right == NULL)
+ return false;
+
+ left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE;
+
+ return left_end == right->phys_addr;
+}
+
+/*
+ * Returns whether region @left and region @right have compatible memory type
+ * mapping attributes, and are both EFI_MEMORY_RUNTIME regions.
+ */
+static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left,
+ efi_memory_desc_t *right)
+{
+ static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT |
+ EFI_MEMORY_WC | EFI_MEMORY_UC |
+ EFI_MEMORY_RUNTIME;
+
+ return ((left->attribute ^ right->attribute) & mem_type_mask) == 0;
+}
+
/*
* efi_get_virtmap() - create a virtual mapping for the EFI memory map
*
@@ -317,33 +356,52 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
int *count)
{
u64 efi_virt_base = EFI_RT_VIRTUAL_BASE;
- efi_memory_desc_t *out = runtime_map;
+ efi_memory_desc_t *in, *prev = NULL, *out = runtime_map;
int l;
- for (l = 0; l < map_size; l += desc_size) {
- efi_memory_desc_t *in = (void *)memory_map + l;
+ /*
+ * To work around potential issues with the Properties Table feature
+ * introduced in UEFI 2.5, which may split PE/COFF executable images
+ * in memory into several RuntimeServicesCode and RuntimeServicesData
+ * regions, we need to preserve the relative offsets between adjacent
+ * EFI_MEMORY_RUNTIME regions with the same memory type attributes.
+ * The easiest way to find adjacent regions is to sort the memory map
+ * before traversing it.
+ */
+ sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL);
+
+ for (l = 0; l < map_size; l += desc_size, prev = in) {
u64 paddr, size;
+ in = (void *)memory_map + l;
if (!(in->attribute & EFI_MEMORY_RUNTIME))
continue;
+ paddr = in->phys_addr;
+ size = in->num_pages * EFI_PAGE_SIZE;
+
/*
* Make the mapping compatible with 64k pages: this allows
* a 4k page size kernel to kexec a 64k page size kernel and
* vice versa.
*/
- paddr = round_down(in->phys_addr, SZ_64K);
- size = round_up(in->num_pages * EFI_PAGE_SIZE +
- in->phys_addr - paddr, SZ_64K);
-
- /*
- * Avoid wasting memory on PTEs by choosing a virtual base that
- * is compatible with section mappings if this region has the
- * appropriate size and physical alignment. (Sections are 2 MB
- * on 4k granule kernels)
- */
- if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
- efi_virt_base = round_up(efi_virt_base, SZ_2M);
+ if (!regions_are_adjacent(prev, in) ||
+ !regions_have_compatible_memory_type_attrs(prev, in)) {
+
+ paddr = round_down(in->phys_addr, SZ_64K);
+ size += in->phys_addr - paddr;
+
+ /*
+ * Avoid wasting memory on PTEs by choosing a virtual
+ * base that is compatible with section mappings if this
+ * region has the appropriate size and physical
+ * alignment. (Sections are 2 MB on 4k granule kernels)
+ */
+ if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
+ efi_virt_base = round_up(efi_virt_base, SZ_2M);
+ else
+ efi_virt_base = round_up(efi_virt_base, SZ_64K);
+ }
in->virt_addr = efi_virt_base + in->phys_addr - paddr;
efi_virt_base += size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 1c3fc99c5465..8e995148f56e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -208,44 +208,6 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
return ret;
}
-static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
- cgs_handle_t *handle)
-{
- CGS_FUNC_ADEV;
- int r;
- uint32_t dma_handle;
- struct drm_gem_object *obj;
- struct amdgpu_bo *bo;
- struct drm_device *dev = adev->ddev;
- struct drm_file *file_priv = NULL, *priv;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(priv, &dev->filelist, lhead) {
- rcu_read_lock();
- if (priv->pid == get_pid(task_pid(current)))
- file_priv = priv;
- rcu_read_unlock();
- if (file_priv)
- break;
- }
- mutex_unlock(&dev->struct_mutex);
- r = dev->driver->prime_fd_to_handle(dev,
- file_priv, dmabuf_fd,
- &dma_handle);
- spin_lock(&file_priv->table_lock);
-
- /* Check if we currently have a reference on the object */
- obj = idr_find(&file_priv->object_idr, dma_handle);
- if (obj == NULL) {
- spin_unlock(&file_priv->table_lock);
- return -EINVAL;
- }
- spin_unlock(&file_priv->table_lock);
- bo = gem_to_amdgpu_bo(obj);
- *handle = (cgs_handle_t)bo;
- return 0;
-}
-
static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@@ -810,7 +772,6 @@ static const struct cgs_ops amdgpu_cgs_ops = {
};
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
- amdgpu_cgs_import_gpu_mem,
amdgpu_cgs_add_irq_source,
amdgpu_cgs_irq_get,
amdgpu_cgs_irq_put
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 749420f1ea6f..cb3c274edb0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -156,7 +156,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
uint64_t *chunk_array_user;
uint64_t *chunk_array;
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- unsigned size, i;
+ unsigned size;
+ int i;
int ret;
if (cs->in.num_chunks == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index cd6edc40c9cd..1e0bba29e167 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -1279,8 +1279,7 @@ amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action)
amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
}
if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
- ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+ amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level);
if (ext_encoder)
amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 774528ab8704..fab5471d25d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1262,6 +1262,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+ /* reset addr and status */
+ WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
+
+ if (!addr && !status)
+ return 0;
+
dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
entry->src_id, entry->src_data);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
@@ -1269,8 +1275,6 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
- /* reset addr and status */
- WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 9a07742620d0..7bc9e9fcf3d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1262,6 +1262,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+ /* reset addr and status */
+ WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
+
+ if (!addr && !status)
+ return 0;
+
dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
entry->src_id, entry->src_data);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
@@ -1269,8 +1275,6 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
- /* reset addr and status */
- WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
return 0;
}
diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h
index 488642f08267..3b47ae313e36 100644
--- a/drivers/gpu/drm/amd/include/cgs_linux.h
+++ b/drivers/gpu/drm/amd/include/cgs_linux.h
@@ -27,19 +27,6 @@
#include "cgs_common.h"
/**
- * cgs_import_gpu_mem() - Import dmabuf handle
- * @cgs_device: opaque device handle
- * @dmabuf_fd: DMABuf file descriptor
- * @handle: memory handle (output)
- *
- * Must be called in the process context that dmabuf_fd belongs to.
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd,
- cgs_handle_t *handle);
-
-/**
* cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources
* @private_data: private data provided to cgs_add_irq_source
* @src_id: interrupt source ID
@@ -114,16 +101,12 @@ typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type);
typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type);
struct cgs_os_ops {
- cgs_import_gpu_mem_t import_gpu_mem;
-
/* IRQ handling */
cgs_add_irq_source_t add_irq_source;
cgs_irq_get_t irq_get;
cgs_irq_put_t irq_put;
};
-#define cgs_import_gpu_mem(dev,dmabuf_fd,handle) \
- CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle)
#define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \
CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \
private_data)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index e23df5fd3836..bf27a07dbce3 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -53,8 +53,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes);
-static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_branch *mstb);
+static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port);
@@ -804,8 +804,6 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
struct drm_dp_mst_port *port, *tmp;
bool wake_tx = false;
- cancel_work_sync(&mstb->mgr->work);
-
/*
* destroy all ports - don't need lock
* as there are no more references to the mst branch
@@ -863,29 +861,33 @@ static void drm_dp_destroy_port(struct kref *kref)
{
struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+
if (!port->input) {
port->vcpi.num_slots = 0;
kfree(port->cached_edid);
- /* we can't destroy the connector here, as
- we might be holding the mode_config.mutex
- from an EDID retrieval */
+ /*
+ * The only time we don't have a connector
+ * on an output port is if the connector init
+ * fails.
+ */
if (port->connector) {
+ /* we can't destroy the connector here, as
+ * we might be holding the mode_config.mutex
+ * from an EDID retrieval */
+
mutex_lock(&mgr->destroy_connector_lock);
list_add(&port->next, &mgr->destroy_connector_list);
mutex_unlock(&mgr->destroy_connector_lock);
schedule_work(&mgr->destroy_connector_work);
return;
}
+ /* no need to clean up vcpi
+ * as if we have no connector we never setup a vcpi */
drm_dp_port_teardown_pdt(port, port->pdt);
-
- if (!port->input && port->vcpi.vcpi > 0)
- drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
}
kfree(port);
-
- (*mgr->cbs->hotplug)(mgr);
}
static void drm_dp_put_port(struct drm_dp_mst_port *port)
@@ -1027,8 +1029,8 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
}
}
-static void build_mst_prop_path(struct drm_dp_mst_port *port,
- struct drm_dp_mst_branch *mstb,
+static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
+ int pnum,
char *proppath,
size_t proppath_size)
{
@@ -1041,7 +1043,7 @@ static void build_mst_prop_path(struct drm_dp_mst_port *port,
snprintf(temp, sizeof(temp), "-%d", port_num);
strlcat(proppath, temp, proppath_size);
}
- snprintf(temp, sizeof(temp), "-%d", port->port_num);
+ snprintf(temp, sizeof(temp), "-%d", pnum);
strlcat(proppath, temp, proppath_size);
}
@@ -1105,22 +1107,32 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
drm_dp_port_teardown_pdt(port, old_pdt);
ret = drm_dp_port_setup_pdt(port);
- if (ret == true) {
+ if (ret == true)
drm_dp_send_link_address(mstb->mgr, port->mstb);
- port->mstb->link_address_sent = true;
- }
}
if (created && !port->input) {
char proppath[255];
- build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
- port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
- if (port->port_num >= 8) {
+ build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
+ port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+ if (!port->connector) {
+ /* remove it from the port list */
+ mutex_lock(&mstb->mgr->lock);
+ list_del(&port->next);
+ mutex_unlock(&mstb->mgr->lock);
+ /* drop port list reference */
+ drm_dp_put_port(port);
+ goto out;
+ }
+ if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+ drm_mode_connector_set_tile_property(port->connector);
}
+ (*mstb->mgr->cbs->register_connector)(port->connector);
}
+out:
/* put reference to this port */
drm_dp_put_port(port);
}
@@ -1202,10 +1214,9 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
{
struct drm_dp_mst_port *port;
struct drm_dp_mst_branch *mstb_child;
- if (!mstb->link_address_sent) {
+ if (!mstb->link_address_sent)
drm_dp_send_link_address(mgr, mstb);
- mstb->link_address_sent = true;
- }
+
list_for_each_entry(port, &mstb->ports, next) {
if (port->input)
continue;
@@ -1458,8 +1469,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
mutex_unlock(&mgr->qlock);
}
-static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_branch *mstb)
+static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb)
{
int len;
struct drm_dp_sideband_msg_tx *txmsg;
@@ -1467,11 +1478,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
- return -ENOMEM;
+ return;
txmsg->dst = mstb;
len = build_link_address(txmsg);
+ mstb->link_address_sent = true;
drm_dp_queue_down_tx(mgr, txmsg);
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
@@ -1499,11 +1511,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
}
(*mgr->cbs->hotplug)(mgr);
}
- } else
+ } else {
+ mstb->link_address_sent = false;
DRM_DEBUG_KMS("link address failed %d\n", ret);
+ }
kfree(txmsg);
- return 0;
}
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
@@ -1978,6 +1991,8 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UPSTREAM_IS_SRC);
mutex_unlock(&mgr->lock);
+ flush_work(&mgr->work);
+ flush_work(&mgr->destroy_connector_work);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
@@ -2263,10 +2278,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
if (port->cached_edid)
edid = drm_edid_duplicate(port->cached_edid);
- else
+ else {
edid = drm_get_edid(connector, &port->aux.ddc);
-
- drm_mode_connector_set_tile_property(connector);
+ drm_mode_connector_set_tile_property(connector);
+ }
drm_dp_put_port(port);
return edid;
}
@@ -2671,7 +2686,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
struct drm_dp_mst_port *port;
-
+ bool send_hotplug = false;
/*
* Not a regular list traverse as we have to drop the destroy
* connector lock before destroying the connector, to avoid AB->BA
@@ -2694,7 +2709,10 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
if (!port->input && port->vcpi.vcpi > 0)
drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
kfree(port);
+ send_hotplug = true;
}
+ if (send_hotplug)
+ (*mgr->cbs->hotplug)(mgr);
}
/**
@@ -2747,6 +2765,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
*/
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
+ flush_work(&mgr->work);
flush_work(&mgr->destroy_connector_work);
mutex_lock(&mgr->payload_lock);
kfree(mgr->payloads);
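
The destroy_connector_work hunk above stops firing a hotplug callback per destroyed port and instead raises a single event after the whole list has been drained. The following is a minimal standalone sketch of that collect-then-notify pattern; the names (struct port, hotplug_notify, destroy_ports) are hypothetical stand-ins, not the DRM MST API.

#include <stdbool.h>
#include <stdio.h>

struct port { int id; struct port *next; };

/* Hypothetical stand-in for the mgr->cbs->hotplug() callback. */
static void hotplug_notify(void) { printf("hotplug event\n"); }

/* Destroy every queued port first, then raise one hotplug event,
 * instead of one event per destroyed port. */
static void destroy_ports(struct port *list)
{
	bool send_hotplug = false;

	while (list) {
		struct port *p = list;
		list = list->next;
		printf("destroying port %d\n", p->id);
		send_hotplug = true;
	}

	if (send_hotplug)
		hotplug_notify();
}

int main(void)
{
	struct port p1 = { 2, NULL };
	struct port p0 = { 1, &p1 };

	destroy_ports(&p0);
	return 0;
}

Deferring the notification keeps userspace from seeing a burst of redundant hotplug uevents while a branch device is torn down.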
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 418d299f3b12..ca08c472311b 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -345,7 +345,11 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
struct drm_crtc *crtc = mode_set->crtc;
int ret;
- if (crtc->funcs->cursor_set) {
+ if (crtc->funcs->cursor_set2) {
+ ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
+ if (ret)
+ error = true;
+ } else if (crtc->funcs->cursor_set) {
ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
if (ret)
error = true;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index d734780b31c0..a18164f2f6d2 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -94,7 +94,18 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
}
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
-static void __drm_kms_helper_poll_enable(struct drm_device *dev)
+/**
+ * drm_kms_helper_poll_enable_locked - re-enable output polling.
+ * @dev: drm_device
+ *
+ * This function re-enables the output polling work without
+ * locking the mode_config mutex.
+ *
+ * This is like drm_kms_helper_poll_enable(), but it is meant to be
+ * called from a context where the mode_config mutex is already
+ * held.
+ */
+void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
{
bool poll = false;
struct drm_connector *connector;
@@ -113,6 +124,8 @@ static void __drm_kms_helper_poll_enable(struct drm_device *dev)
if (poll)
schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
}
+EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
+
static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY, bool merge_type_bits)
@@ -174,7 +187,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
/* Re-enable polling in case the global poll config changed. */
if (drm_kms_helper_poll != dev->mode_config.poll_running)
- __drm_kms_helper_poll_enable(dev);
+ drm_kms_helper_poll_enable_locked(dev);
dev->mode_config.poll_running = drm_kms_helper_poll;
@@ -428,7 +441,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable);
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
mutex_lock(&dev->mode_config.mutex);
- __drm_kms_helper_poll_enable(dev);
+ drm_kms_helper_poll_enable_locked(dev);
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
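
The drm_probe_helper change above exports the previously static helper as a _locked variant and keeps the public entry point as a thin wrapper that takes the lock. A small sketch of that locked/unlocked split, using a pthread mutex as a stand-in for the mode_config mutex (names are hypothetical, not the DRM helpers):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must already hold cfg_lock (analogous to the *_locked variant). */
static void poll_enable_locked(void)
{
	printf("polling enabled\n");
}

/* Public entry point: takes the lock, then defers to the _locked variant. */
static void poll_enable(void)
{
	pthread_mutex_lock(&cfg_lock);
	poll_enable_locked();
	pthread_mutex_unlock(&cfg_lock);
}

int main(void)
{
	poll_enable();                  /* from an unlocked context */

	pthread_mutex_lock(&cfg_lock);  /* from a context that already holds the lock */
	poll_enable_locked();
	pthread_mutex_unlock(&cfg_lock);
	return 0;
}

Exposing the _locked variant lets callers such as the i915 hotplug storm handler re-enable polling without deadlocking on a mutex they already hold.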
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index cbdb78ef3bac..e6cbaca821a4 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -37,7 +37,6 @@
* DECON stands for Display and Enhancement controller.
*/
-#define DECON_DEFAULT_FRAMERATE 60
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
#define WINDOWS_NR 2
@@ -165,16 +164,6 @@ static u32 decon_calc_clkdiv(struct decon_context *ctx,
return (clkdiv < 0x100) ? clkdiv : 0xff;
}
-static bool decon_mode_fixup(struct exynos_drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- if (adjusted_mode->vrefresh == 0)
- adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE;
-
- return true;
-}
-
static void decon_commit(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
@@ -637,7 +626,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.enable = decon_enable,
.disable = decon_disable,
- .mode_fixup = decon_mode_fixup,
.commit = decon_commit,
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index d66ade0efac8..124fb9a56f02 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -1383,28 +1383,6 @@ static int exynos_dp_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int exynos_dp_suspend(struct device *dev)
-{
- struct exynos_dp_device *dp = dev_get_drvdata(dev);
-
- exynos_dp_disable(&dp->encoder);
- return 0;
-}
-
-static int exynos_dp_resume(struct device *dev)
-{
- struct exynos_dp_device *dp = dev_get_drvdata(dev);
-
- exynos_dp_enable(&dp->encoder);
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops exynos_dp_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume)
-};
-
static const struct of_device_id exynos_dp_match[] = {
{ .compatible = "samsung,exynos5-dp" },
{},
@@ -1417,7 +1395,6 @@ struct platform_driver dp_driver = {
.driver = {
.name = "exynos-dp",
.owner = THIS_MODULE,
- .pm = &exynos_dp_pm_ops,
.of_match_table = exynos_dp_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index c68a6a2a9b57..7f55ba6771c6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -28,7 +28,6 @@ int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
return 0;
}
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
{
@@ -39,7 +38,6 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
return 0;
}
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
int exynos_drm_device_subdrv_probe(struct drm_device *dev)
{
@@ -69,7 +67,6 @@ int exynos_drm_device_subdrv_probe(struct drm_device *dev)
return 0;
}
-EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe);
int exynos_drm_device_subdrv_remove(struct drm_device *dev)
{
@@ -87,7 +84,6 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev)
return 0;
}
-EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove);
int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
{
@@ -111,7 +107,6 @@ err:
}
return ret;
}
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open);
void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
{
@@ -122,4 +117,3 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
subdrv->close(dev, subdrv->dev, file);
}
}
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 0872aa2f450f..ed28823d3b35 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -41,20 +41,6 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
exynos_crtc->ops->disable(exynos_crtc);
}
-static bool
-exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-
- if (exynos_crtc->ops->mode_fixup)
- return exynos_crtc->ops->mode_fixup(exynos_crtc, mode,
- adjusted_mode);
-
- return true;
-}
-
static void
exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
@@ -99,7 +85,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
.enable = exynos_drm_crtc_enable,
.disable = exynos_drm_crtc_disable,
- .mode_fixup = exynos_drm_crtc_mode_fixup,
.mode_set_nofb = exynos_drm_crtc_mode_set_nofb,
.atomic_begin = exynos_crtc_atomic_begin,
.atomic_flush = exynos_crtc_atomic_flush,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 831d2e4cacf9..ae9e6b2d3758 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -304,6 +304,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
{
struct drm_connector *connector;
@@ -340,6 +341,7 @@ static int exynos_drm_resume(struct drm_device *dev)
return 0;
}
+#endif
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index b7ba21dfb696..6c717ba672db 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -82,7 +82,6 @@ struct exynos_drm_plane {
*
* @enable: enable the device
* @disable: disable the device
- * @mode_fixup: fix mode data before applying it
* @commit: set current hw specific display mode to hw.
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
@@ -103,9 +102,6 @@ struct exynos_drm_crtc;
struct exynos_drm_crtc_ops {
void (*enable)(struct exynos_drm_crtc *crtc);
void (*disable)(struct exynos_drm_crtc *crtc);
- bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
void (*commit)(struct exynos_drm_crtc *crtc);
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 2a652359af64..dd3a5e6d58c8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1206,23 +1206,6 @@ static struct exynos_drm_ipp_ops fimc_dst_ops = {
.set_addr = fimc_dst_set_addr,
};
-static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
-{
- DRM_DEBUG_KMS("enable[%d]\n", enable);
-
- if (enable) {
- clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
- clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
- ctx->suspended = false;
- } else {
- clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
- clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
- ctx->suspended = true;
- }
-
- return 0;
-}
-
static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
{
struct fimc_context *ctx = dev_id;
@@ -1780,6 +1763,24 @@ static int fimc_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("enable[%d]\n", enable);
+
+ if (enable) {
+ clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
+ clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
+ ctx->suspended = false;
+ } else {
+ clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
+ clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
+ ctx->suspended = true;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_PM_SLEEP
static int fimc_suspend(struct device *dev)
{
@@ -1806,7 +1807,6 @@ static int fimc_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM
static int fimc_runtime_suspend(struct device *dev)
{
struct fimc_context *ctx = get_fimc_context(dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 750a9e6b9e8d..3d1aba67758b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -41,7 +41,6 @@
* CPU Interface.
*/
-#define FIMD_DEFAULT_FRAMERATE 60
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
/* position control register for hardware window 0, 2 ~ 4.*/
@@ -377,16 +376,6 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
return (clkdiv < 0x100) ? clkdiv : 0xff;
}
-static bool fimd_mode_fixup(struct exynos_drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- if (adjusted_mode->vrefresh == 0)
- adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE;
-
- return true;
-}
-
static void fimd_commit(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
@@ -882,13 +871,12 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
return;
val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
- writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON);
+ writel(val, ctx->regs + DP_MIE_CLKCON);
}
static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
.enable = fimd_enable,
.disable = fimd_disable,
- .mode_fixup = fimd_mode_fixup,
.commit = fimd_commit,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 3734c34aed16..c17efdb238a6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1059,7 +1059,6 @@ int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
return 0;
}
-EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl);
int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
@@ -1230,7 +1229,6 @@ err:
g2d_put_cmdlist(g2d, node);
return ret;
}
-EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl);
int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
@@ -1293,7 +1291,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
out:
return 0;
}
-EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index f12fbc36b120..407afedb6003 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -56,39 +56,35 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
nr_pages = obj->size >> PAGE_SHIFT;
if (!is_drm_iommu_supported(dev)) {
- dma_addr_t start_addr;
- unsigned int i = 0;
-
obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
if (!obj->pages) {
DRM_ERROR("failed to allocate pages.\n");
return -ENOMEM;
}
+ }
- obj->cookie = dma_alloc_attrs(dev->dev,
- obj->size,
- &obj->dma_addr, GFP_KERNEL,
- &obj->dma_attrs);
- if (!obj->cookie) {
- DRM_ERROR("failed to allocate buffer.\n");
+ obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr,
+ GFP_KERNEL, &obj->dma_attrs);
+ if (!obj->cookie) {
+ DRM_ERROR("failed to allocate buffer.\n");
+ if (obj->pages)
drm_free_large(obj->pages);
- return -ENOMEM;
- }
+ return -ENOMEM;
+ }
+
+ if (obj->pages) {
+ dma_addr_t start_addr;
+ unsigned int i = 0;
start_addr = obj->dma_addr;
while (i < nr_pages) {
- obj->pages[i] = phys_to_page(start_addr);
+ obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev,
+ start_addr));
start_addr += PAGE_SIZE;
i++;
}
} else {
- obj->pages = dma_alloc_attrs(dev->dev, obj->size,
- &obj->dma_addr, GFP_KERNEL,
- &obj->dma_attrs);
- if (!obj->pages) {
- DRM_ERROR("failed to allocate buffer.\n");
- return -ENOMEM;
- }
+ obj->pages = obj->cookie;
}
DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
@@ -110,15 +106,11 @@ static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)obj->dma_addr, obj->size);
- if (!is_drm_iommu_supported(dev)) {
- dma_free_attrs(dev->dev, obj->size, obj->cookie,
- (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
- drm_free_large(obj->pages);
- } else
- dma_free_attrs(dev->dev, obj->size, obj->pages,
- (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
+ dma_free_attrs(dev->dev, obj->size, obj->cookie,
+ (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
- obj->dma_addr = (dma_addr_t)NULL;
+ if (!is_drm_iommu_supported(dev))
+ drm_free_large(obj->pages);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -156,18 +148,14 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
* once dmabuf's refcount becomes 0.
*/
if (obj->import_attach)
- goto out;
-
- exynos_drm_free_buf(exynos_gem_obj);
-
-out:
- drm_gem_free_mmap_offset(obj);
+ drm_prime_gem_destroy(obj, exynos_gem_obj->sgt);
+ else
+ exynos_drm_free_buf(exynos_gem_obj);
/* release file pointer to gem object. */
drm_gem_object_release(obj);
kfree(exynos_gem_obj);
- exynos_gem_obj = NULL;
}
unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
@@ -190,8 +178,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
return exynos_gem_obj->size;
}
-
-struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
@@ -212,6 +199,13 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
return ERR_PTR(ret);
}
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret < 0) {
+ drm_gem_object_release(obj);
+ kfree(exynos_gem_obj);
+ return ERR_PTR(ret);
+ }
+
DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
return exynos_gem_obj;
@@ -313,7 +307,7 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
drm_gem_object_unreference_unlocked(obj);
}
-int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
+static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
struct vm_area_struct *vma)
{
struct drm_device *drm_dev = exynos_gem_obj->base.dev;
@@ -342,7 +336,8 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
-{ struct exynos_drm_gem_obj *exynos_gem_obj;
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_exynos_gem_info *args = data;
struct drm_gem_object *obj;
@@ -402,6 +397,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
+ unsigned int flags;
int ret;
/*
@@ -413,16 +409,12 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
- if (is_drm_iommu_supported(dev)) {
- exynos_gem_obj = exynos_drm_gem_create(dev,
- EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
- args->size);
- } else {
- exynos_gem_obj = exynos_drm_gem_create(dev,
- EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
- args->size);
- }
+ if (is_drm_iommu_supported(dev))
+ flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
+ else
+ flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;
+ exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size);
if (IS_ERR(exynos_gem_obj)) {
dev_warn(dev->dev, "FB allocation failed.\n");
return PTR_ERR(exynos_gem_obj);
@@ -460,14 +452,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
goto unlock;
}
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto out;
-
*offset = drm_vma_node_offset_addr(&obj->vma_node);
DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
-out:
drm_gem_object_unreference(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
@@ -543,7 +530,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
err_close_vm:
drm_gem_vm_close(vma);
- drm_gem_free_mmap_offset(obj);
return ret;
}
@@ -588,6 +574,8 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
if (ret < 0)
goto err_free_large;
+ exynos_gem_obj->sgt = sgt;
+
if (sgt->nents == 1) {
/* always physically continuous memory if sgt->nents is 1. */
exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index cd62f8410d1e..b62d1007c0e0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -39,6 +39,7 @@
* - this address could be physical address without IOMMU and
* device address with IOMMU.
* @pages: Array of backing pages.
+ * @sgt: Imported sg_table.
*
* P.S. this object would be transferred to user as kms_bo.handle so
* user can access the buffer through kms_bo.handle.
@@ -52,6 +53,7 @@ struct exynos_drm_gem_obj {
dma_addr_t dma_addr;
struct dma_attrs dma_attrs;
struct page **pages;
+ struct sg_table *sgt;
};
struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
@@ -59,10 +61,6 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
/* destroy a buffer with gem object */
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
-/* create a private gem object and initialize it. */
-struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
- unsigned long size);
-
/* create a new buffer with gem object */
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
unsigned int flags,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 425e70625388..2f5c118f4c8e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -786,6 +786,7 @@ static int rotator_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
static int rotator_clk_crtl(struct rot_context *rot, bool enable)
{
if (enable) {
@@ -822,7 +823,6 @@ static int rotator_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM
static int rotator_runtime_suspend(struct device *dev)
{
struct rot_context *rot = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 3e4be5a3becd..6ade06888432 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -462,11 +462,17 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
drm_mode_connector_set_path_property(connector, pathprop);
+ return connector;
+}
+
+static void intel_dp_register_mst_connector(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_device *dev = connector->dev;
drm_modeset_lock_all(dev);
intel_connector_add_to_fbdev(intel_connector);
drm_modeset_unlock_all(dev);
drm_connector_register(&intel_connector->base);
- return connector;
}
static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
@@ -512,6 +518,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
static struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = intel_dp_add_mst_connector,
+ .register_connector = intel_dp_register_mst_connector,
.destroy_connector = intel_dp_destroy_mst_connector,
.hotplug = intel_dp_mst_hotplug,
};
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 53c0173a39fe..b17785719598 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
- drm_kms_helper_poll_enable(dev);
+ drm_kms_helper_poll_enable_locked(dev);
mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 72e0edd7bbde..7412caedcf7f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -484,18 +484,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
read_pointer = ring->next_context_status_buffer;
- write_pointer = status_pointer & 0x07;
+ write_pointer = status_pointer & GEN8_CSB_PTR_MASK;
if (read_pointer > write_pointer)
- write_pointer += 6;
+ write_pointer += GEN8_CSB_ENTRIES;
spin_lock(&ring->execlist_lock);
while (read_pointer < write_pointer) {
read_pointer++;
status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
- (read_pointer % 6) * 8);
+ (read_pointer % GEN8_CSB_ENTRIES) * 8);
status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
- (read_pointer % 6) * 8 + 4);
+ (read_pointer % GEN8_CSB_ENTRIES) * 8 + 4);
if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
continue;
@@ -521,10 +521,12 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
spin_unlock(&ring->execlist_lock);
WARN(submit_contexts > 2, "More than two context complete events?\n");
- ring->next_context_status_buffer = write_pointer % 6;
+ ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
- _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8));
+ _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
+ ((u32)ring->next_context_status_buffer &
+ GEN8_CSB_PTR_MASK) << 8));
}
static int execlists_context_queue(struct drm_i915_gem_request *request)
@@ -1422,6 +1424,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u8 next_context_status_buffer_hw;
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@@ -1436,7 +1439,29 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
_MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
POSTING_READ(RING_MODE_GEN7(ring));
- ring->next_context_status_buffer = 0;
+
+ /*
+ * Instead of resetting the Context Status Buffer (CSB) read pointer to
+ * zero, we need to read the write pointer from hardware and use its
+ * value because "this register is power context save restored".
+ * Effectively, these states have been observed:
+ *
+ * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
+ * BDW | CSB regs not reset | CSB regs reset |
+ * CHT | CSB regs not reset | CSB regs not reset |
+ */
+ next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring))
+ & GEN8_CSB_PTR_MASK);
+
+ /*
+ * When the CSB registers are reset (also after power-up / gpu reset),
+ * the CSB write pointer is set to all 1's, which is not valid; use '5'
+ * (GEN8_CSB_ENTRIES - 1) in this special case so that the first element read is CSB[0].
+ */
+ if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
+ next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
+
+ ring->next_context_status_buffer = next_context_status_buffer_hw;
DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
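
The intel_lrc.c hunks above replace the magic 6 and 0x07 with GEN8_CSB_ENTRIES and GEN8_CSB_PTR_MASK; the read pointer chases a masked hardware write pointer around a six-entry ring. A standalone sketch of just that wrap-around arithmetic (plain integers, not the i915 register accessors):

#include <stdio.h>

#define CSB_ENTRIES  6
#define CSB_PTR_MASK 0x07

/* Walk the status buffer entries between the last read slot and the
 * hardware write pointer, handling wrap-around as in the hunk above. */
static void process_csb(unsigned int read_pointer, unsigned int hw_status)
{
	unsigned int write_pointer = hw_status & CSB_PTR_MASK;

	if (read_pointer > write_pointer)
		write_pointer += CSB_ENTRIES;

	while (read_pointer < write_pointer) {
		read_pointer++;
		printf("consume CSB[%u]\n", read_pointer % CSB_ENTRIES);
	}
}

int main(void)
{
	process_csb(4, 0x01); /* wraps: consumes entries 5, 0 and 1 */
	return 0;
}

The same mask is what the new init code applies to the hardware pointer, falling back to CSB_ENTRIES - 1 when the register reads back as all ones after a reset.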
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 64f89f9982a2..3c63bb32ad81 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -25,6 +25,8 @@
#define _INTEL_LRC_H_
#define GEN8_LR_CONTEXT_ALIGN 4096
+#define GEN8_CSB_ENTRIES 6
+#define GEN8_CSB_PTR_MASK 0x07
/* Execlists regs */
#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index af7fdb3bd663..7401cf90b0db 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -246,7 +246,8 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
}
if (power_well->data == SKL_DISP_PW_1) {
- intel_prepare_ddi(dev);
+ if (!dev_priv->power_domains.initializing)
+ intel_prepare_ddi(dev);
gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
}
}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index dd845f82cc24..4649bd2ed340 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -618,7 +618,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
adjusted_mode->hdisplay,
adjusted_mode->vdisplay);
- if (qcrtc->index == 0)
+ if (bo->is_primary == false)
recreate_primary = true;
if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index c3872598b85a..65adb9c72377 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1624,8 +1624,9 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
} else
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- args.ucAction = ATOM_LCD_BLON;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
}
break;
case DRM_MODE_DPMS_STANDBY:
@@ -1706,8 +1707,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
}
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- atombios_dig_transmitter_setup(encoder,
- ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+ atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
if (ext_encoder)
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
break;
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 5e09c061847f..6cddae44fa6e 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -265,7 +265,6 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
{
struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
struct drm_device *dev = master->base.dev;
- struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector;
struct drm_connector *connector;
@@ -286,12 +285,19 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
drm_mode_connector_set_path_property(connector, pathprop);
+ return connector;
+}
+
+static void radeon_dp_register_mst_connector(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
drm_modeset_lock_all(dev);
radeon_fb_add_connector(rdev, connector);
drm_modeset_unlock_all(dev);
drm_connector_register(connector);
- return connector;
}
static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
@@ -324,6 +330,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = radeon_dp_add_mst_connector,
+ .register_connector = radeon_dp_register_mst_connector,
.destroy_connector = radeon_dp_destroy_mst_connector,
.hotplug = radeon_dp_mst_hotplug,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 7214858ffcea..1aa657fe31cb 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -48,40 +48,10 @@ struct radeon_fbdev {
struct radeon_device *rdev;
};
-/**
- * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev.
- *
- * @info: fbdev info
- *
- * This function hides the cursor on all CRTCs used by fbdev.
- */
-static int radeon_fb_helper_set_par(struct fb_info *info)
-{
- int ret;
-
- ret = drm_fb_helper_set_par(info);
-
- /* XXX: with universal plane support fbdev will automatically disable
- * all non-primary planes (including the cursor)
- */
- if (ret == 0) {
- struct drm_fb_helper *fb_helper = info->par;
- int i;
-
- for (i = 0; i < fb_helper->crtc_count; i++) {
- struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc;
-
- radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
- }
- }
-
- return ret;
-}
-
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = radeon_fb_helper_set_par,
+ .fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 5ae8f921da2a..8a76821177a6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -681,6 +681,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
0, 0,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
+ if (ret) {
+ (void) vmw_cmdbuf_man_process(man);
+ ret = drm_mm_insert_node_generic(&man->mm, info->node,
+ info->page_size, 0, 0,
+ DRM_MM_SEARCH_DEFAULT,
+ DRM_MM_CREATE_DEFAULT);
+ }
+
spin_unlock_bh(&man->lock);
info->done = !ret;
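
The vmwgfx hunk above retries the command-buffer space allocation once after processing completed commands, rather than failing on the first miss. A compact sketch of that attempt/reclaim/retry pattern; try_alloc and reclaim_finished_work are hypothetical stand-ins for drm_mm_insert_node_generic() and vmw_cmdbuf_man_process():

#include <stdio.h>

/* Hypothetical allocator: fails on the first attempt, succeeds after reclaim. */
static int try_alloc(int attempt) { return attempt == 0 ? -1 : 0; }
static void reclaim_finished_work(void) { printf("reclaiming completed commands\n"); }

static int alloc_with_retry(void)
{
	int ret = try_alloc(0);

	if (ret) {
		/* First attempt failed: free up space, then retry once. */
		reclaim_finished_work();
		ret = try_alloc(1);
	}
	return ret;
}

int main(void)
{
	printf("alloc %s\n", alloc_with_retry() ? "failed" : "succeeded");
	return 0;
}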
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c
index 6cb89c0ebab6..1fd46859ed29 100644
--- a/drivers/hwmon/abx500.c
+++ b/drivers/hwmon/abx500.c
@@ -470,6 +470,7 @@ static const struct of_device_id abx500_temp_match[] = {
{ .compatible = "stericsson,abx500-temp" },
{},
};
+MODULE_DEVICE_TABLE(of, abx500_temp_match);
#endif
static struct platform_driver abx500_temp_driver = {
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index a3dae6d0082a..82de3deeb18a 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -539,6 +539,7 @@ static const struct of_device_id of_gpio_fan_match[] = {
{ .compatible = "gpio-fan", },
{},
};
+MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
#endif /* CONFIG_OF_GPIO */
static int gpio_fan_probe(struct platform_device *pdev)
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 2d9a712699ff..3e23003f78b0 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -323,6 +323,7 @@ static const struct of_device_id of_pwm_fan_match[] = {
{ .compatible = "pwm-fan", },
{},
};
+MODULE_DEVICE_TABLE(of, of_pwm_fan_match);
static struct platform_driver pwm_fan_driver = {
.probe = pwm_fan_probe,
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 3a3738fe016b..cd4510a63375 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -620,7 +620,7 @@ static struct cpuidle_state skl_cstates[] = {
.name = "C6-SKL",
.desc = "MWAIT 0x20",
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 75,
+ .exit_latency = 85,
.target_residency = 200,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
@@ -636,11 +636,19 @@ static struct cpuidle_state skl_cstates[] = {
.name = "C8-SKL",
.desc = "MWAIT 0x40",
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 174,
+ .exit_latency = 200,
.target_residency = 800,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
{
+ .name = "C9-SKL",
+ .desc = "MWAIT 0x50",
+ .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 480,
+ .target_residency = 5000,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
.name = "C10-SKL",
.desc = "MWAIT 0x60",
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 41d6911e244e..f1ccd40beae9 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -245,7 +245,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
if (MLX5_CAP_GEN(mdev, apm))
props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
- props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
if (MLX5_CAP_GEN(mdev, xrc))
props->device_cap_flags |= IB_DEVICE_XRC;
props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
@@ -795,53 +794,6 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
return 0;
}
-static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
-{
- struct mlx5_create_mkey_mbox_in *in;
- struct mlx5_mkey_seg *seg;
- struct mlx5_core_mr mr;
- int err;
-
- in = kzalloc(sizeof(*in), GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- seg = &in->seg;
- seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA;
- seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
- seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- seg->start_addr = 0;
-
- err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in),
- NULL, NULL, NULL);
- if (err) {
- mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
- goto err_in;
- }
-
- kfree(in);
- *key = mr.key;
-
- return 0;
-
-err_in:
- kfree(in);
-
- return err;
-}
-
-static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key)
-{
- struct mlx5_core_mr mr;
- int err;
-
- memset(&mr, 0, sizeof(mr));
- mr.key = key;
- err = mlx5_core_destroy_mkey(dev->mdev, &mr);
- if (err)
- mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key);
-}
-
static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_udata *udata)
@@ -867,13 +819,6 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
kfree(pd);
return ERR_PTR(-EFAULT);
}
- } else {
- err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn);
- if (err) {
- mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
- kfree(pd);
- return ERR_PTR(err);
- }
}
return &pd->ibpd;
@@ -884,9 +829,6 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
struct mlx5_ib_dev *mdev = to_mdev(pd->device);
struct mlx5_ib_pd *mpd = to_mpd(pd);
- if (!pd->uobject)
- free_pa_mkey(mdev, mpd->pa_lkey);
-
mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
kfree(mpd);
@@ -1245,18 +1187,10 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
struct ib_srq_init_attr attr;
struct mlx5_ib_dev *dev;
struct ib_cq_init_attr cq_attr = {.cqe = 1};
- u32 rsvd_lkey;
int ret = 0;
dev = container_of(devr, struct mlx5_ib_dev, devr);
- ret = mlx5_core_query_special_context(dev->mdev, &rsvd_lkey);
- if (ret) {
- pr_err("Failed to query special context %d\n", ret);
- return ret;
- }
- dev->ib_dev.local_dma_lkey = rsvd_lkey;
-
devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
if (IS_ERR(devr->p0)) {
ret = PTR_ERR(devr->p0);
@@ -1418,6 +1352,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
dev->ib_dev.owner = THIS_MODULE;
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
+ dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
dev->ib_dev.phys_port_cnt = dev->num_ports;
dev->ib_dev.num_comp_vectors =
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index bb8cda79e881..22123b79d550 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -103,7 +103,6 @@ static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibuconte
struct mlx5_ib_pd {
struct ib_pd ibpd;
u32 pdn;
- u32 pa_lkey;
};
/* Use macros here so that don't have to duplicate
@@ -213,7 +212,6 @@ struct mlx5_ib_qp {
int uuarn;
int create_type;
- u32 pa_lkey;
/* Store signature errors */
bool signature_en;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index c745c6c5e10d..6f521a3418e8 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -925,8 +925,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
if (err)
mlx5_ib_dbg(dev, "err %d\n", err);
- else
- qp->pa_lkey = to_mpd(pd)->pa_lkey;
}
if (err)
@@ -2045,7 +2043,7 @@ static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
dseg->addr = cpu_to_be64(mfrpl->map);
dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
- dseg->lkey = cpu_to_be32(pd->pa_lkey);
+ dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
}
static __be32 send_ieth(struct ib_send_wr *wr)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index ca2873698d75..4cd5428a2399 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -80,7 +80,7 @@ enum {
IPOIB_NUM_WC = 4,
IPOIB_MAX_PATH_REC_QUEUE = 3,
- IPOIB_MAX_MCAST_QUEUE = 3,
+ IPOIB_MAX_MCAST_QUEUE = 64,
IPOIB_FLAG_OPER_UP = 0,
IPOIB_FLAG_INITIALIZED = 1,
@@ -548,6 +548,8 @@ void ipoib_path_iter_read(struct ipoib_path_iter *iter,
int ipoib_mcast_attach(struct net_device *dev, u16 mlid,
union ib_gid *mgid, int set_qkey);
+int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast);
+struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid);
int ipoib_init_qp(struct net_device *dev);
int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 36536ce5a3e2..f74316e679d2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1149,6 +1149,9 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
unsigned long dt;
unsigned long flags;
int i;
+ LIST_HEAD(remove_list);
+ struct ipoib_mcast *mcast, *tmcast;
+ struct net_device *dev = priv->dev;
if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
return;
@@ -1176,6 +1179,19 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
lockdep_is_held(&priv->lock))) != NULL) {
/* was the neigh idle for two GC periods */
if (time_after(neigh_obsolete, neigh->alive)) {
+ u8 *mgid = neigh->daddr + 4;
+
+ /* Is this multicast ? */
+ if (*mgid == 0xff) {
+ mcast = __ipoib_mcast_find(dev, mgid);
+
+ if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
+ list_del(&mcast->list);
+ rb_erase(&mcast->rb_node, &priv->multicast_tree);
+ list_add_tail(&mcast->list, &remove_list);
+ }
+ }
+
rcu_assign_pointer(*np,
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
@@ -1191,6 +1207,8 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
out_unlock:
spin_unlock_irqrestore(&priv->lock, flags);
+ list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
+ ipoib_mcast_leave(dev, mcast);
}
static void ipoib_reap_neigh(struct work_struct *work)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 09a1748f9d13..136cbefe00f8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -153,7 +153,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
return mcast;
}
-static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
+struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct rb_node *n = priv->multicast_tree.rb_node;
@@ -508,17 +508,19 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
rec.hop_limit = priv->broadcast->mcmember.hop_limit;
/*
- * Historically Linux IPoIB has never properly supported SEND
- * ONLY join. It emulated it by not providing all the required
- * attributes, which is enough to prevent group creation and
- * detect if there are full members or not. A major problem
- * with supporting SEND ONLY is detecting when the group is
- * auto-destroyed as IPoIB will cache the MLID..
+ * Send-only IB Multicast joins do not work at the core
+ * IB layer yet, so we can't use them here. However,
+ * we are emulating an Ethernet multicast send, which
+ * does not require a multicast subscription and will
+ * still send properly. The most appropriate thing to
+ * do is to create the group if it doesn't exist as that
+ * most closely emulates the behavior, from a user space
+ * application perspective, of Ethernet multicast
+ * operation. For now, we do a full join, maybe later
+ * when the core IB layers support send only joins we
+ * will use them.
*/
-#if 1
- if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
- comp_mask &= ~IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
-#else
+#if 0
if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
rec.join_state = 4;
#endif
@@ -675,7 +677,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
return 0;
}
-static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
+int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
int ret = 0;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 1ace5d83a4d7..f58ff96b6cbb 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -97,6 +97,11 @@ unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS;
module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024");
+bool iser_always_reg = true;
+module_param_named(always_register, iser_always_reg, bool, S_IRUGO);
+MODULE_PARM_DESC(always_register,
+ "Always register memory, even for continuous memory regions (default:true)");
+
bool iser_pi_enable = false;
module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO);
MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 86f6583485ef..a5edd6ede692 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -611,6 +611,7 @@ extern int iser_debug_level;
extern bool iser_pi_enable;
extern int iser_pi_guard;
extern unsigned int iser_max_sectors;
+extern bool iser_always_reg;
int iser_assign_reg_ops(struct iser_device *device);
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 2493cc748db8..4c46d67d37a1 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -803,11 +803,12 @@ static int
iser_reg_prot_sg(struct iscsi_iser_task *task,
struct iser_data_buf *mem,
struct iser_fr_desc *desc,
+ bool use_dma_key,
struct iser_mem_reg *reg)
{
struct iser_device *device = task->iser_conn->ib_conn.device;
- if (mem->dma_nents == 1)
+ if (use_dma_key)
return iser_reg_dma(device, mem, reg);
return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
@@ -817,11 +818,12 @@ static int
iser_reg_data_sg(struct iscsi_iser_task *task,
struct iser_data_buf *mem,
struct iser_fr_desc *desc,
+ bool use_dma_key,
struct iser_mem_reg *reg)
{
struct iser_device *device = task->iser_conn->ib_conn.device;
- if (mem->dma_nents == 1)
+ if (use_dma_key)
return iser_reg_dma(device, mem, reg);
return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
@@ -836,14 +838,17 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
struct iser_mem_reg *reg = &task->rdma_reg[dir];
struct iser_mem_reg *data_reg;
struct iser_fr_desc *desc = NULL;
+ bool use_dma_key;
int err;
err = iser_handle_unaligned_buf(task, mem, dir);
if (unlikely(err))
return err;
- if (mem->dma_nents != 1 ||
- scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
+ use_dma_key = (mem->dma_nents == 1 && !iser_always_reg &&
+ scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL);
+
+ if (!use_dma_key) {
desc = device->reg_ops->reg_desc_get(ib_conn);
reg->mem_h = desc;
}
@@ -853,7 +858,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
else
data_reg = &task->desc.data_reg;
- err = iser_reg_data_sg(task, mem, desc, data_reg);
+ err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg);
if (unlikely(err))
goto err_reg;
@@ -866,7 +871,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
if (unlikely(err))
goto err_reg;
- err = iser_reg_prot_sg(task, mem, desc, prot_reg);
+ err = iser_reg_prot_sg(task, mem, desc,
+ use_dma_key, prot_reg);
if (unlikely(err))
goto err_reg;
}
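
The iser_memory.c change above centralizes the decision of when the global DMA lkey may be used into a single use_dma_key flag instead of rechecking dma_nents in each helper. A minimal sketch of that predicate; the parameter names are hypothetical and only mirror the conditions in the hunk:

#include <stdbool.h>
#include <stdio.h>

/* The global DMA lkey is only safe for a single-entry mapping, when
 * always_register is off and no protection (T10-PI) operation is requested. */
static bool use_dma_key(int dma_nents, bool always_reg, bool pi_op)
{
	return dma_nents == 1 && !always_reg && !pi_op;
}

int main(void)
{
	printf("%d %d %d\n",
	       use_dma_key(1, false, false),  /* 1: fast path, no registration */
	       use_dma_key(1, true,  false),  /* 0: always_register forces a reg */
	       use_dma_key(4, false, false)); /* 0: scattered buffer */
	return 0;
}

Computing the flag once also lets iser_verbs.c skip allocating the global DMA MR entirely when always_register is set, which is why the teardown paths now tolerate device->mr being NULL.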
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index ae70cc1463ac..85132d867bc8 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -133,11 +133,15 @@ static int iser_create_device_ib_res(struct iser_device *device)
(unsigned long)comp);
}
- device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ);
- if (IS_ERR(device->mr))
- goto dma_mr_err;
+ if (!iser_always_reg) {
+ int access = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ;
+
+ device->mr = ib_get_dma_mr(device->pd, access);
+ if (IS_ERR(device->mr))
+ goto dma_mr_err;
+ }
INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
iser_event_handler);
@@ -147,7 +151,8 @@ static int iser_create_device_ib_res(struct iser_device *device)
return 0;
handler_err:
- ib_dereg_mr(device->mr);
+ if (device->mr)
+ ib_dereg_mr(device->mr);
dma_mr_err:
for (i = 0; i < device->comps_used; i++)
tasklet_kill(&device->comps[i].tasklet);
@@ -173,7 +178,6 @@ comps_err:
static void iser_free_device_ib_res(struct iser_device *device)
{
int i;
- BUG_ON(device->mr == NULL);
for (i = 0; i < device->comps_used; i++) {
struct iser_comp *comp = &device->comps[i];
@@ -184,7 +188,8 @@ static void iser_free_device_ib_res(struct iser_device *device)
}
(void)ib_unregister_event_handler(&device->event_handler);
- (void)ib_dereg_mr(device->mr);
+ if (device->mr)
+ (void)ib_dereg_mr(device->mr);
ib_dealloc_pd(device->pd);
kfree(device->comps);
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index 56eb471b5576..4215b5382092 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -196,6 +196,7 @@ config JOYSTICK_TWIDJOY
config JOYSTICK_ZHENHUA
tristate "5-byte Zhenhua RC transmitter"
select SERIO
+ select BITREVERSE
help
Say Y here if you have a Zhen Hua PPM-4CH transmitter which is
supplied with a ready to fly micro electric indoor helicopters
diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c
index b76ac580703c..a8bc2fe170dd 100644
--- a/drivers/input/joystick/walkera0701.c
+++ b/drivers/input/joystick/walkera0701.c
@@ -150,7 +150,7 @@ static void walkera0701_irq_handler(void *handler_data)
if (w->counter == 24) { /* full frame */
walkera0701_parse_frame(w);
w->counter = NO_SYNC;
- if (abs(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */
+ if (abs64(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */
w->counter = 0;
} else {
if ((pulse_time > (ANALOG_MIN_PULSE - RESERVE)
@@ -161,7 +161,7 @@ static void walkera0701_irq_handler(void *handler_data)
} else
w->counter = NO_SYNC;
}
- } else if (abs(pulse_time - SYNC_PULSE - BIN0_PULSE) <
+ } else if (abs64(pulse_time - SYNC_PULSE - BIN0_PULSE) <
RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. */
w->counter = 0;
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index b052afec9a11..6639b2b8528a 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -266,7 +266,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
error = omap4_keypad_parse_dt(&pdev->dev, keypad_data);
if (error)
- return error;
+ goto err_free_keypad;
res = request_mem_region(res->start, resource_size(res), pdev->name);
if (!res) {
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
index 867db8a91372..e317b75357a0 100644
--- a/drivers/input/misc/pm8941-pwrkey.c
+++ b/drivers/input/misc/pm8941-pwrkey.c
@@ -93,7 +93,7 @@ static int pm8941_reboot_notify(struct notifier_block *nb,
default:
reset_type = PON_PS_HOLD_TYPE_HARD_RESET;
break;
- };
+ }
error = regmap_update_bits(pwrkey->regmap,
pwrkey->baseaddr + PON_PS_HOLD_RST_CTL,
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 345df9b03aed..5adbcedcb81c 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -414,7 +414,7 @@ static int uinput_setup_device(struct uinput_device *udev,
dev->id.product = user_dev->id.product;
dev->id.version = user_dev->id.version;
- for_each_set_bit(i, dev->absbit, ABS_CNT) {
+ for (i = 0; i < ABS_CNT; i++) {
input_abs_set_max(dev, i, user_dev->absmax[i]);
input_abs_set_min(dev, i, user_dev->absmin[i]);
input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]);
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
index 73670f2aebfd..c0ec26118732 100644
--- a/drivers/input/mouse/elan_i2c.h
+++ b/drivers/input/mouse/elan_i2c.h
@@ -60,7 +60,7 @@ struct elan_transport_ops {
int (*get_sm_version)(struct i2c_client *client,
u8* ic_type, u8 *version);
int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum);
- int (*get_product_id)(struct i2c_client *client, u8 *id);
+ int (*get_product_id)(struct i2c_client *client, u16 *id);
int (*get_max)(struct i2c_client *client,
unsigned int *max_x, unsigned int *max_y);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index fa945304b9a5..5e1665bbaa0b 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -40,7 +40,7 @@
#include "elan_i2c.h"
#define DRIVER_NAME "elan_i2c"
-#define ELAN_DRIVER_VERSION "1.6.0"
+#define ELAN_DRIVER_VERSION "1.6.1"
#define ETP_MAX_PRESSURE 255
#define ETP_FWIDTH_REDUCE 90
#define ETP_FINGER_WIDTH 15
@@ -76,7 +76,7 @@ struct elan_tp_data {
unsigned int x_res;
unsigned int y_res;
- u8 product_id;
+ u16 product_id;
u8 fw_version;
u8 sm_version;
u8 iap_version;
@@ -98,15 +98,25 @@ static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count,
u16 *signature_address)
{
switch (iap_version) {
+ case 0x00:
+ case 0x06:
case 0x08:
*validpage_count = 512;
break;
+ case 0x03:
+ case 0x07:
case 0x09:
+ case 0x0A:
+ case 0x0B:
+ case 0x0C:
*validpage_count = 768;
break;
case 0x0D:
*validpage_count = 896;
break;
+ case 0x0E:
+ *validpage_count = 640;
+ break;
default:
/* unknown ic type clear value */
*validpage_count = 0;
@@ -266,11 +276,10 @@ static int elan_query_device_info(struct elan_tp_data *data)
error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count,
&data->fw_signature_address);
- if (error) {
- dev_err(&data->client->dev,
- "unknown iap version %d\n", data->iap_version);
- return error;
- }
+ if (error)
+ dev_warn(&data->client->dev,
+ "unexpected iap version %#04x (ic type: %#04x), firmware update will not work\n",
+ data->iap_version, data->ic_type);
return 0;
}
@@ -486,6 +495,9 @@ static ssize_t elan_sysfs_update_fw(struct device *dev,
const u8 *fw_signature;
static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF};
+ if (data->fw_validpage_count == 0)
+ return -EINVAL;
+
/* Look for a firmware with the product id appended. */
fw_name = kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id);
if (!fw_name) {
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index 683c840c9dd7..a679e56c44cd 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -276,7 +276,7 @@ static int elan_i2c_get_sm_version(struct i2c_client *client,
return 0;
}
-static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id)
+static int elan_i2c_get_product_id(struct i2c_client *client, u16 *id)
{
int error;
u8 val[3];
@@ -287,7 +287,7 @@ static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id)
return error;
}
- *id = val[0];
+ *id = le16_to_cpup((__le16 *)val);
return 0;
}
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
index ff36a366b2aa..cb6aecbc1dc2 100644
--- a/drivers/input/mouse/elan_i2c_smbus.c
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -183,7 +183,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
return 0;
}
-static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id)
+static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id)
{
int error;
u8 val[3];
@@ -195,7 +195,7 @@ static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id)
return error;
}
- *id = val[1];
+ *id = be16_to_cpup((__be16 *)val);
return 0;
}
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 994ae7886156..6025eb430c0a 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -519,18 +519,14 @@ static int synaptics_set_mode(struct psmouse *psmouse)
struct synaptics_data *priv = psmouse->private;
priv->mode = 0;
-
- if (priv->absolute_mode) {
+ if (priv->absolute_mode)
priv->mode |= SYN_BIT_ABSOLUTE_MODE;
- if (SYN_CAP_EXTENDED(priv->capabilities))
- priv->mode |= SYN_BIT_W_MODE;
- }
-
- if (!SYN_MODE_WMODE(priv->mode) && priv->disable_gesture)
+ if (priv->disable_gesture)
priv->mode |= SYN_BIT_DISABLE_GESTURE;
-
if (psmouse->rate >= 80)
priv->mode |= SYN_BIT_HIGH_RATE;
+ if (SYN_CAP_EXTENDED(priv->capabilities))
+ priv->mode |= SYN_BIT_W_MODE;
if (synaptics_mode_cmd(psmouse, priv->mode))
return -1;
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 75516996db20..316f2c897101 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -212,12 +212,17 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
* time before the ACK arrives.
*/
if (ps2_sendbyte(ps2dev, command & 0xff,
- command == PS2_CMD_RESET_BAT ? 1000 : 200))
- goto out;
+ command == PS2_CMD_RESET_BAT ? 1000 : 200)) {
+ serio_pause_rx(ps2dev->serio);
+ goto out_reset_flags;
+ }
- for (i = 0; i < send; i++)
- if (ps2_sendbyte(ps2dev, param[i], 200))
- goto out;
+ for (i = 0; i < send; i++) {
+ if (ps2_sendbyte(ps2dev, param[i], 200)) {
+ serio_pause_rx(ps2dev->serio);
+ goto out_reset_flags;
+ }
+ }
/*
* The reset command takes a long time to execute.
@@ -234,17 +239,18 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
!(ps2dev->flags & PS2_FLAG_CMD), timeout);
}
+ serio_pause_rx(ps2dev->serio);
+
if (param)
for (i = 0; i < receive; i++)
param[i] = ps2dev->cmdbuf[(receive - 1) - i];
if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1))
- goto out;
+ goto out_reset_flags;
rc = 0;
- out:
- serio_pause_rx(ps2dev->serio);
+ out_reset_flags:
ps2dev->flags = 0;
serio_continue_rx(ps2dev->serio);
diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c
index 26b45936f9fd..1e8cd6f1fe9e 100644
--- a/drivers/input/serio/parkbd.c
+++ b/drivers/input/serio/parkbd.c
@@ -194,6 +194,7 @@ static int __init parkbd_init(void)
parkbd_port = parkbd_allocate_serio();
if (!parkbd_port) {
parport_release(parkbd_dev);
+ parport_unregister_device(parkbd_dev);
return -ENOMEM;
}
diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
index ff0b75813daa..8275267eac25 100644
--- a/drivers/input/touchscreen/imx6ul_tsc.c
+++ b/drivers/input/touchscreen/imx6ul_tsc.c
@@ -94,7 +94,7 @@ struct imx6ul_tsc {
 * The TSC module needs the ADC to get the measured value, so
 * the ADC module should be initialized before configuring the TSC.
*/
-static void imx6ul_adc_init(struct imx6ul_tsc *tsc)
+static int imx6ul_adc_init(struct imx6ul_tsc *tsc)
{
int adc_hc = 0;
int adc_gc;
@@ -122,17 +122,23 @@ static void imx6ul_adc_init(struct imx6ul_tsc *tsc)
timeout = wait_for_completion_timeout
(&tsc->completion, ADC_TIMEOUT);
- if (timeout == 0)
+ if (timeout == 0) {
dev_err(tsc->dev, "Timeout for adc calibration\n");
+ return -ETIMEDOUT;
+ }
adc_gs = readl(tsc->adc_regs + REG_ADC_GS);
- if (adc_gs & ADC_CALF)
+ if (adc_gs & ADC_CALF) {
dev_err(tsc->dev, "ADC calibration failed\n");
+ return -EINVAL;
+ }
	/* TSC needs the ADC to work in hardware trigger mode */
adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG);
adc_cfg |= ADC_HARDWARE_TRIGGER;
writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG);
+
+ return 0;
}
/*
@@ -188,11 +194,17 @@ static void imx6ul_tsc_set(struct imx6ul_tsc *tsc)
writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
}
-static void imx6ul_tsc_init(struct imx6ul_tsc *tsc)
+static int imx6ul_tsc_init(struct imx6ul_tsc *tsc)
{
- imx6ul_adc_init(tsc);
+ int err;
+
+ err = imx6ul_adc_init(tsc);
+ if (err)
+ return err;
imx6ul_tsc_channel_config(tsc);
imx6ul_tsc_set(tsc);
+
+ return 0;
}
static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc)
@@ -311,9 +323,7 @@ static int imx6ul_tsc_open(struct input_dev *input_dev)
return err;
}
- imx6ul_tsc_init(tsc);
-
- return 0;
+ return imx6ul_tsc_init(tsc);
}
static void imx6ul_tsc_close(struct input_dev *input_dev)
@@ -337,7 +347,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
int tsc_irq;
int adc_irq;
- tsc = devm_kzalloc(&pdev->dev, sizeof(struct imx6ul_tsc), GFP_KERNEL);
+ tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL);
if (!tsc)
return -ENOMEM;
@@ -345,7 +355,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
if (!input_dev)
return -ENOMEM;
- input_dev->name = "iMX6UL TouchScreen Controller";
+ input_dev->name = "iMX6UL Touchscreen Controller";
input_dev->id.bustype = BUS_HOST;
input_dev->open = imx6ul_tsc_open;
@@ -406,7 +416,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
}
adc_irq = platform_get_irq(pdev, 1);
- if (adc_irq <= 0) {
+ if (adc_irq < 0) {
dev_err(&pdev->dev, "no adc irq resource?\n");
return adc_irq;
}
@@ -491,7 +501,7 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev)
goto out;
}
- imx6ul_tsc_init(tsc);
+ retval = imx6ul_tsc_init(tsc);
}
out:
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 7cce87650fc8..1fafc9f57af6 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -394,12 +394,12 @@ static struct mms114_platform_data *mms114_parse_dt(struct device *dev)
if (of_property_read_u32(np, "x-size", &pdata->x_size)) {
dev_err(dev, "failed to get x-size property\n");
return NULL;
- };
+ }
if (of_property_read_u32(np, "y-size", &pdata->y_size)) {
dev_err(dev, "failed to get y-size property\n");
return NULL;
- };
+ }
of_property_read_u32(np, "contact-threshold",
&pdata->contact_threshold);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 4664c2a96c67..d9da766719c8 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -43,7 +43,7 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST
endmenu
config IOMMU_IOVA
- bool
+ tristate
config OF_IOMMU
def_bool y
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2d7349a3ee14..041bc1810a86 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3215,6 +3215,8 @@ static struct iova *intel_alloc_iova(struct device *dev,
/* Restrict dma_mask to the width that the iommu can handle */
dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+ /* Ensure we reserve the whole size-aligned region */
+ nrpages = __roundup_pow_of_two(nrpages);
if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
/*
@@ -3711,7 +3713,7 @@ static inline int iommu_devinfo_cache_init(void)
static int __init iommu_init_mempool(void)
{
int ret;
- ret = iommu_iova_cache_init();
+ ret = iova_cache_get();
if (ret)
return ret;
@@ -3725,7 +3727,7 @@ static int __init iommu_init_mempool(void)
kmem_cache_destroy(iommu_domain_cache);
domain_error:
- iommu_iova_cache_destroy();
+ iova_cache_put();
return -ENOMEM;
}
@@ -3734,7 +3736,7 @@ static void __init iommu_exit_mempool(void)
{
kmem_cache_destroy(iommu_devinfo_cache);
kmem_cache_destroy(iommu_domain_cache);
- iommu_iova_cache_destroy();
+ iova_cache_put();
}
static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index b7c3d923f3e1..fa0adef32bd6 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -18,42 +18,9 @@
*/
#include <linux/iova.h>
+#include <linux/module.h>
#include <linux/slab.h>
-static struct kmem_cache *iommu_iova_cache;
-
-int iommu_iova_cache_init(void)
-{
- int ret = 0;
-
- iommu_iova_cache = kmem_cache_create("iommu_iova",
- sizeof(struct iova),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!iommu_iova_cache) {
- pr_err("Couldn't create iova cache\n");
- ret = -ENOMEM;
- }
-
- return ret;
-}
-
-void iommu_iova_cache_destroy(void)
-{
- kmem_cache_destroy(iommu_iova_cache);
-}
-
-struct iova *alloc_iova_mem(void)
-{
- return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
-}
-
-void free_iova_mem(struct iova *iova)
-{
- kmem_cache_free(iommu_iova_cache, iova);
-}
-
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn, unsigned long pfn_32bit)
@@ -72,6 +39,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
iovad->start_pfn = start_pfn;
iovad->dma_32bit_pfn = pfn_32bit;
}
+EXPORT_SYMBOL_GPL(init_iova_domain);
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
@@ -120,19 +88,14 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
}
}
-/* Computes the padding size required, to make the
- * the start address naturally aligned on its size
+/*
+ * Computes the padding size required, to make the start address
+ * naturally aligned on the power-of-two order of its size
*/
-static int
-iova_get_pad_size(int size, unsigned int limit_pfn)
+static unsigned int
+iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
{
- unsigned int pad_size = 0;
- unsigned int order = ilog2(size);
-
- if (order)
- pad_size = (limit_pfn + 1) % (1 << order);
-
- return pad_size;
+ return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
}
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
@@ -242,6 +205,57 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
rb_insert_color(&iova->node, root);
}
+static struct kmem_cache *iova_cache;
+static unsigned int iova_cache_users;
+static DEFINE_MUTEX(iova_cache_mutex);
+
+struct iova *alloc_iova_mem(void)
+{
+ return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
+}
+EXPORT_SYMBOL(alloc_iova_mem);
+
+void free_iova_mem(struct iova *iova)
+{
+ kmem_cache_free(iova_cache, iova);
+}
+EXPORT_SYMBOL(free_iova_mem);
+
+int iova_cache_get(void)
+{
+ mutex_lock(&iova_cache_mutex);
+ if (!iova_cache_users) {
+ iova_cache = kmem_cache_create(
+ "iommu_iova", sizeof(struct iova), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!iova_cache) {
+ mutex_unlock(&iova_cache_mutex);
+ printk(KERN_ERR "Couldn't create iova cache\n");
+ return -ENOMEM;
+ }
+ }
+
+ iova_cache_users++;
+ mutex_unlock(&iova_cache_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iova_cache_get);
+
+void iova_cache_put(void)
+{
+ mutex_lock(&iova_cache_mutex);
+ if (WARN_ON(!iova_cache_users)) {
+ mutex_unlock(&iova_cache_mutex);
+ return;
+ }
+ iova_cache_users--;
+ if (!iova_cache_users)
+ kmem_cache_destroy(iova_cache);
+ mutex_unlock(&iova_cache_mutex);
+}
+EXPORT_SYMBOL_GPL(iova_cache_put);
+
/**
* alloc_iova - allocates an iova
* @iovad: - iova domain in question
@@ -265,12 +279,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
if (!new_iova)
return NULL;
- /* If size aligned is set then round the size to
- * to next power of two.
- */
- if (size_aligned)
- size = __roundup_pow_of_two(size);
-
ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
new_iova, size_aligned);
@@ -281,6 +289,7 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
return new_iova;
}
+EXPORT_SYMBOL_GPL(alloc_iova);
/**
 * find_iova - finds an iova for a given pfn
@@ -321,6 +330,7 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
return NULL;
}
+EXPORT_SYMBOL_GPL(find_iova);
/**
* __free_iova - frees the given iova
@@ -339,6 +349,7 @@ __free_iova(struct iova_domain *iovad, struct iova *iova)
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
free_iova_mem(iova);
}
+EXPORT_SYMBOL_GPL(__free_iova);
/**
* free_iova - finds and frees the iova for a given pfn
@@ -356,6 +367,7 @@ free_iova(struct iova_domain *iovad, unsigned long pfn)
__free_iova(iovad, iova);
}
+EXPORT_SYMBOL_GPL(free_iova);
/**
 * put_iova_domain - destroys the iova domain
@@ -378,6 +390,7 @@ void put_iova_domain(struct iova_domain *iovad)
}
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
+EXPORT_SYMBOL_GPL(put_iova_domain);
static int
__is_range_overlap(struct rb_node *node,
@@ -467,6 +480,7 @@ finish:
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
return iova;
}
+EXPORT_SYMBOL_GPL(reserve_iova);
/**
 * copy_reserved_iova - copies reserved iova ranges from one domain to another
@@ -493,6 +507,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
}
spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
+EXPORT_SYMBOL_GPL(copy_reserved_iova);
struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
@@ -534,3 +549,6 @@ error:
free_iova_mem(prev);
return NULL;
}
+
+MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index cf351c637464..a7c8c9ffbafd 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -62,7 +62,7 @@ static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
dev_alias->dev_id = alias;
if (pdev != dev_alias->pdev)
- dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev);
+ dev_alias->count += its_pci_msi_vec_count(pdev);
return 0;
}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index ac7ae2b3cb83..25ceae9f7348 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -719,6 +719,9 @@ static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
out:
spin_unlock(&lpi_lock);
+ if (!bitmap)
+ *base = *nr_ids = 0;
+
return bitmap;
}
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index af2f16bb8a94..aeaa061f0dbf 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -320,6 +320,14 @@ static void gic_handle_shared_int(bool chained)
intrmask[i] = gic_read(intrmask_reg);
pending_reg += gic_reg_step;
intrmask_reg += gic_reg_step;
+
+ if (!config_enabled(CONFIG_64BIT) || mips_cm_is64)
+ continue;
+
+ pending[i] |= (u64)gic_read(pending_reg) << 32;
+ intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
+ pending_reg += gic_reg_step;
+ intrmask_reg += gic_reg_step;
}
bitmap_and(pending, pending, intrmask, gic_shared_intrs);
@@ -426,7 +434,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
spin_lock_irqsave(&gic_lock, flags);
/* Re-route this IRQ */
- gic_map_to_vpe(irq, cpumask_first(&tmp));
+ gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
/* Update the pcpu_masks */
for (i = 0; i < NR_CPUS; i++)
@@ -599,7 +607,7 @@ static __init void gic_ipi_init_one(unsigned int intr, int cpu,
GIC_SHARED_TO_HWIRQ(intr));
int i;
- gic_map_to_vpe(intr, cpu);
+ gic_map_to_vpe(intr, mips_cm_vp_id(cpu));
for (i = 0; i < NR_CPUS; i++)
clear_bit(intr, pcpu_masks[i].pcpu_mask);
set_bit(intr, pcpu_masks[cpu].pcpu_mask);
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
index de36237d7c6b..051645498b53 100644
--- a/drivers/mcb/mcb-pci.c
+++ b/drivers/mcb/mcb-pci.c
@@ -74,7 +74,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = -ENOTSUPP;
dev_err(&pdev->dev,
"IO mapped PCI devices are not supported\n");
- goto out_release;
+ goto out_iounmap;
}
pci_set_drvdata(pdev, priv);
@@ -89,7 +89,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base);
if (ret < 0)
- goto out_iounmap;
+ goto out_mcb_bus;
num_cells = ret;
dev_dbg(&pdev->dev, "Found %d cells\n", num_cells);
@@ -98,6 +98,8 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
+out_mcb_bus:
+ mcb_release_bus(priv->bus);
out_iounmap:
iounmap(priv->base);
out_release:
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index e51de52eeb94..48b5890c28e3 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1997,7 +1997,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
ret = bitmap_storage_alloc(&store, chunks,
!bitmap->mddev->bitmap_info.external,
- bitmap->cluster_slot);
+ mddev_is_clustered(bitmap->mddev)
+ ? bitmap->cluster_slot : 0);
if (ret)
goto err;
diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
index 240c9f0e85e7..8a096456579b 100644
--- a/drivers/md/dm-cache-policy-cleaner.c
+++ b/drivers/md/dm-cache-policy-cleaner.c
@@ -436,7 +436,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
static struct dm_cache_policy_type wb_policy_type = {
.name = "cleaner",
.version = {1, 0, 0},
- .hint_size = 0,
+ .hint_size = 4,
.owner = THIS_MODULE,
.create = wb_create
};
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index ebaa4f803eec..192bb8beeb6b 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -203,7 +203,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
return -EINVAL;
}
- tmp_store = kmalloc(sizeof(*tmp_store), GFP_KERNEL);
+ tmp_store = kzalloc(sizeof(*tmp_store), GFP_KERNEL);
if (!tmp_store) {
ti->error = "Exception store allocation failed";
return -ENOMEM;
@@ -215,7 +215,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
else if (persistent == 'N')
type = get_type("N");
else {
- ti->error = "Persistent flag is not P or N";
+ ti->error = "Exception store type is not P or N";
r = -EINVAL;
goto bad_type;
}
@@ -233,7 +233,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
if (r)
goto bad;
- r = type->ctr(tmp_store, 0, NULL);
+ r = type->ctr(tmp_store, (strlen(argv[0]) > 1 ? &argv[0][1] : NULL));
if (r) {
ti->error = "Exception store type constructor failed";
goto bad;
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 0b2536247cf5..fae34e7a0b1e 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -42,8 +42,7 @@ struct dm_exception_store_type {
const char *name;
struct module *module;
- int (*ctr) (struct dm_exception_store *store,
- unsigned argc, char **argv);
+ int (*ctr) (struct dm_exception_store *store, char *options);
/*
* Destroys this object when you've finished with it.
@@ -123,6 +122,8 @@ struct dm_exception_store {
unsigned chunk_shift;
void *context;
+
+ bool userspace_supports_overflow;
};
/*
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 97e165183e79..a0901214aef5 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -329,8 +329,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
*/
if (min_region_size > (1 << 13)) {
/* If not a power of 2, make it the next power of 2 */
- if (min_region_size & (min_region_size - 1))
- region_size = 1 << fls(region_size);
+ region_size = roundup_pow_of_two(min_region_size);
DMINFO("Choosing default region size of %lu sectors",
region_size);
} else {
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index bf71583296f7..aeacad9be51d 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -7,6 +7,7 @@
#include "dm-exception-store.h"
+#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
@@ -843,8 +844,7 @@ static void persistent_drop_snapshot(struct dm_exception_store *store)
DMWARN("write header failed");
}
-static int persistent_ctr(struct dm_exception_store *store,
- unsigned argc, char **argv)
+static int persistent_ctr(struct dm_exception_store *store, char *options)
{
struct pstore *ps;
@@ -873,6 +873,16 @@ static int persistent_ctr(struct dm_exception_store *store,
return -ENOMEM;
}
+ if (options) {
+ char overflow = toupper(options[0]);
+ if (overflow == 'O')
+ store->userspace_supports_overflow = true;
+ else {
+ DMERR("Unsupported persistent store option: %s", options);
+ return -EINVAL;
+ }
+ }
+
store->context = ps;
return 0;
@@ -888,7 +898,8 @@ static unsigned persistent_status(struct dm_exception_store *store,
case STATUSTYPE_INFO:
break;
case STATUSTYPE_TABLE:
- DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
+ DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P",
+ (unsigned long long)store->chunk_size);
}
return sz;
diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
index 1ce9a2586e41..9b7c8c8049d6 100644
--- a/drivers/md/dm-snap-transient.c
+++ b/drivers/md/dm-snap-transient.c
@@ -70,8 +70,7 @@ static void transient_usage(struct dm_exception_store *store,
*metadata_sectors = 0;
}
-static int transient_ctr(struct dm_exception_store *store,
- unsigned argc, char **argv)
+static int transient_ctr(struct dm_exception_store *store, char *options)
{
struct transient_c *tc;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index c0bcd6516dfe..c06b74e91cd6 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1098,7 +1098,7 @@ static void stop_merge(struct dm_snapshot *s)
}
/*
- * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
+ * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size>
*/
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
@@ -1302,6 +1302,7 @@ static void __handover_exceptions(struct dm_snapshot *snap_src,
u.store_swap = snap_dest->store;
snap_dest->store = snap_src->store;
+ snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow;
snap_src->store = u.store_swap;
snap_dest->store->snap = snap_dest;
@@ -1739,8 +1740,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
pe = __find_pending_exception(s, pe, chunk);
if (!pe) {
- s->snapshot_overflowed = 1;
- DMERR("Snapshot overflowed: Unable to allocate exception.");
+ if (s->store->userspace_supports_overflow) {
+ s->snapshot_overflowed = 1;
+ DMERR("Snapshot overflowed: Unable to allocate exception.");
+ } else
+ __invalidate_snapshot(s, -ENOMEM);
r = -EIO;
goto out_unlock;
}
@@ -2365,7 +2369,7 @@ static struct target_type origin_target = {
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 14, 0},
+ .version = {1, 15, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
@@ -2379,7 +2383,7 @@ static struct target_type snapshot_target = {
static struct target_type merge_target = {
.name = dm_snapshot_merge_target_name,
- .version = {1, 3, 0},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6264781dc69a..1b5c6047e4f1 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1001,6 +1001,7 @@ static void end_clone_bio(struct bio *clone)
struct dm_rq_target_io *tio = info->tio;
struct bio *bio = info->orig;
unsigned int nr_bytes = info->orig->bi_iter.bi_size;
+ int error = clone->bi_error;
bio_put(clone);
@@ -1011,13 +1012,13 @@ static void end_clone_bio(struct bio *clone)
* the remainder.
*/
return;
- else if (bio->bi_error) {
+ else if (error) {
/*
* Don't notice the error to the upper layer yet.
* The error handling decision is made by the target driver,
* when the request is completed.
*/
- tio->error = bio->bi_error;
+ tio->error = error;
return;
}
@@ -2837,8 +2838,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
might_sleep();
- map = dm_get_live_table(md, &srcu_idx);
-
spin_lock(&_minor_lock);
idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
set_bit(DMF_FREEING, &md->flags);
@@ -2852,14 +2851,14 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
* do not race with internal suspend.
*/
mutex_lock(&md->suspend_lock);
+ map = dm_get_live_table(md, &srcu_idx);
if (!dm_suspended_md(md)) {
dm_table_presuspend_targets(map);
dm_table_postsuspend_targets(map);
}
- mutex_unlock(&md->suspend_lock);
-
/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
dm_put_live_table(md, srcu_idx);
+ mutex_unlock(&md->suspend_lock);
/*
* Rare, but there may be I/O requests still going to complete,
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4f5ecbe94ccb..c702de18207a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5409,9 +5409,13 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
* which will now never happen */
wake_up_process(mddev->sync_thread->tsk);
+ if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
+ return -EBUSY;
mddev_unlock(mddev);
wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
&mddev->recovery));
+ wait_event(mddev->sb_wait,
+ !test_bit(MD_CHANGE_PENDING, &mddev->flags));
mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
@@ -8160,6 +8164,7 @@ void md_check_recovery(struct mddev *mddev)
md_reap_sync_thread(mddev);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ clear_bit(MD_CHANGE_PENDING, &mddev->flags);
goto unlock;
}
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index d222522c52e0..d132f06afdd1 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -470,8 +470,7 @@ static int multipath_run (struct mddev *mddev)
return 0;
out_free_conf:
- if (conf->pool)
- mempool_destroy(conf->pool);
+ mempool_destroy(conf->pool);
kfree(conf->multipaths);
kfree(conf);
mddev->private = NULL;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 63e619b2f44e..f8e5db0cb5aa 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -376,12 +376,6 @@ static int raid0_run(struct mddev *mddev)
struct md_rdev *rdev;
bool discard_supported = false;
- rdev_for_each(rdev, mddev) {
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
- if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
- discard_supported = true;
- }
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
@@ -390,6 +384,12 @@ static int raid0_run(struct mddev *mddev)
blk_queue_io_opt(mddev->queue,
(mddev->chunk_sectors << 9) * mddev->raid_disks);
+ rdev_for_each(rdev, mddev) {
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+ if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+ discard_supported = true;
+ }
if (!discard_supported)
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
else
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4517f06c41ba..ddd8a5f572aa 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -881,8 +881,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
}
if (bio && bio_data_dir(bio) == WRITE) {
- if (bio->bi_iter.bi_sector >=
- conf->mddev->curr_resync_completed) {
+ if (bio->bi_iter.bi_sector >= conf->next_resync) {
if (conf->start_next_window == MaxSector)
conf->start_next_window =
conf->next_resync +
@@ -1516,7 +1515,7 @@ static void close_sync(struct r1conf *conf)
conf->r1buf_pool = NULL;
spin_lock_irq(&conf->resync_lock);
- conf->next_resync = 0;
+ conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE;
conf->start_next_window = MaxSector;
conf->current_window_requests +=
conf->next_window_requests;
@@ -2383,8 +2382,8 @@ static void raid1d(struct md_thread *thread)
}
spin_unlock_irqrestore(&conf->device_lock, flags);
while (!list_empty(&tmp)) {
- r1_bio = list_first_entry(&conf->bio_end_io_list,
- struct r1bio, retry_list);
+ r1_bio = list_first_entry(&tmp, struct r1bio,
+ retry_list);
list_del(&r1_bio->retry_list);
raid_end_bio_io(r1_bio);
}
@@ -2843,8 +2842,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
abort:
if (conf) {
- if (conf->r1bio_pool)
- mempool_destroy(conf->r1bio_pool);
+ mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
kfree(conf->poolinfo);
@@ -2946,8 +2944,7 @@ static void raid1_free(struct mddev *mddev, void *priv)
{
struct r1conf *conf = priv;
- if (conf->r1bio_pool)
- mempool_destroy(conf->r1bio_pool);
+ mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
kfree(conf->poolinfo);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 0fc33eb88855..9f69dc526f8c 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2688,8 +2688,8 @@ static void raid10d(struct md_thread *thread)
}
spin_unlock_irqrestore(&conf->device_lock, flags);
while (!list_empty(&tmp)) {
- r10_bio = list_first_entry(&conf->bio_end_io_list,
- struct r10bio, retry_list);
+ r10_bio = list_first_entry(&tmp, struct r10bio,
+ retry_list);
list_del(&r10_bio->retry_list);
raid_end_bio_io(r10_bio);
}
@@ -3486,8 +3486,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
mdname(mddev));
if (conf) {
- if (conf->r10bio_pool)
- mempool_destroy(conf->r10bio_pool);
+ mempool_destroy(conf->r10bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
kfree(conf);
@@ -3682,8 +3681,7 @@ static int run(struct mddev *mddev)
out_free_conf:
md_unregister_thread(&mddev->thread);
- if (conf->r10bio_pool)
- mempool_destroy(conf->r10bio_pool);
+ mempool_destroy(conf->r10bio_pool);
safe_put_page(conf->tmppage);
kfree(conf->mirrors);
kfree(conf);
@@ -3696,8 +3694,7 @@ static void raid10_free(struct mddev *mddev, void *priv)
{
struct r10conf *conf = priv;
- if (conf->r10bio_pool)
- mempool_destroy(conf->r10bio_pool);
+ mempool_destroy(conf->r10bio_pool);
safe_put_page(conf->tmppage);
kfree(conf->mirrors);
kfree(conf->mirrors_old);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 15ef2c641b2b..49bb8d3ff9be 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2271,8 +2271,7 @@ static void shrink_stripes(struct r5conf *conf)
drop_one_stripe(conf))
;
- if (conf->slab_cache)
- kmem_cache_destroy(conf->slab_cache);
+ kmem_cache_destroy(conf->slab_cache);
conf->slab_cache = NULL;
}
@@ -3150,6 +3149,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
spin_unlock_irq(&sh->stripe_lock);
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
wake_up(&conf->wait_for_overlap);
+ if (bi)
+ s->to_read--;
while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi =
@@ -3169,6 +3170,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
*/
clear_bit(R5_LOCKED, &sh->dev[i].flags);
}
+ s->to_write = 0;
+ s->written = 0;
if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
if (atomic_dec_and_test(&conf->pending_full_writes))
@@ -3300,7 +3303,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
*/
return 0;
- for (i = 0; i < s->failed; i++) {
+ for (i = 0; i < s->failed && i < 2; i++) {
if (fdev[i]->towrite &&
!test_bit(R5_UPTODATE, &fdev[i]->flags) &&
!test_bit(R5_OVERWRITE, &fdev[i]->flags))
@@ -3324,7 +3327,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
sh->sector < sh->raid_conf->mddev->recovery_cp)
/* reconstruct-write isn't being forced */
return 0;
- for (i = 0; i < s->failed; i++) {
+ for (i = 0; i < s->failed && i < 2; i++) {
if (s->failed_num[i] != sh->pd_idx &&
s->failed_num[i] != sh->qd_idx &&
!test_bit(R5_UPTODATE, &fdev[i]->flags) &&
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 8eec887c8f70..6d7c188fb65c 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1209,7 +1209,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
* after the host receives the enum_resp
* message clients may be added or removed
*/
- if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS &&
+ if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS ||
dev->hbm_state >= MEI_HBM_STOPPED) {
dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 0520064dc33b..a3eb20bdcd97 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -134,9 +134,11 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
int err = cmd->error;
/* Flag re-tuning needed on CRC errors */
- if (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
+ if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
+ cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
+ (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
(mrq->data && mrq->data->error == -EILSEQ) ||
- (mrq->stop && mrq->stop->error == -EILSEQ))
+ (mrq->stop && mrq->stop->error == -EILSEQ)))
mmc_retune_needed(host);
if (err && cmd->retries && mmc_host_is_spi(host)) {
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index abd933b7029b..5466f25f0281 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -457,7 +457,7 @@ int mmc_of_parse(struct mmc_host *host)
0, &cd_gpio_invert);
if (!ret)
dev_info(host->parent, "Got CD GPIO\n");
- else if (ret != -ENOENT)
+ else if (ret != -ENOENT && ret != -ENOSYS)
return ret;
/*
@@ -481,7 +481,7 @@ int mmc_of_parse(struct mmc_host *host)
ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
if (!ret)
dev_info(host->parent, "Got WP GPIO\n");
- else if (ret != -ENOENT)
+ else if (ret != -ENOENT && ret != -ENOSYS)
return ret;
if (of_property_read_bool(np, "disable-wp"))
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 781e4db31767..7fb0753abe30 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -182,6 +182,7 @@ struct omap_hsmmc_host {
struct clk *fclk;
struct clk *dbclk;
struct regulator *pbias;
+ bool pbias_enabled;
void __iomem *base;
int vqmmc_enabled;
resource_size_t mapbase;
@@ -328,20 +329,22 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
return ret;
}
- if (!regulator_is_enabled(host->pbias)) {
+ if (host->pbias_enabled == 0) {
ret = regulator_enable(host->pbias);
if (ret) {
dev_err(host->dev, "pbias reg enable fail\n");
return ret;
}
+ host->pbias_enabled = 1;
}
} else {
- if (regulator_is_enabled(host->pbias)) {
+ if (host->pbias_enabled == 1) {
ret = regulator_disable(host->pbias);
if (ret) {
dev_err(host->dev, "pbias reg disable fail\n");
return ret;
}
+ host->pbias_enabled = 0;
}
}
@@ -475,7 +478,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
mmc->supply.vmmc = devm_regulator_get_optional(host->dev, "vmmc");
if (IS_ERR(mmc->supply.vmmc)) {
ret = PTR_ERR(mmc->supply.vmmc);
- if (ret != -ENODEV)
+ if ((ret != -ENODEV) && host->dev->of_node)
return ret;
dev_dbg(host->dev, "unable to get vmmc regulator %ld\n",
PTR_ERR(mmc->supply.vmmc));
@@ -490,7 +493,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux");
if (IS_ERR(mmc->supply.vqmmc)) {
ret = PTR_ERR(mmc->supply.vqmmc);
- if (ret != -ENODEV)
+ if ((ret != -ENODEV) && host->dev->of_node)
return ret;
dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n",
PTR_ERR(mmc->supply.vqmmc));
@@ -500,7 +503,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
host->pbias = devm_regulator_get_optional(host->dev, "pbias");
if (IS_ERR(host->pbias)) {
ret = PTR_ERR(host->pbias);
- if (ret != -ENODEV)
+ if ((ret != -ENODEV) && host->dev->of_node)
return ret;
dev_dbg(host->dev, "unable to get pbias regulator %ld\n",
PTR_ERR(host->pbias));
@@ -2053,6 +2056,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
host->base = base + pdata->reg_offset;
host->power_mode = MMC_POWER_OFF;
host->next_data.cookie = 1;
+ host->pbias_enabled = 0;
host->vqmmc_enabled = 0;
ret = omap_hsmmc_gpio_init(mmc, host, pdata);
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 1420f29628c7..8cadd74e8407 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -28,6 +28,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio.h>
@@ -454,12 +455,8 @@ static int pxamci_get_ro(struct mmc_host *mmc)
{
struct pxamci_host *host = mmc_priv(mmc);
- if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) {
- if (host->pdata->gpio_card_ro_invert)
- return !gpio_get_value(host->pdata->gpio_card_ro);
- else
- return gpio_get_value(host->pdata->gpio_card_ro);
- }
+ if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro))
+ return mmc_gpio_get_ro(mmc);
if (host->pdata && host->pdata->get_ro)
return !!host->pdata->get_ro(mmc_dev(mmc));
/*
@@ -551,6 +548,7 @@ static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
static const struct mmc_host_ops pxamci_ops = {
.request = pxamci_request,
+ .get_cd = mmc_gpio_get_cd,
.get_ro = pxamci_get_ro,
.set_ios = pxamci_set_ios,
.enable_sdio_irq = pxamci_enable_sdio_irq,
@@ -790,37 +788,31 @@ static int pxamci_probe(struct platform_device *pdev)
gpio_power = host->pdata->gpio_power;
}
if (gpio_is_valid(gpio_power)) {
- ret = gpio_request(gpio_power, "mmc card power");
+ ret = devm_gpio_request(&pdev->dev, gpio_power,
+ "mmc card power");
if (ret) {
- dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power);
+ dev_err(&pdev->dev, "Failed requesting gpio_power %d\n",
+ gpio_power);
goto out;
}
gpio_direction_output(gpio_power,
host->pdata->gpio_power_invert);
}
- if (gpio_is_valid(gpio_ro)) {
- ret = gpio_request(gpio_ro, "mmc card read only");
- if (ret) {
- dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
- goto err_gpio_ro;
- }
- gpio_direction_input(gpio_ro);
+ if (gpio_is_valid(gpio_ro))
+ ret = mmc_gpio_request_ro(mmc, gpio_ro);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
+ goto out;
+ } else {
+ mmc->caps |= host->pdata->gpio_card_ro_invert ?
+ MMC_CAP2_RO_ACTIVE_HIGH : 0;
}
- if (gpio_is_valid(gpio_cd)) {
- ret = gpio_request(gpio_cd, "mmc card detect");
- if (ret) {
- dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
- goto err_gpio_cd;
- }
- gpio_direction_input(gpio_cd);
- ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "mmc card detect", mmc);
- if (ret) {
- dev_err(&pdev->dev, "failed to request card detect IRQ\n");
- goto err_request_irq;
- }
+ if (gpio_is_valid(gpio_cd))
+ ret = mmc_gpio_request_cd(mmc, gpio_cd, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
+ goto out;
}
if (host->pdata && host->pdata->init)
@@ -835,13 +827,7 @@ static int pxamci_probe(struct platform_device *pdev)
return 0;
-err_request_irq:
- gpio_free(gpio_cd);
-err_gpio_cd:
- gpio_free(gpio_ro);
-err_gpio_ro:
- gpio_free(gpio_power);
- out:
+out:
if (host) {
if (host->dma_chan_rx)
dma_release_channel(host->dma_chan_rx);
@@ -873,14 +859,6 @@ static int pxamci_remove(struct platform_device *pdev)
gpio_ro = host->pdata->gpio_card_ro;
gpio_power = host->pdata->gpio_power;
}
- if (gpio_is_valid(gpio_cd)) {
- free_irq(gpio_to_irq(gpio_cd), mmc);
- gpio_free(gpio_cd);
- }
- if (gpio_is_valid(gpio_ro))
- gpio_free(gpio_ro);
- if (gpio_is_valid(gpio_power))
- gpio_free(gpio_power);
if (host->vcc)
regulator_put(host->vcc);
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index d1556643a41d..a0f05de5409f 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -43,6 +43,7 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
static const struct sdhci_pltfm_data soc_data_sama5d2 = {
.ops = &sdhci_at91_sama5d2_ops,
+ .quirks2 = SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST,
};
static const struct of_device_id sdhci_at91_dt_match[] = {
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 946d37f94a31..f5edf9d3a18a 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -135,6 +135,7 @@ static int armada_38x_quirks(struct platform_device *pdev,
struct sdhci_pxa *pxa = pltfm_host->priv;
struct resource *res;
+ host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"conf-sdio3");
@@ -290,6 +291,9 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
uhs == MMC_TIMING_UHS_DDR50) {
reg_val &= ~SDIO3_CONF_CLK_INV;
reg_val |= SDIO3_CONF_SD_FB_CLK;
+ } else if (uhs == MMC_TIMING_MMC_HS) {
+ reg_val &= ~SDIO3_CONF_CLK_INV;
+ reg_val &= ~SDIO3_CONF_SD_FB_CLK;
} else {
reg_val |= SDIO3_CONF_CLK_INV;
reg_val &= ~SDIO3_CONF_SD_FB_CLK;
@@ -398,7 +402,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
ret = armada_38x_quirks(pdev, host);
if (ret < 0)
- goto err_clk_get;
+ goto err_mbus_win;
ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
if (ret < 0)
goto err_mbus_win;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 64b7fdbd1a9c..fbc7efdddcb5 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1160,6 +1160,8 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
host->mmc->actual_clock = 0;
sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+ if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
+ mdelay(1);
if (clock == 0)
return;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 7c02ff46c8ac..9d4aa31b683a 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -412,6 +412,11 @@ struct sdhci_host {
#define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14)
/* Broken Clock divider zero in controller */
#define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15)
+/*
+ * When internal clock is disabled, a delay is needed before modifying the
+ * SD clock frequency or enabling back the internal clock.
+ */
+#define SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST (1<<16)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index a7b7a6771598..b981b8552e43 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -210,6 +210,16 @@
#define SDXC_IDMAC_DES0_CES BIT(30) /* card error summary */
#define SDXC_IDMAC_DES0_OWN BIT(31) /* 1-idma owns it, 0-host owns it */
+#define SDXC_CLK_400K 0
+#define SDXC_CLK_25M 1
+#define SDXC_CLK_50M 2
+#define SDXC_CLK_50M_DDR 3
+
+struct sunxi_mmc_clk_delay {
+ u32 output;
+ u32 sample;
+};
+
struct sunxi_idma_des {
u32 config;
u32 buf_size;
@@ -229,6 +239,7 @@ struct sunxi_mmc_host {
struct clk *clk_mmc;
struct clk *clk_sample;
struct clk *clk_output;
+ const struct sunxi_mmc_clk_delay *clk_delays;
/* irq */
spinlock_t lock;
@@ -654,25 +665,19 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
/* determine delays */
if (rate <= 400000) {
- oclk_dly = 180;
- sclk_dly = 42;
+ oclk_dly = host->clk_delays[SDXC_CLK_400K].output;
+ sclk_dly = host->clk_delays[SDXC_CLK_400K].sample;
} else if (rate <= 25000000) {
- oclk_dly = 180;
- sclk_dly = 75;
+ oclk_dly = host->clk_delays[SDXC_CLK_25M].output;
+ sclk_dly = host->clk_delays[SDXC_CLK_25M].sample;
} else if (rate <= 50000000) {
if (ios->timing == MMC_TIMING_UHS_DDR50) {
- oclk_dly = 60;
- sclk_dly = 120;
+ oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output;
+ sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample;
} else {
- oclk_dly = 90;
- sclk_dly = 150;
+ oclk_dly = host->clk_delays[SDXC_CLK_50M].output;
+ sclk_dly = host->clk_delays[SDXC_CLK_50M].sample;
}
- } else if (rate <= 100000000) {
- oclk_dly = 6;
- sclk_dly = 24;
- } else if (rate <= 200000000) {
- oclk_dly = 3;
- sclk_dly = 12;
} else {
return -EINVAL;
}
@@ -871,6 +876,7 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
static const struct of_device_id sunxi_mmc_of_match[] = {
{ .compatible = "allwinner,sun4i-a10-mmc", },
{ .compatible = "allwinner,sun5i-a13-mmc", },
+ { .compatible = "allwinner,sun9i-a80-mmc", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
@@ -884,6 +890,20 @@ static struct mmc_host_ops sunxi_mmc_ops = {
.hw_reset = sunxi_mmc_hw_reset,
};
+static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = {
+ [SDXC_CLK_400K] = { .output = 180, .sample = 180 },
+ [SDXC_CLK_25M] = { .output = 180, .sample = 75 },
+ [SDXC_CLK_50M] = { .output = 90, .sample = 120 },
+ [SDXC_CLK_50M_DDR] = { .output = 60, .sample = 120 },
+};
+
+static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
+ [SDXC_CLK_400K] = { .output = 180, .sample = 180 },
+ [SDXC_CLK_25M] = { .output = 180, .sample = 75 },
+ [SDXC_CLK_50M] = { .output = 150, .sample = 120 },
+ [SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 },
+};
+
static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
struct platform_device *pdev)
{
@@ -895,6 +915,11 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
else
host->idma_des_size_bits = 16;
+ if (of_device_is_compatible(np, "allwinner,sun9i-a80-mmc"))
+ host->clk_delays = sun9i_mmc_clk_delays;
+ else
+ host->clk_delays = sunxi_mmc_clk_delays;
+
ret = mmc_regulator_get_supply(host->mmc);
if (ret) {
if (ret != -EPROBE_DEFER)
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 2426db88db36..f04445b992f5 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -879,7 +879,7 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom)
oob_chunk_size);
/* the last chunk */
- memcpy16_toio(&s[oob_chunk_size * sparebuf_size],
+ memcpy16_toio(&s[i * sparebuf_size],
&d[i * oob_chunk_size],
host->used_oobsize - i * oob_chunk_size);
}
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index f97a58d6aae1..e7d333c162be 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -147,6 +147,10 @@
#define NFC_ECC_MODE GENMASK(15, 12)
#define NFC_RANDOM_SEED GENMASK(30, 16)
+/* NFC_USER_DATA helper macros */
+#define NFC_BUF_TO_USER_DATA(buf) ((buf)[0] | ((buf)[1] << 8) | \
+ ((buf)[2] << 16) | ((buf)[3] << 24))
+
#define NFC_DEFAULT_TIMEOUT_MS 1000
#define NFC_SRAM_SIZE 1024
@@ -646,15 +650,9 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize;
/* Fill OOB data in */
- if (oob_required) {
- tmp = 0xffffffff;
- memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
- 4);
- } else {
- memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE,
- chip->oob_poi + offset - mtd->writesize,
- 4);
- }
+ writel(NFC_BUF_TO_USER_DATA(chip->oob_poi +
+ layout->oobfree[i].offset),
+ nfc->regs + NFC_REG_USER_DATA_BASE);
chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
@@ -784,14 +782,8 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
offset += ecc->size;
/* Fill OOB data in */
- if (oob_required) {
- tmp = 0xffffffff;
- memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
- 4);
- } else {
- memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, oob,
- 4);
- }
+ writel(NFC_BUF_TO_USER_DATA(oob),
+ nfc->regs + NFC_REG_USER_DATA_BASE);
tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR |
(1 << 30);
@@ -1389,6 +1381,7 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
node);
nand_release(&chip->mtd);
sunxi_nand_ecc_cleanup(&chip->nand.ecc);
+ list_del(&chip->node);
}
}
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 5bbd1f094f4e..1fc23e48fe8e 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -926,6 +926,11 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
goto bad;
}
+ if (data_size > ubi->leb_size) {
+ ubi_err(ubi, "bad data_size");
+ goto bad;
+ }
+
if (vol_type == UBI_VID_STATIC) {
/*
* Although from high-level point of view static volumes may
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 80bdd5b88bac..d85c19762160 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -649,6 +649,7 @@ static int init_volumes(struct ubi_device *ubi,
if (ubi->corr_peb_count)
ubi_err(ubi, "%d PEBs are corrupted and not used",
ubi->corr_peb_count);
+ return -ENOSPC;
}
ubi->rsvd_pebs += reserved_pebs;
ubi->avail_pebs -= reserved_pebs;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 275d9fb6fe5c..eb4489f9082f 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1601,6 +1601,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
if (ubi->corr_peb_count)
ubi_err(ubi, "%d PEBs are corrupted and not used",
ubi->corr_peb_count);
+ err = -ENOSPC;
goto out_free;
}
ubi->avail_pebs -= reserved_pebs;
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index f8baa897d1a0..1f7dd927cc5e 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -2051,6 +2051,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
else
reg |= PORT_CONTROL_FRAME_MODE_DSA;
+ reg |= PORT_CONTROL_FORWARD_UNKNOWN |
+ PORT_CONTROL_FORWARD_UNKNOWN_MC;
}
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index b7a0f7879de2..9e59663a6ead 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1543,7 +1543,7 @@ bfa_flash_cmd_act_check(void __iomem *pci_bar)
}
/* Flush FLI data fifo. */
-static u32
+static int
bfa_flash_fifo_flush(void __iomem *pci_bar)
{
u32 i;
@@ -1573,11 +1573,11 @@ bfa_flash_fifo_flush(void __iomem *pci_bar)
}
/* Read flash status. */
-static u32
+static int
bfa_flash_status_read(void __iomem *pci_bar)
{
union bfa_flash_dev_status_reg dev_status;
- u32 status;
+ int status;
u32 ret_status;
int i;
@@ -1611,11 +1611,11 @@ bfa_flash_status_read(void __iomem *pci_bar)
}
/* Start flash read operation. */
-static u32
+static int
bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
char *buf)
{
- u32 status;
+ int status;
	/* len must be multiple of 4 and not exceeding fifo size */
if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
@@ -1703,7 +1703,8 @@ static enum bfa_status
bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
u32 len)
{
- u32 n, status;
+ u32 n;
+ int status;
u32 off, l, s, residue, fifo_sz;
residue = len;
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index cc2d8b4b18e3..253f8ed0537a 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -816,7 +816,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
struct net_device *ndev;
struct hip04_priv *priv;
struct resource *res;
- unsigned int irq;
+ int irq;
int ret;
ndev = alloc_etherdev(sizeof(struct hip04_priv));
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 28df37420da9..ac02c675c59c 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -460,8 +460,8 @@ struct emac_ethtool_regs_subhdr {
u32 index;
};
-#define EMAC_ETHTOOL_REGS_VER 0
-#define EMAC4_ETHTOOL_REGS_VER 1
-#define EMAC4SYNC_ETHTOOL_REGS_VER 2
+#define EMAC_ETHTOOL_REGS_VER 3
+#define EMAC4_ETHTOOL_REGS_VER 4
+#define EMAC4SYNC_ETHTOOL_REGS_VER 5
#endif /* __IBM_NEWEMAC_CORE_H */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 3e0d20037675..62488a67149d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -946,6 +946,13 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
/* take the lock before we start messing with the ring */
mutex_lock(&hw->aq.arq_mutex);
+ if (hw->aq.arq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Admin queue not initialized.\n");
+ ret_code = I40E_ERR_QUEUE_EMPTY;
+ goto clean_arq_element_err;
+ }
+
/* set next_to_use to head */
ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
if (ntu == ntc) {
@@ -1007,6 +1014,8 @@ clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending != NULL)
*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+
+clean_arq_element_err:
mutex_unlock(&hw->aq.arq_mutex);
if (i40e_is_nvm_update_op(&e->desc)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 851c1a159be8..2fdf978ae6a5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2672,7 +2672,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.lrxqthresh = 2;
rx_ctx.crcstrip = 1;
rx_ctx.l2tsel = 1;
- rx_ctx.showiv = 1;
+ /* this controls whether VLAN is stripped from inner headers */
+ rx_ctx.showiv = 0;
#ifdef I40E_FCOE
rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
#endif
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index f08450b90774..929d47152bf2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -887,6 +887,13 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
/* take the lock before we start messing with the ring */
mutex_lock(&hw->aq.arq_mutex);
+ if (hw->aq.arq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Admin queue not initialized.\n");
+ ret_code = I40E_ERR_QUEUE_EMPTY;
+ goto clean_arq_element_err;
+ }
+
/* set next_to_use to head */
ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
if (ntu == ntc) {
@@ -948,6 +955,8 @@ clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending != NULL)
*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+
+clean_arq_element_err:
mutex_unlock(&hw->aq.arq_mutex);
return ret_code;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index bd9ea0d01aae..1d4e2e054647 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1184,10 +1184,11 @@ out:
if (prot == MLX4_PROT_ETH) {
/* manage the steering entry for promisc mode */
if (new_entry)
- new_steering_entry(dev, port, steer, index, qp->qpn);
+ err = new_steering_entry(dev, port, steer,
+ index, qp->qpn);
else
- existing_steering_entry(dev, port, steer,
- index, qp->qpn);
+ err = existing_steering_entry(dev, port, steer,
+ index, qp->qpn);
}
if (err && link && index != -1) {
if (index < dev->caps.num_mgms)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index aa0d5ffe92d8..9335e5ae18cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -200,25 +200,3 @@ int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
return err;
}
-
-int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey)
-{
- struct mlx5_cmd_query_special_contexts_mbox_in in;
- struct mlx5_cmd_query_special_contexts_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
-
- *rsvd_lkey = be32_to_cpu(out.resd_lkey);
-
- return err;
-}
-EXPORT_SYMBOL(mlx5_core_query_special_context);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 2b32e0c5a0b4..b4f21232019a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6081,7 +6081,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
- u16 rg_saw_cnt;
+ int rg_saw_cnt;
u32 data;
static const struct ephy_info e_info_8168h_1[] = {
{ 0x1e, 0x0800, 0x0001 },
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index d3c6676b3c0c..6fd4e5a5ef4a 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -67,7 +67,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
int rc;
/* Stop the user from reading */
- if (pos > nvmem->size)
+ if (pos >= nvmem->size)
return 0;
if (pos + count > nvmem->size)
@@ -92,7 +92,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
int rc;
/* Stop the user from writing */
- if (pos > nvmem->size)
+ if (pos >= nvmem->size)
return 0;
if (pos + count > nvmem->size)
@@ -825,7 +825,7 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
return rc;
/* shift bits in-place */
- if (cell->bit_offset || cell->bit_offset)
+ if (cell->bit_offset || cell->nbits)
nvmem_shift_read_buffer_in_place(cell, buf);
*len = cell->bytes;
@@ -938,7 +938,7 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes);
/* free the tmp buffer */
- if (cell->bit_offset)
+ if (cell->bit_offset || cell->nbits)
kfree(buf);
if (IS_ERR_VALUE(rc))
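
The two ">" to ">=" changes above make an access that starts exactly at nvmem->size return 0 (end of device) right away instead of falling through to a zero-length regmap access. A compressed sketch of that boundary handling, assuming nothing beyond the obvious pos/count/size triple:

/* sketch: clamp a window [pos, pos + count) to a device of 'size' bytes */
static size_t clamp_window(size_t pos, size_t count, size_t size)
{
	if (pos >= size)		/* at or past the end: nothing left */
		return 0;
	if (pos + count > size)		/* partial window: trim to the end */
		count = size - pos;
	return count;
}
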
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index 14777dd5212d..cfa3b85064dd 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -103,7 +103,7 @@ static int sunxi_sid_probe(struct platform_device *pdev)
struct nvmem_device *nvmem;
struct regmap *regmap;
struct sunxi_sid *sid;
- int i, size;
+ int ret, i, size;
char *randomness;
sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL);
@@ -131,6 +131,11 @@ static int sunxi_sid_probe(struct platform_device *pdev)
return PTR_ERR(nvmem);
randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL);
+ if (!randomness) {
+ ret = -EINVAL;
+ goto err_unreg_nvmem;
+ }
+
for (i = 0; i < size; i++)
randomness[i] = sunxi_sid_read_byte(sid, i);
@@ -140,6 +145,10 @@ static int sunxi_sid_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, nvmem);
return 0;
+
+err_unreg_nvmem:
+ nvmem_unregister(nvmem);
+ return ret;
}
static int sunxi_sid_remove(struct platform_device *pdev)
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index dd652f2ae03d..108a3118ace7 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -299,9 +299,10 @@ static long local_pci_probe(void *_ddi)
* Unbound PCI devices are always put in D0, regardless of
* runtime PM status. During probe, the device is set to
* active and the usage count is incremented. If the driver
- * supports runtime PM, it should call pm_runtime_put_noidle()
- * in its probe routine and pm_runtime_get_noresume() in its
- * remove routine.
+ * supports runtime PM, it should call pm_runtime_put_noidle(),
+ * or any other runtime PM helper function decrementing the usage
+ * count, in its probe routine and pm_runtime_get_noresume() in
+ * its remove routine.
*/
pm_runtime_get_sync(dev);
pci_dev->driver = pci_drv;
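
The reworded comment spells out the balancing rule: local_pci_probe() raises the runtime-PM usage count before calling the driver, so a driver that manages runtime PM itself drops that reference during probe and takes it back during remove. A hedged sketch of that convention, with a made-up driver name:

#include <linux/pci.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* ... normal device setup ... */

	/* drop the usage count the PCI core raised before probe */
	pm_runtime_put_noidle(&pdev->dev);
	return 0;
}

static void foo_remove(struct pci_dev *pdev)
{
	/* restore the usage count so the core's own decrement stays balanced */
	pm_runtime_get_noresume(&pdev->dev);
}
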
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 47da573d0bab..7eb5859dd035 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -206,6 +206,15 @@ config PHY_HIX5HD2_SATA
help
Support for SATA PHY on Hisilicon hix5hd2 Soc.
+config PHY_MT65XX_USB3
+ tristate "Mediatek USB3.0 PHY Driver"
+ depends on ARCH_MEDIATEK && OF
+ select GENERIC_PHY
+ help
+ Say 'Y' here to add support for Mediatek USB3.0 PHY driver
+ for mt65xx SoCs. it supports two usb2.0 ports and
+ one usb3.0 port.
+
config PHY_SUN4I_USB
tristate "Allwinner sunxi SoC USB PHY driver"
depends on ARCH_SUNXI && HAS_IOMEM && OF
@@ -371,4 +380,13 @@ config PHY_BRCMSTB_SATA
Enable this to support the SATA3 PHY on 28nm Broadcom STB SoCs.
Likely useful only with CONFIG_SATA_BRCMSTB enabled.
+config PHY_CYGNUS_PCIE
+ tristate "Broadcom Cygnus PCIe PHY driver"
+ depends on OF && (ARCH_BCM_CYGNUS || COMPILE_TEST)
+ select GENERIC_PHY
+ default ARCH_BCM_CYGNUS
+ help
+ Enable this to support the Broadcom Cygnus PCIe PHY.
+ If unsure, say N.
+
endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index a5b18c18fc12..075db1a81aa5 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_TI_PIPE3) += phy-ti-pipe3.o
obj-$(CONFIG_TWL4030_USB) += phy-twl4030-usb.o
obj-$(CONFIG_PHY_EXYNOS5250_SATA) += phy-exynos5250-sata.o
obj-$(CONFIG_PHY_HIX5HD2_SATA) += phy-hix5hd2-sata.o
+obj-$(CONFIG_PHY_MT65XX_USB3) += phy-mt65xx-usb3.o
obj-$(CONFIG_PHY_SUN4I_USB) += phy-sun4i-usb.o
obj-$(CONFIG_PHY_SUN9I_USB) += phy-sun9i-usb.o
obj-$(CONFIG_PHY_SAMSUNG_USB2) += phy-exynos-usb2.o
@@ -46,3 +47,4 @@ obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-14nm.o
obj-$(CONFIG_PHY_TUSB1210) += phy-tusb1210.o
obj-$(CONFIG_PHY_BRCMSTB_SATA) += phy-brcmstb-sata.o
obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o
+obj-$(CONFIG_PHY_CYGNUS_PCIE) += phy-bcm-cygnus-pcie.o
diff --git a/drivers/phy/phy-bcm-cygnus-pcie.c b/drivers/phy/phy-bcm-cygnus-pcie.c
new file mode 100644
index 000000000000..7ad72b7d2b98
--- /dev/null
+++ b/drivers/phy/phy-bcm-cygnus-pcie.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+#define PCIE_CFG_OFFSET 0x00
+#define PCIE1_PHY_IDDQ_SHIFT 10
+#define PCIE0_PHY_IDDQ_SHIFT 2
+
+enum cygnus_pcie_phy_id {
+ CYGNUS_PHY_PCIE0 = 0,
+ CYGNUS_PHY_PCIE1,
+ MAX_NUM_PHYS,
+};
+
+struct cygnus_pcie_phy_core;
+
+/**
+ * struct cygnus_pcie_phy - Cygnus PCIe PHY device
+ * @core: pointer to the Cygnus PCIe PHY core control
+ * @id: internal ID to identify the Cygnus PCIe PHY
+ * @phy: pointer to the kernel PHY device
+ */
+struct cygnus_pcie_phy {
+ struct cygnus_pcie_phy_core *core;
+ enum cygnus_pcie_phy_id id;
+ struct phy *phy;
+};
+
+/**
+ * struct cygnus_pcie_phy_core - Cygnus PCIe PHY core control
+ * @dev: pointer to device
+ * @base: base register
+ * @lock: mutex to protect access to individual PHYs
+ * @phys: pointer to Cygnus PHY device
+ */
+struct cygnus_pcie_phy_core {
+ struct device *dev;
+ void __iomem *base;
+ struct mutex lock;
+ struct cygnus_pcie_phy phys[MAX_NUM_PHYS];
+};
+
+static int cygnus_pcie_power_config(struct cygnus_pcie_phy *phy, bool enable)
+{
+ struct cygnus_pcie_phy_core *core = phy->core;
+ unsigned shift;
+ u32 val;
+
+ mutex_lock(&core->lock);
+
+ switch (phy->id) {
+ case CYGNUS_PHY_PCIE0:
+ shift = PCIE0_PHY_IDDQ_SHIFT;
+ break;
+
+ case CYGNUS_PHY_PCIE1:
+ shift = PCIE1_PHY_IDDQ_SHIFT;
+ break;
+
+ default:
+ mutex_unlock(&core->lock);
+ dev_err(core->dev, "PCIe PHY %d invalid\n", phy->id);
+ return -EINVAL;
+ }
+
+ if (enable) {
+ val = readl(core->base + PCIE_CFG_OFFSET);
+ val &= ~BIT(shift);
+ writel(val, core->base + PCIE_CFG_OFFSET);
+ /*
+ * Wait 50 ms for the PCIe Serdes to stabilize after the analog
+ * front end is brought up
+ */
+ msleep(50);
+ } else {
+ val = readl(core->base + PCIE_CFG_OFFSET);
+ val |= BIT(shift);
+ writel(val, core->base + PCIE_CFG_OFFSET);
+ }
+
+ mutex_unlock(&core->lock);
+ dev_dbg(core->dev, "PCIe PHY %d %s\n", phy->id,
+ enable ? "enabled" : "disabled");
+ return 0;
+}
+
+static int cygnus_pcie_phy_power_on(struct phy *p)
+{
+ struct cygnus_pcie_phy *phy = phy_get_drvdata(p);
+
+ return cygnus_pcie_power_config(phy, true);
+}
+
+static int cygnus_pcie_phy_power_off(struct phy *p)
+{
+ struct cygnus_pcie_phy *phy = phy_get_drvdata(p);
+
+ return cygnus_pcie_power_config(phy, false);
+}
+
+static struct phy_ops cygnus_pcie_phy_ops = {
+ .power_on = cygnus_pcie_phy_power_on,
+ .power_off = cygnus_pcie_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int cygnus_pcie_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node, *child;
+ struct cygnus_pcie_phy_core *core;
+ struct phy_provider *provider;
+ struct resource *res;
+ unsigned cnt = 0;
+
+ if (of_get_child_count(node) == 0) {
+ dev_err(dev, "PHY no child node\n");
+ return -ENODEV;
+ }
+
+ core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
+ if (!core)
+ return -ENOMEM;
+
+ core->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ core->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(core->base))
+ return PTR_ERR(core->base);
+
+ mutex_init(&core->lock);
+
+ for_each_available_child_of_node(node, child) {
+ unsigned int id;
+ struct cygnus_pcie_phy *p;
+
+ if (of_property_read_u32(child, "reg", &id)) {
+ dev_err(dev, "missing reg property for %s\n",
+ child->name);
+ return -EINVAL;
+ }
+
+ if (id >= MAX_NUM_PHYS) {
+ dev_err(dev, "invalid PHY id: %u\n", id);
+ return -EINVAL;
+ }
+
+ if (core->phys[id].phy) {
+ dev_err(dev, "duplicated PHY id: %u\n", id);
+ return -EINVAL;
+ }
+
+ p = &core->phys[id];
+ p->phy = devm_phy_create(dev, child, &cygnus_pcie_phy_ops);
+ if (IS_ERR(p->phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(p->phy);
+ }
+
+ p->core = core;
+ p->id = id;
+ phy_set_drvdata(p->phy, p);
+ cnt++;
+ }
+
+ dev_set_drvdata(dev, core);
+
+ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(provider)) {
+ dev_err(dev, "failed to register PHY provider\n");
+ return PTR_ERR(provider);
+ }
+
+ dev_dbg(dev, "registered %u PCIe PHY(s)\n", cnt);
+
+ return 0;
+}
+
+static const struct of_device_id cygnus_pcie_phy_match_table[] = {
+ { .compatible = "brcm,cygnus-pcie-phy" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, cygnus_pcie_phy_match_table);
+
+static struct platform_driver cygnus_pcie_phy_driver = {
+ .driver = {
+ .name = "cygnus-pcie-phy",
+ .of_match_table = cygnus_pcie_phy_match_table,
+ },
+ .probe = cygnus_pcie_phy_probe,
+};
+module_platform_driver(cygnus_pcie_phy_driver);
+
+MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom Cygnus PCIe PHY driver");
+MODULE_LICENSE("GPL v2");
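
For context, a hedged sketch of how a PCIe host controller driver would consume one of these PHYs through the generic PHY framework; the lookup name and surrounding function are illustrative and not part of this patch:

#include <linux/err.h>
#include <linux/phy/phy.h>

static int host_attach_phy(struct device *dev)
{
	struct phy *phy;
	int ret;

	phy = devm_phy_get(dev, "pcie-phy");	/* name is an assumption */
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	ret = phy_power_on(phy);	/* ends up in cygnus_pcie_phy_power_on() */
	if (ret)
		return ret;

	/* ... PCIe link bring-up; call phy_power_off(phy) on teardown ... */
	return 0;
}
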
diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/phy-berlin-sata.c
index 0062027afb1e..77a2e054fdea 100644
--- a/drivers/phy/phy-berlin-sata.c
+++ b/drivers/phy/phy-berlin-sata.c
@@ -276,6 +276,7 @@ static const struct of_device_id phy_berlin_sata_of_match[] = {
{ .compatible = "marvell,berlin2q-sata-phy" },
{ },
};
+MODULE_DEVICE_TABLE(of, phy_berlin_sata_of_match);
static struct platform_driver phy_berlin_sata_driver = {
.probe = phy_berlin_sata_probe,
diff --git a/drivers/phy/phy-mt65xx-usb3.c b/drivers/phy/phy-mt65xx-usb3.c
new file mode 100644
index 000000000000..f30b28bd41fe
--- /dev/null
+++ b/drivers/phy/phy-mt65xx-usb3.c
@@ -0,0 +1,506 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <dt-bindings/phy/phy.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+/*
+ * for the sifslv2 registers, excluding the per-port ones;
+ * relative to the USB3_SIF2_BASE base address
+ */
+#define SSUSB_SIFSLV_SPLLC 0x0000
+
+/* offsets of sub-segment in each port registers */
+#define SSUSB_SIFSLV_U2PHY_COM_BASE 0x0000
+#define SSUSB_SIFSLV_U3PHYD_BASE 0x0100
+#define SSUSB_USB30_PHYA_SIV_B_BASE 0x0300
+#define SSUSB_SIFSLV_U3PHYA_DA_BASE 0x0400
+
+#define U3P_USBPHYACR0 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0000)
+#define PA0_RG_U2PLL_FORCE_ON BIT(15)
+
+#define U3P_USBPHYACR2 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0008)
+#define PA2_RG_SIF_U2PLL_FORCE_EN BIT(18)
+
+#define U3P_USBPHYACR5 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0014)
+#define PA5_RG_U2_HSTX_SRCTRL GENMASK(14, 12)
+#define PA5_RG_U2_HSTX_SRCTRL_VAL(x) ((0x7 & (x)) << 12)
+#define PA5_RG_U2_HS_100U_U3_EN BIT(11)
+
+#define U3P_USBPHYACR6 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0018)
+#define PA6_RG_U2_ISO_EN BIT(31)
+#define PA6_RG_U2_BC11_SW_EN BIT(23)
+#define PA6_RG_U2_OTG_VBUSCMP_EN BIT(20)
+
+#define U3P_U2PHYACR4 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0020)
+#define P2C_RG_USB20_GPIO_CTL BIT(9)
+#define P2C_USB20_GPIO_MODE BIT(8)
+#define P2C_U2_GPIO_CTR_MSK (P2C_RG_USB20_GPIO_CTL | P2C_USB20_GPIO_MODE)
+
+#define U3D_U2PHYDCR0 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0060)
+#define P2C_RG_SIF_U2PLL_FORCE_ON BIT(24)
+
+#define U3P_U2PHYDTM0 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x0068)
+#define P2C_FORCE_UART_EN BIT(26)
+#define P2C_FORCE_DATAIN BIT(23)
+#define P2C_FORCE_DM_PULLDOWN BIT(21)
+#define P2C_FORCE_DP_PULLDOWN BIT(20)
+#define P2C_FORCE_XCVRSEL BIT(19)
+#define P2C_FORCE_SUSPENDM BIT(18)
+#define P2C_FORCE_TERMSEL BIT(17)
+#define P2C_RG_DATAIN GENMASK(13, 10)
+#define P2C_RG_DATAIN_VAL(x) ((0xf & (x)) << 10)
+#define P2C_RG_DMPULLDOWN BIT(7)
+#define P2C_RG_DPPULLDOWN BIT(6)
+#define P2C_RG_XCVRSEL GENMASK(5, 4)
+#define P2C_RG_XCVRSEL_VAL(x) ((0x3 & (x)) << 4)
+#define P2C_RG_SUSPENDM BIT(3)
+#define P2C_RG_TERMSEL BIT(2)
+#define P2C_DTM0_PART_MASK \
+ (P2C_FORCE_DATAIN | P2C_FORCE_DM_PULLDOWN | \
+ P2C_FORCE_DP_PULLDOWN | P2C_FORCE_XCVRSEL | \
+ P2C_FORCE_TERMSEL | P2C_RG_DMPULLDOWN | \
+ P2C_RG_DPPULLDOWN | P2C_RG_TERMSEL)
+
+#define U3P_U2PHYDTM1 (SSUSB_SIFSLV_U2PHY_COM_BASE + 0x006C)
+#define P2C_RG_UART_EN BIT(16)
+#define P2C_RG_VBUSVALID BIT(5)
+#define P2C_RG_SESSEND BIT(4)
+#define P2C_RG_AVALID BIT(2)
+
+#define U3P_U3_PHYA_REG0 (SSUSB_USB30_PHYA_SIV_B_BASE + 0x0000)
+#define P3A_RG_U3_VUSB10_ON BIT(5)
+
+#define U3P_U3_PHYA_REG6 (SSUSB_USB30_PHYA_SIV_B_BASE + 0x0018)
+#define P3A_RG_TX_EIDLE_CM GENMASK(31, 28)
+#define P3A_RG_TX_EIDLE_CM_VAL(x) ((0xf & (x)) << 28)
+
+#define U3P_U3_PHYA_REG9 (SSUSB_USB30_PHYA_SIV_B_BASE + 0x0024)
+#define P3A_RG_RX_DAC_MUX GENMASK(5, 1)
+#define P3A_RG_RX_DAC_MUX_VAL(x) ((0x1f & (x)) << 1)
+
+#define U3P_U3PHYA_DA_REG0 (SSUSB_SIFSLV_U3PHYA_DA_BASE + 0x0000)
+#define P3A_RG_XTAL_EXT_EN_U3 GENMASK(11, 10)
+#define P3A_RG_XTAL_EXT_EN_U3_VAL(x) ((0x3 & (x)) << 10)
+
+#define U3P_PHYD_CDR1 (SSUSB_SIFSLV_U3PHYD_BASE + 0x005c)
+#define P3D_RG_CDR_BIR_LTD1 GENMASK(28, 24)
+#define P3D_RG_CDR_BIR_LTD1_VAL(x) ((0x1f & (x)) << 24)
+#define P3D_RG_CDR_BIR_LTD0 GENMASK(12, 8)
+#define P3D_RG_CDR_BIR_LTD0_VAL(x) ((0x1f & (x)) << 8)
+
+#define U3P_XTALCTL3 (SSUSB_SIFSLV_SPLLC + 0x0018)
+#define XC3_RG_U3_XTAL_RX_PWD BIT(9)
+#define XC3_RG_U3_FRC_XTAL_RX_PWD BIT(8)
+
+struct mt65xx_phy_instance {
+ struct phy *phy;
+ void __iomem *port_base;
+ u32 index;
+ u8 type;
+};
+
+struct mt65xx_u3phy {
+ struct device *dev;
+	void __iomem *sif_base; /* sif2 registers, excluding the per-port ones */
+	struct clk *u3phya_ref;	/* reference clock of the usb3 analog phy */
+ struct mt65xx_phy_instance **phys;
+ int nphys;
+};
+
+static void phy_instance_init(struct mt65xx_u3phy *u3phy,
+ struct mt65xx_phy_instance *instance)
+{
+ void __iomem *port_base = instance->port_base;
+ u32 index = instance->index;
+ u32 tmp;
+
+ /* switch to USB function. (system register, force ip into usb mode) */
+ tmp = readl(port_base + U3P_U2PHYDTM0);
+ tmp &= ~P2C_FORCE_UART_EN;
+ tmp |= P2C_RG_XCVRSEL_VAL(1) | P2C_RG_DATAIN_VAL(0);
+ writel(tmp, port_base + U3P_U2PHYDTM0);
+
+ tmp = readl(port_base + U3P_U2PHYDTM1);
+ tmp &= ~P2C_RG_UART_EN;
+ writel(tmp, port_base + U3P_U2PHYDTM1);
+
+ if (!index) {
+ tmp = readl(port_base + U3P_U2PHYACR4);
+ tmp &= ~P2C_U2_GPIO_CTR_MSK;
+ writel(tmp, port_base + U3P_U2PHYACR4);
+
+ tmp = readl(port_base + U3P_USBPHYACR2);
+ tmp |= PA2_RG_SIF_U2PLL_FORCE_EN;
+ writel(tmp, port_base + U3P_USBPHYACR2);
+
+ tmp = readl(port_base + U3D_U2PHYDCR0);
+ tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON;
+ writel(tmp, port_base + U3D_U2PHYDCR0);
+ } else {
+ tmp = readl(port_base + U3D_U2PHYDCR0);
+ tmp |= P2C_RG_SIF_U2PLL_FORCE_ON;
+ writel(tmp, port_base + U3D_U2PHYDCR0);
+
+ tmp = readl(port_base + U3P_U2PHYDTM0);
+ tmp |= P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM;
+ writel(tmp, port_base + U3P_U2PHYDTM0);
+ }
+
+ /* DP/DM BC1.1 path Disable */
+ tmp = readl(port_base + U3P_USBPHYACR6);
+ tmp &= ~PA6_RG_U2_BC11_SW_EN;
+ writel(tmp, port_base + U3P_USBPHYACR6);
+
+ tmp = readl(port_base + U3P_U3PHYA_DA_REG0);
+ tmp &= ~P3A_RG_XTAL_EXT_EN_U3;
+ tmp |= P3A_RG_XTAL_EXT_EN_U3_VAL(2);
+ writel(tmp, port_base + U3P_U3PHYA_DA_REG0);
+
+ tmp = readl(port_base + U3P_U3_PHYA_REG9);
+ tmp &= ~P3A_RG_RX_DAC_MUX;
+ tmp |= P3A_RG_RX_DAC_MUX_VAL(4);
+ writel(tmp, port_base + U3P_U3_PHYA_REG9);
+
+ tmp = readl(port_base + U3P_U3_PHYA_REG6);
+ tmp &= ~P3A_RG_TX_EIDLE_CM;
+ tmp |= P3A_RG_TX_EIDLE_CM_VAL(0xe);
+ writel(tmp, port_base + U3P_U3_PHYA_REG6);
+
+ tmp = readl(port_base + U3P_PHYD_CDR1);
+ tmp &= ~(P3D_RG_CDR_BIR_LTD0 | P3D_RG_CDR_BIR_LTD1);
+ tmp |= P3D_RG_CDR_BIR_LTD0_VAL(0xc) | P3D_RG_CDR_BIR_LTD1_VAL(0x3);
+ writel(tmp, port_base + U3P_PHYD_CDR1);
+
+ dev_dbg(u3phy->dev, "%s(%d)\n", __func__, index);
+}
+
+static void phy_instance_power_on(struct mt65xx_u3phy *u3phy,
+ struct mt65xx_phy_instance *instance)
+{
+ void __iomem *port_base = instance->port_base;
+ u32 index = instance->index;
+ u32 tmp;
+
+ if (!index) {
+ /* Set RG_SSUSB_VUSB10_ON as 1 after VUSB10 ready */
+ tmp = readl(port_base + U3P_U3_PHYA_REG0);
+ tmp |= P3A_RG_U3_VUSB10_ON;
+ writel(tmp, port_base + U3P_U3_PHYA_REG0);
+ }
+
+ /* (force_suspendm=0) (let suspendm=1, enable usb 480MHz pll) */
+ tmp = readl(port_base + U3P_U2PHYDTM0);
+ tmp &= ~(P2C_FORCE_SUSPENDM | P2C_RG_XCVRSEL);
+ tmp &= ~(P2C_RG_DATAIN | P2C_DTM0_PART_MASK);
+ writel(tmp, port_base + U3P_U2PHYDTM0);
+
+ /* OTG Enable */
+ tmp = readl(port_base + U3P_USBPHYACR6);
+ tmp |= PA6_RG_U2_OTG_VBUSCMP_EN;
+ writel(tmp, port_base + U3P_USBPHYACR6);
+
+ if (!index) {
+ tmp = readl(u3phy->sif_base + U3P_XTALCTL3);
+ tmp |= XC3_RG_U3_XTAL_RX_PWD | XC3_RG_U3_FRC_XTAL_RX_PWD;
+ writel(tmp, u3phy->sif_base + U3P_XTALCTL3);
+
+ /* [mt8173]disable Change 100uA current from SSUSB */
+		/* [mt8173] do not switch the 100uA current to SSUSB */
+ tmp &= ~PA5_RG_U2_HS_100U_U3_EN;
+ writel(tmp, port_base + U3P_USBPHYACR5);
+ }
+
+ tmp = readl(port_base + U3P_U2PHYDTM1);
+ tmp |= P2C_RG_VBUSVALID | P2C_RG_AVALID;
+ tmp &= ~P2C_RG_SESSEND;
+ writel(tmp, port_base + U3P_U2PHYDTM1);
+
+ /* USB 2.0 slew rate calibration */
+ tmp = readl(port_base + U3P_USBPHYACR5);
+ tmp &= ~PA5_RG_U2_HSTX_SRCTRL;
+ tmp |= PA5_RG_U2_HSTX_SRCTRL_VAL(4);
+ writel(tmp, port_base + U3P_USBPHYACR5);
+
+ if (index) {
+ tmp = readl(port_base + U3D_U2PHYDCR0);
+ tmp |= P2C_RG_SIF_U2PLL_FORCE_ON;
+ writel(tmp, port_base + U3D_U2PHYDCR0);
+
+ tmp = readl(port_base + U3P_U2PHYDTM0);
+ tmp |= P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM;
+ writel(tmp, port_base + U3P_U2PHYDTM0);
+ }
+ dev_dbg(u3phy->dev, "%s(%d)\n", __func__, index);
+}
+
+static void phy_instance_power_off(struct mt65xx_u3phy *u3phy,
+ struct mt65xx_phy_instance *instance)
+{
+ void __iomem *port_base = instance->port_base;
+ u32 index = instance->index;
+ u32 tmp;
+
+ tmp = readl(port_base + U3P_U2PHYDTM0);
+ tmp &= ~(P2C_RG_XCVRSEL | P2C_RG_DATAIN);
+ tmp |= P2C_FORCE_SUSPENDM;
+ writel(tmp, port_base + U3P_U2PHYDTM0);
+
+ /* OTG Disable */
+ tmp = readl(port_base + U3P_USBPHYACR6);
+ tmp &= ~PA6_RG_U2_OTG_VBUSCMP_EN;
+ writel(tmp, port_base + U3P_USBPHYACR6);
+
+ if (!index) {
+		/* also switch the 100uA current back to USB 2.0 */
+ tmp = readl(port_base + U3P_USBPHYACR5);
+ tmp &= ~PA5_RG_U2_HS_100U_U3_EN;
+ writel(tmp, port_base + U3P_USBPHYACR5);
+ }
+
+ /* let suspendm=0, set utmi into analog power down */
+ tmp = readl(port_base + U3P_U2PHYDTM0);
+ tmp &= ~P2C_RG_SUSPENDM;
+ writel(tmp, port_base + U3P_U2PHYDTM0);
+ udelay(1);
+
+ tmp = readl(port_base + U3P_U2PHYDTM1);
+ tmp &= ~(P2C_RG_VBUSVALID | P2C_RG_AVALID);
+ tmp |= P2C_RG_SESSEND;
+ writel(tmp, port_base + U3P_U2PHYDTM1);
+
+ if (!index) {
+ tmp = readl(port_base + U3P_U3_PHYA_REG0);
+ tmp &= ~P3A_RG_U3_VUSB10_ON;
+ writel(tmp, port_base + U3P_U3_PHYA_REG0);
+ } else {
+ tmp = readl(port_base + U3D_U2PHYDCR0);
+ tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON;
+ writel(tmp, port_base + U3D_U2PHYDCR0);
+ }
+
+ dev_dbg(u3phy->dev, "%s(%d)\n", __func__, index);
+}
+
+static void phy_instance_exit(struct mt65xx_u3phy *u3phy,
+ struct mt65xx_phy_instance *instance)
+{
+ void __iomem *port_base = instance->port_base;
+ u32 index = instance->index;
+ u32 tmp;
+
+ if (index) {
+ tmp = readl(port_base + U3D_U2PHYDCR0);
+ tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON;
+ writel(tmp, port_base + U3D_U2PHYDCR0);
+
+ tmp = readl(port_base + U3P_U2PHYDTM0);
+ tmp &= ~P2C_FORCE_SUSPENDM;
+ writel(tmp, port_base + U3P_U2PHYDTM0);
+ }
+}
+
+static int mt65xx_phy_init(struct phy *phy)
+{
+ struct mt65xx_phy_instance *instance = phy_get_drvdata(phy);
+ struct mt65xx_u3phy *u3phy = dev_get_drvdata(phy->dev.parent);
+ int ret;
+
+ ret = clk_prepare_enable(u3phy->u3phya_ref);
+ if (ret) {
+ dev_err(u3phy->dev, "failed to enable u3phya_ref\n");
+ return ret;
+ }
+
+ phy_instance_init(u3phy, instance);
+ return 0;
+}
+
+static int mt65xx_phy_power_on(struct phy *phy)
+{
+ struct mt65xx_phy_instance *instance = phy_get_drvdata(phy);
+ struct mt65xx_u3phy *u3phy = dev_get_drvdata(phy->dev.parent);
+
+ phy_instance_power_on(u3phy, instance);
+ return 0;
+}
+
+static int mt65xx_phy_power_off(struct phy *phy)
+{
+ struct mt65xx_phy_instance *instance = phy_get_drvdata(phy);
+ struct mt65xx_u3phy *u3phy = dev_get_drvdata(phy->dev.parent);
+
+ phy_instance_power_off(u3phy, instance);
+ return 0;
+}
+
+static int mt65xx_phy_exit(struct phy *phy)
+{
+ struct mt65xx_phy_instance *instance = phy_get_drvdata(phy);
+ struct mt65xx_u3phy *u3phy = dev_get_drvdata(phy->dev.parent);
+
+ phy_instance_exit(u3phy, instance);
+ clk_disable_unprepare(u3phy->u3phya_ref);
+ return 0;
+}
+
+static struct phy *mt65xx_phy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct mt65xx_u3phy *u3phy = dev_get_drvdata(dev);
+ struct mt65xx_phy_instance *instance = NULL;
+ struct device_node *phy_np = args->np;
+ int index;
+
+
+ if (args->args_count != 1) {
+ dev_err(dev, "invalid number of cells in 'phy' property\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ for (index = 0; index < u3phy->nphys; index++)
+ if (phy_np == u3phy->phys[index]->phy->dev.of_node) {
+ instance = u3phy->phys[index];
+ break;
+ }
+
+ if (!instance) {
+ dev_err(dev, "failed to find appropriate phy\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ instance->type = args->args[0];
+
+ if (!(instance->type == PHY_TYPE_USB2 ||
+ instance->type == PHY_TYPE_USB3)) {
+ dev_err(dev, "unsupported device type: %d\n", instance->type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return instance->phy;
+}
+
+static struct phy_ops mt65xx_u3phy_ops = {
+ .init = mt65xx_phy_init,
+ .exit = mt65xx_phy_exit,
+ .power_on = mt65xx_phy_power_on,
+ .power_off = mt65xx_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int mt65xx_u3phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *child_np;
+ struct phy_provider *provider;
+ struct resource *sif_res;
+ struct mt65xx_u3phy *u3phy;
+ struct resource res;
+ int port;
+
+ u3phy = devm_kzalloc(dev, sizeof(*u3phy), GFP_KERNEL);
+ if (!u3phy)
+ return -ENOMEM;
+
+ u3phy->nphys = of_get_child_count(np);
+ u3phy->phys = devm_kcalloc(dev, u3phy->nphys,
+ sizeof(*u3phy->phys), GFP_KERNEL);
+ if (!u3phy->phys)
+ return -ENOMEM;
+
+ u3phy->dev = dev;
+ platform_set_drvdata(pdev, u3phy);
+
+ sif_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ u3phy->sif_base = devm_ioremap_resource(dev, sif_res);
+ if (IS_ERR(u3phy->sif_base)) {
+ dev_err(dev, "failed to remap sif regs\n");
+ return PTR_ERR(u3phy->sif_base);
+ }
+
+ u3phy->u3phya_ref = devm_clk_get(dev, "u3phya_ref");
+ if (IS_ERR(u3phy->u3phya_ref)) {
+		dev_err(dev, "failed to get u3phya_ref\n");
+ return PTR_ERR(u3phy->u3phya_ref);
+ }
+
+ port = 0;
+ for_each_child_of_node(np, child_np) {
+ struct mt65xx_phy_instance *instance;
+ struct phy *phy;
+ int retval;
+
+ instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL);
+ if (!instance)
+ return -ENOMEM;
+
+ u3phy->phys[port] = instance;
+
+ phy = devm_phy_create(dev, child_np, &mt65xx_u3phy_ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to create phy\n");
+ return PTR_ERR(phy);
+ }
+
+ retval = of_address_to_resource(child_np, 0, &res);
+ if (retval) {
+ dev_err(dev, "failed to get address resource(id-%d)\n",
+ port);
+ return retval;
+ }
+
+ instance->port_base = devm_ioremap_resource(&phy->dev, &res);
+ if (IS_ERR(instance->port_base)) {
+ dev_err(dev, "failed to remap phy regs\n");
+ return PTR_ERR(instance->port_base);
+ }
+
+ instance->phy = phy;
+ instance->index = port;
+ phy_set_drvdata(phy, instance);
+ port++;
+ }
+
+ provider = devm_of_phy_provider_register(dev, mt65xx_phy_xlate);
+
+ return PTR_ERR_OR_ZERO(provider);
+}
+
+static const struct of_device_id mt65xx_u3phy_id_table[] = {
+ { .compatible = "mediatek,mt8173-u3phy", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mt65xx_u3phy_id_table);
+
+static struct platform_driver mt65xx_u3phy_driver = {
+ .probe = mt65xx_u3phy_probe,
+ .driver = {
+ .name = "mt65xx-u3phy",
+ .of_match_table = mt65xx_u3phy_id_table,
+ },
+};
+
+module_platform_driver(mt65xx_u3phy_driver);
+
+MODULE_AUTHOR("Chunfeng Yun <chunfeng.yun@mediatek.com>");
+MODULE_DESCRIPTION("mt65xx USB PHY driver");
+MODULE_LICENSE("GPL v2");
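
Almost every step in the functions above is the same read-modify-write sequence on a port register. A small illustrative helper showing that shape (not part of the driver, purely a sketch):

#include <linux/io.h>
#include <linux/types.h>

/* clear 'clr' and set 'set' in a single MMIO register */
static inline void u3p_clrsetbits(void __iomem *base, u32 offset,
				  u32 clr, u32 set)
{
	u32 tmp = readl(base + offset);

	tmp &= ~clr;
	tmp |= set;
	writel(tmp, base + offset);
}

/*
 * e.g. the slew-rate step in phy_instance_power_on() would read:
 * u3p_clrsetbits(port_base, U3P_USBPHYACR5,
 *		  PA5_RG_U2_HSTX_SRCTRL, PA5_RG_U2_HSTX_SRCTRL_VAL(4));
 */
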
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
index 49a1ed0cef56..107cb57c3513 100644
--- a/drivers/phy/phy-qcom-ufs.c
+++ b/drivers/phy/phy-qcom-ufs.c
@@ -432,6 +432,7 @@ out_disable_src:
out:
return ret;
}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk);
static
int ufs_qcom_phy_disable_vreg(struct phy *phy,
@@ -474,6 +475,7 @@ void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy)
phy->is_ref_clk_enabled = false;
}
}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk);
#define UFS_REF_CLK_EN (1 << 5)
@@ -517,11 +519,13 @@ void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy)
{
ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true);
}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk);
void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
{
ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false);
}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk);
/* Turn ON M-PHY RMMI interface clocks */
int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
@@ -550,6 +554,7 @@ int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
out:
return ret;
}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk);
/* Turn OFF M-PHY RMMI interface clocks */
void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
@@ -562,6 +567,7 @@ void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
phy->is_iface_clk_enabled = false;
}
}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk);
int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
{
@@ -578,6 +584,7 @@ int ufs_qcom_phy_start_serdes(struct phy *generic_phy)
return ret;
}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes);
int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
{
@@ -595,6 +602,7 @@ int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes)
return ret;
}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable);
void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
u8 major, u16 minor, u16 step)
@@ -605,6 +613,7 @@ void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
ufs_qcom_phy->host_ctrl_rev_minor = minor;
ufs_qcom_phy->host_ctrl_rev_step = step;
}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version);
int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
{
@@ -625,6 +634,7 @@ int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B)
return ret;
}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);
int ufs_qcom_phy_remove(struct phy *generic_phy,
struct ufs_qcom_phy *ufs_qcom_phy)
@@ -662,6 +672,7 @@ int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy)
return ufs_qcom_phy->phy_spec_ops->
is_physical_coding_sublayer_ready(ufs_qcom_phy);
}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready);
int ufs_qcom_phy_power_on(struct phy *generic_phy)
{
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c
index 5a5c073e72fe..91d6f342c565 100644
--- a/drivers/phy/phy-rockchip-usb.c
+++ b/drivers/phy/phy-rockchip-usb.c
@@ -98,6 +98,7 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
struct device_node *child;
struct regmap *grf;
unsigned int reg_offset;
+ int err;
grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
if (IS_ERR(grf)) {
@@ -129,6 +130,11 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
return PTR_ERR(rk_phy->phy);
}
phy_set_drvdata(rk_phy->phy, rk_phy);
+
+	/* only power up the USB PHY when it is in use, so disable it at init */
+ err = rockchip_usb_phy_power(rk_phy, 1);
+ if (err)
+ return err;
}
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
diff --git a/drivers/phy/phy-samsung-usb2.c b/drivers/phy/phy-samsung-usb2.c
index f278a9c547e1..1d22d93b552d 100644
--- a/drivers/phy/phy-samsung-usb2.c
+++ b/drivers/phy/phy-samsung-usb2.c
@@ -27,6 +27,13 @@ static int samsung_usb2_phy_power_on(struct phy *phy)
dev_dbg(drv->dev, "Request to power_on \"%s\" usb phy\n",
inst->cfg->label);
+
+ if (drv->vbus) {
+ ret = regulator_enable(drv->vbus);
+ if (ret)
+ goto err_regulator;
+ }
+
ret = clk_prepare_enable(drv->clk);
if (ret)
goto err_main_clk;
@@ -48,6 +55,9 @@ err_power_on:
err_instance_clk:
clk_disable_unprepare(drv->clk);
err_main_clk:
+ if (drv->vbus)
+ regulator_disable(drv->vbus);
+err_regulator:
return ret;
}
@@ -55,7 +65,7 @@ static int samsung_usb2_phy_power_off(struct phy *phy)
{
struct samsung_usb2_phy_instance *inst = phy_get_drvdata(phy);
struct samsung_usb2_phy_driver *drv = inst->drv;
- int ret;
+ int ret = 0;
dev_dbg(drv->dev, "Request to power_off \"%s\" usb phy\n",
inst->cfg->label);
@@ -68,7 +78,10 @@ static int samsung_usb2_phy_power_off(struct phy *phy)
}
clk_disable_unprepare(drv->ref_clk);
clk_disable_unprepare(drv->clk);
- return 0;
+ if (drv->vbus)
+ ret = regulator_disable(drv->vbus);
+
+ return ret;
}
static const struct phy_ops samsung_usb2_phy_ops = {
@@ -203,6 +216,14 @@ static int samsung_usb2_phy_probe(struct platform_device *pdev)
return ret;
}
+ drv->vbus = devm_regulator_get(dev, "vbus");
+ if (IS_ERR(drv->vbus)) {
+ ret = PTR_ERR(drv->vbus);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ drv->vbus = NULL;
+ }
+
for (i = 0; i < drv->cfg->num_phys; i++) {
char *label = drv->cfg->phys[i].label;
struct samsung_usb2_phy_instance *p = &drv->instances[i];
diff --git a/drivers/phy/phy-samsung-usb2.h b/drivers/phy/phy-samsung-usb2.h
index 44bead9b8f34..6563e7ca0ac4 100644
--- a/drivers/phy/phy-samsung-usb2.h
+++ b/drivers/phy/phy-samsung-usb2.h
@@ -17,6 +17,7 @@
#include <linux/device.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
+#include <linux/regulator/consumer.h>
#define KHZ 1000
#define MHZ (KHZ * KHZ)
@@ -37,6 +38,7 @@ struct samsung_usb2_phy_driver {
const struct samsung_usb2_phy_config *cfg;
struct clk *clk;
struct clk *ref_clk;
+ struct regulator *vbus;
unsigned long ref_rate;
u32 ref_reg_val;
struct device *dev;
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index 731b395d6e6a..b12964b70625 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -551,19 +551,15 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
if (IS_ERR(data->base))
return PTR_ERR(data->base);
- data->id_det_gpio = devm_gpiod_get(dev, "usb0_id_det", GPIOD_IN);
- if (IS_ERR(data->id_det_gpio)) {
- if (PTR_ERR(data->id_det_gpio) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- data->id_det_gpio = NULL;
- }
-
- data->vbus_det_gpio = devm_gpiod_get(dev, "usb0_vbus_det", GPIOD_IN);
- if (IS_ERR(data->vbus_det_gpio)) {
- if (PTR_ERR(data->vbus_det_gpio) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- data->vbus_det_gpio = NULL;
- }
+ data->id_det_gpio = devm_gpiod_get_optional(dev, "usb0_id_det",
+ GPIOD_IN);
+ if (IS_ERR(data->id_det_gpio))
+ return PTR_ERR(data->id_det_gpio);
+
+ data->vbus_det_gpio = devm_gpiod_get_optional(dev, "usb0_vbus_det",
+ GPIOD_IN);
+ if (IS_ERR(data->vbus_det_gpio))
+ return PTR_ERR(data->vbus_det_gpio);
if (of_find_property(np, "usb0_vbus_power-supply", NULL)) {
data->vbus_power_supply = devm_power_supply_get_by_phandle(dev,
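
devm_gpiod_get_optional() folds the removed fallback logic into one call: it returns NULL when the GPIO is simply not described and an ERR_PTR only for real failures, -EPROBE_DEFER included, so the caller can propagate errors while still treating the pin as optional. A minimal sketch under those assumptions:

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int get_optional_id_gpio(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get_optional(dev, "usb0_id_det", GPIOD_IN);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);	/* covers -EPROBE_DEFER as well */

	*out = gpiod;			/* NULL just means "not wired up" */
	return 0;
}
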
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 01bf3476a791..a9567af7cec0 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -192,9 +192,9 @@ static const struct regulator_desc axp22x_regulators[] = {
AXP_DESC(AXP22X, DCDC3, "dcdc3", "vin3", 600, 1860, 20,
AXP22X_DCDC3_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)),
AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20,
- AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)),
+ AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(4)),
AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50,
- AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(4)),
+ AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(5)),
/* secondary switchable output of DCDC1 */
AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", "dcdc1", 1600, 3400, 100,
AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(7)),
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7849187d91ae..8a34f6acc801 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1403,6 +1403,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
return 0;
}
+ /* Did the lookup explicitly defer for us? */
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
if (have_full_constraints()) {
r = dummy_regulator_rdev;
} else {
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index add419d6ff34..a56a7b243e91 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -212,6 +212,17 @@ static const struct file_operations twa_fops = {
.llseek = noop_llseek,
};
+/*
+ * The controllers use an inline buffer instead of a mapped SGL for small,
+ * single entry buffers. Note that we treat a zero-length transfer like
+ * a mapped SGL.
+ */
+static bool twa_command_mapped(struct scsi_cmnd *cmd)
+{
+ return scsi_sg_count(cmd) != 1 ||
+ scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
+}
+
/* This function will complete an aen request from the isr */
static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
{
@@ -1339,7 +1350,8 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
}
/* Now complete the io */
- scsi_dma_unmap(cmd);
+ if (twa_command_mapped(cmd))
+ scsi_dma_unmap(cmd);
cmd->scsi_done(cmd);
tw_dev->state[request_id] = TW_S_COMPLETED;
twa_free_request_id(tw_dev, request_id);
@@ -1582,7 +1594,8 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
struct scsi_cmnd *cmd = tw_dev->srb[i];
cmd->result = (DID_RESET << 16);
- scsi_dma_unmap(cmd);
+ if (twa_command_mapped(cmd))
+ scsi_dma_unmap(cmd);
cmd->scsi_done(cmd);
}
}
@@ -1765,12 +1778,14 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
switch (retval) {
case SCSI_MLQUEUE_HOST_BUSY:
- scsi_dma_unmap(SCpnt);
+ if (twa_command_mapped(SCpnt))
+ scsi_dma_unmap(SCpnt);
twa_free_request_id(tw_dev, request_id);
break;
case 1:
SCpnt->result = (DID_ERROR << 16);
- scsi_dma_unmap(SCpnt);
+ if (twa_command_mapped(SCpnt))
+ scsi_dma_unmap(SCpnt);
done(SCpnt);
tw_dev->state[request_id] = TW_S_COMPLETED;
twa_free_request_id(tw_dev, request_id);
@@ -1831,8 +1846,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
/* Map sglist from scsi layer to cmd packet */
if (scsi_sg_count(srb)) {
- if ((scsi_sg_count(srb) == 1) &&
- (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
+ if (!twa_command_mapped(srb)) {
if (srb->sc_data_direction == DMA_TO_DEVICE ||
srb->sc_data_direction == DMA_BIDIRECTIONAL)
scsi_sg_copy_to_buffer(srb,
@@ -1905,7 +1919,7 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re
{
struct scsi_cmnd *cmd = tw_dev->srb[request_id];
- if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
+ if (!twa_command_mapped(cmd) &&
(cmd->sc_data_direction == DMA_FROM_DEVICE ||
cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
if (scsi_sg_count(cmd) == 1) {
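
For the inline-buffer commands that twa_command_mapped() now singles out, data still has to be copied between the scatterlist and the controller's buffer by hand, in the direction the command requires. A hedged sketch of that direction handling; the bounce-buffer argument is illustrative:

#include <linux/dma-direction.h>
#include <scsi/scsi_cmnd.h>

/* copy helper for commands that bypass scsi_dma_map() */
static void inline_buf_copy(struct scsi_cmnd *cmd, void *bounce, int len,
			    bool completing)
{
	if (!completing && (cmd->sc_data_direction == DMA_TO_DEVICE ||
			    cmd->sc_data_direction == DMA_BIDIRECTIONAL))
		scsi_sg_copy_to_buffer(cmd, bounce, len);	/* data out */

	if (completing && (cmd->sc_data_direction == DMA_FROM_DEVICE ||
			   cmd->sc_data_direction == DMA_BIDIRECTIONAL))
		scsi_sg_copy_from_buffer(cmd, bounce, len);	/* data in */
}
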
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 33c74d3436c9..6bffd91b973a 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -976,13 +976,13 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
wake_up(&conn->ehwait);
}
-static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
+static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
{
struct iscsi_nopout hdr;
struct iscsi_task *task;
if (!rhdr && conn->ping_task)
- return;
+ return -EINVAL;
memset(&hdr, 0, sizeof(struct iscsi_nopout));
hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
@@ -996,13 +996,16 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
hdr.ttt = RESERVED_ITT;
task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
- if (!task)
+ if (!task) {
iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
- else if (!rhdr) {
+ return -EIO;
+ } else if (!rhdr) {
/* only track our nops */
conn->ping_task = task;
conn->last_ping = jiffies;
}
+
+ return 0;
}
static int iscsi_nop_out_rsp(struct iscsi_task *task,
@@ -2092,8 +2095,10 @@ static void iscsi_check_transport_timeouts(unsigned long data)
if (time_before_eq(last_recv + recv_timeout, jiffies)) {
/* send a ping to try to provoke some traffic */
ISCSI_DBG_CONN(conn, "Sending nopout as ping\n");
- iscsi_send_nopout(conn, NULL);
- next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
+ if (iscsi_send_nopout(conn, NULL))
+ next_timeout = jiffies + (1 * HZ);
+ else
+ next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
} else
next_timeout = last_recv + recv_timeout;
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index edb044a7b56d..0a2168e69bbc 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -111,7 +111,7 @@ static struct scsi_device_handler *scsi_dh_lookup(const char *name)
dh = __scsi_dh_lookup(name);
if (!dh) {
- request_module(name);
+ request_module("scsi_dh_%s", name);
dh = __scsi_dh_lookup(name);
}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index cbfc5990052b..126a48c6431e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1957,7 +1957,7 @@ static int scsi_mq_prep_fn(struct request *req)
static void scsi_mq_done(struct scsi_cmnd *cmd)
{
trace_scsi_dispatch_cmd_done(cmd);
- blk_mq_complete_request(cmd->request);
+ blk_mq_complete_request(cmd->request, cmd->request->errors);
}
static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 3cf9faa6cc3f..a85d863d4a44 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -992,11 +992,12 @@ static int davinci_spi_probe(struct platform_device *pdev)
goto free_master;
}
- dspi->irq = platform_get_irq(pdev, 0);
- if (dspi->irq <= 0) {
+ ret = platform_get_irq(pdev, 0);
+ if (ret == 0)
ret = -EINVAL;
+ if (ret < 0)
goto free_master;
- }
+ dspi->irq = ret;
ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq,
dummy_thread_fn, 0, dev_name(&pdev->dev), dspi);
diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c
index 4299cf45f947..5e1f16c36b49 100644
--- a/drivers/staging/speakup/fakekey.c
+++ b/drivers/staging/speakup/fakekey.c
@@ -81,6 +81,7 @@ void speakup_fake_down_arrow(void)
__this_cpu_write(reporting_keystroke, true);
input_report_key(virt_keyboard, KEY_DOWN, PRESSED);
input_report_key(virt_keyboard, KEY_DOWN, RELEASED);
+ input_sync(virt_keyboard);
__this_cpu_write(reporting_keystroke, false);
/* reenable preemption */
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 7ff96270c933..e570ff084add 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -144,6 +144,16 @@ static void estimate_pid_constants(struct thermal_zone_device *tz,
switch_on_temp = 0;
temperature_threshold = control_temp - switch_on_temp;
+ /*
+ * estimate_pid_constants() tries to find appropriate default
+ * values for thermal zones that don't provide them. If a
+ * system integrator has configured a thermal zone with two
+ * passive trip points at the same temperature, that person
+	 * hasn't put any effort into setting up the thermal zone
+	 * properly, so just give up.
+ */
+ if (!temperature_threshold)
+ return;
if (!tz->tzp->k_po || force)
tz->tzp->k_po = int_to_frac(sustainable_power) /
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 20932cc9c8f7..b09023b07169 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -343,8 +343,7 @@ static void n_tty_packet_mode_flush(struct tty_struct *tty)
spin_lock_irqsave(&tty->ctrl_lock, flags);
tty->ctrl_status |= TIOCPKT_FLUSHREAD;
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
- if (waitqueue_active(&tty->link->read_wait))
- wake_up_interruptible(&tty->link->read_wait);
+ wake_up_interruptible(&tty->link->read_wait);
}
}
@@ -1382,8 +1381,7 @@ handle_newline:
put_tty_queue(c, ldata);
smp_store_release(&ldata->canon_head, ldata->read_head);
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
- if (waitqueue_active(&tty->read_wait))
- wake_up_interruptible_poll(&tty->read_wait, POLLIN);
+ wake_up_interruptible_poll(&tty->read_wait, POLLIN);
return 0;
}
}
@@ -1667,8 +1665,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
if ((read_cnt(ldata) >= ldata->minimum_to_wake) || L_EXTPROC(tty)) {
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
- if (waitqueue_active(&tty->read_wait))
- wake_up_interruptible_poll(&tty->read_wait, POLLIN);
+ wake_up_interruptible_poll(&tty->read_wait, POLLIN);
}
}
@@ -1887,10 +1884,8 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
}
/* The termios change make the tty ready for I/O */
- if (waitqueue_active(&tty->write_wait))
- wake_up_interruptible(&tty->write_wait);
- if (waitqueue_active(&tty->read_wait))
- wake_up_interruptible(&tty->read_wait);
+ wake_up_interruptible(&tty->write_wait);
+ wake_up_interruptible(&tty->read_wait);
}
/**
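
The waitqueue_active() guards are being dropped because, without a barrier between publishing the data and checking the queue, the check can miss a reader that is just about to sleep; wake_up_interruptible() already takes the wait-queue lock and is cheap when nobody waits. An illustrative sketch of the fragile form and the barrier it would need:

#include <linux/compiler.h>
#include <linux/wait.h>

static void notify_readers(wait_queue_head_t *wq, bool *data_ready)
{
	WRITE_ONCE(*data_ready, true);
	smp_mb();			/* orders the store before the check */
	if (waitqueue_active(wq))	/* easy to get wrong without the barrier */
		wake_up_interruptible(wq);

	/* calling wake_up_interruptible(wq) unconditionally, as n_tty now
	 * does, avoids having to reason about the pairing at all */
}
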
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index b1e0ba3e525b..0bbf34035d6a 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -261,6 +261,14 @@ configured less than Maximum supported fifo bytes */
UART_FCR7_64BYTE,
.flags = UART_CAP_FIFO,
},
+ [PORT_RT2880] = {
+ .name = "Palmchip BK-3103",
+ .fifo_size = 16,
+ .tx_loadsz = 16,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .rxtrig_bytes = {1, 4, 8, 14},
+ .flags = UART_CAP_FIFO,
+ },
};
/* Uart divisor latch read */
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 5ca5cf3e9359..538ea03bc101 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -2786,7 +2786,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
ret = atmel_init_gpios(port, &pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to initialize GPIOs.");
- goto err;
+ goto err_clear_bit;
}
ret = atmel_init_port(port, pdev);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index fe3d41cc8416..d0388a071ba1 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1631,12 +1631,12 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
int locked = 1;
int retval;
- retval = clk_prepare_enable(sport->clk_per);
+ retval = clk_enable(sport->clk_per);
if (retval)
return;
- retval = clk_prepare_enable(sport->clk_ipg);
+ retval = clk_enable(sport->clk_ipg);
if (retval) {
- clk_disable_unprepare(sport->clk_per);
+ clk_disable(sport->clk_per);
return;
}
@@ -1675,8 +1675,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
if (locked)
spin_unlock_irqrestore(&sport->port.lock, flags);
- clk_disable_unprepare(sport->clk_ipg);
- clk_disable_unprepare(sport->clk_per);
+ clk_disable(sport->clk_ipg);
+ clk_disable(sport->clk_per);
}
/*
@@ -1777,7 +1777,15 @@ imx_console_setup(struct console *co, char *options)
retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);
- clk_disable_unprepare(sport->clk_ipg);
+ clk_disable(sport->clk_ipg);
+ if (retval) {
+ clk_unprepare(sport->clk_ipg);
+ goto error_console;
+ }
+
+ retval = clk_prepare(sport->clk_per);
+ if (retval)
+ clk_disable_unprepare(sport->clk_ipg);
error_console:
return retval;
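
The switch from clk_prepare_enable() to plain clk_enable() in the console write path matters because console output can run in atomic context: clk_prepare() may sleep and therefore has to happen once, up front, leaving only the atomic-safe enable/disable pair for the hot path. A hedged sketch of that split with an illustrative clock:

#include <linux/clk.h>

static struct clk *per_clk;

static int console_clk_setup(struct clk *clk)
{
	per_clk = clk;
	return clk_prepare(per_clk);	/* may sleep: do it once at setup */
}

static void console_write_atomic(void)
{
	if (clk_enable(per_clk))	/* atomic-safe fast path */
		return;
	/* ... push characters out of the FIFO ... */
	clk_disable(per_clk);		/* atomic-safe as well */
}
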
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 5a3fa8913880..a660ab181cca 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -242,7 +242,10 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
atomic_inc(&buf->priority);
mutex_lock(&buf->lock);
- while ((next = buf->head->next) != NULL) {
+ /* paired w/ release in __tty_buffer_request_room; ensures there are
+ * no pending memory accesses to the freed buffer
+ */
+ while ((next = smp_load_acquire(&buf->head->next)) != NULL) {
tty_buffer_free(port, buf->head);
buf->head = next;
}
@@ -290,7 +293,10 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size,
if (n != NULL) {
n->flags = flags;
buf->tail = n;
- b->commit = b->used;
+ /* paired w/ acquire in flush_to_ldisc(); ensures
+ * flush_to_ldisc() sees buffer data.
+ */
+ smp_store_release(&b->commit, b->used);
/* paired w/ acquire in flush_to_ldisc(); ensures the
* latest commit value can be read before the head is
* advanced to the next buffer
@@ -393,7 +399,10 @@ void tty_schedule_flip(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
- buf->tail->commit = buf->tail->used;
+ /* paired w/ acquire in flush_to_ldisc(); ensures
+ * flush_to_ldisc() sees buffer data.
+ */
+ smp_store_release(&buf->tail->commit, buf->tail->used);
schedule_work(&buf->work);
}
EXPORT_SYMBOL(tty_schedule_flip);
@@ -467,7 +476,7 @@ static void flush_to_ldisc(struct work_struct *work)
struct tty_struct *tty;
struct tty_ldisc *disc;
- tty = port->itty;
+ tty = READ_ONCE(port->itty);
if (tty == NULL)
return;
@@ -491,7 +500,10 @@ static void flush_to_ldisc(struct work_struct *work)
* is advancing to the next buffer
*/
next = smp_load_acquire(&head->next);
- count = head->commit - head->read;
+ /* paired w/ release in __tty_buffer_request_room() or in
+ * tty_buffer_flush(); ensures we see the committed buffer data
+ */
+ count = smp_load_acquire(&head->commit) - head->read;
if (!count) {
if (next == NULL) {
check_other_closed(tty);
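
The commit/read pairing above is a release/acquire handoff: the producer publishes buffer contents with smp_store_release() on commit, and the consumer's smp_load_acquire() of commit guarantees it sees everything written before that release. A compressed sketch with illustrative names:

#include <asm/barrier.h>

struct flip_buf {
	char data[256];
	int commit;	/* bytes published to the consumer */
};

/* producer side (cf. __tty_buffer_request_room / tty_schedule_flip) */
static void publish(struct flip_buf *b, int used)
{
	/* writes to data[] happen before this release ... */
	smp_store_release(&b->commit, used);
}

/* consumer side (cf. flush_to_ldisc) */
static int available(struct flip_buf *b, int read)
{
	/* ... and are guaranteed visible after this acquire */
	return smp_load_acquire(&b->commit) - read;
}
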
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 02785d844354..2eefaa6e3e3a 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2128,8 +2128,24 @@ retry_open:
if (!noctty &&
current->signal->leader &&
!current->signal->tty &&
- tty->session == NULL)
- __proc_set_tty(tty);
+ tty->session == NULL) {
+ /*
+ * Don't let a process that only has write access to the tty
+ * obtain the privileges associated with having a tty as
+ * controlling terminal (being able to reopen it with full
+ * access through /dev/tty, being able to perform pushback).
+ * Many distributions set the group of all ttys to "tty" and
+ * grant write-only access to all terminals for setgid tty
+ * binaries, which should not imply full privileges on all ttys.
+ *
+ * This could theoretically break old code that performs open()
+ * on a write-only file descriptor. In that case, it might be
+ * necessary to also permit this if
+ * inode_permission(inode, MAY_READ) == 0.
+ */
+ if (filp->f_mode & FMODE_READ)
+ __proc_set_tty(tty);
+ }
spin_unlock_irq(&current->sighand->siglock);
read_unlock(&tasklist_lock);
tty_unlock(tty);
@@ -2418,7 +2434,7 @@ static int fionbio(struct file *file, int __user *p)
* Takes ->siglock() when updating signal->tty
*/
-static int tiocsctty(struct tty_struct *tty, int arg)
+static int tiocsctty(struct tty_struct *tty, struct file *file, int arg)
{
int ret = 0;
@@ -2452,6 +2468,13 @@ static int tiocsctty(struct tty_struct *tty, int arg)
goto unlock;
}
}
+
+ /* See the comment in tty_open(). */
+ if ((file->f_mode & FMODE_READ) == 0 && !capable(CAP_SYS_ADMIN)) {
+ ret = -EPERM;
+ goto unlock;
+ }
+
proc_set_tty(tty);
unlock:
read_unlock(&tasklist_lock);
@@ -2844,7 +2867,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
no_tty();
return 0;
case TIOCSCTTY:
- return tiocsctty(tty, arg);
+ return tiocsctty(tty, file, arg);
case TIOCGPGRP:
return tiocgpgrp(tty, real_tty, p);
case TIOCSPGRP:
@@ -3151,13 +3174,18 @@ struct class *tty_class;
static int tty_cdev_add(struct tty_driver *driver, dev_t dev,
unsigned int index, unsigned int count)
{
+ int err;
+
/* init here, since reused cdevs cause crashes */
driver->cdevs[index] = cdev_alloc();
if (!driver->cdevs[index])
return -ENOMEM;
- cdev_init(driver->cdevs[index], &tty_fops);
+ driver->cdevs[index]->ops = &tty_fops;
driver->cdevs[index]->owner = driver->owner;
- return cdev_add(driver->cdevs[index], dev, count);
+ err = cdev_add(driver->cdevs[index], dev, count);
+ if (err)
+ kobject_put(&driver->cdevs[index]->kobj);
+ return err;
}
/**
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index d8926c6cd2a8..d5c57f1e98fd 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -27,7 +27,6 @@ obj-$(CONFIG_USB_R8A66597_HCD) += host/
obj-$(CONFIG_USB_HWA_HCD) += host/
obj-$(CONFIG_USB_IMX21_HCD) += host/
obj-$(CONFIG_USB_FSL_MPH_DR_OF) += host/
-obj-$(CONFIG_USB_FUSBH200_HCD) += host/
obj-$(CONFIG_USB_FOTG210_HCD) += host/
obj-$(CONFIG_USB_MAX3421_HCD) += host/
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index b9ddf0c1ffe5..7caff020106e 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -853,6 +853,10 @@ int usb_get_bos_descriptor(struct usb_device *dev)
dev->bos->ss_cap =
(struct usb_ss_cap_descriptor *)buffer;
break;
+ case USB_SSP_CAP_TYPE:
+ dev->bos->ssp_cap =
+ (struct usb_ssp_cap_descriptor *)buffer;
+ break;
case CONTAINER_ID_TYPE:
dev->bos->ss_id =
(struct usb_ss_container_id_descriptor *)buffer;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 6b5063e7943f..56593a9a8726 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -296,6 +296,10 @@ static int usb_probe_interface(struct device *dev)
if (udev->authorized == 0) {
dev_err(&intf->dev, "Device is not authorized for usage\n");
return error;
+ } else if (intf->authorized == 0) {
+ dev_err(&intf->dev, "Interface %d is not authorized for usage\n",
+ intf->altsetting->desc.bInterfaceNumber);
+ return error;
}
id = usb_match_dynamic_id(intf, driver);
@@ -417,12 +421,10 @@ static int usb_unbind_interface(struct device *dev)
if (ep->streams == 0)
continue;
if (j == 0) {
- eps = kmalloc(USB_MAXENDPOINTS * sizeof(void *),
+ eps = kmalloc_array(USB_MAXENDPOINTS, sizeof(void *),
GFP_KERNEL);
- if (!eps) {
- dev_warn(dev, "oom, leaking streams\n");
+ if (!eps)
break;
- }
}
eps[j++] = ep;
}
@@ -508,6 +510,10 @@ int usb_driver_claim_interface(struct usb_driver *driver,
if (dev->driver)
return -EBUSY;
+ /* reject claim if interface is not authorized */
+ if (!iface->authorized)
+ return -ENODEV;
+
udev = interface_to_usbdev(iface);
dev->driver = &driver->drvwrap.driver;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 4d64e5c499e1..1c102d60cd9f 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -131,7 +131,7 @@ static inline int is_root_hub(struct usb_device *udev)
/* usb 3.0 root hub device descriptor */
static const u8 usb3_rh_dev_descriptor[18] = {
0x12, /* __u8 bLength; */
- 0x01, /* __u8 bDescriptorType; Device */
+ USB_DT_DEVICE, /* __u8 bDescriptorType; Device */
0x00, 0x03, /* __le16 bcdUSB; v3.0 */
0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */
@@ -152,7 +152,7 @@ static const u8 usb3_rh_dev_descriptor[18] = {
/* usb 2.5 (wireless USB 1.0) root hub device descriptor */
static const u8 usb25_rh_dev_descriptor[18] = {
0x12, /* __u8 bLength; */
- 0x01, /* __u8 bDescriptorType; Device */
+ USB_DT_DEVICE, /* __u8 bDescriptorType; Device */
0x50, 0x02, /* __le16 bcdUSB; v2.5 */
0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */
@@ -173,7 +173,7 @@ static const u8 usb25_rh_dev_descriptor[18] = {
/* usb 2.0 root hub device descriptor */
static const u8 usb2_rh_dev_descriptor[18] = {
0x12, /* __u8 bLength; */
- 0x01, /* __u8 bDescriptorType; Device */
+ USB_DT_DEVICE, /* __u8 bDescriptorType; Device */
0x00, 0x02, /* __le16 bcdUSB; v2.0 */
0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */
@@ -196,7 +196,7 @@ static const u8 usb2_rh_dev_descriptor[18] = {
/* usb 1.1 root hub device descriptor */
static const u8 usb11_rh_dev_descriptor[18] = {
0x12, /* __u8 bLength; */
- 0x01, /* __u8 bDescriptorType; Device */
+ USB_DT_DEVICE, /* __u8 bDescriptorType; Device */
0x10, 0x01, /* __le16 bcdUSB; v1.1 */
0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */
@@ -223,7 +223,7 @@ static const u8 fs_rh_config_descriptor[] = {
/* one configuration */
0x09, /* __u8 bLength; */
- 0x02, /* __u8 bDescriptorType; Configuration */
+ USB_DT_CONFIG, /* __u8 bDescriptorType; Configuration */
0x19, 0x00, /* __le16 wTotalLength; */
0x01, /* __u8 bNumInterfaces; (1) */
0x01, /* __u8 bConfigurationValue; */
@@ -248,7 +248,7 @@ static const u8 fs_rh_config_descriptor[] = {
/* one interface */
0x09, /* __u8 if_bLength; */
- 0x04, /* __u8 if_bDescriptorType; Interface */
+ USB_DT_INTERFACE, /* __u8 if_bDescriptorType; Interface */
0x00, /* __u8 if_bInterfaceNumber; */
0x00, /* __u8 if_bAlternateSetting; */
0x01, /* __u8 if_bNumEndpoints; */
@@ -259,7 +259,7 @@ static const u8 fs_rh_config_descriptor[] = {
/* one endpoint (status change endpoint) */
0x07, /* __u8 ep_bLength; */
- 0x05, /* __u8 ep_bDescriptorType; Endpoint */
+ USB_DT_ENDPOINT, /* __u8 ep_bDescriptorType; Endpoint */
0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */
0x03, /* __u8 ep_bmAttributes; Interrupt */
0x02, 0x00, /* __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8) */
@@ -270,7 +270,7 @@ static const u8 hs_rh_config_descriptor[] = {
/* one configuration */
0x09, /* __u8 bLength; */
- 0x02, /* __u8 bDescriptorType; Configuration */
+ USB_DT_CONFIG, /* __u8 bDescriptorType; Configuration */
0x19, 0x00, /* __le16 wTotalLength; */
0x01, /* __u8 bNumInterfaces; (1) */
0x01, /* __u8 bConfigurationValue; */
@@ -295,7 +295,7 @@ static const u8 hs_rh_config_descriptor[] = {
/* one interface */
0x09, /* __u8 if_bLength; */
- 0x04, /* __u8 if_bDescriptorType; Interface */
+ USB_DT_INTERFACE, /* __u8 if_bDescriptorType; Interface */
0x00, /* __u8 if_bInterfaceNumber; */
0x00, /* __u8 if_bAlternateSetting; */
0x01, /* __u8 if_bNumEndpoints; */
@@ -306,7 +306,7 @@ static const u8 hs_rh_config_descriptor[] = {
/* one endpoint (status change endpoint) */
0x07, /* __u8 ep_bLength; */
- 0x05, /* __u8 ep_bDescriptorType; Endpoint */
+ USB_DT_ENDPOINT, /* __u8 ep_bDescriptorType; Endpoint */
0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */
0x03, /* __u8 ep_bmAttributes; Interrupt */
/* __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8)
@@ -318,7 +318,7 @@ static const u8 hs_rh_config_descriptor[] = {
static const u8 ss_rh_config_descriptor[] = {
/* one configuration */
0x09, /* __u8 bLength; */
- 0x02, /* __u8 bDescriptorType; Configuration */
+ USB_DT_CONFIG, /* __u8 bDescriptorType; Configuration */
0x1f, 0x00, /* __le16 wTotalLength; */
0x01, /* __u8 bNumInterfaces; (1) */
0x01, /* __u8 bConfigurationValue; */
@@ -332,7 +332,7 @@ static const u8 ss_rh_config_descriptor[] = {
/* one interface */
0x09, /* __u8 if_bLength; */
- 0x04, /* __u8 if_bDescriptorType; Interface */
+ USB_DT_INTERFACE, /* __u8 if_bDescriptorType; Interface */
0x00, /* __u8 if_bInterfaceNumber; */
0x00, /* __u8 if_bAlternateSetting; */
0x01, /* __u8 if_bNumEndpoints; */
@@ -343,7 +343,7 @@ static const u8 ss_rh_config_descriptor[] = {
/* one endpoint (status change endpoint) */
0x07, /* __u8 ep_bLength; */
- 0x05, /* __u8 ep_bDescriptorType; Endpoint */
+ USB_DT_ENDPOINT, /* __u8 ep_bDescriptorType; Endpoint */
0x81, /* __u8 ep_bEndpointAddress; IN Endpoint 1 */
0x03, /* __u8 ep_bmAttributes; Interrupt */
/* __le16 ep_wMaxPacketSize; 1 + (MAX_ROOT_PORTS / 8)
@@ -353,7 +353,8 @@ static const u8 ss_rh_config_descriptor[] = {
/* one SuperSpeed endpoint companion descriptor */
0x06, /* __u8 ss_bLength */
- 0x30, /* __u8 ss_bDescriptorType; SuperSpeed EP Companion */
+ USB_DT_SS_ENDPOINT_COMP, /* __u8 ss_bDescriptorType; SuperSpeed EP */
+ /* Companion */
0x00, /* __u8 ss_bMaxBurst; allows 1 TX between ACKs */
0x00, /* __u8 ss_bmAttributes; 1 packet per service interval */
0x02, 0x00 /* __le16 ss_wBytesPerInterval; 15 bits for max 15 ports */
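
Note on the constants substituted above: the USB_DT_* macros come from include/uapi/linux/usb/ch9.h and expand to the same byte values the root-hub descriptor tables previously hard-coded, so the emitted descriptors are byte-for-byte unchanged; the change is purely for readability. For reference:

#define USB_DT_DEVICE			0x01
#define USB_DT_CONFIG			0x02
#define USB_DT_INTERFACE		0x04
#define USB_DT_ENDPOINT			0x05
#define USB_DT_SS_ENDPOINT_COMP		0x30	/* SuperSpeed endpoint companion */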
@@ -555,6 +556,7 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
switch (wValue & 0xff00) {
case USB_DT_DEVICE << 8:
switch (hcd->speed) {
+ case HCD_USB31:
case HCD_USB3:
bufp = usb3_rh_dev_descriptor;
break;
@@ -576,6 +578,7 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
break;
case USB_DT_CONFIG << 8:
switch (hcd->speed) {
+ case HCD_USB31:
case HCD_USB3:
bufp = ss_rh_config_descriptor;
len = sizeof ss_rh_config_descriptor;
@@ -854,10 +857,10 @@ static ssize_t authorized_default_show(struct device *dev,
{
struct usb_device *rh_usb_dev = to_usb_device(dev);
struct usb_bus *usb_bus = rh_usb_dev->bus;
- struct usb_hcd *usb_hcd;
+ struct usb_hcd *hcd;
- usb_hcd = bus_to_hcd(usb_bus);
- return snprintf(buf, PAGE_SIZE, "%u\n", usb_hcd->authorized_default);
+ hcd = bus_to_hcd(usb_bus);
+ return snprintf(buf, PAGE_SIZE, "%u\n", !!HCD_DEV_AUTHORIZED(hcd));
}
static ssize_t authorized_default_store(struct device *dev,
@@ -868,12 +871,16 @@ static ssize_t authorized_default_store(struct device *dev,
unsigned val;
struct usb_device *rh_usb_dev = to_usb_device(dev);
struct usb_bus *usb_bus = rh_usb_dev->bus;
- struct usb_hcd *usb_hcd;
+ struct usb_hcd *hcd;
- usb_hcd = bus_to_hcd(usb_bus);
+ hcd = bus_to_hcd(usb_bus);
result = sscanf(buf, "%u\n", &val);
if (result == 1) {
- usb_hcd->authorized_default = val ? 1 : 0;
+ if (val)
+ set_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags);
+ else
+ clear_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags);
+
result = size;
} else {
result = -EINVAL;
@@ -882,9 +889,53 @@ static ssize_t authorized_default_store(struct device *dev,
}
static DEVICE_ATTR_RW(authorized_default);
+/*
+ * interface_authorized_default_show - show default authorization status
+ * for USB interfaces
+ *
+ * note: interface_authorized_default is the default value
+ * for initializing the authorized attribute of interfaces
+ */
+static ssize_t interface_authorized_default_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_device *usb_dev = to_usb_device(dev);
+ struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus);
+
+ return sprintf(buf, "%u\n", !!HCD_INTF_AUTHORIZED(hcd));
+}
+
+/*
+ * interface_authorized_default_store - store default authorization status
+ * for USB interfaces
+ *
+ * note: interface_authorized_default is the default value
+ * for initializing the authorized attribute of interfaces
+ */
+static ssize_t interface_authorized_default_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb_device *usb_dev = to_usb_device(dev);
+ struct usb_hcd *hcd = bus_to_hcd(usb_dev->bus);
+ int rc = count;
+ bool val;
+
+ if (strtobool(buf, &val) != 0)
+ return -EINVAL;
+
+ if (val)
+ set_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
+ else
+ clear_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
+
+ return rc;
+}
+static DEVICE_ATTR_RW(interface_authorized_default);
+
/* Group all the USB bus attributes */
static struct attribute *usb_bus_attrs[] = {
&dev_attr_authorized_default.attr,
+ &dev_attr_interface_authorized_default.attr,
NULL,
};
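
The device-authorization default now lives in two bits of hcd->flags instead of the old hcd->authorized_default field, and a second bus-level knob, interface_authorized_default, is added alongside it. A minimal sketch of how the HCD_DEV_AUTHORIZED()/HCD_INTF_AUTHORIZED() helpers are presumably declared in include/linux/usb/hcd.h, following the existing HCD_HW_ACCESSIBLE() pattern (the helper definitions are an assumption based on that pattern; they are not shown in this diff):

#define HCD_DEV_AUTHORIZED(hcd) \
	((hcd)->flags & (1U << HCD_FLAG_DEV_AUTHORIZED))
#define HCD_INTF_AUTHORIZED(hcd) \
	((hcd)->flags & (1U << HCD_FLAG_INTF_AUTHORIZED))

Writing 0 or 1 to the new attribute in the root hub's sysfs directory changes the default that interfaces inherit when a device is configured; see the message.c hunk further down.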
@@ -2676,12 +2727,22 @@ int usb_add_hcd(struct usb_hcd *hcd,
dev_info(hcd->self.controller, "%s\n", hcd->product_desc);
/* Keep old behaviour if authorized_default is not in [0, 1]. */
- if (authorized_default < 0 || authorized_default > 1)
- hcd->authorized_default = hcd->wireless ? 0 : 1;
- else
- hcd->authorized_default = authorized_default;
+ if (authorized_default < 0 || authorized_default > 1) {
+ if (hcd->wireless)
+ clear_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags);
+ else
+ set_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags);
+ } else {
+ if (authorized_default)
+ set_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags);
+ else
+ clear_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags);
+ }
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ /* per default all interfaces are authorized */
+ set_bit(HCD_FLAG_INTF_AUTHORIZED, &hcd->flags);
+
/* HC is in reset state, but accessible. Now do the one-time init,
* bottom up so that hcds can customize the root hubs before hub_wq
* starts talking to them. (Note, bus id is assigned early too.)
@@ -2717,6 +2778,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
rhdev->speed = USB_SPEED_WIRELESS;
break;
case HCD_USB3:
+ case HCD_USB31:
rhdev->speed = USB_SPEED_SUPER;
break;
default:
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 431839bd291f..bdeadc112d29 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1070,7 +1070,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
* for HUB_POST_RESET, but it's easier not to.
*/
if (type == HUB_INIT) {
- unsigned delay = hub_power_on_good_delay(hub);
+ delay = hub_power_on_good_delay(hub);
hub_power_on(hub, false);
INIT_DELAYED_WORK(&hub->init_work, hub_init_func2);
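
The hub_activate() hunk above removes a shadowing declaration: delay was re-declared inside the HUB_INIT branch, so the value returned by hub_power_on_good_delay() never reached the outer delay variable that the rest of the initialisation path uses; presumably that is the bug being fixed here.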
@@ -1404,7 +1404,6 @@ static int hub_configure(struct usb_hub *hub,
/* FIXME for USB 3.0, skip for now */
if ((wHubCharacteristics & HUB_CHAR_COMPOUND) &&
!(hub_is_superspeed(hdev))) {
- int i;
char portstr[USB_MAXCHILDREN + 1];
for (i = 0; i < maxchild; i++)
@@ -2240,39 +2239,49 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
&& udev->parent == udev->bus->root_hub) {
struct usb_otg_descriptor *desc = NULL;
struct usb_bus *bus = udev->bus;
+ unsigned port1 = udev->portnum;
/* descriptor may appear anywhere in config */
- if (__usb_get_extra_descriptor(udev->rawdescriptors[0],
- le16_to_cpu(udev->config[0].desc.wTotalLength),
- USB_DT_OTG, (void **) &desc) == 0) {
- if (desc->bmAttributes & USB_OTG_HNP) {
- unsigned port1 = udev->portnum;
+ err = __usb_get_extra_descriptor(udev->rawdescriptors[0],
+ le16_to_cpu(udev->config[0].desc.wTotalLength),
+ USB_DT_OTG, (void **) &desc);
+ if (err || !(desc->bmAttributes & USB_OTG_HNP))
+ return 0;
- dev_info(&udev->dev,
- "Dual-Role OTG device on %sHNP port\n",
- (port1 == bus->otg_port)
- ? "" : "non-");
-
- /* enable HNP before suspend, it's simpler */
- if (port1 == bus->otg_port)
- bus->b_hnp_enable = 1;
- err = usb_control_msg(udev,
- usb_sndctrlpipe(udev, 0),
- USB_REQ_SET_FEATURE, 0,
- bus->b_hnp_enable
- ? USB_DEVICE_B_HNP_ENABLE
- : USB_DEVICE_A_ALT_HNP_SUPPORT,
- 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
- if (err < 0) {
- /* OTG MESSAGE: report errors here,
- * customize to match your product.
- */
- dev_info(&udev->dev,
- "can't set HNP mode: %d\n",
- err);
- bus->b_hnp_enable = 0;
- }
+ dev_info(&udev->dev, "Dual-Role OTG device on %sHNP port\n",
+ (port1 == bus->otg_port) ? "" : "non-");
+
+ /* enable HNP before suspend, it's simpler */
+ if (port1 == bus->otg_port) {
+ bus->b_hnp_enable = 1;
+ err = usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, 0,
+ USB_DEVICE_B_HNP_ENABLE,
+ 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ if (err < 0) {
+ /*
+ * OTG MESSAGE: report errors here,
+ * customize to match your product.
+ */
+ dev_err(&udev->dev, "can't set HNP mode: %d\n",
+ err);
+ bus->b_hnp_enable = 0;
}
+ } else if (desc->bLength == sizeof
+ (struct usb_otg_descriptor)) {
+ /* Set a_alt_hnp_support for legacy otg device */
+ err = usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, 0,
+ USB_DEVICE_A_ALT_HNP_SUPPORT,
+ 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ if (err < 0)
+ dev_err(&udev->dev,
+ "set a_alt_hnp_support failed: %d\n",
+ err);
}
}
#endif
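
The restructured OTG block returns early when no OTG descriptor is present or HNP is not advertised, enables HNP on the OTG port as before, and now sends A_ALT_HNP_SUPPORT on non-OTG ports only for legacy descriptors, which it recognises by length. For reference, a sketch of the two chapter-9 descriptor layouts this length check distinguishes (the OTG 2.0 form carries an extra bcdOTG field, so sizeof(struct usb_otg_descriptor) matches only the legacy one):

struct usb_otg_descriptor {
	__u8  bLength;
	__u8  bDescriptorType;
	__u8  bmAttributes;	/* HNP/SRP support bits */
} __attribute__ ((packed));

struct usb_otg20_descriptor {
	__u8  bLength;
	__u8  bDescriptorType;
	__u8  bmAttributes;
	__le16 bcdOTG;		/* OTG and EH supplement release number */
} __attribute__ ((packed));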
@@ -4222,7 +4231,7 @@ static int hub_enable_device(struct usb_device *udev)
* but it is still necessary to lock the port.
*/
static int
-hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
int retry_counter)
{
struct usb_device *hdev = hub->hdev;
@@ -4526,7 +4535,7 @@ fail:
}
static void
-check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1)
+check_highspeed(struct usb_hub *hub, struct usb_device *udev, int port1)
{
struct usb_qualifier_descriptor *qual;
int status;
@@ -4534,11 +4543,11 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1)
if (udev->quirks & USB_QUIRK_DEVICE_QUALIFIER)
return;
- qual = kmalloc (sizeof *qual, GFP_KERNEL);
+ qual = kmalloc(sizeof *qual, GFP_KERNEL);
if (qual == NULL)
return;
- status = usb_get_descriptor (udev, USB_DT_DEVICE_QUALIFIER, 0,
+ status = usb_get_descriptor(udev, USB_DT_DEVICE_QUALIFIER, 0,
qual, sizeof *qual);
if (status == sizeof *qual) {
dev_info(&udev->dev, "not running at top speed; "
@@ -4554,7 +4563,7 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1)
}
static unsigned
-hub_power_remaining (struct usb_hub *hub)
+hub_power_remaining(struct usb_hub *hub)
{
struct usb_device *hdev = hub->hdev;
int remaining;
@@ -4741,7 +4750,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0200
&& udev->speed == USB_SPEED_FULL
&& highspeed_hubs != 0)
- check_highspeed (hub, udev, port1);
+ check_highspeed(hub, udev, port1);
/* Store the parent's children[] pointer. At this point
* udev becomes globally accessible, although presumably
@@ -5115,7 +5124,7 @@ static const struct usb_device_id hub_id_table[] = {
{ } /* Terminating entry */
};
-MODULE_DEVICE_TABLE (usb, hub_id_table);
+MODULE_DEVICE_TABLE(usb, hub_id_table);
static struct usb_driver hub_driver = {
.name = "hub",
@@ -5227,7 +5236,7 @@ static int descriptors_changed(struct usb_device *udev,
changed = 1;
break;
}
- if (memcmp (buf, udev->rawdescriptors[index], old_length)
+ if (memcmp(buf, udev->rawdescriptors[index], old_length)
!= 0) {
dev_dbg(&udev->dev, "config index %d changed (#%d)\n",
index,
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index f368d2053da5..8e641b5893ed 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1387,8 +1387,6 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
* new altsetting.
*/
if (manual) {
- int i;
-
for (i = 0; i < alt->desc.bNumEndpoints; i++) {
epaddr = alt->endpoint[i].desc.bEndpointAddress;
pipe = __create_pipe(dev,
@@ -1555,6 +1553,44 @@ static void usb_release_interface(struct device *dev)
kfree(intf);
}
+/*
+ * usb_deauthorize_interface - deauthorize a USB interface
+ *
+ * @intf: USB interface structure
+ */
+void usb_deauthorize_interface(struct usb_interface *intf)
+{
+ struct device *dev = &intf->dev;
+
+ device_lock(dev->parent);
+
+ if (intf->authorized) {
+ device_lock(dev);
+ intf->authorized = 0;
+ device_unlock(dev);
+
+ usb_forced_unbind_intf(intf);
+ }
+
+ device_unlock(dev->parent);
+}
+
+/*
+ * usb_authorize_interface - authorize a USB interface
+ *
+ * @intf: USB interface structure
+ */
+void usb_authorize_interface(struct usb_interface *intf)
+{
+ struct device *dev = &intf->dev;
+
+ if (!intf->authorized) {
+ device_lock(dev);
+ intf->authorized = 1; /* authorize interface */
+ device_unlock(dev);
+ }
+}
+
static int usb_if_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct usb_device *usb_dev;
@@ -1807,6 +1843,7 @@ free_interfaces:
intfc = cp->intf_cache[i];
intf->altsetting = intfc->altsetting;
intf->num_altsetting = intfc->num_altsetting;
+ intf->authorized = !!HCD_INTF_AUTHORIZED(hcd);
kref_get(&intfc->ref);
alt = usb_altnum_to_altsetting(intf, 0);
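
With this added line, usb_set_configuration() seeds each interface's authorized flag from the HCD-wide default set through interface_authorized_default above, so the knob affects devices configured after it is changed while interfaces that are already bound keep their current state.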
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index d85abfed84cc..f5a381945db2 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -54,6 +54,13 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
+ /* Logitech ConferenceCam CC3000e */
+ { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT },
+
+ /* Logitech PTZ Pro Camera */
+ { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Logitech Quickcam Fusion */
{ USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -78,6 +85,12 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Philips PSC805 audio device */
{ USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Plantronic Audio 655 DSP */
+ { USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* Plantronic Audio 648 USB */
+ { USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Artisman Watchdog Dongle */
{ USB_DEVICE(0x04b4, 0x0526), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
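
The new quirk entries match on vendor/product ID alone: USB_DEVICE() from include/linux/usb.h fills in a struct usb_device_id that usb_detect_quirks() checks during enumeration, and the driver_info bits then delay initialisation (DELAY_INIT) or force a reset on resume (RESET_RESUME) for the listed cameras and headsets:

#define USB_DEVICE(vend, prod) \
	.match_flags = USB_DEVICE_ID_MATCH_DEVICE, \
	.idVendor = (vend), \
	.idProduct = (prod)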
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index cfc68c11c3f5..d9ec2de6c4cf 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -957,6 +957,41 @@ static ssize_t supports_autosuspend_show(struct device *dev,
}
static DEVICE_ATTR_RO(supports_autosuspend);
+/*
+ * interface_authorized_show - show authorization status of a USB interface
+ * 1 is authorized, 0 is deauthorized
+ */
+static ssize_t interface_authorized_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+
+ return sprintf(buf, "%u\n", intf->authorized);
+}
+
+/*
+ * interface_authorized_store - authorize or deauthorize a USB interface
+ */
+static ssize_t interface_authorized_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ bool val;
+
+ if (strtobool(buf, &val) != 0)
+ return -EINVAL;
+
+ if (val)
+ usb_authorize_interface(intf);
+ else
+ usb_deauthorize_interface(intf);
+
+ return count;
+}
+static struct device_attribute dev_attr_interface_authorized =
+ __ATTR(authorized, S_IRUGO | S_IWUSR,
+ interface_authorized_show, interface_authorized_store);
+
static struct attribute *intf_attrs[] = {
&dev_attr_bInterfaceNumber.attr,
&dev_attr_bAlternateSetting.attr,
@@ -966,6 +1001,7 @@ static struct attribute *intf_attrs[] = {
&dev_attr_bInterfaceProtocol.attr,
&dev_attr_modalias.attr,
&dev_attr_supports_autosuspend.attr,
+ &dev_attr_interface_authorized.attr,
NULL,
};
static struct attribute_group intf_attr_grp = {
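
The new per-interface attribute exposes intf->authorized next to the existing interface attributes; writing 0 to it (for illustration, something like echo 0 > /sys/bus/usb/devices/1-1:1.0/authorized) ends up in usb_deauthorize_interface(), which force-unbinds the driver, while writing 1 re-authorizes the interface so a driver may bind again.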
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 8d5b2f4113cd..f8bbd0b6d9fe 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -510,7 +510,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
if (root_hub) /* Root hub always ok [and always wired] */
dev->authorized = 1;
else {
- dev->authorized = usb_hcd->authorized_default;
+ dev->authorized = !!HCD_DEV_AUTHORIZED(usb_hcd);
dev->wusb = usb_bus_is_wusb(bus) ? 1 : 0;
}
return dev;
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 457255a3306a..05b5e17abf92 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -27,6 +27,8 @@ extern void usb_release_interface_cache(struct kref *ref);
extern void usb_disable_device(struct usb_device *dev, int skip_ep0);
extern int usb_deauthorize_device(struct usb_device *);
extern int usb_authorize_device(struct usb_device *);
+extern void usb_deauthorize_interface(struct usb_interface *);
+extern void usb_authorize_interface(struct usb_interface *);
extern void usb_detect_quirks(struct usb_device *udev);
extern void usb_detect_interface_quirks(struct usb_device *udev);
extern int usb_remove_device(struct usb_device *udev);
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index d1b81539d632..d6199507f861 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -159,8 +159,10 @@ static int ep_bd_list_alloc(struct bdc_ep *ep)
bd_table->start_bd = dma_pool_alloc(bdc->bd_table_pool,
GFP_ATOMIC,
&dma);
- if (!bd_table->start_bd)
+ if (!bd_table->start_bd) {
+ kfree(bd_table);
goto fail;
+ }
bd_table->dma = dma;
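
This hunk frees the bd_table that was just allocated before jumping to the error path; without the kfree() the structure would leak whenever dma_pool_alloc() fails, since the fail label presumably only unwinds tables already linked into the endpoint's bd list.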
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 079991e283e9..3bb08870148f 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -348,16 +348,6 @@ config USB_ISP1362_HCD
To compile this driver as a module, choose M here: the
module will be called isp1362-hcd.
-config USB_FUSBH200_HCD
- tristate "FUSBH200 HCD support"
- depends on USB
- ---help---
- Faraday FUSBH200 is designed to meet USB2.0 EHCI specification
- with minor modification.
-
- To compile this driver as a module, choose M here: the
- module will be called fusbh200-hcd.
-
config USB_FOTG210_HCD
tristate "FOTG210 HCD support"
depends on USB
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 754efaa8ccf8..e7558abc994d 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -28,9 +28,6 @@ ifneq ($(CONFIG_USB), )
obj-$(CONFIG_PCI) += pci-quirks.o
endif
-obj-$(CONFIG_USB_XHCI_PCI) += xhci-pci.o
-obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o
-
obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
obj-$(CONFIG_USB_EHCI_PCI) += ehci-pci.o
obj-$(CONFIG_USB_EHCI_HCD_PLATFORM) += ehci-platform.o
@@ -65,6 +62,8 @@ obj-$(CONFIG_USB_OHCI_HCD_PXA27X) += ohci-pxa27x.o
obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
obj-$(CONFIG_USB_FHCI_HCD) += fhci.o
obj-$(CONFIG_USB_XHCI_HCD) += xhci-hcd.o
+obj-$(CONFIG_USB_XHCI_PCI) += xhci-pci.o
+obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o
obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o
obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o
obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
@@ -75,6 +74,5 @@ obj-$(CONFIG_USB_FSL_MPH_DR_OF) += fsl-mph-dr-of.o
obj-$(CONFIG_USB_EHCI_FSL) += ehci-fsl.o
obj-$(CONFIG_USB_HCD_BCMA) += bcma-hcd.o
obj-$(CONFIG_USB_HCD_SSB) += ssb-hcd.o
-obj-$(CONFIG_USB_FUSBH200_HCD) += fusbh200-hcd.o
obj-$(CONFIG_USB_FOTG210_HCD) += fotg210-hcd.o
obj-$(CONFIG_USB_MAX3421_HCD) += max3421-hcd.o
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
index 275c92e53a59..c4f84c81de01 100644
--- a/drivers/usb/host/ehci-msm.c
+++ b/drivers/usb/host/ehci-msm.c
@@ -80,12 +80,12 @@ static int ehci_msm_probe(struct platform_device *pdev)
return -ENOMEM;
}
- hcd->irq = platform_get_irq(pdev, 0);
- if (hcd->irq < 0) {
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
dev_err(&pdev->dev, "Unable to get IRQ resource\n");
- ret = hcd->irq;
goto put_hcd;
}
+ hcd->irq = ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index bfcbb9aa8816..ee8d5faa0194 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -224,7 +224,8 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
priv->phy = devm_phy_optional_get(&pdev->dev, "usb");
if (IS_ERR(priv->phy)) {
err = PTR_ERR(priv->phy);
- goto err_phy_get;
+ if (err != -ENOSYS)
+ goto err_phy_get;
} else {
err = phy_init(priv->phy);
if (err)
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 5c3c08598682..bd7082f297bb 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -19,6 +19,7 @@
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
+#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
@@ -162,8 +163,10 @@ static int ehci_platform_probe(struct platform_device *dev)
err = dma_coerce_mask_and_coherent(&dev->dev,
pdata->dma_mask_64 ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
- if (err)
+ if (err) {
+ dev_err(&dev->dev, "Error: DMA mask configuration failed\n");
return err;
+ }
irq = platform_get_irq(dev, 0);
if (irq < 0) {
@@ -385,6 +388,12 @@ static const struct of_device_id vt8500_ehci_ids[] = {
};
MODULE_DEVICE_TABLE(of, vt8500_ehci_ids);
+static const struct acpi_device_id ehci_acpi_match[] = {
+ { "PNP0D20", 0 }, /* EHCI controller without debug */
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, ehci_acpi_match);
+
static const struct platform_device_id ehci_platform_table[] = {
{ "ehci-platform", 0 },
{ }
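
The added acpi_match_table lets the generic ehci-platform driver bind to EHCI controllers that firmware describes with the standard PNP0D20 _HID. ACPI_PTR() keeps the reference out of non-ACPI builds; its definition in include/linux/acpi.h is essentially:

#ifdef CONFIG_ACPI
#define ACPI_PTR(_ptr)	(_ptr)
#else
#define ACPI_PTR(_ptr)	(NULL)
#endif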
@@ -403,6 +412,7 @@ static struct platform_driver ehci_platform_driver = {
.name = "ehci-platform",
.pm = &ehci_platform_pm_ops,
.of_match_table = vt8500_ehci_ids,
+ .acpi_match_table = ACPI_PTR(ehci_acpi_match),
}
};
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
index 34e14746b92e..3c4e5253955c 100644
--- a/drivers/usb/host/ehci-spear.c
+++ b/drivers/usb/host/ehci-spear.c
@@ -149,6 +149,7 @@ static const struct of_device_id spear_ehci_id_table[] = {
{ .compatible = "st,spear600-ehci", },
{ },
};
+MODULE_DEVICE_TABLE(of, spear_ehci_id_table);
static struct platform_driver spear_ehci_hcd_driver = {
.probe = spear_ehci_hcd_drv_probe,
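
Adding MODULE_DEVICE_TABLE(of, spear_ehci_id_table) exports the compatible strings in the module's alias table, so udev/kmod can autoload the SPEAr EHCI driver when a matching st,spear600-ehci node appears in the device tree rather than requiring a manual modprobe.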
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 000ed80ab592..787f4e3d16d8 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -1,5 +1,4 @@
-/*
- * Faraday FOTG210 EHCI-like driver
+/* Faraday FOTG210 EHCI-like driver
*
* Copyright (c) 2013 Faraday Technology Corporation
*
@@ -50,32 +49,29 @@
#include <asm/irq.h>
#include <asm/unaligned.h>
-/*-------------------------------------------------------------------------*/
#define DRIVER_AUTHOR "Yuan-Hsin Chen"
#define DRIVER_DESC "FOTG210 Host Controller (EHCI) Driver"
-
-static const char hcd_name[] = "fotg210_hcd";
+static const char hcd_name[] = "fotg210_hcd";
#undef FOTG210_URB_TRACE
-
#define FOTG210_STATS
/* magic numbers that can affect system performance */
-#define FOTG210_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
-#define FOTG210_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
-#define FOTG210_TUNE_RL_TT 0
-#define FOTG210_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
-#define FOTG210_TUNE_MULT_TT 1
-/*
- * Some drivers think it's safe to schedule isochronous transfers more than
- * 256 ms into the future (partly as a result of an old bug in the scheduling
+#define FOTG210_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
+#define FOTG210_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
+#define FOTG210_TUNE_RL_TT 0
+#define FOTG210_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
+#define FOTG210_TUNE_MULT_TT 1
+
+/* Some drivers think it's safe to schedule isochronous transfers more than 256
+ * ms into the future (partly as a result of an old bug in the scheduling
* code). In an attempt to avoid trouble, we will use a minimum scheduling
* length of 512 frames instead of 256.
*/
-#define FOTG210_TUNE_FLS 1 /* (medium) 512-frame schedule */
+#define FOTG210_TUNE_FLS 1 /* (medium) 512-frame schedule */
/* Initial IRQ latency: faster than hw default */
-static int log2_irq_thresh; /* 0 to 6 */
+static int log2_irq_thresh; /* 0 to 6 */
module_param(log2_irq_thresh, int, S_IRUGO);
MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
@@ -89,66 +85,57 @@ static unsigned int hird;
module_param(hird, int, S_IRUGO);
MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us");
-#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
+#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
#include "fotg210.h"
-/*-------------------------------------------------------------------------*/
-
#define fotg210_dbg(fotg210, fmt, args...) \
- dev_dbg(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args)
+ dev_dbg(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args)
#define fotg210_err(fotg210, fmt, args...) \
- dev_err(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args)
+ dev_err(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args)
#define fotg210_info(fotg210, fmt, args...) \
- dev_info(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args)
+ dev_info(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args)
#define fotg210_warn(fotg210, fmt, args...) \
- dev_warn(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args)
+ dev_warn(fotg210_to_hcd(fotg210)->self.controller, fmt, ## args)
-/* check the values in the HCSPARAMS register
- * (host controller _Structural_ parameters)
- * see EHCI spec, Table 2-4 for each value
+/* check the values in the HCSPARAMS register (host controller _Structural_
+ * parameters) see EHCI spec, Table 2-4 for each value
*/
static void dbg_hcs_params(struct fotg210_hcd *fotg210, char *label)
{
- u32 params = fotg210_readl(fotg210, &fotg210->caps->hcs_params);
+ u32 params = fotg210_readl(fotg210, &fotg210->caps->hcs_params);
- fotg210_dbg(fotg210,
- "%s hcs_params 0x%x ports=%d\n",
- label, params,
- HCS_N_PORTS(params)
- );
+ fotg210_dbg(fotg210, "%s hcs_params 0x%x ports=%d\n", label, params,
+ HCS_N_PORTS(params));
}
-/* check the values in the HCCPARAMS register
- * (host controller _Capability_ parameters)
- * see EHCI Spec, Table 2-5 for each value
- * */
+/* check the values in the HCCPARAMS register (host controller _Capability_
+ * parameters) see EHCI Spec, Table 2-5 for each value
+ */
static void dbg_hcc_params(struct fotg210_hcd *fotg210, char *label)
{
- u32 params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
+ u32 params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
- fotg210_dbg(fotg210,
- "%s hcc_params %04x uframes %s%s\n",
- label,
- params,
- HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
- HCC_CANPARK(params) ? " park" : "");
+ fotg210_dbg(fotg210, "%s hcc_params %04x uframes %s%s\n", label,
+ params,
+ HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
+ HCC_CANPARK(params) ? " park" : "");
}
static void __maybe_unused
dbg_qtd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd)
{
fotg210_dbg(fotg210, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
- hc32_to_cpup(fotg210, &qtd->hw_next),
- hc32_to_cpup(fotg210, &qtd->hw_alt_next),
- hc32_to_cpup(fotg210, &qtd->hw_token),
- hc32_to_cpup(fotg210, &qtd->hw_buf[0]));
+ hc32_to_cpup(fotg210, &qtd->hw_next),
+ hc32_to_cpup(fotg210, &qtd->hw_alt_next),
+ hc32_to_cpup(fotg210, &qtd->hw_token),
+ hc32_to_cpup(fotg210, &qtd->hw_buf[0]));
if (qtd->hw_buf[1])
fotg210_dbg(fotg210, " p1=%08x p2=%08x p3=%08x p4=%08x\n",
- hc32_to_cpup(fotg210, &qtd->hw_buf[1]),
- hc32_to_cpup(fotg210, &qtd->hw_buf[2]),
- hc32_to_cpup(fotg210, &qtd->hw_buf[3]),
- hc32_to_cpup(fotg210, &qtd->hw_buf[4]));
+ hc32_to_cpup(fotg210, &qtd->hw_buf[1]),
+ hc32_to_cpup(fotg210, &qtd->hw_buf[2]),
+ hc32_to_cpup(fotg210, &qtd->hw_buf[3]),
+ hc32_to_cpup(fotg210, &qtd->hw_buf[4]));
}
static void __maybe_unused
@@ -156,101 +143,100 @@ dbg_qh(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
{
struct fotg210_qh_hw *hw = qh->hw;
- fotg210_dbg(fotg210, "%s qh %p n%08x info %x %x qtd %x\n", label,
- qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current);
+ fotg210_dbg(fotg210, "%s qh %p n%08x info %x %x qtd %x\n", label, qh,
+ hw->hw_next, hw->hw_info1, hw->hw_info2,
+ hw->hw_current);
+
dbg_qtd("overlay", fotg210, (struct fotg210_qtd *) &hw->hw_qtd_next);
}
static void __maybe_unused
dbg_itd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
{
- fotg210_dbg(fotg210, "%s[%d] itd %p, next %08x, urb %p\n",
- label, itd->frame, itd, hc32_to_cpu(fotg210, itd->hw_next),
- itd->urb);
+ fotg210_dbg(fotg210, "%s[%d] itd %p, next %08x, urb %p\n", label,
+ itd->frame, itd, hc32_to_cpu(fotg210, itd->hw_next),
+ itd->urb);
+
fotg210_dbg(fotg210,
- " trans: %08x %08x %08x %08x %08x %08x %08x %08x\n",
- hc32_to_cpu(fotg210, itd->hw_transaction[0]),
- hc32_to_cpu(fotg210, itd->hw_transaction[1]),
- hc32_to_cpu(fotg210, itd->hw_transaction[2]),
- hc32_to_cpu(fotg210, itd->hw_transaction[3]),
- hc32_to_cpu(fotg210, itd->hw_transaction[4]),
- hc32_to_cpu(fotg210, itd->hw_transaction[5]),
- hc32_to_cpu(fotg210, itd->hw_transaction[6]),
- hc32_to_cpu(fotg210, itd->hw_transaction[7]));
+ " trans: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ hc32_to_cpu(fotg210, itd->hw_transaction[0]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[1]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[2]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[3]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[4]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[5]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[6]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[7]));
+
fotg210_dbg(fotg210,
- " buf: %08x %08x %08x %08x %08x %08x %08x\n",
- hc32_to_cpu(fotg210, itd->hw_bufp[0]),
- hc32_to_cpu(fotg210, itd->hw_bufp[1]),
- hc32_to_cpu(fotg210, itd->hw_bufp[2]),
- hc32_to_cpu(fotg210, itd->hw_bufp[3]),
- hc32_to_cpu(fotg210, itd->hw_bufp[4]),
- hc32_to_cpu(fotg210, itd->hw_bufp[5]),
- hc32_to_cpu(fotg210, itd->hw_bufp[6]));
+ " buf: %08x %08x %08x %08x %08x %08x %08x\n",
+ hc32_to_cpu(fotg210, itd->hw_bufp[0]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[1]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[2]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[3]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[4]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[5]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[6]));
+
fotg210_dbg(fotg210, " index: %d %d %d %d %d %d %d %d\n",
- itd->index[0], itd->index[1], itd->index[2],
- itd->index[3], itd->index[4], itd->index[5],
- itd->index[6], itd->index[7]);
+ itd->index[0], itd->index[1], itd->index[2],
+ itd->index[3], itd->index[4], itd->index[5],
+ itd->index[6], itd->index[7]);
}
static int __maybe_unused
dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{
- return scnprintf(buf, len,
- "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
- label, label[0] ? " " : "", status,
- (status & STS_ASS) ? " Async" : "",
- (status & STS_PSS) ? " Periodic" : "",
- (status & STS_RECL) ? " Recl" : "",
- (status & STS_HALT) ? " Halt" : "",
- (status & STS_IAA) ? " IAA" : "",
- (status & STS_FATAL) ? " FATAL" : "",
- (status & STS_FLR) ? " FLR" : "",
- (status & STS_PCD) ? " PCD" : "",
- (status & STS_ERR) ? " ERR" : "",
- (status & STS_INT) ? " INT" : ""
- );
+ return scnprintf(buf, len, "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
+ label, label[0] ? " " : "", status,
+ (status & STS_ASS) ? " Async" : "",
+ (status & STS_PSS) ? " Periodic" : "",
+ (status & STS_RECL) ? " Recl" : "",
+ (status & STS_HALT) ? " Halt" : "",
+ (status & STS_IAA) ? " IAA" : "",
+ (status & STS_FATAL) ? " FATAL" : "",
+ (status & STS_FLR) ? " FLR" : "",
+ (status & STS_PCD) ? " PCD" : "",
+ (status & STS_ERR) ? " ERR" : "",
+ (status & STS_INT) ? " INT" : "");
}
static int __maybe_unused
dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{
- return scnprintf(buf, len,
- "%s%sintrenable %02x%s%s%s%s%s%s",
- label, label[0] ? " " : "", enable,
- (enable & STS_IAA) ? " IAA" : "",
- (enable & STS_FATAL) ? " FATAL" : "",
- (enable & STS_FLR) ? " FLR" : "",
- (enable & STS_PCD) ? " PCD" : "",
- (enable & STS_ERR) ? " ERR" : "",
- (enable & STS_INT) ? " INT" : ""
- );
+ return scnprintf(buf, len, "%s%sintrenable %02x%s%s%s%s%s%s",
+ label, label[0] ? " " : "", enable,
+ (enable & STS_IAA) ? " IAA" : "",
+ (enable & STS_FATAL) ? " FATAL" : "",
+ (enable & STS_FLR) ? " FLR" : "",
+ (enable & STS_PCD) ? " PCD" : "",
+ (enable & STS_ERR) ? " ERR" : "",
+ (enable & STS_INT) ? " INT" : "");
}
static const char *const fls_strings[] = { "1024", "512", "256", "??" };
-static int
-dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
+static int dbg_command_buf(char *buf, unsigned len, const char *label,
+ u32 command)
{
return scnprintf(buf, len,
- "%s%scommand %07x %s=%d ithresh=%d%s%s%s "
- "period=%s%s %s",
- label, label[0] ? " " : "", command,
- (command & CMD_PARK) ? " park" : "(park)",
- CMD_PARK_CNT(command),
- (command >> 16) & 0x3f,
- (command & CMD_IAAD) ? " IAAD" : "",
- (command & CMD_ASE) ? " Async" : "",
- (command & CMD_PSE) ? " Periodic" : "",
- fls_strings[(command >> 2) & 0x3],
- (command & CMD_RESET) ? " Reset" : "",
- (command & CMD_RUN) ? "RUN" : "HALT"
- );
-}
-
-static char
-*dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
-{
- char *sig;
+ "%s%scommand %07x %s=%d ithresh=%d%s%s%s period=%s%s %s",
+ label, label[0] ? " " : "", command,
+ (command & CMD_PARK) ? " park" : "(park)",
+ CMD_PARK_CNT(command),
+ (command >> 16) & 0x3f,
+ (command & CMD_IAAD) ? " IAAD" : "",
+ (command & CMD_ASE) ? " Async" : "",
+ (command & CMD_PSE) ? " Periodic" : "",
+ fls_strings[(command >> 2) & 0x3],
+ (command & CMD_RESET) ? " Reset" : "",
+ (command & CMD_RUN) ? "RUN" : "HALT");
+}
+
+static char *dbg_port_buf(char *buf, unsigned len, const char *label, int port,
+ u32 status)
+{
+ char *sig;
/* signaling state */
switch (status & (3 << 10)) {
@@ -268,44 +254,41 @@ static char
break;
}
- scnprintf(buf, len,
- "%s%sport:%d status %06x %d "
- "sig=%s%s%s%s%s%s%s%s",
- label, label[0] ? " " : "", port, status,
- status>>25,/*device address */
- sig,
- (status & PORT_RESET) ? " RESET" : "",
- (status & PORT_SUSPEND) ? " SUSPEND" : "",
- (status & PORT_RESUME) ? " RESUME" : "",
- (status & PORT_PEC) ? " PEC" : "",
- (status & PORT_PE) ? " PE" : "",
- (status & PORT_CSC) ? " CSC" : "",
- (status & PORT_CONNECT) ? " CONNECT" : "");
+ scnprintf(buf, len, "%s%sport:%d status %06x %d sig=%s%s%s%s%s%s%s%s",
+ label, label[0] ? " " : "", port, status,
+ status >> 25, /*device address */
+ sig,
+ (status & PORT_RESET) ? " RESET" : "",
+ (status & PORT_SUSPEND) ? " SUSPEND" : "",
+ (status & PORT_RESUME) ? " RESUME" : "",
+ (status & PORT_PEC) ? " PEC" : "",
+ (status & PORT_PE) ? " PE" : "",
+ (status & PORT_CSC) ? " CSC" : "",
+ (status & PORT_CONNECT) ? " CONNECT" : "");
+
return buf;
}
/* functions have the "wrong" filename when they're output... */
-#define dbg_status(fotg210, label, status) { \
- char _buf[80]; \
- dbg_status_buf(_buf, sizeof(_buf), label, status); \
- fotg210_dbg(fotg210, "%s\n", _buf); \
+#define dbg_status(fotg210, label, status) { \
+ char _buf[80]; \
+ dbg_status_buf(_buf, sizeof(_buf), label, status); \
+ fotg210_dbg(fotg210, "%s\n", _buf); \
}
-#define dbg_cmd(fotg210, label, command) { \
- char _buf[80]; \
- dbg_command_buf(_buf, sizeof(_buf), label, command); \
- fotg210_dbg(fotg210, "%s\n", _buf); \
+#define dbg_cmd(fotg210, label, command) { \
+ char _buf[80]; \
+ dbg_command_buf(_buf, sizeof(_buf), label, command); \
+ fotg210_dbg(fotg210, "%s\n", _buf); \
}
-#define dbg_port(fotg210, label, port, status) { \
- char _buf[80]; \
- fotg210_dbg(fotg210, "%s\n", dbg_port_buf(_buf, sizeof(_buf), label, port, status) ); \
+#define dbg_port(fotg210, label, port, status) { \
+ char _buf[80]; \
+ fotg210_dbg(fotg210, "%s\n", \
+ dbg_port_buf(_buf, sizeof(_buf), label, port, status));\
}
-/*-------------------------------------------------------------------------*/
-
/* troubleshooting help: expose state in debugfs */
-
static int debug_async_open(struct inode *, struct file *);
static int debug_periodic_open(struct inode *, struct file *);
static int debug_registers_open(struct inode *, struct file *);
@@ -347,17 +330,22 @@ struct debug_buffer {
size_t alloc_size;
};
-#define speed_char(info1)({ char tmp; \
- switch (info1 & (3 << 12)) { \
- case QH_FULL_SPEED: \
- tmp = 'f'; break; \
- case QH_LOW_SPEED: \
- tmp = 'l'; break; \
- case QH_HIGH_SPEED: \
- tmp = 'h'; break; \
- default: \
- tmp = '?'; break; \
- } tmp; })
+static inline char speed_char(u32 scratch)
+{
+ switch (scratch & (3 << 12)) {
+ case QH_FULL_SPEED:
+ return 'f';
+
+ case QH_LOW_SPEED:
+ return 'l';
+
+ case QH_HIGH_SPEED:
+ return 'h';
+
+ default:
+ return '?';
+ }
+}
static inline char token_mark(struct fotg210_hcd *fotg210, __hc32 token)
{
@@ -373,33 +361,29 @@ static inline char token_mark(struct fotg210_hcd *fotg210, __hc32 token)
return '/';
}
-static void qh_lines(
- struct fotg210_hcd *fotg210,
- struct fotg210_qh *qh,
- char **nextp,
- unsigned *sizep
-)
-{
- u32 scratch;
- u32 hw_curr;
- struct fotg210_qtd *td;
- unsigned temp;
- unsigned size = *sizep;
- char *next = *nextp;
- char mark;
- __le32 list_end = FOTG210_LIST_END(fotg210);
- struct fotg210_qh_hw *hw = qh->hw;
-
- if (hw->hw_qtd_next == list_end) /* NEC does this */
+static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
+ char **nextp, unsigned *sizep)
+{
+ u32 scratch;
+ u32 hw_curr;
+ struct fotg210_qtd *td;
+ unsigned temp;
+ unsigned size = *sizep;
+ char *next = *nextp;
+ char mark;
+ __le32 list_end = FOTG210_LIST_END(fotg210);
+ struct fotg210_qh_hw *hw = qh->hw;
+
+ if (hw->hw_qtd_next == list_end) /* NEC does this */
mark = '@';
else
mark = token_mark(fotg210, hw->hw_token);
- if (mark == '/') { /* qh_alt_next controls qh advance? */
- if ((hw->hw_alt_next & QTD_MASK(fotg210))
- == fotg210->async->hw->hw_alt_next)
- mark = '#'; /* blocked */
+ if (mark == '/') { /* qh_alt_next controls qh advance? */
+ if ((hw->hw_alt_next & QTD_MASK(fotg210)) ==
+ fotg210->async->hw->hw_alt_next)
+ mark = '#'; /* blocked */
else if (hw->hw_alt_next == list_end)
- mark = '.'; /* use hw_qtd_next */
+ mark = '.'; /* use hw_qtd_next */
/* else alt_next points to some other qtd */
}
scratch = hc32_to_cpup(fotg210, &hw->hw_info1);
@@ -462,6 +446,7 @@ static void qh_lines(
temp = snprintf(next, size, "\n");
if (size < temp)
temp = size;
+
size -= temp;
next += temp;
@@ -472,12 +457,12 @@ done:
static ssize_t fill_async_buffer(struct debug_buffer *buf)
{
- struct usb_hcd *hcd;
- struct fotg210_hcd *fotg210;
- unsigned long flags;
- unsigned temp, size;
- char *next;
- struct fotg210_qh *qh;
+ struct usb_hcd *hcd;
+ struct fotg210_hcd *fotg210;
+ unsigned long flags;
+ unsigned temp, size;
+ char *next;
+ struct fotg210_qh *qh;
hcd = bus_to_hcd(buf->bus);
fotg210 = hcd_to_fotg210(hcd);
@@ -492,7 +477,7 @@ static ssize_t fill_async_buffer(struct debug_buffer *buf)
*/
spin_lock_irqsave(&fotg210->lock, flags);
for (qh = fotg210->async->qh_next.qh; size > 0 && qh;
- qh = qh->qh_next.qh)
+ qh = qh->qh_next.qh)
qh_lines(fotg210, qh, &next, &size);
if (fotg210->async_unlink && size > 0) {
temp = scnprintf(next, size, "\nunlink =\n");
@@ -508,21 +493,50 @@ static ssize_t fill_async_buffer(struct debug_buffer *buf)
return strlen(buf->output_buf);
}
+/* count tds, get ep direction */
+static unsigned output_buf_tds_dir(char *buf, struct fotg210_hcd *fotg210,
+ struct fotg210_qh_hw *hw, struct fotg210_qh *qh, unsigned size)
+{
+ u32 scratch = hc32_to_cpup(fotg210, &hw->hw_info1);
+ struct fotg210_qtd *qtd;
+ char *type = "";
+ unsigned temp = 0;
+
+ /* count tds, get ep direction */
+ list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
+ temp++;
+ switch ((hc32_to_cpu(fotg210, qtd->hw_token) >> 8) & 0x03) {
+ case 0:
+ type = "out";
+ continue;
+ case 1:
+ type = "in";
+ continue;
+ }
+ }
+
+ return scnprintf(buf, size, "(%c%d ep%d%s [%d/%d] q%d p%d)",
+ speed_char(scratch), scratch & 0x007f,
+ (scratch >> 8) & 0x000f, type, qh->usecs,
+ qh->c_usecs, temp, (scratch >> 16) & 0x7ff);
+}
+
#define DBG_SCHED_LIMIT 64
static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
{
- struct usb_hcd *hcd;
- struct fotg210_hcd *fotg210;
- unsigned long flags;
- union fotg210_shadow p, *seen;
- unsigned temp, size, seen_count;
- char *next;
- unsigned i;
- __hc32 tag;
-
- seen = kmalloc(DBG_SCHED_LIMIT * sizeof(*seen), GFP_ATOMIC);
+ struct usb_hcd *hcd;
+ struct fotg210_hcd *fotg210;
+ unsigned long flags;
+ union fotg210_shadow p, *seen;
+ unsigned temp, size, seen_count;
+ char *next;
+ unsigned i;
+ __hc32 tag;
+
+ seen = kmalloc_array(DBG_SCHED_LIMIT, sizeof(*seen), GFP_ATOMIC);
if (!seen)
return 0;
+
seen_count = 0;
hcd = bus_to_hcd(buf->bus);
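
The switch from kmalloc() to kmalloc_array() is behaviour-preserving here, since DBG_SCHED_LIMIT is a small constant, but it is the preferred spelling because kmalloc_array(n, size, flags) fails cleanly instead of allocating a truncated buffer if n * size were ever to overflow.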
@@ -542,6 +556,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
p = fotg210->pshadow[i];
if (likely(!p.ptr))
continue;
+
tag = Q_NEXT_TYPE(fotg210, fotg210->periodic[i]);
temp = scnprintf(next, size, "%4d: ", i);
@@ -569,7 +584,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
continue;
if (p.qh->qh_next.ptr) {
temp = scnprintf(next, size,
- " ...");
+ " ...");
size -= temp;
next += temp;
}
@@ -577,38 +592,9 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
}
/* show more info the first time around */
if (temp == seen_count) {
- u32 scratch = hc32_to_cpup(fotg210,
- &hw->hw_info1);
- struct fotg210_qtd *qtd;
- char *type = "";
-
- /* count tds, get ep direction */
- temp = 0;
- list_for_each_entry(qtd,
- &p.qh->qtd_list,
- qtd_list) {
- temp++;
- switch (0x03 & (hc32_to_cpu(
- fotg210,
- qtd->hw_token) >> 8)) {
- case 0:
- type = "out";
- continue;
- case 1:
- type = "in";
- continue;
- }
- }
-
- temp = scnprintf(next, size,
- "(%c%d ep%d%s "
- "[%d/%d] q%d p%d)",
- speed_char(scratch),
- scratch & 0x007f,
- (scratch >> 8) & 0x000f, type,
- p.qh->usecs, p.qh->c_usecs,
- temp,
- 0x7ff & (scratch >> 16));
+ temp = output_buf_tds_dir(next,
+ fotg210, hw,
+ p.qh, size);
if (seen_count < DBG_SCHED_LIMIT)
seen[seen_count++].qh = p.qh;
@@ -619,14 +605,14 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
break;
case Q_TYPE_FSTN:
temp = scnprintf(next, size,
- " fstn-%8x/%p", p.fstn->hw_prev,
- p.fstn);
+ " fstn-%8x/%p",
+ p.fstn->hw_prev, p.fstn);
tag = Q_NEXT_TYPE(fotg210, p.fstn->hw_next);
p = p.fstn->fstn_next;
break;
case Q_TYPE_ITD:
temp = scnprintf(next, size,
- " itd/%p", p.itd);
+ " itd/%p", p.itd);
tag = Q_NEXT_TYPE(fotg210, p.itd->hw_next);
p = p.itd->itd_next;
break;
@@ -663,13 +649,13 @@ static const char *rh_state_string(struct fotg210_hcd *fotg210)
static ssize_t fill_registers_buffer(struct debug_buffer *buf)
{
- struct usb_hcd *hcd;
- struct fotg210_hcd *fotg210;
- unsigned long flags;
- unsigned temp, size, i;
- char *next, scratch[80];
- static const char fmt[] = "%*s\n";
- static const char label[] = "";
+ struct usb_hcd *hcd;
+ struct fotg210_hcd *fotg210;
+ unsigned long flags;
+ unsigned temp, size, i;
+ char *next, scratch[80];
+ static const char fmt[] = "%*s\n";
+ static const char label[] = "";
hcd = bus_to_hcd(buf->bus);
fotg210 = hcd_to_fotg210(hcd);
@@ -680,26 +666,26 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
if (!HCD_HW_ACCESSIBLE(hcd)) {
size = scnprintf(next, size,
- "bus %s, device %s\n"
- "%s\n"
- "SUSPENDED(no register access)\n",
- hcd->self.controller->bus->name,
- dev_name(hcd->self.controller),
- hcd->product_desc);
+ "bus %s, device %s\n"
+ "%s\n"
+ "SUSPENDED(no register access)\n",
+ hcd->self.controller->bus->name,
+ dev_name(hcd->self.controller),
+ hcd->product_desc);
goto done;
}
/* Capability Registers */
i = HC_VERSION(fotg210, fotg210_readl(fotg210,
- &fotg210->caps->hc_capbase));
+ &fotg210->caps->hc_capbase));
temp = scnprintf(next, size,
- "bus %s, device %s\n"
- "%s\n"
- "EHCI %x.%02x, rh state %s\n",
- hcd->self.controller->bus->name,
- dev_name(hcd->self.controller),
- hcd->product_desc,
- i >> 8, i & 0x0ff, rh_state_string(fotg210));
+ "bus %s, device %s\n"
+ "%s\n"
+ "EHCI %x.%02x, rh state %s\n",
+ hcd->self.controller->bus->name,
+ dev_name(hcd->self.controller),
+ hcd->product_desc,
+ i >> 8, i & 0x0ff, rh_state_string(fotg210));
size -= temp;
next += temp;
@@ -747,14 +733,14 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
#ifdef FOTG210_STATS
temp = scnprintf(next, size,
- "irq normal %ld err %ld iaa %ld(lost %ld)\n",
- fotg210->stats.normal, fotg210->stats.error, fotg210->stats.iaa,
- fotg210->stats.lost_iaa);
+ "irq normal %ld err %ld iaa %ld(lost %ld)\n",
+ fotg210->stats.normal, fotg210->stats.error,
+ fotg210->stats.iaa, fotg210->stats.lost_iaa);
size -= temp;
next += temp;
temp = scnprintf(next, size, "complete %ld unlink %ld\n",
- fotg210->stats.complete, fotg210->stats.unlink);
+ fotg210->stats.complete, fotg210->stats.unlink);
size -= temp;
next += temp;
#endif
@@ -765,8 +751,8 @@ done:
return buf->alloc_size - size;
}
-static struct debug_buffer *alloc_buffer(struct usb_bus *bus,
- ssize_t (*fill_func)(struct debug_buffer *))
+static struct debug_buffer
+*alloc_buffer(struct usb_bus *bus, ssize_t (*fill_func)(struct debug_buffer *))
{
struct debug_buffer *buf;
@@ -806,7 +792,7 @@ out:
}
static ssize_t debug_output(struct file *file, char __user *user_buf,
- size_t len, loff_t *offset)
+ size_t len, loff_t *offset)
{
struct debug_buffer *buf = file->private_data;
int ret = 0;
@@ -822,7 +808,7 @@ static ssize_t debug_output(struct file *file, char __user *user_buf,
mutex_unlock(&buf->mutex);
ret = simple_read_from_buffer(user_buf, len, offset,
- buf->output_buf, buf->count);
+ buf->output_buf, buf->count);
out:
return ret;
@@ -850,6 +836,7 @@ static int debug_async_open(struct inode *inode, struct file *file)
static int debug_periodic_open(struct inode *inode, struct file *file)
{
struct debug_buffer *buf;
+
buf = alloc_buffer(inode->i_private, fill_periodic_buffer);
if (!buf)
return -ENOMEM;
@@ -862,7 +849,7 @@ static int debug_periodic_open(struct inode *inode, struct file *file)
static int debug_registers_open(struct inode *inode, struct file *file)
{
file->private_data = alloc_buffer(inode->i_private,
- fill_registers_buffer);
+ fill_registers_buffer);
return file->private_data ? 0 : -ENOMEM;
}
@@ -872,20 +859,20 @@ static inline void create_debug_files(struct fotg210_hcd *fotg210)
struct usb_bus *bus = &fotg210_to_hcd(fotg210)->self;
fotg210->debug_dir = debugfs_create_dir(bus->bus_name,
- fotg210_debug_root);
+ fotg210_debug_root);
if (!fotg210->debug_dir)
return;
if (!debugfs_create_file("async", S_IRUGO, fotg210->debug_dir, bus,
- &debug_async_fops))
+ &debug_async_fops))
goto file_error;
if (!debugfs_create_file("periodic", S_IRUGO, fotg210->debug_dir, bus,
- &debug_periodic_fops))
+ &debug_periodic_fops))
goto file_error;
if (!debugfs_create_file("registers", S_IRUGO, fotg210->debug_dir, bus,
- &debug_registers_fops))
+ &debug_registers_fops))
goto file_error;
return;
@@ -899,10 +886,7 @@ static inline void remove_debug_files(struct fotg210_hcd *fotg210)
debugfs_remove_recursive(fotg210->debug_dir);
}
-/*-------------------------------------------------------------------------*/
-
-/*
- * handshake - spin reading hc until handshake completes or fails
+/* handshake - spin reading hc until handshake completes or fails
* @ptr: address of hc register to be read
* @mask: bits to look at in result of read
* @done: value of those bits when handshake succeeds
@@ -919,9 +903,9 @@ static inline void remove_debug_files(struct fotg210_hcd *fotg210)
* bridge shutdown: shutting down the bridge before the devices using it.
*/
static int handshake(struct fotg210_hcd *fotg210, void __iomem *ptr,
- u32 mask, u32 done, int usec)
+ u32 mask, u32 done, int usec)
{
- u32 result;
+ u32 result;
do {
result = fotg210_readl(fotg210, ptr);
@@ -936,13 +920,12 @@ static int handshake(struct fotg210_hcd *fotg210, void __iomem *ptr,
return -ETIMEDOUT;
}
-/*
- * Force HC to halt state from unknown (EHCI spec section 2.3).
+/* Force HC to halt state from unknown (EHCI spec section 2.3).
* Must be called with interrupts enabled and the lock not held.
*/
static int fotg210_halt(struct fotg210_hcd *fotg210)
{
- u32 temp;
+ u32 temp;
spin_lock_irq(&fotg210->lock);
@@ -962,20 +945,20 @@ static int fotg210_halt(struct fotg210_hcd *fotg210)
synchronize_irq(fotg210_to_hcd(fotg210)->irq);
return handshake(fotg210, &fotg210->regs->status,
- STS_HALT, STS_HALT, 16 * 125);
+ STS_HALT, STS_HALT, 16 * 125);
}
-/*
- * Reset a non-running (STS_HALT == 1) controller.
+/* Reset a non-running (STS_HALT == 1) controller.
* Must be called with interrupts enabled and the lock not held.
*/
static int fotg210_reset(struct fotg210_hcd *fotg210)
{
- int retval;
- u32 command = fotg210_readl(fotg210, &fotg210->regs->command);
+ int retval;
+ u32 command = fotg210_readl(fotg210, &fotg210->regs->command);
/* If the EHCI debug controller is active, special care must be
- * taken before and after a host controller reset */
+ * taken before and after a host controller reset
+ */
if (fotg210->debug && !dbgp_reset_prep(fotg210_to_hcd(fotg210)))
fotg210->debug = NULL;
@@ -985,7 +968,7 @@ static int fotg210_reset(struct fotg210_hcd *fotg210)
fotg210->rh_state = FOTG210_RH_HALTED;
fotg210->next_statechange = jiffies;
retval = handshake(fotg210, &fotg210->regs->command,
- CMD_RESET, 0, 250 * 1000);
+ CMD_RESET, 0, 250 * 1000);
if (retval)
return retval;
@@ -998,13 +981,12 @@ static int fotg210_reset(struct fotg210_hcd *fotg210)
return retval;
}
-/*
- * Idle the controller (turn off the schedules).
+/* Idle the controller (turn off the schedules).
* Must be called with interrupts enabled and the lock not held.
*/
static void fotg210_quiesce(struct fotg210_hcd *fotg210)
{
- u32 temp;
+ u32 temp;
if (fotg210->rh_state != FOTG210_RH_RUNNING)
return;
@@ -1012,7 +994,7 @@ static void fotg210_quiesce(struct fotg210_hcd *fotg210)
/* wait for any schedule enables/disables to take effect */
temp = (fotg210->command << 10) & (STS_ASS | STS_PSS);
handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, temp,
- 16 * 125);
+ 16 * 125);
/* then disable anything that's still active */
spin_lock_irq(&fotg210->lock);
@@ -1022,11 +1004,9 @@ static void fotg210_quiesce(struct fotg210_hcd *fotg210)
/* hardware can take 16 microframes to turn off ... */
handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, 0,
- 16 * 125);
+ 16 * 125);
}
-/*-------------------------------------------------------------------------*/
-
static void end_unlink_async(struct fotg210_hcd *fotg210);
static void unlink_empty_async(struct fotg210_hcd *fotg210);
static void fotg210_work(struct fotg210_hcd *fotg210);
@@ -1034,8 +1014,6 @@ static void start_unlink_intr(struct fotg210_hcd *fotg210,
struct fotg210_qh *qh);
static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
-/*-------------------------------------------------------------------------*/
-
/* Set a bit in the USBCMD register */
static void fotg210_set_command_bit(struct fotg210_hcd *fotg210, u32 bit)
{
@@ -1056,10 +1034,7 @@ static void fotg210_clear_command_bit(struct fotg210_hcd *fotg210, u32 bit)
fotg210_readl(fotg210, &fotg210->regs->command);
}
-/*-------------------------------------------------------------------------*/
-
-/*
- * EHCI timer support... Now using hrtimers.
+/* EHCI timer support... Now using hrtimers.
*
* Lots of different events are triggered from fotg210->hrtimer. Whenever
* the timer routine runs, it checks each possible event; events that are
@@ -1081,8 +1056,7 @@ static void fotg210_clear_command_bit(struct fotg210_hcd *fotg210, u32 bit)
* allow for an expiration range of 1 ms.
*/
-/*
- * Delay lengths for the hrtimer event types.
+/* Delay lengths for the hrtimer event types.
* Keep this list sorted by delay length, in the same order as
* the event types indexed by enum fotg210_hrtimer_event in fotg210.h.
*/
@@ -1103,7 +1077,7 @@ static unsigned event_delays_ns[] = {
static void fotg210_enable_event(struct fotg210_hcd *fotg210, unsigned event,
bool resched)
{
- ktime_t *timeout = &fotg210->hr_timeouts[event];
+ ktime_t *timeout = &fotg210->hr_timeouts[event];
if (resched)
*timeout = ktime_add(ktime_get(),
@@ -1122,7 +1096,7 @@ static void fotg210_enable_event(struct fotg210_hcd *fotg210, unsigned event,
/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
static void fotg210_poll_ASS(struct fotg210_hcd *fotg210)
{
- unsigned actual, want;
+ unsigned actual, want;
/* Don't enable anything if the controller isn't running (e.g., died) */
if (fotg210->rh_state != FOTG210_RH_RUNNING)
@@ -1136,7 +1110,7 @@ static void fotg210_poll_ASS(struct fotg210_hcd *fotg210)
/* Poll again later, but give up after about 20 ms */
if (fotg210->ASS_poll_count++ < 20) {
fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_ASS,
- true);
+ true);
return;
}
fotg210_dbg(fotg210, "Waited too long for the async schedule status (%x/%x), giving up\n",
@@ -1154,8 +1128,8 @@ static void fotg210_poll_ASS(struct fotg210_hcd *fotg210)
/* Turn off the schedule after a while */
fotg210_enable_event(fotg210,
- FOTG210_HRTIMER_DISABLE_ASYNC,
- true);
+ FOTG210_HRTIMER_DISABLE_ASYNC,
+ true);
}
}
}
@@ -1170,7 +1144,7 @@ static void fotg210_disable_ASE(struct fotg210_hcd *fotg210)
/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
static void fotg210_poll_PSS(struct fotg210_hcd *fotg210)
{
- unsigned actual, want;
+ unsigned actual, want;
/* Don't do anything if the controller isn't running (e.g., died) */
if (fotg210->rh_state != FOTG210_RH_RUNNING)
@@ -1184,7 +1158,7 @@ static void fotg210_poll_PSS(struct fotg210_hcd *fotg210)
/* Poll again later, but give up after about 20 ms */
if (fotg210->PSS_poll_count++ < 20) {
fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_PSS,
- true);
+ true);
return;
}
fotg210_dbg(fotg210, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
@@ -1202,8 +1176,8 @@ static void fotg210_poll_PSS(struct fotg210_hcd *fotg210)
/* Turn off the schedule after a while */
fotg210_enable_event(fotg210,
- FOTG210_HRTIMER_DISABLE_PERIODIC,
- true);
+ FOTG210_HRTIMER_DISABLE_PERIODIC,
+ true);
}
}
}
@@ -1224,7 +1198,7 @@ static void fotg210_handle_controller_death(struct fotg210_hcd *fotg210)
if (fotg210->died_poll_count++ < 5) {
/* Try again later */
fotg210_enable_event(fotg210,
- FOTG210_HRTIMER_POLL_DEAD, true);
+ FOTG210_HRTIMER_POLL_DEAD, true);
return;
}
fotg210_warn(fotg210, "Waited too long for the controller to stop, giving up\n");
@@ -1243,7 +1217,7 @@ static void fotg210_handle_controller_death(struct fotg210_hcd *fotg210)
/* Handle unlinked interrupt QHs once they are gone from the hardware */
static void fotg210_handle_intr_unlinks(struct fotg210_hcd *fotg210)
{
- bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING);
+ bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING);
/*
* Process all the QHs on the intr_unlink list that were added
@@ -1254,7 +1228,7 @@ static void fotg210_handle_intr_unlinks(struct fotg210_hcd *fotg210)
*/
fotg210->intr_unlinking = true;
while (fotg210->intr_unlink) {
- struct fotg210_qh *qh = fotg210->intr_unlink;
+ struct fotg210_qh *qh = fotg210->intr_unlink;
if (!stopped && qh->unlink_cycle == fotg210->intr_unlink_cycle)
break;
@@ -1266,7 +1240,7 @@ static void fotg210_handle_intr_unlinks(struct fotg210_hcd *fotg210)
/* Handle remaining entries later */
if (fotg210->intr_unlink) {
fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR,
- true);
+ true);
++fotg210->intr_unlink_cycle;
}
fotg210->intr_unlinking = false;
@@ -1288,7 +1262,7 @@ static void start_free_itds(struct fotg210_hcd *fotg210)
/* Wait for controller to stop using old iTDs and siTDs */
static void end_free_itds(struct fotg210_hcd *fotg210)
{
- struct fotg210_itd *itd, *n;
+ struct fotg210_itd *itd, *n;
if (fotg210->rh_state < FOTG210_RH_RUNNING)
fotg210->last_itd_to_free = NULL;
@@ -1339,7 +1313,7 @@ static void fotg210_iaa_watchdog(struct fotg210_hcd *fotg210)
if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
COUNT(fotg210->stats.lost_iaa);
fotg210_writel(fotg210, STS_IAA,
- &fotg210->regs->status);
+ &fotg210->regs->status);
}
fotg210_dbg(fotg210, "IAA watchdog: status %x cmd %x\n",
@@ -1355,7 +1329,7 @@ static void turn_on_io_watchdog(struct fotg210_hcd *fotg210)
/* Not needed if the controller isn't running or it's already enabled */
if (fotg210->rh_state != FOTG210_RH_RUNNING ||
(fotg210->enabled_hrtimer_events &
- BIT(FOTG210_HRTIMER_IO_WATCHDOG)))
+ BIT(FOTG210_HRTIMER_IO_WATCHDOG)))
return;
/*
@@ -1365,12 +1339,11 @@ static void turn_on_io_watchdog(struct fotg210_hcd *fotg210)
if (fotg210->isoc_count > 0 || (fotg210->need_io_watchdog &&
fotg210->async_count + fotg210->intr_count > 0))
fotg210_enable_event(fotg210, FOTG210_HRTIMER_IO_WATCHDOG,
- true);
+ true);
}
-/*
- * Handler functions for the hrtimer event types.
+/* Handler functions for the hrtimer event types.
* Keep this array in the same order as the event types indexed by
* enum fotg210_hrtimer_event in fotg210.h.
*/
@@ -1391,10 +1364,10 @@ static enum hrtimer_restart fotg210_hrtimer_func(struct hrtimer *t)
{
struct fotg210_hcd *fotg210 =
container_of(t, struct fotg210_hcd, hrtimer);
- ktime_t now;
- unsigned long events;
- unsigned long flags;
- unsigned e;
+ ktime_t now;
+ unsigned long events;
+ unsigned long flags;
+ unsigned e;
spin_lock_irqsave(&fotg210->lock, flags);
@@ -1418,50 +1391,37 @@ static enum hrtimer_restart fotg210_hrtimer_func(struct hrtimer *t)
return HRTIMER_NORESTART;
}
-/*-------------------------------------------------------------------------*/
-
-#define fotg210_bus_suspend NULL
-#define fotg210_bus_resume NULL
+#define fotg210_bus_suspend NULL
+#define fotg210_bus_resume NULL
-/*-------------------------------------------------------------------------*/
-
-static int check_reset_complete(
- struct fotg210_hcd *fotg210,
- int index,
- u32 __iomem *status_reg,
- int port_status
-) {
+static int check_reset_complete(struct fotg210_hcd *fotg210, int index,
+ u32 __iomem *status_reg, int port_status)
+{
if (!(port_status & PORT_CONNECT))
return port_status;
/* if reset finished and it's still not enabled -- handoff */
- if (!(port_status & PORT_PE)) {
+ if (!(port_status & PORT_PE))
/* with integrated TT, there's nobody to hand it to! */
- fotg210_dbg(fotg210,
- "Failed to enable port %d on root hub TT\n",
- index+1);
- return port_status;
- } else {
+ fotg210_dbg(fotg210, "Failed to enable port %d on root hub TT\n",
+ index + 1);
+ else
fotg210_dbg(fotg210, "port %d reset complete, port enabled\n",
- index + 1);
- }
+ index + 1);
return port_status;
}
-/*-------------------------------------------------------------------------*/
-
/* build "status change" packet (one or two bytes) from HC registers */
-static int
-fotg210_hub_status_data(struct usb_hcd *hcd, char *buf)
+static int fotg210_hub_status_data(struct usb_hcd *hcd, char *buf)
{
- struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
- u32 temp, status;
- u32 mask;
- int retval = 1;
- unsigned long flags;
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ u32 temp, status;
+ u32 mask;
+ int retval = 1;
+ unsigned long flags;
/* init status to no-changes */
buf[0] = 0;
@@ -1488,9 +1448,9 @@ fotg210_hub_status_data(struct usb_hcd *hcd, char *buf)
* controller by the user.
*/
- if ((temp & mask) != 0 || test_bit(0, &fotg210->port_c_suspend)
- || (fotg210->reset_done[0] && time_after_eq(
- jiffies, fotg210->reset_done[0]))) {
+ if ((temp & mask) != 0 || test_bit(0, &fotg210->port_c_suspend) ||
+ (fotg210->reset_done[0] &&
+ time_after_eq(jiffies, fotg210->reset_done[0]))) {
buf[0] |= 1 << 1;
status = STS_PCD;
}
@@ -1499,15 +1459,11 @@ fotg210_hub_status_data(struct usb_hcd *hcd, char *buf)
return status ? retval : 0;
}
-/*-------------------------------------------------------------------------*/
-
-static void
-fotg210_hub_descriptor(
- struct fotg210_hcd *fotg210,
- struct usb_hub_descriptor *desc
-) {
- int ports = HCS_N_PORTS(fotg210->hcs_params);
- u16 temp;
+static void fotg210_hub_descriptor(struct fotg210_hcd *fotg210,
+ struct usb_hub_descriptor *desc)
+{
+ int ports = HCS_N_PORTS(fotg210->hcs_params);
+ u16 temp;
desc->bDescriptorType = USB_DT_HUB;
desc->bPwrOn2PwrGood = 10; /* fotg210 1.0, 2.3.9 says 20ms max */
@@ -1526,23 +1482,16 @@ fotg210_hub_descriptor(
desc->wHubCharacteristics = cpu_to_le16(temp);
}
-/*-------------------------------------------------------------------------*/
-
-static int fotg210_hub_control(
- struct usb_hcd *hcd,
- u16 typeReq,
- u16 wValue,
- u16 wIndex,
- char *buf,
- u16 wLength
-) {
- struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
- int ports = HCS_N_PORTS(fotg210->hcs_params);
- u32 __iomem *status_reg = &fotg210->regs->port_status;
- u32 temp, temp1, status;
- unsigned long flags;
- int retval = 0;
- unsigned selector;
+static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ int ports = HCS_N_PORTS(fotg210->hcs_params);
+ u32 __iomem *status_reg = &fotg210->regs->port_status;
+ u32 temp, temp1, status;
+ unsigned long flags;
+ int retval = 0;
+ unsigned selector;
/*
* FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
@@ -1605,7 +1554,7 @@ static int fotg210_hub_control(
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
fotg210_writel(fotg210, temp | OTGISR_OVC,
- &fotg210->regs->otgisr);
+ &fotg210->regs->otgisr);
break;
case USB_PORT_FEAT_C_RESET:
/* GetPortStatus clears reset */
@@ -1617,7 +1566,7 @@ static int fotg210_hub_control(
break;
case GetHubDescriptor:
fotg210_hub_descriptor(fotg210, (struct usb_hub_descriptor *)
- buf);
+ buf);
break;
case GetHubStatus:
/* no hub-wide feature/status flags */
@@ -1663,16 +1612,16 @@ static int fotg210_hub_control(
/* stop resume signaling */
temp = fotg210_readl(fotg210, status_reg);
- fotg210_writel(fotg210,
- temp & ~(PORT_RWC_BITS | PORT_RESUME),
- status_reg);
+ fotg210_writel(fotg210, temp &
+ ~(PORT_RWC_BITS | PORT_RESUME),
+ status_reg);
clear_bit(wIndex, &fotg210->resuming_ports);
retval = handshake(fotg210, status_reg,
- PORT_RESUME, 0, 2000 /* 2msec */);
+ PORT_RESUME, 0, 2000);/* 2ms */
if (retval != 0) {
fotg210_err(fotg210,
- "port %d resume error %d\n",
- wIndex + 1, retval);
+ "port %d resume error %d\n",
+ wIndex + 1, retval);
goto error;
}
temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
@@ -1680,17 +1629,16 @@ static int fotg210_hub_control(
}
/* whoever resets must GetPortStatus to complete it!! */
- if ((temp & PORT_RESET)
- && time_after_eq(jiffies,
- fotg210->reset_done[wIndex])) {
+ if ((temp & PORT_RESET) && time_after_eq(jiffies,
+ fotg210->reset_done[wIndex])) {
status |= USB_PORT_STAT_C_RESET << 16;
fotg210->reset_done[wIndex] = 0;
clear_bit(wIndex, &fotg210->resuming_ports);
/* force reset to complete */
fotg210_writel(fotg210,
- temp & ~(PORT_RWC_BITS | PORT_RESET),
- status_reg);
+ temp & ~(PORT_RWC_BITS | PORT_RESET),
+ status_reg);
/* REVISIT: some hardware needs 550+ usec to clear
* this bit; seems too long to spin routinely...
*/
@@ -1698,7 +1646,7 @@ static int fotg210_hub_control(
PORT_RESET, 0, 1000);
if (retval != 0) {
fotg210_err(fotg210, "port %d reset error %d\n",
- wIndex + 1, retval);
+ wIndex + 1, retval);
goto error;
}
@@ -1718,7 +1666,7 @@ static int fotg210_hub_control(
temp &= ~PORT_RWC_BITS;
fotg210_writel(fotg210, temp, status_reg);
fotg210_dbg(fotg210, "port %d --> companion\n",
- wIndex + 1);
+ wIndex + 1);
temp = fotg210_readl(fotg210, status_reg);
}
@@ -1788,7 +1736,7 @@ static int fotg210_hub_control(
* mode if we have hostpc feature
*/
fotg210_writel(fotg210, temp | PORT_SUSPEND,
- status_reg);
+ status_reg);
set_bit(wIndex, &fotg210->suspended_ports);
break;
case USB_PORT_FEAT_RESET:
@@ -1866,9 +1814,8 @@ static int __maybe_unused fotg210_port_handed_over(struct usb_hcd *hcd,
{
return 0;
}
-/*-------------------------------------------------------------------------*/
-/*
- * There's basically three types of memory:
+
+/* There's basically three types of memory:
* - data used only by the HCD ... kmalloc is fine
* - async and periodic schedules, shared by HC and HCD ... these
* need to use dma_pool or dma_alloc_coherent
@@ -1878,12 +1825,9 @@ static int __maybe_unused fotg210_port_handed_over(struct usb_hcd *hcd,
* No memory seen by this driver is pageable.
*/
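For orientation, a minimal sketch of how fotg210_mem_init() pairs each of those categories with an allocator; the dma_pool_create() call is assumed from the usual EHCI-style setup, while the kcalloc() and dma_alloc_coherent() calls appear verbatim in the hunks further down:

	/* HCD-only bookkeeping (the shadow table): plain kernel memory */
	fotg210->pshadow = kcalloc(fotg210->periodic_size, sizeof(void *), flags);

	/* small HC-visible objects (qtd/qh/itd): fixed-size DMA pools */
	fotg210->qtd_pool = dma_pool_create("fotg210_qtd",
			fotg210_to_hcd(fotg210)->self.controller,
			sizeof(struct fotg210_qtd), 32 /* hw alignment */, 4096);

	/* the periodic frame list, read directly by the HC: coherent DMA */
	fotg210->periodic = (__le32 *)
		dma_alloc_coherent(fotg210_to_hcd(fotg210)->self.controller,
				fotg210->periodic_size * sizeof(__le32),
				&fotg210->periodic_dma, 0);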
-/*-------------------------------------------------------------------------*/
-
/* Allocate the key transfer structures from the previously allocated pool */
-
static inline void fotg210_qtd_init(struct fotg210_hcd *fotg210,
- struct fotg210_qtd *qtd, dma_addr_t dma)
+ struct fotg210_qtd *qtd, dma_addr_t dma)
{
memset(qtd, 0, sizeof(*qtd));
qtd->qtd_dma = dma;
@@ -1894,10 +1838,10 @@ static inline void fotg210_qtd_init(struct fotg210_hcd *fotg210,
}
static struct fotg210_qtd *fotg210_qtd_alloc(struct fotg210_hcd *fotg210,
- gfp_t flags)
+ gfp_t flags)
{
- struct fotg210_qtd *qtd;
- dma_addr_t dma;
+ struct fotg210_qtd *qtd;
+ dma_addr_t dma;
qtd = dma_pool_alloc(fotg210->qtd_pool, flags, &dma);
if (qtd != NULL)
@@ -1907,7 +1851,7 @@ static struct fotg210_qtd *fotg210_qtd_alloc(struct fotg210_hcd *fotg210,
}
static inline void fotg210_qtd_free(struct fotg210_hcd *fotg210,
- struct fotg210_qtd *qtd)
+ struct fotg210_qtd *qtd)
{
dma_pool_free(fotg210->qtd_pool, qtd, qtd->qtd_dma);
}
@@ -1927,10 +1871,10 @@ static void qh_destroy(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
}
static struct fotg210_qh *fotg210_qh_alloc(struct fotg210_hcd *fotg210,
- gfp_t flags)
+ gfp_t flags)
{
- struct fotg210_qh *qh;
- dma_addr_t dma;
+ struct fotg210_qh *qh;
+ dma_addr_t dma;
qh = kzalloc(sizeof(*qh), GFP_ATOMIC);
if (!qh)
@@ -1958,8 +1902,6 @@ fail:
return NULL;
}
-/*-------------------------------------------------------------------------*/
-
/* The queue heads and transfer descriptors are managed from pools tied
* to each of the "per device" structures.
* This is the initialisation and cleanup code.
@@ -1976,23 +1918,19 @@ static void fotg210_mem_cleanup(struct fotg210_hcd *fotg210)
fotg210->dummy = NULL;
/* DMA consistent memory and pools */
- if (fotg210->qtd_pool)
- dma_pool_destroy(fotg210->qtd_pool);
+ dma_pool_destroy(fotg210->qtd_pool);
fotg210->qtd_pool = NULL;
- if (fotg210->qh_pool) {
- dma_pool_destroy(fotg210->qh_pool);
- fotg210->qh_pool = NULL;
- }
+ dma_pool_destroy(fotg210->qh_pool);
+ fotg210->qh_pool = NULL;
- if (fotg210->itd_pool)
- dma_pool_destroy(fotg210->itd_pool);
+ dma_pool_destroy(fotg210->itd_pool);
fotg210->itd_pool = NULL;
if (fotg210->periodic)
dma_free_coherent(fotg210_to_hcd(fotg210)->self.controller,
- fotg210->periodic_size * sizeof(u32),
- fotg210->periodic, fotg210->periodic_dma);
+ fotg210->periodic_size * sizeof(u32),
+ fotg210->periodic, fotg210->periodic_dma);
fotg210->periodic = NULL;
/* shadow periodic table */
@@ -2039,8 +1977,8 @@ static int fotg210_mem_init(struct fotg210_hcd *fotg210, gfp_t flags)
/* Hardware periodic table */
fotg210->periodic = (__le32 *)
dma_alloc_coherent(fotg210_to_hcd(fotg210)->self.controller,
- fotg210->periodic_size * sizeof(__le32),
- &fotg210->periodic_dma, 0);
+ fotg210->periodic_size * sizeof(__le32),
+ &fotg210->periodic_dma, 0);
if (fotg210->periodic == NULL)
goto fail;
@@ -2049,7 +1987,7 @@ static int fotg210_mem_init(struct fotg210_hcd *fotg210, gfp_t flags)
/* software shadow of hardware table */
fotg210->pshadow = kcalloc(fotg210->periodic_size, sizeof(void *),
- flags);
+ flags);
if (fotg210->pshadow != NULL)
return 0;
@@ -2058,9 +1996,7 @@ fail:
fotg210_mem_cleanup(fotg210);
return -ENOMEM;
}
-/*-------------------------------------------------------------------------*/
-/*
- * EHCI hardware queue manipulation ... the core. QH/QTD manipulation.
+/* EHCI hardware queue manipulation ... the core. QH/QTD manipulation.
*
* Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd"
* entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
@@ -2077,16 +2013,12 @@ fail:
* buffer low/full speed data so the host collects it at high speed.
*/
-/*-------------------------------------------------------------------------*/
-
/* fill a qtd, returning how much of the buffer we were able to queue up */
-
-static int
-qtd_fill(struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd, dma_addr_t buf,
- size_t len, int token, int maxpacket)
+static int qtd_fill(struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd,
+ dma_addr_t buf, size_t len, int token, int maxpacket)
{
- int i, count;
- u64 addr = buf;
+ int i, count;
+ u64 addr = buf;
/* one buffer entry per 4K ... first might be short or unaligned */
qtd->hw_buf[0] = cpu_to_hc32(fotg210, (u32)addr);
@@ -2121,11 +2053,8 @@ qtd_fill(struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd, dma_addr_t buf,
return count;
}
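As a worked example of the 16-20 kB per-qtd ceiling mentioned in the comment block above: with the five 4 KiB buffer slots filled here, and assuming the urb buffer is larger than one qtd can carry,

	size_t first = 4096 - (buf & 4095);	/* slot 0 may start mid-page */
	size_t cap = first + 4 * 4096;		/* 20 KiB page-aligned, ~16 KiB worst case */
	cap -= cap % maxpacket;			/* qtd_fill() only stops on a packet boundary */

so a page-aligned 20 KiB transfer fits in a single qtd, while an unaligned one may need a second.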
-/*-------------------------------------------------------------------------*/
-
-static inline void
-qh_update(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
- struct fotg210_qtd *qtd)
+static inline void qh_update(struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh, struct fotg210_qtd *qtd)
{
struct fotg210_qh_hw *hw = qh->hw;
@@ -2141,7 +2070,7 @@ qh_update(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
* ever clear it.
*/
if (!(hw->hw_info1 & cpu_to_hc32(fotg210, QH_TOGGLE_CTL))) {
- unsigned is_out, epnum;
+ unsigned is_out, epnum;
is_out = qh->is_out;
epnum = (hc32_to_cpup(fotg210, &hw->hw_info1) >> 8) & 0x0f;
@@ -2158,8 +2087,7 @@ qh_update(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
* overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
* recovery (including urb dequeue) would need software changes to a QH...
*/
-static void
-qh_refresh(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+static void qh_refresh(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
{
struct fotg210_qtd *qtd;
@@ -2185,16 +2113,14 @@ qh_refresh(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
qh_update(fotg210, qh, qtd);
}
-/*-------------------------------------------------------------------------*/
-
static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
static void fotg210_clear_tt_buffer_complete(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
- struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
- struct fotg210_qh *qh = ep->hcpriv;
- unsigned long flags;
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ struct fotg210_qh *qh = ep->hcpriv;
+ unsigned long flags;
spin_lock_irqsave(&fotg210->lock, flags);
qh->clearing_tt = 0;
@@ -2205,8 +2131,7 @@ static void fotg210_clear_tt_buffer_complete(struct usb_hcd *hcd,
}
static void fotg210_clear_tt_buffer(struct fotg210_hcd *fotg210,
- struct fotg210_qh *qh,
- struct urb *urb, u32 token)
+ struct fotg210_qh *qh, struct urb *urb, u32 token)
{
/* If an async split transaction gets an error or is unlinked,
@@ -2217,27 +2142,24 @@ static void fotg210_clear_tt_buffer(struct fotg210_hcd *fotg210,
*/
if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
struct usb_device *tt = urb->dev->tt->hub;
+
dev_dbg(&tt->dev,
- "clear tt buffer port %d, a%d ep%d t%08x\n",
- urb->dev->ttport, urb->dev->devnum,
- usb_pipeendpoint(urb->pipe), token);
+ "clear tt buffer port %d, a%d ep%d t%08x\n",
+ urb->dev->ttport, urb->dev->devnum,
+ usb_pipeendpoint(urb->pipe), token);
if (urb->dev->tt->hub !=
- fotg210_to_hcd(fotg210)->self.root_hub) {
+ fotg210_to_hcd(fotg210)->self.root_hub) {
if (usb_hub_clear_tt_buffer(urb) == 0)
qh->clearing_tt = 1;
}
}
}
-static int qtd_copy_status(
- struct fotg210_hcd *fotg210,
- struct urb *urb,
- size_t length,
- u32 token
-)
+static int qtd_copy_status(struct fotg210_hcd *fotg210, struct urb *urb,
+ size_t length, u32 token)
{
- int status = -EINPROGRESS;
+ int status = -EINPROGRESS;
/* count IN/OUT bytes, not SETUP (even short packets) */
if (likely(QTD_PID(token) != 2))
@@ -2274,32 +2196,32 @@ static int qtd_copy_status(
} else if (token & QTD_STS_XACT) {
/* timeout, bad CRC, wrong PID, etc */
fotg210_dbg(fotg210, "devpath %s ep%d%s 3strikes\n",
- urb->dev->devpath,
- usb_pipeendpoint(urb->pipe),
- usb_pipein(urb->pipe) ? "in" : "out");
+ urb->dev->devpath,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out");
status = -EPROTO;
} else { /* unknown */
status = -EPROTO;
}
fotg210_dbg(fotg210,
- "dev%d ep%d%s qtd token %08x --> status %d\n",
- usb_pipedevice(urb->pipe),
- usb_pipeendpoint(urb->pipe),
- usb_pipein(urb->pipe) ? "in" : "out",
- token, status);
+ "dev%d ep%d%s qtd token %08x --> status %d\n",
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ token, status);
}
return status;
}
-static void
-fotg210_urb_done(struct fotg210_hcd *fotg210, struct urb *urb, int status)
+static void fotg210_urb_done(struct fotg210_hcd *fotg210, struct urb *urb,
+ int status)
__releases(fotg210->lock)
__acquires(fotg210->lock)
{
if (likely(urb->hcpriv != NULL)) {
- struct fotg210_qh *qh = (struct fotg210_qh *) urb->hcpriv;
+ struct fotg210_qh *qh = (struct fotg210_qh *) urb->hcpriv;
/* S-mask in a QH means it's an interrupt urb */
if ((qh->hw->hw_info2 & cpu_to_hc32(fotg210, QH_SMASK)) != 0) {
@@ -2320,12 +2242,12 @@ __acquires(fotg210->lock)
#ifdef FOTG210_URB_TRACE
fotg210_dbg(fotg210,
- "%s %s urb %p ep%d%s status %d len %d/%d\n",
- __func__, urb->dev->devpath, urb,
- usb_pipeendpoint(urb->pipe),
- usb_pipein(urb->pipe) ? "in" : "out",
- status,
- urb->actual_length, urb->transfer_buffer_length);
+ "%s %s urb %p ep%d%s status %d len %d/%d\n",
+ __func__, urb->dev->devpath, urb,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ status,
+ urb->actual_length, urb->transfer_buffer_length);
#endif
/* complete() can reenter this HCD */
@@ -2337,21 +2259,20 @@ __acquires(fotg210->lock)
static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
-/*
- * Process and free completed qtds for a qh, returning URBs to drivers.
+/* Process and free completed qtds for a qh, returning URBs to drivers.
* Chases up to qh->hw_current. Returns number of completions called,
* indicating how much "real" work we did.
*/
-static unsigned
-qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+static unsigned qh_completions(struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh)
{
- struct fotg210_qtd *last, *end = qh->dummy;
- struct list_head *entry, *tmp;
- int last_status;
- int stopped;
- unsigned count = 0;
- u8 state;
- struct fotg210_qh_hw *hw = qh->hw;
+ struct fotg210_qtd *last, *end = qh->dummy;
+ struct list_head *entry, *tmp;
+ int last_status;
+ int stopped;
+ unsigned count = 0;
+ u8 state;
+ struct fotg210_qh_hw *hw = qh->hw;
if (unlikely(list_empty(&qh->qtd_list)))
return count;
@@ -2370,7 +2291,7 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
qh->qh_state = QH_STATE_COMPLETING;
stopped = (state == QH_STATE_IDLE);
- rescan:
+rescan:
last = NULL;
last_status = -EINPROGRESS;
qh->needs_rescan = 0;
@@ -2381,9 +2302,9 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
* if queue is stopped, handles unlinks.
*/
list_for_each_safe(entry, tmp, &qh->qtd_list) {
- struct fotg210_qtd *qtd;
- struct urb *urb;
- u32 token = 0;
+ struct fotg210_qtd *qtd;
+ struct urb *urb;
+ u32 token = 0;
qtd = list_entry(entry, struct fotg210_qtd, qtd_list);
urb = qtd->urb;
@@ -2392,7 +2313,7 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
if (last) {
if (likely(last->urb != urb)) {
fotg210_urb_done(fotg210, last->urb,
- last_status);
+ last_status);
count++;
last_status = -EINPROGRESS;
}
@@ -2409,20 +2330,17 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
token = hc32_to_cpu(fotg210, qtd->hw_token);
/* always clean up qtds the hc de-activated */
- retry_xacterr:
+retry_xacterr:
if ((token & QTD_STS_ACTIVE) == 0) {
/* Report Data Buffer Error: non-fatal but useful */
if (token & QTD_STS_DBE)
fotg210_dbg(fotg210,
"detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
- urb,
- usb_endpoint_num(&urb->ep->desc),
+ urb, usb_endpoint_num(&urb->ep->desc),
usb_endpoint_dir_in(&urb->ep->desc)
? "in" : "out",
- urb->transfer_buffer_length,
- qtd,
- qh);
+ urb->transfer_buffer_length, qtd, qh);
/* on STALL, error, and short reads this urb must
* complete and all its qtds must be recycled.
@@ -2433,12 +2351,14 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
* reach the software xacterr limit
*/
if ((token & QTD_STS_XACT) &&
- QTD_CERR(token) == 0 &&
- ++qh->xacterrs < QH_XACTERR_MAX &&
- !urb->unlinked) {
+ QTD_CERR(token) == 0 &&
+ ++qh->xacterrs < QH_XACTERR_MAX &&
+ !urb->unlinked) {
fotg210_dbg(fotg210,
- "detected XactErr len %zu/%zu retry %d\n",
- qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);
+ "detected XactErr len %zu/%zu retry %d\n",
+ qtd->length - QTD_LENGTH(token),
+ qtd->length,
+ qh->xacterrs);
/* reset the token in the qtd and the
* qh overlay (which still contains
@@ -2466,9 +2386,9 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
* URB_SHORT_NOT_OK was set so the driver submitting
* the urbs could clean it up.
*/
- } else if (IS_SHORT_READ(token)
- && !(qtd->hw_alt_next
- & FOTG210_LIST_END(fotg210))) {
+ } else if (IS_SHORT_READ(token) &&
+ !(qtd->hw_alt_next &
+ FOTG210_LIST_END(fotg210))) {
stopped = 1;
}
@@ -2492,9 +2412,9 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
continue;
/* qh unlinked; token in overlay may be most current */
- if (state == QH_STATE_IDLE
- && cpu_to_hc32(fotg210, qtd->qtd_dma)
- == hw->hw_current) {
+ if (state == QH_STATE_IDLE &&
+ cpu_to_hc32(fotg210, qtd->qtd_dma)
+ == hw->hw_current) {
token = hc32_to_cpu(fotg210, hw->hw_token);
/* An unlink may leave an incomplete
@@ -2502,7 +2422,7 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
* We have to clear it.
*/
fotg210_clear_tt_buffer(fotg210, qh, urb,
- token);
+ token);
}
}
@@ -2516,9 +2436,9 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
if (last_status == -EINPROGRESS) {
last_status = qtd_copy_status(fotg210, urb,
qtd->length, token);
- if (last_status == -EREMOTEIO
- && (qtd->hw_alt_next
- & FOTG210_LIST_END(fotg210)))
+ if (last_status == -EREMOTEIO &&
+ (qtd->hw_alt_next &
+ FOTG210_LIST_END(fotg210)))
last_status = -EINPROGRESS;
/* As part of low/full-speed endpoint-halt processing
@@ -2537,7 +2457,7 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
*/
if (last_status != -EPIPE)
fotg210_clear_tt_buffer(fotg210, qh,
- urb, token);
+ urb, token);
}
}
@@ -2615,26 +2535,21 @@ qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
return count;
}
-/*-------------------------------------------------------------------------*/
-
/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
/* ... and packet size, for any kind of endpoint descriptor */
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
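For example, a high-bandwidth high-speed endpoint advertising wMaxPacketSize == 0x1400 decodes as:

	hb_mult(0x1400);	/* == 1 + ((0x1400 >> 11) & 0x03) == 3 transactions per microframe */
	max_packet(0x1400);	/* == 0x1400 & 0x07ff == 0x400, i.e. 1024 bytes each, 3072 bytes/uframe */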
-/*
- * reverse of qh_urb_transaction: free a list of TDs.
+/* reverse of qh_urb_transaction: free a list of TDs.
* used for cleanup after errors, before HC sees an URB's TDs.
*/
-static void qtd_list_free(
- struct fotg210_hcd *fotg210,
- struct urb *urb,
- struct list_head *qtd_list
-) {
- struct list_head *entry, *temp;
+static void qtd_list_free(struct fotg210_hcd *fotg210, struct urb *urb,
+ struct list_head *qtd_list)
+{
+ struct list_head *entry, *temp;
list_for_each_safe(entry, temp, qtd_list) {
- struct fotg210_qtd *qtd;
+ struct fotg210_qtd *qtd;
qtd = list_entry(entry, struct fotg210_qtd, qtd_list);
list_del(&qtd->qtd_list);
@@ -2642,23 +2557,18 @@ static void qtd_list_free(
}
}
-/*
- * create a list of filled qtds for this URB; won't link into qh.
+/* create a list of filled qtds for this URB; won't link into qh.
*/
-static struct list_head *
-qh_urb_transaction(
- struct fotg210_hcd *fotg210,
- struct urb *urb,
- struct list_head *head,
- gfp_t flags
-) {
- struct fotg210_qtd *qtd, *qtd_prev;
- dma_addr_t buf;
- int len, this_sg_len, maxpacket;
- int is_input;
- u32 token;
- int i;
- struct scatterlist *sg;
+static struct list_head *qh_urb_transaction(struct fotg210_hcd *fotg210,
+ struct urb *urb, struct list_head *head, gfp_t flags)
+{
+ struct fotg210_qtd *qtd, *qtd_prev;
+ dma_addr_t buf;
+ int len, this_sg_len, maxpacket;
+ int is_input;
+ u32 token;
+ int i;
+ struct scatterlist *sg;
/*
* URBs map to sequences of QTDs: one logical transaction
@@ -2768,8 +2678,8 @@ qh_urb_transaction(
* have the alt_next mechanism keep the queue running after the
* last data qtd (the only one, for control and most other cases).
*/
- if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
- || usb_pipecontrol(urb->pipe)))
+ if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0 ||
+ usb_pipecontrol(urb->pipe)))
qtd->hw_alt_next = FOTG210_LIST_END(fotg210);
/*
@@ -2778,7 +2688,7 @@ qh_urb_transaction(
* (zero length).
*/
if (likely(urb->transfer_buffer_length != 0)) {
- int one_more = 0;
+ int one_more = 0;
if (usb_pipecontrol(urb->pipe)) {
one_more = 1;
@@ -2813,9 +2723,7 @@ cleanup:
return NULL;
}
-/*-------------------------------------------------------------------------*/
-/*
- * Would be best to create all qh's from config descriptors,
+/* Would be best to create all qh's from config descriptors,
* when each interface/altsetting is established. Unlink
* any previous qh and cancel its urbs first; endpoints are
* implicitly reset then (data toggle too).
@@ -2823,26 +2731,22 @@ cleanup:
*/
-/*
- * Each QH holds a qtd list; a QH is used for everything except iso.
+/* Each QH holds a qtd list; a QH is used for everything except iso.
*
* For interrupt urbs, the scheduler must set the microframe scheduling
* mask(s) each time the QH gets scheduled. For highspeed, that's
* just one microframe in the s-mask. For split interrupt transactions
* there are additional complications: c-mask, maybe FSTNs.
*/
-static struct fotg210_qh *
-qh_make(
- struct fotg210_hcd *fotg210,
- struct urb *urb,
- gfp_t flags
-) {
- struct fotg210_qh *qh = fotg210_qh_alloc(fotg210, flags);
- u32 info1 = 0, info2 = 0;
- int is_input, type;
- int maxp = 0;
- struct usb_tt *tt = urb->dev->tt;
- struct fotg210_qh_hw *hw;
+static struct fotg210_qh *qh_make(struct fotg210_hcd *fotg210, struct urb *urb,
+ gfp_t flags)
+{
+ struct fotg210_qh *qh = fotg210_qh_alloc(fotg210, flags);
+ u32 info1 = 0, info2 = 0;
+ int is_input, type;
+ int maxp = 0;
+ struct usb_tt *tt = urb->dev->tt;
+ struct fotg210_qh_hw *hw;
if (!qh)
return qh;
@@ -2862,7 +2766,7 @@ qh_make(
*/
if (max_packet(maxp) > 1024) {
fotg210_dbg(fotg210, "bogus qh maxpacket %d\n",
- max_packet(maxp));
+ max_packet(maxp));
goto done;
}
@@ -2896,7 +2800,7 @@ qh_make(
urb->interval = qh->period << 3;
}
} else {
- int think_time;
+ int think_time;
/* gap is f(FS/LS transfer times) */
qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
@@ -2986,7 +2890,7 @@ qh_make(
break;
default:
fotg210_dbg(fotg210, "bogus dev %p speed %d\n", urb->dev,
- urb->dev->speed);
+ urb->dev->speed);
done:
qh_destroy(fotg210, qh);
return NULL;
@@ -3005,8 +2909,6 @@ done:
return qh;
}
-/*-------------------------------------------------------------------------*/
-
static void enable_async(struct fotg210_hcd *fotg210)
{
if (fotg210->async_count++)
@@ -3036,8 +2938,8 @@ static void disable_async(struct fotg210_hcd *fotg210)
static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
{
- __hc32 dma = QH_NEXT(fotg210, qh->qh_dma);
- struct fotg210_qh *head;
+ __hc32 dma = QH_NEXT(fotg210, qh->qh_dma);
+ struct fotg210_qh *head;
/* Don't link a QH if there's a Clear-TT-Buffer pending */
if (unlikely(qh->clearing_tt))
@@ -3064,24 +2966,17 @@ static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
enable_async(fotg210);
}
-/*-------------------------------------------------------------------------*/
-
-/*
- * For control/bulk/interrupt, return QH with these TDs appended.
+/* For control/bulk/interrupt, return QH with these TDs appended.
* Allocates and initializes the QH if necessary.
* Returns null if it can't allocate a QH it needs to.
* If the QH has TDs (urbs) already, that's great.
*/
-static struct fotg210_qh *qh_append_tds(
- struct fotg210_hcd *fotg210,
- struct urb *urb,
- struct list_head *qtd_list,
- int epnum,
- void **ptr
-)
+static struct fotg210_qh *qh_append_tds(struct fotg210_hcd *fotg210,
+ struct urb *urb, struct list_head *qtd_list,
+ int epnum, void **ptr)
{
- struct fotg210_qh *qh = NULL;
- __hc32 qh_addr_mask = cpu_to_hc32(fotg210, 0x7f);
+ struct fotg210_qh *qh = NULL;
+ __hc32 qh_addr_mask = cpu_to_hc32(fotg210, 0x7f);
qh = (struct fotg210_qh *) *ptr;
if (unlikely(qh == NULL)) {
@@ -3090,7 +2985,7 @@ static struct fotg210_qh *qh_append_tds(
*ptr = qh;
}
if (likely(qh != NULL)) {
- struct fotg210_qtd *qtd;
+ struct fotg210_qtd *qtd;
if (unlikely(list_empty(qtd_list)))
qtd = NULL;
@@ -3109,9 +3004,9 @@ static struct fotg210_qh *qh_append_tds(
* only hc or qh_refresh() ever modify the overlay.
*/
if (likely(qtd != NULL)) {
- struct fotg210_qtd *dummy;
- dma_addr_t dma;
- __hc32 token;
+ struct fotg210_qtd *dummy;
+ dma_addr_t dma;
+ __hc32 token;
/* to avoid racing the HC, use the dummy td instead of
* the first td of our list (becomes new dummy). both
@@ -3150,32 +3045,28 @@ static struct fotg210_qh *qh_append_tds(
return qh;
}
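The part of qh_append_tds() that falls between these hunks relies on EHCI's dummy-qtd trick, so the list can grow without ever showing the controller a half-built descriptor; in outline (paraphrased, not the literal code):

	*dummy = *qtd;			/* old dummy takes over the first new qtd's contents */
	list_del(&qtd->qtd_list);
	qh->dummy = qtd;		/* ...and that qtd becomes the queue's new dummy */
	wmb();
	dummy->hw_token = token;	/* only now does the HC see the new work as active */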
-/*-------------------------------------------------------------------------*/
-
-static int
-submit_async(
- struct fotg210_hcd *fotg210,
- struct urb *urb,
- struct list_head *qtd_list,
- gfp_t mem_flags
-) {
- int epnum;
- unsigned long flags;
- struct fotg210_qh *qh = NULL;
- int rc;
+static int submit_async(struct fotg210_hcd *fotg210, struct urb *urb,
+ struct list_head *qtd_list, gfp_t mem_flags)
+{
+ int epnum;
+ unsigned long flags;
+ struct fotg210_qh *qh = NULL;
+ int rc;
epnum = urb->ep->desc.bEndpointAddress;
#ifdef FOTG210_URB_TRACE
{
struct fotg210_qtd *qtd;
+
qtd = list_entry(qtd_list->next, struct fotg210_qtd, qtd_list);
fotg210_dbg(fotg210,
- "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
- __func__, urb->dev->devpath, urb,
- epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
- urb->transfer_buffer_length,
- qtd, urb->ep->hcpriv);
+ "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ __func__, urb->dev->devpath, urb,
+ epnum & 0x0f, (epnum & USB_DIR_IN)
+ ? "in" : "out",
+ urb->transfer_buffer_length,
+ qtd, urb->ep->hcpriv);
}
#endif
@@ -3200,19 +3091,17 @@ submit_async(
*/
if (likely(qh->qh_state == QH_STATE_IDLE))
qh_link_async(fotg210, qh);
- done:
+done:
spin_unlock_irqrestore(&fotg210->lock, flags);
if (unlikely(qh == NULL))
qtd_list_free(fotg210, urb, qtd_list);
return rc;
}
-/*-------------------------------------------------------------------------*/
-
static void single_unlink_async(struct fotg210_hcd *fotg210,
- struct fotg210_qh *qh)
+ struct fotg210_qh *qh)
{
- struct fotg210_qh *prev;
+ struct fotg210_qh *prev;
/* Add to the end of the list of QHs waiting for the next IAAD */
qh->qh_state = QH_STATE_UNLINK;
@@ -3260,7 +3149,7 @@ static void start_iaa_cycle(struct fotg210_hcd *fotg210, bool nested)
&fotg210->regs->command);
fotg210_readl(fotg210, &fotg210->regs->command);
fotg210_enable_event(fotg210, FOTG210_HRTIMER_IAA_WATCHDOG,
- true);
+ true);
}
}
@@ -3268,10 +3157,10 @@ static void start_iaa_cycle(struct fotg210_hcd *fotg210, bool nested)
static void end_unlink_async(struct fotg210_hcd *fotg210)
{
- struct fotg210_qh *qh;
+ struct fotg210_qh *qh;
/* Process the idle QHs */
- restart:
+restart:
fotg210->async_unlinking = true;
while (fotg210->async_iaa) {
qh = fotg210->async_iaa;
@@ -3326,7 +3215,7 @@ static void unlink_empty_async(struct fotg210_hcd *fotg210)
/* QHs that haven't been empty for long enough will be handled later */
if (check_unlinks_later) {
fotg210_enable_event(fotg210, FOTG210_HRTIMER_ASYNC_UNLINKS,
- true);
+ true);
++fotg210->async_unlink_cycle;
}
}
@@ -3335,7 +3224,7 @@ static void unlink_empty_async(struct fotg210_hcd *fotg210)
/* caller must own fotg210->lock */
static void start_unlink_async(struct fotg210_hcd *fotg210,
- struct fotg210_qh *qh)
+ struct fotg210_qh *qh)
{
/*
* If the QH isn't linked then there's nothing we can do
@@ -3352,18 +3241,16 @@ static void start_unlink_async(struct fotg210_hcd *fotg210,
start_iaa_cycle(fotg210, false);
}
-/*-------------------------------------------------------------------------*/
-
static void scan_async(struct fotg210_hcd *fotg210)
{
- struct fotg210_qh *qh;
- bool check_unlinks_later = false;
+ struct fotg210_qh *qh;
+ bool check_unlinks_later = false;
fotg210->qh_scan_next = fotg210->async->qh_next.qh;
while (fotg210->qh_scan_next) {
qh = fotg210->qh_scan_next;
fotg210->qh_scan_next = qh->qh_next.qh;
- rescan:
+rescan:
/* clean any finished work for this qh */
if (!list_empty(&qh->qtd_list)) {
int temp;
@@ -3395,15 +3282,13 @@ static void scan_async(struct fotg210_hcd *fotg210)
*/
if (check_unlinks_later && fotg210->rh_state == FOTG210_RH_RUNNING &&
!(fotg210->enabled_hrtimer_events &
- BIT(FOTG210_HRTIMER_ASYNC_UNLINKS))) {
+ BIT(FOTG210_HRTIMER_ASYNC_UNLINKS))) {
fotg210_enable_event(fotg210,
- FOTG210_HRTIMER_ASYNC_UNLINKS, true);
+ FOTG210_HRTIMER_ASYNC_UNLINKS, true);
++fotg210->async_unlink_cycle;
}
}
-/*-------------------------------------------------------------------------*/
-/*
- * EHCI scheduled transaction support: interrupt, iso, split iso
+/* EHCI scheduled transaction support: interrupt, iso, split iso
* These are called "periodic" transactions in the EHCI spec.
*
* Note that for interrupt transfers, the QH/QTD manipulation is shared
@@ -3414,19 +3299,14 @@ static void scan_async(struct fotg210_hcd *fotg210)
* It keeps track of every ITD (or SITD) that's linked, and holds enough
* pre-calculated schedule data to make appending to the queue be quick.
*/
-
static int fotg210_get_frame(struct usb_hcd *hcd);
-/*-------------------------------------------------------------------------*/
-
-/*
- * periodic_next_shadow - return "next" pointer on shadow list
+/* periodic_next_shadow - return "next" pointer on shadow list
* @periodic: host pointer to qh/itd
* @tag: hardware tag for type of this record
*/
-static union fotg210_shadow *
-periodic_next_shadow(struct fotg210_hcd *fotg210,
- union fotg210_shadow *periodic, __hc32 tag)
+static union fotg210_shadow *periodic_next_shadow(struct fotg210_hcd *fotg210,
+ union fotg210_shadow *periodic, __hc32 tag)
{
switch (hc32_to_cpu(fotg210, tag)) {
case Q_TYPE_QH:
@@ -3438,9 +3318,8 @@ periodic_next_shadow(struct fotg210_hcd *fotg210,
}
}
-static __hc32 *
-shadow_next_periodic(struct fotg210_hcd *fotg210,
- union fotg210_shadow *periodic, __hc32 tag)
+static __hc32 *shadow_next_periodic(struct fotg210_hcd *fotg210,
+ union fotg210_shadow *periodic, __hc32 tag)
{
switch (hc32_to_cpu(fotg210, tag)) {
/* our fotg210_shadow.qh is actually software part */
@@ -3454,11 +3333,11 @@ shadow_next_periodic(struct fotg210_hcd *fotg210,
/* caller must hold fotg210->lock */
static void periodic_unlink(struct fotg210_hcd *fotg210, unsigned frame,
- void *ptr)
+ void *ptr)
{
- union fotg210_shadow *prev_p = &fotg210->pshadow[frame];
- __hc32 *hw_p = &fotg210->periodic[frame];
- union fotg210_shadow here = *prev_p;
+ union fotg210_shadow *prev_p = &fotg210->pshadow[frame];
+ __hc32 *hw_p = &fotg210->periodic[frame];
+ union fotg210_shadow here = *prev_p;
/* find predecessor of "ptr"; hw and shadow lists are in sync */
while (here.ptr && here.ptr != ptr) {
@@ -3479,17 +3358,17 @@ static void periodic_unlink(struct fotg210_hcd *fotg210, unsigned frame,
Q_NEXT_TYPE(fotg210, *hw_p));
*hw_p = *shadow_next_periodic(fotg210, &here,
- Q_NEXT_TYPE(fotg210, *hw_p));
+ Q_NEXT_TYPE(fotg210, *hw_p));
}
/* how many of the uframe's 125 usecs are allocated? */
-static unsigned short
-periodic_usecs(struct fotg210_hcd *fotg210, unsigned frame, unsigned uframe)
+static unsigned short periodic_usecs(struct fotg210_hcd *fotg210,
+ unsigned frame, unsigned uframe)
{
- __hc32 *hw_p = &fotg210->periodic[frame];
- union fotg210_shadow *q = &fotg210->pshadow[frame];
- unsigned usecs = 0;
- struct fotg210_qh_hw *hw;
+ __hc32 *hw_p = &fotg210->periodic[frame];
+ union fotg210_shadow *q = &fotg210->pshadow[frame];
+ unsigned usecs = 0;
+ struct fotg210_qh_hw *hw;
while (q->ptr) {
switch (hc32_to_cpu(fotg210, Q_NEXT_TYPE(fotg210, *hw_p))) {
@@ -3526,12 +3405,10 @@ periodic_usecs(struct fotg210_hcd *fotg210, unsigned frame, unsigned uframe)
}
if (usecs > fotg210->uframe_periodic_max)
fotg210_err(fotg210, "uframe %d sched overrun: %d usecs\n",
- frame * 8 + uframe, usecs);
+ frame * 8 + uframe, usecs);
return usecs;
}
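The total returned here is weighed against fotg210->uframe_periodic_max, the share of each 125 µs microframe that periodic transfers may claim (100 µs, i.e. 80%, with the usual EHCI default); whatever remains is headroom for async traffic. check_period() further below consumes it roughly like this:

	usecs = fotg210->uframe_periodic_max - usecs;	/* turn "needed" into allowed headroom */
	claimed = periodic_usecs(fotg210, frame, uframe);
	if (claimed > usecs)
		return 0;	/* this microframe cannot take the new transfer */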
-/*-------------------------------------------------------------------------*/
-
static int same_tt(struct usb_device *dev1, struct usb_device *dev2)
{
if (!dev1->tt || !dev2->tt)
@@ -3548,13 +3425,8 @@ static int same_tt(struct usb_device *dev1, struct usb_device *dev2)
* for a periodic transfer starting at the specified frame, using
* all the uframes in the mask.
*/
-static int tt_no_collision(
- struct fotg210_hcd *fotg210,
- unsigned period,
- struct usb_device *dev,
- unsigned frame,
- u32 uf_mask
-)
+static int tt_no_collision(struct fotg210_hcd *fotg210, unsigned period,
+ struct usb_device *dev, unsigned frame, u32 uf_mask)
{
if (period == 0) /* error */
return 0;
@@ -3564,9 +3436,9 @@ static int tt_no_collision(
* calling convention doesn't make that distinction.
*/
for (; frame < fotg210->periodic_size; frame += period) {
- union fotg210_shadow here;
- __hc32 type;
- struct fotg210_qh_hw *hw;
+ union fotg210_shadow here;
+ __hc32 type;
+ struct fotg210_qh_hw *hw;
here = fotg210->pshadow[frame];
type = Q_NEXT_TYPE(fotg210, fotg210->periodic[frame]);
@@ -3579,7 +3451,7 @@ static int tt_no_collision(
case Q_TYPE_QH:
hw = here.qh->hw;
if (same_tt(dev, here.qh->dev)) {
- u32 mask;
+ u32 mask;
mask = hc32_to_cpu(fotg210,
hw->hw_info2);
@@ -3594,8 +3466,8 @@ static int tt_no_collision(
/* case Q_TYPE_FSTN: */
default:
fotg210_dbg(fotg210,
- "periodic frame %d bogus type %d\n",
- frame, type);
+ "periodic frame %d bogus type %d\n",
+ frame, type);
}
/* collision or error */
@@ -3607,8 +3479,6 @@ static int tt_no_collision(
return 1;
}
-/*-------------------------------------------------------------------------*/
-
static void enable_periodic(struct fotg210_hcd *fotg210)
{
if (fotg210->periodic_count++)
@@ -3632,8 +3502,6 @@ static void disable_periodic(struct fotg210_hcd *fotg210)
fotg210_poll_PSS(fotg210);
}
-/*-------------------------------------------------------------------------*/
-
/* periodic schedule slots have iso tds (normal or split) first, then a
* sparse tree for active interrupt transfers.
*
@@ -3642,24 +3510,24 @@ static void disable_periodic(struct fotg210_hcd *fotg210)
*/
static void qh_link_periodic(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
{
- unsigned i;
- unsigned period = qh->period;
+ unsigned i;
+ unsigned period = qh->period;
dev_dbg(&qh->dev->dev,
- "link qh%d-%04x/%p start %d [%d/%d us]\n",
- period, hc32_to_cpup(fotg210, &qh->hw->hw_info2)
- & (QH_CMASK | QH_SMASK),
- qh, qh->start, qh->usecs, qh->c_usecs);
+ "link qh%d-%04x/%p start %d [%d/%d us]\n", period,
+ hc32_to_cpup(fotg210, &qh->hw->hw_info2) &
+ (QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs,
+ qh->c_usecs);
/* high bandwidth, or otherwise every microframe */
if (period == 0)
period = 1;
for (i = qh->start; i < fotg210->periodic_size; i += period) {
- union fotg210_shadow *prev = &fotg210->pshadow[i];
- __hc32 *hw_p = &fotg210->periodic[i];
- union fotg210_shadow here = *prev;
- __hc32 type = 0;
+ union fotg210_shadow *prev = &fotg210->pshadow[i];
+ __hc32 *hw_p = &fotg210->periodic[i];
+ union fotg210_shadow here = *prev;
+ __hc32 type = 0;
/* skip the iso nodes at list head */
while (here.ptr) {
@@ -3707,10 +3575,10 @@ static void qh_link_periodic(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
}
static void qh_unlink_periodic(struct fotg210_hcd *fotg210,
- struct fotg210_qh *qh)
+ struct fotg210_qh *qh)
{
- unsigned i;
- unsigned period;
+ unsigned i;
+ unsigned period;
/*
* If qh is for a low/full-speed device, simply unlinking it
@@ -3741,10 +3609,10 @@ static void qh_unlink_periodic(struct fotg210_hcd *fotg210,
: (qh->usecs * 8);
dev_dbg(&qh->dev->dev,
- "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
- qh->period,
- hc32_to_cpup(fotg210, &qh->hw->hw_info2) &
- (QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs, qh->c_usecs);
+ "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
+ qh->period, hc32_to_cpup(fotg210, &qh->hw->hw_info2) &
+ (QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs,
+ qh->c_usecs);
/* qh->qh_next still "live" to HC */
qh->qh_state = QH_STATE_UNLINK;
@@ -3757,7 +3625,7 @@ static void qh_unlink_periodic(struct fotg210_hcd *fotg210,
}
static void start_unlink_intr(struct fotg210_hcd *fotg210,
- struct fotg210_qh *qh)
+ struct fotg210_qh *qh)
{
/* If the QH isn't linked then there's nothing we can do
* unless we were called during a giveback, in which case
@@ -3794,15 +3662,15 @@ static void start_unlink_intr(struct fotg210_hcd *fotg210,
fotg210_handle_intr_unlinks(fotg210);
else if (fotg210->intr_unlink == qh) {
fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR,
- true);
+ true);
++fotg210->intr_unlink_cycle;
}
}
static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
{
- struct fotg210_qh_hw *hw = qh->hw;
- int rc;
+ struct fotg210_qh_hw *hw = qh->hw;
+ int rc;
qh->qh_state = QH_STATE_IDLE;
hw->hw_next = FOTG210_LIST_END(fotg210);
@@ -3811,7 +3679,7 @@ static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
/* reschedule QH iff another request is queued */
if (!list_empty(&qh->qtd_list) &&
- fotg210->rh_state == FOTG210_RH_RUNNING) {
+ fotg210->rh_state == FOTG210_RH_RUNNING) {
rc = qh_schedule(fotg210, qh);
/* An error here likely indicates handshake failure
@@ -3830,16 +3698,10 @@ static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
disable_periodic(fotg210);
}
-/*-------------------------------------------------------------------------*/
-
-static int check_period(
- struct fotg210_hcd *fotg210,
- unsigned frame,
- unsigned uframe,
- unsigned period,
- unsigned usecs
-) {
- int claimed;
+static int check_period(struct fotg210_hcd *fotg210, unsigned frame,
+ unsigned uframe, unsigned period, unsigned usecs)
+{
+ int claimed;
/* complete split running into next frame?
* given FSTN support, we could sometimes check...
@@ -3857,7 +3719,7 @@ static int check_period(
do {
for (uframe = 0; uframe < 7; uframe++) {
claimed = periodic_usecs(fotg210, frame,
- uframe);
+ uframe);
if (claimed > usecs)
return 0;
}
@@ -3876,16 +3738,11 @@ static int check_period(
return 1;
}
-static int check_intr_schedule(
- struct fotg210_hcd *fotg210,
- unsigned frame,
- unsigned uframe,
- const struct fotg210_qh *qh,
- __hc32 *c_maskp
-)
+static int check_intr_schedule(struct fotg210_hcd *fotg210, unsigned frame,
+ unsigned uframe, const struct fotg210_qh *qh, __hc32 *c_maskp)
{
- int retval = -ENOSPC;
- u8 mask = 0;
+ int retval = -ENOSPC;
+ u8 mask = 0;
if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
goto done;
@@ -3911,10 +3768,10 @@ static int check_intr_schedule(
mask |= 1 << uframe;
if (tt_no_collision(fotg210, qh->period, qh->dev, frame, mask)) {
if (!check_period(fotg210, frame, uframe + qh->gap_uf + 1,
- qh->period, qh->c_usecs))
+ qh->period, qh->c_usecs))
goto done;
if (!check_period(fotg210, frame, uframe + qh->gap_uf,
- qh->period, qh->c_usecs))
+ qh->period, qh->c_usecs))
goto done;
retval = 0;
}
@@ -3927,11 +3784,11 @@ done:
*/
static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
{
- int status;
- unsigned uframe;
- __hc32 c_mask;
- unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
- struct fotg210_qh_hw *hw = qh->hw;
+ int status;
+ unsigned uframe;
+ __hc32 c_mask;
+ unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
+ struct fotg210_qh_hw *hw = qh->hw;
qh_refresh(fotg210, qh);
hw->hw_next = FOTG210_LIST_END(fotg210);
@@ -3954,7 +3811,7 @@ static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
if (status) {
/* "normal" case, uframing flexible except with splits */
if (qh->period) {
- int i;
+ int i;
for (i = qh->period; status && i > 0; --i) {
frame = ++fotg210->random_frame % qh->period;
@@ -3971,7 +3828,7 @@ static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
} else {
frame = 0;
status = check_intr_schedule(fotg210, 0, 0, qh,
- &c_mask);
+ &c_mask);
}
if (status)
goto done;
@@ -3992,17 +3849,14 @@ done:
return status;
}
-static int intr_submit(
- struct fotg210_hcd *fotg210,
- struct urb *urb,
- struct list_head *qtd_list,
- gfp_t mem_flags
-) {
- unsigned epnum;
- unsigned long flags;
- struct fotg210_qh *qh;
- int status;
- struct list_head empty;
+static int intr_submit(struct fotg210_hcd *fotg210, struct urb *urb,
+ struct list_head *qtd_list, gfp_t mem_flags)
+{
+ unsigned epnum;
+ unsigned long flags;
+ struct fotg210_qh *qh;
+ int status;
+ struct list_head empty;
/* get endpoint and transfer/schedule data */
epnum = urb->ep->desc.bEndpointAddress;
@@ -4050,11 +3904,11 @@ done_not_linked:
static void scan_intr(struct fotg210_hcd *fotg210)
{
- struct fotg210_qh *qh;
+ struct fotg210_qh *qh;
list_for_each_entry_safe(qh, fotg210->qh_scan_next,
- &fotg210->intr_qh_list, intr_node) {
- rescan:
+ &fotg210->intr_qh_list, intr_node) {
+rescan:
/* clean any finished work for this qh */
if (!list_empty(&qh->qtd_list)) {
int temp;
@@ -4069,7 +3923,7 @@ static void scan_intr(struct fotg210_hcd *fotg210)
temp = qh_completions(fotg210, qh);
if (unlikely(qh->needs_rescan ||
(list_empty(&qh->qtd_list) &&
- qh->qh_state == QH_STATE_LINKED)))
+ qh->qh_state == QH_STATE_LINKED)))
start_unlink_intr(fotg210, qh);
else if (temp != 0)
goto rescan;
@@ -4077,12 +3931,9 @@ static void scan_intr(struct fotg210_hcd *fotg210)
}
}
-/*-------------------------------------------------------------------------*/
-
/* fotg210_iso_stream ops work with both ITD and SITD */
-static struct fotg210_iso_stream *
-iso_stream_alloc(gfp_t mem_flags)
+static struct fotg210_iso_stream *iso_stream_alloc(gfp_t mem_flags)
{
struct fotg210_iso_stream *stream;
@@ -4095,20 +3946,15 @@ iso_stream_alloc(gfp_t mem_flags)
return stream;
}
-static void
-iso_stream_init(
- struct fotg210_hcd *fotg210,
- struct fotg210_iso_stream *stream,
- struct usb_device *dev,
- int pipe,
- unsigned interval
-)
+static void iso_stream_init(struct fotg210_hcd *fotg210,
+ struct fotg210_iso_stream *stream, struct usb_device *dev,
+ int pipe, unsigned interval)
{
- u32 buf1;
- unsigned epnum, maxp;
- int is_input;
- long bandwidth;
- unsigned multi;
+ u32 buf1;
+ unsigned epnum, maxp;
+ int is_input;
+ long bandwidth;
+ unsigned multi;
/*
* this might be a "high bandwidth" highspeed endpoint,
@@ -4153,13 +3999,13 @@ iso_stream_init(
stream->maxp = maxp;
}
-static struct fotg210_iso_stream *
-iso_stream_find(struct fotg210_hcd *fotg210, struct urb *urb)
+static struct fotg210_iso_stream *iso_stream_find(struct fotg210_hcd *fotg210,
+ struct urb *urb)
{
- unsigned epnum;
- struct fotg210_iso_stream *stream;
+ unsigned epnum;
+ struct fotg210_iso_stream *stream;
struct usb_host_endpoint *ep;
- unsigned long flags;
+ unsigned long flags;
epnum = usb_pipeendpoint(urb->pipe);
if (usb_pipein(urb->pipe))
@@ -4182,8 +4028,8 @@ iso_stream_find(struct fotg210_hcd *fotg210, struct urb *urb)
/* if dev->ep[epnum] is a QH, hw is set */
} else if (unlikely(stream->hw != NULL)) {
fotg210_dbg(fotg210, "dev %s ep%d%s, not iso??\n",
- urb->dev->devpath, epnum,
- usb_pipein(urb->pipe) ? "in" : "out");
+ urb->dev->devpath, epnum,
+ usb_pipein(urb->pipe) ? "in" : "out");
stream = NULL;
}
@@ -4191,15 +4037,13 @@ iso_stream_find(struct fotg210_hcd *fotg210, struct urb *urb)
return stream;
}
-/*-------------------------------------------------------------------------*/
-
/* fotg210_iso_sched ops can be ITD-only or SITD-only */
-static struct fotg210_iso_sched *
-iso_sched_alloc(unsigned packets, gfp_t mem_flags)
+static struct fotg210_iso_sched *iso_sched_alloc(unsigned packets,
+ gfp_t mem_flags)
{
- struct fotg210_iso_sched *iso_sched;
- int size = sizeof(*iso_sched);
+ struct fotg210_iso_sched *iso_sched;
+ int size = sizeof(*iso_sched);
size += packets * sizeof(struct fotg210_iso_packet);
iso_sched = kzalloc(size, mem_flags);
@@ -4209,16 +4053,12 @@ iso_sched_alloc(unsigned packets, gfp_t mem_flags)
return iso_sched;
}
-static inline void
-itd_sched_init(
- struct fotg210_hcd *fotg210,
- struct fotg210_iso_sched *iso_sched,
- struct fotg210_iso_stream *stream,
- struct urb *urb
-)
+static inline void itd_sched_init(struct fotg210_hcd *fotg210,
+ struct fotg210_iso_sched *iso_sched,
+ struct fotg210_iso_stream *stream, struct urb *urb)
{
- unsigned i;
- dma_addr_t dma = urb->transfer_dma;
+ unsigned i;
+ dma_addr_t dma = urb->transfer_dma;
/* how many uframes are needed for these transfers */
iso_sched->span = urb->number_of_packets * stream->interval;
@@ -4227,10 +4067,10 @@ itd_sched_init(
* when we fit new itds into the schedule.
*/
for (i = 0; i < urb->number_of_packets; i++) {
- struct fotg210_iso_packet *uframe = &iso_sched->packet[i];
- unsigned length;
- dma_addr_t buf;
- u32 trans;
+ struct fotg210_iso_packet *uframe = &iso_sched->packet[i];
+ unsigned length;
+ dma_addr_t buf;
+ u32 trans;
length = urb->iso_frame_desc[i].length;
buf = dma + urb->iso_frame_desc[i].offset;
@@ -4251,11 +4091,8 @@ itd_sched_init(
}
}
-static void
-iso_sched_free(
- struct fotg210_iso_stream *stream,
- struct fotg210_iso_sched *iso_sched
-)
+static void iso_sched_free(struct fotg210_iso_stream *stream,
+ struct fotg210_iso_sched *iso_sched)
{
if (!iso_sched)
return;
@@ -4264,20 +4101,15 @@ iso_sched_free(
kfree(iso_sched);
}
-static int
-itd_urb_transaction(
- struct fotg210_iso_stream *stream,
- struct fotg210_hcd *fotg210,
- struct urb *urb,
- gfp_t mem_flags
-)
+static int itd_urb_transaction(struct fotg210_iso_stream *stream,
+ struct fotg210_hcd *fotg210, struct urb *urb, gfp_t mem_flags)
{
- struct fotg210_itd *itd;
- dma_addr_t itd_dma;
- int i;
- unsigned num_itds;
- struct fotg210_iso_sched *sched;
- unsigned long flags;
+ struct fotg210_itd *itd;
+ dma_addr_t itd_dma;
+ int i;
+ unsigned num_itds;
+ struct fotg210_iso_sched *sched;
+ unsigned long flags;
sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
if (unlikely(sched == NULL))
@@ -4306,7 +4138,7 @@ itd_urb_transaction(
list_del(&itd->itd_list);
itd_dma = itd->itd_dma;
} else {
- alloc_itd:
+alloc_itd:
spin_unlock_irqrestore(&fotg210->lock, flags);
itd = dma_pool_alloc(fotg210->itd_pool, mem_flags,
&itd_dma);
@@ -4330,16 +4162,8 @@ itd_urb_transaction(
return 0;
}
-/*-------------------------------------------------------------------------*/
-
-static inline int
-itd_slot_ok(
- struct fotg210_hcd *fotg210,
- u32 mod,
- u32 uframe,
- u8 usecs,
- u32 period
-)
+static inline int itd_slot_ok(struct fotg210_hcd *fotg210, u32 mod, u32 uframe,
+ u8 usecs, u32 period)
{
uframe %= period;
do {
@@ -4354,8 +4178,7 @@ itd_slot_ok(
return 1;
}
-/*
- * This scheduler plans almost as far into the future as it has actual
+/* This scheduler plans almost as far into the future as it has actual
* periodic schedule slots. (Affected by TUNE_FLS, which defaults to
* "as small as possible" to be cache-friendlier.) That limits the size
* transfers you can stream reliably; avoid more than 64 msec per urb.
@@ -4365,19 +4188,15 @@ itd_slot_ok(
* given FOTG210_TUNE_FLS and the slop). Or, write a smarter scheduler!
*/
-#define SCHEDULE_SLOP 80 /* microframes */
+#define SCHEDULE_SLOP 80 /* microframes */
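Some arithmetic behind the 64 msec guidance above, assuming the default 1024-entry periodic frame list:

	mod = fotg210->periodic_size << 3;	/* 1024 << 3 = 8192 uframes, ~1.024 s of schedule */
	/* 2 * SCHEDULE_SLOP = 160 uframes = 20 ms of total slack;                */
	/* the recommended <= 64 ms per urb is 512 uframes, well inside mod - 160 */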
-static int
-iso_stream_schedule(
- struct fotg210_hcd *fotg210,
- struct urb *urb,
- struct fotg210_iso_stream *stream
-)
+static int iso_stream_schedule(struct fotg210_hcd *fotg210, struct urb *urb,
+ struct fotg210_iso_stream *stream)
{
- u32 now, next, start, period, span;
- int status;
- unsigned mod = fotg210->periodic_size << 3;
- struct fotg210_iso_sched *sched = urb->hcpriv;
+ u32 now, next, start, period, span;
+ int status;
+ unsigned mod = fotg210->periodic_size << 3;
+ struct fotg210_iso_sched *sched = urb->hcpriv;
period = urb->interval;
span = sched->span;
@@ -4396,7 +4215,7 @@ iso_stream_schedule(
* slot in the schedule, implicitly assuming URB_ISO_ASAP.
*/
if (likely(!list_empty(&stream->td_list))) {
- u32 excess;
+ u32 excess;
/* For high speed devices, allow scheduling within the
* isochronous scheduling threshold. For full speed devices
@@ -4435,6 +4254,7 @@ iso_stream_schedule(
*/
else {
int done = 0;
+
start = SCHEDULE_SLOP + (now & ~0x07);
/* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
@@ -4457,15 +4277,15 @@ iso_stream_schedule(
/* no room in the schedule */
if (!done) {
fotg210_dbg(fotg210, "iso resched full %p (now %d max %d)\n",
- urb, now, now + mod);
+ urb, now, now + mod);
status = -ENOSPC;
goto fail;
}
}
/* Tried to schedule too far into the future? */
- if (unlikely(start - now + span - period
- >= mod - 2 * SCHEDULE_SLOP)) {
+ if (unlikely(start - now + span - period >=
+ mod - 2 * SCHEDULE_SLOP)) {
fotg210_dbg(fotg210, "request %p would overflow (%d+%d >= %d)\n",
urb, start - now, span - period,
mod - 2 * SCHEDULE_SLOP);
@@ -4485,17 +4305,14 @@ iso_stream_schedule(
fotg210->next_frame = now >> 3;
return 0;
- fail:
+fail:
iso_sched_free(stream, sched);
urb->hcpriv = NULL;
return status;
}
-/*-------------------------------------------------------------------------*/
-
-static inline void
-itd_init(struct fotg210_hcd *fotg210, struct fotg210_iso_stream *stream,
- struct fotg210_itd *itd)
+static inline void itd_init(struct fotg210_hcd *fotg210,
+ struct fotg210_iso_stream *stream, struct fotg210_itd *itd)
{
int i;
@@ -4511,17 +4328,12 @@ itd_init(struct fotg210_hcd *fotg210, struct fotg210_iso_stream *stream,
/* All other fields are filled when scheduling */
}
-static inline void
-itd_patch(
- struct fotg210_hcd *fotg210,
- struct fotg210_itd *itd,
- struct fotg210_iso_sched *iso_sched,
- unsigned index,
- u16 uframe
-)
+static inline void itd_patch(struct fotg210_hcd *fotg210,
+ struct fotg210_itd *itd, struct fotg210_iso_sched *iso_sched,
+ unsigned index, u16 uframe)
{
- struct fotg210_iso_packet *uf = &iso_sched->packet[index];
- unsigned pg = itd->pg;
+ struct fotg210_iso_packet *uf = &iso_sched->packet[index];
+ unsigned pg = itd->pg;
uframe &= 0x07;
itd->index[uframe] = index;
@@ -4533,7 +4345,7 @@ itd_patch(
/* iso_frame_desc[].offset must be strictly increasing */
if (unlikely(uf->cross)) {
- u64 bufp = uf->bufp + 4096;
+ u64 bufp = uf->bufp + 4096;
itd->pg = ++pg;
itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, bufp & ~(u32)0);
@@ -4541,13 +4353,13 @@ itd_patch(
}
}
-static inline void
-itd_link(struct fotg210_hcd *fotg210, unsigned frame, struct fotg210_itd *itd)
+static inline void itd_link(struct fotg210_hcd *fotg210, unsigned frame,
+ struct fotg210_itd *itd)
{
- union fotg210_shadow *prev = &fotg210->pshadow[frame];
- __hc32 *hw_p = &fotg210->periodic[frame];
- union fotg210_shadow here = *prev;
- __hc32 type = 0;
+ union fotg210_shadow *prev = &fotg210->pshadow[frame];
+ __hc32 *hw_p = &fotg210->periodic[frame];
+ union fotg210_shadow here = *prev;
+ __hc32 type = 0;
/* skip any iso nodes which might belong to previous microframes */
while (here.ptr) {
@@ -4568,17 +4380,13 @@ itd_link(struct fotg210_hcd *fotg210, unsigned frame, struct fotg210_itd *itd)
}
/* fit urb's itds into the selected schedule slot; activate as needed */
-static void itd_link_urb(
- struct fotg210_hcd *fotg210,
- struct urb *urb,
- unsigned mod,
- struct fotg210_iso_stream *stream
-)
-{
- int packet;
- unsigned next_uframe, uframe, frame;
- struct fotg210_iso_sched *iso_sched = urb->hcpriv;
- struct fotg210_itd *itd;
+static void itd_link_urb(struct fotg210_hcd *fotg210, struct urb *urb,
+ unsigned mod, struct fotg210_iso_stream *stream)
+{
+ int packet;
+ unsigned next_uframe, uframe, frame;
+ struct fotg210_iso_sched *iso_sched = urb->hcpriv;
+ struct fotg210_itd *itd;
next_uframe = stream->next_uframe & (mod - 1);
@@ -4621,7 +4429,7 @@ static void itd_link_urb(
if (((next_uframe >> 3) != frame)
|| packet == urb->number_of_packets) {
itd_link(fotg210, frame & (fotg210->periodic_size - 1),
- itd);
+ itd);
itd = NULL;
}
}
@@ -4635,8 +4443,8 @@ static void itd_link_urb(
enable_periodic(fotg210);
}
-#define ISO_ERRS (FOTG210_ISOC_BUF_ERR | FOTG210_ISOC_BABBLE |\
- FOTG210_ISOC_XACTERR)
+#define ISO_ERRS (FOTG210_ISOC_BUF_ERR | FOTG210_ISOC_BABBLE |\
+ FOTG210_ISOC_XACTERR)
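ISO_ERRS is what itd_complete() below tests in each completed microframe's transaction word; in the usual EHCI-derived handling (assumed here, since that branch falls outside these hunks) the three bits map to distinct packet statuses:

	if (t & FOTG210_ISOC_BUF_ERR)		/* HC couldn't keep up with the buffer */
		desc->status = usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
	else if (t & FOTG210_ISOC_BABBLE)
		desc->status = -EOVERFLOW;
	else					/* FOTG210_ISOC_XACTERR */
		desc->status = -EPROTO;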
/* Process and recycle a completed ITD. Return true iff its urb completed,
* and hence its completion callback probably added things to the hardware
@@ -4650,14 +4458,14 @@ static void itd_link_urb(
*/
static bool itd_complete(struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
{
- struct urb *urb = itd->urb;
- struct usb_iso_packet_descriptor *desc;
- u32 t;
- unsigned uframe;
- int urb_index = -1;
- struct fotg210_iso_stream *stream = itd->stream;
- struct usb_device *dev;
- bool retval = false;
+ struct urb *urb = itd->urb;
+ struct usb_iso_packet_descriptor *desc;
+ u32 t;
+ unsigned uframe;
+ int urb_index = -1;
+ struct fotg210_iso_stream *stream = itd->stream;
+ struct usb_device *dev;
+ bool retval = false;
/* for each uframe with a packet */
for (uframe = 0; uframe < 8; uframe++) {
@@ -4702,8 +4510,8 @@ static bool itd_complete(struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
goto done;
/* ASSERT: it's really the last itd for this urb
- list_for_each_entry (itd, &stream->td_list, itd_list)
- BUG_ON (itd->urb == urb);
+ * list_for_each_entry (itd, &stream->td_list, itd_list)
+ * BUG_ON (itd->urb == urb);
*/
/* give urb back to the driver; completion often (re)submits */
@@ -4740,14 +4548,12 @@ done:
return retval;
}
-/*-------------------------------------------------------------------------*/
-
static int itd_submit(struct fotg210_hcd *fotg210, struct urb *urb,
- gfp_t mem_flags)
+ gfp_t mem_flags)
{
- int status = -EINVAL;
- unsigned long flags;
- struct fotg210_iso_stream *stream;
+ int status = -EINVAL;
+ unsigned long flags;
+ struct fotg210_iso_stream *stream;
/* Get iso_stream head */
stream = iso_stream_find(fotg210, urb);
@@ -4756,22 +4562,22 @@ static int itd_submit(struct fotg210_hcd *fotg210, struct urb *urb,
return -ENOMEM;
}
if (unlikely(urb->interval != stream->interval &&
- fotg210_port_speed(fotg210, 0) ==
- USB_PORT_STAT_HIGH_SPEED)) {
- fotg210_dbg(fotg210, "can't change iso interval %d --> %d\n",
+ fotg210_port_speed(fotg210, 0) ==
+ USB_PORT_STAT_HIGH_SPEED)) {
+ fotg210_dbg(fotg210, "can't change iso interval %d --> %d\n",
stream->interval, urb->interval);
- goto done;
+ goto done;
}
#ifdef FOTG210_URB_TRACE
fotg210_dbg(fotg210,
- "%s %s urb %p ep%d%s len %d, %d pkts %d uframes[%p]\n",
- __func__, urb->dev->devpath, urb,
- usb_pipeendpoint(urb->pipe),
- usb_pipein(urb->pipe) ? "in" : "out",
- urb->transfer_buffer_length,
- urb->number_of_packets, urb->interval,
- stream);
+ "%s %s urb %p ep%d%s len %d, %d pkts %d uframes[%p]\n",
+ __func__, urb->dev->devpath, urb,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ urb->transfer_buffer_length,
+ urb->number_of_packets, urb->interval,
+ stream);
#endif
/* allocate ITDs w/o locking anything */
@@ -4795,19 +4601,87 @@ static int itd_submit(struct fotg210_hcd *fotg210, struct urb *urb,
itd_link_urb(fotg210, urb, fotg210->periodic_size << 3, stream);
else
usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
- done_not_linked:
+done_not_linked:
spin_unlock_irqrestore(&fotg210->lock, flags);
- done:
+done:
return status;
}
-/*-------------------------------------------------------------------------*/
+static inline int scan_frame_queue(struct fotg210_hcd *fotg210, unsigned frame,
+ unsigned now_frame, bool live)
+{
+ unsigned uf;
+ bool modified;
+ union fotg210_shadow q, *q_p;
+ __hc32 type, *hw_p;
+
+ /* scan each element in frame's queue for completions */
+ q_p = &fotg210->pshadow[frame];
+ hw_p = &fotg210->periodic[frame];
+ q.ptr = q_p->ptr;
+ type = Q_NEXT_TYPE(fotg210, *hw_p);
+ modified = false;
+
+ while (q.ptr) {
+ switch (hc32_to_cpu(fotg210, type)) {
+ case Q_TYPE_ITD:
+ /* If this ITD is still active, leave it for
+ * later processing ... check the next entry.
+ * No need to check for activity unless the
+ * frame is current.
+ */
+ if (frame == now_frame && live) {
+ rmb();
+ for (uf = 0; uf < 8; uf++) {
+ if (q.itd->hw_transaction[uf] &
+ ITD_ACTIVE(fotg210))
+ break;
+ }
+ if (uf < 8) {
+ q_p = &q.itd->itd_next;
+ hw_p = &q.itd->hw_next;
+ type = Q_NEXT_TYPE(fotg210,
+ q.itd->hw_next);
+ q = *q_p;
+ break;
+ }
+ }
+
+ /* Take finished ITDs out of the schedule
+ * and process them: recycle, maybe report
+ * URB completion. HC won't cache the
+ * pointer for much longer, if at all.
+ */
+ *q_p = q.itd->itd_next;
+ *hw_p = q.itd->hw_next;
+ type = Q_NEXT_TYPE(fotg210, q.itd->hw_next);
+ wmb();
+ modified = itd_complete(fotg210, q.itd);
+ q = *q_p;
+ break;
+ default:
+ fotg210_dbg(fotg210, "corrupt type %d frame %d shadow %p\n",
+ type, frame, q.ptr);
+ /* FALL THROUGH */
+ case Q_TYPE_QH:
+ case Q_TYPE_FSTN:
+ /* End of the iTDs and siTDs */
+ q.ptr = NULL;
+ break;
+ }
+
+ /* assume completion callbacks modify the queue */
+ if (unlikely(modified && fotg210->isoc_count > 0))
+ return -EINVAL;
+ }
+ return 0;
+}
static void scan_isoc(struct fotg210_hcd *fotg210)
{
- unsigned uf, now_frame, frame;
- unsigned fmask = fotg210->periodic_size - 1;
- bool modified, live;
+ unsigned uf, now_frame, frame, ret;
+ unsigned fmask = fotg210->periodic_size - 1;
+ bool live;
/*
* When running, scan from last scan point up to "now"
@@ -4826,69 +4700,10 @@ static void scan_isoc(struct fotg210_hcd *fotg210)
frame = fotg210->next_frame;
for (;;) {
- union fotg210_shadow q, *q_p;
- __hc32 type, *hw_p;
-
-restart:
- /* scan each element in frame's queue for completions */
- q_p = &fotg210->pshadow[frame];
- hw_p = &fotg210->periodic[frame];
- q.ptr = q_p->ptr;
- type = Q_NEXT_TYPE(fotg210, *hw_p);
- modified = false;
-
- while (q.ptr != NULL) {
- switch (hc32_to_cpu(fotg210, type)) {
- case Q_TYPE_ITD:
- /* If this ITD is still active, leave it for
- * later processing ... check the next entry.
- * No need to check for activity unless the
- * frame is current.
- */
- if (frame == now_frame && live) {
- rmb();
- for (uf = 0; uf < 8; uf++) {
- if (q.itd->hw_transaction[uf] &
- ITD_ACTIVE(fotg210))
- break;
- }
- if (uf < 8) {
- q_p = &q.itd->itd_next;
- hw_p = &q.itd->hw_next;
- type = Q_NEXT_TYPE(fotg210,
- q.itd->hw_next);
- q = *q_p;
- break;
- }
- }
-
- /* Take finished ITDs out of the schedule
- * and process them: recycle, maybe report
- * URB completion. HC won't cache the
- * pointer for much longer, if at all.
- */
- *q_p = q.itd->itd_next;
- *hw_p = q.itd->hw_next;
- type = Q_NEXT_TYPE(fotg210, q.itd->hw_next);
- wmb();
- modified = itd_complete(fotg210, q.itd);
- q = *q_p;
- break;
- default:
- fotg210_dbg(fotg210, "corrupt type %d frame %d shadow %p\n",
- type, frame, q.ptr);
- /* FALL THROUGH */
- case Q_TYPE_QH:
- case Q_TYPE_FSTN:
- /* End of the iTDs and siTDs */
- q.ptr = NULL;
- break;
- }
-
- /* assume completion callbacks modify the queue */
- if (unlikely(modified && fotg210->isoc_count > 0))
- goto restart;
- }
+ ret = 1;
+ while (ret != 0)
+ ret = scan_frame_queue(fotg210, frame,
+ now_frame, live);
/* Stop when we have reached the current frame */
if (frame == now_frame)
@@ -4897,16 +4712,14 @@ restart:
}
fotg210->next_frame = now_frame;
}
-/*-------------------------------------------------------------------------*/
-/*
- * Display / Set uframe_periodic_max
+
+/* Display / Set uframe_periodic_max
*/
static ssize_t show_uframe_periodic_max(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
- struct fotg210_hcd *fotg210;
- int n;
+ struct fotg210_hcd *fotg210;
+ int n;
fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev)));
n = scnprintf(buf, PAGE_SIZE, "%d\n", fotg210->uframe_periodic_max);
@@ -4915,15 +4728,14 @@ static ssize_t show_uframe_periodic_max(struct device *dev,
static ssize_t store_uframe_periodic_max(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+ struct device_attribute *attr, const char *buf, size_t count)
{
- struct fotg210_hcd *fotg210;
- unsigned uframe_periodic_max;
- unsigned frame, uframe;
- unsigned short allocated_max;
- unsigned long flags;
- ssize_t ret;
+ struct fotg210_hcd *fotg210;
+ unsigned uframe_periodic_max;
+ unsigned frame, uframe;
+ unsigned short allocated_max;
+ unsigned long flags;
+ ssize_t ret;
fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev)));
if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
@@ -4931,7 +4743,7 @@ static ssize_t store_uframe_periodic_max(struct device *dev,
if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) {
fotg210_info(fotg210, "rejecting invalid request for uframe_periodic_max=%u\n",
- uframe_periodic_max);
+ uframe_periodic_max);
return -EINVAL;
}
@@ -4954,22 +4766,22 @@ static ssize_t store_uframe_periodic_max(struct device *dev,
for (frame = 0; frame < fotg210->periodic_size; ++frame)
for (uframe = 0; uframe < 7; ++uframe)
allocated_max = max(allocated_max,
- periodic_usecs(fotg210, frame, uframe));
+ periodic_usecs(fotg210, frame,
+ uframe));
if (allocated_max > uframe_periodic_max) {
fotg210_info(fotg210,
- "cannot decrease uframe_periodic_max because "
- "periodic bandwidth is already allocated "
- "(%u > %u)\n",
- allocated_max, uframe_periodic_max);
+ "cannot decrease uframe_periodic_max because periodic bandwidth is already allocated (%u > %u)\n",
+ allocated_max, uframe_periodic_max);
goto out_unlock;
}
}
/* increasing is always ok */
- fotg210_info(fotg210, "setting max periodic bandwidth to %u%% (== %u usec/uframe)\n",
- 100 * uframe_periodic_max/125, uframe_periodic_max);
+ fotg210_info(fotg210,
+ "setting max periodic bandwidth to %u%% (== %u usec/uframe)\n",
+ 100 * uframe_periodic_max/125, uframe_periodic_max);
if (uframe_periodic_max != 100)
fotg210_warn(fotg210, "max periodic bandwidth set is non-standard\n");
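
For context (not part of the patch): uframe_periodic_max counts microseconds of periodic bandwidth per 125 us microframe, which is why the store handler above rejects values outside 100..124 and reports the setting as a percentage. A minimal sketch of the conversion the driver prints:

	/* Sketch only: how a uframe_periodic_max value maps to a percentage. */
	unsigned uframe_periodic_max = 100;                  /* driver default, in usec */
	unsigned percent = 100 * uframe_periodic_max / 125;  /* 100 usec -> 80% of a microframe */
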
@@ -4987,8 +4799,8 @@ static DEVICE_ATTR(uframe_periodic_max, 0644, show_uframe_periodic_max,
static inline int create_sysfs_files(struct fotg210_hcd *fotg210)
{
- struct device *controller = fotg210_to_hcd(fotg210)->self.controller;
- int i = 0;
+ struct device *controller = fotg210_to_hcd(fotg210)->self.controller;
+ int i = 0;
if (i)
goto out;
@@ -5000,12 +4812,10 @@ out:
static inline void remove_sysfs_files(struct fotg210_hcd *fotg210)
{
- struct device *controller = fotg210_to_hcd(fotg210)->self.controller;
+ struct device *controller = fotg210_to_hcd(fotg210)->self.controller;
device_remove_file(controller, &dev_attr_uframe_periodic_max);
}
-/*-------------------------------------------------------------------------*/
-
/* On some systems, leaving remote wakeup enabled prevents system shutdown.
* The firmware seems to think that powering off is a wakeup event!
* This routine turns off remote wakeup and everything else, on all ports.
@@ -5017,8 +4827,7 @@ static void fotg210_turn_off_all_ports(struct fotg210_hcd *fotg210)
fotg210_writel(fotg210, PORT_RWC_BITS, status_reg);
}
-/*
- * Halt HC, turn off all ports, and let the BIOS use the companion controllers.
+/* Halt HC, turn off all ports, and let the BIOS use the companion controllers.
* Must be called with interrupts enabled and the lock not held.
*/
static void fotg210_silence_controller(struct fotg210_hcd *fotg210)
@@ -5037,7 +4846,7 @@ static void fotg210_silence_controller(struct fotg210_hcd *fotg210)
*/
static void fotg210_shutdown(struct usb_hcd *hcd)
{
- struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
spin_lock_irq(&fotg210->lock);
fotg210->shutdown = true;
@@ -5050,10 +4859,7 @@ static void fotg210_shutdown(struct usb_hcd *hcd)
hrtimer_cancel(&fotg210->hrtimer);
}
-/*-------------------------------------------------------------------------*/
-
-/*
- * fotg210_work is called from some interrupts, timers, and so on.
+/* fotg210_work is called from some interrupts, timers, and so on.
* it calls driver completion functions, after dropping fotg210->lock.
*/
static void fotg210_work(struct fotg210_hcd *fotg210)
@@ -5068,7 +4874,7 @@ static void fotg210_work(struct fotg210_hcd *fotg210)
}
fotg210->scanning = true;
- rescan:
+rescan:
fotg210->need_rescan = false;
if (fotg210->async_count)
scan_async(fotg210);
@@ -5087,12 +4893,11 @@ static void fotg210_work(struct fotg210_hcd *fotg210)
turn_on_io_watchdog(fotg210);
}
-/*
- * Called when the fotg210_hcd module is removed.
+/* Called when the fotg210_hcd module is removed.
*/
static void fotg210_stop(struct usb_hcd *hcd)
{
- struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
fotg210_dbg(fotg210, "stop\n");
@@ -5116,26 +4921,26 @@ static void fotg210_stop(struct usb_hcd *hcd)
spin_unlock_irq(&fotg210->lock);
fotg210_mem_cleanup(fotg210);
-#ifdef FOTG210_STATS
+#ifdef FOTG210_STATS
fotg210_dbg(fotg210, "irq normal %ld err %ld iaa %ld (lost %ld)\n",
- fotg210->stats.normal, fotg210->stats.error, fotg210->stats.iaa,
- fotg210->stats.lost_iaa);
+ fotg210->stats.normal, fotg210->stats.error,
+ fotg210->stats.iaa, fotg210->stats.lost_iaa);
fotg210_dbg(fotg210, "complete %ld unlink %ld\n",
- fotg210->stats.complete, fotg210->stats.unlink);
+ fotg210->stats.complete, fotg210->stats.unlink);
#endif
dbg_status(fotg210, "fotg210_stop completed",
- fotg210_readl(fotg210, &fotg210->regs->status));
+ fotg210_readl(fotg210, &fotg210->regs->status));
}
/* one-time init, only for memory state */
static int hcd_fotg210_init(struct usb_hcd *hcd)
{
- struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
- u32 temp;
- int retval;
- u32 hcc_params;
- struct fotg210_qh_hw *hw;
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ u32 temp;
+ int retval;
+ u32 hcc_params;
+ struct fotg210_qh_hw *hw;
spin_lock_init(&fotg210->lock);
@@ -5238,18 +5043,18 @@ static int hcd_fotg210_init(struct usb_hcd *hcd)
/* start HC running; it's halted, hcd_fotg210_init() has been run (once) */
static int fotg210_run(struct usb_hcd *hcd)
{
- struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
- u32 temp;
- u32 hcc_params;
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ u32 temp;
+ u32 hcc_params;
hcd->uses_new_polling = 1;
/* EHCI spec section 4.1 */
fotg210_writel(fotg210, fotg210->periodic_dma,
- &fotg210->regs->frame_list);
+ &fotg210->regs->frame_list);
fotg210_writel(fotg210, (u32)fotg210->async->qh_dma,
- &fotg210->regs->async_next);
+ &fotg210->regs->async_next);
/*
* hcc_params controls whether fotg210->regs->segment must (!!!)
@@ -5292,19 +5097,19 @@ static int fotg210_run(struct usb_hcd *hcd)
fotg210->rh_state = FOTG210_RH_RUNNING;
/* unblock posted writes */
fotg210_readl(fotg210, &fotg210->regs->command);
- msleep(5);
+ usleep_range(5000, 10000);
up_write(&ehci_cf_port_reset_rwsem);
fotg210->last_periodic_enable = ktime_get_real();
temp = HC_VERSION(fotg210,
- fotg210_readl(fotg210, &fotg210->caps->hc_capbase));
+ fotg210_readl(fotg210, &fotg210->caps->hc_capbase));
fotg210_info(fotg210,
- "USB %x.%x started, EHCI %x.%02x\n",
- ((fotg210->sbrn & 0xf0)>>4), (fotg210->sbrn & 0x0f),
- temp >> 8, temp & 0xff);
+ "USB %x.%x started, EHCI %x.%02x\n",
+ ((fotg210->sbrn & 0xf0) >> 4), (fotg210->sbrn & 0x0f),
+ temp >> 8, temp & 0xff);
fotg210_writel(fotg210, INTR_MASK,
- &fotg210->regs->intr_enable); /* Turn On Interrupts */
+ &fotg210->regs->intr_enable); /* Turn On Interrupts */
/* GRR this is run-once init(), being done every time the HC starts.
* So long as they're part of class devices, we can't do it init()
@@ -5322,14 +5127,14 @@ static int fotg210_setup(struct usb_hcd *hcd)
int retval;
fotg210->regs = (void __iomem *)fotg210->caps +
- HC_LENGTH(fotg210,
- fotg210_readl(fotg210, &fotg210->caps->hc_capbase));
+ HC_LENGTH(fotg210,
+ fotg210_readl(fotg210, &fotg210->caps->hc_capbase));
dbg_hcs_params(fotg210, "reset");
dbg_hcc_params(fotg210, "reset");
/* cache this readonly data; minimize chip reads */
fotg210->hcs_params = fotg210_readl(fotg210,
- &fotg210->caps->hcs_params);
+ &fotg210->caps->hcs_params);
fotg210->sbrn = HCD_USB2;
@@ -5347,13 +5152,11 @@ static int fotg210_setup(struct usb_hcd *hcd)
return 0;
}
-/*-------------------------------------------------------------------------*/
-
static irqreturn_t fotg210_irq(struct usb_hcd *hcd)
{
- struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
- u32 status, masked_status, pcd_status = 0, cmd;
- int bh;
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ u32 status, masked_status, pcd_status = 0, cmd;
+ int bh;
spin_lock(&fotg210->lock);
@@ -5373,7 +5176,7 @@ static irqreturn_t fotg210_irq(struct usb_hcd *hcd)
/* Shared IRQ? */
if (!masked_status ||
- unlikely(fotg210->rh_state == FOTG210_RH_HALTED)) {
+ unlikely(fotg210->rh_state == FOTG210_RH_HALTED)) {
spin_unlock(&fotg210->lock);
return IRQ_NONE;
}
@@ -5440,7 +5243,7 @@ static irqreturn_t fotg210_irq(struct usb_hcd *hcd)
if (test_bit(0, &fotg210->suspended_ports) &&
((pstatus & PORT_RESUME) ||
- !(pstatus & PORT_SUSPEND)) &&
+ !(pstatus & PORT_SUSPEND)) &&
(pstatus & PORT_PE) &&
fotg210->reset_done[0] == 0) {
@@ -5469,7 +5272,7 @@ dead:
fotg210->rh_state = FOTG210_RH_STOPPING;
fotg210->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE);
fotg210_writel(fotg210, fotg210->command,
- &fotg210->regs->command);
+ &fotg210->regs->command);
fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
fotg210_handle_controller_death(fotg210);
@@ -5485,10 +5288,7 @@ dead:
return IRQ_HANDLED;
}
-/*-------------------------------------------------------------------------*/
-
-/*
- * non-error returns are a promise to giveback() the urb later
+/* non-error returns are a promise to giveback() the urb later
* we drop ownership so next owner (or urb unlink) can get it
*
* urb + dev is in hcd.self.controller.urb_list
@@ -5499,13 +5299,11 @@ dead:
* NOTE: control, bulk, and interrupt share the same code to append TDs
* to a (possibly active) QH, and the same QH scanning code.
*/
-static int fotg210_urb_enqueue(
- struct usb_hcd *hcd,
- struct urb *urb,
- gfp_t mem_flags
-) {
- struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
- struct list_head qtd_list;
+static int fotg210_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ struct list_head qtd_list;
INIT_LIST_HEAD(&qtd_list);
@@ -5539,10 +5337,10 @@ static int fotg210_urb_enqueue(
static int fotg210_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
- struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
- struct fotg210_qh *qh;
- unsigned long flags;
- int rc;
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ struct fotg210_qh *qh;
+ unsigned long flags;
+ int rc;
spin_lock_irqsave(&fotg210->lock, flags);
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
@@ -5603,16 +5401,14 @@ done:
return rc;
}
-/*-------------------------------------------------------------------------*/
-
/* bulk qh holds the data toggle */
-static void
-fotg210_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+static void fotg210_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
{
- struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
- unsigned long flags;
- struct fotg210_qh *qh, *tmp;
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ unsigned long flags;
+ struct fotg210_qh *qh, *tmp;
/* ASSERT: any requests/urbs are being unlinked */
/* ASSERT: nobody can be submitting urbs for this any more */
@@ -5627,7 +5423,7 @@ rescan:
* accelerate iso completions ... so spin a while.
*/
if (qh->hw == NULL) {
- struct fotg210_iso_stream *stream = ep->hcpriv;
+ struct fotg210_iso_stream *stream = ep->hcpriv;
if (!list_empty(&stream->td_list))
goto idle_timeout;
@@ -5671,24 +5467,24 @@ idle_timeout:
* that's not our job. just leak this memory.
*/
fotg210_err(fotg210, "qh %p (#%02x) state %d%s\n",
- qh, ep->desc.bEndpointAddress, qh->qh_state,
- list_empty(&qh->qtd_list) ? "" : "(has tds)");
+ qh, ep->desc.bEndpointAddress, qh->qh_state,
+ list_empty(&qh->qtd_list) ? "" : "(has tds)");
break;
}
- done:
+done:
ep->hcpriv = NULL;
spin_unlock_irqrestore(&fotg210->lock, flags);
}
-static void
-fotg210_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+static void fotg210_endpoint_reset(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
{
- struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
- struct fotg210_qh *qh;
- int eptype = usb_endpoint_type(&ep->desc);
- int epnum = usb_endpoint_num(&ep->desc);
- int is_out = usb_endpoint_dir_out(&ep->desc);
- unsigned long flags;
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ struct fotg210_qh *qh;
+ int eptype = usb_endpoint_type(&ep->desc);
+ int epnum = usb_endpoint_num(&ep->desc);
+ int is_out = usb_endpoint_dir_out(&ep->desc);
+ unsigned long flags;
if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
return;
@@ -5723,15 +5519,13 @@ fotg210_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
static int fotg210_get_frame(struct usb_hcd *hcd)
{
- struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+
return (fotg210_read_frame_index(fotg210) >> 3) %
fotg210->periodic_size;
}
-/*-------------------------------------------------------------------------*/
-
-/*
- * The EHCI in ChipIdea HDRC cannot be a separate module or device,
+/* The EHCI in ChipIdea HDRC cannot be a separate module or device,
* because its registers (and irq) are shared between host/gadget/otg
* functions and in order to facilitate role switching we cannot
* give the fotg210 driver exclusive access to those.
@@ -5791,7 +5585,7 @@ static void fotg210_init(struct fotg210_hcd *fotg210)
u32 value;
iowrite32(GMIR_MDEV_INT | GMIR_MOTG_INT | GMIR_INT_POLARITY,
- &fotg210->regs->gmir);
+ &fotg210->regs->gmir);
value = ioread32(&fotg210->regs->otgcsr);
value &= ~OTGCSR_A_BUS_DROP;
@@ -5808,12 +5602,12 @@ static void fotg210_init(struct fotg210_hcd *fotg210)
*/
static int fotg210_hcd_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct usb_hcd *hcd;
- struct resource *res;
- int irq;
- int retval = -ENODEV;
- struct fotg210_hcd *fotg210;
+ struct device *dev = &pdev->dev;
+ struct usb_hcd *hcd;
+ struct resource *res;
+ int irq;
+ int retval = -ENODEV;
+ struct fotg210_hcd *fotg210;
if (usb_disabled())
return -ENODEV;
@@ -5822,9 +5616,8 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
- dev_err(dev,
- "Found HC with no IRQ. Check %s setup!\n",
- dev_name(dev));
+ dev_err(dev, "Found HC with no IRQ. Check %s setup!\n",
+ dev_name(dev));
return -ENODEV;
}
@@ -5883,8 +5676,8 @@ fail_create_hcd:
*/
static int fotg210_hcd_remove(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct device *dev = &pdev->dev;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
if (!hcd)
return 0;
@@ -5914,12 +5707,12 @@ static int __init fotg210_hcd_init(void)
set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
- pr_warn(KERN_WARNING "Warning! fotg210_hcd should always be loaded before uhci_hcd and ohci_hcd, not after\n");
+ pr_warn("Warning! fotg210_hcd should always be loaded before uhci_hcd and ohci_hcd, not after\n");
pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd\n",
- hcd_name,
- sizeof(struct fotg210_qh), sizeof(struct fotg210_qtd),
- sizeof(struct fotg210_itd));
+ hcd_name, sizeof(struct fotg210_qh),
+ sizeof(struct fotg210_qtd),
+ sizeof(struct fotg210_itd));
fotg210_debug_root = debugfs_create_dir("fotg210", usb_debug_root);
if (!fotg210_debug_root) {
@@ -5932,7 +5725,6 @@ static int __init fotg210_hcd_init(void)
goto clean;
return retval;
- platform_driver_unregister(&fotg210_hcd_driver);
clean:
debugfs_remove(fotg210_debug_root);
fotg210_debug_root = NULL;
diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h
index 3bad17859cd7..b5cfa7aeb277 100644
--- a/drivers/usb/host/fotg210.h
+++ b/drivers/usb/host/fotg210.h
@@ -137,19 +137,25 @@ struct fotg210_hcd { /* one per controller */
/* per root hub port */
unsigned long reset_done[FOTG210_MAX_ROOT_PORTS];
- /* bit vectors (one bit per port) */
- unsigned long bus_suspended; /* which ports were
- already suspended at the start of a bus suspend */
- unsigned long companion_ports; /* which ports are
- dedicated to the companion controller */
- unsigned long owned_ports; /* which ports are
- owned by the companion during a bus suspend */
- unsigned long port_c_suspend; /* which ports have
- the change-suspend feature turned on */
- unsigned long suspended_ports; /* which ports are
- suspended */
- unsigned long resuming_ports; /* which ports have
- started to resume */
+ /* bit vectors (one bit per port)
+ * which ports were already suspended at the start of a bus suspend
+ */
+ unsigned long bus_suspended;
+
+	/* which ports are dedicated to the companion controller */
+ unsigned long companion_ports;
+
+ /* which ports are owned by the companion during a bus suspend */
+ unsigned long owned_ports;
+
+ /* which ports have the change-suspend feature turned on */
+ unsigned long port_c_suspend;
+
+ /* which ports are suspended */
+ unsigned long suspended_ports;
+
+ /* which ports have started to resume */
+ unsigned long resuming_ports;
/* per-HC memory pools (could be per-bus, but ...) */
struct dma_pool *qh_pool; /* qh per active urb */
@@ -585,10 +591,10 @@ struct fotg210_fstn {
/* Prepare the PORTSC wakeup flags during controller suspend/resume */
#define fotg210_prepare_ports_for_controller_suspend(fotg210, do_wakeup) \
- fotg210_adjust_port_wakeup_flags(fotg210, true, do_wakeup);
+ fotg210_adjust_port_wakeup_flags(fotg210, true, do_wakeup)
#define fotg210_prepare_ports_for_controller_resume(fotg210) \
- fotg210_adjust_port_wakeup_flags(fotg210, false, false);
+ fotg210_adjust_port_wakeup_flags(fotg210, false, false)
/*-------------------------------------------------------------------------*/
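
A side note on the fotg210.h hunk above (illustration only, not part of the patch): dropping the trailing semicolons lets the two wrapper macros be used like ordinary function calls. With the semicolon baked into the definition, an if/else caller breaks, as this made-up example shows:

	#define do_thing(x)	thing(x);	/* trailing ';' inside the macro */

	if (cond)
		do_thing(1);	/* expands to thing(1);; -- the extra ';' closes the if */
	else			/* "else without a previous if" compile error */
		other_thing();
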
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 534c4c5d278a..0c382652a399 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -351,6 +351,7 @@ static const struct of_device_id fsl_usb2_mph_dr_of_match[] = {
#endif
{},
};
+MODULE_DEVICE_TABLE(of, fsl_usb2_mph_dr_of_match);
static struct platform_driver fsl_usb2_mph_dr_driver = {
.driver = {
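
For reference (not part of the patch): MODULE_DEVICE_TABLE(of, ...) exports the OF match table as module alias information so userspace can autoload the module when a matching device-tree node is probed. The usual pattern, sketched here with a hypothetical driver and a made-up compatible string, looks like:

	/* Hypothetical example of the pattern the hunk above completes. */
	static const struct of_device_id foo_of_match[] = {
		{ .compatible = "vendor,foo-usb" },	/* assumed compatible string */
		{ /* sentinel */ },
	};
	MODULE_DEVICE_TABLE(of, foo_of_match);

	static struct platform_driver foo_driver = {
		.driver = {
			.name		= "foo-usb",
			.of_match_table	= foo_of_match,
		},
	};
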
diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c
deleted file mode 100644
index 1fd8718a9f11..000000000000
--- a/drivers/usb/host/fusbh200-hcd.c
+++ /dev/null
@@ -1,5894 +0,0 @@
-/*
- * Faraday FUSBH200 EHCI-like driver
- *
- * Copyright (c) 2013 Faraday Technology Corporation
- *
- * Author: Yuan-Hsin Chen <yhchen@faraday-tech.com>
- * Feng-Hsin Chiang <john453@faraday-tech.com>
- * Po-Yu Chuang <ratbert.chuang@gmail.com>
- *
- * Most of code borrowed from the Linux-3.7 EHCI driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/dmapool.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/ioport.h>
-#include <linux/sched.h>
-#include <linux/vmalloc.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/hrtimer.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/usb.h>
-#include <linux/usb/hcd.h>
-#include <linux/moduleparam.h>
-#include <linux/dma-mapping.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/platform_device.h>
-
-#include <asm/byteorder.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/unaligned.h>
-
-/*-------------------------------------------------------------------------*/
-#define DRIVER_AUTHOR "Yuan-Hsin Chen"
-#define DRIVER_DESC "FUSBH200 Host Controller (EHCI) Driver"
-
-static const char hcd_name [] = "fusbh200_hcd";
-
-#undef FUSBH200_URB_TRACE
-
-/* magic numbers that can affect system performance */
-#define FUSBH200_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
-#define FUSBH200_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
-#define FUSBH200_TUNE_RL_TT 0
-#define FUSBH200_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
-#define FUSBH200_TUNE_MULT_TT 1
-/*
- * Some drivers think it's safe to schedule isochronous transfers more than
- * 256 ms into the future (partly as a result of an old bug in the scheduling
- * code). In an attempt to avoid trouble, we will use a minimum scheduling
- * length of 512 frames instead of 256.
- */
-#define FUSBH200_TUNE_FLS 1 /* (medium) 512-frame schedule */
-
-/* Initial IRQ latency: faster than hw default */
-static int log2_irq_thresh = 0; // 0 to 6
-module_param (log2_irq_thresh, int, S_IRUGO);
-MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
-
-/* initial park setting: slower than hw default */
-static unsigned park = 0;
-module_param (park, uint, S_IRUGO);
-MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
-
-/* for link power management(LPM) feature */
-static unsigned int hird;
-module_param(hird, int, S_IRUGO);
-MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us");
-
-#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
-
-#include "fusbh200.h"
-
-/*-------------------------------------------------------------------------*/
-
-#define fusbh200_dbg(fusbh200, fmt, args...) \
- dev_dbg (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args )
-#define fusbh200_err(fusbh200, fmt, args...) \
- dev_err (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args )
-#define fusbh200_info(fusbh200, fmt, args...) \
- dev_info (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args )
-#define fusbh200_warn(fusbh200, fmt, args...) \
- dev_warn (fusbh200_to_hcd(fusbh200)->self.controller , fmt , ## args )
-
-/* check the values in the HCSPARAMS register
- * (host controller _Structural_ parameters)
- * see EHCI spec, Table 2-4 for each value
- */
-static void dbg_hcs_params (struct fusbh200_hcd *fusbh200, char *label)
-{
- u32 params = fusbh200_readl(fusbh200, &fusbh200->caps->hcs_params);
-
- fusbh200_dbg (fusbh200,
- "%s hcs_params 0x%x ports=%d\n",
- label, params,
- HCS_N_PORTS (params)
- );
-}
-
-/* check the values in the HCCPARAMS register
- * (host controller _Capability_ parameters)
- * see EHCI Spec, Table 2-5 for each value
- * */
-static void dbg_hcc_params (struct fusbh200_hcd *fusbh200, char *label)
-{
- u32 params = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params);
-
- fusbh200_dbg (fusbh200,
- "%s hcc_params %04x uframes %s%s\n",
- label,
- params,
- HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
- HCC_CANPARK(params) ? " park" : "");
-}
-
-static void __maybe_unused
-dbg_qtd (const char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd)
-{
- fusbh200_dbg(fusbh200, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
- hc32_to_cpup(fusbh200, &qtd->hw_next),
- hc32_to_cpup(fusbh200, &qtd->hw_alt_next),
- hc32_to_cpup(fusbh200, &qtd->hw_token),
- hc32_to_cpup(fusbh200, &qtd->hw_buf [0]));
- if (qtd->hw_buf [1])
- fusbh200_dbg(fusbh200, " p1=%08x p2=%08x p3=%08x p4=%08x\n",
- hc32_to_cpup(fusbh200, &qtd->hw_buf[1]),
- hc32_to_cpup(fusbh200, &qtd->hw_buf[2]),
- hc32_to_cpup(fusbh200, &qtd->hw_buf[3]),
- hc32_to_cpup(fusbh200, &qtd->hw_buf[4]));
-}
-
-static void __maybe_unused
-dbg_qh (const char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
-{
- struct fusbh200_qh_hw *hw = qh->hw;
-
- fusbh200_dbg (fusbh200, "%s qh %p n%08x info %x %x qtd %x\n", label,
- qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current);
- dbg_qtd("overlay", fusbh200, (struct fusbh200_qtd *) &hw->hw_qtd_next);
-}
-
-static void __maybe_unused
-dbg_itd (const char *label, struct fusbh200_hcd *fusbh200, struct fusbh200_itd *itd)
-{
- fusbh200_dbg (fusbh200, "%s [%d] itd %p, next %08x, urb %p\n",
- label, itd->frame, itd, hc32_to_cpu(fusbh200, itd->hw_next),
- itd->urb);
- fusbh200_dbg (fusbh200,
- " trans: %08x %08x %08x %08x %08x %08x %08x %08x\n",
- hc32_to_cpu(fusbh200, itd->hw_transaction[0]),
- hc32_to_cpu(fusbh200, itd->hw_transaction[1]),
- hc32_to_cpu(fusbh200, itd->hw_transaction[2]),
- hc32_to_cpu(fusbh200, itd->hw_transaction[3]),
- hc32_to_cpu(fusbh200, itd->hw_transaction[4]),
- hc32_to_cpu(fusbh200, itd->hw_transaction[5]),
- hc32_to_cpu(fusbh200, itd->hw_transaction[6]),
- hc32_to_cpu(fusbh200, itd->hw_transaction[7]));
- fusbh200_dbg (fusbh200,
- " buf: %08x %08x %08x %08x %08x %08x %08x\n",
- hc32_to_cpu(fusbh200, itd->hw_bufp[0]),
- hc32_to_cpu(fusbh200, itd->hw_bufp[1]),
- hc32_to_cpu(fusbh200, itd->hw_bufp[2]),
- hc32_to_cpu(fusbh200, itd->hw_bufp[3]),
- hc32_to_cpu(fusbh200, itd->hw_bufp[4]),
- hc32_to_cpu(fusbh200, itd->hw_bufp[5]),
- hc32_to_cpu(fusbh200, itd->hw_bufp[6]));
- fusbh200_dbg (fusbh200, " index: %d %d %d %d %d %d %d %d\n",
- itd->index[0], itd->index[1], itd->index[2],
- itd->index[3], itd->index[4], itd->index[5],
- itd->index[6], itd->index[7]);
-}
-
-static int __maybe_unused
-dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
-{
- return scnprintf (buf, len,
- "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
- label, label [0] ? " " : "", status,
- (status & STS_ASS) ? " Async" : "",
- (status & STS_PSS) ? " Periodic" : "",
- (status & STS_RECL) ? " Recl" : "",
- (status & STS_HALT) ? " Halt" : "",
- (status & STS_IAA) ? " IAA" : "",
- (status & STS_FATAL) ? " FATAL" : "",
- (status & STS_FLR) ? " FLR" : "",
- (status & STS_PCD) ? " PCD" : "",
- (status & STS_ERR) ? " ERR" : "",
- (status & STS_INT) ? " INT" : ""
- );
-}
-
-static int __maybe_unused
-dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
-{
- return scnprintf (buf, len,
- "%s%sintrenable %02x%s%s%s%s%s%s",
- label, label [0] ? " " : "", enable,
- (enable & STS_IAA) ? " IAA" : "",
- (enable & STS_FATAL) ? " FATAL" : "",
- (enable & STS_FLR) ? " FLR" : "",
- (enable & STS_PCD) ? " PCD" : "",
- (enable & STS_ERR) ? " ERR" : "",
- (enable & STS_INT) ? " INT" : ""
- );
-}
-
-static const char *const fls_strings [] =
- { "1024", "512", "256", "??" };
-
-static int
-dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
-{
- return scnprintf (buf, len,
- "%s%scommand %07x %s=%d ithresh=%d%s%s%s "
- "period=%s%s %s",
- label, label [0] ? " " : "", command,
- (command & CMD_PARK) ? " park" : "(park)",
- CMD_PARK_CNT (command),
- (command >> 16) & 0x3f,
- (command & CMD_IAAD) ? " IAAD" : "",
- (command & CMD_ASE) ? " Async" : "",
- (command & CMD_PSE) ? " Periodic" : "",
- fls_strings [(command >> 2) & 0x3],
- (command & CMD_RESET) ? " Reset" : "",
- (command & CMD_RUN) ? "RUN" : "HALT"
- );
-}
-
-static int
-dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
-{
- char *sig;
-
- /* signaling state */
- switch (status & (3 << 10)) {
- case 0 << 10: sig = "se0"; break;
- case 1 << 10: sig = "k"; break; /* low speed */
- case 2 << 10: sig = "j"; break;
- default: sig = "?"; break;
- }
-
- return scnprintf (buf, len,
- "%s%sport:%d status %06x %d "
- "sig=%s%s%s%s%s%s%s%s",
- label, label [0] ? " " : "", port, status,
- status>>25,/*device address */
- sig,
- (status & PORT_RESET) ? " RESET" : "",
- (status & PORT_SUSPEND) ? " SUSPEND" : "",
- (status & PORT_RESUME) ? " RESUME" : "",
- (status & PORT_PEC) ? " PEC" : "",
- (status & PORT_PE) ? " PE" : "",
- (status & PORT_CSC) ? " CSC" : "",
- (status & PORT_CONNECT) ? " CONNECT" : "");
-}
-
-/* functions have the "wrong" filename when they're output... */
-#define dbg_status(fusbh200, label, status) { \
- char _buf [80]; \
- dbg_status_buf (_buf, sizeof _buf, label, status); \
- fusbh200_dbg (fusbh200, "%s\n", _buf); \
-}
-
-#define dbg_cmd(fusbh200, label, command) { \
- char _buf [80]; \
- dbg_command_buf (_buf, sizeof _buf, label, command); \
- fusbh200_dbg (fusbh200, "%s\n", _buf); \
-}
-
-#define dbg_port(fusbh200, label, port, status) { \
- char _buf [80]; \
- dbg_port_buf (_buf, sizeof _buf, label, port, status); \
- fusbh200_dbg (fusbh200, "%s\n", _buf); \
-}
-
-/*-------------------------------------------------------------------------*/
-
-/* troubleshooting help: expose state in debugfs */
-
-static int debug_async_open(struct inode *, struct file *);
-static int debug_periodic_open(struct inode *, struct file *);
-static int debug_registers_open(struct inode *, struct file *);
-static int debug_async_open(struct inode *, struct file *);
-
-static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
-static int debug_close(struct inode *, struct file *);
-
-static const struct file_operations debug_async_fops = {
- .owner = THIS_MODULE,
- .open = debug_async_open,
- .read = debug_output,
- .release = debug_close,
- .llseek = default_llseek,
-};
-static const struct file_operations debug_periodic_fops = {
- .owner = THIS_MODULE,
- .open = debug_periodic_open,
- .read = debug_output,
- .release = debug_close,
- .llseek = default_llseek,
-};
-static const struct file_operations debug_registers_fops = {
- .owner = THIS_MODULE,
- .open = debug_registers_open,
- .read = debug_output,
- .release = debug_close,
- .llseek = default_llseek,
-};
-
-static struct dentry *fusbh200_debug_root;
-
-struct debug_buffer {
- ssize_t (*fill_func)(struct debug_buffer *); /* fill method */
- struct usb_bus *bus;
- struct mutex mutex; /* protect filling of buffer */
- size_t count; /* number of characters filled into buffer */
- char *output_buf;
- size_t alloc_size;
-};
-
-#define speed_char(info1) ({ char tmp; \
- switch (info1 & (3 << 12)) { \
- case QH_FULL_SPEED: tmp = 'f'; break; \
- case QH_LOW_SPEED: tmp = 'l'; break; \
- case QH_HIGH_SPEED: tmp = 'h'; break; \
- default: tmp = '?'; break; \
- } tmp; })
-
-static inline char token_mark(struct fusbh200_hcd *fusbh200, __hc32 token)
-{
- __u32 v = hc32_to_cpu(fusbh200, token);
-
- if (v & QTD_STS_ACTIVE)
- return '*';
- if (v & QTD_STS_HALT)
- return '-';
- if (!IS_SHORT_READ (v))
- return ' ';
- /* tries to advance through hw_alt_next */
- return '/';
-}
-
-static void qh_lines (
- struct fusbh200_hcd *fusbh200,
- struct fusbh200_qh *qh,
- char **nextp,
- unsigned *sizep
-)
-{
- u32 scratch;
- u32 hw_curr;
- struct fusbh200_qtd *td;
- unsigned temp;
- unsigned size = *sizep;
- char *next = *nextp;
- char mark;
- __le32 list_end = FUSBH200_LIST_END(fusbh200);
- struct fusbh200_qh_hw *hw = qh->hw;
-
- if (hw->hw_qtd_next == list_end) /* NEC does this */
- mark = '@';
- else
- mark = token_mark(fusbh200, hw->hw_token);
- if (mark == '/') { /* qh_alt_next controls qh advance? */
- if ((hw->hw_alt_next & QTD_MASK(fusbh200))
- == fusbh200->async->hw->hw_alt_next)
- mark = '#'; /* blocked */
- else if (hw->hw_alt_next == list_end)
- mark = '.'; /* use hw_qtd_next */
- /* else alt_next points to some other qtd */
- }
- scratch = hc32_to_cpup(fusbh200, &hw->hw_info1);
- hw_curr = (mark == '*') ? hc32_to_cpup(fusbh200, &hw->hw_current) : 0;
- temp = scnprintf (next, size,
- "qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)",
- qh, scratch & 0x007f,
- speed_char (scratch),
- (scratch >> 8) & 0x000f,
- scratch, hc32_to_cpup(fusbh200, &hw->hw_info2),
- hc32_to_cpup(fusbh200, &hw->hw_token), mark,
- (cpu_to_hc32(fusbh200, QTD_TOGGLE) & hw->hw_token)
- ? "data1" : "data0",
- (hc32_to_cpup(fusbh200, &hw->hw_alt_next) >> 1) & 0x0f);
- size -= temp;
- next += temp;
-
- /* hc may be modifying the list as we read it ... */
- list_for_each_entry(td, &qh->qtd_list, qtd_list) {
- scratch = hc32_to_cpup(fusbh200, &td->hw_token);
- mark = ' ';
- if (hw_curr == td->qtd_dma)
- mark = '*';
- else if (hw->hw_qtd_next == cpu_to_hc32(fusbh200, td->qtd_dma))
- mark = '+';
- else if (QTD_LENGTH (scratch)) {
- if (td->hw_alt_next == fusbh200->async->hw->hw_alt_next)
- mark = '#';
- else if (td->hw_alt_next != list_end)
- mark = '/';
- }
- temp = snprintf (next, size,
- "\n\t%p%c%s len=%d %08x urb %p",
- td, mark, ({ char *tmp;
- switch ((scratch>>8)&0x03) {
- case 0: tmp = "out"; break;
- case 1: tmp = "in"; break;
- case 2: tmp = "setup"; break;
- default: tmp = "?"; break;
- } tmp;}),
- (scratch >> 16) & 0x7fff,
- scratch,
- td->urb);
- if (size < temp)
- temp = size;
- size -= temp;
- next += temp;
- if (temp == size)
- goto done;
- }
-
- temp = snprintf (next, size, "\n");
- if (size < temp)
- temp = size;
- size -= temp;
- next += temp;
-
-done:
- *sizep = size;
- *nextp = next;
-}
-
-static ssize_t fill_async_buffer(struct debug_buffer *buf)
-{
- struct usb_hcd *hcd;
- struct fusbh200_hcd *fusbh200;
- unsigned long flags;
- unsigned temp, size;
- char *next;
- struct fusbh200_qh *qh;
-
- hcd = bus_to_hcd(buf->bus);
- fusbh200 = hcd_to_fusbh200 (hcd);
- next = buf->output_buf;
- size = buf->alloc_size;
-
- *next = 0;
-
- /* dumps a snapshot of the async schedule.
- * usually empty except for long-term bulk reads, or head.
- * one QH per line, and TDs we know about
- */
- spin_lock_irqsave (&fusbh200->lock, flags);
- for (qh = fusbh200->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh)
- qh_lines (fusbh200, qh, &next, &size);
- if (fusbh200->async_unlink && size > 0) {
- temp = scnprintf(next, size, "\nunlink =\n");
- size -= temp;
- next += temp;
-
- for (qh = fusbh200->async_unlink; size > 0 && qh;
- qh = qh->unlink_next)
- qh_lines (fusbh200, qh, &next, &size);
- }
- spin_unlock_irqrestore (&fusbh200->lock, flags);
-
- return strlen(buf->output_buf);
-}
-
-#define DBG_SCHED_LIMIT 64
-static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
-{
- struct usb_hcd *hcd;
- struct fusbh200_hcd *fusbh200;
- unsigned long flags;
- union fusbh200_shadow p, *seen;
- unsigned temp, size, seen_count;
- char *next;
- unsigned i;
- __hc32 tag;
-
- seen = kmalloc(DBG_SCHED_LIMIT * sizeof *seen, GFP_ATOMIC);
- if (!seen)
- return 0;
- seen_count = 0;
-
- hcd = bus_to_hcd(buf->bus);
- fusbh200 = hcd_to_fusbh200 (hcd);
- next = buf->output_buf;
- size = buf->alloc_size;
-
- temp = scnprintf (next, size, "size = %d\n", fusbh200->periodic_size);
- size -= temp;
- next += temp;
-
- /* dump a snapshot of the periodic schedule.
- * iso changes, interrupt usually doesn't.
- */
- spin_lock_irqsave (&fusbh200->lock, flags);
- for (i = 0; i < fusbh200->periodic_size; i++) {
- p = fusbh200->pshadow [i];
- if (likely (!p.ptr))
- continue;
- tag = Q_NEXT_TYPE(fusbh200, fusbh200->periodic [i]);
-
- temp = scnprintf (next, size, "%4d: ", i);
- size -= temp;
- next += temp;
-
- do {
- struct fusbh200_qh_hw *hw;
-
- switch (hc32_to_cpu(fusbh200, tag)) {
- case Q_TYPE_QH:
- hw = p.qh->hw;
- temp = scnprintf (next, size, " qh%d-%04x/%p",
- p.qh->period,
- hc32_to_cpup(fusbh200,
- &hw->hw_info2)
- /* uframe masks */
- & (QH_CMASK | QH_SMASK),
- p.qh);
- size -= temp;
- next += temp;
- /* don't repeat what follows this qh */
- for (temp = 0; temp < seen_count; temp++) {
- if (seen [temp].ptr != p.ptr)
- continue;
- if (p.qh->qh_next.ptr) {
- temp = scnprintf (next, size,
- " ...");
- size -= temp;
- next += temp;
- }
- break;
- }
- /* show more info the first time around */
- if (temp == seen_count) {
- u32 scratch = hc32_to_cpup(fusbh200,
- &hw->hw_info1);
- struct fusbh200_qtd *qtd;
- char *type = "";
-
- /* count tds, get ep direction */
- temp = 0;
- list_for_each_entry (qtd,
- &p.qh->qtd_list,
- qtd_list) {
- temp++;
- switch (0x03 & (hc32_to_cpu(
- fusbh200,
- qtd->hw_token) >> 8)) {
- case 0: type = "out"; continue;
- case 1: type = "in"; continue;
- }
- }
-
- temp = scnprintf (next, size,
- " (%c%d ep%d%s "
- "[%d/%d] q%d p%d)",
- speed_char (scratch),
- scratch & 0x007f,
- (scratch >> 8) & 0x000f, type,
- p.qh->usecs, p.qh->c_usecs,
- temp,
- 0x7ff & (scratch >> 16));
-
- if (seen_count < DBG_SCHED_LIMIT)
- seen [seen_count++].qh = p.qh;
- } else
- temp = 0;
- tag = Q_NEXT_TYPE(fusbh200, hw->hw_next);
- p = p.qh->qh_next;
- break;
- case Q_TYPE_FSTN:
- temp = scnprintf (next, size,
- " fstn-%8x/%p", p.fstn->hw_prev,
- p.fstn);
- tag = Q_NEXT_TYPE(fusbh200, p.fstn->hw_next);
- p = p.fstn->fstn_next;
- break;
- case Q_TYPE_ITD:
- temp = scnprintf (next, size,
- " itd/%p", p.itd);
- tag = Q_NEXT_TYPE(fusbh200, p.itd->hw_next);
- p = p.itd->itd_next;
- break;
- }
- size -= temp;
- next += temp;
- } while (p.ptr);
-
- temp = scnprintf (next, size, "\n");
- size -= temp;
- next += temp;
- }
- spin_unlock_irqrestore (&fusbh200->lock, flags);
- kfree (seen);
-
- return buf->alloc_size - size;
-}
-#undef DBG_SCHED_LIMIT
-
-static const char *rh_state_string(struct fusbh200_hcd *fusbh200)
-{
- switch (fusbh200->rh_state) {
- case FUSBH200_RH_HALTED:
- return "halted";
- case FUSBH200_RH_SUSPENDED:
- return "suspended";
- case FUSBH200_RH_RUNNING:
- return "running";
- case FUSBH200_RH_STOPPING:
- return "stopping";
- }
- return "?";
-}
-
-static ssize_t fill_registers_buffer(struct debug_buffer *buf)
-{
- struct usb_hcd *hcd;
- struct fusbh200_hcd *fusbh200;
- unsigned long flags;
- unsigned temp, size, i;
- char *next, scratch [80];
- static char fmt [] = "%*s\n";
- static char label [] = "";
-
- hcd = bus_to_hcd(buf->bus);
- fusbh200 = hcd_to_fusbh200 (hcd);
- next = buf->output_buf;
- size = buf->alloc_size;
-
- spin_lock_irqsave (&fusbh200->lock, flags);
-
- if (!HCD_HW_ACCESSIBLE(hcd)) {
- size = scnprintf (next, size,
- "bus %s, device %s\n"
- "%s\n"
- "SUSPENDED (no register access)\n",
- hcd->self.controller->bus->name,
- dev_name(hcd->self.controller),
- hcd->product_desc);
- goto done;
- }
-
- /* Capability Registers */
- i = HC_VERSION(fusbh200, fusbh200_readl(fusbh200, &fusbh200->caps->hc_capbase));
- temp = scnprintf (next, size,
- "bus %s, device %s\n"
- "%s\n"
- "EHCI %x.%02x, rh state %s\n",
- hcd->self.controller->bus->name,
- dev_name(hcd->self.controller),
- hcd->product_desc,
- i >> 8, i & 0x0ff, rh_state_string(fusbh200));
- size -= temp;
- next += temp;
-
- // FIXME interpret both types of params
- i = fusbh200_readl(fusbh200, &fusbh200->caps->hcs_params);
- temp = scnprintf (next, size, "structural params 0x%08x\n", i);
- size -= temp;
- next += temp;
-
- i = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params);
- temp = scnprintf (next, size, "capability params 0x%08x\n", i);
- size -= temp;
- next += temp;
-
- /* Operational Registers */
- temp = dbg_status_buf (scratch, sizeof scratch, label,
- fusbh200_readl(fusbh200, &fusbh200->regs->status));
- temp = scnprintf (next, size, fmt, temp, scratch);
- size -= temp;
- next += temp;
-
- temp = dbg_command_buf (scratch, sizeof scratch, label,
- fusbh200_readl(fusbh200, &fusbh200->regs->command));
- temp = scnprintf (next, size, fmt, temp, scratch);
- size -= temp;
- next += temp;
-
- temp = dbg_intr_buf (scratch, sizeof scratch, label,
- fusbh200_readl(fusbh200, &fusbh200->regs->intr_enable));
- temp = scnprintf (next, size, fmt, temp, scratch);
- size -= temp;
- next += temp;
-
- temp = scnprintf (next, size, "uframe %04x\n",
- fusbh200_read_frame_index(fusbh200));
- size -= temp;
- next += temp;
-
- if (fusbh200->async_unlink) {
- temp = scnprintf(next, size, "async unlink qh %p\n",
- fusbh200->async_unlink);
- size -= temp;
- next += temp;
- }
-
- temp = scnprintf (next, size,
- "irq normal %ld err %ld iaa %ld (lost %ld)\n",
- fusbh200->stats.normal, fusbh200->stats.error, fusbh200->stats.iaa,
- fusbh200->stats.lost_iaa);
- size -= temp;
- next += temp;
-
- temp = scnprintf (next, size, "complete %ld unlink %ld\n",
- fusbh200->stats.complete, fusbh200->stats.unlink);
- size -= temp;
- next += temp;
-
-done:
- spin_unlock_irqrestore (&fusbh200->lock, flags);
-
- return buf->alloc_size - size;
-}
-
-static struct debug_buffer *alloc_buffer(struct usb_bus *bus,
- ssize_t (*fill_func)(struct debug_buffer *))
-{
- struct debug_buffer *buf;
-
- buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
-
- if (buf) {
- buf->bus = bus;
- buf->fill_func = fill_func;
- mutex_init(&buf->mutex);
- buf->alloc_size = PAGE_SIZE;
- }
-
- return buf;
-}
-
-static int fill_buffer(struct debug_buffer *buf)
-{
- int ret = 0;
-
- if (!buf->output_buf)
- buf->output_buf = vmalloc(buf->alloc_size);
-
- if (!buf->output_buf) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = buf->fill_func(buf);
-
- if (ret >= 0) {
- buf->count = ret;
- ret = 0;
- }
-
-out:
- return ret;
-}
-
-static ssize_t debug_output(struct file *file, char __user *user_buf,
- size_t len, loff_t *offset)
-{
- struct debug_buffer *buf = file->private_data;
- int ret = 0;
-
- mutex_lock(&buf->mutex);
- if (buf->count == 0) {
- ret = fill_buffer(buf);
- if (ret != 0) {
- mutex_unlock(&buf->mutex);
- goto out;
- }
- }
- mutex_unlock(&buf->mutex);
-
- ret = simple_read_from_buffer(user_buf, len, offset,
- buf->output_buf, buf->count);
-
-out:
- return ret;
-
-}
-
-static int debug_close(struct inode *inode, struct file *file)
-{
- struct debug_buffer *buf = file->private_data;
-
- if (buf) {
- vfree(buf->output_buf);
- kfree(buf);
- }
-
- return 0;
-}
-static int debug_async_open(struct inode *inode, struct file *file)
-{
- file->private_data = alloc_buffer(inode->i_private, fill_async_buffer);
-
- return file->private_data ? 0 : -ENOMEM;
-}
-
-static int debug_periodic_open(struct inode *inode, struct file *file)
-{
- struct debug_buffer *buf;
- buf = alloc_buffer(inode->i_private, fill_periodic_buffer);
- if (!buf)
- return -ENOMEM;
-
- buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8)*PAGE_SIZE;
- file->private_data = buf;
- return 0;
-}
-
-static int debug_registers_open(struct inode *inode, struct file *file)
-{
- file->private_data = alloc_buffer(inode->i_private,
- fill_registers_buffer);
-
- return file->private_data ? 0 : -ENOMEM;
-}
-
-static inline void create_debug_files (struct fusbh200_hcd *fusbh200)
-{
- struct usb_bus *bus = &fusbh200_to_hcd(fusbh200)->self;
-
- fusbh200->debug_dir = debugfs_create_dir(bus->bus_name, fusbh200_debug_root);
- if (!fusbh200->debug_dir)
- return;
-
- if (!debugfs_create_file("async", S_IRUGO, fusbh200->debug_dir, bus,
- &debug_async_fops))
- goto file_error;
-
- if (!debugfs_create_file("periodic", S_IRUGO, fusbh200->debug_dir, bus,
- &debug_periodic_fops))
- goto file_error;
-
- if (!debugfs_create_file("registers", S_IRUGO, fusbh200->debug_dir, bus,
- &debug_registers_fops))
- goto file_error;
-
- return;
-
-file_error:
- debugfs_remove_recursive(fusbh200->debug_dir);
-}
-
-static inline void remove_debug_files (struct fusbh200_hcd *fusbh200)
-{
- debugfs_remove_recursive(fusbh200->debug_dir);
-}
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * handshake - spin reading hc until handshake completes or fails
- * @ptr: address of hc register to be read
- * @mask: bits to look at in result of read
- * @done: value of those bits when handshake succeeds
- * @usec: timeout in microseconds
- *
- * Returns negative errno, or zero on success
- *
- * Success happens when the "mask" bits have the specified value (hardware
- * handshake done). There are two failure modes: "usec" have passed (major
- * hardware flakeout), or the register reads as all-ones (hardware removed).
- *
- * That last failure should_only happen in cases like physical cardbus eject
- * before driver shutdown. But it also seems to be caused by bugs in cardbus
- * bridge shutdown: shutting down the bridge before the devices using it.
- */
-static int handshake (struct fusbh200_hcd *fusbh200, void __iomem *ptr,
- u32 mask, u32 done, int usec)
-{
- u32 result;
-
- do {
- result = fusbh200_readl(fusbh200, ptr);
- if (result == ~(u32)0) /* card removed */
- return -ENODEV;
- result &= mask;
- if (result == done)
- return 0;
- udelay (1);
- usec--;
- } while (usec > 0);
- return -ETIMEDOUT;
-}
-
-/*
- * Force HC to halt state from unknown (EHCI spec section 2.3).
- * Must be called with interrupts enabled and the lock not held.
- */
-static int fusbh200_halt (struct fusbh200_hcd *fusbh200)
-{
- u32 temp;
-
- spin_lock_irq(&fusbh200->lock);
-
- /* disable any irqs left enabled by previous code */
- fusbh200_writel(fusbh200, 0, &fusbh200->regs->intr_enable);
-
- /*
- * This routine gets called during probe before fusbh200->command
- * has been initialized, so we can't rely on its value.
- */
- fusbh200->command &= ~CMD_RUN;
- temp = fusbh200_readl(fusbh200, &fusbh200->regs->command);
- temp &= ~(CMD_RUN | CMD_IAAD);
- fusbh200_writel(fusbh200, temp, &fusbh200->regs->command);
-
- spin_unlock_irq(&fusbh200->lock);
- synchronize_irq(fusbh200_to_hcd(fusbh200)->irq);
-
- return handshake(fusbh200, &fusbh200->regs->status,
- STS_HALT, STS_HALT, 16 * 125);
-}
-
-/*
- * Reset a non-running (STS_HALT == 1) controller.
- * Must be called with interrupts enabled and the lock not held.
- */
-static int fusbh200_reset (struct fusbh200_hcd *fusbh200)
-{
- int retval;
- u32 command = fusbh200_readl(fusbh200, &fusbh200->regs->command);
-
- /* If the EHCI debug controller is active, special care must be
- * taken before and after a host controller reset */
- if (fusbh200->debug && !dbgp_reset_prep(fusbh200_to_hcd(fusbh200)))
- fusbh200->debug = NULL;
-
- command |= CMD_RESET;
- dbg_cmd (fusbh200, "reset", command);
- fusbh200_writel(fusbh200, command, &fusbh200->regs->command);
- fusbh200->rh_state = FUSBH200_RH_HALTED;
- fusbh200->next_statechange = jiffies;
- retval = handshake (fusbh200, &fusbh200->regs->command,
- CMD_RESET, 0, 250 * 1000);
-
- if (retval)
- return retval;
-
- if (fusbh200->debug)
- dbgp_external_startup(fusbh200_to_hcd(fusbh200));
-
- fusbh200->port_c_suspend = fusbh200->suspended_ports =
- fusbh200->resuming_ports = 0;
- return retval;
-}
-
-/*
- * Idle the controller (turn off the schedules).
- * Must be called with interrupts enabled and the lock not held.
- */
-static void fusbh200_quiesce (struct fusbh200_hcd *fusbh200)
-{
- u32 temp;
-
- if (fusbh200->rh_state != FUSBH200_RH_RUNNING)
- return;
-
- /* wait for any schedule enables/disables to take effect */
- temp = (fusbh200->command << 10) & (STS_ASS | STS_PSS);
- handshake(fusbh200, &fusbh200->regs->status, STS_ASS | STS_PSS, temp, 16 * 125);
-
- /* then disable anything that's still active */
- spin_lock_irq(&fusbh200->lock);
- fusbh200->command &= ~(CMD_ASE | CMD_PSE);
- fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
- spin_unlock_irq(&fusbh200->lock);
-
- /* hardware can take 16 microframes to turn off ... */
- handshake(fusbh200, &fusbh200->regs->status, STS_ASS | STS_PSS, 0, 16 * 125);
-}
-
-/*-------------------------------------------------------------------------*/
-
-static void end_unlink_async(struct fusbh200_hcd *fusbh200);
-static void unlink_empty_async(struct fusbh200_hcd *fusbh200);
-static void fusbh200_work(struct fusbh200_hcd *fusbh200);
-static void start_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh);
-static void end_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh);
-
-/*-------------------------------------------------------------------------*/
-
-/* Set a bit in the USBCMD register */
-static void fusbh200_set_command_bit(struct fusbh200_hcd *fusbh200, u32 bit)
-{
- fusbh200->command |= bit;
- fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
-
- /* unblock posted write */
- fusbh200_readl(fusbh200, &fusbh200->regs->command);
-}
-
-/* Clear a bit in the USBCMD register */
-static void fusbh200_clear_command_bit(struct fusbh200_hcd *fusbh200, u32 bit)
-{
- fusbh200->command &= ~bit;
- fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
-
- /* unblock posted write */
- fusbh200_readl(fusbh200, &fusbh200->regs->command);
-}
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * EHCI timer support... Now using hrtimers.
- *
- * Lots of different events are triggered from fusbh200->hrtimer. Whenever
- * the timer routine runs, it checks each possible event; events that are
- * currently enabled and whose expiration time has passed get handled.
- * The set of enabled events is stored as a collection of bitflags in
- * fusbh200->enabled_hrtimer_events, and they are numbered in order of
- * increasing delay values (ranging between 1 ms and 100 ms).
- *
- * Rather than implementing a sorted list or tree of all pending events,
- * we keep track only of the lowest-numbered pending event, in
- * fusbh200->next_hrtimer_event. Whenever fusbh200->hrtimer gets restarted, its
- * expiration time is set to the timeout value for this event.
- *
- * As a result, events might not get handled right away; the actual delay
- * could be anywhere up to twice the requested delay. This doesn't
- * matter, because none of the events are especially time-critical. The
- * ones that matter most all have a delay of 1 ms, so they will be
- * handled after 2 ms at most, which is okay. In addition to this, we
- * allow for an expiration range of 1 ms.
- */
-
-/*
- * Delay lengths for the hrtimer event types.
- * Keep this list sorted by delay length, in the same order as
- * the event types indexed by enum fusbh200_hrtimer_event in fusbh200.h.
- */
-static unsigned event_delays_ns[] = {
- 1 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_POLL_ASS */
- 1 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_POLL_PSS */
- 1 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_POLL_DEAD */
- 1125 * NSEC_PER_USEC, /* FUSBH200_HRTIMER_UNLINK_INTR */
- 2 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_FREE_ITDS */
- 6 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_ASYNC_UNLINKS */
- 10 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_IAA_WATCHDOG */
- 10 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_DISABLE_PERIODIC */
- 15 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_DISABLE_ASYNC */
- 100 * NSEC_PER_MSEC, /* FUSBH200_HRTIMER_IO_WATCHDOG */
-};
-
-/* Enable a pending hrtimer event */
-static void fusbh200_enable_event(struct fusbh200_hcd *fusbh200, unsigned event,
- bool resched)
-{
- ktime_t *timeout = &fusbh200->hr_timeouts[event];
-
- if (resched)
- *timeout = ktime_add(ktime_get(),
- ktime_set(0, event_delays_ns[event]));
- fusbh200->enabled_hrtimer_events |= (1 << event);
-
- /* Track only the lowest-numbered pending event */
- if (event < fusbh200->next_hrtimer_event) {
- fusbh200->next_hrtimer_event = event;
- hrtimer_start_range_ns(&fusbh200->hrtimer, *timeout,
- NSEC_PER_MSEC, HRTIMER_MODE_ABS);
- }
-}
-
-
-/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
-static void fusbh200_poll_ASS(struct fusbh200_hcd *fusbh200)
-{
- unsigned actual, want;
-
- /* Don't enable anything if the controller isn't running (e.g., died) */
- if (fusbh200->rh_state != FUSBH200_RH_RUNNING)
- return;
-
- want = (fusbh200->command & CMD_ASE) ? STS_ASS : 0;
- actual = fusbh200_readl(fusbh200, &fusbh200->regs->status) & STS_ASS;
-
- if (want != actual) {
-
- /* Poll again later, but give up after about 20 ms */
- if (fusbh200->ASS_poll_count++ < 20) {
- fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_POLL_ASS, true);
- return;
- }
- fusbh200_dbg(fusbh200, "Waited too long for the async schedule status (%x/%x), giving up\n",
- want, actual);
- }
- fusbh200->ASS_poll_count = 0;
-
- /* The status is up-to-date; restart or stop the schedule as needed */
- if (want == 0) { /* Stopped */
- if (fusbh200->async_count > 0)
- fusbh200_set_command_bit(fusbh200, CMD_ASE);
-
- } else { /* Running */
- if (fusbh200->async_count == 0) {
-
- /* Turn off the schedule after a while */
- fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_DISABLE_ASYNC,
- true);
- }
- }
-}
-
-/* Turn off the async schedule after a brief delay */
-static void fusbh200_disable_ASE(struct fusbh200_hcd *fusbh200)
-{
- fusbh200_clear_command_bit(fusbh200, CMD_ASE);
-}
-
-
-/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
-static void fusbh200_poll_PSS(struct fusbh200_hcd *fusbh200)
-{
- unsigned actual, want;
-
- /* Don't do anything if the controller isn't running (e.g., died) */
- if (fusbh200->rh_state != FUSBH200_RH_RUNNING)
- return;
-
- want = (fusbh200->command & CMD_PSE) ? STS_PSS : 0;
- actual = fusbh200_readl(fusbh200, &fusbh200->regs->status) & STS_PSS;
-
- if (want != actual) {
-
- /* Poll again later, but give up after about 20 ms */
- if (fusbh200->PSS_poll_count++ < 20) {
- fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_POLL_PSS, true);
- return;
- }
- fusbh200_dbg(fusbh200, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
- want, actual);
- }
- fusbh200->PSS_poll_count = 0;
-
- /* The status is up-to-date; restart or stop the schedule as needed */
- if (want == 0) { /* Stopped */
- if (fusbh200->periodic_count > 0)
- fusbh200_set_command_bit(fusbh200, CMD_PSE);
-
- } else { /* Running */
- if (fusbh200->periodic_count == 0) {
-
- /* Turn off the schedule after a while */
- fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_DISABLE_PERIODIC,
- true);
- }
- }
-}
-
-/* Turn off the periodic schedule after a brief delay */
-static void fusbh200_disable_PSE(struct fusbh200_hcd *fusbh200)
-{
- fusbh200_clear_command_bit(fusbh200, CMD_PSE);
-}
-
-
-/* Poll the STS_HALT status bit; see when a dead controller stops */
-static void fusbh200_handle_controller_death(struct fusbh200_hcd *fusbh200)
-{
- if (!(fusbh200_readl(fusbh200, &fusbh200->regs->status) & STS_HALT)) {
-
- /* Give up after a few milliseconds */
- if (fusbh200->died_poll_count++ < 5) {
- /* Try again later */
- fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_POLL_DEAD, true);
- return;
- }
- fusbh200_warn(fusbh200, "Waited too long for the controller to stop, giving up\n");
- }
-
- /* Clean up the mess */
- fusbh200->rh_state = FUSBH200_RH_HALTED;
- fusbh200_writel(fusbh200, 0, &fusbh200->regs->intr_enable);
- fusbh200_work(fusbh200);
- end_unlink_async(fusbh200);
-
- /* Not in process context, so don't try to reset the controller */
-}
-
-
-/* Handle unlinked interrupt QHs once they are gone from the hardware */
-static void fusbh200_handle_intr_unlinks(struct fusbh200_hcd *fusbh200)
-{
- bool stopped = (fusbh200->rh_state < FUSBH200_RH_RUNNING);
-
- /*
- * Process all the QHs on the intr_unlink list that were added
- * before the current unlink cycle began. The list is in
- * temporal order, so stop when we reach the first entry in the
- * current cycle. But if the root hub isn't running then
- * process all the QHs on the list.
- */
- fusbh200->intr_unlinking = true;
- while (fusbh200->intr_unlink) {
- struct fusbh200_qh *qh = fusbh200->intr_unlink;
-
- if (!stopped && qh->unlink_cycle == fusbh200->intr_unlink_cycle)
- break;
- fusbh200->intr_unlink = qh->unlink_next;
- qh->unlink_next = NULL;
- end_unlink_intr(fusbh200, qh);
- }
-
- /* Handle remaining entries later */
- if (fusbh200->intr_unlink) {
- fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_UNLINK_INTR, true);
- ++fusbh200->intr_unlink_cycle;
- }
- fusbh200->intr_unlinking = false;
-}
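/*
 * Editor's sketch (illustration only): draining a singly-linked unlink
 * list up to, but not including, entries tagged with the current cycle
 * number, as the routine above does for interrupt QHs.  Types and names
 * are made up; plain user-space C.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
        int cycle;                      /* unlink cycle the entry was queued in */
        struct node *next;
};

static void drain(struct node **head, int current_cycle, int stopped)
{
        while (*head) {
                struct node *n = *head;

                /* stop at the first entry queued in the current cycle */
                if (!stopped && n->cycle == current_cycle)
                        break;
                *head = n->next;
                printf("finished entry from cycle %d\n", n->cycle);
                free(n);
        }
}

int main(void)
{
        struct node *head = NULL, **tail = &head;
        int i;

        for (i = 0; i < 4; i++) {
                struct node *n = malloc(sizeof(*n));

                n->cycle = (i < 2) ? 0 : 1;     /* two old entries, two current */
                n->next = NULL;
                *tail = n;
                tail = &n->next;
        }
        drain(&head, 1, 0);     /* completes only the cycle-0 entries */
        drain(&head, 1, 1);     /* "stopped": completes everything left */
        return 0;
}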
-
-
-/* Start another free-iTDs/siTDs cycle */
-static void start_free_itds(struct fusbh200_hcd *fusbh200)
-{
- if (!(fusbh200->enabled_hrtimer_events & BIT(FUSBH200_HRTIMER_FREE_ITDS))) {
- fusbh200->last_itd_to_free = list_entry(
- fusbh200->cached_itd_list.prev,
- struct fusbh200_itd, itd_list);
- fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_FREE_ITDS, true);
- }
-}
-
-/* Wait for controller to stop using old iTDs and siTDs */
-static void end_free_itds(struct fusbh200_hcd *fusbh200)
-{
- struct fusbh200_itd *itd, *n;
-
- if (fusbh200->rh_state < FUSBH200_RH_RUNNING) {
- fusbh200->last_itd_to_free = NULL;
- }
-
- list_for_each_entry_safe(itd, n, &fusbh200->cached_itd_list, itd_list) {
- list_del(&itd->itd_list);
- dma_pool_free(fusbh200->itd_pool, itd, itd->itd_dma);
- if (itd == fusbh200->last_itd_to_free)
- break;
- }
-
- if (!list_empty(&fusbh200->cached_itd_list))
- start_free_itds(fusbh200);
-}
-
-
-/* Handle lost (or very late) IAA interrupts */
-static void fusbh200_iaa_watchdog(struct fusbh200_hcd *fusbh200)
-{
- if (fusbh200->rh_state != FUSBH200_RH_RUNNING)
- return;
-
- /*
- * Lost IAA irqs wedge things badly; seen first with a vt8235.
- * So we need this watchdog, but must protect it against both
- * (a) SMP races against real IAA firing and retriggering, and
- * (b) clean HC shutdown, when IAA watchdog was pending.
- */
- if (fusbh200->async_iaa) {
- u32 cmd, status;
-
- /* If we get here, IAA is *REALLY* late. It's barely
- * conceivable that the system is so busy that CMD_IAAD
- * is still legitimately set, so let's be sure it's
- * clear before we read STS_IAA. (The HC should clear
- * CMD_IAAD when it sets STS_IAA.)
- */
- cmd = fusbh200_readl(fusbh200, &fusbh200->regs->command);
-
- /*
- * If IAA is set here it either legitimately triggered
- * after the watchdog timer expired (_way_ late, so we'll
- * still count it as lost) ... or a silicon erratum:
- * - VIA seems to set IAA without triggering the IRQ;
- * - IAAD potentially cleared without setting IAA.
- */
- status = fusbh200_readl(fusbh200, &fusbh200->regs->status);
- if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
- COUNT(fusbh200->stats.lost_iaa);
- fusbh200_writel(fusbh200, STS_IAA, &fusbh200->regs->status);
- }
-
- fusbh200_dbg(fusbh200, "IAA watchdog: status %x cmd %x\n",
- status, cmd);
- end_unlink_async(fusbh200);
- }
-}
-
-
-/* Enable the I/O watchdog, if appropriate */
-static void turn_on_io_watchdog(struct fusbh200_hcd *fusbh200)
-{
- /* Not needed if the controller isn't running or it's already enabled */
- if (fusbh200->rh_state != FUSBH200_RH_RUNNING ||
- (fusbh200->enabled_hrtimer_events &
- BIT(FUSBH200_HRTIMER_IO_WATCHDOG)))
- return;
-
- /*
- * Isochronous transfers always need the watchdog.
- * For other sorts we use it only if the flag is set.
- */
- if (fusbh200->isoc_count > 0 || (fusbh200->need_io_watchdog &&
- fusbh200->async_count + fusbh200->intr_count > 0))
- fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_IO_WATCHDOG, true);
-}
-
-
-/*
- * Handler functions for the hrtimer event types.
- * Keep this array in the same order as the event types indexed by
- * enum fusbh200_hrtimer_event in fusbh200.h.
- */
-static void (*event_handlers[])(struct fusbh200_hcd *) = {
- fusbh200_poll_ASS, /* FUSBH200_HRTIMER_POLL_ASS */
- fusbh200_poll_PSS, /* FUSBH200_HRTIMER_POLL_PSS */
- fusbh200_handle_controller_death, /* FUSBH200_HRTIMER_POLL_DEAD */
- fusbh200_handle_intr_unlinks, /* FUSBH200_HRTIMER_UNLINK_INTR */
- end_free_itds, /* FUSBH200_HRTIMER_FREE_ITDS */
- unlink_empty_async, /* FUSBH200_HRTIMER_ASYNC_UNLINKS */
- fusbh200_iaa_watchdog, /* FUSBH200_HRTIMER_IAA_WATCHDOG */
- fusbh200_disable_PSE, /* FUSBH200_HRTIMER_DISABLE_PERIODIC */
- fusbh200_disable_ASE, /* FUSBH200_HRTIMER_DISABLE_ASYNC */
- fusbh200_work, /* FUSBH200_HRTIMER_IO_WATCHDOG */
-};
-
-static enum hrtimer_restart fusbh200_hrtimer_func(struct hrtimer *t)
-{
- struct fusbh200_hcd *fusbh200 = container_of(t, struct fusbh200_hcd, hrtimer);
- ktime_t now;
- unsigned long events;
- unsigned long flags;
- unsigned e;
-
- spin_lock_irqsave(&fusbh200->lock, flags);
-
- events = fusbh200->enabled_hrtimer_events;
- fusbh200->enabled_hrtimer_events = 0;
- fusbh200->next_hrtimer_event = FUSBH200_HRTIMER_NO_EVENT;
-
- /*
- * Check each pending event. If its time has expired, handle
- * the event; otherwise re-enable it.
- */
- now = ktime_get();
- for_each_set_bit(e, &events, FUSBH200_HRTIMER_NUM_EVENTS) {
- if (now.tv64 >= fusbh200->hr_timeouts[e].tv64)
- event_handlers[e](fusbh200);
- else
- fusbh200_enable_event(fusbh200, e, false);
- }
-
- spin_unlock_irqrestore(&fusbh200->lock, flags);
- return HRTIMER_NORESTART;
-}
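/*
 * Editor's sketch (illustration only, plain user-space C): the dispatch
 * pattern used above -- snapshot the enabled-event bitmask, clear it, then
 * call one handler per set bit in increasing bit order.  Names are invented.
 */
#include <stdio.h>

static void on_poll(void)     { printf("poll schedule status\n"); }
static void on_free(void)     { printf("free cached descriptors\n"); }
static void on_watchdog(void) { printf("I/O watchdog\n"); }

static void (*handlers[])(void) = { on_poll, on_free, on_watchdog };

int main(void)
{
        unsigned long events = (1UL << 0) | (1UL << 2); /* snapshot of pending bits */
        unsigned int e;

        for (e = 0; e < 3; e++)
                if (events & (1UL << e))
                        handlers[e]();          /* lowest-numbered event first */
        return 0;
}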
-
-/*-------------------------------------------------------------------------*/
-
-#define fusbh200_bus_suspend NULL
-#define fusbh200_bus_resume NULL
-
-/*-------------------------------------------------------------------------*/
-
-static int check_reset_complete (
- struct fusbh200_hcd *fusbh200,
- int index,
- u32 __iomem *status_reg,
- int port_status
-) {
- if (!(port_status & PORT_CONNECT))
- return port_status;
-
- /* if reset finished and it's still not enabled -- handoff */
- if (!(port_status & PORT_PE)) {
- /* with integrated TT, there's nobody to hand it to! */
- fusbh200_dbg (fusbh200,
- "Failed to enable port %d on root hub TT\n",
- index+1);
- return port_status;
- } else {
- fusbh200_dbg(fusbh200, "port %d reset complete, port enabled\n",
- index + 1);
- }
-
- return port_status;
-}
-
-/*-------------------------------------------------------------------------*/
-
-
-/* build "status change" packet (one or two bytes) from HC registers */
-
-static int
-fusbh200_hub_status_data (struct usb_hcd *hcd, char *buf)
-{
- struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
- u32 temp, status;
- u32 mask;
- int retval = 1;
- unsigned long flags;
-
- /* init status to no-changes */
- buf [0] = 0;
-
- /* Inform the core about resumes-in-progress by returning
- * a non-zero value even if there are no status changes.
- */
- status = fusbh200->resuming_ports;
-
- mask = PORT_CSC | PORT_PEC;
- // PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND
-
- /* no hub change reports (bit 0) for now (power, ...) */
-
- /* port N changes (bit N)? */
- spin_lock_irqsave (&fusbh200->lock, flags);
-
- temp = fusbh200_readl(fusbh200, &fusbh200->regs->port_status);
-
- /*
- * Return status information even for ports with OWNER set.
- * Otherwise hub_wq wouldn't see the disconnect event when a
- * high-speed device is switched over to the companion
- * controller by the user.
- */
-
- if ((temp & mask) != 0 || test_bit(0, &fusbh200->port_c_suspend)
- || (fusbh200->reset_done[0] && time_after_eq(
- jiffies, fusbh200->reset_done[0]))) {
- buf [0] |= 1 << 1;
- status = STS_PCD;
- }
- /* FIXME autosuspend idle root hubs */
- spin_unlock_irqrestore (&fusbh200->lock, flags);
- return status ? retval : 0;
-}
-
-/*-------------------------------------------------------------------------*/
-
-static void
-fusbh200_hub_descriptor (
- struct fusbh200_hcd *fusbh200,
- struct usb_hub_descriptor *desc
-) {
- int ports = HCS_N_PORTS (fusbh200->hcs_params);
- u16 temp;
-
- desc->bDescriptorType = USB_DT_HUB;
- desc->bPwrOn2PwrGood = 10; /* fusbh200 1.0, 2.3.9 says 20ms max */
- desc->bHubContrCurrent = 0;
-
- desc->bNbrPorts = ports;
- temp = 1 + (ports / 8);
- desc->bDescLength = 7 + 2 * temp;
-
- /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
- memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
- memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
-
- temp = HUB_CHAR_INDV_PORT_OCPM; /* per-port overcurrent reporting */
- temp |= HUB_CHAR_NO_LPSM; /* no power switching */
- desc->wHubCharacteristics = cpu_to_le16(temp);
-}
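/*
 * Editor's worked example (not part of the removed driver): the descriptor
 * length computed above is 7 header bytes plus two variable-length bitmaps
 * (DeviceRemovable and the legacy PortPwrCtrlMask), each 1 + ports/8 bytes.
 * A single-port root hub is assumed here; the code above reads the real
 * count from hcs_params.
 */
#include <stdio.h>

int main(void)
{
        int ports = 1;                  /* assumed single-port root hub */
        int temp = 1 + ports / 8;       /* bytes per bitmap */

        printf("bDescLength = %d\n", 7 + 2 * temp);     /* prints 9 */
        return 0;
}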
-
-/*-------------------------------------------------------------------------*/
-
-static int fusbh200_hub_control (
- struct usb_hcd *hcd,
- u16 typeReq,
- u16 wValue,
- u16 wIndex,
- char *buf,
- u16 wLength
-) {
- struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
- int ports = HCS_N_PORTS (fusbh200->hcs_params);
- u32 __iomem *status_reg = &fusbh200->regs->port_status;
- u32 temp, temp1, status;
- unsigned long flags;
- int retval = 0;
- unsigned selector;
-
- /*
- * FIXME: support SetPortFeature USB_PORT_FEAT_INDICATOR.
- * HCS_INDICATOR may say we can change LEDs to off/amber/green.
- * (track current state ourselves) ... blink for diagnostics,
- * power, "this is the one", etc. EHCI spec supports this.
- */
-
- spin_lock_irqsave (&fusbh200->lock, flags);
- switch (typeReq) {
- case ClearHubFeature:
- switch (wValue) {
- case C_HUB_LOCAL_POWER:
- case C_HUB_OVER_CURRENT:
- /* no hub-wide feature/status flags */
- break;
- default:
- goto error;
- }
- break;
- case ClearPortFeature:
- if (!wIndex || wIndex > ports)
- goto error;
- wIndex--;
- temp = fusbh200_readl(fusbh200, status_reg);
- temp &= ~PORT_RWC_BITS;
-
- /*
- * Even if OWNER is set, so the port is owned by the
- * companion controller, hub_wq needs to be able to clear
- * the port-change status bits (especially
- * USB_PORT_STAT_C_CONNECTION).
- */
-
- switch (wValue) {
- case USB_PORT_FEAT_ENABLE:
- fusbh200_writel(fusbh200, temp & ~PORT_PE, status_reg);
- break;
- case USB_PORT_FEAT_C_ENABLE:
- fusbh200_writel(fusbh200, temp | PORT_PEC, status_reg);
- break;
- case USB_PORT_FEAT_SUSPEND:
- if (temp & PORT_RESET)
- goto error;
- if (!(temp & PORT_SUSPEND))
- break;
- if ((temp & PORT_PE) == 0)
- goto error;
-
- fusbh200_writel(fusbh200, temp | PORT_RESUME, status_reg);
- fusbh200->reset_done[wIndex] = jiffies
- + msecs_to_jiffies(USB_RESUME_TIMEOUT);
- break;
- case USB_PORT_FEAT_C_SUSPEND:
- clear_bit(wIndex, &fusbh200->port_c_suspend);
- break;
- case USB_PORT_FEAT_C_CONNECTION:
- fusbh200_writel(fusbh200, temp | PORT_CSC, status_reg);
- break;
- case USB_PORT_FEAT_C_OVER_CURRENT:
- fusbh200_writel(fusbh200, temp | BMISR_OVC, &fusbh200->regs->bmisr);
- break;
- case USB_PORT_FEAT_C_RESET:
- /* GetPortStatus clears reset */
- break;
- default:
- goto error;
- }
- fusbh200_readl(fusbh200, &fusbh200->regs->command); /* unblock posted write */
- break;
- case GetHubDescriptor:
- fusbh200_hub_descriptor (fusbh200, (struct usb_hub_descriptor *)
- buf);
- break;
- case GetHubStatus:
- /* no hub-wide feature/status flags */
- memset (buf, 0, 4);
- //cpu_to_le32s ((u32 *) buf);
- break;
- case GetPortStatus:
- if (!wIndex || wIndex > ports)
- goto error;
- wIndex--;
- status = 0;
- temp = fusbh200_readl(fusbh200, status_reg);
-
- // wPortChange bits
- if (temp & PORT_CSC)
- status |= USB_PORT_STAT_C_CONNECTION << 16;
- if (temp & PORT_PEC)
- status |= USB_PORT_STAT_C_ENABLE << 16;
-
- temp1 = fusbh200_readl(fusbh200, &fusbh200->regs->bmisr);
- if (temp1 & BMISR_OVC)
- status |= USB_PORT_STAT_C_OVERCURRENT << 16;
-
- /* whoever resumes must GetPortStatus to complete it!! */
- if (temp & PORT_RESUME) {
-
- /* Remote Wakeup received? */
- if (!fusbh200->reset_done[wIndex]) {
- /* resume signaling for 20 msec */
- fusbh200->reset_done[wIndex] = jiffies
- + msecs_to_jiffies(20);
- /* check the port again */
- mod_timer(&fusbh200_to_hcd(fusbh200)->rh_timer,
- fusbh200->reset_done[wIndex]);
- }
-
- /* resume completed? */
- else if (time_after_eq(jiffies,
- fusbh200->reset_done[wIndex])) {
- clear_bit(wIndex, &fusbh200->suspended_ports);
- set_bit(wIndex, &fusbh200->port_c_suspend);
- fusbh200->reset_done[wIndex] = 0;
-
- /* stop resume signaling */
- temp = fusbh200_readl(fusbh200, status_reg);
- fusbh200_writel(fusbh200,
- temp & ~(PORT_RWC_BITS | PORT_RESUME),
- status_reg);
- clear_bit(wIndex, &fusbh200->resuming_ports);
- retval = handshake(fusbh200, status_reg,
- PORT_RESUME, 0, 2000 /* 2msec */);
- if (retval != 0) {
- fusbh200_err(fusbh200,
- "port %d resume error %d\n",
- wIndex + 1, retval);
- goto error;
- }
- temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
- }
- }
-
- /* whoever resets must GetPortStatus to complete it!! */
- if ((temp & PORT_RESET)
- && time_after_eq(jiffies,
- fusbh200->reset_done[wIndex])) {
- status |= USB_PORT_STAT_C_RESET << 16;
- fusbh200->reset_done [wIndex] = 0;
- clear_bit(wIndex, &fusbh200->resuming_ports);
-
- /* force reset to complete */
- fusbh200_writel(fusbh200, temp & ~(PORT_RWC_BITS | PORT_RESET),
- status_reg);
- /* REVISIT: some hardware needs 550+ usec to clear
- * this bit; seems too long to spin routinely...
- */
- retval = handshake(fusbh200, status_reg,
- PORT_RESET, 0, 1000);
- if (retval != 0) {
- fusbh200_err (fusbh200, "port %d reset error %d\n",
- wIndex + 1, retval);
- goto error;
- }
-
- /* see what we found out */
- temp = check_reset_complete (fusbh200, wIndex, status_reg,
- fusbh200_readl(fusbh200, status_reg));
- }
-
- if (!(temp & (PORT_RESUME|PORT_RESET))) {
- fusbh200->reset_done[wIndex] = 0;
- clear_bit(wIndex, &fusbh200->resuming_ports);
- }
-
- /* transfer dedicated ports to the companion hc */
- if ((temp & PORT_CONNECT) &&
- test_bit(wIndex, &fusbh200->companion_ports)) {
- temp &= ~PORT_RWC_BITS;
- fusbh200_writel(fusbh200, temp, status_reg);
- fusbh200_dbg(fusbh200, "port %d --> companion\n", wIndex + 1);
- temp = fusbh200_readl(fusbh200, status_reg);
- }
-
- /*
- * Even if OWNER is set, there's no harm letting hub_wq
- * see the wPortStatus values (they should all be 0 except
- * for PORT_POWER anyway).
- */
-
- if (temp & PORT_CONNECT) {
- status |= USB_PORT_STAT_CONNECTION;
- status |= fusbh200_port_speed(fusbh200, temp);
- }
- if (temp & PORT_PE)
- status |= USB_PORT_STAT_ENABLE;
-
- /* maybe the port was unsuspended without our knowledge */
- if (temp & (PORT_SUSPEND|PORT_RESUME)) {
- status |= USB_PORT_STAT_SUSPEND;
- } else if (test_bit(wIndex, &fusbh200->suspended_ports)) {
- clear_bit(wIndex, &fusbh200->suspended_ports);
- clear_bit(wIndex, &fusbh200->resuming_ports);
- fusbh200->reset_done[wIndex] = 0;
- if (temp & PORT_PE)
- set_bit(wIndex, &fusbh200->port_c_suspend);
- }
-
- temp1 = fusbh200_readl(fusbh200, &fusbh200->regs->bmisr);
- if (temp1 & BMISR_OVC)
- status |= USB_PORT_STAT_OVERCURRENT;
- if (temp & PORT_RESET)
- status |= USB_PORT_STAT_RESET;
- if (test_bit(wIndex, &fusbh200->port_c_suspend))
- status |= USB_PORT_STAT_C_SUSPEND << 16;
-
- if (status & ~0xffff) /* only if wPortChange is interesting */
- dbg_port(fusbh200, "GetStatus", wIndex + 1, temp);
- put_unaligned_le32(status, buf);
- break;
- case SetHubFeature:
- switch (wValue) {
- case C_HUB_LOCAL_POWER:
- case C_HUB_OVER_CURRENT:
- /* no hub-wide feature/status flags */
- break;
- default:
- goto error;
- }
- break;
- case SetPortFeature:
- selector = wIndex >> 8;
- wIndex &= 0xff;
-
- if (!wIndex || wIndex > ports)
- goto error;
- wIndex--;
- temp = fusbh200_readl(fusbh200, status_reg);
- temp &= ~PORT_RWC_BITS;
- switch (wValue) {
- case USB_PORT_FEAT_SUSPEND:
- if ((temp & PORT_PE) == 0
- || (temp & PORT_RESET) != 0)
- goto error;
-
- /* After the check above, the port must be connected.
- * Setting this bit could also put the PHY into low-power
- * mode if a hostpc-style feature were available.
- */
- fusbh200_writel(fusbh200, temp | PORT_SUSPEND, status_reg);
- set_bit(wIndex, &fusbh200->suspended_ports);
- break;
- case USB_PORT_FEAT_RESET:
- if (temp & PORT_RESUME)
- goto error;
- /* line status bits may report this as low speed,
- * which can be fine if this root hub has a
- * transaction translator built in.
- */
- fusbh200_dbg(fusbh200, "port %d reset\n", wIndex + 1);
- temp |= PORT_RESET;
- temp &= ~PORT_PE;
-
- /*
- * caller must wait, then call GetPortStatus
- * usb 2.0 spec says 50 ms resets on root
- */
- fusbh200->reset_done [wIndex] = jiffies
- + msecs_to_jiffies (50);
- fusbh200_writel(fusbh200, temp, status_reg);
- break;
-
- /* For downstream-facing ports (which these are): one hub port
- * is put into test mode according to USB 2.0 11.24.2.13, then
- * the hub must be reset (for a root hub that currently means
- * rmmod+modprobe, or else a system reboot). See EHCI 2.3.9 and
- * 4.14 for the EHCI-specific details.
- */
- case USB_PORT_FEAT_TEST:
- if (!selector || selector > 5)
- goto error;
- spin_unlock_irqrestore(&fusbh200->lock, flags);
- fusbh200_quiesce(fusbh200);
- spin_lock_irqsave(&fusbh200->lock, flags);
-
- /* Put all enabled ports into suspend */
- temp = fusbh200_readl(fusbh200, status_reg) & ~PORT_RWC_BITS;
- if (temp & PORT_PE)
- fusbh200_writel(fusbh200, temp | PORT_SUSPEND,
- status_reg);
-
- spin_unlock_irqrestore(&fusbh200->lock, flags);
- fusbh200_halt(fusbh200);
- spin_lock_irqsave(&fusbh200->lock, flags);
-
- temp = fusbh200_readl(fusbh200, status_reg);
- temp |= selector << 16;
- fusbh200_writel(fusbh200, temp, status_reg);
- break;
-
- default:
- goto error;
- }
- fusbh200_readl(fusbh200, &fusbh200->regs->command); /* unblock posted writes */
- break;
-
- default:
-error:
- /* "stall" on error */
- retval = -EPIPE;
- }
- spin_unlock_irqrestore (&fusbh200->lock, flags);
- return retval;
-}
-
-static void __maybe_unused fusbh200_relinquish_port(struct usb_hcd *hcd,
- int portnum)
-{
- return;
-}
-
-static int __maybe_unused fusbh200_port_handed_over(struct usb_hcd *hcd,
- int portnum)
-{
- return 0;
-}
-/*-------------------------------------------------------------------------*/
-/*
- * There are basically three types of memory:
- * - data used only by the HCD ... kmalloc is fine
- * - async and periodic schedules, shared by HC and HCD ... these
- * need to use dma_pool or dma_alloc_coherent
- * - driver buffers, read/written by HC ... single shot DMA mapped
- *
- * There's also "register" data (e.g. PCI or SOC), which is memory mapped.
- * No memory seen by this driver is pageable.
- */
-
-/*-------------------------------------------------------------------------*/
-
-/* Allocate the key transfer structures from the previously allocated pool */
-
-static inline void fusbh200_qtd_init(struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd,
- dma_addr_t dma)
-{
- memset (qtd, 0, sizeof *qtd);
- qtd->qtd_dma = dma;
- qtd->hw_token = cpu_to_hc32(fusbh200, QTD_STS_HALT);
- qtd->hw_next = FUSBH200_LIST_END(fusbh200);
- qtd->hw_alt_next = FUSBH200_LIST_END(fusbh200);
- INIT_LIST_HEAD (&qtd->qtd_list);
-}
-
-static struct fusbh200_qtd *fusbh200_qtd_alloc (struct fusbh200_hcd *fusbh200, gfp_t flags)
-{
- struct fusbh200_qtd *qtd;
- dma_addr_t dma;
-
- qtd = dma_pool_alloc (fusbh200->qtd_pool, flags, &dma);
- if (qtd != NULL) {
- fusbh200_qtd_init(fusbh200, qtd, dma);
- }
- return qtd;
-}
-
-static inline void fusbh200_qtd_free (struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd)
-{
- dma_pool_free (fusbh200->qtd_pool, qtd, qtd->qtd_dma);
-}
-
-
-static void qh_destroy(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
-{
- /* clean qtds first, and know this is not linked */
- if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
- fusbh200_dbg (fusbh200, "unused qh not empty!\n");
- BUG ();
- }
- if (qh->dummy)
- fusbh200_qtd_free (fusbh200, qh->dummy);
- dma_pool_free(fusbh200->qh_pool, qh->hw, qh->qh_dma);
- kfree(qh);
-}
-
-static struct fusbh200_qh *fusbh200_qh_alloc (struct fusbh200_hcd *fusbh200, gfp_t flags)
-{
- struct fusbh200_qh *qh;
- dma_addr_t dma;
-
- qh = kzalloc(sizeof *qh, GFP_ATOMIC);
- if (!qh)
- goto done;
- qh->hw = (struct fusbh200_qh_hw *)
- dma_pool_alloc(fusbh200->qh_pool, flags, &dma);
- if (!qh->hw)
- goto fail;
- memset(qh->hw, 0, sizeof *qh->hw);
- qh->qh_dma = dma;
- // INIT_LIST_HEAD (&qh->qh_list);
- INIT_LIST_HEAD (&qh->qtd_list);
-
- /* dummy td enables safe urb queuing */
- qh->dummy = fusbh200_qtd_alloc (fusbh200, flags);
- if (qh->dummy == NULL) {
- fusbh200_dbg (fusbh200, "no dummy td\n");
- goto fail1;
- }
-done:
- return qh;
-fail1:
- dma_pool_free(fusbh200->qh_pool, qh->hw, qh->qh_dma);
-fail:
- kfree(qh);
- return NULL;
-}
-
-/*-------------------------------------------------------------------------*/
-
-/* The queue heads and transfer descriptors are managed from pools tied
- * to each of the "per device" structures.
- * This is the initialisation and cleanup code.
- */
-
-static void fusbh200_mem_cleanup (struct fusbh200_hcd *fusbh200)
-{
- if (fusbh200->async)
- qh_destroy(fusbh200, fusbh200->async);
- fusbh200->async = NULL;
-
- if (fusbh200->dummy)
- qh_destroy(fusbh200, fusbh200->dummy);
- fusbh200->dummy = NULL;
-
- /* DMA consistent memory and pools */
- if (fusbh200->qtd_pool)
- dma_pool_destroy (fusbh200->qtd_pool);
- fusbh200->qtd_pool = NULL;
-
- if (fusbh200->qh_pool) {
- dma_pool_destroy (fusbh200->qh_pool);
- fusbh200->qh_pool = NULL;
- }
-
- if (fusbh200->itd_pool)
- dma_pool_destroy (fusbh200->itd_pool);
- fusbh200->itd_pool = NULL;
-
- if (fusbh200->periodic)
- dma_free_coherent (fusbh200_to_hcd(fusbh200)->self.controller,
- fusbh200->periodic_size * sizeof (u32),
- fusbh200->periodic, fusbh200->periodic_dma);
- fusbh200->periodic = NULL;
-
- /* shadow periodic table */
- kfree(fusbh200->pshadow);
- fusbh200->pshadow = NULL;
-}
-
-/* remember to add cleanup code (above) if you add anything here */
-static int fusbh200_mem_init (struct fusbh200_hcd *fusbh200, gfp_t flags)
-{
- int i;
-
- /* QTDs for control/bulk/intr transfers */
- fusbh200->qtd_pool = dma_pool_create ("fusbh200_qtd",
- fusbh200_to_hcd(fusbh200)->self.controller,
- sizeof (struct fusbh200_qtd),
- 32 /* byte alignment (for hw parts) */,
- 4096 /* can't cross 4K */);
- if (!fusbh200->qtd_pool) {
- goto fail;
- }
-
- /* QHs for control/bulk/intr transfers */
- fusbh200->qh_pool = dma_pool_create ("fusbh200_qh",
- fusbh200_to_hcd(fusbh200)->self.controller,
- sizeof(struct fusbh200_qh_hw),
- 32 /* byte alignment (for hw parts) */,
- 4096 /* can't cross 4K */);
- if (!fusbh200->qh_pool) {
- goto fail;
- }
- fusbh200->async = fusbh200_qh_alloc (fusbh200, flags);
- if (!fusbh200->async) {
- goto fail;
- }
-
- /* ITD for high speed ISO transfers */
- fusbh200->itd_pool = dma_pool_create ("fusbh200_itd",
- fusbh200_to_hcd(fusbh200)->self.controller,
- sizeof (struct fusbh200_itd),
- 64 /* byte alignment (for hw parts) */,
- 4096 /* can't cross 4K */);
- if (!fusbh200->itd_pool) {
- goto fail;
- }
-
- /* Hardware periodic table */
- fusbh200->periodic = (__le32 *)
- dma_alloc_coherent (fusbh200_to_hcd(fusbh200)->self.controller,
- fusbh200->periodic_size * sizeof(__le32),
- &fusbh200->periodic_dma, 0);
- if (fusbh200->periodic == NULL) {
- goto fail;
- }
-
- for (i = 0; i < fusbh200->periodic_size; i++)
- fusbh200->periodic[i] = FUSBH200_LIST_END(fusbh200);
-
- /* software shadow of hardware table */
- fusbh200->pshadow = kcalloc(fusbh200->periodic_size, sizeof(void *), flags);
- if (fusbh200->pshadow != NULL)
- return 0;
-
-fail:
- fusbh200_dbg (fusbh200, "couldn't init memory\n");
- fusbh200_mem_cleanup (fusbh200);
- return -ENOMEM;
-}
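/*
 * Editor's sketch (illustration only): the allocate-everything-or-unwind
 * pattern used by fusbh200_mem_init() above, reduced to plain malloc().
 * On any failure, one common cleanup path frees whatever was allocated;
 * freeing the still-NULL members is harmless.  All names are invented.
 */
#include <stdlib.h>

struct pools { void *a; void *b; void *c; };

static void pools_cleanup(struct pools *p)
{
        free(p->c);
        free(p->b);
        free(p->a);
}

static int pools_init(struct pools *p)
{
        p->a = malloc(32);
        if (!p->a)
                goto fail;
        p->b = malloc(64);
        if (!p->b)
                goto fail;
        p->c = malloc(128);
        if (!p->c)
                goto fail;
        return 0;
fail:
        pools_cleanup(p);       /* free(NULL) is a no-op, so this is safe */
        return -1;
}

int main(void)
{
        struct pools p = { 0 };

        if (pools_init(&p) == 0)
                pools_cleanup(&p);
        return 0;
}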
-/*-------------------------------------------------------------------------*/
-/*
- * EHCI hardware queue manipulation ... the core. QH/QTD manipulation.
- *
- * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd"
- * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
- * buffers needed for the larger number). We use one QH per endpoint, queue
- * multiple urbs (all three types) per endpoint. URBs may need several qtds.
- *
- * ISO traffic uses "ISO TD" (itd) records, and (along with
- * interrupts) needs careful scheduling. Performance improvements can be
- * an ongoing challenge. That's in "ehci-sched.c".
- *
- * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
- * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
- * (b) special fields in qh entries or (c) split iso entries. TTs will
- * buffer low/full speed data so the host collects it at high speed.
- */
-
-/*-------------------------------------------------------------------------*/
-
-/* fill a qtd, returning how much of the buffer we were able to queue up */
-
-static int
-qtd_fill(struct fusbh200_hcd *fusbh200, struct fusbh200_qtd *qtd, dma_addr_t buf,
- size_t len, int token, int maxpacket)
-{
- int i, count;
- u64 addr = buf;
-
- /* one buffer entry per 4K ... first might be short or unaligned */
- qtd->hw_buf[0] = cpu_to_hc32(fusbh200, (u32)addr);
- qtd->hw_buf_hi[0] = cpu_to_hc32(fusbh200, (u32)(addr >> 32));
- count = 0x1000 - (buf & 0x0fff); /* rest of that page */
- if (likely (len < count)) /* ... iff needed */
- count = len;
- else {
- buf += 0x1000;
- buf &= ~0x0fff;
-
- /* per-qtd limit: from 16K to 20K (best alignment) */
- for (i = 1; count < len && i < 5; i++) {
- addr = buf;
- qtd->hw_buf[i] = cpu_to_hc32(fusbh200, (u32)addr);
- qtd->hw_buf_hi[i] = cpu_to_hc32(fusbh200,
- (u32)(addr >> 32));
- buf += 0x1000;
- if ((count + 0x1000) < len)
- count += 0x1000;
- else
- count = len;
- }
-
- /* short packets may only terminate transfers */
- if (count != len)
- count -= (count % maxpacket);
- }
- qtd->hw_token = cpu_to_hc32(fusbh200, (count << 16) | token);
- qtd->length = count;
-
- return count;
-}
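/*
 * Editor's worked example (not driver code): how much data one qtd can
 * carry given the five 4 KiB buffer pointers filled in above.  The first
 * page entry may start unaligned; the later entries are whole pages, and
 * if the transfer continues beyond this qtd the count is rounded down to
 * a multiple of maxpacket.  The sample address and lengths are arbitrary.
 */
#include <stdio.h>

int main(void)
{
        unsigned long buf = 0x12345678;                 /* example DMA address */
        size_t len = 20480, maxpacket = 512;
        size_t count = 0x1000 - (buf & 0x0fff);         /* rest of the first page */
        int i;

        for (i = 1; count < len && i < 5; i++)          /* up to four more pages */
                count += 0x1000;
        if (count > len)
                count = len;
        if (count != len)
                count -= count % maxpacket;             /* short only at transfer end */
        printf("qtd carries %zu of %zu bytes\n", count, len);
        return 0;
}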
-
-/*-------------------------------------------------------------------------*/
-
-static inline void
-qh_update (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh, struct fusbh200_qtd *qtd)
-{
- struct fusbh200_qh_hw *hw = qh->hw;
-
- /* writes to an active overlay are unsafe */
- BUG_ON(qh->qh_state != QH_STATE_IDLE);
-
- hw->hw_qtd_next = QTD_NEXT(fusbh200, qtd->qtd_dma);
- hw->hw_alt_next = FUSBH200_LIST_END(fusbh200);
-
- /* Except for control endpoints, we make hardware maintain data
- * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
- * and set the pseudo-toggle in udev. Only usb_clear_halt() will
- * ever clear it.
- */
- if (!(hw->hw_info1 & cpu_to_hc32(fusbh200, QH_TOGGLE_CTL))) {
- unsigned is_out, epnum;
-
- is_out = qh->is_out;
- epnum = (hc32_to_cpup(fusbh200, &hw->hw_info1) >> 8) & 0x0f;
- if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
- hw->hw_token &= ~cpu_to_hc32(fusbh200, QTD_TOGGLE);
- usb_settoggle (qh->dev, epnum, is_out, 1);
- }
- }
-
- hw->hw_token &= cpu_to_hc32(fusbh200, QTD_TOGGLE | QTD_STS_PING);
-}
-
-/* if it weren't for a common silicon quirk (writing the dummy into the qh
- * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
- * recovery (including urb dequeue) would need software changes to a QH...
- */
-static void
-qh_refresh (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
-{
- struct fusbh200_qtd *qtd;
-
- if (list_empty (&qh->qtd_list))
- qtd = qh->dummy;
- else {
- qtd = list_entry (qh->qtd_list.next,
- struct fusbh200_qtd, qtd_list);
- /*
- * first qtd may already be partially processed.
- * If we come here during unlink, the QH overlay region
- * might have reference to the just unlinked qtd. The
- * qtd is updated in qh_completions(). Update the QH
- * overlay here.
- */
- if (cpu_to_hc32(fusbh200, qtd->qtd_dma) == qh->hw->hw_current) {
- qh->hw->hw_qtd_next = qtd->hw_next;
- qtd = NULL;
- }
- }
-
- if (qtd)
- qh_update (fusbh200, qh, qtd);
-}
-
-/*-------------------------------------------------------------------------*/
-
-static void qh_link_async(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh);
-
-static void fusbh200_clear_tt_buffer_complete(struct usb_hcd *hcd,
- struct usb_host_endpoint *ep)
-{
- struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd);
- struct fusbh200_qh *qh = ep->hcpriv;
- unsigned long flags;
-
- spin_lock_irqsave(&fusbh200->lock, flags);
- qh->clearing_tt = 0;
- if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
- && fusbh200->rh_state == FUSBH200_RH_RUNNING)
- qh_link_async(fusbh200, qh);
- spin_unlock_irqrestore(&fusbh200->lock, flags);
-}
-
-static void fusbh200_clear_tt_buffer(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh,
- struct urb *urb, u32 token)
-{
-
- /* If an async split transaction gets an error or is unlinked,
- * the TT buffer may be left in an indeterminate state. We
- * have to clear the TT buffer.
- *
- * Note: this routine is never called for Isochronous transfers.
- */
- if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
- struct usb_device *tt = urb->dev->tt->hub;
-
- dev_dbg(&tt->dev,
- "clear tt buffer port %d, a%d ep%d t%08x\n",
- urb->dev->ttport, urb->dev->devnum,
- usb_pipeendpoint(urb->pipe), token);
-
- if (urb->dev->tt->hub !=
- fusbh200_to_hcd(fusbh200)->self.root_hub) {
- if (usb_hub_clear_tt_buffer(urb) == 0)
- qh->clearing_tt = 1;
- }
- }
-}
-
-static int qtd_copy_status (
- struct fusbh200_hcd *fusbh200,
- struct urb *urb,
- size_t length,
- u32 token
-)
-{
- int status = -EINPROGRESS;
-
- /* count IN/OUT bytes, not SETUP (even short packets) */
- if (likely (QTD_PID (token) != 2))
- urb->actual_length += length - QTD_LENGTH (token);
-
- /* don't modify error codes */
- if (unlikely(urb->unlinked))
- return status;
-
- /* force cleanup after short read; not always an error */
- if (unlikely (IS_SHORT_READ (token)))
- status = -EREMOTEIO;
-
- /* serious "can't proceed" faults reported by the hardware */
- if (token & QTD_STS_HALT) {
- if (token & QTD_STS_BABBLE) {
- /* FIXME "must" disable babbling device's port too */
- status = -EOVERFLOW;
- /* CERR nonzero + halt --> stall */
- } else if (QTD_CERR(token)) {
- status = -EPIPE;
-
- /* In theory, more than one of the following bits can be set
- * since they are sticky and the transaction is retried.
- * Which to test first is rather arbitrary.
- */
- } else if (token & QTD_STS_MMF) {
- /* fs/ls interrupt xfer missed the complete-split */
- status = -EPROTO;
- } else if (token & QTD_STS_DBE) {
- status = (QTD_PID (token) == 1) /* IN ? */
- ? -ENOSR /* hc couldn't read data */
- : -ECOMM; /* hc couldn't write data */
- } else if (token & QTD_STS_XACT) {
- /* timeout, bad CRC, wrong PID, etc */
- fusbh200_dbg(fusbh200, "devpath %s ep%d%s 3strikes\n",
- urb->dev->devpath,
- usb_pipeendpoint(urb->pipe),
- usb_pipein(urb->pipe) ? "in" : "out");
- status = -EPROTO;
- } else { /* unknown */
- status = -EPROTO;
- }
-
- fusbh200_dbg(fusbh200,
- "dev%d ep%d%s qtd token %08x --> status %d\n",
- usb_pipedevice (urb->pipe),
- usb_pipeendpoint (urb->pipe),
- usb_pipein (urb->pipe) ? "in" : "out",
- token, status);
- }
-
- return status;
-}
-
-static void
-fusbh200_urb_done(struct fusbh200_hcd *fusbh200, struct urb *urb, int status)
-__releases(fusbh200->lock)
-__acquires(fusbh200->lock)
-{
- if (likely (urb->hcpriv != NULL)) {
- struct fusbh200_qh *qh = (struct fusbh200_qh *) urb->hcpriv;
-
- /* S-mask in a QH means it's an interrupt urb */
- if ((qh->hw->hw_info2 & cpu_to_hc32(fusbh200, QH_SMASK)) != 0) {
-
- /* ... update hc-wide periodic stats (for usbfs) */
- fusbh200_to_hcd(fusbh200)->self.bandwidth_int_reqs--;
- }
- }
-
- if (unlikely(urb->unlinked)) {
- COUNT(fusbh200->stats.unlink);
- } else {
- /* report non-error and short read status as zero */
- if (status == -EINPROGRESS || status == -EREMOTEIO)
- status = 0;
- COUNT(fusbh200->stats.complete);
- }
-
-#ifdef FUSBH200_URB_TRACE
- fusbh200_dbg (fusbh200,
- "%s %s urb %p ep%d%s status %d len %d/%d\n",
- __func__, urb->dev->devpath, urb,
- usb_pipeendpoint (urb->pipe),
- usb_pipein (urb->pipe) ? "in" : "out",
- status,
- urb->actual_length, urb->transfer_buffer_length);
-#endif
-
- /* complete() can reenter this HCD */
- usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb);
- spin_unlock (&fusbh200->lock);
- usb_hcd_giveback_urb(fusbh200_to_hcd(fusbh200), urb, status);
- spin_lock (&fusbh200->lock);
-}
-
-static int qh_schedule (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh);
-
-/*
- * Process and free completed qtds for a qh, returning URBs to drivers.
- * Chases up to qh->hw_current. Returns number of completions called,
- * indicating how much "real" work we did.
- */
-static unsigned
-qh_completions (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
-{
- struct fusbh200_qtd *last, *end = qh->dummy;
- struct list_head *entry, *tmp;
- int last_status;
- int stopped;
- unsigned count = 0;
- u8 state;
- struct fusbh200_qh_hw *hw = qh->hw;
-
- if (unlikely (list_empty (&qh->qtd_list)))
- return count;
-
- /* completions (or tasks on other cpus) must never clobber HALT
- * till we've gone through and cleaned everything up, even when
- * they add urbs to this qh's queue or mark them for unlinking.
- *
- * NOTE: unlinking expects to be done in queue order.
- *
- * It's a bug for qh->qh_state to be anything other than
- * QH_STATE_IDLE, unless our caller is scan_async() or
- * scan_intr().
- */
- state = qh->qh_state;
- qh->qh_state = QH_STATE_COMPLETING;
- stopped = (state == QH_STATE_IDLE);
-
- rescan:
- last = NULL;
- last_status = -EINPROGRESS;
- qh->needs_rescan = 0;
-
- /* remove de-activated QTDs from front of queue.
- * after faults (including short reads), cleanup this urb
- * then let the queue advance.
- * if queue is stopped, handles unlinks.
- */
- list_for_each_safe (entry, tmp, &qh->qtd_list) {
- struct fusbh200_qtd *qtd;
- struct urb *urb;
- u32 token = 0;
-
- qtd = list_entry (entry, struct fusbh200_qtd, qtd_list);
- urb = qtd->urb;
-
- /* clean up any state from previous QTD ...*/
- if (last) {
- if (likely (last->urb != urb)) {
- fusbh200_urb_done(fusbh200, last->urb, last_status);
- count++;
- last_status = -EINPROGRESS;
- }
- fusbh200_qtd_free (fusbh200, last);
- last = NULL;
- }
-
- /* ignore urbs submitted during completions we reported */
- if (qtd == end)
- break;
-
- /* hardware copies qtd out of qh overlay */
- rmb ();
- token = hc32_to_cpu(fusbh200, qtd->hw_token);
-
- /* always clean up qtds the hc de-activated */
- retry_xacterr:
- if ((token & QTD_STS_ACTIVE) == 0) {
-
- /* Report Data Buffer Error: non-fatal but useful */
- if (token & QTD_STS_DBE)
- fusbh200_dbg(fusbh200,
- "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
- urb,
- usb_endpoint_num(&urb->ep->desc),
- usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
- urb->transfer_buffer_length,
- qtd,
- qh);
-
- /* on STALL, error, and short reads this urb must
- * complete and all its qtds must be recycled.
- */
- if ((token & QTD_STS_HALT) != 0) {
-
- /* retry transaction errors until we
- * reach the software xacterr limit
- */
- if ((token & QTD_STS_XACT) &&
- QTD_CERR(token) == 0 &&
- ++qh->xacterrs < QH_XACTERR_MAX &&
- !urb->unlinked) {
- fusbh200_dbg(fusbh200,
- "detected XactErr len %zu/%zu retry %d\n",
- qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);
-
- /* reset the token in the qtd and the
- * qh overlay (which still contains
- * the qtd) so that we pick up from
- * where we left off
- */
- token &= ~QTD_STS_HALT;
- token |= QTD_STS_ACTIVE |
- (FUSBH200_TUNE_CERR << 10);
- qtd->hw_token = cpu_to_hc32(fusbh200,
- token);
- wmb();
- hw->hw_token = cpu_to_hc32(fusbh200,
- token);
- goto retry_xacterr;
- }
- stopped = 1;
-
- /* magic dummy for some short reads; qh won't advance.
- * that silicon quirk can kick in with this dummy too.
- *
- * other short reads won't stop the queue, including
- * control transfers (status stage handles that) or
- * most other single-qtd reads ... the queue stops if
- * URB_SHORT_NOT_OK was set so the driver submitting
- * the urbs could clean it up.
- */
- } else if (IS_SHORT_READ (token)
- && !(qtd->hw_alt_next
- & FUSBH200_LIST_END(fusbh200))) {
- stopped = 1;
- }
-
- /* stop scanning when we reach qtds the hc is using */
- } else if (likely (!stopped
- && fusbh200->rh_state >= FUSBH200_RH_RUNNING)) {
- break;
-
- /* scan the whole queue for unlinks whenever it stops */
- } else {
- stopped = 1;
-
- /* cancel everything if we halt, suspend, etc */
- if (fusbh200->rh_state < FUSBH200_RH_RUNNING)
- last_status = -ESHUTDOWN;
-
- /* this qtd is active; skip it unless a previous qtd
- * for its urb faulted, or its urb was canceled.
- */
- else if (last_status == -EINPROGRESS && !urb->unlinked)
- continue;
-
- /* qh unlinked; token in overlay may be most current */
- if (state == QH_STATE_IDLE
- && cpu_to_hc32(fusbh200, qtd->qtd_dma)
- == hw->hw_current) {
- token = hc32_to_cpu(fusbh200, hw->hw_token);
-
- /* An unlink may leave an incomplete
- * async transaction in the TT buffer.
- * We have to clear it.
- */
- fusbh200_clear_tt_buffer(fusbh200, qh, urb, token);
- }
- }
-
- /* unless we already know the urb's status, collect qtd status
- * and update count of bytes transferred. in common short read
- * cases with only one data qtd (including control transfers),
- * queue processing won't halt. but with two or more qtds (for
- * example, with a 32 KB transfer), when the first qtd gets a
- * short read the second must be removed by hand.
- */
- if (last_status == -EINPROGRESS) {
- last_status = qtd_copy_status(fusbh200, urb,
- qtd->length, token);
- if (last_status == -EREMOTEIO
- && (qtd->hw_alt_next
- & FUSBH200_LIST_END(fusbh200)))
- last_status = -EINPROGRESS;
-
- /* As part of low/full-speed endpoint-halt processing
- * we must clear the TT buffer (11.17.5).
- */
- if (unlikely(last_status != -EINPROGRESS &&
- last_status != -EREMOTEIO)) {
- /* The TT's in some hubs malfunction when they
- * receive this request following a STALL (they
- * stop sending isochronous packets). Since a
- * STALL can't leave the TT buffer in a busy
- * state (if you believe Figures 11-48 - 11-51
- * in the USB 2.0 spec), we won't clear the TT
- * buffer in this case. Strictly speaking this
- * is a violation of the spec.
- */
- if (last_status != -EPIPE)
- fusbh200_clear_tt_buffer(fusbh200, qh, urb,
- token);
- }
- }
-
- /* if we're removing something not at the queue head,
- * patch the hardware queue pointer.
- */
- if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
- last = list_entry (qtd->qtd_list.prev,
- struct fusbh200_qtd, qtd_list);
- last->hw_next = qtd->hw_next;
- }
-
- /* remove qtd; it's recycled after possible urb completion */
- list_del (&qtd->qtd_list);
- last = qtd;
-
- /* reinit the xacterr counter for the next qtd */
- qh->xacterrs = 0;
- }
-
- /* last urb's completion might still need calling */
- if (likely (last != NULL)) {
- fusbh200_urb_done(fusbh200, last->urb, last_status);
- count++;
- fusbh200_qtd_free (fusbh200, last);
- }
-
- /* Do we need to rescan for URBs dequeued during a giveback? */
- if (unlikely(qh->needs_rescan)) {
- /* If the QH is already unlinked, do the rescan now. */
- if (state == QH_STATE_IDLE)
- goto rescan;
-
- /* Otherwise we have to wait until the QH is fully unlinked.
- * Our caller will start an unlink if qh->needs_rescan is
- * set. But if an unlink has already started, nothing needs
- * to be done.
- */
- if (state != QH_STATE_LINKED)
- qh->needs_rescan = 0;
- }
-
- /* restore original state; caller must unlink or relink */
- qh->qh_state = state;
-
- /* be sure the hardware's done with the qh before refreshing
- * it after fault cleanup, or recovering from silicon wrongly
- * overlaying the dummy qtd (which reduces DMA chatter).
- */
- if (stopped != 0 || hw->hw_qtd_next == FUSBH200_LIST_END(fusbh200)) {
- switch (state) {
- case QH_STATE_IDLE:
- qh_refresh(fusbh200, qh);
- break;
- case QH_STATE_LINKED:
- /* We won't refresh a QH that's linked (after the HC
- * stopped the queue). That avoids a race:
- * - HC reads first part of QH;
- * - CPU updates that first part and the token;
- * - HC reads rest of that QH, including token
- * Result: HC gets an inconsistent image, and then
- * DMAs to/from the wrong memory (corrupting it).
- *
- * That should be rare for interrupt transfers,
- * except maybe high bandwidth ...
- */
-
- /* Tell the caller to start an unlink */
- qh->needs_rescan = 1;
- break;
- /* otherwise, unlink already started */
- }
- }
-
- return count;
-}
-
-/*-------------------------------------------------------------------------*/
-
-// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
-#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
-// ... and packet size, for any kind of endpoint descriptor
-#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
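/*
 * Editor's worked example (not driver code): decoding a high-bandwidth
 * wMaxPacketSize value with the two macros above.  0x1400 has bits 12..11
 * set to 0b10 (two extra transactions per microframe, multiplier 3) and a
 * 1024-byte packet, i.e. up to 3 x 1024 bytes per microframe.
 */
#include <stdio.h>

#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)

int main(void)
{
        unsigned int w = 0x1400;

        printf("mult=%u, maxpacket=%u\n", hb_mult(w), max_packet(w));   /* 3, 1024 */
        return 0;
}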
-
-/*
- * reverse of qh_urb_transaction: free a list of TDs.
- * used for cleanup after errors, before HC sees an URB's TDs.
- */
-static void qtd_list_free (
- struct fusbh200_hcd *fusbh200,
- struct urb *urb,
- struct list_head *qtd_list
-) {
- struct list_head *entry, *temp;
-
- list_for_each_safe (entry, temp, qtd_list) {
- struct fusbh200_qtd *qtd;
-
- qtd = list_entry (entry, struct fusbh200_qtd, qtd_list);
- list_del (&qtd->qtd_list);
- fusbh200_qtd_free (fusbh200, qtd);
- }
-}
-
-/*
- * create a list of filled qtds for this URB; won't link into qh.
- */
-static struct list_head *
-qh_urb_transaction (
- struct fusbh200_hcd *fusbh200,
- struct urb *urb,
- struct list_head *head,
- gfp_t flags
-) {
- struct fusbh200_qtd *qtd, *qtd_prev;
- dma_addr_t buf;
- int len, this_sg_len, maxpacket;
- int is_input;
- u32 token;
- int i;
- struct scatterlist *sg;
-
- /*
- * URBs map to sequences of QTDs: one logical transaction
- */
- qtd = fusbh200_qtd_alloc (fusbh200, flags);
- if (unlikely (!qtd))
- return NULL;
- list_add_tail (&qtd->qtd_list, head);
- qtd->urb = urb;
-
- token = QTD_STS_ACTIVE;
- token |= (FUSBH200_TUNE_CERR << 10);
- /* for split transactions, SplitXState initialized to zero */
-
- len = urb->transfer_buffer_length;
- is_input = usb_pipein (urb->pipe);
- if (usb_pipecontrol (urb->pipe)) {
- /* SETUP pid */
- qtd_fill(fusbh200, qtd, urb->setup_dma,
- sizeof (struct usb_ctrlrequest),
- token | (2 /* "setup" */ << 8), 8);
-
- /* ... and always at least one more pid */
- token ^= QTD_TOGGLE;
- qtd_prev = qtd;
- qtd = fusbh200_qtd_alloc (fusbh200, flags);
- if (unlikely (!qtd))
- goto cleanup;
- qtd->urb = urb;
- qtd_prev->hw_next = QTD_NEXT(fusbh200, qtd->qtd_dma);
- list_add_tail (&qtd->qtd_list, head);
-
- /* for zero length DATA stages, STATUS is always IN */
- if (len == 0)
- token |= (1 /* "in" */ << 8);
- }
-
- /*
- * data transfer stage: buffer setup
- */
- i = urb->num_mapped_sgs;
- if (len > 0 && i > 0) {
- sg = urb->sg;
- buf = sg_dma_address(sg);
-
- /* urb->transfer_buffer_length may be smaller than the
- * size of the scatterlist (or vice versa)
- */
- this_sg_len = min_t(int, sg_dma_len(sg), len);
- } else {
- sg = NULL;
- buf = urb->transfer_dma;
- this_sg_len = len;
- }
-
- if (is_input)
- token |= (1 /* "in" */ << 8);
- /* else it's already initted to "out" pid (0 << 8) */
-
- maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
-
- /*
- * buffer gets wrapped in one or more qtds;
- * last one may be "short" (including zero len)
- * and may serve as a control status ack
- */
- for (;;) {
- int this_qtd_len;
-
- this_qtd_len = qtd_fill(fusbh200, qtd, buf, this_sg_len, token,
- maxpacket);
- this_sg_len -= this_qtd_len;
- len -= this_qtd_len;
- buf += this_qtd_len;
-
- /*
- * short reads advance to a "magic" dummy instead of the next
- * qtd ... that forces the queue to stop, for manual cleanup.
- * (this will usually be overridden later.)
- */
- if (is_input)
- qtd->hw_alt_next = fusbh200->async->hw->hw_alt_next;
-
- /* qh makes control packets use qtd toggle; maybe switch it */
- if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
- token ^= QTD_TOGGLE;
-
- if (likely(this_sg_len <= 0)) {
- if (--i <= 0 || len <= 0)
- break;
- sg = sg_next(sg);
- buf = sg_dma_address(sg);
- this_sg_len = min_t(int, sg_dma_len(sg), len);
- }
-
- qtd_prev = qtd;
- qtd = fusbh200_qtd_alloc (fusbh200, flags);
- if (unlikely (!qtd))
- goto cleanup;
- qtd->urb = urb;
- qtd_prev->hw_next = QTD_NEXT(fusbh200, qtd->qtd_dma);
- list_add_tail (&qtd->qtd_list, head);
- }
-
- /*
- * unless the caller requires manual cleanup after short reads,
- * have the alt_next mechanism keep the queue running after the
- * last data qtd (the only one, for control and most other cases).
- */
- if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
- || usb_pipecontrol (urb->pipe)))
- qtd->hw_alt_next = FUSBH200_LIST_END(fusbh200);
-
- /*
- * control requests may need a terminating data "status" ack;
- * other OUT ones may need a terminating short packet
- * (zero length).
- */
- if (likely (urb->transfer_buffer_length != 0)) {
- int one_more = 0;
-
- if (usb_pipecontrol (urb->pipe)) {
- one_more = 1;
- token ^= 0x0100; /* "in" <--> "out" */
- token |= QTD_TOGGLE; /* force DATA1 */
- } else if (usb_pipeout(urb->pipe)
- && (urb->transfer_flags & URB_ZERO_PACKET)
- && !(urb->transfer_buffer_length % maxpacket)) {
- one_more = 1;
- }
- if (one_more) {
- qtd_prev = qtd;
- qtd = fusbh200_qtd_alloc (fusbh200, flags);
- if (unlikely (!qtd))
- goto cleanup;
- qtd->urb = urb;
- qtd_prev->hw_next = QTD_NEXT(fusbh200, qtd->qtd_dma);
- list_add_tail (&qtd->qtd_list, head);
-
- /* never any data in such packets */
- qtd_fill(fusbh200, qtd, 0, 0, token, 0);
- }
- }
-
- /* by default, enable interrupt on urb completion */
- if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
- qtd->hw_token |= cpu_to_hc32(fusbh200, QTD_IOC);
- return head;
-
-cleanup:
- qtd_list_free (fusbh200, urb, head);
- return NULL;
-}
-
-/*-------------------------------------------------------------------------*/
-
-// Would be best to create all qh's from config descriptors,
-// when each interface/altsetting is established. Unlink
-// any previous qh and cancel its urbs first; endpoints are
-// implicitly reset then (data toggle too).
-// That'd mean updating how usbcore talks to HCDs. (2.7?)
-
-
-/*
- * Each QH holds a qtd list; a QH is used for everything except iso.
- *
- * For interrupt urbs, the scheduler must set the microframe scheduling
- * mask(s) each time the QH gets scheduled. For highspeed, that's
- * just one microframe in the s-mask. For split interrupt transactions
- * there are additional complications: c-mask, maybe FSTNs.
- */
-static struct fusbh200_qh *
-qh_make (
- struct fusbh200_hcd *fusbh200,
- struct urb *urb,
- gfp_t flags
-) {
- struct fusbh200_qh *qh = fusbh200_qh_alloc (fusbh200, flags);
- u32 info1 = 0, info2 = 0;
- int is_input, type;
- int maxp = 0;
- struct usb_tt *tt = urb->dev->tt;
- struct fusbh200_qh_hw *hw;
-
- if (!qh)
- return qh;
-
- /*
- * init endpoint/device data for this QH
- */
- info1 |= usb_pipeendpoint (urb->pipe) << 8;
- info1 |= usb_pipedevice (urb->pipe) << 0;
-
- is_input = usb_pipein (urb->pipe);
- type = usb_pipetype (urb->pipe);
- maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);
-
- /* 1024 byte maxpacket is a hardware ceiling. High bandwidth
- * acts like up to 3KB, but is built from smaller packets.
- */
- if (max_packet(maxp) > 1024) {
- fusbh200_dbg(fusbh200, "bogus qh maxpacket %d\n", max_packet(maxp));
- goto done;
- }
-
- /* Compute interrupt scheduling parameters just once, and save.
- * - allowing for high bandwidth, how many nsec/uframe are used?
- * - split transactions need a second CSPLIT uframe; same question
- * - splits also need a schedule gap (for full/low speed I/O)
- * - qh has a polling interval
- *
- * For control/bulk requests, the HC or TT handles these.
- */
- if (type == PIPE_INTERRUPT) {
- qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
- is_input, 0,
- hb_mult(maxp) * max_packet(maxp)));
- qh->start = NO_FRAME;
-
- if (urb->dev->speed == USB_SPEED_HIGH) {
- qh->c_usecs = 0;
- qh->gap_uf = 0;
-
- qh->period = urb->interval >> 3;
- if (qh->period == 0 && urb->interval != 1) {
- /* NOTE interval 2 or 4 uframes could work.
- * But interval 1 scheduling is simpler, and
- * includes high bandwidth.
- */
- urb->interval = 1;
- } else if (qh->period > fusbh200->periodic_size) {
- qh->period = fusbh200->periodic_size;
- urb->interval = qh->period << 3;
- }
- } else {
- int think_time;
-
- /* gap is f(FS/LS transfer times) */
- qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
- is_input, 0, maxp) / (125 * 1000);
-
- /* FIXME this just approximates SPLIT/CSPLIT times */
- if (is_input) { // SPLIT, gap, CSPLIT+DATA
- qh->c_usecs = qh->usecs + HS_USECS (0);
- qh->usecs = HS_USECS (1);
- } else { // SPLIT+DATA, gap, CSPLIT
- qh->usecs += HS_USECS (1);
- qh->c_usecs = HS_USECS (0);
- }
-
- think_time = tt ? tt->think_time : 0;
- qh->tt_usecs = NS_TO_US (think_time +
- usb_calc_bus_time (urb->dev->speed,
- is_input, 0, max_packet (maxp)));
- qh->period = urb->interval;
- if (qh->period > fusbh200->periodic_size) {
- qh->period = fusbh200->periodic_size;
- urb->interval = qh->period;
- }
- }
- }
-
- /* support for tt scheduling, and access to toggles */
- qh->dev = urb->dev;
-
- /* using TT? */
- switch (urb->dev->speed) {
- case USB_SPEED_LOW:
- info1 |= QH_LOW_SPEED;
- /* FALL THROUGH */
-
- case USB_SPEED_FULL:
- /* EPS 0 means "full" */
- if (type != PIPE_INTERRUPT)
- info1 |= (FUSBH200_TUNE_RL_TT << 28);
- if (type == PIPE_CONTROL) {
- info1 |= QH_CONTROL_EP; /* for TT */
- info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
- }
- info1 |= maxp << 16;
-
- info2 |= (FUSBH200_TUNE_MULT_TT << 30);
-
- /* Some Freescale processors have an erratum in which the
- * port number in the queue head was 0..N-1 instead of 1..N.
- */
- if (fusbh200_has_fsl_portno_bug(fusbh200))
- info2 |= (urb->dev->ttport-1) << 23;
- else
- info2 |= urb->dev->ttport << 23;
-
- /* set the address of the TT; for TDI's integrated
- * root hub tt, leave it zeroed.
- */
- if (tt && tt->hub != fusbh200_to_hcd(fusbh200)->self.root_hub)
- info2 |= tt->hub->devnum << 16;
-
- /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
-
- break;
-
- case USB_SPEED_HIGH: /* no TT involved */
- info1 |= QH_HIGH_SPEED;
- if (type == PIPE_CONTROL) {
- info1 |= (FUSBH200_TUNE_RL_HS << 28);
- info1 |= 64 << 16; /* usb2 fixed maxpacket */
- info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
- info2 |= (FUSBH200_TUNE_MULT_HS << 30);
- } else if (type == PIPE_BULK) {
- info1 |= (FUSBH200_TUNE_RL_HS << 28);
- /* The USB spec says that high speed bulk endpoints
- * always use 512 byte maxpacket. But some device
- * vendors decided to ignore that, and MSFT is happy
- * to help them do so. So now people expect to use
- * such nonconformant devices with Linux too; sigh.
- */
- info1 |= max_packet(maxp) << 16;
- info2 |= (FUSBH200_TUNE_MULT_HS << 30);
- } else { /* PIPE_INTERRUPT */
- info1 |= max_packet (maxp) << 16;
- info2 |= hb_mult (maxp) << 30;
- }
- break;
- default:
- fusbh200_dbg(fusbh200, "bogus dev %p speed %d\n", urb->dev,
- urb->dev->speed);
-done:
- qh_destroy(fusbh200, qh);
- return NULL;
- }
-
- /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
-
- /* init as live, toggle clear, advance to dummy */
- qh->qh_state = QH_STATE_IDLE;
- hw = qh->hw;
- hw->hw_info1 = cpu_to_hc32(fusbh200, info1);
- hw->hw_info2 = cpu_to_hc32(fusbh200, info2);
- qh->is_out = !is_input;
- usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
- qh_refresh (fusbh200, qh);
- return qh;
-}
-
-/*-------------------------------------------------------------------------*/
-
-static void enable_async(struct fusbh200_hcd *fusbh200)
-{
- if (fusbh200->async_count++)
- return;
-
- /* Stop waiting to turn off the async schedule */
- fusbh200->enabled_hrtimer_events &= ~BIT(FUSBH200_HRTIMER_DISABLE_ASYNC);
-
- /* Don't start the schedule until ASS is 0 */
- fusbh200_poll_ASS(fusbh200);
- turn_on_io_watchdog(fusbh200);
-}
-
-static void disable_async(struct fusbh200_hcd *fusbh200)
-{
- if (--fusbh200->async_count)
- return;
-
- /* The async schedule and async_unlink list are supposed to be empty */
- WARN_ON(fusbh200->async->qh_next.qh || fusbh200->async_unlink);
-
- /* Don't turn off the schedule until ASS is 1 */
- fusbh200_poll_ASS(fusbh200);
-}
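/*
 * Editor's sketch (illustration only): the reference-count idiom behind
 * enable_async()/disable_async() above -- the schedule is switched on only
 * on the 0 -> 1 transition and switched off only on the 1 -> 0 transition.
 * sched_ref_get()/sched_ref_put() are invented stand-ins, not driver APIs.
 */
#include <stdio.h>

static unsigned int async_count;

static void sched_ref_get(void)
{
        if (async_count++)              /* already on, just bump the count */
                return;
        printf("turn the schedule on\n");
}

static void sched_ref_put(void)
{
        if (--async_count)              /* users remain, keep it running */
                return;
        printf("turn the schedule off\n");
}

int main(void)
{
        sched_ref_get();        /* on */
        sched_ref_get();
        sched_ref_put();
        sched_ref_put();        /* off */
        return 0;
}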
-
-/* move qh (and its qtds) onto async queue; maybe enable queue. */
-
-static void qh_link_async (struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
-{
- __hc32 dma = QH_NEXT(fusbh200, qh->qh_dma);
- struct fusbh200_qh *head;
-
- /* Don't link a QH if there's a Clear-TT-Buffer pending */
- if (unlikely(qh->clearing_tt))
- return;
-
- WARN_ON(qh->qh_state != QH_STATE_IDLE);
-
- /* clear halt and/or toggle; and maybe recover from silicon quirk */
- qh_refresh(fusbh200, qh);
-
- /* splice right after start */
- head = fusbh200->async;
- qh->qh_next = head->qh_next;
- qh->hw->hw_next = head->hw->hw_next;
- wmb ();
-
- head->qh_next.qh = qh;
- head->hw->hw_next = dma;
-
- qh->xacterrs = 0;
- qh->qh_state = QH_STATE_LINKED;
- /* qtd completions reported later by interrupt */
-
- enable_async(fusbh200);
-}
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * For control/bulk/interrupt, return QH with these TDs appended.
- * Allocates and initializes the QH if necessary.
- * Returns null if it can't allocate a QH it needs to.
- * If the QH has TDs (urbs) already, that's great.
- */
-static struct fusbh200_qh *qh_append_tds (
- struct fusbh200_hcd *fusbh200,
- struct urb *urb,
- struct list_head *qtd_list,
- int epnum,
- void **ptr
-)
-{
- struct fusbh200_qh *qh = NULL;
- __hc32 qh_addr_mask = cpu_to_hc32(fusbh200, 0x7f);
-
- qh = (struct fusbh200_qh *) *ptr;
- if (unlikely (qh == NULL)) {
- /* can't sleep here, we have fusbh200->lock... */
- qh = qh_make (fusbh200, urb, GFP_ATOMIC);
- *ptr = qh;
- }
- if (likely (qh != NULL)) {
- struct fusbh200_qtd *qtd;
-
- if (unlikely (list_empty (qtd_list)))
- qtd = NULL;
- else
- qtd = list_entry (qtd_list->next, struct fusbh200_qtd,
- qtd_list);
-
- /* control qh may need patching ... */
- if (unlikely (epnum == 0)) {
-
- /* usb_reset_device() briefly reverts to address 0 */
- if (usb_pipedevice (urb->pipe) == 0)
- qh->hw->hw_info1 &= ~qh_addr_mask;
- }
-
- /* just one way to queue requests: swap with the dummy qtd.
- * only hc or qh_refresh() ever modify the overlay.
- */
- if (likely (qtd != NULL)) {
- struct fusbh200_qtd *dummy;
- dma_addr_t dma;
- __hc32 token;
-
- /* to avoid racing the HC, use the dummy td instead of
- * the first td of our list (becomes new dummy). both
- * tds stay deactivated until we're done, when the
- * HC is allowed to fetch the old dummy (4.10.2).
- */
- token = qtd->hw_token;
- qtd->hw_token = HALT_BIT(fusbh200);
-
- dummy = qh->dummy;
-
- dma = dummy->qtd_dma;
- *dummy = *qtd;
- dummy->qtd_dma = dma;
-
- list_del (&qtd->qtd_list);
- list_add (&dummy->qtd_list, qtd_list);
- list_splice_tail(qtd_list, &qh->qtd_list);
-
- fusbh200_qtd_init(fusbh200, qtd, qtd->qtd_dma);
- qh->dummy = qtd;
-
- /* hc must see the new dummy at list end */
- dma = qtd->qtd_dma;
- qtd = list_entry (qh->qtd_list.prev,
- struct fusbh200_qtd, qtd_list);
- qtd->hw_next = QTD_NEXT(fusbh200, dma);
-
- /* let the hc process these next qtds */
- wmb ();
- dummy->hw_token = token;
-
- urb->hcpriv = qh;
- }
- }
- return qh;
-}
-
-/*-------------------------------------------------------------------------*/
-
-static int
-submit_async (
- struct fusbh200_hcd *fusbh200,
- struct urb *urb,
- struct list_head *qtd_list,
- gfp_t mem_flags
-) {
- int epnum;
- unsigned long flags;
- struct fusbh200_qh *qh = NULL;
- int rc;
-
- epnum = urb->ep->desc.bEndpointAddress;
-
-#ifdef FUSBH200_URB_TRACE
- {
- struct fusbh200_qtd *qtd;
- qtd = list_entry(qtd_list->next, struct fusbh200_qtd, qtd_list);
- fusbh200_dbg(fusbh200,
- "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
- __func__, urb->dev->devpath, urb,
- epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
- urb->transfer_buffer_length,
- qtd, urb->ep->hcpriv);
- }
-#endif
-
- spin_lock_irqsave (&fusbh200->lock, flags);
- if (unlikely(!HCD_HW_ACCESSIBLE(fusbh200_to_hcd(fusbh200)))) {
- rc = -ESHUTDOWN;
- goto done;
- }
- rc = usb_hcd_link_urb_to_ep(fusbh200_to_hcd(fusbh200), urb);
- if (unlikely(rc))
- goto done;
-
- qh = qh_append_tds(fusbh200, urb, qtd_list, epnum, &urb->ep->hcpriv);
- if (unlikely(qh == NULL)) {
- usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb);
- rc = -ENOMEM;
- goto done;
- }
-
- /* Control/bulk operations through TTs don't need scheduling,
- * the HC and TT handle it when the TT has a buffer ready.
- */
- if (likely (qh->qh_state == QH_STATE_IDLE))
- qh_link_async(fusbh200, qh);
- done:
- spin_unlock_irqrestore (&fusbh200->lock, flags);
- if (unlikely (qh == NULL))
- qtd_list_free (fusbh200, urb, qtd_list);
- return rc;
-}
-
-/*-------------------------------------------------------------------------*/
-
-static void single_unlink_async(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
-{
- struct fusbh200_qh *prev;
-
- /* Add to the end of the list of QHs waiting for the next IAAD */
- qh->qh_state = QH_STATE_UNLINK;
- if (fusbh200->async_unlink)
- fusbh200->async_unlink_last->unlink_next = qh;
- else
- fusbh200->async_unlink = qh;
- fusbh200->async_unlink_last = qh;
-
- /* Unlink it from the schedule */
- prev = fusbh200->async;
- while (prev->qh_next.qh != qh)
- prev = prev->qh_next.qh;
-
- prev->hw->hw_next = qh->hw->hw_next;
- prev->qh_next = qh->qh_next;
- if (fusbh200->qh_scan_next == qh)
- fusbh200->qh_scan_next = qh->qh_next.qh;
-}
-
-static void start_iaa_cycle(struct fusbh200_hcd *fusbh200, bool nested)
-{
- /*
- * Do nothing if an IAA cycle is already running or
- * if one will be started shortly.
- */
- if (fusbh200->async_iaa || fusbh200->async_unlinking)
- return;
-
- /* Do all the waiting QHs at once */
- fusbh200->async_iaa = fusbh200->async_unlink;
- fusbh200->async_unlink = NULL;
-
- /* If the controller isn't running, we don't have to wait for it */
- if (unlikely(fusbh200->rh_state < FUSBH200_RH_RUNNING)) {
- if (!nested) /* Avoid recursion */
- end_unlink_async(fusbh200);
-
- /* Otherwise start a new IAA cycle */
- } else if (likely(fusbh200->rh_state == FUSBH200_RH_RUNNING)) {
- /* Make sure the unlinks are all visible to the hardware */
- wmb();
-
- fusbh200_writel(fusbh200, fusbh200->command | CMD_IAAD,
- &fusbh200->regs->command);
- fusbh200_readl(fusbh200, &fusbh200->regs->command);
- fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_IAA_WATCHDOG, true);
- }
-}
-
-/* the async qh for the qtds being unlinked is now gone from the HC */
-
-static void end_unlink_async(struct fusbh200_hcd *fusbh200)
-{
- struct fusbh200_qh *qh;
-
- /* Process the idle QHs */
- restart:
- fusbh200->async_unlinking = true;
- while (fusbh200->async_iaa) {
- qh = fusbh200->async_iaa;
- fusbh200->async_iaa = qh->unlink_next;
- qh->unlink_next = NULL;
-
- qh->qh_state = QH_STATE_IDLE;
- qh->qh_next.qh = NULL;
-
- qh_completions(fusbh200, qh);
- if (!list_empty(&qh->qtd_list) &&
- fusbh200->rh_state == FUSBH200_RH_RUNNING)
- qh_link_async(fusbh200, qh);
- disable_async(fusbh200);
- }
- fusbh200->async_unlinking = false;
-
- /* Start a new IAA cycle if any QHs are waiting for it */
- if (fusbh200->async_unlink) {
- start_iaa_cycle(fusbh200, true);
- if (unlikely(fusbh200->rh_state < FUSBH200_RH_RUNNING))
- goto restart;
- }
-}
-
-static void unlink_empty_async(struct fusbh200_hcd *fusbh200)
-{
- struct fusbh200_qh *qh, *next;
- bool stopped = (fusbh200->rh_state < FUSBH200_RH_RUNNING);
- bool check_unlinks_later = false;
-
- /* Unlink all the async QHs that have been empty for a timer cycle */
- next = fusbh200->async->qh_next.qh;
- while (next) {
- qh = next;
- next = qh->qh_next.qh;
-
- if (list_empty(&qh->qtd_list) &&
- qh->qh_state == QH_STATE_LINKED) {
- if (!stopped && qh->unlink_cycle ==
- fusbh200->async_unlink_cycle)
- check_unlinks_later = true;
- else
- single_unlink_async(fusbh200, qh);
- }
- }
-
- /* Start a new IAA cycle if any QHs are waiting for it */
- if (fusbh200->async_unlink)
- start_iaa_cycle(fusbh200, false);
-
- /* QHs that haven't been empty for long enough will be handled later */
- if (check_unlinks_later) {
- fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_ASYNC_UNLINKS, true);
- ++fusbh200->async_unlink_cycle;
- }
-}
-
-/* makes sure the async qh will become idle */
-/* caller must own fusbh200->lock */
-
-static void start_unlink_async(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
-{
- /*
- * If the QH isn't linked then there's nothing we can do
- * unless we were called during a giveback, in which case
- * qh_completions() has to deal with it.
- */
- if (qh->qh_state != QH_STATE_LINKED) {
- if (qh->qh_state == QH_STATE_COMPLETING)
- qh->needs_rescan = 1;
- return;
- }
-
- single_unlink_async(fusbh200, qh);
- start_iaa_cycle(fusbh200, false);
-}
-
-/*-------------------------------------------------------------------------*/
-
-static void scan_async (struct fusbh200_hcd *fusbh200)
-{
- struct fusbh200_qh *qh;
- bool check_unlinks_later = false;
-
- fusbh200->qh_scan_next = fusbh200->async->qh_next.qh;
- while (fusbh200->qh_scan_next) {
- qh = fusbh200->qh_scan_next;
- fusbh200->qh_scan_next = qh->qh_next.qh;
- rescan:
- /* clean any finished work for this qh */
- if (!list_empty(&qh->qtd_list)) {
- int temp;
-
- /*
- * Unlinks could happen here; completion reporting
- * drops the lock. That's why fusbh200->qh_scan_next
- * always holds the next qh to scan; if the next qh
- * gets unlinked then fusbh200->qh_scan_next is adjusted
- * in single_unlink_async().
- */
- temp = qh_completions(fusbh200, qh);
- if (qh->needs_rescan) {
- start_unlink_async(fusbh200, qh);
- } else if (list_empty(&qh->qtd_list)
- && qh->qh_state == QH_STATE_LINKED) {
- qh->unlink_cycle = fusbh200->async_unlink_cycle;
- check_unlinks_later = true;
- } else if (temp != 0)
- goto rescan;
- }
- }
-
- /*
- * Unlink empty entries, reducing DMA usage as well
- * as HCD schedule-scanning costs. Delay for any qh
-	 * we just scanned, since it often doesn't stay
-	 * idle for long.
- */
- if (check_unlinks_later && fusbh200->rh_state == FUSBH200_RH_RUNNING &&
- !(fusbh200->enabled_hrtimer_events &
- BIT(FUSBH200_HRTIMER_ASYNC_UNLINKS))) {
- fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_ASYNC_UNLINKS, true);
- ++fusbh200->async_unlink_cycle;
- }
-}
-/*-------------------------------------------------------------------------*/
-/*
- * EHCI scheduled transaction support: interrupt, iso, split iso
- * These are called "periodic" transactions in the EHCI spec.
- *
- * Note that for interrupt transfers, the QH/QTD manipulation is shared
- * with the "asynchronous" transaction support (control/bulk transfers).
- * The only real difference is in how interrupt transfers are scheduled.
- *
- * For ISO, we make an "iso_stream" head to serve the same role as a QH.
- * It keeps track of every ITD (or SITD) that's linked, and holds enough
- * pre-calculated schedule data to make appending to the queue be quick.
- */
-
-static int fusbh200_get_frame (struct usb_hcd *hcd);
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * periodic_next_shadow - return "next" pointer on shadow list
- * @periodic: host pointer to qh/itd
- * @tag: hardware tag for type of this record
- */
-static union fusbh200_shadow *
-periodic_next_shadow(struct fusbh200_hcd *fusbh200, union fusbh200_shadow *periodic,
- __hc32 tag)
-{
- switch (hc32_to_cpu(fusbh200, tag)) {
- case Q_TYPE_QH:
- return &periodic->qh->qh_next;
- case Q_TYPE_FSTN:
- return &periodic->fstn->fstn_next;
- default:
- return &periodic->itd->itd_next;
- }
-}
-
-static __hc32 *
-shadow_next_periodic(struct fusbh200_hcd *fusbh200, union fusbh200_shadow *periodic,
- __hc32 tag)
-{
- switch (hc32_to_cpu(fusbh200, tag)) {
- /* our fusbh200_shadow.qh is actually software part */
- case Q_TYPE_QH:
- return &periodic->qh->hw->hw_next;
- /* others are hw parts */
- default:
- return periodic->hw_next;
- }
-}
-
-/* caller must hold fusbh200->lock */
-static void periodic_unlink (struct fusbh200_hcd *fusbh200, unsigned frame, void *ptr)
-{
- union fusbh200_shadow *prev_p = &fusbh200->pshadow[frame];
- __hc32 *hw_p = &fusbh200->periodic[frame];
- union fusbh200_shadow here = *prev_p;
-
- /* find predecessor of "ptr"; hw and shadow lists are in sync */
- while (here.ptr && here.ptr != ptr) {
- prev_p = periodic_next_shadow(fusbh200, prev_p,
- Q_NEXT_TYPE(fusbh200, *hw_p));
- hw_p = shadow_next_periodic(fusbh200, &here,
- Q_NEXT_TYPE(fusbh200, *hw_p));
- here = *prev_p;
- }
- /* an interrupt entry (at list end) could have been shared */
- if (!here.ptr)
- return;
-
- /* update shadow and hardware lists ... the old "next" pointers
- * from ptr may still be in use, the caller updates them.
- */
- *prev_p = *periodic_next_shadow(fusbh200, &here,
- Q_NEXT_TYPE(fusbh200, *hw_p));
-
- *hw_p = *shadow_next_periodic(fusbh200, &here,
- Q_NEXT_TYPE(fusbh200, *hw_p));
-}
-
-/* how many of the uframe's 125 usecs are allocated? */
-static unsigned short
-periodic_usecs (struct fusbh200_hcd *fusbh200, unsigned frame, unsigned uframe)
-{
- __hc32 *hw_p = &fusbh200->periodic [frame];
- union fusbh200_shadow *q = &fusbh200->pshadow [frame];
- unsigned usecs = 0;
- struct fusbh200_qh_hw *hw;
-
- while (q->ptr) {
- switch (hc32_to_cpu(fusbh200, Q_NEXT_TYPE(fusbh200, *hw_p))) {
- case Q_TYPE_QH:
- hw = q->qh->hw;
- /* is it in the S-mask? */
- if (hw->hw_info2 & cpu_to_hc32(fusbh200, 1 << uframe))
- usecs += q->qh->usecs;
- /* ... or C-mask? */
- if (hw->hw_info2 & cpu_to_hc32(fusbh200,
- 1 << (8 + uframe)))
- usecs += q->qh->c_usecs;
- hw_p = &hw->hw_next;
- q = &q->qh->qh_next;
- break;
- // case Q_TYPE_FSTN:
- default:
- /* for "save place" FSTNs, count the relevant INTR
- * bandwidth from the previous frame
- */
- if (q->fstn->hw_prev != FUSBH200_LIST_END(fusbh200)) {
- fusbh200_dbg (fusbh200, "ignoring FSTN cost ...\n");
- }
- hw_p = &q->fstn->hw_next;
- q = &q->fstn->fstn_next;
- break;
- case Q_TYPE_ITD:
- if (q->itd->hw_transaction[uframe])
- usecs += q->itd->stream->usecs;
- hw_p = &q->itd->hw_next;
- q = &q->itd->itd_next;
- break;
- }
- }
- if (usecs > fusbh200->uframe_periodic_max)
- fusbh200_err (fusbh200, "uframe %d sched overrun: %d usecs\n",
- frame * 8 + uframe, usecs);
- return usecs;
-}
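The S-mask/C-mask tests above are plain bit tests on a CPU-endian copy of hw_info2: bits 0-7 carry the start-split (interrupt) mask and bits 8-15 the complete-split mask. A minimal standalone sketch of that decoding (the helper name is made up purely for illustration):

	/* Sketch only: does a QH claim this uframe in either mask? */
	static inline int qh_claims_uframe(u32 hw_info2, unsigned int uframe)
	{
		u32 smask = hw_info2 & 0x00ff;		/* S-mask, bits 0-7 */
		u32 cmask = (hw_info2 >> 8) & 0x00ff;	/* C-mask, bits 8-15 */

		return ((smask | cmask) >> uframe) & 1;
	}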
-
-/*-------------------------------------------------------------------------*/
-
-static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
-{
- if (!dev1->tt || !dev2->tt)
- return 0;
- if (dev1->tt != dev2->tt)
- return 0;
- if (dev1->tt->multi)
- return dev1->ttport == dev2->ttport;
- else
- return 1;
-}
-
-/* return true iff the device's transaction translator is available
- * for a periodic transfer starting at the specified frame, using
- * all the uframes in the mask.
- */
-static int tt_no_collision (
- struct fusbh200_hcd *fusbh200,
- unsigned period,
- struct usb_device *dev,
- unsigned frame,
- u32 uf_mask
-)
-{
- if (period == 0) /* error */
- return 0;
-
- /* note bandwidth wastage: split never follows csplit
- * (different dev or endpoint) until the next uframe.
- * calling convention doesn't make that distinction.
- */
- for (; frame < fusbh200->periodic_size; frame += period) {
- union fusbh200_shadow here;
- __hc32 type;
- struct fusbh200_qh_hw *hw;
-
- here = fusbh200->pshadow [frame];
- type = Q_NEXT_TYPE(fusbh200, fusbh200->periodic [frame]);
- while (here.ptr) {
- switch (hc32_to_cpu(fusbh200, type)) {
- case Q_TYPE_ITD:
- type = Q_NEXT_TYPE(fusbh200, here.itd->hw_next);
- here = here.itd->itd_next;
- continue;
- case Q_TYPE_QH:
- hw = here.qh->hw;
- if (same_tt (dev, here.qh->dev)) {
- u32 mask;
-
- mask = hc32_to_cpu(fusbh200,
- hw->hw_info2);
- /* "knows" no gap is needed */
- mask |= mask >> 8;
- if (mask & uf_mask)
- break;
- }
- type = Q_NEXT_TYPE(fusbh200, hw->hw_next);
- here = here.qh->qh_next;
- continue;
- // case Q_TYPE_FSTN:
- default:
- fusbh200_dbg (fusbh200,
- "periodic frame %d bogus type %d\n",
- frame, type);
- }
-
- /* collision or error */
- return 0;
- }
- }
-
- /* no collision */
- return 1;
-}
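The mask folding in the QH case collapses the C-mask onto the S-mask so one AND against uf_mask catches either kind of split; a worked example with assumed values:

	/*
	 * Example (assumed values): S-mask 0x01, C-mask 0x0C in hw_info2
	 *   mask               = 0x0C01
	 *   mask |= mask >> 8  ->  low byte becomes 0x0D
	 * so this TT is treated as busy in uframes 0, 2 and 3, and any uf_mask
	 * touching those uframes reports a collision.
	 */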
-
-/*-------------------------------------------------------------------------*/
-
-static void enable_periodic(struct fusbh200_hcd *fusbh200)
-{
- if (fusbh200->periodic_count++)
- return;
-
- /* Stop waiting to turn off the periodic schedule */
- fusbh200->enabled_hrtimer_events &= ~BIT(FUSBH200_HRTIMER_DISABLE_PERIODIC);
-
- /* Don't start the schedule until PSS is 0 */
- fusbh200_poll_PSS(fusbh200);
- turn_on_io_watchdog(fusbh200);
-}
-
-static void disable_periodic(struct fusbh200_hcd *fusbh200)
-{
- if (--fusbh200->periodic_count)
- return;
-
- /* Don't turn off the schedule until PSS is 1 */
- fusbh200_poll_PSS(fusbh200);
-}
-
-/*-------------------------------------------------------------------------*/
-
-/* periodic schedule slots have iso tds (normal or split) first, then a
- * sparse tree for active interrupt transfers.
- *
- * this just links in a qh; caller guarantees uframe masks are set right.
- * no FSTN support (yet; fusbh200 0.96+)
- */
-static void qh_link_periodic(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
-{
- unsigned i;
- unsigned period = qh->period;
-
- dev_dbg (&qh->dev->dev,
- "link qh%d-%04x/%p start %d [%d/%d us]\n",
- period, hc32_to_cpup(fusbh200, &qh->hw->hw_info2)
- & (QH_CMASK | QH_SMASK),
- qh, qh->start, qh->usecs, qh->c_usecs);
-
- /* high bandwidth, or otherwise every microframe */
- if (period == 0)
- period = 1;
-
- for (i = qh->start; i < fusbh200->periodic_size; i += period) {
- union fusbh200_shadow *prev = &fusbh200->pshadow[i];
- __hc32 *hw_p = &fusbh200->periodic[i];
- union fusbh200_shadow here = *prev;
- __hc32 type = 0;
-
- /* skip the iso nodes at list head */
- while (here.ptr) {
- type = Q_NEXT_TYPE(fusbh200, *hw_p);
- if (type == cpu_to_hc32(fusbh200, Q_TYPE_QH))
- break;
- prev = periodic_next_shadow(fusbh200, prev, type);
- hw_p = shadow_next_periodic(fusbh200, &here, type);
- here = *prev;
- }
-
- /* sorting each branch by period (slow-->fast)
- * enables sharing interior tree nodes
- */
- while (here.ptr && qh != here.qh) {
- if (qh->period > here.qh->period)
- break;
- prev = &here.qh->qh_next;
- hw_p = &here.qh->hw->hw_next;
- here = *prev;
- }
- /* link in this qh, unless some earlier pass did that */
- if (qh != here.qh) {
- qh->qh_next = here;
- if (here.qh)
- qh->hw->hw_next = *hw_p;
- wmb ();
- prev->qh = qh;
- *hw_p = QH_NEXT (fusbh200, qh->qh_dma);
- }
- }
- qh->qh_state = QH_STATE_LINKED;
- qh->xacterrs = 0;
-
- /* update per-qh bandwidth for usbfs */
- fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated += qh->period
- ? ((qh->usecs + qh->c_usecs) / qh->period)
- : (qh->usecs * 8);
-
- list_add(&qh->intr_node, &fusbh200->intr_qh_list);
-
- /* maybe enable periodic schedule processing */
- ++fusbh200->intr_count;
- enable_periodic(fusbh200);
-}
-
-static void qh_unlink_periodic(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
-{
- unsigned i;
- unsigned period;
-
- /*
- * If qh is for a low/full-speed device, simply unlinking it
- * could interfere with an ongoing split transaction. To unlink
- * it safely would require setting the QH_INACTIVATE bit and
- * waiting at least one frame, as described in EHCI 4.12.2.5.
- *
- * We won't bother with any of this. Instead, we assume that the
- * only reason for unlinking an interrupt QH while the current URB
- * is still active is to dequeue all the URBs (flush the whole
- * endpoint queue).
- *
- * If rebalancing the periodic schedule is ever implemented, this
- * approach will no longer be valid.
- */
-
- /* high bandwidth, or otherwise part of every microframe */
- if ((period = qh->period) == 0)
- period = 1;
-
- for (i = qh->start; i < fusbh200->periodic_size; i += period)
- periodic_unlink (fusbh200, i, qh);
-
- /* update per-qh bandwidth for usbfs */
- fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated -= qh->period
- ? ((qh->usecs + qh->c_usecs) / qh->period)
- : (qh->usecs * 8);
-
- dev_dbg (&qh->dev->dev,
- "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
- qh->period,
- hc32_to_cpup(fusbh200, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
- qh, qh->start, qh->usecs, qh->c_usecs);
-
- /* qh->qh_next still "live" to HC */
- qh->qh_state = QH_STATE_UNLINK;
- qh->qh_next.ptr = NULL;
-
- if (fusbh200->qh_scan_next == qh)
- fusbh200->qh_scan_next = list_entry(qh->intr_node.next,
- struct fusbh200_qh, intr_node);
- list_del(&qh->intr_node);
-}
-
-static void start_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
-{
- /* If the QH isn't linked then there's nothing we can do
- * unless we were called during a giveback, in which case
- * qh_completions() has to deal with it.
- */
- if (qh->qh_state != QH_STATE_LINKED) {
- if (qh->qh_state == QH_STATE_COMPLETING)
- qh->needs_rescan = 1;
- return;
- }
-
- qh_unlink_periodic (fusbh200, qh);
-
- /* Make sure the unlinks are visible before starting the timer */
- wmb();
-
- /*
- * The EHCI spec doesn't say how long it takes the controller to
- * stop accessing an unlinked interrupt QH. The timer delay is
- * 9 uframes; presumably that will be long enough.
- */
- qh->unlink_cycle = fusbh200->intr_unlink_cycle;
-
- /* New entries go at the end of the intr_unlink list */
- if (fusbh200->intr_unlink)
- fusbh200->intr_unlink_last->unlink_next = qh;
- else
- fusbh200->intr_unlink = qh;
- fusbh200->intr_unlink_last = qh;
-
- if (fusbh200->intr_unlinking)
- ; /* Avoid recursive calls */
- else if (fusbh200->rh_state < FUSBH200_RH_RUNNING)
- fusbh200_handle_intr_unlinks(fusbh200);
- else if (fusbh200->intr_unlink == qh) {
- fusbh200_enable_event(fusbh200, FUSBH200_HRTIMER_UNLINK_INTR, true);
- ++fusbh200->intr_unlink_cycle;
- }
-}
-
-static void end_unlink_intr(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
-{
- struct fusbh200_qh_hw *hw = qh->hw;
- int rc;
-
- qh->qh_state = QH_STATE_IDLE;
- hw->hw_next = FUSBH200_LIST_END(fusbh200);
-
- qh_completions(fusbh200, qh);
-
- /* reschedule QH iff another request is queued */
- if (!list_empty(&qh->qtd_list) && fusbh200->rh_state == FUSBH200_RH_RUNNING) {
- rc = qh_schedule(fusbh200, qh);
-
- /* An error here likely indicates handshake failure
- * or no space left in the schedule. Neither fault
- * should happen often ...
- *
- * FIXME kill the now-dysfunctional queued urbs
- */
- if (rc != 0)
- fusbh200_err(fusbh200, "can't reschedule qh %p, err %d\n",
- qh, rc);
- }
-
- /* maybe turn off periodic schedule */
- --fusbh200->intr_count;
- disable_periodic(fusbh200);
-}
-
-/*-------------------------------------------------------------------------*/
-
-static int check_period (
- struct fusbh200_hcd *fusbh200,
- unsigned frame,
- unsigned uframe,
- unsigned period,
- unsigned usecs
-) {
- int claimed;
-
- /* complete split running into next frame?
- * given FSTN support, we could sometimes check...
- */
- if (uframe >= 8)
- return 0;
-
- /* convert "usecs we need" to "max already claimed" */
- usecs = fusbh200->uframe_periodic_max - usecs;
-
- /* we "know" 2 and 4 uframe intervals were rejected; so
- * for period 0, check _every_ microframe in the schedule.
- */
- if (unlikely (period == 0)) {
- do {
- for (uframe = 0; uframe < 7; uframe++) {
- claimed = periodic_usecs (fusbh200, frame, uframe);
- if (claimed > usecs)
- return 0;
- }
- } while ((frame += 1) < fusbh200->periodic_size);
-
- /* just check the specified uframe, at that period */
- } else {
- do {
- claimed = periodic_usecs (fusbh200, frame, uframe);
- if (claimed > usecs)
- return 0;
- } while ((frame += period) < fusbh200->periodic_size);
- }
-
- // success!
- return 1;
-}
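The "usecs we need" to "max already claimed" conversion above is just the bandwidth budget rewritten as a single comparison; a worked example with assumed numbers:

	/*
	 * Example (assumed numbers): uframe_periodic_max = 100 usec and a
	 * transfer needing 30 usec.  The code sets usecs = 100 - 30 = 70 and
	 * rejects any uframe where claimed > 70, which is equivalent to
	 * requiring claimed + 30 <= 100.
	 */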
-
-static int check_intr_schedule (
- struct fusbh200_hcd *fusbh200,
- unsigned frame,
- unsigned uframe,
- const struct fusbh200_qh *qh,
- __hc32 *c_maskp
-)
-{
- int retval = -ENOSPC;
- u8 mask = 0;
-
- if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
- goto done;
-
- if (!check_period (fusbh200, frame, uframe, qh->period, qh->usecs))
- goto done;
- if (!qh->c_usecs) {
- retval = 0;
- *c_maskp = 0;
- goto done;
- }
-
- /* Make sure this tt's buffer is also available for CSPLITs.
- * We pessimize a bit; probably the typical full speed case
- * doesn't need the second CSPLIT.
- *
- * NOTE: both SPLIT and CSPLIT could be checked in just
- * one smart pass...
- */
- mask = 0x03 << (uframe + qh->gap_uf);
- *c_maskp = cpu_to_hc32(fusbh200, mask << 8);
-
- mask |= 1 << uframe;
- if (tt_no_collision (fusbh200, qh->period, qh->dev, frame, mask)) {
- if (!check_period (fusbh200, frame, uframe + qh->gap_uf + 1,
- qh->period, qh->c_usecs))
- goto done;
- if (!check_period (fusbh200, frame, uframe + qh->gap_uf,
- qh->period, qh->c_usecs))
- goto done;
- retval = 0;
- }
-done:
- return retval;
-}
-
-/* "first fit" scheduling policy used the first time through,
- * or when the previous schedule slot can't be re-used.
- */
-static int qh_schedule(struct fusbh200_hcd *fusbh200, struct fusbh200_qh *qh)
-{
- int status;
- unsigned uframe;
- __hc32 c_mask;
- unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
- struct fusbh200_qh_hw *hw = qh->hw;
-
- qh_refresh(fusbh200, qh);
- hw->hw_next = FUSBH200_LIST_END(fusbh200);
- frame = qh->start;
-
- /* reuse the previous schedule slots, if we can */
- if (frame < qh->period) {
- uframe = ffs(hc32_to_cpup(fusbh200, &hw->hw_info2) & QH_SMASK);
- status = check_intr_schedule (fusbh200, frame, --uframe,
- qh, &c_mask);
- } else {
- uframe = 0;
- c_mask = 0;
- status = -ENOSPC;
- }
-
- /* else scan the schedule to find a group of slots such that all
- * uframes have enough periodic bandwidth available.
- */
- if (status) {
- /* "normal" case, uframing flexible except with splits */
- if (qh->period) {
- int i;
-
- for (i = qh->period; status && i > 0; --i) {
- frame = ++fusbh200->random_frame % qh->period;
- for (uframe = 0; uframe < 8; uframe++) {
- status = check_intr_schedule (fusbh200,
- frame, uframe, qh,
- &c_mask);
- if (status == 0)
- break;
- }
- }
-
- /* qh->period == 0 means every uframe */
- } else {
- frame = 0;
- status = check_intr_schedule (fusbh200, 0, 0, qh, &c_mask);
- }
- if (status)
- goto done;
- qh->start = frame;
-
- /* reset S-frame and (maybe) C-frame masks */
- hw->hw_info2 &= cpu_to_hc32(fusbh200, ~(QH_CMASK | QH_SMASK));
- hw->hw_info2 |= qh->period
- ? cpu_to_hc32(fusbh200, 1 << uframe)
- : cpu_to_hc32(fusbh200, QH_SMASK);
- hw->hw_info2 |= c_mask;
- } else
- fusbh200_dbg (fusbh200, "reused qh %p schedule\n", qh);
-
- /* stuff into the periodic schedule */
- qh_link_periodic(fusbh200, qh);
-done:
- return status;
-}
-
-static int intr_submit (
- struct fusbh200_hcd *fusbh200,
- struct urb *urb,
- struct list_head *qtd_list,
- gfp_t mem_flags
-) {
- unsigned epnum;
- unsigned long flags;
- struct fusbh200_qh *qh;
- int status;
- struct list_head empty;
-
- /* get endpoint and transfer/schedule data */
- epnum = urb->ep->desc.bEndpointAddress;
-
- spin_lock_irqsave (&fusbh200->lock, flags);
-
- if (unlikely(!HCD_HW_ACCESSIBLE(fusbh200_to_hcd(fusbh200)))) {
- status = -ESHUTDOWN;
- goto done_not_linked;
- }
- status = usb_hcd_link_urb_to_ep(fusbh200_to_hcd(fusbh200), urb);
- if (unlikely(status))
- goto done_not_linked;
-
- /* get qh and force any scheduling errors */
- INIT_LIST_HEAD (&empty);
- qh = qh_append_tds(fusbh200, urb, &empty, epnum, &urb->ep->hcpriv);
- if (qh == NULL) {
- status = -ENOMEM;
- goto done;
- }
- if (qh->qh_state == QH_STATE_IDLE) {
- if ((status = qh_schedule (fusbh200, qh)) != 0)
- goto done;
- }
-
- /* then queue the urb's tds to the qh */
- qh = qh_append_tds(fusbh200, urb, qtd_list, epnum, &urb->ep->hcpriv);
- BUG_ON (qh == NULL);
-
- /* ... update usbfs periodic stats */
- fusbh200_to_hcd(fusbh200)->self.bandwidth_int_reqs++;
-
-done:
- if (unlikely(status))
- usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb);
-done_not_linked:
- spin_unlock_irqrestore (&fusbh200->lock, flags);
- if (status)
- qtd_list_free (fusbh200, urb, qtd_list);
-
- return status;
-}
-
-static void scan_intr(struct fusbh200_hcd *fusbh200)
-{
- struct fusbh200_qh *qh;
-
- list_for_each_entry_safe(qh, fusbh200->qh_scan_next, &fusbh200->intr_qh_list,
- intr_node) {
- rescan:
- /* clean any finished work for this qh */
- if (!list_empty(&qh->qtd_list)) {
- int temp;
-
- /*
- * Unlinks could happen here; completion reporting
- * drops the lock. That's why fusbh200->qh_scan_next
- * always holds the next qh to scan; if the next qh
- * gets unlinked then fusbh200->qh_scan_next is adjusted
- * in qh_unlink_periodic().
- */
- temp = qh_completions(fusbh200, qh);
- if (unlikely(qh->needs_rescan ||
- (list_empty(&qh->qtd_list) &&
- qh->qh_state == QH_STATE_LINKED)))
- start_unlink_intr(fusbh200, qh);
- else if (temp != 0)
- goto rescan;
- }
- }
-}
-
-/*-------------------------------------------------------------------------*/
-
-/* fusbh200_iso_stream ops work with both ITD and SITD */
-
-static struct fusbh200_iso_stream *
-iso_stream_alloc (gfp_t mem_flags)
-{
- struct fusbh200_iso_stream *stream;
-
- stream = kzalloc(sizeof *stream, mem_flags);
- if (likely (stream != NULL)) {
- INIT_LIST_HEAD(&stream->td_list);
- INIT_LIST_HEAD(&stream->free_list);
- stream->next_uframe = -1;
- }
- return stream;
-}
-
-static void
-iso_stream_init (
- struct fusbh200_hcd *fusbh200,
- struct fusbh200_iso_stream *stream,
- struct usb_device *dev,
- int pipe,
- unsigned interval
-)
-{
- u32 buf1;
- unsigned epnum, maxp;
- int is_input;
- long bandwidth;
- unsigned multi;
-
- /*
- * this might be a "high bandwidth" highspeed endpoint,
- * as encoded in the ep descriptor's wMaxPacket field
- */
- epnum = usb_pipeendpoint (pipe);
- is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
- maxp = usb_maxpacket(dev, pipe, !is_input);
- if (is_input) {
- buf1 = (1 << 11);
- } else {
- buf1 = 0;
- }
-
- maxp = max_packet(maxp);
- multi = hb_mult(maxp);
- buf1 |= maxp;
- maxp *= multi;
-
- stream->buf0 = cpu_to_hc32(fusbh200, (epnum << 8) | dev->devnum);
- stream->buf1 = cpu_to_hc32(fusbh200, buf1);
- stream->buf2 = cpu_to_hc32(fusbh200, multi);
-
- /* usbfs wants to report the average usecs per frame tied up
- * when transfers on this endpoint are scheduled ...
- */
- if (dev->speed == USB_SPEED_FULL) {
- interval <<= 3;
- stream->usecs = NS_TO_US(usb_calc_bus_time(dev->speed,
- is_input, 1, maxp));
- stream->usecs /= 8;
- } else {
- stream->highspeed = 1;
- stream->usecs = HS_USECS_ISO (maxp);
- }
- bandwidth = stream->usecs * 8;
- bandwidth /= interval;
-
- stream->bandwidth = bandwidth;
- stream->udev = dev;
- stream->bEndpointAddress = is_input | epnum;
- stream->interval = interval;
- stream->maxp = maxp;
-}
-
-static struct fusbh200_iso_stream *
-iso_stream_find (struct fusbh200_hcd *fusbh200, struct urb *urb)
-{
- unsigned epnum;
- struct fusbh200_iso_stream *stream;
- struct usb_host_endpoint *ep;
- unsigned long flags;
-
- epnum = usb_pipeendpoint (urb->pipe);
- if (usb_pipein(urb->pipe))
- ep = urb->dev->ep_in[epnum];
- else
- ep = urb->dev->ep_out[epnum];
-
- spin_lock_irqsave (&fusbh200->lock, flags);
- stream = ep->hcpriv;
-
- if (unlikely (stream == NULL)) {
- stream = iso_stream_alloc(GFP_ATOMIC);
- if (likely (stream != NULL)) {
- ep->hcpriv = stream;
- stream->ep = ep;
- iso_stream_init(fusbh200, stream, urb->dev, urb->pipe,
- urb->interval);
- }
-
- /* if dev->ep [epnum] is a QH, hw is set */
- } else if (unlikely (stream->hw != NULL)) {
- fusbh200_dbg (fusbh200, "dev %s ep%d%s, not iso??\n",
- urb->dev->devpath, epnum,
- usb_pipein(urb->pipe) ? "in" : "out");
- stream = NULL;
- }
-
- spin_unlock_irqrestore (&fusbh200->lock, flags);
- return stream;
-}
-
-/*-------------------------------------------------------------------------*/
-
-/* fusbh200_iso_sched ops can be ITD-only or SITD-only */
-
-static struct fusbh200_iso_sched *
-iso_sched_alloc (unsigned packets, gfp_t mem_flags)
-{
- struct fusbh200_iso_sched *iso_sched;
- int size = sizeof *iso_sched;
-
- size += packets * sizeof (struct fusbh200_iso_packet);
- iso_sched = kzalloc(size, mem_flags);
- if (likely (iso_sched != NULL)) {
- INIT_LIST_HEAD (&iso_sched->td_list);
- }
- return iso_sched;
-}
-
-static inline void
-itd_sched_init(
- struct fusbh200_hcd *fusbh200,
- struct fusbh200_iso_sched *iso_sched,
- struct fusbh200_iso_stream *stream,
- struct urb *urb
-)
-{
- unsigned i;
- dma_addr_t dma = urb->transfer_dma;
-
- /* how many uframes are needed for these transfers */
- iso_sched->span = urb->number_of_packets * stream->interval;
-
- /* figure out per-uframe itd fields that we'll need later
- * when we fit new itds into the schedule.
- */
- for (i = 0; i < urb->number_of_packets; i++) {
- struct fusbh200_iso_packet *uframe = &iso_sched->packet [i];
- unsigned length;
- dma_addr_t buf;
- u32 trans;
-
- length = urb->iso_frame_desc [i].length;
- buf = dma + urb->iso_frame_desc [i].offset;
-
- trans = FUSBH200_ISOC_ACTIVE;
- trans |= buf & 0x0fff;
- if (unlikely (((i + 1) == urb->number_of_packets))
- && !(urb->transfer_flags & URB_NO_INTERRUPT))
- trans |= FUSBH200_ITD_IOC;
- trans |= length << 16;
- uframe->transaction = cpu_to_hc32(fusbh200, trans);
-
- /* might need to cross a buffer page within a uframe */
- uframe->bufp = (buf & ~(u64)0x0fff);
- buf += length;
- if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
- uframe->cross = 1;
- }
-}
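The cross flag computed above marks packets whose buffer straddles a 4 KB page, so itd_patch() later programs a second buffer page; a worked example with assumed addresses:

	/*
	 * Example (assumed addresses): a 256-byte packet at dma 0x10000f80
	 * ends at 0x10001080, so uframe->bufp (0x10000000) differs from the
	 * end's page (0x10001000): uframe->cross is set and itd_patch()
	 * fills in the next hw_bufp slot for the following page.
	 */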
-
-static void
-iso_sched_free (
- struct fusbh200_iso_stream *stream,
- struct fusbh200_iso_sched *iso_sched
-)
-{
- if (!iso_sched)
- return;
- // caller must hold fusbh200->lock!
- list_splice (&iso_sched->td_list, &stream->free_list);
- kfree (iso_sched);
-}
-
-static int
-itd_urb_transaction (
- struct fusbh200_iso_stream *stream,
- struct fusbh200_hcd *fusbh200,
- struct urb *urb,
- gfp_t mem_flags
-)
-{
- struct fusbh200_itd *itd;
- dma_addr_t itd_dma;
- int i;
- unsigned num_itds;
- struct fusbh200_iso_sched *sched;
- unsigned long flags;
-
- sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
- if (unlikely (sched == NULL))
- return -ENOMEM;
-
- itd_sched_init(fusbh200, sched, stream, urb);
-
- if (urb->interval < 8)
- num_itds = 1 + (sched->span + 7) / 8;
- else
- num_itds = urb->number_of_packets;
-
- /* allocate/init ITDs */
- spin_lock_irqsave (&fusbh200->lock, flags);
- for (i = 0; i < num_itds; i++) {
-
- /*
- * Use iTDs from the free list, but not iTDs that may
- * still be in use by the hardware.
- */
- if (likely(!list_empty(&stream->free_list))) {
- itd = list_first_entry(&stream->free_list,
- struct fusbh200_itd, itd_list);
- if (itd->frame == fusbh200->now_frame)
- goto alloc_itd;
- list_del (&itd->itd_list);
- itd_dma = itd->itd_dma;
- } else {
- alloc_itd:
- spin_unlock_irqrestore (&fusbh200->lock, flags);
- itd = dma_pool_alloc (fusbh200->itd_pool, mem_flags,
- &itd_dma);
- spin_lock_irqsave (&fusbh200->lock, flags);
- if (!itd) {
- iso_sched_free(stream, sched);
- spin_unlock_irqrestore(&fusbh200->lock, flags);
- return -ENOMEM;
- }
- }
-
- memset (itd, 0, sizeof *itd);
- itd->itd_dma = itd_dma;
- list_add (&itd->itd_list, &sched->td_list);
- }
- spin_unlock_irqrestore (&fusbh200->lock, flags);
-
- /* temporarily store schedule info in hcpriv */
- urb->hcpriv = sched;
- urb->error_count = 0;
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-
-static inline int
-itd_slot_ok (
- struct fusbh200_hcd *fusbh200,
- u32 mod,
- u32 uframe,
- u8 usecs,
- u32 period
-)
-{
- uframe %= period;
- do {
- /* can't commit more than uframe_periodic_max usec */
- if (periodic_usecs (fusbh200, uframe >> 3, uframe & 0x7)
- > (fusbh200->uframe_periodic_max - usecs))
- return 0;
-
- /* we know urb->interval is 2^N uframes */
- uframe += period;
- } while (uframe < mod);
- return 1;
-}
-
-/*
- * This scheduler plans almost as far into the future as it has actual
- * periodic schedule slots. (Affected by TUNE_FLS, which defaults to
- * "as small as possible" to be cache-friendlier.) That limits the size
- * transfers you can stream reliably; avoid more than 64 msec per urb.
- * Also avoid queue depths of less than fusbh200's worst irq latency (affected
- * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
- * and other factors); or more than about 230 msec total (for portability,
- * given FUSBH200_TUNE_FLS and the slop). Or, write a smarter scheduler!
- */
-
-#define SCHEDULE_SLOP 80 /* microframes */
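The "about 230 msec total" figure in the comment above follows from the schedule size arithmetic in iso_stream_schedule(); a worked example assuming the smallest tuned frame list:

/*
 * Example (assuming FUSBH200_TUNE_FLS selects periodic_size = 256 frames):
 *   mod  = 256 << 3                 = 2048 uframes  (256 ms of schedule)
 *   room = mod - 2 * SCHEDULE_SLOP  = 2048 - 160    = 1888 uframes ~= 236 ms
 * which is where the "more than about 230 msec total" limit comes from.
 */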
-
-static int
-iso_stream_schedule (
- struct fusbh200_hcd *fusbh200,
- struct urb *urb,
- struct fusbh200_iso_stream *stream
-)
-{
- u32 now, next, start, period, span;
- int status;
- unsigned mod = fusbh200->periodic_size << 3;
- struct fusbh200_iso_sched *sched = urb->hcpriv;
-
- period = urb->interval;
- span = sched->span;
-
- if (span > mod - SCHEDULE_SLOP) {
- fusbh200_dbg (fusbh200, "iso request %p too long\n", urb);
- status = -EFBIG;
- goto fail;
- }
-
- now = fusbh200_read_frame_index(fusbh200) & (mod - 1);
-
- /* Typical case: reuse current schedule, stream is still active.
- * Hopefully there are no gaps from the host falling behind
- * (irq delays etc), but if there are we'll take the next
- * slot in the schedule, implicitly assuming URB_ISO_ASAP.
- */
- if (likely (!list_empty (&stream->td_list))) {
- u32 excess;
-
- /* For high speed devices, allow scheduling within the
- * isochronous scheduling threshold. For full speed devices
- * and Intel PCI-based controllers, don't (work around for
- * Intel ICH9 bug).
- */
- if (!stream->highspeed && fusbh200->fs_i_thresh)
- next = now + fusbh200->i_thresh;
- else
- next = now;
-
- /* Fell behind (by up to twice the slop amount)?
- * We decide based on the time of the last currently-scheduled
- * slot, not the time of the next available slot.
- */
- excess = (stream->next_uframe - period - next) & (mod - 1);
- if (excess >= mod - 2 * SCHEDULE_SLOP)
- start = next + excess - mod + period *
- DIV_ROUND_UP(mod - excess, period);
- else
- start = next + excess + period;
- if (start - now >= mod) {
- fusbh200_dbg(fusbh200, "request %p would overflow (%d+%d >= %d)\n",
- urb, start - now - period, period,
- mod);
- status = -EFBIG;
- goto fail;
- }
- }
-
- /* need to schedule; when's the next (u)frame we could start?
- * this is bigger than fusbh200->i_thresh allows; scheduling itself
- * isn't free, the slop should handle reasonably slow cpus. it
- * can also help high bandwidth if the dma and irq loads don't
- * jump until after the queue is primed.
- */
- else {
- int done = 0;
- start = SCHEDULE_SLOP + (now & ~0x07);
-
- /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
-
- /* find a uframe slot with enough bandwidth.
- * Early uframes are more precious because full-speed
- * iso IN transfers can't use late uframes,
- * and therefore they should be allocated last.
- */
- next = start;
- start += period;
- do {
- start--;
- /* check schedule: enough space? */
- if (itd_slot_ok(fusbh200, mod, start,
- stream->usecs, period))
- done = 1;
- } while (start > next && !done);
-
- /* no room in the schedule */
- if (!done) {
- fusbh200_dbg(fusbh200, "iso resched full %p (now %d max %d)\n",
- urb, now, now + mod);
- status = -ENOSPC;
- goto fail;
- }
- }
-
- /* Tried to schedule too far into the future? */
- if (unlikely(start - now + span - period
- >= mod - 2 * SCHEDULE_SLOP)) {
- fusbh200_dbg(fusbh200, "request %p would overflow (%d+%d >= %d)\n",
- urb, start - now, span - period,
- mod - 2 * SCHEDULE_SLOP);
- status = -EFBIG;
- goto fail;
- }
-
- stream->next_uframe = start & (mod - 1);
-
- /* report high speed start in uframes; full speed, in frames */
- urb->start_frame = stream->next_uframe;
- if (!stream->highspeed)
- urb->start_frame >>= 3;
-
- /* Make sure scan_isoc() sees these */
- if (fusbh200->isoc_count == 0)
- fusbh200->next_frame = now >> 3;
- return 0;
-
- fail:
- iso_sched_free(stream, sched);
- urb->hcpriv = NULL;
- return status;
-}
-
-/*-------------------------------------------------------------------------*/
-
-static inline void
-itd_init(struct fusbh200_hcd *fusbh200, struct fusbh200_iso_stream *stream,
- struct fusbh200_itd *itd)
-{
- int i;
-
- /* it's been recently zeroed */
- itd->hw_next = FUSBH200_LIST_END(fusbh200);
- itd->hw_bufp [0] = stream->buf0;
- itd->hw_bufp [1] = stream->buf1;
- itd->hw_bufp [2] = stream->buf2;
-
- for (i = 0; i < 8; i++)
- itd->index[i] = -1;
-
- /* All other fields are filled when scheduling */
-}
-
-static inline void
-itd_patch(
- struct fusbh200_hcd *fusbh200,
- struct fusbh200_itd *itd,
- struct fusbh200_iso_sched *iso_sched,
- unsigned index,
- u16 uframe
-)
-{
- struct fusbh200_iso_packet *uf = &iso_sched->packet [index];
- unsigned pg = itd->pg;
-
- // BUG_ON (pg == 6 && uf->cross);
-
- uframe &= 0x07;
- itd->index [uframe] = index;
-
- itd->hw_transaction[uframe] = uf->transaction;
- itd->hw_transaction[uframe] |= cpu_to_hc32(fusbh200, pg << 12);
- itd->hw_bufp[pg] |= cpu_to_hc32(fusbh200, uf->bufp & ~(u32)0);
- itd->hw_bufp_hi[pg] |= cpu_to_hc32(fusbh200, (u32)(uf->bufp >> 32));
-
- /* iso_frame_desc[].offset must be strictly increasing */
- if (unlikely (uf->cross)) {
- u64 bufp = uf->bufp + 4096;
-
- itd->pg = ++pg;
- itd->hw_bufp[pg] |= cpu_to_hc32(fusbh200, bufp & ~(u32)0);
- itd->hw_bufp_hi[pg] |= cpu_to_hc32(fusbh200, (u32)(bufp >> 32));
- }
-}
-
-static inline void
-itd_link (struct fusbh200_hcd *fusbh200, unsigned frame, struct fusbh200_itd *itd)
-{
- union fusbh200_shadow *prev = &fusbh200->pshadow[frame];
- __hc32 *hw_p = &fusbh200->periodic[frame];
- union fusbh200_shadow here = *prev;
- __hc32 type = 0;
-
- /* skip any iso nodes which might belong to previous microframes */
- while (here.ptr) {
- type = Q_NEXT_TYPE(fusbh200, *hw_p);
- if (type == cpu_to_hc32(fusbh200, Q_TYPE_QH))
- break;
- prev = periodic_next_shadow(fusbh200, prev, type);
- hw_p = shadow_next_periodic(fusbh200, &here, type);
- here = *prev;
- }
-
- itd->itd_next = here;
- itd->hw_next = *hw_p;
- prev->itd = itd;
- itd->frame = frame;
- wmb ();
- *hw_p = cpu_to_hc32(fusbh200, itd->itd_dma | Q_TYPE_ITD);
-}
-
-/* fit urb's itds into the selected schedule slot; activate as needed */
-static void itd_link_urb(
- struct fusbh200_hcd *fusbh200,
- struct urb *urb,
- unsigned mod,
- struct fusbh200_iso_stream *stream
-)
-{
- int packet;
- unsigned next_uframe, uframe, frame;
- struct fusbh200_iso_sched *iso_sched = urb->hcpriv;
- struct fusbh200_itd *itd;
-
- next_uframe = stream->next_uframe & (mod - 1);
-
- if (unlikely (list_empty(&stream->td_list))) {
- fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated
- += stream->bandwidth;
- fusbh200_dbg(fusbh200,
- "schedule devp %s ep%d%s-iso period %d start %d.%d\n",
- urb->dev->devpath, stream->bEndpointAddress & 0x0f,
- (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
- urb->interval,
- next_uframe >> 3, next_uframe & 0x7);
- }
-
- /* fill iTDs uframe by uframe */
- for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) {
- if (itd == NULL) {
- /* ASSERT: we have all necessary itds */
- // BUG_ON (list_empty (&iso_sched->td_list));
-
- /* ASSERT: no itds for this endpoint in this uframe */
-
- itd = list_entry (iso_sched->td_list.next,
- struct fusbh200_itd, itd_list);
- list_move_tail (&itd->itd_list, &stream->td_list);
- itd->stream = stream;
- itd->urb = urb;
- itd_init (fusbh200, stream, itd);
- }
-
- uframe = next_uframe & 0x07;
- frame = next_uframe >> 3;
-
- itd_patch(fusbh200, itd, iso_sched, packet, uframe);
-
- next_uframe += stream->interval;
- next_uframe &= mod - 1;
- packet++;
-
- /* link completed itds into the schedule */
- if (((next_uframe >> 3) != frame)
- || packet == urb->number_of_packets) {
- itd_link(fusbh200, frame & (fusbh200->periodic_size - 1), itd);
- itd = NULL;
- }
- }
- stream->next_uframe = next_uframe;
-
- /* don't need that schedule data any more */
- iso_sched_free (stream, iso_sched);
- urb->hcpriv = NULL;
-
- ++fusbh200->isoc_count;
- enable_periodic(fusbh200);
-}
-
-#define ISO_ERRS (FUSBH200_ISOC_BUF_ERR | FUSBH200_ISOC_BABBLE | FUSBH200_ISOC_XACTERR)
-
-/* Process and recycle a completed ITD. Return true iff its urb completed,
- * and hence its completion callback probably added things to the hardware
- * schedule.
- *
- * Note that we carefully avoid recycling this descriptor until after any
- * completion callback runs, so that it won't be reused quickly. That is,
- * assuming (a) no more than two urbs per frame on this endpoint, and also
- * (b) only this endpoint's completions submit URBs. It seems some silicon
- * corrupts things if you reuse completed descriptors very quickly...
- */
-static bool itd_complete(struct fusbh200_hcd *fusbh200, struct fusbh200_itd *itd)
-{
- struct urb *urb = itd->urb;
- struct usb_iso_packet_descriptor *desc;
- u32 t;
- unsigned uframe;
- int urb_index = -1;
- struct fusbh200_iso_stream *stream = itd->stream;
- struct usb_device *dev;
- bool retval = false;
-
- /* for each uframe with a packet */
- for (uframe = 0; uframe < 8; uframe++) {
- if (likely (itd->index[uframe] == -1))
- continue;
- urb_index = itd->index[uframe];
- desc = &urb->iso_frame_desc [urb_index];
-
- t = hc32_to_cpup(fusbh200, &itd->hw_transaction [uframe]);
- itd->hw_transaction [uframe] = 0;
-
- /* report transfer status */
- if (unlikely (t & ISO_ERRS)) {
- urb->error_count++;
- if (t & FUSBH200_ISOC_BUF_ERR)
- desc->status = usb_pipein (urb->pipe)
- ? -ENOSR /* hc couldn't read */
- : -ECOMM; /* hc couldn't write */
- else if (t & FUSBH200_ISOC_BABBLE)
- desc->status = -EOVERFLOW;
- else /* (t & FUSBH200_ISOC_XACTERR) */
- desc->status = -EPROTO;
-
- /* HC need not update length with this error */
- if (!(t & FUSBH200_ISOC_BABBLE)) {
- desc->actual_length = fusbh200_itdlen(urb, desc, t);
- urb->actual_length += desc->actual_length;
- }
- } else if (likely ((t & FUSBH200_ISOC_ACTIVE) == 0)) {
- desc->status = 0;
- desc->actual_length = fusbh200_itdlen(urb, desc, t);
- urb->actual_length += desc->actual_length;
- } else {
- /* URB was too late */
- desc->status = -EXDEV;
- }
- }
-
- /* handle completion now? */
- if (likely ((urb_index + 1) != urb->number_of_packets))
- goto done;
-
- /* ASSERT: it's really the last itd for this urb
- list_for_each_entry (itd, &stream->td_list, itd_list)
- BUG_ON (itd->urb == urb);
- */
-
- /* give urb back to the driver; completion often (re)submits */
- dev = urb->dev;
- fusbh200_urb_done(fusbh200, urb, 0);
- retval = true;
- urb = NULL;
-
- --fusbh200->isoc_count;
- disable_periodic(fusbh200);
-
- if (unlikely(list_is_singular(&stream->td_list))) {
- fusbh200_to_hcd(fusbh200)->self.bandwidth_allocated
- -= stream->bandwidth;
- fusbh200_dbg(fusbh200,
- "deschedule devp %s ep%d%s-iso\n",
- dev->devpath, stream->bEndpointAddress & 0x0f,
- (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
- }
-
-done:
- itd->urb = NULL;
-
- /* Add to the end of the free list for later reuse */
- list_move_tail(&itd->itd_list, &stream->free_list);
-
- /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
- if (list_empty(&stream->td_list)) {
- list_splice_tail_init(&stream->free_list,
- &fusbh200->cached_itd_list);
- start_free_itds(fusbh200);
- }
-
- return retval;
-}
-
-/*-------------------------------------------------------------------------*/
-
-static int itd_submit (struct fusbh200_hcd *fusbh200, struct urb *urb,
- gfp_t mem_flags)
-{
- int status = -EINVAL;
- unsigned long flags;
- struct fusbh200_iso_stream *stream;
-
- /* Get iso_stream head */
- stream = iso_stream_find (fusbh200, urb);
- if (unlikely (stream == NULL)) {
- fusbh200_dbg (fusbh200, "can't get iso stream\n");
- return -ENOMEM;
- }
- if (unlikely (urb->interval != stream->interval &&
- fusbh200_port_speed(fusbh200, 0) == USB_PORT_STAT_HIGH_SPEED)) {
- fusbh200_dbg (fusbh200, "can't change iso interval %d --> %d\n",
- stream->interval, urb->interval);
- goto done;
- }
-
-#ifdef FUSBH200_URB_TRACE
- fusbh200_dbg (fusbh200,
- "%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
- __func__, urb->dev->devpath, urb,
- usb_pipeendpoint (urb->pipe),
- usb_pipein (urb->pipe) ? "in" : "out",
- urb->transfer_buffer_length,
- urb->number_of_packets, urb->interval,
- stream);
-#endif
-
- /* allocate ITDs w/o locking anything */
- status = itd_urb_transaction (stream, fusbh200, urb, mem_flags);
- if (unlikely (status < 0)) {
- fusbh200_dbg (fusbh200, "can't init itds\n");
- goto done;
- }
-
- /* schedule ... need to lock */
- spin_lock_irqsave (&fusbh200->lock, flags);
- if (unlikely(!HCD_HW_ACCESSIBLE(fusbh200_to_hcd(fusbh200)))) {
- status = -ESHUTDOWN;
- goto done_not_linked;
- }
- status = usb_hcd_link_urb_to_ep(fusbh200_to_hcd(fusbh200), urb);
- if (unlikely(status))
- goto done_not_linked;
- status = iso_stream_schedule(fusbh200, urb, stream);
- if (likely (status == 0))
- itd_link_urb (fusbh200, urb, fusbh200->periodic_size << 3, stream);
- else
- usb_hcd_unlink_urb_from_ep(fusbh200_to_hcd(fusbh200), urb);
- done_not_linked:
- spin_unlock_irqrestore (&fusbh200->lock, flags);
- done:
- return status;
-}
-
-/*-------------------------------------------------------------------------*/
-
-static void scan_isoc(struct fusbh200_hcd *fusbh200)
-{
- unsigned uf, now_frame, frame;
- unsigned fmask = fusbh200->periodic_size - 1;
- bool modified, live;
-
- /*
- * When running, scan from last scan point up to "now"
- * else clean up by scanning everything that's left.
- * Touches as few pages as possible: cache-friendly.
- */
- if (fusbh200->rh_state >= FUSBH200_RH_RUNNING) {
- uf = fusbh200_read_frame_index(fusbh200);
- now_frame = (uf >> 3) & fmask;
- live = true;
- } else {
- now_frame = (fusbh200->next_frame - 1) & fmask;
- live = false;
- }
- fusbh200->now_frame = now_frame;
-
- frame = fusbh200->next_frame;
- for (;;) {
- union fusbh200_shadow q, *q_p;
- __hc32 type, *hw_p;
-
-restart:
- /* scan each element in frame's queue for completions */
- q_p = &fusbh200->pshadow [frame];
- hw_p = &fusbh200->periodic [frame];
- q.ptr = q_p->ptr;
- type = Q_NEXT_TYPE(fusbh200, *hw_p);
- modified = false;
-
- while (q.ptr != NULL) {
- switch (hc32_to_cpu(fusbh200, type)) {
- case Q_TYPE_ITD:
- /* If this ITD is still active, leave it for
- * later processing ... check the next entry.
- * No need to check for activity unless the
- * frame is current.
- */
- if (frame == now_frame && live) {
- rmb();
- for (uf = 0; uf < 8; uf++) {
- if (q.itd->hw_transaction[uf] &
- ITD_ACTIVE(fusbh200))
- break;
- }
- if (uf < 8) {
- q_p = &q.itd->itd_next;
- hw_p = &q.itd->hw_next;
- type = Q_NEXT_TYPE(fusbh200,
- q.itd->hw_next);
- q = *q_p;
- break;
- }
- }
-
- /* Take finished ITDs out of the schedule
- * and process them: recycle, maybe report
- * URB completion. HC won't cache the
- * pointer for much longer, if at all.
- */
- *q_p = q.itd->itd_next;
- *hw_p = q.itd->hw_next;
- type = Q_NEXT_TYPE(fusbh200, q.itd->hw_next);
- wmb();
- modified = itd_complete (fusbh200, q.itd);
- q = *q_p;
- break;
- default:
- fusbh200_dbg(fusbh200, "corrupt type %d frame %d shadow %p\n",
- type, frame, q.ptr);
- // BUG ();
- /* FALL THROUGH */
- case Q_TYPE_QH:
- case Q_TYPE_FSTN:
- /* End of the iTDs and siTDs */
- q.ptr = NULL;
- break;
- }
-
- /* assume completion callbacks modify the queue */
- if (unlikely(modified && fusbh200->isoc_count > 0))
- goto restart;
- }
-
- /* Stop when we have reached the current frame */
- if (frame == now_frame)
- break;
- frame = (frame + 1) & fmask;
- }
- fusbh200->next_frame = now_frame;
-}
-/*-------------------------------------------------------------------------*/
-/*
- * Display / Set uframe_periodic_max
- */
-static ssize_t show_uframe_periodic_max(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct fusbh200_hcd *fusbh200;
- int n;
-
- fusbh200 = hcd_to_fusbh200(bus_to_hcd(dev_get_drvdata(dev)));
- n = scnprintf(buf, PAGE_SIZE, "%d\n", fusbh200->uframe_periodic_max);
- return n;
-}
-
-
-static ssize_t store_uframe_periodic_max(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct fusbh200_hcd *fusbh200;
- unsigned uframe_periodic_max;
- unsigned frame, uframe;
- unsigned short allocated_max;
- unsigned long flags;
- ssize_t ret;
-
- fusbh200 = hcd_to_fusbh200(bus_to_hcd(dev_get_drvdata(dev)));
- if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
- return -EINVAL;
-
- if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) {
- fusbh200_info(fusbh200, "rejecting invalid request for "
- "uframe_periodic_max=%u\n", uframe_periodic_max);
- return -EINVAL;
- }
-
- ret = -EINVAL;
-
- /*
- * lock, so that our checking does not race with possible periodic
- * bandwidth allocation through submitting new urbs.
- */
- spin_lock_irqsave (&fusbh200->lock, flags);
-
- /*
- * for request to decrease max periodic bandwidth, we have to check
- * every microframe in the schedule to see whether the decrease is
- * possible.
- */
- if (uframe_periodic_max < fusbh200->uframe_periodic_max) {
- allocated_max = 0;
-
- for (frame = 0; frame < fusbh200->periodic_size; ++frame)
- for (uframe = 0; uframe < 7; ++uframe)
- allocated_max = max(allocated_max,
- periodic_usecs (fusbh200, frame, uframe));
-
- if (allocated_max > uframe_periodic_max) {
- fusbh200_info(fusbh200,
- "cannot decrease uframe_periodic_max because "
- "periodic bandwidth is already allocated "
- "(%u > %u)\n",
- allocated_max, uframe_periodic_max);
- goto out_unlock;
- }
- }
-
- /* increasing is always ok */
-
- fusbh200_info(fusbh200, "setting max periodic bandwidth to %u%% "
- "(== %u usec/uframe)\n",
- 100*uframe_periodic_max/125, uframe_periodic_max);
-
- if (uframe_periodic_max != 100)
- fusbh200_warn(fusbh200, "max periodic bandwidth set is non-standard\n");
-
- fusbh200->uframe_periodic_max = uframe_periodic_max;
- ret = count;
-
-out_unlock:
- spin_unlock_irqrestore (&fusbh200->lock, flags);
- return ret;
-}
-static DEVICE_ATTR(uframe_periodic_max, 0644, show_uframe_periodic_max, store_uframe_periodic_max);
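The percentage printed by store_uframe_periodic_max() is the same 125-usec-per-uframe arithmetic used throughout; a worked example with an assumed input:

/*
 * Example (assumed input): writing 110 to uframe_periodic_max gives
 *   100 * 110 / 125 = 88, so the driver logs "88% (== 110 usec/uframe)".
 * The range check above only accepts values in 100..124.
 */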
-
-
-static inline int create_sysfs_files(struct fusbh200_hcd *fusbh200)
-{
- struct device *controller = fusbh200_to_hcd(fusbh200)->self.controller;
- int i = 0;
-
- if (i)
- goto out;
-
- i = device_create_file(controller, &dev_attr_uframe_periodic_max);
-out:
- return i;
-}
-
-static inline void remove_sysfs_files(struct fusbh200_hcd *fusbh200)
-{
- struct device *controller = fusbh200_to_hcd(fusbh200)->self.controller;
-
- device_remove_file(controller, &dev_attr_uframe_periodic_max);
-}
-/*-------------------------------------------------------------------------*/
-
-/* On some systems, leaving remote wakeup enabled prevents system shutdown.
- * The firmware seems to think that powering off is a wakeup event!
- * This routine turns off remote wakeup and everything else, on all ports.
- */
-static void fusbh200_turn_off_all_ports(struct fusbh200_hcd *fusbh200)
-{
- u32 __iomem *status_reg = &fusbh200->regs->port_status;
-
- fusbh200_writel(fusbh200, PORT_RWC_BITS, status_reg);
-}
-
-/*
- * Halt HC, turn off all ports, and let the BIOS use the companion controllers.
- * Must be called with interrupts enabled and the lock not held.
- */
-static void fusbh200_silence_controller(struct fusbh200_hcd *fusbh200)
-{
- fusbh200_halt(fusbh200);
-
- spin_lock_irq(&fusbh200->lock);
- fusbh200->rh_state = FUSBH200_RH_HALTED;
- fusbh200_turn_off_all_ports(fusbh200);
- spin_unlock_irq(&fusbh200->lock);
-}
-
-/* fusbh200_shutdown kicks in for silicon on any bus (not just pci, etc).
- * This forcibly disables dma and IRQs, helping kexec and other cases
- * where the next system software may expect clean state.
- */
-static void fusbh200_shutdown(struct usb_hcd *hcd)
-{
- struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd);
-
- spin_lock_irq(&fusbh200->lock);
- fusbh200->shutdown = true;
- fusbh200->rh_state = FUSBH200_RH_STOPPING;
- fusbh200->enabled_hrtimer_events = 0;
- spin_unlock_irq(&fusbh200->lock);
-
- fusbh200_silence_controller(fusbh200);
-
- hrtimer_cancel(&fusbh200->hrtimer);
-}
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * fusbh200_work is called from some interrupts, timers, and so on.
- * it calls driver completion functions, after dropping fusbh200->lock.
- */
-static void fusbh200_work (struct fusbh200_hcd *fusbh200)
-{
- /* another CPU may drop fusbh200->lock during a schedule scan while
- * it reports urb completions. this flag guards against bogus
- * attempts at re-entrant schedule scanning.
- */
- if (fusbh200->scanning) {
- fusbh200->need_rescan = true;
- return;
- }
- fusbh200->scanning = true;
-
- rescan:
- fusbh200->need_rescan = false;
- if (fusbh200->async_count)
- scan_async(fusbh200);
- if (fusbh200->intr_count > 0)
- scan_intr(fusbh200);
- if (fusbh200->isoc_count > 0)
- scan_isoc(fusbh200);
- if (fusbh200->need_rescan)
- goto rescan;
- fusbh200->scanning = false;
-
- /* the IO watchdog guards against hardware or driver bugs that
- * misplace IRQs, and should let us run completely without IRQs.
- * such lossage has been observed on both VT6202 and VT8235.
- */
- turn_on_io_watchdog(fusbh200);
-}
-
-/*
- * Called when the fusbh200_hcd module is removed.
- */
-static void fusbh200_stop (struct usb_hcd *hcd)
-{
- struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
-
- fusbh200_dbg (fusbh200, "stop\n");
-
- /* no more interrupts ... */
-
- spin_lock_irq(&fusbh200->lock);
- fusbh200->enabled_hrtimer_events = 0;
- spin_unlock_irq(&fusbh200->lock);
-
- fusbh200_quiesce(fusbh200);
- fusbh200_silence_controller(fusbh200);
- fusbh200_reset (fusbh200);
-
- hrtimer_cancel(&fusbh200->hrtimer);
- remove_sysfs_files(fusbh200);
- remove_debug_files (fusbh200);
-
- /* root hub is shut down separately (first, when possible) */
- spin_lock_irq (&fusbh200->lock);
- end_free_itds(fusbh200);
- spin_unlock_irq (&fusbh200->lock);
- fusbh200_mem_cleanup (fusbh200);
-
- fusbh200_dbg(fusbh200, "irq normal %ld err %ld iaa %ld (lost %ld)\n",
- fusbh200->stats.normal, fusbh200->stats.error, fusbh200->stats.iaa,
- fusbh200->stats.lost_iaa);
- fusbh200_dbg (fusbh200, "complete %ld unlink %ld\n",
- fusbh200->stats.complete, fusbh200->stats.unlink);
-
- dbg_status (fusbh200, "fusbh200_stop completed",
- fusbh200_readl(fusbh200, &fusbh200->regs->status));
-}
-
-/* one-time init, only for memory state */
-static int hcd_fusbh200_init(struct usb_hcd *hcd)
-{
- struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd);
- u32 temp;
- int retval;
- u32 hcc_params;
- struct fusbh200_qh_hw *hw;
-
- spin_lock_init(&fusbh200->lock);
-
- /*
-	 * keep the io watchdog on by default; well-behaved HCDs can turn it off later
- */
- fusbh200->need_io_watchdog = 1;
-
- hrtimer_init(&fusbh200->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- fusbh200->hrtimer.function = fusbh200_hrtimer_func;
- fusbh200->next_hrtimer_event = FUSBH200_HRTIMER_NO_EVENT;
-
- hcc_params = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params);
-
- /*
- * by default set standard 80% (== 100 usec/uframe) max periodic
- * bandwidth as required by USB 2.0
- */
- fusbh200->uframe_periodic_max = 100;
-
- /*
- * hw default: 1K periodic list heads, one per frame.
- * periodic_size can shrink by USBCMD update if hcc_params allows.
- */
- fusbh200->periodic_size = DEFAULT_I_TDPS;
- INIT_LIST_HEAD(&fusbh200->intr_qh_list);
- INIT_LIST_HEAD(&fusbh200->cached_itd_list);
-
- if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
- /* periodic schedule size can be smaller than default */
- switch (FUSBH200_TUNE_FLS) {
- case 0: fusbh200->periodic_size = 1024; break;
- case 1: fusbh200->periodic_size = 512; break;
- case 2: fusbh200->periodic_size = 256; break;
- default: BUG();
- }
- }
- if ((retval = fusbh200_mem_init(fusbh200, GFP_KERNEL)) < 0)
- return retval;
-
- /* controllers may cache some of the periodic schedule ... */
- fusbh200->i_thresh = 2;
-
- /*
- * dedicate a qh for the async ring head, since we couldn't unlink
- * a 'real' qh without stopping the async schedule [4.8]. use it
- * as the 'reclamation list head' too.
- * its dummy is used in hw_alt_next of many tds, to prevent the qh
- * from automatically advancing to the next td after short reads.
- */
- fusbh200->async->qh_next.qh = NULL;
- hw = fusbh200->async->hw;
- hw->hw_next = QH_NEXT(fusbh200, fusbh200->async->qh_dma);
- hw->hw_info1 = cpu_to_hc32(fusbh200, QH_HEAD);
- hw->hw_token = cpu_to_hc32(fusbh200, QTD_STS_HALT);
- hw->hw_qtd_next = FUSBH200_LIST_END(fusbh200);
- fusbh200->async->qh_state = QH_STATE_LINKED;
- hw->hw_alt_next = QTD_NEXT(fusbh200, fusbh200->async->dummy->qtd_dma);
-
- /* clear interrupt enables, set irq latency */
- if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
- log2_irq_thresh = 0;
- temp = 1 << (16 + log2_irq_thresh);
- if (HCC_CANPARK(hcc_params)) {
- /* HW default park == 3, on hardware that supports it (like
- * NVidia and ALI silicon), maximizes throughput on the async
- * schedule by avoiding QH fetches between transfers.
- *
- * With fast usb storage devices and NForce2, "park" seems to
- * make problems: throughput reduction (!), data errors...
- */
- if (park) {
- park = min(park, (unsigned) 3);
- temp |= CMD_PARK;
- temp |= park << 8;
- }
- fusbh200_dbg(fusbh200, "park %d\n", park);
- }
- if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
- /* periodic schedule size can be smaller than default */
- temp &= ~(3 << 2);
- temp |= (FUSBH200_TUNE_FLS << 2);
- }
- fusbh200->command = temp;
-
- /* Accept arbitrarily long scatter-gather lists */
- if (!(hcd->driver->flags & HCD_LOCAL_MEM))
- hcd->self.sg_tablesize = ~0;
- return 0;
-}
-
-/* start HC running; it's halted, hcd_fusbh200_init() has been run (once) */
-static int fusbh200_run (struct usb_hcd *hcd)
-{
- struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
- u32 temp;
- u32 hcc_params;
-
- hcd->uses_new_polling = 1;
-
- /* EHCI spec section 4.1 */
-
- fusbh200_writel(fusbh200, fusbh200->periodic_dma, &fusbh200->regs->frame_list);
- fusbh200_writel(fusbh200, (u32)fusbh200->async->qh_dma, &fusbh200->regs->async_next);
-
- /*
- * hcc_params controls whether fusbh200->regs->segment must (!!!)
- * be used; it constrains QH/ITD/SITD and QTD locations.
- * pci_pool consistent memory always uses segment zero.
- * streaming mappings for I/O buffers, like pci_map_single(),
- * can return segments above 4GB, if the device allows.
- *
- * NOTE: the dma mask is visible through dma_supported(), so
- * drivers can pass this info along ... like NETIF_F_HIGHDMA,
- * Scsi_Host.highmem_io, and so forth. It's readonly to all
- * host side drivers though.
- */
- hcc_params = fusbh200_readl(fusbh200, &fusbh200->caps->hcc_params);
-
- // Philips, Intel, and maybe others need CMD_RUN before the
- // root hub will detect new devices (why?); NEC doesn't
- fusbh200->command &= ~(CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
- fusbh200->command |= CMD_RUN;
- fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
- dbg_cmd (fusbh200, "init", fusbh200->command);
-
- /*
- * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
- * are explicitly handed to companion controller(s), so no TT is
- * involved with the root hub. (Except where one is integrated,
- * and there's no companion controller unless maybe for USB OTG.)
- *
- * Turning on the CF flag will transfer ownership of all ports
- * from the companions to the EHCI controller. If any of the
- * companions are in the middle of a port reset at the time, it
- * could cause trouble. Write-locking ehci_cf_port_reset_rwsem
- * guarantees that no resets are in progress. After we set CF,
- * a short delay lets the hardware catch up; new resets shouldn't
- * be started before the port switching actions could complete.
- */
- down_write(&ehci_cf_port_reset_rwsem);
- fusbh200->rh_state = FUSBH200_RH_RUNNING;
- fusbh200_readl(fusbh200, &fusbh200->regs->command); /* unblock posted writes */
- msleep(5);
- up_write(&ehci_cf_port_reset_rwsem);
- fusbh200->last_periodic_enable = ktime_get_real();
-
- temp = HC_VERSION(fusbh200, fusbh200_readl(fusbh200, &fusbh200->caps->hc_capbase));
- fusbh200_info (fusbh200,
- "USB %x.%x started, EHCI %x.%02x\n",
- ((fusbh200->sbrn & 0xf0)>>4), (fusbh200->sbrn & 0x0f),
- temp >> 8, temp & 0xff);
-
- fusbh200_writel(fusbh200, INTR_MASK,
- &fusbh200->regs->intr_enable); /* Turn On Interrupts */
-
- /* GRR this is run-once init(), being done every time the HC starts.
- * So long as they're part of class devices, we can't do it in init()
- * since the class device isn't created that early.
- */
- create_debug_files(fusbh200);
- create_sysfs_files(fusbh200);
-
- return 0;
-}
-
-static int fusbh200_setup(struct usb_hcd *hcd)
-{
- struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd);
- int retval;
-
- fusbh200->regs = (void __iomem *)fusbh200->caps +
- HC_LENGTH(fusbh200, fusbh200_readl(fusbh200, &fusbh200->caps->hc_capbase));
- dbg_hcs_params(fusbh200, "reset");
- dbg_hcc_params(fusbh200, "reset");
-
- /* cache this readonly data; minimize chip reads */
- fusbh200->hcs_params = fusbh200_readl(fusbh200, &fusbh200->caps->hcs_params);
-
- fusbh200->sbrn = HCD_USB2;
-
- /* data structure init */
- retval = hcd_fusbh200_init(hcd);
- if (retval)
- return retval;
-
- retval = fusbh200_halt(fusbh200);
- if (retval)
- return retval;
-
- fusbh200_reset(fusbh200);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-
-static irqreturn_t fusbh200_irq (struct usb_hcd *hcd)
-{
- struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
- u32 status, masked_status, pcd_status = 0, cmd;
- int bh;
-
- spin_lock (&fusbh200->lock);
-
- status = fusbh200_readl(fusbh200, &fusbh200->regs->status);
-
- /* e.g. cardbus physical eject */
- if (status == ~(u32) 0) {
- fusbh200_dbg (fusbh200, "device removed\n");
- goto dead;
- }
-
- /*
- * We don't use STS_FLR, but some controllers don't like it to
- * remain on, so mask it out along with the other status bits.
- */
- masked_status = status & (INTR_MASK | STS_FLR);
-
- /* Shared IRQ? */
- if (!masked_status || unlikely(fusbh200->rh_state == FUSBH200_RH_HALTED)) {
- spin_unlock(&fusbh200->lock);
- return IRQ_NONE;
- }
-
- /* clear (just) interrupts */
- fusbh200_writel(fusbh200, masked_status, &fusbh200->regs->status);
- cmd = fusbh200_readl(fusbh200, &fusbh200->regs->command);
- bh = 0;
-
- /* normal [4.15.1.2] or error [4.15.1.1] completion */
- if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
- if (likely ((status & STS_ERR) == 0))
- COUNT (fusbh200->stats.normal);
- else
- COUNT (fusbh200->stats.error);
- bh = 1;
- }
-
- /* complete the unlinking of some qh [4.15.2.3] */
- if (status & STS_IAA) {
-
- /* Turn off the IAA watchdog */
- fusbh200->enabled_hrtimer_events &= ~BIT(FUSBH200_HRTIMER_IAA_WATCHDOG);
-
- /*
- * Mild optimization: Allow another IAAD to reset the
- * hrtimer, if one occurs before the next expiration.
- * In theory we could always cancel the hrtimer, but
- * tests show that about half the time it will be reset
- * for some other event anyway.
- */
- if (fusbh200->next_hrtimer_event == FUSBH200_HRTIMER_IAA_WATCHDOG)
- ++fusbh200->next_hrtimer_event;
-
- /* guard against (alleged) silicon errata */
- if (cmd & CMD_IAAD)
- fusbh200_dbg(fusbh200, "IAA with IAAD still set?\n");
- if (fusbh200->async_iaa) {
- COUNT(fusbh200->stats.iaa);
- end_unlink_async(fusbh200);
- } else
- fusbh200_dbg(fusbh200, "IAA with nothing unlinked?\n");
- }
-
- /* remote wakeup [4.3.1] */
- if (status & STS_PCD) {
- int pstatus;
- u32 __iomem *status_reg = &fusbh200->regs->port_status;
-
- /* kick root hub later */
- pcd_status = status;
-
- /* resume root hub? */
- if (fusbh200->rh_state == FUSBH200_RH_SUSPENDED)
- usb_hcd_resume_root_hub(hcd);
-
- pstatus = fusbh200_readl(fusbh200, status_reg);
-
- if (test_bit(0, &fusbh200->suspended_ports) &&
- ((pstatus & PORT_RESUME) ||
- !(pstatus & PORT_SUSPEND)) &&
- (pstatus & PORT_PE) &&
- fusbh200->reset_done[0] == 0) {
-
- /* start 20 msec resume signaling from this port,
- * and make hub_wq collect PORT_STAT_C_SUSPEND to
- * stop that signaling. Use 5 ms extra for safety,
- * like usb_port_resume() does.
- */
- fusbh200->reset_done[0] = jiffies + msecs_to_jiffies(25);
- set_bit(0, &fusbh200->resuming_ports);
- fusbh200_dbg (fusbh200, "port 1 remote wakeup\n");
- mod_timer(&hcd->rh_timer, fusbh200->reset_done[0]);
- }
- }
-
- /* PCI errors [4.15.2.4] */
- if (unlikely ((status & STS_FATAL) != 0)) {
- fusbh200_err(fusbh200, "fatal error\n");
- dbg_cmd(fusbh200, "fatal", cmd);
- dbg_status(fusbh200, "fatal", status);
-dead:
- usb_hc_died(hcd);
-
- /* Don't let the controller do anything more */
- fusbh200->shutdown = true;
- fusbh200->rh_state = FUSBH200_RH_STOPPING;
- fusbh200->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE);
- fusbh200_writel(fusbh200, fusbh200->command, &fusbh200->regs->command);
- fusbh200_writel(fusbh200, 0, &fusbh200->regs->intr_enable);
- fusbh200_handle_controller_death(fusbh200);
-
- /* Handle completions when the controller stops */
- bh = 0;
- }
-
- if (bh)
- fusbh200_work (fusbh200);
- spin_unlock (&fusbh200->lock);
- if (pcd_status)
- usb_hcd_poll_rh_status(hcd);
- return IRQ_HANDLED;
-}
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * non-error returns are a promise to giveback() the urb later
- * we drop ownership so next owner (or urb unlink) can get it
- *
- * urb + dev is in hcd.self.controller.urb_list
- * we're queueing TDs onto software and hardware lists
- *
- * hcd-specific init for hcpriv hasn't been done yet
- *
- * NOTE: control, bulk, and interrupt share the same code to append TDs
- * to a (possibly active) QH, and the same QH scanning code.
- */
-static int fusbh200_urb_enqueue (
- struct usb_hcd *hcd,
- struct urb *urb,
- gfp_t mem_flags
-) {
- struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
- struct list_head qtd_list;
-
- INIT_LIST_HEAD (&qtd_list);
-
- switch (usb_pipetype (urb->pipe)) {
- case PIPE_CONTROL:
- /* qh_completions() code doesn't handle all the fault cases
- * in multi-TD control transfers. Even 1KB is rare anyway.
- */
- if (urb->transfer_buffer_length > (16 * 1024))
- return -EMSGSIZE;
- /* FALLTHROUGH */
- /* case PIPE_BULK: */
- default:
- if (!qh_urb_transaction (fusbh200, urb, &qtd_list, mem_flags))
- return -ENOMEM;
- return submit_async(fusbh200, urb, &qtd_list, mem_flags);
-
- case PIPE_INTERRUPT:
- if (!qh_urb_transaction (fusbh200, urb, &qtd_list, mem_flags))
- return -ENOMEM;
- return intr_submit(fusbh200, urb, &qtd_list, mem_flags);
-
- case PIPE_ISOCHRONOUS:
- return itd_submit (fusbh200, urb, mem_flags);
- }
-}
-
-/* remove from hardware lists
- * completions normally happen asynchronously
- */
-
-static int fusbh200_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
-{
- struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
- struct fusbh200_qh *qh;
- unsigned long flags;
- int rc;
-
- spin_lock_irqsave (&fusbh200->lock, flags);
- rc = usb_hcd_check_unlink_urb(hcd, urb, status);
- if (rc)
- goto done;
-
- switch (usb_pipetype (urb->pipe)) {
- // case PIPE_CONTROL:
- // case PIPE_BULK:
- default:
- qh = (struct fusbh200_qh *) urb->hcpriv;
- if (!qh)
- break;
- switch (qh->qh_state) {
- case QH_STATE_LINKED:
- case QH_STATE_COMPLETING:
- start_unlink_async(fusbh200, qh);
- break;
- case QH_STATE_UNLINK:
- case QH_STATE_UNLINK_WAIT:
- /* already started */
- break;
- case QH_STATE_IDLE:
- /* QH might be waiting for a Clear-TT-Buffer */
- qh_completions(fusbh200, qh);
- break;
- }
- break;
-
- case PIPE_INTERRUPT:
- qh = (struct fusbh200_qh *) urb->hcpriv;
- if (!qh)
- break;
- switch (qh->qh_state) {
- case QH_STATE_LINKED:
- case QH_STATE_COMPLETING:
- start_unlink_intr(fusbh200, qh);
- break;
- case QH_STATE_IDLE:
- qh_completions (fusbh200, qh);
- break;
- default:
- fusbh200_dbg (fusbh200, "bogus qh %p state %d\n",
- qh, qh->qh_state);
- goto done;
- }
- break;
-
- case PIPE_ISOCHRONOUS:
- // itd...
-
- // wait till next completion, do it then.
- // completion irqs can wait up to 1024 msec,
- break;
- }
-done:
- spin_unlock_irqrestore (&fusbh200->lock, flags);
- return rc;
-}
-
-/*-------------------------------------------------------------------------*/
-
-// bulk qh holds the data toggle
-
-static void
-fusbh200_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
-{
- struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
- unsigned long flags;
- struct fusbh200_qh *qh, *tmp;
-
- /* ASSERT: any requests/urbs are being unlinked */
- /* ASSERT: nobody can be submitting urbs for this any more */
-
-rescan:
- spin_lock_irqsave (&fusbh200->lock, flags);
- qh = ep->hcpriv;
- if (!qh)
- goto done;
-
- /* endpoints can be iso streams. for now, we don't
- * accelerate iso completions ... so spin a while.
- */
- if (qh->hw == NULL) {
- struct fusbh200_iso_stream *stream = ep->hcpriv;
-
- if (!list_empty(&stream->td_list))
- goto idle_timeout;
-
- /* BUG_ON(!list_empty(&stream->free_list)); */
- kfree(stream);
- goto done;
- }
-
- if (fusbh200->rh_state < FUSBH200_RH_RUNNING)
- qh->qh_state = QH_STATE_IDLE;
- switch (qh->qh_state) {
- case QH_STATE_LINKED:
- case QH_STATE_COMPLETING:
- for (tmp = fusbh200->async->qh_next.qh;
- tmp && tmp != qh;
- tmp = tmp->qh_next.qh)
- continue;
- /* periodic qh self-unlinks on empty, and a COMPLETING qh
- * may already be unlinked.
- */
- if (tmp)
- start_unlink_async(fusbh200, qh);
- /* FALL THROUGH */
- case QH_STATE_UNLINK: /* wait for hw to finish? */
- case QH_STATE_UNLINK_WAIT:
-idle_timeout:
- spin_unlock_irqrestore (&fusbh200->lock, flags);
- schedule_timeout_uninterruptible(1);
- goto rescan;
- case QH_STATE_IDLE: /* fully unlinked */
- if (qh->clearing_tt)
- goto idle_timeout;
- if (list_empty (&qh->qtd_list)) {
- qh_destroy(fusbh200, qh);
- break;
- }
- /* else FALL THROUGH */
- default:
- /* caller was supposed to have unlinked any requests;
- * that's not our job. just leak this memory.
- */
- fusbh200_err (fusbh200, "qh %p (#%02x) state %d%s\n",
- qh, ep->desc.bEndpointAddress, qh->qh_state,
- list_empty (&qh->qtd_list) ? "" : "(has tds)");
- break;
- }
- done:
- ep->hcpriv = NULL;
- spin_unlock_irqrestore (&fusbh200->lock, flags);
-}
-
-static void
-fusbh200_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
-{
- struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200(hcd);
- struct fusbh200_qh *qh;
- int eptype = usb_endpoint_type(&ep->desc);
- int epnum = usb_endpoint_num(&ep->desc);
- int is_out = usb_endpoint_dir_out(&ep->desc);
- unsigned long flags;
-
- if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
- return;
-
- spin_lock_irqsave(&fusbh200->lock, flags);
- qh = ep->hcpriv;
-
- /* For Bulk and Interrupt endpoints we maintain the toggle state
- * in the hardware; the toggle bits in udev aren't used at all.
- * When an endpoint is reset by usb_clear_halt() we must reset
- * the toggle bit in the QH.
- */
- if (qh) {
- usb_settoggle(qh->dev, epnum, is_out, 0);
- if (!list_empty(&qh->qtd_list)) {
- WARN_ONCE(1, "clear_halt for a busy endpoint\n");
- } else if (qh->qh_state == QH_STATE_LINKED ||
- qh->qh_state == QH_STATE_COMPLETING) {
-
- /* The toggle value in the QH can't be updated
- * while the QH is active. Unlink it now;
- * re-linking will call qh_refresh().
- */
- if (eptype == USB_ENDPOINT_XFER_BULK)
- start_unlink_async(fusbh200, qh);
- else
- start_unlink_intr(fusbh200, qh);
- }
- }
- spin_unlock_irqrestore(&fusbh200->lock, flags);
-}
-
-static int fusbh200_get_frame (struct usb_hcd *hcd)
-{
- struct fusbh200_hcd *fusbh200 = hcd_to_fusbh200 (hcd);
- return (fusbh200_read_frame_index(fusbh200) >> 3) % fusbh200->periodic_size;
-}
-
-/*-------------------------------------------------------------------------*/
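As background for fusbh200_get_frame() above: the FRINDEX register counts 125 us microframes, so the low three bits select the microframe within a 1 ms frame and the value shifted right by three is the frame number, wrapped to the periodic schedule size. A minimal standalone sketch of that arithmetic (illustrative only, not part of the driver or of this patch):

#include <stdio.h>

int main(void)
{
	unsigned int frindex = 0x2a37;		/* hypothetical FRINDEX readout */
	unsigned int periodic_size = 1024;	/* DEFAULT_I_TDPS */

	/* low 3 bits = microframe, remaining bits = frame number */
	printf("frame %u, uframe %u\n",
	       (frindex >> 3) % periodic_size, frindex & 7);
	return 0;
}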
-
-/*
- * The EHCI in ChipIdea HDRC cannot be a separate module or device,
- * because its registers (and irq) are shared between host/gadget/otg
- * functions and in order to facilitate role switching we cannot
- * give the fusbh200 driver exclusive access to those.
- */
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_AUTHOR (DRIVER_AUTHOR);
-MODULE_LICENSE ("GPL");
-
-static const struct hc_driver fusbh200_fusbh200_hc_driver = {
- .description = hcd_name,
- .product_desc = "Faraday USB2.0 Host Controller",
- .hcd_priv_size = sizeof(struct fusbh200_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = fusbh200_irq,
- .flags = HCD_MEMORY | HCD_USB2,
-
- /*
- * basic lifecycle operations
- */
- .reset = hcd_fusbh200_init,
- .start = fusbh200_run,
- .stop = fusbh200_stop,
- .shutdown = fusbh200_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = fusbh200_urb_enqueue,
- .urb_dequeue = fusbh200_urb_dequeue,
- .endpoint_disable = fusbh200_endpoint_disable,
- .endpoint_reset = fusbh200_endpoint_reset,
-
- /*
- * scheduling support
- */
- .get_frame_number = fusbh200_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = fusbh200_hub_status_data,
- .hub_control = fusbh200_hub_control,
- .bus_suspend = fusbh200_bus_suspend,
- .bus_resume = fusbh200_bus_resume,
-
- .relinquish_port = fusbh200_relinquish_port,
- .port_handed_over = fusbh200_port_handed_over,
-
- .clear_tt_buffer_complete = fusbh200_clear_tt_buffer_complete,
-};
-
-static void fusbh200_init(struct fusbh200_hcd *fusbh200)
-{
- u32 reg;
-
- reg = fusbh200_readl(fusbh200, &fusbh200->regs->bmcsr);
- reg |= BMCSR_INT_POLARITY;
- reg &= ~BMCSR_VBUS_OFF;
- fusbh200_writel(fusbh200, reg, &fusbh200->regs->bmcsr);
-
- reg = fusbh200_readl(fusbh200, &fusbh200->regs->bmier);
- fusbh200_writel(fusbh200, reg | BMIER_OVC_EN | BMIER_VBUS_ERR_EN,
- &fusbh200->regs->bmier);
-}
-
-/**
- * fusbh200_hcd_probe - initialize faraday FUSBH200 HCDs
- *
- * Allocates basic resources for this USB host controller, and
- * then invokes the start() method for the HCD associated with it
- * through the hotplug entry's driver_data.
- */
-static int fusbh200_hcd_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct usb_hcd *hcd;
- struct resource *res;
- int irq;
- int retval = -ENODEV;
- struct fusbh200_hcd *fusbh200;
-
- if (usb_disabled())
- return -ENODEV;
-
- pdev->dev.power.power_state = PMSG_ON;
-
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev,
- "Found HC with no IRQ. Check %s setup!\n",
- dev_name(dev));
- return -ENODEV;
- }
-
- irq = res->start;
-
- hcd = usb_create_hcd(&fusbh200_fusbh200_hc_driver, dev,
- dev_name(dev));
- if (!hcd) {
- dev_err(dev, "failed to create hcd with err %d\n", retval);
- retval = -ENOMEM;
- goto fail_create_hcd;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev,
- "Found HC with no register addr. Check %s setup!\n",
- dev_name(dev));
- retval = -ENODEV;
- goto fail_request_resource;
- }
-
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
- hcd->has_tt = 1;
-
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
- fusbh200_fusbh200_hc_driver.description)) {
- dev_dbg(dev, "controller already in use\n");
- retval = -EBUSY;
- goto fail_request_resource;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!res) {
- dev_err(dev,
- "Found HC with no register addr. Check %s setup!\n",
- dev_name(dev));
- retval = -ENODEV;
- goto fail_request_resource;
- }
-
- hcd->regs = ioremap_nocache(res->start, resource_size(res));
- if (hcd->regs == NULL) {
- dev_dbg(dev, "error mapping memory\n");
- retval = -EFAULT;
- goto fail_ioremap;
- }
-
- fusbh200 = hcd_to_fusbh200(hcd);
-
- fusbh200->caps = hcd->regs;
-
- retval = fusbh200_setup(hcd);
- if (retval)
- goto fail_add_hcd;
-
- fusbh200_init(fusbh200);
-
- retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
- if (retval) {
- dev_err(dev, "failed to add hcd with err %d\n", retval);
- goto fail_add_hcd;
- }
- device_wakeup_enable(hcd->self.controller);
-
- return retval;
-
-fail_add_hcd:
- iounmap(hcd->regs);
-fail_ioremap:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-fail_request_resource:
- usb_put_hcd(hcd);
-fail_create_hcd:
- dev_err(dev, "init %s fail, %d\n", dev_name(dev), retval);
- return retval;
-}
-
-/**
- * fusbh200_hcd_remove - shutdown processing for EHCI HCDs
- * @dev: USB Host Controller being removed
- *
- * Reverses the effect of fusbh200_hcd_probe(), first invoking
- * the HCD's stop() method. It is always called from a thread
- * context, normally "rmmod", "apmd", or something similar.
- */
-static int fusbh200_hcd_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct usb_hcd *hcd = dev_get_drvdata(dev);
-
- if (!hcd)
- return 0;
-
- usb_remove_hcd(hcd);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
-
- return 0;
-}
-
-static struct platform_driver fusbh200_hcd_fusbh200_driver = {
- .driver = {
- .name = "fusbh200",
- },
- .probe = fusbh200_hcd_probe,
- .remove = fusbh200_hcd_remove,
-};
-
-static int __init fusbh200_hcd_init(void)
-{
- int retval = 0;
-
- if (usb_disabled())
- return -ENODEV;
-
- printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name);
- set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
- if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
- test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
- printk(KERN_WARNING "Warning! fusbh200_hcd should always be loaded"
- " before uhci_hcd and ohci_hcd, not after\n");
-
- pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd\n",
- hcd_name,
- sizeof(struct fusbh200_qh), sizeof(struct fusbh200_qtd),
- sizeof(struct fusbh200_itd));
-
- fusbh200_debug_root = debugfs_create_dir("fusbh200", usb_debug_root);
- if (!fusbh200_debug_root) {
- retval = -ENOENT;
- goto err_debug;
- }
-
- retval = platform_driver_register(&fusbh200_hcd_fusbh200_driver);
- if (retval < 0)
- goto clean;
- return retval;
-
- platform_driver_unregister(&fusbh200_hcd_fusbh200_driver);
-clean:
- debugfs_remove(fusbh200_debug_root);
- fusbh200_debug_root = NULL;
-err_debug:
- clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
- return retval;
-}
-module_init(fusbh200_hcd_init);
-
-static void __exit fusbh200_hcd_cleanup(void)
-{
- platform_driver_unregister(&fusbh200_hcd_fusbh200_driver);
- debugfs_remove(fusbh200_debug_root);
- clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
-}
-module_exit(fusbh200_hcd_cleanup);
diff --git a/drivers/usb/host/fusbh200.h b/drivers/usb/host/fusbh200.h
deleted file mode 100644
index d6e5b3d4aa68..000000000000
--- a/drivers/usb/host/fusbh200.h
+++ /dev/null
@@ -1,675 +0,0 @@
-#ifndef __LINUX_FUSBH200_H
-#define __LINUX_FUSBH200_H
-
-#include <linux/usb/ehci-dbgp.h>
-
-/* definitions used for the EHCI driver */
-
-/*
- * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to
- * __leXX (normally) or __beXX (given FUSBH200_BIG_ENDIAN_DESC), depending on
- * the host controller implementation.
- *
- * To facilitate the strongest possible byte-order checking from "sparse"
- * and so on, we use __leXX unless that's not practical.
- */
-#define __hc32 __le32
-#define __hc16 __le16
-
-/* statistics can be kept for tuning/monitoring */
-struct fusbh200_stats {
- /* irq usage */
- unsigned long normal;
- unsigned long error;
- unsigned long iaa;
- unsigned long lost_iaa;
-
- /* termination of urbs from core */
- unsigned long complete;
- unsigned long unlink;
-};
-
-/* fusbh200_hcd->lock guards shared data against other CPUs:
- * fusbh200_hcd: async, unlink, periodic (and shadow), ...
- * usb_host_endpoint: hcpriv
- * fusbh200_qh: qh_next, qtd_list
- * fusbh200_qtd: qtd_list
- *
- * Also, hold this lock when talking to HC registers or
- * when updating hw_* fields in shared qh/qtd/... structures.
- */
-
-#define FUSBH200_MAX_ROOT_PORTS 1 /* see HCS_N_PORTS */
-
-/*
- * fusbh200_rh_state values of FUSBH200_RH_RUNNING or above mean that the
- * controller may be doing DMA. Lower values mean there's no DMA.
- */
-enum fusbh200_rh_state {
- FUSBH200_RH_HALTED,
- FUSBH200_RH_SUSPENDED,
- FUSBH200_RH_RUNNING,
- FUSBH200_RH_STOPPING
-};
-
-/*
- * Timer events, ordered by increasing delay length.
- * Always update event_delays_ns[] and event_handlers[] (defined in
- * ehci-timer.c) in parallel with this list.
- */
-enum fusbh200_hrtimer_event {
- FUSBH200_HRTIMER_POLL_ASS, /* Poll for async schedule off */
- FUSBH200_HRTIMER_POLL_PSS, /* Poll for periodic schedule off */
- FUSBH200_HRTIMER_POLL_DEAD, /* Wait for dead controller to stop */
- FUSBH200_HRTIMER_UNLINK_INTR, /* Wait for interrupt QH unlink */
- FUSBH200_HRTIMER_FREE_ITDS, /* Wait for unused iTDs and siTDs */
- FUSBH200_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */
- FUSBH200_HRTIMER_IAA_WATCHDOG, /* Handle lost IAA interrupts */
- FUSBH200_HRTIMER_DISABLE_PERIODIC, /* Wait to disable periodic sched */
- FUSBH200_HRTIMER_DISABLE_ASYNC, /* Wait to disable async sched */
- FUSBH200_HRTIMER_IO_WATCHDOG, /* Check for missing IRQs */
- FUSBH200_HRTIMER_NUM_EVENTS /* Must come last */
-};
-#define FUSBH200_HRTIMER_NO_EVENT 99
-
-struct fusbh200_hcd { /* one per controller */
- /* timing support */
- enum fusbh200_hrtimer_event next_hrtimer_event;
- unsigned enabled_hrtimer_events;
- ktime_t hr_timeouts[FUSBH200_HRTIMER_NUM_EVENTS];
- struct hrtimer hrtimer;
-
- int PSS_poll_count;
- int ASS_poll_count;
- int died_poll_count;
-
- /* glue to PCI and HCD framework */
- struct fusbh200_caps __iomem *caps;
- struct fusbh200_regs __iomem *regs;
- struct ehci_dbg_port __iomem *debug;
-
- __u32 hcs_params; /* cached register copy */
- spinlock_t lock;
- enum fusbh200_rh_state rh_state;
-
- /* general schedule support */
- bool scanning:1;
- bool need_rescan:1;
- bool intr_unlinking:1;
- bool async_unlinking:1;
- bool shutdown:1;
- struct fusbh200_qh *qh_scan_next;
-
- /* async schedule support */
- struct fusbh200_qh *async;
- struct fusbh200_qh *dummy; /* For AMD quirk use */
- struct fusbh200_qh *async_unlink;
- struct fusbh200_qh *async_unlink_last;
- struct fusbh200_qh *async_iaa;
- unsigned async_unlink_cycle;
- unsigned async_count; /* async activity count */
-
- /* periodic schedule support */
-#define DEFAULT_I_TDPS 1024 /* some HCs can do less */
- unsigned periodic_size;
- __hc32 *periodic; /* hw periodic table */
- dma_addr_t periodic_dma;
- struct list_head intr_qh_list;
- unsigned i_thresh; /* uframes HC might cache */
-
- union fusbh200_shadow *pshadow; /* mirror hw periodic table */
- struct fusbh200_qh *intr_unlink;
- struct fusbh200_qh *intr_unlink_last;
- unsigned intr_unlink_cycle;
- unsigned now_frame; /* frame from HC hardware */
- unsigned next_frame; /* scan periodic, start here */
- unsigned intr_count; /* intr activity count */
- unsigned isoc_count; /* isoc activity count */
- unsigned periodic_count; /* periodic activity count */
- unsigned uframe_periodic_max; /* max periodic time per uframe */
-
-
- /* list of itds completed while now_frame was still active */
- struct list_head cached_itd_list;
- struct fusbh200_itd *last_itd_to_free;
-
- /* per root hub port */
- unsigned long reset_done [FUSBH200_MAX_ROOT_PORTS];
-
- /* bit vectors (one bit per port) */
- unsigned long bus_suspended; /* which ports were
- already suspended at the start of a bus suspend */
- unsigned long companion_ports; /* which ports are
- dedicated to the companion controller */
- unsigned long owned_ports; /* which ports are
- owned by the companion during a bus suspend */
- unsigned long port_c_suspend; /* which ports have
- the change-suspend feature turned on */
- unsigned long suspended_ports; /* which ports are
- suspended */
- unsigned long resuming_ports; /* which ports have
- started to resume */
-
- /* per-HC memory pools (could be per-bus, but ...) */
- struct dma_pool *qh_pool; /* qh per active urb */
- struct dma_pool *qtd_pool; /* one or more per qh */
- struct dma_pool *itd_pool; /* itd per iso urb */
-
- unsigned random_frame;
- unsigned long next_statechange;
- ktime_t last_periodic_enable;
- u32 command;
-
- /* SILICON QUIRKS */
- unsigned need_io_watchdog:1;
- unsigned fs_i_thresh:1; /* Intel iso scheduling */
-
- u8 sbrn; /* packed release number */
-
- /* irq statistics */
- struct fusbh200_stats stats;
-# define COUNT(x) do { (x)++; } while (0)
-
- /* debug files */
- struct dentry *debug_dir;
-};
-
-/* convert between an HCD pointer and the corresponding FUSBH200_HCD */
-static inline struct fusbh200_hcd *hcd_to_fusbh200 (struct usb_hcd *hcd)
-{
- return (struct fusbh200_hcd *) (hcd->hcd_priv);
-}
-static inline struct usb_hcd *fusbh200_to_hcd (struct fusbh200_hcd *fusbh200)
-{
- return container_of ((void *) fusbh200, struct usb_hcd, hcd_priv);
-}
-
-/*-------------------------------------------------------------------------*/
-
-/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
-
-/* Section 2.2 Host Controller Capability Registers */
-struct fusbh200_caps {
- /* these fields are specified as 8 and 16 bit registers,
- * but some hosts can't perform 8 or 16 bit PCI accesses.
- * some hosts treat caplength and hciversion as parts of a 32-bit
- * register, others treat them as two separate registers, this
- * affects the memory map for big endian controllers.
- */
- u32 hc_capbase;
-#define HC_LENGTH(fusbh200, p) (0x00ff&((p) >> /* bits 7:0 / offset 00h */ \
- (fusbh200_big_endian_capbase(fusbh200) ? 24 : 0)))
-#define HC_VERSION(fusbh200, p) (0xffff&((p) >> /* bits 31:16 / offset 02h */ \
- (fusbh200_big_endian_capbase(fusbh200) ? 0 : 16)))
- u32 hcs_params; /* HCSPARAMS - offset 0x4 */
-#define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
-
- u32 hcc_params; /* HCCPARAMS - offset 0x8 */
-#define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */
-#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1)) /* true: periodic_size changes*/
- u8 portroute[8]; /* nibbles for routing - offset 0xC */
-};
-
-
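For the little-endian layout used by this controller, HC_LENGTH extracts CAPLENGTH from bits 7:0 of hc_capbase and HC_VERSION extracts the BCD HCIVERSION from bits 31:16. A minimal standalone sketch with a hypothetical register value (illustrative only, not part of the driver or of this patch):

#include <stdio.h>

int main(void)
{
	/* hypothetical readout: CAPLENGTH 0x10, HCIVERSION 0x0100 (EHCI 1.00) */
	unsigned int capbase = 0x01000010;

	printf("op regs at caps + 0x%02x, EHCI %x.%02x\n",
	       capbase & 0xff,		/* HC_LENGTH: bits 7:0 */
	       (capbase >> 24) & 0xff,	/* HCIVERSION major (BCD) */
	       (capbase >> 16) & 0xff);	/* HCIVERSION minor (BCD) */
	return 0;
}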
-/* Section 2.3 Host Controller Operational Registers */
-struct fusbh200_regs {
-
- /* USBCMD: offset 0x00 */
- u32 command;
-
-/* EHCI 1.1 addendum */
-/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
-#define CMD_PARK (1<<11) /* enable "park" on async qh */
-#define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */
-#define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */
-#define CMD_ASE (1<<5) /* async schedule enable */
-#define CMD_PSE (1<<4) /* periodic schedule enable */
-/* 3:2 is periodic frame list size */
-#define CMD_RESET (1<<1) /* reset HC not bus */
-#define CMD_RUN (1<<0) /* start/stop HC */
-
- /* USBSTS: offset 0x04 */
- u32 status;
-#define STS_ASS (1<<15) /* Async Schedule Status */
-#define STS_PSS (1<<14) /* Periodic Schedule Status */
-#define STS_RECL (1<<13) /* Reclamation */
-#define STS_HALT (1<<12) /* Not running (any reason) */
-/* some bits reserved */
- /* these STS_* flags are also intr_enable bits (USBINTR) */
-#define STS_IAA (1<<5) /* Interrupted on async advance */
-#define STS_FATAL (1<<4) /* such as some PCI access errors */
-#define STS_FLR (1<<3) /* frame list rolled over */
-#define STS_PCD (1<<2) /* port change detect */
-#define STS_ERR (1<<1) /* "error" completion (overflow, ...) */
-#define STS_INT (1<<0) /* "normal" completion (short, ...) */
-
- /* USBINTR: offset 0x08 */
- u32 intr_enable;
-
- /* FRINDEX: offset 0x0C */
- u32 frame_index; /* current microframe number */
- /* CTRLDSSEGMENT: offset 0x10 */
- u32 segment; /* address bits 63:32 if needed */
- /* PERIODICLISTBASE: offset 0x14 */
- u32 frame_list; /* points to periodic list */
- /* ASYNCLISTADDR: offset 0x18 */
- u32 async_next; /* address of next async queue head */
-
- u32 reserved1;
- /* PORTSC: offset 0x20 */
- u32 port_status;
-/* 31:23 reserved */
-#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */
-#define PORT_RESET (1<<8) /* reset port */
-#define PORT_SUSPEND (1<<7) /* suspend port */
-#define PORT_RESUME (1<<6) /* resume it */
-#define PORT_PEC (1<<3) /* port enable change */
-#define PORT_PE (1<<2) /* port enable */
-#define PORT_CSC (1<<1) /* connect status change */
-#define PORT_CONNECT (1<<0) /* device connected */
-#define PORT_RWC_BITS (PORT_CSC | PORT_PEC)
-
- u32 reserved2[3];
-
- /* BMCSR: offset 0x30 */
- u32 bmcsr; /* Bus Monitor Control/Status Register */
-#define BMCSR_HOST_SPD_TYP (3<<9)
-#define BMCSR_VBUS_OFF (1<<4)
-#define BMCSR_INT_POLARITY (1<<3)
-
- /* BMISR: offset 0x34 */
- u32 bmisr; /* Bus Monitor Interrupt Status Register */
-#define BMISR_OVC (1<<1)
-
- /* BMIER: offset 0x38 */
- u32 bmier; /* Bus Monitor Interrupt Enable Register */
-#define BMIER_OVC_EN (1<<1)
-#define BMIER_VBUS_ERR_EN (1<<0)
-};
-
-/*-------------------------------------------------------------------------*/
-
-#define QTD_NEXT(fusbh200, dma) cpu_to_hc32(fusbh200, (u32)dma)
-
-/*
- * EHCI Specification 0.95 Section 3.5
- * QTD: describe data transfer components (buffer, direction, ...)
- * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
- *
- * These are associated only with "QH" (Queue Head) structures,
- * used with control, bulk, and interrupt transfers.
- */
-struct fusbh200_qtd {
- /* first part defined by EHCI spec */
- __hc32 hw_next; /* see EHCI 3.5.1 */
- __hc32 hw_alt_next; /* see EHCI 3.5.2 */
- __hc32 hw_token; /* see EHCI 3.5.3 */
-#define QTD_TOGGLE (1 << 31) /* data toggle */
-#define QTD_LENGTH(tok) (((tok)>>16) & 0x7fff)
-#define QTD_IOC (1 << 15) /* interrupt on complete */
-#define QTD_CERR(tok) (((tok)>>10) & 0x3)
-#define QTD_PID(tok) (((tok)>>8) & 0x3)
-#define QTD_STS_ACTIVE (1 << 7) /* HC may execute this */
-#define QTD_STS_HALT (1 << 6) /* halted on error */
-#define QTD_STS_DBE (1 << 5) /* data buffer error (in HC) */
-#define QTD_STS_BABBLE (1 << 4) /* device was babbling (qtd halted) */
-#define QTD_STS_XACT (1 << 3) /* device gave illegal response */
-#define QTD_STS_MMF (1 << 2) /* incomplete split transaction */
-#define QTD_STS_STS (1 << 1) /* split transaction state */
-#define QTD_STS_PING (1 << 0) /* issue PING? */
-
-#define ACTIVE_BIT(fusbh200) cpu_to_hc32(fusbh200, QTD_STS_ACTIVE)
-#define HALT_BIT(fusbh200) cpu_to_hc32(fusbh200, QTD_STS_HALT)
-#define STATUS_BIT(fusbh200) cpu_to_hc32(fusbh200, QTD_STS_STS)
-
- __hc32 hw_buf [5]; /* see EHCI 3.5.4 */
- __hc32 hw_buf_hi [5]; /* Appendix B */
-
- /* the rest is HCD-private */
- dma_addr_t qtd_dma; /* qtd address */
- struct list_head qtd_list; /* sw qtd list */
- struct urb *urb; /* qtd's urb */
- size_t length; /* length of buffer */
-} __attribute__ ((aligned (32)));
-
-/* mask NakCnt+T in qh->hw_alt_next */
-#define QTD_MASK(fusbh200) cpu_to_hc32 (fusbh200, ~0x1f)
-
-#define IS_SHORT_READ(token) (QTD_LENGTH (token) != 0 && QTD_PID (token) == 1)
-
-/*-------------------------------------------------------------------------*/
-
-/* type tag from {qh,itd,fstn}->hw_next */
-#define Q_NEXT_TYPE(fusbh200,dma) ((dma) & cpu_to_hc32(fusbh200, 3 << 1))
-
-/*
- * Now the following defines are not converted using the
- * cpu_to_le32() macro anymore, since we have to support
- * "dynamic" switching between be and le support, so that the driver
- * can be used on one system with SoC EHCI controller using big-endian
- * descriptors as well as a normal little-endian PCI EHCI controller.
- */
-/* values for that type tag */
-#define Q_TYPE_ITD (0 << 1)
-#define Q_TYPE_QH (1 << 1)
-#define Q_TYPE_SITD (2 << 1)
-#define Q_TYPE_FSTN (3 << 1)
-
-/* next async queue entry, or pointer to interrupt/periodic QH */
-#define QH_NEXT(fusbh200,dma) (cpu_to_hc32(fusbh200, (((u32)dma)&~0x01f)|Q_TYPE_QH))
-
-/* for periodic/async schedules and qtd lists, mark end of list */
-#define FUSBH200_LIST_END(fusbh200) cpu_to_hc32(fusbh200, 1) /* "null pointer" to hw */
-
-/*
- * Entries in periodic shadow table are pointers to one of four kinds
- * of data structure. That's dictated by the hardware; a type tag is
- * encoded in the low bits of the hardware's periodic schedule. Use
- * Q_NEXT_TYPE to get the tag.
- *
- * For entries in the async schedule, the type tag always says "qh".
- */
-union fusbh200_shadow {
- struct fusbh200_qh *qh; /* Q_TYPE_QH */
- struct fusbh200_itd *itd; /* Q_TYPE_ITD */
- struct fusbh200_fstn *fstn; /* Q_TYPE_FSTN */
- __hc32 *hw_next; /* (all types) */
- void *ptr;
-};
-
-/*-------------------------------------------------------------------------*/
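The type-tag scheme described above is easiest to see with a worked example: bits 2:1 of a periodic-schedule entry say which member of union fusbh200_shadow is valid. A minimal standalone sketch (illustrative only, not part of the driver or of this patch) with the Q_TYPE_* values mirrored from the header:

#include <stdio.h>

/* mirrored from above: bits 2:1 of hw_next carry the type tag */
#define Q_TYPE_ITD	(0 << 1)
#define Q_TYPE_QH	(1 << 1)
#define Q_TYPE_SITD	(2 << 1)
#define Q_TYPE_FSTN	(3 << 1)

static const char *shadow_member(unsigned int hw_next)
{
	switch (hw_next & (3 << 1)) {
	case Q_TYPE_ITD:	return "itd";
	case Q_TYPE_QH:		return "qh";
	case Q_TYPE_FSTN:	return "fstn";
	default:		return "sitd (unused by this controller)";
	}
}

int main(void)
{
	/* hypothetical entry: a QH at dma address 0x1000 */
	unsigned int entry = 0x1000 | Q_TYPE_QH;

	printf("entry 0x%08x -> follow shadow.%s\n", entry, shadow_member(entry));
	return 0;
}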
-
-/*
- * EHCI Specification 0.95 Section 3.6
- * QH: describes control/bulk/interrupt endpoints
- * See Fig 3-7 "Queue Head Structure Layout".
- *
- * These appear in both the async and (for interrupt) periodic schedules.
- */
-
-/* first part defined by EHCI spec */
-struct fusbh200_qh_hw {
- __hc32 hw_next; /* see EHCI 3.6.1 */
- __hc32 hw_info1; /* see EHCI 3.6.2 */
-#define QH_CONTROL_EP (1 << 27) /* FS/LS control endpoint */
-#define QH_HEAD (1 << 15) /* Head of async reclamation list */
-#define QH_TOGGLE_CTL (1 << 14) /* Data toggle control */
-#define QH_HIGH_SPEED (2 << 12) /* Endpoint speed */
-#define QH_LOW_SPEED (1 << 12)
-#define QH_FULL_SPEED (0 << 12)
-#define QH_INACTIVATE (1 << 7) /* Inactivate on next transaction */
- __hc32 hw_info2; /* see EHCI 3.6.2 */
-#define QH_SMASK 0x000000ff
-#define QH_CMASK 0x0000ff00
-#define QH_HUBADDR 0x007f0000
-#define QH_HUBPORT 0x3f800000
-#define QH_MULT 0xc0000000
- __hc32 hw_current; /* qtd list - see EHCI 3.6.4 */
-
- /* qtd overlay (hardware parts of a struct fusbh200_qtd) */
- __hc32 hw_qtd_next;
- __hc32 hw_alt_next;
- __hc32 hw_token;
- __hc32 hw_buf [5];
- __hc32 hw_buf_hi [5];
-} __attribute__ ((aligned(32)));
-
-struct fusbh200_qh {
- struct fusbh200_qh_hw *hw; /* Must come first */
- /* the rest is HCD-private */
- dma_addr_t qh_dma; /* address of qh */
- union fusbh200_shadow qh_next; /* ptr to qh; or periodic */
- struct list_head qtd_list; /* sw qtd list */
- struct list_head intr_node; /* list of intr QHs */
- struct fusbh200_qtd *dummy;
- struct fusbh200_qh *unlink_next; /* next on unlink list */
-
- unsigned unlink_cycle;
-
- u8 needs_rescan; /* Dequeue during giveback */
- u8 qh_state;
-#define QH_STATE_LINKED 1 /* HC sees this */
-#define QH_STATE_UNLINK 2 /* HC may still see this */
-#define QH_STATE_IDLE 3 /* HC doesn't see this */
-#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on unlink q */
-#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */
-
- u8 xacterrs; /* XactErr retry counter */
-#define QH_XACTERR_MAX 32 /* XactErr retry limit */
-
- /* periodic schedule info */
- u8 usecs; /* intr bandwidth */
- u8 gap_uf; /* uframes split/csplit gap */
- u8 c_usecs; /* ... split completion bw */
- u16 tt_usecs; /* tt downstream bandwidth */
- unsigned short period; /* polling interval */
- unsigned short start; /* where polling starts */
-#define NO_FRAME ((unsigned short)~0) /* pick new start */
-
- struct usb_device *dev; /* access to TT */
- unsigned is_out:1; /* bulk or intr OUT */
- unsigned clearing_tt:1; /* Clear-TT-Buf in progress */
-};
-
-/*-------------------------------------------------------------------------*/
-
-/* description of one iso transaction (up to 3 KB data if highspeed) */
-struct fusbh200_iso_packet {
- /* These will be copied to iTD when scheduling */
- u64 bufp; /* itd->hw_bufp{,_hi}[pg] |= */
- __hc32 transaction; /* itd->hw_transaction[i] |= */
- u8 cross; /* buf crosses pages */
- /* for full speed OUT splits */
- u32 buf1;
-};
-
-/* temporary schedule data for packets from iso urbs (both speeds)
- * each packet is one logical usb transaction to the device (not TT),
- * beginning at stream->next_uframe
- */
-struct fusbh200_iso_sched {
- struct list_head td_list;
- unsigned span;
- struct fusbh200_iso_packet packet [0];
-};
-
-/*
- * fusbh200_iso_stream - groups all (s)itds for this endpoint.
- * acts like a qh would, if EHCI had them for ISO.
- */
-struct fusbh200_iso_stream {
- /* first field matches fusbh200_qh, but is NULL */
- struct fusbh200_qh_hw *hw;
-
- u8 bEndpointAddress;
- u8 highspeed;
- struct list_head td_list; /* queued itds */
- struct list_head free_list; /* list of unused itds */
- struct usb_device *udev;
- struct usb_host_endpoint *ep;
-
- /* output of (re)scheduling */
- int next_uframe;
- __hc32 splits;
-
- /* the rest is derived from the endpoint descriptor,
- * trusting urb->interval == f(epdesc->bInterval) and
- * including the extra info for hw_bufp[0..2]
- */
- u8 usecs, c_usecs;
- u16 interval;
- u16 tt_usecs;
- u16 maxp;
- u16 raw_mask;
- unsigned bandwidth;
-
- /* This is used to initialize iTD's hw_bufp fields */
- __hc32 buf0;
- __hc32 buf1;
- __hc32 buf2;
-
- /* this is used to initialize sITD's tt info */
- __hc32 address;
-};
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * EHCI Specification 0.95 Section 3.3
- * Fig 3-4 "Isochronous Transaction Descriptor (iTD)"
- *
- * Schedule records for high speed iso xfers
- */
-struct fusbh200_itd {
- /* first part defined by EHCI spec */
- __hc32 hw_next; /* see EHCI 3.3.1 */
- __hc32 hw_transaction [8]; /* see EHCI 3.3.2 */
-#define FUSBH200_ISOC_ACTIVE (1<<31) /* activate transfer this slot */
-#define FUSBH200_ISOC_BUF_ERR (1<<30) /* Data buffer error */
-#define FUSBH200_ISOC_BABBLE (1<<29) /* babble detected */
-#define FUSBH200_ISOC_XACTERR (1<<28) /* XactErr - transaction error */
-#define FUSBH200_ITD_LENGTH(tok) (((tok)>>16) & 0x0fff)
-#define FUSBH200_ITD_IOC (1 << 15) /* interrupt on complete */
-
-#define ITD_ACTIVE(fusbh200) cpu_to_hc32(fusbh200, FUSBH200_ISOC_ACTIVE)
-
- __hc32 hw_bufp [7]; /* see EHCI 3.3.3 */
- __hc32 hw_bufp_hi [7]; /* Appendix B */
-
- /* the rest is HCD-private */
- dma_addr_t itd_dma; /* for this itd */
- union fusbh200_shadow itd_next; /* ptr to periodic q entry */
-
- struct urb *urb;
- struct fusbh200_iso_stream *stream; /* endpoint's queue */
- struct list_head itd_list; /* list of stream's itds */
-
- /* any/all hw_transactions here may be used by that urb */
- unsigned frame; /* where scheduled */
- unsigned pg;
- unsigned index[8]; /* in urb->iso_frame_desc */
-} __attribute__ ((aligned (32)));
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * EHCI Specification 0.96 Section 3.7
- * Periodic Frame Span Traversal Node (FSTN)
- *
- * Manages split interrupt transactions (using TT) that span frame boundaries
- * into uframes 0/1; see 4.12.2.2. In those uframes, a "save place" FSTN
- * makes the HC jump (back) to a QH to scan for fs/ls QH completions until
- * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work.
- */
-struct fusbh200_fstn {
- __hc32 hw_next; /* any periodic q entry */
- __hc32 hw_prev; /* qh or FUSBH200_LIST_END */
-
- /* the rest is HCD-private */
- dma_addr_t fstn_dma;
- union fusbh200_shadow fstn_next; /* ptr to periodic q entry */
-} __attribute__ ((aligned (32)));
-
-/*-------------------------------------------------------------------------*/
-
-/* Prepare the PORTSC wakeup flags during controller suspend/resume */
-
-#define fusbh200_prepare_ports_for_controller_suspend(fusbh200, do_wakeup) \
- fusbh200_adjust_port_wakeup_flags(fusbh200, true, do_wakeup);
-
-#define fusbh200_prepare_ports_for_controller_resume(fusbh200) \
- fusbh200_adjust_port_wakeup_flags(fusbh200, false, false);
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * Some EHCI controllers have a Transaction Translator built into the
- * root hub. This is a non-standard feature. Each controller will need
- * to add code to the following inline functions, and call them as
- * needed (mostly in root hub code).
- */
-
-static inline unsigned int
-fusbh200_get_speed(struct fusbh200_hcd *fusbh200, unsigned int portsc)
-{
- return (readl(&fusbh200->regs->bmcsr)
- & BMCSR_HOST_SPD_TYP) >> 9;
-}
-
-/* Returns the speed of a device attached to a port on the root hub. */
-static inline unsigned int
-fusbh200_port_speed(struct fusbh200_hcd *fusbh200, unsigned int portsc)
-{
- switch (fusbh200_get_speed(fusbh200, portsc)) {
- case 0:
- return 0;
- case 1:
- return USB_PORT_STAT_LOW_SPEED;
- case 2:
- default:
- return USB_PORT_STAT_HIGH_SPEED;
- }
-}
-
-/*-------------------------------------------------------------------------*/
-
-#define fusbh200_has_fsl_portno_bug(e) (0)
-
-/*
- * While most USB host controllers implement their registers in
- * little-endian format, a minority (celleb companion chip) implement
- * them in big endian format.
- *
- * This attempts to support either format at compile time without a
- * runtime penalty, or both formats with the additional overhead
- * of checking a flag bit.
- *
- */
-
-#define fusbh200_big_endian_mmio(e) 0
-#define fusbh200_big_endian_capbase(e) 0
-
-static inline unsigned int fusbh200_readl(const struct fusbh200_hcd *fusbh200,
- __u32 __iomem * regs)
-{
- return readl(regs);
-}
-
-static inline void fusbh200_writel(const struct fusbh200_hcd *fusbh200,
- const unsigned int val, __u32 __iomem *regs)
-{
- writel(val, regs);
-}
-
-/* cpu to fusbh200 */
-static inline __hc32 cpu_to_hc32 (const struct fusbh200_hcd *fusbh200, const u32 x)
-{
- return cpu_to_le32(x);
-}
-
-/* fusbh200 to cpu */
-static inline u32 hc32_to_cpu (const struct fusbh200_hcd *fusbh200, const __hc32 x)
-{
- return le32_to_cpu(x);
-}
-
-static inline u32 hc32_to_cpup (const struct fusbh200_hcd *fusbh200, const __hc32 *x)
-{
- return le32_to_cpup(x);
-}
-
-/*-------------------------------------------------------------------------*/
-
-static inline unsigned fusbh200_read_frame_index(struct fusbh200_hcd *fusbh200)
-{
- return fusbh200_readl(fusbh200, &fusbh200->regs->frame_index);
-}
-
-#define fusbh200_itdlen(urb, desc, t) ({ \
- usb_pipein((urb)->pipe) ? \
- (desc)->length - FUSBH200_ITD_LENGTH(t) : \
- FUSBH200_ITD_LENGTH(t); \
-})
-/*-------------------------------------------------------------------------*/
-
-#endif /* __LINUX_FUSBH200_H */
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index 707437c88d03..56478ed2f932 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -161,6 +161,7 @@ static const struct of_device_id spear_ohci_id_table[] = {
{ .compatible = "st,spear600-ohci", },
{ },
};
+MODULE_DEVICE_TABLE(of, spear_ohci_id_table);
/* Driver definition to register with the platform bus */
static struct platform_driver spear_ohci_hcd_driver = {
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index a67bd5090330..0a94895a358d 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -2245,8 +2245,7 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
struct u132 *u132 = hcd_to_u132(hcd);
if (irqs_disabled()) {
if (__GFP_WAIT & mem_flags) {
- printk(KERN_ERR "invalid context for function that migh"
- "t sleep\n");
+ printk(KERN_ERR "invalid context for function that might sleep\n");
return -EINVAL;
}
}
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index 3a3e3eeba291..32a6f3d8deec 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -140,6 +140,7 @@ static const struct of_device_id platform_uhci_ids[] = {
{ .compatible = "platform-uhci", },
{}
};
+MODULE_DEVICE_TABLE(of, platform_uhci_ids);
static struct platform_driver uhci_platform_driver = {
.probe = uhci_hcd_platform_probe,
diff --git a/drivers/usb/host/whci/init.c b/drivers/usb/host/whci/init.c
index d3e13b640d4b..e36372393bb1 100644
--- a/drivers/usb/host/whci/init.c
+++ b/drivers/usb/host/whci/init.c
@@ -175,8 +175,7 @@ void whc_clean_up(struct whc *whc)
pzl_clean_up(whc);
asl_clean_up(whc);
- if (whc->qset_pool)
- dma_pool_destroy(whc->qset_pool);
+ dma_pool_destroy(whc->qset_pool);
len = resource_size(&whc->umc->resource);
if (whc->base)
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 2d16faefb429..74c42f722678 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -58,16 +58,17 @@ void xhci_dbg_regs(struct xhci_hcd *xhci)
static void xhci_print_cap_regs(struct xhci_hcd *xhci)
{
u32 temp;
+ u32 hci_version;
xhci_dbg(xhci, "xHCI capability registers at %p:\n", xhci->cap_regs);
temp = readl(&xhci->cap_regs->hc_capbase);
+ hci_version = HC_VERSION(temp);
xhci_dbg(xhci, "CAPLENGTH AND HCIVERSION 0x%x:\n",
(unsigned int) temp);
xhci_dbg(xhci, "CAPLENGTH: 0x%x\n",
(unsigned int) HC_LENGTH(temp));
- xhci_dbg(xhci, "HCIVERSION: 0x%x\n",
- (unsigned int) HC_VERSION(temp));
+ xhci_dbg(xhci, "HCIVERSION: 0x%x\n", hci_version);
temp = readl(&xhci->cap_regs->hcs_params1);
xhci_dbg(xhci, "HCSPARAMS 1: 0x%x\n",
@@ -108,6 +109,18 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci)
temp = readl(&xhci->cap_regs->run_regs_off);
xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
+
+ /* xhci 1.1 controllers have the HCCPARAMS2 register */
+ if (hci_version > 100) {
+ temp = readl(&xhci->cap_regs->hcc_params2);
+ xhci_dbg(xhci, "HCC PARAMS2 0x%x:\n", (unsigned int) temp);
+ xhci_dbg(xhci, " HC %s Force save context capability",
+ HCC2_FSC(temp) ? "supports" : "doesn't support");
+ xhci_dbg(xhci, " HC %s Large ESIT Payload Capability",
+ HCC2_LEC(temp) ? "supports" : "doesn't support");
+ xhci_dbg(xhci, " HC %s Extended TBC capability",
+ HCC2_ETC(temp) ? "supports" : "doesn't support");
+ }
}
static void xhci_print_command_reg(struct xhci_hcd *xhci)
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 78241b5550df..5d2d7e954bd4 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -31,13 +31,15 @@
#define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
PORT_RC | PORT_PLC | PORT_PE)
-/* USB 3.0 BOS descriptor and a capability descriptor, combined */
+/* USB 3 BOS descriptor and capability descriptors, combined.
+ * Fields will be adjusted and added later in xhci_create_usb3_bos_desc()
+ */
static u8 usb_bos_descriptor [] = {
USB_DT_BOS_SIZE, /* __u8 bLength, 5 bytes */
USB_DT_BOS, /* __u8 bDescriptorType */
0x0F, 0x00, /* __le16 wTotalLength, 15 bytes */
0x1, /* __u8 bNumDeviceCaps */
- /* First device capability */
+ /* First device capability, SuperSpeed */
USB_DT_USB_SS_CAP_SIZE, /* __u8 bLength, 10 bytes */
USB_DT_DEVICE_CAPABILITY, /* Device Capability */
USB_SS_CAP_TYPE, /* bDevCapabilityType, SUPERSPEED_USB */
@@ -46,9 +48,108 @@ static u8 usb_bos_descriptor [] = {
0x03, /* bFunctionalitySupport,
USB 3.0 speed only */
0x00, /* bU1DevExitLat, set later. */
- 0x00, 0x00 /* __le16 bU2DevExitLat, set later. */
+ 0x00, 0x00, /* __le16 bU2DevExitLat, set later. */
+ /* Second device capability, SuperSpeedPlus */
+ 0x0c, /* bLength 12, will be adjusted later */
+ USB_DT_DEVICE_CAPABILITY, /* Device Capability */
+ USB_SSP_CAP_TYPE, /* bDevCapabilityType SUPERSPEED_PLUS */
+ 0x00, /* bReserved 0 */
+ 0x00, 0x00, 0x00, 0x00, /* bmAttributes, get from xhci psic */
+ 0x00, 0x00, /* wFunctionalitySupport */
+ 0x00, 0x00, /* wReserved 0 */
+ /* Sublink Speed Attributes are added in xhci_create_usb3_bos_desc() */
};
+static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
+ u16 wLength)
+{
+ int i, ssa_count;
+ u32 temp;
+ u16 desc_size, ssp_cap_size, ssa_size = 0;
+ bool usb3_1 = false;
+
+ desc_size = USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE;
+ ssp_cap_size = sizeof(usb_bos_descriptor) - desc_size;
+
+ /* does xhci support USB 3.1 Enhanced SuperSpeed */
+ if (xhci->usb3_rhub.min_rev >= 0x01 && xhci->usb3_rhub.psi_uid_count) {
+ /* two SSA entries for each unique PSI ID, one RX and one TX */
+ ssa_count = xhci->usb3_rhub.psi_uid_count * 2;
+ ssa_size = ssa_count * sizeof(u32);
+ desc_size += ssp_cap_size;
+ usb3_1 = true;
+ }
+ memcpy(buf, &usb_bos_descriptor, min(desc_size, wLength));
+
+ if (usb3_1) {
+ /* modify bos descriptor bNumDeviceCaps and wTotalLength */
+ buf[4] += 1;
+ put_unaligned_le16(desc_size + ssa_size, &buf[2]);
+ }
+
+ if (wLength < USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE)
+ return wLength;
+
+ /* Indicate whether the host has LTM support. */
+ temp = readl(&xhci->cap_regs->hcc_params);
+ if (HCC_LTC(temp))
+ buf[8] |= USB_LTM_SUPPORT;
+
+ /* Set the U1 and U2 exit latencies. */
+ if ((xhci->quirks & XHCI_LPM_SUPPORT)) {
+ temp = readl(&xhci->cap_regs->hcs_params3);
+ buf[12] = HCS_U1_LATENCY(temp);
+ put_unaligned_le16(HCS_U2_LATENCY(temp), &buf[13]);
+ }
+
+ if (usb3_1) {
+ u32 ssp_cap_base, bm_attrib, psi;
+ int offset;
+
+ ssp_cap_base = USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE;
+
+ if (wLength < desc_size)
+ return wLength;
+ buf[ssp_cap_base] = ssp_cap_size + ssa_size;
+
+ /* attribute count SSAC bits 4:0 and ID count SSIC bits 8:5 */
+ bm_attrib = (ssa_count - 1) & 0x1f;
+ bm_attrib |= (xhci->usb3_rhub.psi_uid_count - 1) << 5;
+ put_unaligned_le32(bm_attrib, &buf[ssp_cap_base + 4]);
+
+ if (wLength < desc_size + ssa_size)
+ return wLength;
+ /*
+ * Create the Sublink Speed Attributes (SSA) array.
+ * The xhci PSI field and USB 3.1 SSA fields are very similar,
+ * but link type bits 7:6 differ for values 01b and 10b.
+ * xhci also has only one PSI entry for a symmetric link, whereas
+ * USB 3.1 requires two SSA entries (RX and TX) for every link.
+ */
+ offset = desc_size;
+ for (i = 0; i < xhci->usb3_rhub.psi_count; i++) {
+ psi = xhci->usb3_rhub.psi[i];
+ psi &= ~USB_SSP_SUBLINK_SPEED_RSVD;
+ if ((psi & PLT_MASK) == PLT_SYM) {
+ /* Symmetric, create SSA RX and TX from one PSI entry */
+ put_unaligned_le32(psi, &buf[offset]);
+ psi |= 1 << 7; /* turn entry to TX */
+ offset += 4;
+ if (offset >= desc_size + ssa_size)
+ return desc_size + ssa_size;
+ } else if ((psi & PLT_MASK) == PLT_ASYM_RX) {
+ /* Asymmetric RX, flip bits 7:6 for SSA */
+ psi ^= PLT_MASK;
+ }
+ put_unaligned_le32(psi, &buf[offset]);
+ offset += 4;
+ if (offset >= desc_size + ssa_size)
+ return desc_size + ssa_size;
+ }
+ }
+ /* ssa_size is 0 for other than usb 3.1 hosts */
+ return desc_size + ssa_size;
+}
static void xhci_common_hub_descriptor(struct xhci_hcd *xhci,
struct usb_hub_descriptor *desc, int ports)
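To make the PSI-to-SSA conversion in xhci_create_usb3_bos_desc() above easier to follow, here is a minimal standalone sketch (illustrative only, not part of this patch; the PLT_* values are assumed to mirror the xhci driver's link-type field in bits 7:6 of a PSI dword). A symmetric PSI expands into an RX and a TX Sublink Speed Attribute, while an asymmetric-RX PSI only has its type bits flipped:

#include <stdint.h>
#include <stdio.h>

/* assumed to mirror the xhci PSI link-type field, bits 7:6 */
#define PLT_MASK	(0x3 << 6)
#define PLT_SYM		(0x0 << 6)	/* symmetric: one PSI covers RX and TX */
#define PLT_ASYM_RX	(0x2 << 6)

/* expand one PSI dword into USB 3.1 SSA dwords; returns how many were written */
static int psi_to_ssa(uint32_t psi, uint32_t ssa[2])
{
	if ((psi & PLT_MASK) == PLT_SYM) {
		ssa[0] = psi;			/* RX entry */
		ssa[1] = psi | (1 << 7);	/* duplicate entry, turned into TX */
		return 2;
	}
	if ((psi & PLT_MASK) == PLT_ASYM_RX)
		psi ^= PLT_MASK;		/* flip bits 7:6 for the SSA encoding */
	ssa[0] = psi;
	return 1;
}

int main(void)
{
	uint32_t ssa[2];
	int i, n = psi_to_ssa(0x00050034, ssa);	/* hypothetical symmetric PSI */

	for (i = 0; i < n; i++)
		printf("SSA[%d] = 0x%08x\n", i, (unsigned int)ssa[i]);
	return 0;
}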
@@ -161,7 +262,7 @@ static void xhci_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
struct usb_hub_descriptor *desc)
{
- if (hcd->speed == HCD_USB3)
+ if (hcd->speed >= HCD_USB3)
xhci_usb3_hub_descriptor(hcd, xhci, desc);
else
xhci_usb2_hub_descriptor(hcd, xhci, desc);
@@ -250,7 +351,7 @@ int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
if (!xhci->devs[i])
continue;
speed = xhci->devs[i]->udev->speed;
- if (((speed == USB_SPEED_SUPER) == (hcd->speed == HCD_USB3))
+ if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3))
&& xhci->devs[i]->fake_port == port) {
slot_id = i;
break;
@@ -339,7 +440,7 @@ static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
u16 wIndex, __le32 __iomem *addr, u32 port_status)
{
/* Don't allow the USB core to disable SuperSpeed ports. */
- if (hcd->speed == HCD_USB3) {
+ if (hcd->speed >= HCD_USB3) {
xhci_dbg(xhci, "Ignoring request to disable "
"SuperSpeed port.\n");
return;
@@ -407,7 +508,7 @@ static int xhci_get_ports(struct usb_hcd *hcd, __le32 __iomem ***port_array)
int max_ports;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- if (hcd->speed == HCD_USB3) {
+ if (hcd->speed >= HCD_USB3) {
max_ports = xhci->num_usb3_ports;
*port_array = xhci->usb3_ports;
} else {
@@ -558,6 +659,22 @@ static void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status,
}
}
+static u32 xhci_get_ext_port_status(u32 raw_port_status, u32 port_li)
+{
+ u32 ext_stat = 0;
+ int speed_id;
+
+ /* the usb3.1 spec only supports rx and tx lane counts of 1 */
+ speed_id = DEV_PORT_SPEED(raw_port_status);
+ ext_stat |= speed_id; /* bits 3:0, RX speed id */
+ ext_stat |= speed_id << 4; /* bits 7:4, TX speed id */
+
+ ext_stat |= PORT_RX_LANES(port_li) << 8; /* bits 11:8 Rx lane count */
+ ext_stat |= PORT_TX_LANES(port_li) << 12; /* bits 15:12 Tx lane count */
+
+ return ext_stat;
+}
+
/*
* Converts a raw xHCI port status into the format that external USB 2.0 or USB
* 3.0 hubs use.
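The packing done by xhci_get_ext_port_status() above can be checked with a minimal standalone sketch (illustrative only, not part of this patch; the PORTLI lane-count field positions are assumptions):

#include <stdio.h>

/* assumed PORTLI layout: RX lane count in bits 19:16, TX lane count in bits 23:20 */
#define PORT_RX_LANES(p)	(((p) >> 16) & 0xf)
#define PORT_TX_LANES(p)	(((p) >> 20) & 0xf)

/* same packing as xhci_get_ext_port_status() in the hunk above */
static unsigned int ext_port_status(unsigned int speed_id, unsigned int port_li)
{
	unsigned int ext = 0;

	ext |= speed_id;			/* bits  3:0  RX speed id */
	ext |= speed_id << 4;			/* bits  7:4  TX speed id */
	ext |= PORT_RX_LANES(port_li) << 8;	/* bits 11:8  RX lane count */
	ext |= PORT_TX_LANES(port_li) << 12;	/* bits 15:12 TX lane count */
	return ext;
}

int main(void)
{
	/* hypothetical: speed id 5, one RX and one TX lane reported in PORTLI */
	printf("extended status = 0x%04x\n", ext_port_status(5, 0x00110000));
	return 0;
}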
@@ -590,7 +707,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
if ((raw_port_status & PORT_RC))
status |= USB_PORT_STAT_C_RESET << 16;
/* USB3.0 only */
- if (hcd->speed == HCD_USB3) {
+ if (hcd->speed >= HCD_USB3) {
/* Port link change with port in resume state should not be
* reported to usbcore, as this is an internal state to be
* handled by xhci driver. Reporting PLC to usbcore may
@@ -606,13 +723,13 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
status |= USB_PORT_STAT_C_CONFIG_ERROR << 16;
}
- if (hcd->speed != HCD_USB3) {
+ if (hcd->speed < HCD_USB3) {
if ((raw_port_status & PORT_PLS_MASK) == XDEV_U3
&& (raw_port_status & PORT_POWER))
status |= USB_PORT_STAT_SUSPEND;
}
if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME &&
- !DEV_SUPERSPEED(raw_port_status)) {
+ !DEV_SUPERSPEED_ANY(raw_port_status)) {
if ((raw_port_status & PORT_RESET) ||
!(raw_port_status & PORT_PE))
return 0xffffffff;
@@ -669,7 +786,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
&& (raw_port_status & PORT_POWER)
&& (bus_state->suspended_ports & (1 << wIndex))) {
bus_state->suspended_ports &= ~(1 << wIndex);
- if (hcd->speed != HCD_USB3)
+ if (hcd->speed < HCD_USB3)
bus_state->port_c_suspend |= 1 << wIndex;
}
if (raw_port_status & PORT_CONNECT) {
@@ -683,13 +800,13 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
if (raw_port_status & PORT_RESET)
status |= USB_PORT_STAT_RESET;
if (raw_port_status & PORT_POWER) {
- if (hcd->speed == HCD_USB3)
+ if (hcd->speed >= HCD_USB3)
status |= USB_SS_PORT_STAT_POWER;
else
status |= USB_PORT_STAT_POWER;
}
/* Update Port Link State */
- if (hcd->speed == HCD_USB3) {
+ if (hcd->speed >= HCD_USB3) {
xhci_hub_report_usb3_link_state(xhci, &status, raw_port_status);
/*
* Verify if all USB3 Ports Have entered U0 already.
@@ -734,7 +851,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* descriptor for the USB 3.0 roothub. If not, we stall the
* endpoint, like external hubs do.
*/
- if (hcd->speed == HCD_USB3 &&
+ if (hcd->speed >= HCD_USB3 &&
(wLength < USB_DT_SS_HUB_SIZE ||
wValue != (USB_DT_SS_HUB << 8))) {
xhci_dbg(xhci, "Wrong hub descriptor type for "
@@ -748,25 +865,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
if ((wValue & 0xff00) != (USB_DT_BOS << 8))
goto error;
- if (hcd->speed != HCD_USB3)
+ if (hcd->speed < HCD_USB3)
goto error;
- /* Set the U1 and U2 exit latencies. */
- memcpy(buf, &usb_bos_descriptor,
- USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE);
- if ((xhci->quirks & XHCI_LPM_SUPPORT)) {
- temp = readl(&xhci->cap_regs->hcs_params3);
- buf[12] = HCS_U1_LATENCY(temp);
- put_unaligned_le16(HCS_U2_LATENCY(temp), &buf[13]);
- }
-
- /* Indicate whether the host has LTM support. */
- temp = readl(&xhci->cap_regs->hcc_params);
- if (HCC_LTC(temp))
- buf[8] |= USB_LTM_SUPPORT;
-
+ retval = xhci_create_usb3_bos_desc(xhci, buf, wLength);
spin_unlock_irqrestore(&xhci->lock, flags);
- return USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE;
+ return retval;
case GetPortStatus:
if (!wIndex || wIndex > max_ports)
goto error;
@@ -786,6 +890,19 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
put_unaligned(cpu_to_le32(status), (__le32 *) buf);
+ /* if USB 3.1 extended port status was requested, return 4 additional bytes */
+ if (wValue == 0x02) {
+ u32 port_li;
+
+ if (hcd->speed < HCD_USB31 || wLength != 8) {
+ xhci_err(xhci, "get ext port status invalid parameter\n");
+ retval = -EINVAL;
+ break;
+ }
+ port_li = readl(port_array[wIndex] + PORTLI);
+ status = xhci_get_ext_port_status(temp, port_li);
+ put_unaligned_le32(status, &buf[4]);
+ }
break;
case SetPortFeature:
if (wValue == USB_PORT_FEAT_LINK_STATE)
@@ -952,7 +1069,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp = readl(port_array[wIndex]);
break;
case USB_PORT_FEAT_U1_TIMEOUT:
- if (hcd->speed != HCD_USB3)
+ if (hcd->speed < HCD_USB3)
goto error;
temp = readl(port_array[wIndex] + PORTPMSC);
temp &= ~PORT_U1_TIMEOUT_MASK;
@@ -960,7 +1077,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
writel(temp, port_array[wIndex] + PORTPMSC);
break;
case USB_PORT_FEAT_U2_TIMEOUT:
- if (hcd->speed != HCD_USB3)
+ if (hcd->speed < HCD_USB3)
goto error;
temp = readl(port_array[wIndex] + PORTPMSC);
temp &= ~PORT_U2_TIMEOUT_MASK;
@@ -1223,14 +1340,14 @@ int xhci_bus_resume(struct usb_hcd *hcd)
u32 temp;
temp = readl(port_array[port_index]);
- if (DEV_SUPERSPEED(temp))
+ if (DEV_SUPERSPEED_ANY(temp))
temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
else
temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
if (test_bit(port_index, &bus_state->bus_suspended) &&
(temp & PORT_PLS_MASK)) {
set_bit(port_index, &port_was_suspended);
- if (!DEV_SUPERSPEED(temp)) {
+ if (!DEV_SUPERSPEED_ANY(temp)) {
xhci_set_link_state(xhci, port_array,
port_index, XDEV_RESUME);
need_usb2_u3_exit = true;
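For illustration only (not part of the patch): with the macros added to xhci.h in this series, a hypothetical SuperSpeedPlus port reporting speed ID 5 (XDEV_SSP) and one lane in each direction would pack into the extended port status word as follows.

	u32 raw_port_status = XDEV_SSP;          /* speed field = 5, bits 13:10 */
	u32 port_li = (1 << 16) | (1 << 20);     /* 1 RX lane, 1 TX lane */
	u32 ext = xhci_get_ext_port_status(raw_port_status, port_li);
	/* ext == 0x1155: RX speed id 5 (bits 3:0), TX speed id 5 (bits 7:4),
	 * 1 RX lane (bits 11:8), 1 TX lane (bits 15:12) */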
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 41f841fa6c4d..c48cbe731356 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1828,24 +1828,20 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
for (i = 1; i < MAX_HC_SLOTS; ++i)
xhci_free_virt_device(xhci, i);
- if (xhci->segment_pool)
- dma_pool_destroy(xhci->segment_pool);
+ dma_pool_destroy(xhci->segment_pool);
xhci->segment_pool = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");
- if (xhci->device_pool)
- dma_pool_destroy(xhci->device_pool);
+ dma_pool_destroy(xhci->device_pool);
xhci->device_pool = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");
- if (xhci->small_streams_pool)
- dma_pool_destroy(xhci->small_streams_pool);
+ dma_pool_destroy(xhci->small_streams_pool);
xhci->small_streams_pool = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Freed small stream array pool");
- if (xhci->medium_streams_pool)
- dma_pool_destroy(xhci->medium_streams_pool);
+ dma_pool_destroy(xhci->medium_streams_pool);
xhci->medium_streams_pool = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Freed medium stream array pool");
@@ -2072,14 +2068,23 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
{
u32 temp, port_offset, port_count;
int i;
+ struct xhci_hub *rhub;
- if (major_revision > 0x03) {
+ temp = readl(addr);
+
+ if (XHCI_EXT_PORT_MAJOR(temp) == 0x03) {
+ rhub = &xhci->usb3_rhub;
+ } else if (XHCI_EXT_PORT_MAJOR(temp) <= 0x02) {
+ rhub = &xhci->usb2_rhub;
+ } else {
xhci_warn(xhci, "Ignoring unknown port speed, "
"Ext Cap %p, revision = 0x%x\n",
addr, major_revision);
/* Ignoring port protocol we can't understand. FIXME */
return;
}
+ rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
+ rhub->min_rev = XHCI_EXT_PORT_MINOR(temp);
/* Port offset and count in the third dword, see section 7.2 */
temp = readl(addr + 2);
@@ -2094,6 +2099,33 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
/* WTF? "Valid values are ‘1’ to MaxPorts" */
return;
+ rhub->psi_count = XHCI_EXT_PORT_PSIC(temp);
+ if (rhub->psi_count) {
+ rhub->psi = kcalloc(rhub->psi_count, sizeof(*rhub->psi),
+ GFP_KERNEL);
+ if (!rhub->psi)
+ rhub->psi_count = 0;
+
+ rhub->psi_uid_count++;
+ for (i = 0; i < rhub->psi_count; i++) {
+ rhub->psi[i] = readl(addr + 4 + i);
+
+ /* count unique ID values, two consecutive entries can
+ * have the same ID if the link is asymmetric
+ */
+ if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) !=
+ XHCI_EXT_PORT_PSIV(rhub->psi[i - 1])))
+ rhub->psi_uid_count++;
+
+ xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
+ XHCI_EXT_PORT_PSIV(rhub->psi[i]),
+ XHCI_EXT_PORT_PSIE(rhub->psi[i]),
+ XHCI_EXT_PORT_PLT(rhub->psi[i]),
+ XHCI_EXT_PORT_PFD(rhub->psi[i]),
+ XHCI_EXT_PORT_LP(rhub->psi[i]),
+ XHCI_EXT_PORT_PSIM(rhub->psi[i]));
+ }
+ }
/* cache usb2 port capabilities */
if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
xhci->ext_caps[xhci->num_ext_caps++] = temp;
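As a reference decode (sketch, not part of the patch), a hypothetical PSI dword for a symmetric 5 Gb/s link works out as follows with the XHCI_EXT_PORT_* macros added to xhci.h:

	u32 psi = 0x00050134;
	/* XHCI_EXT_PORT_PSIV(psi) == 4   protocol speed ID value   */
	/* XHCI_EXT_PORT_PSIE(psi) == 3   exponent (Gb/s)           */
	/* XHCI_EXT_PORT_PLT(psi)  == 0   PLT_SYM, symmetric link   */
	/* XHCI_EXT_PORT_PFD(psi)  == 1   PSI full-duplex           */
	/* XHCI_EXT_PORT_PSIM(psi) == 5   mantissa, i.e. 5 Gb/s     */
	/* in xhci_create_usb3_bos_desc() a symmetric entry like this is
	 * written twice into the SSA: once as the RX dword and once with
	 * bit 7 set (psi | 1 << 7) as the TX dword */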
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c79d33676672..012d7f4c2901 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -200,15 +200,17 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
int retval;
+ xhci = hcd_to_xhci(hcd);
+ if (!xhci->sbrn)
+ pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn);
+
retval = xhci_gen_setup(hcd, xhci_pci_quirks);
if (retval)
return retval;
- xhci = hcd_to_xhci(hcd);
if (!usb_hcd_is_primary_hcd(hcd))
return 0;
- pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn);
xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
/* Find any debug ports */
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 890ad9d9d329..05647e6753cd 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -19,6 +19,7 @@
#include <linux/usb/phy.h>
#include <linux/slab.h>
#include <linux/usb/xhci_pdriver.h>
+#include <linux/acpi.h>
#include "xhci.h"
#include "xhci-mvebu.h"
@@ -93,14 +94,20 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (irq < 0)
return -ENODEV;
- /* Initialize dma_mask and coherent_dma_mask to 32-bits */
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
- if (!pdev->dev.dma_mask)
- pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ /* Try to set 64-bit DMA first */
+ if (WARN_ON(!pdev->dev.dma_mask))
+ /* Platform did not initialize dma_mask */
+ ret = dma_coerce_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(64));
else
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+
+ /* If setting 64-bit DMA mask fails, fall back to 32-bit DMA mask */
+ if (ret) {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+ }
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd)
@@ -262,6 +269,13 @@ static const struct of_device_id usb_xhci_of_match[] = {
MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
#endif
+static const struct acpi_device_id usb_xhci_acpi_match[] = {
+ /* XHCI-compliant USB Controller */
+ { "PNP0D10", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
+
static struct platform_driver usb_xhci_driver = {
.probe = xhci_plat_probe,
.remove = xhci_plat_remove,
@@ -269,6 +283,7 @@ static struct platform_driver usb_xhci_driver = {
.name = "xhci-hcd",
.pm = DEV_PM_OPS,
.of_match_table = of_match_ptr(usb_xhci_of_match),
+ .acpi_match_table = ACPI_PTR(usb_xhci_acpi_match),
},
};
MODULE_ALIAS("platform:xhci-hcd");
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 43291f93afeb..4c54ccc1583a 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1453,7 +1453,7 @@ static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
* 1.1 ports are under the USB 2.0 hub. If the port speed
* matches the device speed, it's a similar speed port.
*/
- if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
+ if ((port_speed == 0x03) == (hcd->speed >= HCD_USB3))
num_similar_speed_ports++;
}
return num_similar_speed_ports;
@@ -1515,7 +1515,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
/* Find the right roothub. */
hcd = xhci_to_hcd(xhci);
- if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
+ if ((major_revision == 0x03) != (hcd->speed >= HCD_USB3))
hcd = xhci->shared_hcd;
if (major_revision == 0) {
@@ -1541,7 +1541,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
* correct bus_state structure.
*/
bus_state = &xhci->bus_state[hcd_index(hcd)];
- if (hcd->speed == HCD_USB3)
+ if (hcd->speed >= HCD_USB3)
port_array = xhci->usb3_ports;
else
port_array = xhci->usb2_ports;
@@ -1555,7 +1555,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
usb_hcd_resume_root_hub(hcd);
}
- if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
+ if (hcd->speed >= HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
@@ -1567,7 +1567,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
goto cleanup;
}
- if (DEV_SUPERSPEED(temp)) {
+ if (DEV_SUPERSPEED_ANY(temp)) {
xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
/* Set a flag to say the port signaled remote wakeup,
* so we can tell the difference between the end of
@@ -1595,7 +1595,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
}
if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
- DEV_SUPERSPEED(temp)) {
+ DEV_SUPERSPEED_ANY(temp)) {
xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
/* We've just brought the device into U0 through either the
* Resume state after a device remote wakeup, or through the
@@ -1625,7 +1625,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
* RExit to a disconnect state). If so, let the driver know it's
* out of the RExit state.
*/
- if (!DEV_SUPERSPEED(temp) &&
+ if (!DEV_SUPERSPEED_ANY(temp) &&
test_and_clear_bit(faked_port_index,
&bus_state->rexit_ports)) {
complete(&bus_state->rexit_done[faked_port_index]);
@@ -1633,7 +1633,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
goto cleanup;
}
- if (hcd->speed != HCD_USB3)
+ if (hcd->speed < HCD_USB3)
xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
PORT_PLC);
@@ -3029,21 +3029,6 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
/*
- * The TD size is the number of bytes remaining in the TD (including this TRB),
- * right shifted by 10.
- * It must fit in bits 21:17, so it can't be bigger than 31.
- */
-static u32 xhci_td_remainder(unsigned int remainder)
-{
- u32 max = (1 << (21 - 17 + 1)) - 1;
-
- if ((remainder >> 10) >= max)
- return max << 17;
- else
- return (remainder >> 10) << 17;
-}
-
-/*
* For xHCI 1.0 host controllers, TD size is the number of max packet sized
* packets remaining in the TD (*not* including this TRB).
*
@@ -3055,30 +3040,36 @@ static u32 xhci_td_remainder(unsigned int remainder)
*
* TD size = total_packet_count - packets_transferred
*
- * It must fit in bits 21:17, so it can't be bigger than 31.
+ * For xHCI 0.96 and older, TD size field should be the remaining bytes
+ * including this TRB, right shifted by 10
+ *
+ * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
+ * This is taken care of in the TRB_TD_SIZE() macro
+ *
* The last TRB in a TD must have the TD size set to zero.
*/
-static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
- unsigned int total_packet_count, struct urb *urb,
- unsigned int num_trbs_left)
+static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
+ int trb_buff_len, unsigned int td_total_len,
+ struct urb *urb, unsigned int num_trbs_left)
{
- int packets_transferred;
+ u32 maxp, total_packet_count;
+
+ if (xhci->hci_version < 0x100)
+ return ((td_total_len - transferred) >> 10);
+
+ maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+ total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
/* One TRB with a zero-length data packet. */
- if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
+ if (num_trbs_left == 0 || (transferred == 0 && trb_buff_len == 0) ||
+ trb_buff_len == td_total_len)
return 0;
- /* All the TRB queueing functions don't count the current TRB in
- * running_total.
- */
- packets_transferred = (running_total + trb_buff_len) /
- GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
-
- if ((total_packet_count - packets_transferred) > 31)
- return 31 << 17;
- return (total_packet_count - packets_transferred) << 17;
+ /* Queueing functions don't count the current TRB into transferred */
+ return (total_packet_count - ((transferred + trb_buff_len) / maxp));
}
+
static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
@@ -3200,17 +3191,12 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
/* Set the TRB length, TD size, and interrupter fields. */
- if (xhci->hci_version < 0x100) {
- remainder = xhci_td_remainder(
- urb->transfer_buffer_length -
- running_total);
- } else {
- remainder = xhci_v1_0_td_remainder(running_total,
- trb_buff_len, total_packet_count, urb,
- num_trbs - 1);
- }
+ remainder = xhci_td_remainder(xhci, running_total, trb_buff_len,
+ urb->transfer_buffer_length,
+ urb, num_trbs - 1);
+
length_field = TRB_LEN(trb_buff_len) |
- remainder |
+ TRB_TD_SIZE(remainder) |
TRB_INTR_TARGET(0);
if (num_trbs > 1)
@@ -3373,17 +3359,12 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field |= TRB_ISP;
/* Set the TRB length, TD size, and interrupter fields. */
- if (xhci->hci_version < 0x100) {
- remainder = xhci_td_remainder(
- urb->transfer_buffer_length -
- running_total);
- } else {
- remainder = xhci_v1_0_td_remainder(running_total,
- trb_buff_len, total_packet_count, urb,
- num_trbs - 1);
- }
+ remainder = xhci_td_remainder(xhci, running_total, trb_buff_len,
+ urb->transfer_buffer_length,
+ urb, num_trbs - 1);
+
length_field = TRB_LEN(trb_buff_len) |
- remainder |
+ TRB_TD_SIZE(remainder) |
TRB_INTR_TARGET(0);
if (num_trbs > 1)
@@ -3421,7 +3402,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct usb_ctrlrequest *setup;
struct xhci_generic_trb *start_trb;
int start_cycle;
- u32 field, length_field;
+ u32 field, length_field, remainder;
struct urb_priv *urb_priv;
struct xhci_td *td;
@@ -3494,9 +3475,15 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
else
field = TRB_TYPE(TRB_DATA);
+ remainder = xhci_td_remainder(xhci, 0,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer_length,
+ urb, 1);
+
length_field = TRB_LEN(urb->transfer_buffer_length) |
- xhci_td_remainder(urb->transfer_buffer_length) |
+ TRB_TD_SIZE(remainder) |
TRB_INTR_TARGET(0);
+
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType & USB_DIR_IN)
field |= TRB_DIR_IN;
@@ -3825,17 +3812,12 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
trb_buff_len = td_remain_len;
/* Set the TRB length, TD size, & interrupter fields. */
- if (xhci->hci_version < 0x100) {
- remainder = xhci_td_remainder(
- td_len - running_total);
- } else {
- remainder = xhci_v1_0_td_remainder(
- running_total, trb_buff_len,
- total_packet_count, urb,
- (trbs_per_td - j - 1));
- }
+ remainder = xhci_td_remainder(xhci, running_total,
+ trb_buff_len, td_len,
+ urb, trbs_per_td - j - 1);
+
length_field = TRB_LEN(trb_buff_len) |
- remainder |
+ TRB_TD_SIZE(remainder) |
TRB_INTR_TARGET(0);
queue_trb(xhci, ep_ring, more_trbs_coming,
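A worked example of the unified TD size calculation (sketch only, assuming a bulk endpoint with a 512-byte max packet and a 1600-byte TD queued as three TRBs of 512, 512 and 576 bytes):

	/* xHCI 1.0+ host: total_packet_count = DIV_ROUND_UP(1600, 512) = 4 */
	/* TRB 1: transferred=0,   trb_buff_len=512, num_trbs_left=2 -> 4 - 512/512  = 3 */
	/* TRB 2: transferred=512, trb_buff_len=512, num_trbs_left=1 -> 4 - 1024/512 = 2 */
	/* TRB 3: num_trbs_left=0                                    -> 0 (last TRB) */
	/* pre-1.0 host, TRB 1: (1600 - 0) >> 10 = 1; TRB_TD_SIZE() caps any value at 31 */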
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 9957bd96d4bc..6e7dc6f93978 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3973,7 +3973,7 @@ int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
__le32 __iomem *addr;
int raw_port;
- if (hcd->speed != HCD_USB3)
+ if (hcd->speed < HCD_USB3)
addr = xhci->usb2_ports[port1 - 1];
else
addr = xhci->usb3_ports[port1 - 1];
@@ -4124,7 +4124,7 @@ int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
int hird, exit_latency;
int ret;
- if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
+ if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
!udev->lpm_capable)
return -EPERM;
@@ -4241,7 +4241,7 @@ int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int portnum = udev->portnum - 1;
- if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
+ if (hcd->speed >= HCD_USB3 || !xhci->sw_lpm_support ||
!udev->lpm_capable)
return 0;
@@ -4841,8 +4841,9 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
/* XHCI controllers don't stop the ep queue on short packets :| */
hcd->self.no_stop_on_short = 1;
+ xhci = hcd_to_xhci(hcd);
+
if (usb_hcd_is_primary_hcd(hcd)) {
- xhci = hcd_to_xhci(hcd);
xhci->main_hcd = hcd;
/* Mark the first roothub as being USB 2.0.
* The xHCI driver will register the USB 3.0 roothub.
@@ -4856,6 +4857,10 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
*/
hcd->has_tt = 1;
} else {
+ if (xhci->sbrn == 0x31) {
+ xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n");
+ hcd->speed = HCD_USB31;
+ }
/* xHCI private pointer was set in xhci_pci_probe for the second
* registered roothub.
*/
@@ -4875,6 +4880,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
xhci->hci_version = HC_VERSION(xhci->hcc_params);
xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
+ if (xhci->hci_version > 0x100)
+ xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
xhci_print_registers(xhci);
xhci->quirks = quirks;
@@ -4906,6 +4913,16 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
!dma_set_mask(dev, DMA_BIT_MASK(64))) {
xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
+ } else {
+ /*
+ * This is to avoid error in cases where a 32-bit USB
+ * controller is used on a 64-bit capable system.
+ */
+ retval = dma_set_mask(dev, DMA_BIT_MASK(32));
+ if (retval)
+ return retval;
+ xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
}
xhci_dbg(xhci, "Calling HCD init\n");
@@ -5020,7 +5037,7 @@ static int __init xhci_hcd_init(void)
BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
- BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
+ BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index dbda41e91c84..be9048e2d4d4 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -29,6 +29,8 @@
#include <linux/kernel.h>
#include <linux/usb/hcd.h>
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
/* Code sharing between pci-quirks and xhci hcd */
#include "xhci-ext-caps.h"
#include "pci-quirks.h"
@@ -56,6 +58,7 @@
* @hcc_params: HCCPARAMS - Capability Parameters
* @db_off: DBOFF - Doorbell array offset
* @run_regs_off: RTSOFF - Runtime register space offset
+ * @hcc_params2: HCCPARAMS2 Capability Parameters 2, xhci 1.1 only
*/
struct xhci_cap_regs {
__le32 hc_capbase;
@@ -65,6 +68,7 @@ struct xhci_cap_regs {
__le32 hcc_params;
__le32 db_off;
__le32 run_regs_off;
+ __le32 hcc_params2; /* xhci 1.1 */
/* Reserved up to (CAPLENGTH - 0x1C) */
};
@@ -134,6 +138,21 @@ struct xhci_cap_regs {
/* run_regs_off bitmask - bits 0:4 reserved */
#define RTSOFF_MASK (~0x1f)
+/* HCCPARAMS2 - hcc_params2 - bitmasks */
+/* true: HC supports U3 entry Capability */
+#define HCC2_U3C(p) ((p) & (1 << 0))
+/* true: HC supports Configure endpoint command Max exit latency too large */
+#define HCC2_CMC(p) ((p) & (1 << 1))
+/* true: HC supports Force Save context Capability */
+#define HCC2_FSC(p) ((p) & (1 << 2))
+/* true: HC supports Compliance Transition Capability */
+#define HCC2_CTC(p) ((p) & (1 << 3))
+/* true: HC supports Large ESIT payload Capability > 48k */
+#define HCC2_LEC(p) ((p) & (1 << 4))
+/* true: HC supports Configuration Information Capability */
+#define HCC2_CIC(p) ((p) & (1 << 5))
+/* true: HC supports Extended TBC Capability, Isoc burst count > 65535 */
+#define HCC2_ETC(p) ((p) & (1 << 6))
/* Number of registers per port */
#define NUM_PORT_REGS 4
@@ -269,7 +288,11 @@ struct xhci_op_regs {
/* CONFIG - Configure Register - config_reg bitmasks */
/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
#define MAX_DEVS(p) ((p) & 0xff)
-/* bits 8:31 - reserved and should be preserved */
+/* bit 8: U3 Entry Enabled, assert PLC when root port enters U3, xhci 1.1 */
+#define CONFIG_U3E (1 << 8)
+/* bit 9: Configuration Information Enable, xhci 1.1 */
+#define CONFIG_CIE (1 << 9)
+/* bits 10:31 - reserved and should be preserved */
/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
/* true: device connected */
@@ -306,11 +329,16 @@ struct xhci_op_regs {
#define XDEV_LS (0x2 << 10)
#define XDEV_HS (0x3 << 10)
#define XDEV_SS (0x4 << 10)
+#define XDEV_SSP (0x5 << 10)
#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0<<10))
#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS)
#define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_LS)
#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS)
#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS)
+#define DEV_SUPERSPEEDPLUS(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP)
+#define DEV_SUPERSPEED_ANY(p) (((p) & DEV_SPEED_MASK) >= XDEV_SS)
+#define DEV_PORT_SPEED(p) (((p) >> 10) & 0x0f)
+
/* Bits 20:23 in the Slot Context are the speed for the device */
#define SLOT_SPEED_FS (XDEV_FS << 10)
#define SLOT_SPEED_LS (XDEV_LS << 10)
@@ -394,6 +422,9 @@ struct xhci_op_regs {
#define PORT_L1DS(p) (((p) & 0xff) << 8)
#define PORT_HLE (1 << 16)
+/* USB3 Protocol PORTLI Port Link Information */
+#define PORT_RX_LANES(p) (((p) >> 16) & 0xf)
+#define PORT_TX_LANES(p) (((p) >> 20) & 0xf)
/* USB2 Protocol PORTHLPMC */
#define PORT_HIRDM(p)((p) & 3)
@@ -519,9 +550,23 @@ struct xhci_protocol_caps {
};
#define XHCI_EXT_PORT_MAJOR(x) (((x) >> 24) & 0xff)
+#define XHCI_EXT_PORT_MINOR(x) (((x) >> 16) & 0xff)
+#define XHCI_EXT_PORT_PSIC(x) (((x) >> 28) & 0x0f)
#define XHCI_EXT_PORT_OFF(x) ((x) & 0xff)
#define XHCI_EXT_PORT_COUNT(x) (((x) >> 8) & 0xff)
+#define XHCI_EXT_PORT_PSIV(x) (((x) >> 0) & 0x0f)
+#define XHCI_EXT_PORT_PSIE(x) (((x) >> 4) & 0x03)
+#define XHCI_EXT_PORT_PLT(x) (((x) >> 6) & 0x03)
+#define XHCI_EXT_PORT_PFD(x) (((x) >> 8) & 0x01)
+#define XHCI_EXT_PORT_LP(x) (((x) >> 14) & 0x03)
+#define XHCI_EXT_PORT_PSIM(x) (((x) >> 16) & 0xffff)
+
+#define PLT_MASK (0x03 << 6)
+#define PLT_SYM (0x00 << 6)
+#define PLT_ASYM_RX (0x02 << 6)
+#define PLT_ASYM_TX (0x03 << 6)
+
/**
* struct xhci_container_ctx
* @type: Type of context. Used to calculated offsets to contained contexts.
@@ -1136,6 +1181,8 @@ enum xhci_setup_dev {
/* Normal TRB fields */
/* transfer_len bitmasks - bits 0:16 */
#define TRB_LEN(p) ((p) & 0x1ffff)
+/* TD Size, packets remaining in this TD, bits 21:17 (5 bits, so max 31) */
+#define TRB_TD_SIZE(p) (min((p), (u32)31) << 17)
/* Interrupter Target - which MSI-X vector to target the completion event at */
#define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22)
#define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff)
@@ -1448,6 +1495,14 @@ static inline unsigned int hcd_index(struct usb_hcd *hcd)
return 1;
}
+struct xhci_hub {
+ u8 maj_rev;
+ u8 min_rev;
+ u32 *psi; /* array of protocol speed ID entries */
+ u8 psi_count;
+ u8 psi_uid_count;
+};
+
/* There is one xhci_hcd structure per controller */
struct xhci_hcd {
struct usb_hcd *main_hcd;
@@ -1465,6 +1520,7 @@ struct xhci_hcd {
__u32 hcs_params2;
__u32 hcs_params3;
__u32 hcc_params;
+ __u32 hcc_params2;
spinlock_t lock;
@@ -1586,6 +1642,8 @@ struct xhci_hcd {
unsigned int num_usb3_ports;
/* Array of pointers to USB 2.0 PORTSC registers */
__le32 __iomem **usb2_ports;
+ struct xhci_hub usb2_rhub;
+ struct xhci_hub usb3_rhub;
unsigned int num_usb2_ports;
/* support xHCI 0.96 spec USB2 software LPM */
unsigned sw_lpm_support:1;
@@ -1651,20 +1709,12 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
__le64 __iomem *regs)
{
- __u32 __iomem *ptr = (__u32 __iomem *) regs;
- u64 val_lo = readl(ptr);
- u64 val_hi = readl(ptr + 1);
- return val_lo + (val_hi << 32);
+ return lo_hi_readq(regs);
}
static inline void xhci_write_64(struct xhci_hcd *xhci,
const u64 val, __le64 __iomem *regs)
{
- __u32 __iomem *ptr = (__u32 __iomem *) regs;
- u32 val_lo = lower_32_bits(val);
- u32 val_hi = upper_32_bits(val);
-
- writel(val_lo, ptr);
- writel(val_hi, ptr + 1);
+ lo_hi_writeq(val, regs);
}
static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
index 3ad5d19e4d04..23c794813e6a 100644
--- a/drivers/usb/misc/chaoskey.c
+++ b/drivers/usb/misc/chaoskey.c
@@ -472,7 +472,7 @@ static int chaoskey_rng_read(struct hwrng *rng, void *data,
if (this_time > max)
this_time = max;
- memcpy(data, dev->buf, this_time);
+ memcpy(data, dev->buf + dev->used, this_time);
dev->used += this_time;
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 7b98e1d9194c..d82fa36c3465 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -476,6 +476,11 @@ static const struct of_device_id usbhs_of_match[] = {
.compatible = "renesas,usbhs-r8a7794",
.data = (void *)USBHS_TYPE_RCAR_GEN2,
},
+ {
+ /* Gen3 is compatible with Gen2 */
+ .compatible = "renesas,usbhs-r8a7795",
+ .data = (void *)USBHS_TYPE_RCAR_GEN2,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, usbhs_of_match);
@@ -493,7 +498,7 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev)
return NULL;
dparam = &info->driver_param;
- dparam->type = of_id ? (u32)of_id->data : 0;
+ dparam->type = of_id ? (uintptr_t)of_id->data : 0;
if (!of_property_read_u32(dev->of_node, "renesas,buswait", &tmp))
dparam->buswait_bwait = tmp;
gpio = of_get_named_gpio_flags(dev->of_node, "renesas,enable-gpio", 0,
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 1bac215202d2..39afd7045c43 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1456,30 +1456,26 @@ static void isd200_free_info_ptrs(void *info_)
*/
static int isd200_init_info(struct us_data *us)
{
- int retStatus = ISD200_GOOD;
struct isd200_info *info;
info = kzalloc(sizeof(struct isd200_info), GFP_KERNEL);
if (!info)
- retStatus = ISD200_ERROR;
- else {
- info->id = kzalloc(ATA_ID_WORDS * 2, GFP_KERNEL);
- info->RegsBuf = kmalloc(sizeof(info->ATARegs), GFP_KERNEL);
- info->srb.sense_buffer =
- kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
- if (!info->id || !info->RegsBuf || !info->srb.sense_buffer) {
- isd200_free_info_ptrs(info);
- kfree(info);
- retStatus = ISD200_ERROR;
- }
- }
+ return ISD200_ERROR;
- if (retStatus == ISD200_GOOD) {
- us->extra = info;
- us->extra_destructor = isd200_free_info_ptrs;
+ info->id = kzalloc(ATA_ID_WORDS * 2, GFP_KERNEL);
+ info->RegsBuf = kmalloc(sizeof(info->ATARegs), GFP_KERNEL);
+ info->srb.sense_buffer = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+
+ if (!info->id || !info->RegsBuf || !info->srb.sense_buffer) {
+ isd200_free_info_ptrs(info);
+ kfree(info);
+ return ISD200_ERROR;
}
- return retStatus;
+ us->extra = info;
+ us->extra_destructor = isd200_free_info_ptrs;
+
+ return ISD200_GOOD;
}
/**************************************************************************
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index f68921909552..48ca9c204354 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -257,17 +257,16 @@ static void uas_stat_cmplt(struct urb *urb)
struct uas_cmd_info *cmdinfo;
unsigned long flags;
unsigned int idx;
+ int status = urb->status;
spin_lock_irqsave(&devinfo->lock, flags);
if (devinfo->resetting)
goto out;
- if (urb->status) {
- if (urb->status != -ENOENT && urb->status != -ECONNRESET) {
- dev_err(&urb->dev->dev, "stat urb: status %d\n",
- urb->status);
- }
+ if (status) {
+ if (status != -ENOENT && status != -ECONNRESET && status != -ESHUTDOWN)
+ dev_err(&urb->dev->dev, "stat urb: status %d\n", status);
goto out;
}
@@ -348,6 +347,7 @@ static void uas_data_cmplt(struct urb *urb)
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
struct scsi_data_buffer *sdb = NULL;
unsigned long flags;
+ int status = urb->status;
spin_lock_irqsave(&devinfo->lock, flags);
@@ -374,9 +374,9 @@ static void uas_data_cmplt(struct urb *urb)
goto out;
}
- if (urb->status) {
- if (urb->status != -ENOENT && urb->status != -ECONNRESET)
- uas_log_cmd_state(cmnd, "data cmplt err", urb->status);
+ if (status) {
+ if (status != -ENOENT && status != -ECONNRESET && status != -ESHUTDOWN)
+ uas_log_cmd_state(cmnd, "data cmplt err", status);
/* error: no data transferred */
sdb->resid = sdb->length;
} else {
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index e9ef1eccdace..7fbe19d5279e 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -218,7 +218,7 @@ static inline void hub_descriptor(struct usb_hub_descriptor *desc)
memset(desc, 0, sizeof(*desc));
desc->bDescriptorType = USB_DT_HUB;
desc->bDescLength = 9;
- desc->wHubCharacteristics = __constant_cpu_to_le16(
+ desc->wHubCharacteristics = cpu_to_le16(
HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM);
desc->bNbrPorts = VHCI_NPORTS;
desc->u.hs.DeviceRemovable[0] = 0xff;
@@ -565,7 +565,9 @@ no_need_xmit:
usb_hcd_unlink_urb_from_ep(hcd, urb);
no_need_unlink:
spin_unlock(&the_controller->lock);
- usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
+ if (!ret)
+ usb_hcd_giveback_urb(vhci_to_hcd(the_controller),
+ urb, urb->status);
return ret;
}
@@ -629,7 +631,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
/* URB was never linked! or will be soon given back by
* vhci_rx. */
spin_unlock(&the_controller->lock);
- return 0;
+ return -EIDRM;
}
{
diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c
index 0e5fde1d3ffb..9f9a7bef1ff6 100644
--- a/drivers/video/fbdev/broadsheetfb.c
+++ b/drivers/video/fbdev/broadsheetfb.c
@@ -752,7 +752,7 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev,
if ((fw_entry->size < 8*1024) || (fw_entry->size > 64*1024)) {
dev_err(dev, "Invalid waveform\n");
err = -EINVAL;
- goto err_failed;
+ goto err_fw;
}
mutex_lock(&(par->io_lock));
@@ -762,13 +762,15 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev,
mutex_unlock(&(par->io_lock));
if (err < 0) {
dev_err(dev, "Failed to store broadsheet waveform\n");
- goto err_failed;
+ goto err_fw;
}
dev_info(dev, "Stored broadsheet waveform, size %zd\n", fw_entry->size);
- return len;
+ err = len;
+err_fw:
+ release_firmware(fw_entry);
err_failed:
return err;
}
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index 7fa2e6f9e322..b335c1ae8625 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -1628,9 +1628,16 @@ static int fsl_diu_suspend(struct platform_device *ofdev, pm_message_t state)
static int fsl_diu_resume(struct platform_device *ofdev)
{
struct fsl_diu_data *data;
+ unsigned int i;
data = dev_get_drvdata(&ofdev->dev);
- enable_lcdc(data->fsl_diu_info);
+
+ fsl_diu_enable_interrupts(data);
+ update_lcdc(data->fsl_diu_info);
+ for (i = 0; i < NUM_AOIS; i++) {
+ if (data->mfb[i].count)
+ fsl_diu_enable_panel(&data->fsl_diu_info[i]);
+ }
return 0;
}
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index 9b8bebdf8f86..f9ec5c0484fa 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
@@ -831,6 +831,7 @@ static struct of_device_id of_platform_mb862xx_tbl[] = {
{ .compatible = "fujitsu,coral", },
{ /* end */ }
};
+MODULE_DEVICE_TABLE(of, of_platform_mb862xx_tbl);
static struct platform_driver of_platform_mb862xxfb_driver = {
.driver = {
diff --git a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
index a8ce920fa797..d811e6dcaef7 100644
--- a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
+++ b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
@@ -294,7 +294,7 @@ static int dvic_probe_of(struct platform_device *pdev)
adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
if (adapter_node) {
- adapter = of_find_i2c_adapter_by_node(adapter_node);
+ adapter = of_get_i2c_adapter_by_node(adapter_node);
if (adapter == NULL) {
dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
omap_dss_put_device(ddata->in);
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
index 90cbc4c3406c..c581231c74a5 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
@@ -898,6 +898,7 @@ static const struct of_device_id acx565akm_of_match[] = {
{ .compatible = "omapdss,sony,acx565akm", },
{},
};
+MODULE_DEVICE_TABLE(of, acx565akm_of_match);
static struct spi_driver acx565akm_driver = {
.driver = {
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index 7ed9a227f5ea..01b43e9ce941 100644
--- a/drivers/video/fbdev/tridentfb.c
+++ b/drivers/video/fbdev/tridentfb.c
@@ -226,7 +226,7 @@ static void blade_image_blit(struct tridentfb_par *par, const char *data,
writemmr(par, DST1, point(x, y));
writemmr(par, DST2, point(x + w - 1, y + h - 1));
- memcpy(par->io_virt + 0x10000, data, 4 * size);
+ iowrite32_rep(par->io_virt + 0x10000, data, size);
}
static void blade_copy_rect(struct tridentfb_par *par,
@@ -673,8 +673,14 @@ static int get_nativex(struct tridentfb_par *par)
static inline void set_lwidth(struct tridentfb_par *par, int width)
{
write3X4(par, VGA_CRTC_OFFSET, width & 0xFF);
- write3X4(par, AddColReg,
- (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4));
+ /* chips older than TGUI9660 have only 1 width bit in AddColReg */
+ /* touching the other one breaks I2C/DDC */
+ if (par->chip_id == TGUI9440 || par->chip_id == CYBER9320)
+ write3X4(par, AddColReg,
+ (read3X4(par, AddColReg) & 0xEF) | ((width & 0x100) >> 4));
+ else
+ write3X4(par, AddColReg,
+ (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4));
}
/* For resolutions smaller than FP resolution stretch */
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index 32d8275e4c88..8a1076beecd3 100644
--- a/drivers/video/of_display_timing.c
+++ b/drivers/video/of_display_timing.c
@@ -210,6 +210,7 @@ struct display_timings *of_get_display_timings(struct device_node *np)
*/
pr_err("%s: error in timing %d\n",
of_node_full_name(np), disp->num_timings + 1);
+ kfree(dt);
goto timingfail;
}
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index c68edc16aa54..79e1aa1b0959 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -817,8 +817,9 @@ config ITCO_WDT
tristate "Intel TCO Timer/Watchdog"
depends on (X86 || IA64) && PCI
select WATCHDOG_CORE
+ depends on I2C || I2C=n
select LPC_ICH if !EXPERT
- select I2C_I801 if !EXPERT
+ select I2C_I801 if !EXPERT && I2C
---help---
Hardware driver for the intel TCO timer based watchdog devices.
These drivers are included in the Intel 82801 I/O Controller
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index 66c3e656a616..8a5ce5b5a0b6 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -36,6 +36,13 @@
#define PM_RSTC_WRCFG_FULL_RESET 0x00000020
#define PM_RSTC_RESET 0x00000102
+/*
+ * The Raspberry Pi firmware uses the RSTS register to know which partition
+ * to boot from. The partition value is spread into bits 0, 2, 4, 6, 8, 10.
+ * Partition 63 is a special partition used by the firmware to indicate halt.
+ */
+#define PM_RSTS_RASPBERRYPI_HALT 0x555
+
#define SECS_TO_WDOG_TICKS(x) ((x) << 16)
#define WDOG_TICKS_TO_SECS(x) ((x) >> 16)
@@ -151,8 +158,7 @@ static void bcm2835_power_off(void)
* hard reset.
*/
val = readl_relaxed(wdt->base + PM_RSTS);
- val &= PM_RSTC_WRCFG_CLR;
- val |= PM_PASSWORD | PM_RSTS_HADWRH_SET;
+ val |= PM_PASSWORD | PM_RSTS_RASPBERRYPI_HALT;
writel_relaxed(val, wdt->base + PM_RSTS);
/* Continue with normal reset mechanism */
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index cc1bdfc2ff71..006e2348022c 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -303,6 +303,7 @@ static const struct of_device_id gef_wdt_ids[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, gef_wdt_ids);
static struct platform_driver gef_wdt_driver = {
.driver = {
diff --git a/drivers/watchdog/mena21_wdt.c b/drivers/watchdog/mena21_wdt.c
index 69013007dc47..098fa9c34d6d 100644
--- a/drivers/watchdog/mena21_wdt.c
+++ b/drivers/watchdog/mena21_wdt.c
@@ -253,6 +253,7 @@ static const struct of_device_id a21_wdt_ids[] = {
{ .compatible = "men,a021-wdt" },
{ },
};
+MODULE_DEVICE_TABLE(of, a21_wdt_ids);
static struct platform_driver a21_wdt_driver = {
.probe = a21_wdt_probe,
diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c
index 2789da2c0515..60b0605bd7e6 100644
--- a/drivers/watchdog/moxart_wdt.c
+++ b/drivers/watchdog/moxart_wdt.c
@@ -168,6 +168,7 @@ static const struct of_device_id moxart_watchdog_match[] = {
{ .compatible = "moxa,moxart-watchdog" },
{ },
};
+MODULE_DEVICE_TABLE(of, moxart_watchdog_match);
static struct platform_driver moxart_wdt_driver = {
.probe = moxart_wdt_probe,