Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/apei/ghes.c | 7
-rw-r--r--  drivers/acpi/device_pm.c | 10
-rw-r--r--  drivers/acpi/scan.c | 5
-rw-r--r--  drivers/acpi/video.c | 19
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c | 6
-rw-r--r--  drivers/base/regmap/regcache.c | 20
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c | 5
-rw-r--r--  drivers/block/cciss.c | 32
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 8
-rw-r--r--  drivers/block/nvme-core.c | 62
-rw-r--r--  drivers/block/nvme-scsi.c | 3
-rw-r--r--  drivers/block/pktcdvd.c | 3
-rw-r--r--  drivers/block/rbd.c | 33
-rw-r--r--  drivers/bluetooth/Kconfig | 4
-rw-r--r--  drivers/bluetooth/btmrvl_main.c | 9
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.c | 28
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/cpufreq-cpu0.c | 5
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 3
-rw-r--r--  drivers/crypto/sahara.c | 2
-rw-r--r--  drivers/dma/dmatest.c | 45
-rw-r--r--  drivers/dma/ste_dma40.c | 8
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_display.c | 30
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 26
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/class.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 53
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 10
-rw-r--r--  drivers/gpu/drm/tilcdc/Kconfig | 1
-rw-r--r--  drivers/hid/hid-multitouch.c | 11
-rw-r--r--  drivers/hwmon/adm1021.c | 58
-rw-r--r--  drivers/infiniband/hw/qib/qib_keys.c | 2
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c | 1
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h | 1
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c | 1
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c | 1
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 16
-rw-r--r--  drivers/irqchip/irq-mxs.c | 14
-rw-r--r--  drivers/irqchip/irq-versatile-fpga.c | 2
-rw-r--r--  drivers/irqchip/irq-vic.c | 2
-rw-r--r--  drivers/md/bcache/Kconfig | 1
-rw-r--r--  drivers/md/bcache/bcache.h | 2
-rw-r--r--  drivers/md/bcache/stats.c | 34
-rw-r--r--  drivers/md/bcache/super.c | 185
-rw-r--r--  drivers/md/bcache/writeback.c | 2
-rw-r--r--  drivers/md/md.c | 2
-rw-r--r--  drivers/md/raid1.c | 38
-rw-r--r--  drivers/md/raid10.c | 29
-rw-r--r--  drivers/md/raid5.c | 6
-rw-r--r--  drivers/misc/mei/init.c | 4
-rw-r--r--  drivers/misc/mei/nfc.c | 2
-rw-r--r--  drivers/misc/mei/pci-me.c | 1
-rw-r--r--  drivers/misc/sgi-gru/grufile.c | 1
-rw-r--r--  drivers/net/bonding/bond_main.c | 19
-rw-r--r--  drivers/net/bonding/bonding.h | 2
-rw-r--r--  drivers/net/can/usb/usb_8dev.c | 5
-rw-r--r--  drivers/net/ethernet/atheros/Kconfig | 18
-rw-r--r--  drivers/net/ethernet/atheros/Makefile | 1
-rw-r--r--  drivers/net/ethernet/atheros/alx/Makefile | 3
-rw-r--r--  drivers/net/ethernet/atheros/alx/alx.h | 114
-rw-r--r--  drivers/net/ethernet/atheros/alx/ethtool.c | 272
-rw-r--r--  drivers/net/ethernet/atheros/alx/hw.c | 1226
-rw-r--r--  drivers/net/ethernet/atheros/alx/hw.h | 499
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 1625
-rw-r--r--  drivers/net/ethernet/atheros/alx/reg.h | 810
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 10
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/interrupt.c | 6
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 3
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 19
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c | 14
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 4
-rw-r--r--  drivers/net/macvlan.c | 20
-rw-r--r--  drivers/net/team/team.c | 2
-rw-r--r--  drivers/net/team/team_mode_random.c | 2
-rw-r--r--  drivers/net/team/team_mode_roundrobin.c | 2
-rw-r--r--  drivers/net/tun.c | 4
-rw-r--r--  drivers/net/usb/cdc_ether.c | 6
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/vxlan.c | 40
-rw-r--r--  drivers/net/wireless/ath/ath9k/Kconfig | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/Makefile | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath9k/rc.h | 2
-rw-r--r--  drivers/net/wireless/b43/main.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c | 4
-rw-r--r--  drivers/net/wireless/brcm80211/brcmsmac/main.c | 17
-rw-r--r--  drivers/net/wireless/iwlegacy/3945-rs.c | 1
-rw-r--r--  drivers/net/wireless/iwlegacy/4965-rs.c | 2
-rw-r--r--  drivers/net/wireless/iwlegacy/common.h | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/rs.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/rxon.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rs.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c | 3
-rw-r--r--  drivers/net/wireless/mwifiex/debugfs.c | 22
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 29
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 1
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/hw.c | 134
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/hw.h | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/mac.c | 18
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/sw.h | 3
-rw-r--r--  drivers/net/wireless/rtlwifi/usb.c | 13
-rw-r--r--  drivers/net/wireless/rtlwifi/wifi.h | 4
-rw-r--r--  drivers/net/wireless/ti/wl12xx/scan.c | 2
-rw-r--r--  drivers/net/wireless/ti/wl12xx/wl12xx.h | 6
-rw-r--r--  drivers/net/wireless/ti/wl18xx/scan.c | 2
-rw-r--r--  drivers/net/xen-netback/netback.c | 8
-rw-r--r--  drivers/of/base.c | 15
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a7779.c | 45
-rw-r--r--  drivers/platform/x86/hp-wmi.c | 2
-rw-r--r--  drivers/rtc/rtc-at91rm9200.c | 131
-rw-r--r--  drivers/rtc/rtc-cmos.c | 4
-rw-r--r--  drivers/rtc/rtc-tps6586x.c | 3
-rw-r--r--  drivers/rtc/rtc-twl.c | 1
-rw-r--r--  drivers/s390/net/netiucv.c | 6
-rw-r--r--  drivers/scsi/bfa/bfad_debugfs.c | 2
-rw-r--r--  drivers/scsi/fnic/fnic_debugfs.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 2
-rw-r--r--  drivers/spi/spi-sh-hspi.c | 2
-rw-r--r--  drivers/spi/spi-topcliff-pch.c | 3
-rw-r--r--  drivers/spi/spi-xilinx.c | 74
-rw-r--r--  drivers/usb/chipidea/core.c | 3
-rw-r--r--  drivers/usb/chipidea/udc.c | 13
-rw-r--r--  drivers/usb/serial/f81232.c | 8
-rw-r--r--  drivers/usb/serial/pl2303.c | 10
-rw-r--r--  drivers/usb/serial/spcp8x5.c | 10
-rw-r--r--  drivers/vfio/vfio.c | 2
-rw-r--r--  drivers/vhost/net.c | 29
-rw-r--r--  drivers/vhost/vhost.c | 8
-rw-r--r--  drivers/vhost/vhost.h | 1
-rw-r--r--  drivers/xen/tmem.c | 4
161 files changed, 5855 insertions(+), 665 deletions(-)
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 403baf4dffc1..fcd7d91cec34 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -919,13 +919,14 @@ static int ghes_probe(struct platform_device *ghes_dev)
break;
case ACPI_HEST_NOTIFY_EXTERNAL:
/* External interrupt vector is GSI */
- if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) {
+ rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
+ if (rc) {
pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
generic->header.source_id);
goto err_edac_unreg;
}
- if (request_irq(ghes->irq, ghes_irq_func,
- 0, "GHES IRQ", ghes)) {
+ rc = request_irq(ghes->irq, ghes_irq_func, 0, "GHES IRQ", ghes);
+ if (rc) {
pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
generic->header.source_id);
goto err_edac_unreg;
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index bc493aa3af19..318fa32a141e 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -278,11 +278,13 @@ int acpi_bus_init_power(struct acpi_device *device)
if (result)
return result;
} else if (state == ACPI_STATE_UNKNOWN) {
- /* No power resources and missing _PSC? Try to force D0. */
+ /*
+ * No power resources and missing _PSC? Cross fingers and make
+ * it D0 in hope that this is what the BIOS put the device into.
+ * [We tried to force D0 here by executing _PS0, but that broke
+ * Toshiba P870-303 in a nasty way.]
+ */
state = ACPI_STATE_D0;
- result = acpi_dev_pm_explicit_set(device, state);
- if (result)
- return result;
}
device->power.state = state;
return 0;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 44225cb15f3a..b14ac46948c9 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1017,11 +1017,8 @@ acpi_bus_driver_init(struct acpi_device *device, struct acpi_driver *driver)
return -ENOSYS;
result = driver->ops.add(device);
- if (result) {
- device->driver = NULL;
- device->driver_data = NULL;
+ if (result)
return result;
- }
device->driver = driver;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 5b32e15a65ce..440eadf2d32c 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -458,12 +458,28 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
},
{
.callback = video_ignore_initial_backlight,
+ .ident = "HP Pavilion g6 Notebook PC",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"),
+ },
+ },
+ {
+ .callback = video_ignore_initial_backlight,
.ident = "HP 1000 Notebook PC",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"),
},
},
+ {
+ .callback = video_ignore_initial_backlight,
+ .ident = "HP Pavilion m4",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion m4 Notebook PC"),
+ },
+ },
{}
};
@@ -1706,6 +1722,9 @@ static int acpi_video_bus_add(struct acpi_device *device)
int error;
acpi_status status;
+ if (device->handler)
+ return -EINVAL;
+
status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
device->parent->handle, 1,
acpi_video_bus_match, NULL,
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index aa0875f6f1b7..02f490bad30f 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -143,7 +143,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
int registers = 0;
int this_registers, average;
- map->lock(map);
+ map->lock(map->lock_arg);
mem_size = sizeof(*rbtree_ctx);
mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long);
@@ -170,7 +170,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
nodes, registers, average, mem_size);
- map->unlock(map);
+ map->unlock(map->lock_arg);
return 0;
}
@@ -391,8 +391,6 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
rbnode = rb_entry(node, struct regcache_rbtree_node, node);
- if (rbnode->base_reg < min)
- continue;
if (rbnode->base_reg > max)
break;
if (rbnode->base_reg + rbnode->blklen < min)
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 75923f2396bd..507ee2da0f6e 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -270,7 +270,7 @@ int regcache_sync(struct regmap *map)
BUG_ON(!map->cache_ops || !map->cache_ops->sync);
- map->lock(map);
+ map->lock(map->lock_arg);
/* Remember the initial bypass state */
bypass = map->cache_bypass;
dev_dbg(map->dev, "Syncing %s cache\n",
@@ -306,7 +306,7 @@ out:
trace_regcache_sync(map->dev, name, "stop");
/* Restore the bypass state */
map->cache_bypass = bypass;
- map->unlock(map);
+ map->unlock(map->lock_arg);
return ret;
}
@@ -333,7 +333,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
BUG_ON(!map->cache_ops || !map->cache_ops->sync);
- map->lock(map);
+ map->lock(map->lock_arg);
/* Remember the initial bypass state */
bypass = map->cache_bypass;
@@ -352,7 +352,7 @@ out:
trace_regcache_sync(map->dev, name, "stop region");
/* Restore the bypass state */
map->cache_bypass = bypass;
- map->unlock(map);
+ map->unlock(map->lock_arg);
return ret;
}
@@ -372,11 +372,11 @@ EXPORT_SYMBOL_GPL(regcache_sync_region);
*/
void regcache_cache_only(struct regmap *map, bool enable)
{
- map->lock(map);
+ map->lock(map->lock_arg);
WARN_ON(map->cache_bypass && enable);
map->cache_only = enable;
trace_regmap_cache_only(map->dev, enable);
- map->unlock(map);
+ map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
@@ -391,9 +391,9 @@ EXPORT_SYMBOL_GPL(regcache_cache_only);
*/
void regcache_mark_dirty(struct regmap *map)
{
- map->lock(map);
+ map->lock(map->lock_arg);
map->cache_dirty = true;
- map->unlock(map);
+ map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);
@@ -410,11 +410,11 @@ EXPORT_SYMBOL_GPL(regcache_mark_dirty);
*/
void regcache_cache_bypass(struct regmap *map, bool enable)
{
- map->lock(map);
+ map->lock(map->lock_arg);
WARN_ON(map->cache_only && enable);
map->cache_bypass = enable;
trace_regmap_cache_bypass(map->dev, enable);
- map->unlock(map);
+ map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 23b701f5fd2f..975719bc3450 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -265,6 +265,7 @@ static ssize_t regmap_map_write_file(struct file *file,
char *start = buf;
unsigned long reg, value;
struct regmap *map = file->private_data;
+ int ret;
buf_size = min(count, (sizeof(buf)-1));
if (copy_from_user(buf, user_buf, buf_size))
@@ -282,7 +283,9 @@ static ssize_t regmap_map_write_file(struct file *file,
/* Userspace has been fiddling around behind the kernel's back */
add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE);
- regmap_write(map, reg, value);
+ ret = regmap_write(map, reg, value);
+ if (ret < 0)
+ return ret;
return buf_size;
}
#else
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 6374dc103521..62b6c2cc80b5 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -168,8 +168,6 @@ static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
static int cciss_open(struct block_device *bdev, fmode_t mode);
static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
static void cciss_release(struct gendisk *disk, fmode_t mode);
-static int do_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg);
static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -235,7 +233,7 @@ static const struct block_device_operations cciss_fops = {
.owner = THIS_MODULE,
.open = cciss_unlocked_open,
.release = cciss_release,
- .ioctl = do_ioctl,
+ .ioctl = cciss_ioctl,
.getgeo = cciss_getgeo,
#ifdef CONFIG_COMPAT
.compat_ioctl = cciss_compat_ioctl,
@@ -1143,16 +1141,6 @@ static void cciss_release(struct gendisk *disk, fmode_t mode)
mutex_unlock(&cciss_mutex);
}
-static int do_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned cmd, unsigned long arg)
-{
- int ret;
- mutex_lock(&cciss_mutex);
- ret = cciss_ioctl(bdev, mode, cmd, arg);
- mutex_unlock(&cciss_mutex);
- return ret;
-}
-
#ifdef CONFIG_COMPAT
static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
@@ -1179,7 +1167,7 @@ static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
case CCISS_REGNEWD:
case CCISS_RESCANDISK:
case CCISS_GETLUNINFO:
- return do_ioctl(bdev, mode, cmd, arg);
+ return cciss_ioctl(bdev, mode, cmd, arg);
case CCISS_PASSTHRU32:
return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
@@ -1219,7 +1207,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
if (err)
return -EFAULT;
- err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
+ err = cciss_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
if (err)
return err;
err |=
@@ -1261,7 +1249,7 @@ static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
if (err)
return -EFAULT;
- err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
+ err = cciss_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
if (err)
return err;
err |=
@@ -1311,11 +1299,14 @@ static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp)
static int cciss_getintinfo(ctlr_info_t *h, void __user *argp)
{
cciss_coalint_struct intinfo;
+ unsigned long flags;
if (!argp)
return -EINVAL;
+ spin_lock_irqsave(&h->lock, flags);
intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay);
intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount);
+ spin_unlock_irqrestore(&h->lock, flags);
if (copy_to_user
(argp, &intinfo, sizeof(cciss_coalint_struct)))
return -EFAULT;
@@ -1356,12 +1347,15 @@ static int cciss_setintinfo(ctlr_info_t *h, void __user *argp)
static int cciss_getnodename(ctlr_info_t *h, void __user *argp)
{
NodeName_type NodeName;
+ unsigned long flags;
int i;
if (!argp)
return -EINVAL;
+ spin_lock_irqsave(&h->lock, flags);
for (i = 0; i < 16; i++)
NodeName[i] = readb(&h->cfgtable->ServerName[i]);
+ spin_unlock_irqrestore(&h->lock, flags);
if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
return -EFAULT;
return 0;
@@ -1398,10 +1392,13 @@ static int cciss_setnodename(ctlr_info_t *h, void __user *argp)
static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
{
Heartbeat_type heartbeat;
+ unsigned long flags;
if (!argp)
return -EINVAL;
+ spin_lock_irqsave(&h->lock, flags);
heartbeat = readl(&h->cfgtable->HeartBeat);
+ spin_unlock_irqrestore(&h->lock, flags);
if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type)))
return -EFAULT;
return 0;
@@ -1410,10 +1407,13 @@ static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
static int cciss_getbustypes(ctlr_info_t *h, void __user *argp)
{
BusTypes_type BusTypes;
+ unsigned long flags;
if (!argp)
return -EINVAL;
+ spin_lock_irqsave(&h->lock, flags);
BusTypes = readl(&h->cfgtable->BusTypes);
+ spin_unlock_irqrestore(&h->lock, flags);
if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type)))
return -EFAULT;
return 0;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 847107ef0cce..20dd52a2f92f 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3002,7 +3002,8 @@ static int mtip_hw_debugfs_init(struct driver_data *dd)
static void mtip_hw_debugfs_exit(struct driver_data *dd)
{
- debugfs_remove_recursive(dd->dfs_node);
+ if (dd->dfs_node)
+ debugfs_remove_recursive(dd->dfs_node);
}
@@ -3863,7 +3864,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
struct driver_data *dd = queue->queuedata;
struct scatterlist *sg;
struct bio_vec *bvec;
- int nents = 0;
+ int i, nents = 0;
int tag = 0, unaligned = 0;
if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
@@ -3921,11 +3922,12 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
}
/* Create the scatter list for this bio. */
- bio_for_each_segment(bvec, bio, nents) {
+ bio_for_each_segment(bvec, bio, i) {
sg_set_page(&sg[nents],
bvec->bv_page,
bvec->bv_len,
bvec->bv_offset);
+ nents++;
}
/* Issue the read/write. */
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 8efdfaa44a59..ce79a590b45b 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -629,7 +629,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
struct nvme_command *cmnd;
struct nvme_iod *iod;
enum dma_data_direction dma_dir;
- int cmdid, length, result = -ENOMEM;
+ int cmdid, length, result;
u16 control;
u32 dsmgmt;
int psegs = bio_phys_segments(ns->queue, bio);
@@ -640,6 +640,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
return result;
}
+ result = -ENOMEM;
iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
if (!iod)
goto nomem;
@@ -977,6 +978,8 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
if (timeout && !time_after(now, info[cmdid].timeout))
continue;
+ if (info[cmdid].ctx == CMD_CTX_CANCELLED)
+ continue;
dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
ctx = cancel_cmdid(nvmeq, cmdid, &fn);
fn(nvmeq->dev, ctx, &cqe);
@@ -1206,7 +1209,7 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
if (addr & 3)
return ERR_PTR(-EINVAL);
- if (!length)
+ if (!length || length > INT_MAX - PAGE_SIZE)
return ERR_PTR(-EINVAL);
offset = offset_in_page(addr);
@@ -1227,7 +1230,8 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
sg_init_table(sg, count);
for (i = 0; i < count; i++) {
sg_set_page(&sg[i], pages[i],
- min_t(int, length, PAGE_SIZE - offset), offset);
+ min_t(unsigned, length, PAGE_SIZE - offset),
+ offset);
length -= (PAGE_SIZE - offset);
offset = 0;
}
@@ -1435,7 +1439,7 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
nvme_free_iod(dev, iod);
}
- if (!status && copy_to_user(&ucmd->result, &cmd.result,
+ if ((status >= 0) && copy_to_user(&ucmd->result, &cmd.result,
sizeof(cmd.result)))
status = -EFAULT;
@@ -1633,7 +1637,8 @@ static int set_queue_count(struct nvme_dev *dev, int count)
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
- int result, cpu, i, nr_io_queues, db_bar_size, q_depth;
+ struct pci_dev *pdev = dev->pci_dev;
+ int result, cpu, i, nr_io_queues, db_bar_size, q_depth, q_count;
nr_io_queues = num_online_cpus();
result = set_queue_count(dev, nr_io_queues);
@@ -1642,14 +1647,14 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (result < nr_io_queues)
nr_io_queues = result;
+ q_count = nr_io_queues;
/* Deregister the admin queue's interrupt */
free_irq(dev->entry[0].vector, dev->queues[0]);
db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
if (db_bar_size > 8192) {
iounmap(dev->bar);
- dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
- db_bar_size);
+ dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size);
dev->dbs = ((void __iomem *)dev->bar) + 4096;
dev->queues[0]->q_db = dev->dbs;
}
@@ -1657,19 +1662,36 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
for (i = 0; i < nr_io_queues; i++)
dev->entry[i].entry = i;
for (;;) {
- result = pci_enable_msix(dev->pci_dev, dev->entry,
- nr_io_queues);
+ result = pci_enable_msix(pdev, dev->entry, nr_io_queues);
if (result == 0) {
break;
} else if (result > 0) {
nr_io_queues = result;
continue;
} else {
- nr_io_queues = 1;
+ nr_io_queues = 0;
break;
}
}
+ if (nr_io_queues == 0) {
+ nr_io_queues = q_count;
+ for (;;) {
+ result = pci_enable_msi_block(pdev, nr_io_queues);
+ if (result == 0) {
+ for (i = 0; i < nr_io_queues; i++)
+ dev->entry[i].vector = i + pdev->irq;
+ break;
+ } else if (result > 0) {
+ nr_io_queues = result;
+ continue;
+ } else {
+ nr_io_queues = 1;
+ break;
+ }
+ }
+ }
+
result = queue_request_irq(dev, dev->queues[0], "nvme admin");
/* XXX: handle failure here */
@@ -1850,7 +1872,10 @@ static void nvme_free_dev(struct kref *kref)
{
struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
nvme_dev_remove(dev);
- pci_disable_msix(dev->pci_dev);
+ if (dev->pci_dev->msi_enabled)
+ pci_disable_msi(dev->pci_dev);
+ else if (dev->pci_dev->msix_enabled)
+ pci_disable_msix(dev->pci_dev);
iounmap(dev->bar);
nvme_release_instance(dev);
nvme_release_prp_pools(dev);
@@ -1923,8 +1948,14 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&dev->namespaces);
dev->pci_dev = pdev;
pci_set_drvdata(pdev, dev);
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ else
+ goto disable;
+
result = nvme_set_instance(dev);
if (result)
goto disable;
@@ -1977,7 +2008,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
unmap:
iounmap(dev->bar);
disable_msix:
- pci_disable_msix(pdev);
+ if (dev->pci_dev->msi_enabled)
+ pci_disable_msi(dev->pci_dev);
+ else if (dev->pci_dev->msix_enabled)
+ pci_disable_msix(dev->pci_dev);
nvme_release_instance(dev);
nvme_release_prp_pools(dev);
disable:
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index fed54b039893..102de2f52b5c 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -44,7 +44,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <scsi/sg.h>
#include <scsi/scsi.h>
@@ -1654,7 +1653,7 @@ static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
}
}
-static u16 nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 *mode_page, u8 page_code)
{
int res = SNTI_TRANSLATION_SUCCESS;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 3c08983e600a..f5d0ea11d9fd 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -83,7 +83,8 @@
#define MAX_SPEED 0xffff
-#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1))
+#define ZONE(sector, pd) (((sector) + (pd)->offset) & \
+ ~(sector_t)((pd)->settings.size - 1))
static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d6d314027b5d..3063452e55da 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -519,8 +519,8 @@ static const struct block_device_operations rbd_bd_ops = {
};
/*
- * Initialize an rbd client instance.
- * We own *ceph_opts.
+ * Initialize an rbd client instance. Success or not, this function
+ * consumes ceph_opts.
*/
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
@@ -675,7 +675,8 @@ static int parse_rbd_opts_token(char *c, void *private)
/*
* Get a ceph client with specific addr and configuration, if one does
- * not exist create it.
+ * not exist create it. Either way, ceph_opts is consumed by this
+ * function.
*/
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
@@ -4697,8 +4698,10 @@ out:
return ret;
}
-/* Undo whatever state changes are made by v1 or v2 image probe */
-
+/*
+ * Undo whatever state changes are made by v1 or v2 header info
+ * call.
+ */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
struct rbd_image_header *header;
@@ -4902,9 +4905,10 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
int tmp;
/*
- * Get the id from the image id object. If it's not a
- * format 2 image, we'll get ENOENT back, and we'll assume
- * it's a format 1 image.
+ * Get the id from the image id object. Unless there's an
+ * error, rbd_dev->spec->image_id will be filled in with
+ * a dynamically-allocated string, and rbd_dev->image_format
+ * will be set to either 1 or 2.
*/
ret = rbd_dev_image_id(rbd_dev);
if (ret)
@@ -4992,7 +4996,6 @@ static ssize_t rbd_add(struct bus_type *bus,
rc = PTR_ERR(rbdc);
goto err_out_args;
}
- ceph_opts = NULL; /* rbd_dev client now owns this */
/* pick the pool */
osdc = &rbdc->client->osdc;
@@ -5027,18 +5030,18 @@ static ssize_t rbd_add(struct bus_type *bus,
rbd_dev->mapping.read_only = read_only;
rc = rbd_dev_device_setup(rbd_dev);
- if (!rc)
- return count;
+ if (rc) {
+ rbd_dev_image_release(rbd_dev);
+ goto err_out_module;
+ }
+
+ return count;
- rbd_dev_image_release(rbd_dev);
err_out_rbd_dev:
rbd_dev_destroy(rbd_dev);
err_out_client:
rbd_put_client(rbdc);
err_out_args:
- if (ceph_opts)
- ceph_destroy_options(ceph_opts);
- kfree(rbd_opts);
rbd_spec_put(spec);
err_out_module:
module_put(THIS_MODULE);
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index fdfd61a2d523..11a6104a1e4f 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -201,7 +201,7 @@ config BT_MRVL
The core driver to support Marvell Bluetooth devices.
This driver is required if you want to support
- Marvell Bluetooth devices, such as 8688/8787/8797.
+ Marvell Bluetooth devices, such as 8688/8787/8797/8897.
Say Y here to compile Marvell Bluetooth driver
into the kernel or say M to compile it as module.
@@ -214,7 +214,7 @@ config BT_MRVL_SDIO
The driver for Marvell Bluetooth chipsets with SDIO interface.
This driver is required if you want to use Marvell Bluetooth
- devices with SDIO interface. Currently SD8688/SD8787/SD8797
+ devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8897
chipsets are supported.
Say Y here to compile support for Marvell BT-over-SDIO driver
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 3a4343b3bd6d..9a9f51875df5 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -498,6 +498,10 @@ static int btmrvl_service_main_thread(void *data)
add_wait_queue(&thread->wait_q, &wait);
set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop()) {
+ BT_DBG("main_thread: break from main thread");
+ break;
+ }
if (adapter->wakeup_tries ||
((!adapter->int_count) &&
@@ -513,11 +517,6 @@ static int btmrvl_service_main_thread(void *data)
BT_DBG("main_thread woke up");
- if (kthread_should_stop()) {
- BT_DBG("main_thread: break from main thread");
- break;
- }
-
spin_lock_irqsave(&priv->driver_lock, flags);
if (adapter->int_count) {
adapter->int_count = 0;
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index c63488c54f4a..13693b7a0d5c 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -82,6 +82,23 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
.io_port_2 = 0x7a,
};
+static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
+ .cfg = 0x00,
+ .host_int_mask = 0x02,
+ .host_intstatus = 0x03,
+ .card_status = 0x50,
+ .sq_read_base_addr_a0 = 0x60,
+ .sq_read_base_addr_a1 = 0x61,
+ .card_revision = 0xbc,
+ .card_fw_status0 = 0xc0,
+ .card_fw_status1 = 0xc1,
+ .card_rx_len = 0xc2,
+ .card_rx_unit = 0xc3,
+ .io_port_0 = 0xd8,
+ .io_port_1 = 0xd9,
+ .io_port_2 = 0xda,
+};
+
static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
.helper = "mrvl/sd8688_helper.bin",
.firmware = "mrvl/sd8688.bin",
@@ -103,6 +120,13 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
.sd_blksz_fw_dl = 256,
};
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
+ .helper = NULL,
+ .firmware = "mrvl/sd8897_uapsta.bin",
+ .reg = &btmrvl_reg_88xx,
+ .sd_blksz_fw_dl = 256,
+};
+
static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8688 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105),
@@ -116,6 +140,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8797 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
+ /* Marvell SD8897 Bluetooth device */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912E),
+ .driver_data = (unsigned long) &btmrvl_sdio_sd8897 },
{ } /* Terminating entry */
};
@@ -1194,3 +1221,4 @@ MODULE_FIRMWARE("mrvl/sd8688_helper.bin");
MODULE_FIRMWARE("mrvl/sd8688.bin");
MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 11b8b4b54ceb..edc089e9d0c4 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -347,11 +347,11 @@ static u32 get_cur_val(const struct cpumask *mask)
switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
case SYSTEM_INTEL_MSR_CAPABLE:
cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
- cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
+ cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
break;
case SYSTEM_AMD_MSR_CAPABLE:
cmd.type = SYSTEM_AMD_MSR_CAPABLE;
- cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
+ cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
break;
case SYSTEM_IO_CAPABLE:
cmd.type = SYSTEM_IO_CAPABLE;
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index a64eb8b70444..ad1fde277661 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -45,7 +45,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
struct cpufreq_freqs freqs;
struct opp *opp;
unsigned long volt = 0, volt_old = 0, tol = 0;
- long freq_Hz;
+ long freq_Hz, freq_exact;
unsigned int index;
int ret;
@@ -60,6 +60,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
if (freq_Hz < 0)
freq_Hz = freq_table[index].frequency * 1000;
+ freq_exact = freq_Hz;
freqs.new = freq_Hz / 1000;
freqs.old = clk_get_rate(cpu_clk) / 1000;
@@ -98,7 +99,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
}
}
- ret = clk_set_rate(cpu_clk, freqs.new * 1000);
+ ret = clk_set_rate(cpu_clk, freq_exact);
if (ret) {
pr_err("failed to set clock rate: %d\n", ret);
if (cpu_reg)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 5af40ad82d23..dc9b72e25c1a 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -26,6 +26,7 @@
#include <linux/tick.h>
#include <linux/types.h>
#include <linux/workqueue.h>
+#include <linux/cpu.h>
#include "cpufreq_governor.h"
@@ -180,8 +181,10 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
if (!all_cpus) {
__gov_queue_work(smp_processor_id(), dbs_data, delay);
} else {
+ get_online_cpus();
for_each_cpu(i, policy->cpus)
__gov_queue_work(i, dbs_data, delay);
+ put_online_cpus();
}
}
EXPORT_SYMBOL_GPL(gov_queue_work);
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index a97bb6c1596c..c3dc1c04a5df 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -863,7 +863,7 @@ static struct of_device_id sahara_dt_ids[] = {
{ .compatible = "fsl,imx27-sahara" },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(platform, sahara_dt_ids);
+MODULE_DEVICE_TABLE(of, sahara_dt_ids);
static int sahara_probe(struct platform_device *pdev)
{
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index d8ce4ecfef18..e88ded2c8d2f 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -716,8 +716,7 @@ static int dmatest_func(void *data)
}
dma_async_issue_pending(chan);
- wait_event_freezable_timeout(done_wait,
- done.done || kthread_should_stop(),
+ wait_event_freezable_timeout(done_wait, done.done,
msecs_to_jiffies(params->timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
@@ -997,7 +996,6 @@ static void stop_threaded_test(struct dmatest_info *info)
static int __restart_threaded_test(struct dmatest_info *info, bool run)
{
struct dmatest_params *params = &info->params;
- int ret;
/* Stop any running test first */
__stop_threaded_test(info);
@@ -1012,13 +1010,23 @@ static int __restart_threaded_test(struct dmatest_info *info, bool run)
memcpy(params, &info->dbgfs_params, sizeof(*params));
/* Run test with new parameters */
- ret = __run_threaded_test(info);
- if (ret) {
- __stop_threaded_test(info);
- pr_err("dmatest: Can't run test\n");
+ return __run_threaded_test(info);
+}
+
+static bool __is_threaded_test_run(struct dmatest_info *info)
+{
+ struct dmatest_chan *dtc;
+
+ list_for_each_entry(dtc, &info->channels, node) {
+ struct dmatest_thread *thread;
+
+ list_for_each_entry(thread, &dtc->threads, node) {
+ if (!thread->done)
+ return true;
+ }
}
- return ret;
+ return false;
}
static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos,
@@ -1091,22 +1099,10 @@ static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
{
struct dmatest_info *info = file->private_data;
char buf[3];
- struct dmatest_chan *dtc;
- bool alive = false;
mutex_lock(&info->lock);
- list_for_each_entry(dtc, &info->channels, node) {
- struct dmatest_thread *thread;
-
- list_for_each_entry(thread, &dtc->threads, node) {
- if (!thread->done) {
- alive = true;
- break;
- }
- }
- }
- if (alive) {
+ if (__is_threaded_test_run(info)) {
buf[0] = 'Y';
} else {
__stop_threaded_test(info);
@@ -1132,7 +1128,12 @@ static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
if (strtobool(buf, &bv) == 0) {
mutex_lock(&info->lock);
- ret = __restart_threaded_test(info, bv);
+
+ if (__is_threaded_test_run(info))
+ ret = -EBUSY;
+ else
+ ret = __restart_threaded_test(info, bv);
+
mutex_unlock(&info->lock);
}
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 1734feec47b1..71bf4ec300ea 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1566,10 +1566,12 @@ static void dma_tc_handle(struct d40_chan *d40c)
return;
}
- if (d40_queue_start(d40c) == NULL)
+ if (d40_queue_start(d40c) == NULL) {
d40c->busy = false;
- pm_runtime_mark_last_busy(d40c->base->dev);
- pm_runtime_put_autosuspend(d40c->base->dev);
+
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
+ }
d40_desc_remove(d40d);
d40_desc_done(d40c, d40d);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index a6a8643a6a77..8bcce7866d36 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1054,7 +1054,7 @@ EXPORT_SYMBOL(drm_vblank_off);
*/
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{
- /* vblank is not initialized (IRQ not installed ?) */
+ /* vblank is not initialized (IRQ not installed ?), or has been freed */
if (!dev->num_crtcs)
return;
/*
@@ -1076,6 +1076,10 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
{
unsigned long irqflags;
+ /* vblank is not initialized (IRQ not installed ?), or has been freed */
+ if (!dev->num_crtcs)
+ return;
+
if (dev->vblank_inmodeset[crtc]) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = 1;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 3cfd0931fbfb..82430ad8ba62 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -1462,7 +1462,7 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
size_t addr = 0;
struct gtt_range *gt;
struct drm_gem_object *obj;
- int ret;
+ int ret = 0;
/* if we want to turn of the cursor ignore width and height */
if (!handle) {
@@ -1499,7 +1499,8 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
if (obj->size < width * height * 4) {
dev_dbg(dev->dev, "buffer is to small\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unref_cursor;
}
gt = container_of(obj, struct gtt_range, gem);
@@ -1508,7 +1509,7 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
ret = psb_gtt_pin(gt);
if (ret) {
dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
- return ret;
+ goto unref_cursor;
}
addr = gt->offset; /* Or resource.start ??? */
@@ -1532,9 +1533,14 @@ static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
struct gtt_range, gem);
psb_gtt_unpin(gt);
drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
- psb_intel_crtc->cursor_obj = obj;
}
- return 0;
+
+ psb_intel_crtc->cursor_obj = obj;
+ return ret;
+
+unref_cursor:
+ drm_gem_object_unreference(obj);
+ return ret;
}
static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
@@ -1750,6 +1756,19 @@ static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
kfree(psb_intel_crtc);
}
+static void cdv_intel_crtc_disable(struct drm_crtc *crtc)
+{
+ struct gtt_range *gt;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ if (crtc->fb) {
+ gt = to_psb_fb(crtc->fb)->gtt;
+ psb_gtt_unpin(gt);
+ }
+}
+
const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
.dpms = cdv_intel_crtc_dpms,
.mode_fixup = cdv_intel_crtc_mode_fixup,
@@ -1757,6 +1776,7 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
.mode_set_base = cdv_intel_pipe_set_base,
.prepare = cdv_intel_crtc_prepare,
.commit = cdv_intel_crtc_commit,
+ .disable = cdv_intel_crtc_disable,
};
const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 1534e220097a..8b1b6d923abe 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -121,8 +121,8 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long address;
int ret;
unsigned long pfn;
- /* FIXME: assumes fb at stolen base which may not be true */
- unsigned long phys_addr = (unsigned long)dev_priv->stolen_base;
+ unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
+ psbfb->gtt->offset;
page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 6e8f42b61ff6..6666493789d1 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -843,7 +843,7 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt;
struct drm_gem_object *obj;
void *tmp_dst, *tmp_src;
- int ret, i, cursor_pages;
+ int ret = 0, i, cursor_pages;
/* if we want to turn of the cursor ignore width and height */
if (!handle) {
@@ -880,7 +880,8 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
if (obj->size < width * height * 4) {
dev_dbg(dev->dev, "buffer is to small\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unref_cursor;
}
gt = container_of(obj, struct gtt_range, gem);
@@ -889,13 +890,14 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
ret = psb_gtt_pin(gt);
if (ret) {
dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
- return ret;
+ goto unref_cursor;
}
if (dev_priv->ops->cursor_needs_phys) {
if (cursor_gt == NULL) {
dev_err(dev->dev, "No hardware cursor mem available");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto unref_cursor;
}
/* Prevent overflow */
@@ -936,9 +938,14 @@ static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
struct gtt_range, gem);
psb_gtt_unpin(gt);
drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
- psb_intel_crtc->cursor_obj = obj;
}
- return 0;
+
+ psb_intel_crtc->cursor_obj = obj;
+ return ret;
+
+unref_cursor:
+ drm_gem_object_unreference(obj);
+ return ret;
}
static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
@@ -1150,6 +1157,19 @@ static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
kfree(psb_intel_crtc);
}
+static void psb_intel_crtc_disable(struct drm_crtc *crtc)
+{
+ struct gtt_range *gt;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ if (crtc->fb) {
+ gt = to_psb_fb(crtc->fb)->gtt;
+ psb_gtt_unpin(gt);
+ }
+}
+
const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
.dpms = psb_intel_crtc_dpms,
.mode_fixup = psb_intel_crtc_mode_fixup,
@@ -1157,6 +1177,7 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
.mode_set_base = psb_intel_pipe_set_base,
.prepare = psb_intel_crtc_prepare,
.commit = psb_intel_crtc_commit,
+ .disable = psb_intel_crtc_disable,
};
const struct drm_crtc_funcs psb_intel_crtc_funcs = {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a6cf8e843973..970ad17c99ab 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -91,14 +91,11 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
{
int ret;
-#define EXIT_COND (!i915_reset_in_progress(error))
+#define EXIT_COND (!i915_reset_in_progress(error) || \
+ i915_terminally_wedged(error))
if (EXIT_COND)
return 0;
- /* GPU is already declared terminally dead, give up. */
- if (i915_terminally_wedged(error))
- return -EIO;
-
/*
* Only wait 10 seconds for the gpu reset to complete to avoid hanging
* userspace. If it takes that long something really bad is going on and
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ad1117bebd7e..56746dcac40f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7937,6 +7937,11 @@ intel_modeset_check_state(struct drm_device *dev)
memset(&pipe_config, 0, sizeof(pipe_config));
active = dev_priv->display.get_pipe_config(crtc,
&pipe_config);
+
+ /* hw state is inconsistent with the pipe A quirk */
+ if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
+ active = crtc->active;
+
WARN(crtc->active != active,
"crtc active state doesn't match with hw state "
"(expected %i, found %i)\n", crtc->active, active);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f36f1baabd5a..29412cc89c7a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -815,10 +815,10 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
- .ident = "Hewlett-Packard HP t5740e Thin Client",
+ .ident = "Hewlett-Packard HP t5740",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
+ DMI_MATCH(DMI_PRODUCT_NAME, " t5740"),
},
},
{
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d15428404b9a..d4ea6c265ce1 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1776,11 +1776,14 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
* Assume that the preferred modes are
* arranged in priority order.
*/
- intel_ddc_get_modes(connector, intel_sdvo->i2c);
- if (list_empty(&connector->probed_modes) == false)
- goto end;
+ intel_ddc_get_modes(connector, &intel_sdvo->ddc);
- /* Fetch modes from VBT */
+ /*
+ * Fetch modes from VBT. For SDVO prefer the VBT mode since some
+ * SDVO->LVDS transcoders can't cope with the EDID mode. Since
+ * drm_mode_probed_add adds the mode at the head of the list we add it
+ * last.
+ */
if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
newmode = drm_mode_duplicate(connector->dev,
dev_priv->sdvo_lvds_vbt_mode);
@@ -1792,7 +1795,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
}
}
-end:
list_for_each_entry(newmode, &connector->probed_modes, head) {
if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
intel_sdvo->sdvo_lvds_fixed_mode =
@@ -2790,12 +2792,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
}
- /* Only enable the hotplug irq if we need it, to work around noisy
- * hotplug lines.
- */
- if (intel_sdvo->hotplug_active)
- intel_encoder->hpd_pin = HPD_SDVO_B ? HPD_SDVO_B : HPD_SDVO_C;
-
intel_encoder->compute_config = intel_sdvo_compute_config;
intel_encoder->disable = intel_disable_sdvo;
intel_encoder->mode_set = intel_sdvo_mode_set;
@@ -2814,6 +2810,14 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
goto err_output;
}
+ /* Only enable the hotplug irq if we need it, to work around noisy
+ * hotplug lines.
+ */
+ if (intel_sdvo->hotplug_active) {
+ intel_encoder->hpd_pin =
+ intel_sdvo->is_sdvob ? HPD_SDVO_B : HPD_SDVO_C;
+ }
+
/*
* Cloning SDVO with anything is often impossible, since the SDVO
* encoder can request a special input timing mode. And even if that's
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 77b8a45fb10a..ee66badc8bb6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1034,13 +1034,14 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
else
hi_pri_lvl = 5;
- WREG8(0x1fde, 0x06);
- WREG8(0x1fdf, hi_pri_lvl);
+ WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
+ WREG8(MGAREG_CRTCEXT_DATA, hi_pri_lvl);
} else {
+ WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
if (mdev->reg_1e24 >= 0x01)
- WREG8(0x1fdf, 0x03);
+ WREG8(MGAREG_CRTCEXT_DATA, 0x03);
else
- WREG8(0x1fdf, 0x04);
+ WREG8(MGAREG_CRTCEXT_DATA, 0x04);
}
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
index d0817d94454c..f02fd9f443ff 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
@@ -50,11 +50,16 @@ nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
{
const u32 doff = (or * 0x800);
int load = -EINVAL;
+ nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000);
+ nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
- udelay(9500);
+ mdelay(9);
+ udelay(500);
nv_wr32(priv, 0x61a00c + doff, 0x80000000);
load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27;
nv_wr32(priv, 0x61a00c + doff, 0x00000000);
+ nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000);
+ nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
return load;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
index 0d36bdc51417..7fdade6e604d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
@@ -55,6 +55,10 @@ nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
nv_wr32(priv, 0x616510 + hoff, 0x00000000);
nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
+ nv_mask(priv, 0x6165d0 + hoff, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+ nv_mask(priv, 0x616568 + hoff, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+ nv_mask(priv, 0x616578 + hoff, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+
/* ??? */
nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 89bf459d584b..e9b8217d0075 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -40,14 +40,13 @@
* FIFO channel objects
******************************************************************************/
-void
-nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
+static void
+nv50_fifo_playlist_update_locked(struct nv50_fifo_priv *priv)
{
struct nouveau_bar *bar = nouveau_bar(priv);
struct nouveau_gpuobj *cur;
int i, p;
- mutex_lock(&nv_subdev(priv)->mutex);
cur = priv->playlist[priv->cur_playlist];
priv->cur_playlist = !priv->cur_playlist;
@@ -61,6 +60,13 @@ nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
nv_wr32(priv, 0x0032f4, cur->addr >> 12);
nv_wr32(priv, 0x0032ec, p);
nv_wr32(priv, 0x002500, 0x00000101);
+}
+
+void
+nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
+{
+ mutex_lock(&nv_subdev(priv)->mutex);
+ nv50_fifo_playlist_update_locked(priv);
mutex_unlock(&nv_subdev(priv)->mutex);
}
@@ -489,7 +495,7 @@ nv50_fifo_init(struct nouveau_object *object)
for (i = 0; i < 128; i++)
nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
- nv50_fifo_playlist_update(priv);
+ nv50_fifo_playlist_update_locked(priv);
nv_wr32(priv, 0x003200, 0x00000001);
nv_wr32(priv, 0x003250, 0x00000001);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 0a393f7f055f..5a5961b6a6a3 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -218,7 +218,7 @@ struct nv04_display_class {
#define NV50_DISP_DAC_PWR_STATE 0x00000040
#define NV50_DISP_DAC_PWR_STATE_ON 0x00000000
#define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040
-#define NV50_DISP_DAC_LOAD 0x0002000c
+#define NV50_DISP_DAC_LOAD 0x00020100
#define NV50_DISP_DAC_LOAD_VALUE 0x00000007
#define NV50_DISP_PIOR_MTHD 0x00030000
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index ebf0a683305e..dd5e01f89f28 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1554,7 +1554,9 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
struct nv50_disp *disp = nv50_disp(encoder->dev);
int ret, or = nouveau_encoder(encoder)->or;
- u32 load = 0;
+ u32 load = nouveau_drm(encoder->dev)->vbios.dactestval;
+ if (load == 0)
+ load = 340;
ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
if (ret || load != 7)
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 44a7da66e081..8406c8251fbf 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -667,6 +667,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
@@ -693,7 +695,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
+ radeon_audio &&
+ !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
return ATOM_ENCODER_MODE_HDMI;
else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
@@ -704,7 +707,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_HDMIA:
default:
if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
+ radeon_audio &&
+ !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
@@ -718,7 +722,8 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio)
+ radeon_audio &&
+ !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 8546e3b333b4..0f89ce3d02b9 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4754,6 +4754,12 @@ static int evergreen_startup(struct radeon_device *rdev)
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r = r600_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -4923,10 +4929,6 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
-
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 7969c0c8ec20..84583302b081 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2025,6 +2025,12 @@ static int cayman_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r = r600_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -2190,10 +2196,6 @@ int cayman_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
-
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 4973bff37fec..d0314ecbd7c1 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3869,6 +3869,12 @@ static int r100_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r100_irq_set(rdev);
rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -4024,9 +4030,6 @@ int r100_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index c60350e6872d..b9b776f1e582 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1382,6 +1382,12 @@ static int r300_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -1516,9 +1522,6 @@ int r300_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 6fce2eb4dd16..4e796ecf9ea4 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -265,6 +265,12 @@ static int r420_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -411,10 +417,6 @@ int r420_init(struct radeon_device *rdev)
if (r) {
return r;
}
- r = radeon_irq_kms_init(rdev);
- if (r) {
- return r;
- }
/* Memory manager */
r = radeon_bo_init(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index f795a4e092cb..e1aece73b370 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -194,6 +194,12 @@ static int r520_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -297,9 +303,6 @@ int r520_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index b45e64848677..0e5341695922 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1046,6 +1046,24 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
+uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ uint32_t r;
+
+ WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
+ r = RREG32(R_0028FC_MC_DATA);
+ WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
+ return r;
+}
+
+void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
+ S_0028F8_MC_IND_WR_EN(1));
+ WREG32(R_0028FC_MC_DATA, v);
+ WREG32(R_0028F8_MC_INDEX, 0x7F);
+}
+
static void r600_mc_program(struct radeon_device *rdev)
{
struct rv515_mc_save save;
@@ -1181,6 +1199,8 @@ static int r600_mc_init(struct radeon_device *rdev)
{
u32 tmp;
int chansize, numchan;
+ uint32_t h_addr, l_addr;
+ unsigned long long k8_addr;
/* Get VRAM information */
rdev->mc.vram_is_ddr = true;
@@ -1221,7 +1241,30 @@ static int r600_mc_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_IGP) {
rs690_pm_info(rdev);
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+
+ if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
+ /* Use K8 direct mapping for fast fb access. */
+ rdev->fastfb_working = false;
+ h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
+ l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
+ k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
+#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+ if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
+#endif
+ {
+ /* FastFB is meant to be used with UMA memory. Here it is simply
+ * disabled when sideport memory is present.
+ */
+ if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
+ DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
+ (unsigned long long)rdev->mc.aper_base, k8_addr);
+ rdev->mc.aper_base = (resource_size_t)k8_addr;
+ rdev->fastfb_working = true;
+ }
+ }
+ }
}
+
radeon_update_bandwidth_info(rdev);
return 0;
}
@@ -3202,6 +3245,12 @@ static int r600_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r = r600_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -3356,10 +3405,6 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
-
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index acb146c06973..79df558f8c40 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1342,6 +1342,14 @@
#define PACKET3_STRMOUT_BASE_UPDATE 0x72 /* r7xx */
#define PACKET3_SURFACE_BASE_UPDATE 0x73
+#define R_000011_K8_FB_LOCATION 0x11
+#define R_000012_MC_MISC_UMA_CNTL 0x12
+#define G_000012_K8_ADDR_EXT(x) (((x) >> 0) & 0xFF)
+#define R_0028F8_MC_INDEX 0x28F8
+#define S_0028F8_MC_IND_ADDR(x) (((x) & 0x1FF) << 0)
+#define C_0028F8_MC_IND_ADDR 0xFFFFFE00
+#define S_0028F8_MC_IND_WR_EN(x) (((x) & 0x1) << 9)
+#define R_0028FC_MC_DATA 0x28FC
#define R_008020_GRBM_SOFT_RESET 0x8020
#define S_008020_SOFT_RESET_CP(x) (((x) & 1) << 0)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 06b8c19ab19e..a2802b47ee95 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -122,6 +122,10 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
rdev->mc_rreg = &rs600_mc_rreg;
rdev->mc_wreg = &rs600_mc_wreg;
}
+ if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
+ rdev->mc_rreg = &rs780_mc_rreg;
+ rdev->mc_wreg = &rs780_mc_wreg;
+ }
if (rdev->family >= CHIP_R600) {
rdev->pciep_rreg = &r600_pciep_rreg;
rdev->pciep_wreg = &r600_pciep_wreg;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 2c87365d345f..a72759ede753 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -347,6 +347,8 @@ extern bool r600_gui_idle(struct radeon_device *rdev);
extern void r600_pm_misc(struct radeon_device *rdev);
extern void r600_pm_init_profile(struct radeon_device *rdev);
extern void rs780_pm_init_profile(struct radeon_device *rdev);
+extern uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+extern void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int r600_get_pcie_lanes(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 73051ce3121e..233a9b9fa1f7 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -417,6 +417,12 @@ static int rs400_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -535,9 +541,6 @@ int rs400_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 46fa1b07c560..670b555d2ca2 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -923,6 +923,12 @@ static int rs600_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -1047,9 +1053,6 @@ int rs600_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index ab4c86cfd552..55880d5962c3 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -651,6 +651,12 @@ static int rs690_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -776,9 +782,6 @@ int rs690_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index ffcba730c57c..21c7d7b26e55 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -532,6 +532,12 @@ static int rv515_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
@@ -662,9 +668,6 @@ int rv515_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 08aef24afe40..4a62ad2e5399 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1887,6 +1887,12 @@ static int rv770_startup(struct radeon_device *rdev)
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r = r600_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -2045,10 +2051,6 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
-
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d1ba9d88f311..a1b0da6b5808 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -5350,6 +5350,12 @@ static int si_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
+ if (!rdev->irq.installed) {
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+ }
+
r = si_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
@@ -5533,10 +5539,6 @@ int si_init(struct radeon_device *rdev)
if (r)
return r;
- r = radeon_irq_kms_init(rdev);
- if (r)
- return r;
-
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index e461e9972455..7a4d10106906 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -6,6 +6,7 @@ config DRM_TILCDC
select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
select BACKLIGHT_CLASS_DEVICE
+ select BACKLIGHT_LCD_SUPPORT
help
Choose this option if you have a TI SoC with LCDC display
controller, for example AM33xx in beagle-bone, DA8xx, or
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index dc3ae5c56f56..d39a5cede0b0 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -264,9 +264,12 @@ static struct mt_class mt_classes[] = {
static void mt_free_input_name(struct hid_input *hi)
{
struct hid_device *hdev = hi->report->device;
+ const char *name = hi->input->name;
- if (hi->input->name != hdev->name)
- kfree(hi->input->name);
+ if (name != hdev->name) {
+ hi->input->name = hdev->name;
+ kfree(name);
+ }
}
static ssize_t mt_show_quirks(struct device *dev,
@@ -1040,11 +1043,11 @@ static void mt_remove(struct hid_device *hdev)
struct hid_input *hi;
sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
- hid_hw_stop(hdev);
-
list_for_each_entry(hi, &hdev->inputs, list)
mt_free_input_name(hi);
+ hid_hw_stop(hdev);
+
kfree(td);
hid_set_drvdata(hdev, NULL);
}
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 7e76922a4ba9..f920619cd6da 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -331,26 +331,68 @@ static int adm1021_detect(struct i2c_client *client,
man_id = i2c_smbus_read_byte_data(client, ADM1021_REG_MAN_ID);
dev_id = i2c_smbus_read_byte_data(client, ADM1021_REG_DEV_ID);
+ if (man_id < 0 || dev_id < 0)
+ return -ENODEV;
+
if (man_id == 0x4d && dev_id == 0x01)
type_name = "max1617a";
else if (man_id == 0x41) {
if ((dev_id & 0xF0) == 0x30)
type_name = "adm1023";
- else
+ else if ((dev_id & 0xF0) == 0x00)
type_name = "adm1021";
+ else
+ return -ENODEV;
} else if (man_id == 0x49)
type_name = "thmc10";
else if (man_id == 0x23)
type_name = "gl523sm";
else if (man_id == 0x54)
type_name = "mc1066";
- /* LM84 Mfr ID in a different place, and it has more unused bits */
- else if (conv_rate == 0x00
- && (config & 0x7F) == 0x00
- && (status & 0xAB) == 0x00)
- type_name = "lm84";
- else
- type_name = "max1617";
+ else {
+ int lte, rte, lhi, rhi, llo, rlo;
+
+ /* extra checks for LM84 and MAX1617 to avoid misdetections */
+
+ llo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(0));
+ rlo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(1));
+
+ /* fail if any of the additional register reads failed */
+ if (llo < 0 || rlo < 0)
+ return -ENODEV;
+
+ lte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(0));
+ rte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(1));
+ lhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(0));
+ rhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(1));
+
+ /*
+ * Fail for negative temperatures and negative high limits.
+ * This check also catches read errors on the tested registers.
+ */
+ if ((s8)lte < 0 || (s8)rte < 0 || (s8)lhi < 0 || (s8)rhi < 0)
+ return -ENODEV;
+
+ /* fail if all registers hold the same value */
+ if (lte == rte && lte == lhi && lte == rhi && lte == llo
+ && lte == rlo)
+ return -ENODEV;
+
+ /*
+ * LM84 Mfr ID is in a different place,
+ * and it has more unused bits.
+ */
+ if (conv_rate == 0x00
+ && (config & 0x7F) == 0x00
+ && (status & 0xAB) == 0x00) {
+ type_name = "lm84";
+ } else {
+ /* fail if low limits are larger than high limits */
+ if ((s8)llo > lhi || (s8)rlo > rhi)
+ return -ENODEV;
+ type_name = "max1617";
+ }
+ }
pr_debug("Detected chip %s at adapter %d, address 0x%02x.\n",
type_name, i2c_adapter_id(adapter), client->addr);
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 81c7b73695d2..3b9afccaaade 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -61,7 +61,7 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
if (dma_region) {
struct qib_mregion *tmr;
- tmr = rcu_dereference(dev->dma_mr);
+ tmr = rcu_access_pointer(dev->dma_mr);
if (!tmr) {
qib_get_mr(mr);
rcu_assign_pointer(dev->dma_mr, mr);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index f19b0998a53c..2e84ef859c5b 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -5,6 +5,7 @@
* Copyright (C) 2004 Alex Aizman
* Copyright (C) 2005 Mike Christie
* Copyright (c) 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
* maintained by openib-general@openib.org
*
* This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 06f578cde75b..4f069c0d4c04 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -8,6 +8,7 @@
*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index a00ccd1ca333..b6d81a86c976 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 68ebb7fe072a..7827baf455a1 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 5278916c3103..2c4941d0656b 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
+ * Copyright (c) 2013 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -292,10 +293,10 @@ out_err:
}
/**
- * releases the FMR pool, QP and CMA ID objects, returns 0 on success,
+ * releases the FMR pool and QP objects, returns 0 on success,
* -1 on failure
*/
-static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
+static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
{
int cq_index;
BUG_ON(ib_conn == NULL);
@@ -314,13 +315,9 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
rdma_destroy_qp(ib_conn->cma_id);
}
- /* if cma handler context, the caller acts s.t the cma destroy the id */
- if (ib_conn->cma_id != NULL && can_destroy_id)
- rdma_destroy_id(ib_conn->cma_id);
ib_conn->fmr_pool = NULL;
ib_conn->qp = NULL;
- ib_conn->cma_id = NULL;
kfree(ib_conn->page_vec);
if (ib_conn->login_buf) {
@@ -415,11 +412,16 @@ static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id)
list_del(&ib_conn->conn_list);
mutex_unlock(&ig.connlist_mutex);
iser_free_rx_descriptors(ib_conn);
- iser_free_ib_conn_res(ib_conn, can_destroy_id);
+ iser_free_ib_conn_res(ib_conn);
ib_conn->device = NULL;
/* on EVENT_ADDR_ERROR there's no device yet for this conn */
if (device != NULL)
iser_device_try_release(device);
+ /* if in cma handler context, the caller actually destroys the id */
+ if (ib_conn->cma_id != NULL && can_destroy_id) {
+ rdma_destroy_id(ib_conn->cma_id);
+ ib_conn->cma_id = NULL;
+ }
iscsi_destroy_endpoint(ib_conn->ep);
}
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index 29889bbdcc6d..63b3d4eb0ef7 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -76,16 +76,10 @@ asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
{
u32 irqnr;
- do {
- irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET);
- if (irqnr != 0x7f) {
- __raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR);
- irqnr = irq_find_mapping(icoll_domain, irqnr);
- handle_IRQ(irqnr, regs);
- continue;
- }
- break;
- } while (1);
+ irqnr = __raw_readl(icoll_base + HW_ICOLL_STAT_OFFSET);
+ __raw_writel(irqnr, icoll_base + HW_ICOLL_VECTOR);
+ irqnr = irq_find_mapping(icoll_domain, irqnr);
+ handle_IRQ(irqnr, regs);
}
static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq,
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 065b7a31a478..47a52ab580d8 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -119,7 +119,7 @@ static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq,
/* Skip invalid IRQs, only register handlers for the real ones */
if (!(f->valid & BIT(hwirq)))
- return -ENOTSUPP;
+ return -EPERM;
irq_set_chip_data(irq, f);
irq_set_chip_and_handler(irq, &f->chip,
handle_level_irq);
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 884d11c7355f..2bbb00404cf5 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -197,7 +197,7 @@ static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq,
/* Skip invalid IRQs, only register handlers for the real ones */
if (!(v->valid_sources & (1 << hwirq)))
- return -ENOTSUPP;
+ return -EPERM;
irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq);
irq_set_chip_data(irq, v->base);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig
index 05c220d05e23..f950c9d29f3e 100644
--- a/drivers/md/bcache/Kconfig
+++ b/drivers/md/bcache/Kconfig
@@ -1,7 +1,6 @@
config BCACHE
tristate "Block device as cache"
- select CLOSURES
---help---
Allows a block device to be used as cache for other devices; uses
a btree for indexing and the layout is optimized for SSDs.
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 340146d7c17f..d3e15b42a4ab 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -1241,7 +1241,7 @@ void bch_cache_set_stop(struct cache_set *);
struct cache_set *bch_cache_set_alloc(struct cache_sb *);
void bch_btree_cache_free(struct cache_set *);
int bch_btree_cache_alloc(struct cache_set *);
-void bch_writeback_init_cached_dev(struct cached_dev *);
+void bch_cached_dev_writeback_init(struct cached_dev *);
void bch_moving_init_cache_set(struct cache_set *);
void bch_cache_allocator_exit(struct cache *ca);
diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c
index 64e679449c2a..b8730e714d69 100644
--- a/drivers/md/bcache/stats.c
+++ b/drivers/md/bcache/stats.c
@@ -93,24 +93,6 @@ static struct attribute *bch_stats_files[] = {
};
static KTYPE(bch_stats);
-static void scale_accounting(unsigned long data);
-
-void bch_cache_accounting_init(struct cache_accounting *acc,
- struct closure *parent)
-{
- kobject_init(&acc->total.kobj, &bch_stats_ktype);
- kobject_init(&acc->five_minute.kobj, &bch_stats_ktype);
- kobject_init(&acc->hour.kobj, &bch_stats_ktype);
- kobject_init(&acc->day.kobj, &bch_stats_ktype);
-
- closure_init(&acc->cl, parent);
- init_timer(&acc->timer);
- acc->timer.expires = jiffies + accounting_delay;
- acc->timer.data = (unsigned long) acc;
- acc->timer.function = scale_accounting;
- add_timer(&acc->timer);
-}
-
int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
struct kobject *parent)
{
@@ -244,3 +226,19 @@ void bch_mark_sectors_bypassed(struct search *s, int sectors)
atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed);
}
+
+void bch_cache_accounting_init(struct cache_accounting *acc,
+ struct closure *parent)
+{
+ kobject_init(&acc->total.kobj, &bch_stats_ktype);
+ kobject_init(&acc->five_minute.kobj, &bch_stats_ktype);
+ kobject_init(&acc->hour.kobj, &bch_stats_ktype);
+ kobject_init(&acc->day.kobj, &bch_stats_ktype);
+
+ closure_init(&acc->cl, parent);
+ init_timer(&acc->timer);
+ acc->timer.expires = jiffies + accounting_delay;
+ acc->timer.data = (unsigned long) acc;
+ acc->timer.function = scale_accounting;
+ add_timer(&acc->timer);
+}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index c8046bc4aa57..f88e2b653a3f 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -634,11 +634,10 @@ static int open_dev(struct block_device *b, fmode_t mode)
return 0;
}
-static int release_dev(struct gendisk *b, fmode_t mode)
+static void release_dev(struct gendisk *b, fmode_t mode)
{
struct bcache_device *d = b->private_data;
closure_put(&d->cl);
- return 0;
}
static int ioctl_dev(struct block_device *b, fmode_t mode,
@@ -732,8 +731,7 @@ static void bcache_device_free(struct bcache_device *d)
if (d->c)
bcache_device_detach(d);
-
- if (d->disk)
+ if (d->disk && d->disk->flags & GENHD_FL_UP)
del_gendisk(d->disk);
if (d->disk && d->disk->queue)
blk_cleanup_queue(d->disk->queue);
@@ -756,12 +754,9 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
!(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
- bio_split_pool_init(&d->bio_split_hook))
-
- return -ENOMEM;
-
- d->disk = alloc_disk(1);
- if (!d->disk)
+ bio_split_pool_init(&d->bio_split_hook) ||
+ !(d->disk = alloc_disk(1)) ||
+ !(q = blk_alloc_queue(GFP_KERNEL)))
return -ENOMEM;
snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);
@@ -771,10 +766,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
d->disk->fops = &bcache_ops;
d->disk->private_data = d;
- q = blk_alloc_queue(GFP_KERNEL);
- if (!q)
- return -ENOMEM;
-
blk_queue_make_request(q, NULL);
d->disk->queue = q;
q->queuedata = d;
@@ -999,14 +990,17 @@ static void cached_dev_free(struct closure *cl)
mutex_lock(&bch_register_lock);
- bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
+ if (atomic_read(&dc->running))
+ bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
bcache_device_free(&dc->disk);
list_del(&dc->list);
mutex_unlock(&bch_register_lock);
if (!IS_ERR_OR_NULL(dc->bdev)) {
- blk_sync_queue(bdev_get_queue(dc->bdev));
+ if (dc->bdev->bd_disk)
+ blk_sync_queue(bdev_get_queue(dc->bdev));
+
blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
@@ -1028,73 +1022,67 @@ static void cached_dev_flush(struct closure *cl)
static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
{
- int err;
+ int ret;
struct io *io;
-
- closure_init(&dc->disk.cl, NULL);
- set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
+ struct request_queue *q = bdev_get_queue(dc->bdev);
__module_get(THIS_MODULE);
INIT_LIST_HEAD(&dc->list);
+ closure_init(&dc->disk.cl, NULL);
+ set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
-
- bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
-
- err = bcache_device_init(&dc->disk, block_size);
- if (err)
- goto err;
-
- spin_lock_init(&dc->io_lock);
- closure_init_unlocked(&dc->sb_write);
INIT_WORK(&dc->detach, cached_dev_detach_finish);
+ closure_init_unlocked(&dc->sb_write);
+ INIT_LIST_HEAD(&dc->io_lru);
+ spin_lock_init(&dc->io_lock);
+ bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
dc->sequential_merge = true;
dc->sequential_cutoff = 4 << 20;
- INIT_LIST_HEAD(&dc->io_lru);
- dc->sb_bio.bi_max_vecs = 1;
- dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
-
for (io = dc->io; io < dc->io + RECENT_IO; io++) {
list_add(&io->lru, &dc->io_lru);
hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
}
- bch_writeback_init_cached_dev(dc);
+ ret = bcache_device_init(&dc->disk, block_size);
+ if (ret)
+ return ret;
+
+ set_capacity(dc->disk.disk,
+ dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
+
+ dc->disk.disk->queue->backing_dev_info.ra_pages =
+ max(dc->disk.disk->queue->backing_dev_info.ra_pages,
+ q->backing_dev_info.ra_pages);
+
+ bch_cached_dev_request_init(dc);
+ bch_cached_dev_writeback_init(dc);
return 0;
-err:
- bcache_device_stop(&dc->disk);
- return err;
}
/* Cached device - bcache superblock */
-static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
+static void register_bdev(struct cache_sb *sb, struct page *sb_page,
struct block_device *bdev,
struct cached_dev *dc)
{
char name[BDEVNAME_SIZE];
const char *err = "cannot allocate memory";
- struct gendisk *g;
struct cache_set *c;
- if (!dc || cached_dev_init(dc, sb->block_size << 9) != 0)
- return err;
-
memcpy(&dc->sb, sb, sizeof(struct cache_sb));
- dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
dc->bdev = bdev;
dc->bdev->bd_holder = dc;
- g = dc->disk.disk;
-
- set_capacity(g, dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
-
- g->queue->backing_dev_info.ra_pages =
- max(g->queue->backing_dev_info.ra_pages,
- bdev->bd_queue->backing_dev_info.ra_pages);
+ bio_init(&dc->sb_bio);
+ dc->sb_bio.bi_max_vecs = 1;
+ dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
+ dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
+ get_page(sb_page);
- bch_cached_dev_request_init(dc);
+ if (cached_dev_init(dc, sb->block_size << 9))
+ goto err;
err = "error creating kobject";
if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
@@ -1103,6 +1091,8 @@ static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
goto err;
+ pr_info("registered backing device %s", bdevname(bdev, name));
+
list_add(&dc->list, &uncached_devices);
list_for_each_entry(c, &bch_cache_sets, list)
bch_cached_dev_attach(dc, c);
@@ -1111,15 +1101,10 @@ static const char *register_bdev(struct cache_sb *sb, struct page *sb_page,
BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
bch_cached_dev_run(dc);
- return NULL;
+ return;
err:
- kobject_put(&dc->disk.kobj);
pr_notice("error opening %s: %s", bdevname(bdev, name), err);
- /*
- * Return NULL instead of an error because kobject_put() cleans
- * everything up
- */
- return NULL;
+ bcache_device_stop(&dc->disk);
}
/* Flash only volumes */
@@ -1717,20 +1702,11 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
size_t free;
struct bucket *b;
- if (!ca)
- return -ENOMEM;
-
__module_get(THIS_MODULE);
kobject_init(&ca->kobj, &bch_cache_ktype);
- memcpy(&ca->sb, sb, sizeof(struct cache_sb));
-
INIT_LIST_HEAD(&ca->discards);
- bio_init(&ca->sb_bio);
- ca->sb_bio.bi_max_vecs = 1;
- ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
-
bio_init(&ca->journal.bio);
ca->journal.bio.bi_max_vecs = 8;
ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
@@ -1742,18 +1718,17 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
!init_fifo(&ca->unused, free << 2, GFP_KERNEL) ||
!init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
- !(ca->buckets = vmalloc(sizeof(struct bucket) *
+ !(ca->buckets = vzalloc(sizeof(struct bucket) *
ca->sb.nbuckets)) ||
!(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
2, GFP_KERNEL)) ||
!(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
!(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) ||
bio_split_pool_init(&ca->bio_split_hook))
- goto err;
+ return -ENOMEM;
ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
- memset(ca->buckets, 0, ca->sb.nbuckets * sizeof(struct bucket));
for_each_bucket(b, ca)
atomic_set(&b->pin, 0);
@@ -1766,22 +1741,28 @@ err:
return -ENOMEM;
}
-static const char *register_cache(struct cache_sb *sb, struct page *sb_page,
+static void register_cache(struct cache_sb *sb, struct page *sb_page,
struct block_device *bdev, struct cache *ca)
{
char name[BDEVNAME_SIZE];
const char *err = "cannot allocate memory";
- if (cache_alloc(sb, ca) != 0)
- return err;
-
- ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
+ memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
ca->bdev->bd_holder = ca;
+ bio_init(&ca->sb_bio);
+ ca->sb_bio.bi_max_vecs = 1;
+ ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
+ ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
+ get_page(sb_page);
+
if (blk_queue_discard(bdev_get_queue(ca->bdev)))
ca->discard = CACHE_DISCARD(&ca->sb);
+ if (cache_alloc(sb, ca) != 0)
+ goto err;
+
err = "error creating kobject";
if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
goto err;
@@ -1791,15 +1772,10 @@ static const char *register_cache(struct cache_sb *sb, struct page *sb_page,
goto err;
pr_info("registered cache device %s", bdevname(bdev, name));
-
- return NULL;
+ return;
err:
+ pr_notice("error opening %s: %s", bdevname(bdev, name), err);
kobject_put(&ca->kobj);
- pr_info("error opening %s: %s", bdevname(bdev, name), err);
- /* Return NULL instead of an error because kobject_put() cleans
- * everything up
- */
- return NULL;
}
/* Global interfaces/init */
@@ -1833,12 +1809,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
bdev = blkdev_get_by_path(strim(path),
FMODE_READ|FMODE_WRITE|FMODE_EXCL,
sb);
- if (bdev == ERR_PTR(-EBUSY))
- err = "device busy";
-
- if (IS_ERR(bdev) ||
- set_blocksize(bdev, 4096))
+ if (IS_ERR(bdev)) {
+ if (bdev == ERR_PTR(-EBUSY))
+ err = "device busy";
goto err;
+ }
+
+ err = "failed to set blocksize";
+ if (set_blocksize(bdev, 4096))
+ goto err_close;
err = read_super(sb, bdev, &sb_page);
if (err)
@@ -1846,33 +1825,33 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (SB_IS_BDEV(sb)) {
struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
+ if (!dc)
+ goto err_close;
- err = register_bdev(sb, sb_page, bdev, dc);
+ register_bdev(sb, sb_page, bdev, dc);
} else {
struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+ if (!ca)
+ goto err_close;
- err = register_cache(sb, sb_page, bdev, ca);
+ register_cache(sb, sb_page, bdev, ca);
}
-
- if (err) {
- /* register_(bdev|cache) will only return an error if they
- * didn't get far enough to create the kobject - if they did,
- * the kobject destructor will do this cleanup.
- */
+out:
+ if (sb_page)
put_page(sb_page);
-err_close:
- blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
-err:
- if (attr != &ksysfs_register_quiet)
- pr_info("error opening %s: %s", path, err);
- ret = -EINVAL;
- }
-
kfree(sb);
kfree(path);
mutex_unlock(&bch_register_lock);
module_put(THIS_MODULE);
return ret;
+
+err_close:
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+err:
+ if (attr != &ksysfs_register_quiet)
+ pr_info("error opening %s: %s", path, err);
+ ret = -EINVAL;
+ goto out;
}
static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 93e7e31a4bd3..2714ed3991d1 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -375,7 +375,7 @@ err:
refill_dirty(cl);
}
-void bch_writeback_init_cached_dev(struct cached_dev *dc)
+void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
closure_init_unlocked(&dc->writeback);
init_rwsem(&dc->writeback_lock);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 681d1099a2d5..9b82377a833b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5268,8 +5268,8 @@ static void md_clean(struct mddev *mddev)
static void __md_stop_writes(struct mddev *mddev)
{
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (mddev->sync_thread) {
- set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
md_reap_sync_thread(mddev);
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 55951182af73..6e17f8181c4b 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -417,7 +417,17 @@ static void raid1_end_write_request(struct bio *bio, int error)
r1_bio->bios[mirror] = NULL;
to_put = bio;
- set_bit(R1BIO_Uptodate, &r1_bio->state);
+ /*
+ * Do not set R1BIO_Uptodate if the current device is
+ * rebuilding or Faulty. This is because we cannot use
+ * such a device for properly reading the data back (we could
+ * potentially use it, if the current write fell before
+ * rdev->recovery_offset, but for simplicity we don't
+ * check this here).
+ */
+ if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
+ !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
+ set_bit(R1BIO_Uptodate, &r1_bio->state);
/* Maybe we can clear some bad blocks. */
if (is_badblock(conf->mirrors[mirror].rdev,
@@ -870,17 +880,17 @@ static void allow_barrier(struct r1conf *conf)
wake_up(&conf->wait_barrier);
}
-static void freeze_array(struct r1conf *conf)
+static void freeze_array(struct r1conf *conf, int extra)
{
/* stop syncio and normal IO and wait for everything to
* go quiet.
* We increment barrier and nr_waiting, and then
- * wait until nr_pending match nr_queued+1
+ * wait until nr_pending match nr_queued+extra
* This is called in the context of one normal IO request
* that has failed. Thus any sync request that might be pending
* will be blocked by nr_pending, and we need to wait for
* pending IO requests to complete or be queued for re-try.
- * Thus the number queued (nr_queued) plus this request (1)
+ * Thus the number queued (nr_queued) plus this request (extra)
* must match the number of pending IOs (nr_pending) before
* we continue.
*/
@@ -888,7 +898,7 @@ static void freeze_array(struct r1conf *conf)
conf->barrier++;
conf->nr_waiting++;
wait_event_lock_irq_cmd(conf->wait_barrier,
- conf->nr_pending == conf->nr_queued+1,
+ conf->nr_pending == conf->nr_queued+extra,
conf->resync_lock,
flush_pending_writes(conf));
spin_unlock_irq(&conf->resync_lock);
@@ -1544,8 +1554,8 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
* we wait for all outstanding requests to complete.
*/
synchronize_sched();
- raise_barrier(conf);
- lower_barrier(conf);
+ freeze_array(conf, 0);
+ unfreeze_array(conf);
clear_bit(Unmerged, &rdev->flags);
}
md_integrity_add_rdev(rdev, mddev);
@@ -1595,11 +1605,11 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
*/
struct md_rdev *repl =
conf->mirrors[conf->raid_disks + number].rdev;
- raise_barrier(conf);
+ freeze_array(conf, 0);
clear_bit(Replacement, &repl->flags);
p->rdev = repl;
conf->mirrors[conf->raid_disks + number].rdev = NULL;
- lower_barrier(conf);
+ unfreeze_array(conf);
clear_bit(WantReplacement, &rdev->flags);
} else
clear_bit(WantReplacement, &rdev->flags);
@@ -2195,7 +2205,7 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
* frozen
*/
if (mddev->ro == 0) {
- freeze_array(conf);
+ freeze_array(conf, 1);
fix_read_error(conf, r1_bio->read_disk,
r1_bio->sector, r1_bio->sectors);
unfreeze_array(conf);
@@ -2780,8 +2790,8 @@ static int run(struct mddev *mddev)
return PTR_ERR(conf);
if (mddev->queue)
- blk_queue_max_write_same_sectors(mddev->queue,
- mddev->chunk_sectors);
+ blk_queue_max_write_same_sectors(mddev->queue, 0);
+
rdev_for_each(rdev, mddev) {
if (!mddev->gendisk)
continue;
@@ -2963,7 +2973,7 @@ static int raid1_reshape(struct mddev *mddev)
return -ENOMEM;
}
- raise_barrier(conf);
+ freeze_array(conf, 0);
/* ok, everything is stopped */
oldpool = conf->r1bio_pool;
@@ -2994,7 +3004,7 @@ static int raid1_reshape(struct mddev *mddev)
conf->raid_disks = mddev->raid_disks = raid_disks;
mddev->delta_disks = 0;
- lower_barrier(conf);
+ unfreeze_array(conf);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 59d4daa5f4c7..6ddae2501b9a 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -490,7 +490,17 @@ static void raid10_end_write_request(struct bio *bio, int error)
sector_t first_bad;
int bad_sectors;
- set_bit(R10BIO_Uptodate, &r10_bio->state);
+ /*
+ * Do not set R10BIO_Uptodate if the current device is
+ * rebuilding or Faulty. This is because we cannot use
+ * such a device for properly reading the data back (we could
+ * potentially use it, if the current write fell before
+ * rdev->recovery_offset, but for simplicity we don't
+ * check this here).
+ */
+ if (test_bit(In_sync, &rdev->flags) &&
+ !test_bit(Faulty, &rdev->flags))
+ set_bit(R10BIO_Uptodate, &r10_bio->state);
/* Maybe we can clear some bad blocks. */
if (is_badblock(rdev,
@@ -1055,17 +1065,17 @@ static void allow_barrier(struct r10conf *conf)
wake_up(&conf->wait_barrier);
}
-static void freeze_array(struct r10conf *conf)
+static void freeze_array(struct r10conf *conf, int extra)
{
/* stop syncio and normal IO and wait for everything to
* go quiet.
* We increment barrier and nr_waiting, and then
- * wait until nr_pending match nr_queued+1
+ * wait until nr_pending match nr_queued+extra
* This is called in the context of one normal IO request
* that has failed. Thus any sync request that might be pending
* will be blocked by nr_pending, and we need to wait for
* pending IO requests to complete or be queued for re-try.
- * Thus the number queued (nr_queued) plus this request (1)
+ * Thus the number queued (nr_queued) plus this request (extra)
* must match the number of pending IOs (nr_pending) before
* we continue.
*/
@@ -1073,7 +1083,7 @@ static void freeze_array(struct r10conf *conf)
conf->barrier++;
conf->nr_waiting++;
wait_event_lock_irq_cmd(conf->wait_barrier,
- conf->nr_pending == conf->nr_queued+1,
+ conf->nr_pending == conf->nr_queued+extra,
conf->resync_lock,
flush_pending_writes(conf));
@@ -1837,8 +1847,8 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
* we wait for all outstanding requests to complete.
*/
synchronize_sched();
- raise_barrier(conf, 0);
- lower_barrier(conf);
+ freeze_array(conf, 0);
+ unfreeze_array(conf);
clear_bit(Unmerged, &rdev->flags);
}
md_integrity_add_rdev(rdev, mddev);
@@ -2612,7 +2622,7 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
r10_bio->devs[slot].bio = NULL;
if (mddev->ro == 0) {
- freeze_array(conf);
+ freeze_array(conf, 1);
fix_read_error(conf, mddev, r10_bio);
unfreeze_array(conf);
} else
@@ -3609,8 +3619,7 @@ static int run(struct mddev *mddev)
if (mddev->queue) {
blk_queue_max_discard_sectors(mddev->queue,
mddev->chunk_sectors);
- blk_queue_max_write_same_sectors(mddev->queue,
- mddev->chunk_sectors);
+ blk_queue_max_write_same_sectors(mddev->queue, 0);
blk_queue_io_min(mddev->queue, chunk_size);
if (conf->geo.raid_disks % conf->geo.near_copies)
blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9359828ffe26..05e4a105b9c7 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -664,6 +664,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
bi->bi_rw |= REQ_FLUSH;
+ bi->bi_vcnt = 1;
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
bi->bi_io_vec[0].bv_offset = 0;
bi->bi_size = STRIPE_SIZE;
@@ -701,6 +702,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
else
rbi->bi_sector = (sh->sector
+ rrdev->data_offset);
+ rbi->bi_vcnt = 1;
rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
rbi->bi_io_vec[0].bv_offset = 0;
rbi->bi_size = STRIPE_SIZE;
@@ -5464,7 +5466,7 @@ static int run(struct mddev *mddev)
if (mddev->major_version == 0 &&
mddev->minor_version > 90)
rdev->recovery_offset = reshape_offset;
-
+
if (rdev->recovery_offset < reshape_offset) {
/* We need to check old and new layout */
if (!only_parity(rdev->raid_disk,
@@ -5587,6 +5589,8 @@ static int run(struct mddev *mddev)
*/
mddev->queue->limits.discard_zeroes_data = 0;
+ blk_queue_max_write_same_sectors(mddev->queue, 0);
+
rdev_for_each(rdev, mddev) {
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 713d89fedc46..f580d30bb784 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -197,6 +197,8 @@ void mei_stop(struct mei_device *dev)
{
dev_dbg(&dev->pdev->dev, "stopping the device.\n");
+ flush_scheduled_work();
+
mutex_lock(&dev->device_lock);
cancel_delayed_work(&dev->timer_work);
@@ -210,8 +212,6 @@ void mei_stop(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
- flush_scheduled_work();
-
mei_watchdog_unregister(dev);
}
EXPORT_SYMBOL_GPL(mei_stop);
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index 3adf8a70f26e..d0c6907dfd92 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -142,6 +142,8 @@ static void mei_nfc_free(struct mei_nfc_dev *ndev)
mei_cl_unlink(ndev->cl_info);
kfree(ndev->cl_info);
}
+
+ memset(ndev, 0, sizeof(struct mei_nfc_dev));
}
static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index a727464e9c3f..0f268329bd3a 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -325,6 +325,7 @@ static int mei_me_pci_resume(struct device *device)
mutex_lock(&dev->device_lock);
dev->dev_state = MEI_DEV_POWER_UP;
+ mei_clear_interrupts(dev);
mei_reset(dev, 1);
mutex_unlock(&dev->device_lock);
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 44d273c5e19d..0535d1e0bc78 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -172,6 +172,7 @@ static long gru_get_config_info(unsigned long arg)
nodesperblade = 2;
else
nodesperblade = 1;
+ memset(&info, 0, sizeof(info));
info.cpus = num_online_cpus();
info.nodes = num_online_nodes();
info.blades = info.nodes / nodesperblade;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index bc1246f6f86a..3b31c19972d4 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -725,8 +725,8 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
struct net_device *bond_dev, *vlan_dev, *upper_dev;
struct vlan_entry *vlan;
- rcu_read_lock();
read_lock(&bond->lock);
+ rcu_read_lock();
bond_dev = bond->dev;
@@ -748,12 +748,19 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
if (vlan_dev)
__bond_resend_igmp_join_requests(vlan_dev);
}
+ rcu_read_unlock();
- if (--bond->igmp_retrans > 0)
+ /* We use curr_slave_lock to protect against concurrent access to
+ * igmp_retrans from multiple running instances of this function and
+ * bond_change_active_slave
+ */
+ write_lock_bh(&bond->curr_slave_lock);
+ if (bond->igmp_retrans > 1) {
+ bond->igmp_retrans--;
queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
-
+ }
+ write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
- rcu_read_unlock();
}
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
@@ -1901,6 +1908,10 @@ err_free:
err_undo_flags:
bond_compute_features(bond);
+ /* Enslaving the first slave has failed and we need to fix the master's MAC */
+ if (bond->slave_cnt == 0 &&
+ ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
+ eth_hw_addr_random(bond_dev);
return res;
}
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index b38609b967b7..c990b42cf7a1 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -225,7 +225,7 @@ struct bonding {
rwlock_t curr_slave_lock;
u8 send_peer_notif;
s8 setup_by_slave;
- s8 igmp_retrans;
+ u8 igmp_retrans;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_entry;
char proc_file_name[IFNAMSIZ];
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 6e15ef08f301..cbd388eea682 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -977,7 +977,7 @@ static int usb_8dev_probe(struct usb_interface *intf,
err = usb_8dev_cmd_version(priv, &version);
if (err) {
netdev_err(netdev, "can't get firmware version\n");
- goto cleanup_cmd_msg_buffer;
+ goto cleanup_unregister_candev;
} else {
netdev_info(netdev,
"firmware: %d.%d, hardware: %d.%d\n",
@@ -989,6 +989,9 @@ static int usb_8dev_probe(struct usb_interface *intf,
return 0;
+cleanup_unregister_candev:
+ unregister_netdev(priv->netdev);
+
cleanup_cmd_msg_buffer:
kfree(priv->cmd_msg_buffer);
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 36d6abd1cfff..ad6aa1e98348 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -67,4 +67,22 @@ config ATL1C
To compile this driver as a module, choose M here. The module
will be called atl1c.
+config ALX
+ tristate "Qualcomm Atheros AR816x/AR817x support"
+ depends on PCI
+ select CRC32
+ select NET_CORE
+ select MDIO
+ help
+ This driver supports the Qualcomm Atheros L1F ethernet adapter,
+ i.e. the following chipsets:
+
+ 1969:1091 - AR8161 Gigabit Ethernet
+ 1969:1090 - AR8162 Fast Ethernet
+ 1969:10A1 - AR8171 Gigabit Ethernet
+ 1969:10A0 - AR8172 Fast Ethernet
+
+ To compile this driver as a module, choose M here. The module
+ will be called alx.
+
endif # NET_VENDOR_ATHEROS
diff --git a/drivers/net/ethernet/atheros/Makefile b/drivers/net/ethernet/atheros/Makefile
index e7e76fb576ff..5cf1c65bbce9 100644
--- a/drivers/net/ethernet/atheros/Makefile
+++ b/drivers/net/ethernet/atheros/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_ATL1) += atlx/
obj-$(CONFIG_ATL2) += atlx/
obj-$(CONFIG_ATL1E) += atl1e/
obj-$(CONFIG_ATL1C) += atl1c/
+obj-$(CONFIG_ALX) += alx/
diff --git a/drivers/net/ethernet/atheros/alx/Makefile b/drivers/net/ethernet/atheros/alx/Makefile
new file mode 100644
index 000000000000..5901fa407d52
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ALX) += alx.o
+alx-objs := main.o ethtool.o hw.o
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
new file mode 100644
index 000000000000..50b3ae2b143d
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This file is free software: you may copy, redistribute and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _ALX_H_
+#define _ALX_H_
+
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include "hw.h"
+
+#define ALX_WATCHDOG_TIME (5 * HZ)
+
+struct alx_buffer {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(size);
+};
+
+struct alx_rx_queue {
+ struct alx_rrd *rrd;
+ dma_addr_t rrd_dma;
+
+ struct alx_rfd *rfd;
+ dma_addr_t rfd_dma;
+
+ struct alx_buffer *bufs;
+
+ u16 write_idx, read_idx;
+ u16 rrd_read_idx;
+};
+#define ALX_RX_ALLOC_THRESH 32
+
+struct alx_tx_queue {
+ struct alx_txd *tpd;
+ dma_addr_t tpd_dma;
+ struct alx_buffer *bufs;
+ u16 write_idx, read_idx;
+};
+
+#define ALX_DEFAULT_TX_WORK 128
+
+enum alx_device_quirks {
+ ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG = BIT(0),
+};
+
+struct alx_priv {
+ struct net_device *dev;
+
+ struct alx_hw hw;
+
+ /* all descriptor memory */
+ struct {
+ dma_addr_t dma;
+ void *virt;
+ int size;
+ } descmem;
+
+ /* protect int_mask updates */
+ spinlock_t irq_lock;
+ u32 int_mask;
+
+ int tx_ringsz;
+ int rx_ringsz;
+ int rxbuf_size;
+
+ struct napi_struct napi;
+ struct alx_tx_queue txq;
+ struct alx_rx_queue rxq;
+
+ struct work_struct link_check_wk;
+ struct work_struct reset_wk;
+
+ u16 msg_enable;
+
+ bool msi;
+};
+
+extern const struct ethtool_ops alx_ethtool_ops;
+extern const char alx_drv_name[];
+
+#endif
diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
new file mode 100644
index 000000000000..6fa2aec2bc81
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/ethtool.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This file is free software: you may copy, redistribute and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+#include <linux/interrupt.h>
+#include <asm/byteorder.h>
+
+#include "alx.h"
+#include "reg.h"
+#include "hw.h"
+
+
+static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+
+ ecmd->supported = SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP |
+ SUPPORTED_Pause;
+ if (alx_hw_giga(hw))
+ ecmd->supported |= SUPPORTED_1000baseT_Full;
+
+ ecmd->advertising = ADVERTISED_TP;
+ if (hw->adv_cfg & ADVERTISED_Autoneg)
+ ecmd->advertising |= hw->adv_cfg;
+
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = 0;
+ if (hw->adv_cfg & ADVERTISED_Autoneg)
+ ecmd->autoneg = AUTONEG_ENABLE;
+ else
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ if (hw->flowctrl & ALX_FC_ANEG && hw->adv_cfg & ADVERTISED_Autoneg) {
+ if (hw->flowctrl & ALX_FC_RX) {
+ ecmd->advertising |= ADVERTISED_Pause;
+
+ if (!(hw->flowctrl & ALX_FC_TX))
+ ecmd->advertising |= ADVERTISED_Asym_Pause;
+ } else if (hw->flowctrl & ALX_FC_TX) {
+ ecmd->advertising |= ADVERTISED_Asym_Pause;
+ }
+ }
+
+ if (hw->link_speed != SPEED_UNKNOWN) {
+ ethtool_cmd_speed_set(ecmd,
+ hw->link_speed - hw->link_speed % 10);
+ ecmd->duplex = hw->link_speed % 10;
+ } else {
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+ u32 adv_cfg;
+
+ ASSERT_RTNL();
+
+ if (ecmd->autoneg == AUTONEG_ENABLE) {
+ if (ecmd->advertising & ADVERTISED_1000baseT_Half)
+ return -EINVAL;
+ adv_cfg = ecmd->advertising | ADVERTISED_Autoneg;
+ } else {
+ int speed = ethtool_cmd_speed(ecmd);
+
+ switch (speed + ecmd->duplex) {
+ case SPEED_10 + DUPLEX_HALF:
+ adv_cfg = ADVERTISED_10baseT_Half;
+ break;
+ case SPEED_10 + DUPLEX_FULL:
+ adv_cfg = ADVERTISED_10baseT_Full;
+ break;
+ case SPEED_100 + DUPLEX_HALF:
+ adv_cfg = ADVERTISED_100baseT_Half;
+ break;
+ case SPEED_100 + DUPLEX_FULL:
+ adv_cfg = ADVERTISED_100baseT_Full;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ hw->adv_cfg = adv_cfg;
+ return alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl);
+}
+
+static void alx_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+
+ if (hw->flowctrl & ALX_FC_ANEG &&
+ hw->adv_cfg & ADVERTISED_Autoneg)
+ pause->autoneg = AUTONEG_ENABLE;
+ else
+ pause->autoneg = AUTONEG_DISABLE;
+
+ if (hw->flowctrl & ALX_FC_TX)
+ pause->tx_pause = 1;
+ else
+ pause->tx_pause = 0;
+
+ if (hw->flowctrl & ALX_FC_RX)
+ pause->rx_pause = 1;
+ else
+ pause->rx_pause = 0;
+}
+
+
+static int alx_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+ int err = 0;
+ bool reconfig_phy = false;
+ u8 fc = 0;
+
+ if (pause->tx_pause)
+ fc |= ALX_FC_TX;
+ if (pause->rx_pause)
+ fc |= ALX_FC_RX;
+ if (pause->autoneg)
+ fc |= ALX_FC_ANEG;
+
+ ASSERT_RTNL();
+
+ /* restart auto-neg for auto-mode */
+ if (hw->adv_cfg & ADVERTISED_Autoneg) {
+ if (!((fc ^ hw->flowctrl) & ALX_FC_ANEG))
+ reconfig_phy = true;
+ if (fc & hw->flowctrl & ALX_FC_ANEG &&
+ (fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
+ reconfig_phy = true;
+ }
+
+ if (reconfig_phy) {
+ err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc);
+ return err;
+ }
+
+ /* flow control on mac */
+ if ((fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
+ alx_cfg_mac_flowcontrol(hw, fc);
+
+ hw->flowctrl = fc;
+
+ return 0;
+}
+
+static u32 alx_get_msglevel(struct net_device *netdev)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+
+ return alx->msg_enable;
+}
+
+static void alx_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+
+ alx->msg_enable = data;
+}
+
+static void alx_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+
+ wol->supported = WAKE_MAGIC | WAKE_PHY;
+ wol->wolopts = 0;
+
+ if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
+ wol->wolopts |= WAKE_MAGIC;
+ if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY)
+ wol->wolopts |= WAKE_PHY;
+}
+
+static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+
+ if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
+ WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
+ return -EOPNOTSUPP;
+
+ hw->sleep_ctrl = 0;
+
+ if (wol->wolopts & WAKE_MAGIC)
+ hw->sleep_ctrl |= ALX_SLEEP_WOL_MAGIC;
+ if (wol->wolopts & WAKE_PHY)
+ hw->sleep_ctrl |= ALX_SLEEP_WOL_PHY;
+
+ device_set_wakeup_enable(&alx->hw.pdev->dev, hw->sleep_ctrl);
+
+ return 0;
+}
+
+static void alx_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, alx_drv_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->bus_info, pci_name(alx->hw.pdev),
+ sizeof(drvinfo->bus_info));
+}
+
+const struct ethtool_ops alx_ethtool_ops = {
+ .get_settings = alx_get_settings,
+ .set_settings = alx_set_settings,
+ .get_pauseparam = alx_get_pauseparam,
+ .set_pauseparam = alx_set_pauseparam,
+ .get_drvinfo = alx_get_drvinfo,
+ .get_msglevel = alx_get_msglevel,
+ .set_msglevel = alx_set_msglevel,
+ .get_wol = alx_get_wol,
+ .set_wol = alx_set_wol,
+ .get_link = ethtool_op_get_link,
+};
diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c
new file mode 100644
index 000000000000..220a16ad0e49
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/hw.c
@@ -0,0 +1,1226 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This file is free software: you may copy, redistribute and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/mdio.h>
+#include "reg.h"
+#include "hw.h"
+
+static inline bool alx_is_rev_a(u8 rev)
+{
+ return rev == ALX_REV_A0 || rev == ALX_REV_A1;
+}
+
+static int alx_wait_mdio_idle(struct alx_hw *hw)
+{
+ u32 val;
+ int i;
+
+ for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) {
+ val = alx_read_mem32(hw, ALX_MDIO);
+ if (!(val & ALX_MDIO_BUSY))
+ return 0;
+ udelay(10);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int alx_read_phy_core(struct alx_hw *hw, bool ext, u8 dev,
+ u16 reg, u16 *phy_data)
+{
+ u32 val, clk_sel;
+ int err;
+
+ *phy_data = 0;
+
+ /* use the slow clock when the PHY is in hibernation (link down) */
+ clk_sel = hw->link_speed != SPEED_UNKNOWN ?
+ ALX_MDIO_CLK_SEL_25MD4 :
+ ALX_MDIO_CLK_SEL_25MD128;
+
+ if (ext) {
+ val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
+ reg << ALX_MDIO_EXTN_REG_SHIFT;
+ alx_write_mem32(hw, ALX_MDIO_EXTN, val);
+
+ val = ALX_MDIO_SPRES_PRMBL | ALX_MDIO_START |
+ ALX_MDIO_MODE_EXT | ALX_MDIO_OP_READ |
+ clk_sel << ALX_MDIO_CLK_SEL_SHIFT;
+ } else {
+ val = ALX_MDIO_SPRES_PRMBL |
+ clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
+ reg << ALX_MDIO_REG_SHIFT |
+ ALX_MDIO_START | ALX_MDIO_OP_READ;
+ }
+ alx_write_mem32(hw, ALX_MDIO, val);
+
+ err = alx_wait_mdio_idle(hw);
+ if (err)
+ return err;
+ val = alx_read_mem32(hw, ALX_MDIO);
+ *phy_data = ALX_GET_FIELD(val, ALX_MDIO_DATA);
+ return 0;
+}
+
+static int alx_write_phy_core(struct alx_hw *hw, bool ext, u8 dev,
+ u16 reg, u16 phy_data)
+{
+ u32 val, clk_sel;
+
+ /* use the slow clock when the PHY is in hibernation (link down) */
+ clk_sel = hw->link_speed != SPEED_UNKNOWN ?
+ ALX_MDIO_CLK_SEL_25MD4 :
+ ALX_MDIO_CLK_SEL_25MD128;
+
+ if (ext) {
+ val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
+ reg << ALX_MDIO_EXTN_REG_SHIFT;
+ alx_write_mem32(hw, ALX_MDIO_EXTN, val);
+
+ val = ALX_MDIO_SPRES_PRMBL |
+ clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
+ phy_data << ALX_MDIO_DATA_SHIFT |
+ ALX_MDIO_START | ALX_MDIO_MODE_EXT;
+ } else {
+ val = ALX_MDIO_SPRES_PRMBL |
+ clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
+ reg << ALX_MDIO_REG_SHIFT |
+ phy_data << ALX_MDIO_DATA_SHIFT |
+ ALX_MDIO_START;
+ }
+ alx_write_mem32(hw, ALX_MDIO, val);
+
+ return alx_wait_mdio_idle(hw);
+}
+
+static int __alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
+{
+ return alx_read_phy_core(hw, false, 0, reg, phy_data);
+}
+
+static int __alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
+{
+ return alx_write_phy_core(hw, false, 0, reg, phy_data);
+}
+
+static int __alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
+{
+ return alx_read_phy_core(hw, true, dev, reg, pdata);
+}
+
+static int __alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
+{
+ return alx_write_phy_core(hw, true, dev, reg, data);
+}
+
+static int __alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
+{
+ int err;
+
+ err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
+ if (err)
+ return err;
+
+ return __alx_read_phy_reg(hw, ALX_MII_DBG_DATA, pdata);
+}
+
+static int __alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
+{
+ int err;
+
+ err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
+ if (err)
+ return err;
+
+ return __alx_write_phy_reg(hw, ALX_MII_DBG_DATA, data);
+}
+
+int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
+{
+ int err;
+
+ spin_lock(&hw->mdio_lock);
+ err = __alx_read_phy_reg(hw, reg, phy_data);
+ spin_unlock(&hw->mdio_lock);
+
+ return err;
+}
+
+int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
+{
+ int err;
+
+ spin_lock(&hw->mdio_lock);
+ err = __alx_write_phy_reg(hw, reg, phy_data);
+ spin_unlock(&hw->mdio_lock);
+
+ return err;
+}
+
+int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
+{
+ int err;
+
+ spin_lock(&hw->mdio_lock);
+ err = __alx_read_phy_ext(hw, dev, reg, pdata);
+ spin_unlock(&hw->mdio_lock);
+
+ return err;
+}
+
+int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
+{
+ int err;
+
+ spin_lock(&hw->mdio_lock);
+ err = __alx_write_phy_ext(hw, dev, reg, data);
+ spin_unlock(&hw->mdio_lock);
+
+ return err;
+}
+
+static int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
+{
+ int err;
+
+ spin_lock(&hw->mdio_lock);
+ err = __alx_read_phy_dbg(hw, reg, pdata);
+ spin_unlock(&hw->mdio_lock);
+
+ return err;
+}
+
+static int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
+{
+ int err;
+
+ spin_lock(&hw->mdio_lock);
+ err = __alx_write_phy_dbg(hw, reg, data);
+ spin_unlock(&hw->mdio_lock);
+
+ return err;
+}
+
+static u16 alx_get_phy_config(struct alx_hw *hw)
+{
+ u32 val;
+ u16 phy_val;
+
+ val = alx_read_mem32(hw, ALX_PHY_CTRL);
+ /* phy in reset */
+ if ((val & ALX_PHY_CTRL_DSPRST_OUT) == 0)
+ return ALX_DRV_PHY_UNKNOWN;
+
+ val = alx_read_mem32(hw, ALX_DRV);
+ val = ALX_GET_FIELD(val, ALX_DRV_PHY);
+ if (ALX_DRV_PHY_UNKNOWN == val)
+ return ALX_DRV_PHY_UNKNOWN;
+
+ alx_read_phy_reg(hw, ALX_MII_DBG_ADDR, &phy_val);
+ if (ALX_PHY_INITED == phy_val)
+ return val;
+
+ return ALX_DRV_PHY_UNKNOWN;
+}
+
+static bool alx_wait_reg(struct alx_hw *hw, u32 reg, u32 wait, u32 *val)
+{
+ u32 read;
+ int i;
+
+ for (i = 0; i < ALX_SLD_MAX_TO; i++) {
+ read = alx_read_mem32(hw, reg);
+ if ((read & wait) == 0) {
+ if (val)
+ *val = read;
+ return true;
+ }
+ mdelay(1);
+ }
+
+ return false;
+}
+
+static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr)
+{
+ u32 mac0, mac1;
+
+ mac0 = alx_read_mem32(hw, ALX_STAD0);
+ mac1 = alx_read_mem32(hw, ALX_STAD1);
+
+ /* addr should be big-endian */
+ *(__be32 *)(addr + 2) = cpu_to_be32(mac0);
+ *(__be16 *)addr = cpu_to_be16(mac1);
+
+ return is_valid_ether_addr(addr);
+}
+
+int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr)
+{
+ u32 val;
+
+ /* try to get it from register first */
+ if (alx_read_macaddr(hw, addr))
+ return 0;
+
+ /* try to load from efuse */
+ if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_STAT | ALX_SLD_START, &val))
+ return -EIO;
+ alx_write_mem32(hw, ALX_SLD, val | ALX_SLD_START);
+ if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_START, NULL))
+ return -EIO;
+ if (alx_read_macaddr(hw, addr))
+ return 0;
+
+ /* try to load from flash/eeprom (if present) */
+ val = alx_read_mem32(hw, ALX_EFLD);
+ if (val & (ALX_EFLD_F_EXIST | ALX_EFLD_E_EXIST)) {
+ if (!alx_wait_reg(hw, ALX_EFLD,
+ ALX_EFLD_STAT | ALX_EFLD_START, &val))
+ return -EIO;
+ alx_write_mem32(hw, ALX_EFLD, val | ALX_EFLD_START);
+ if (!alx_wait_reg(hw, ALX_EFLD, ALX_EFLD_START, NULL))
+ return -EIO;
+ if (alx_read_macaddr(hw, addr))
+ return 0;
+ }
+
+ return -EIO;
+}
+
+void alx_set_macaddr(struct alx_hw *hw, const u8 *addr)
+{
+ u32 val;
+
+ /* for example: 00-0B-6A-F6-00-DC maps to STAD0=6AF600DC, STAD1=000B */
+ val = be32_to_cpu(*(__be32 *)(addr + 2));
+ alx_write_mem32(hw, ALX_STAD0, val);
+ val = be16_to_cpu(*(__be16 *)addr);
+ alx_write_mem32(hw, ALX_STAD1, val);
+}
+
+static void alx_enable_osc(struct alx_hw *hw)
+{
+ u32 val;
+
+ /* rising edge */
+ val = alx_read_mem32(hw, ALX_MISC);
+ alx_write_mem32(hw, ALX_MISC, val & ~ALX_MISC_INTNLOSC_OPEN);
+ alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
+}
+
+static void alx_reset_osc(struct alx_hw *hw, u8 rev)
+{
+ u32 val, val2;
+
+ /* clear internal OSC settings; let the hw switch the OSC by itself */
+ val = alx_read_mem32(hw, ALX_MISC3);
+ alx_write_mem32(hw, ALX_MISC3,
+ (val & ~ALX_MISC3_25M_BY_SW) |
+ ALX_MISC3_25M_NOTO_INTNL);
+
+ /* the 25M clock from the chipset may be unstable for 1s after PERST
+ * is de-asserted; the driver must re-calibrate before entering sleep
+ * for WoL
+ */
+ val = alx_read_mem32(hw, ALX_MISC);
+ if (rev >= ALX_REV_B0) {
+ /* restore the over-current protection default value,
+ * which may have been reset by MAC-RST
+ */
+ ALX_SET_FIELD(val, ALX_MISC_PSW_OCP, ALX_MISC_PSW_OCP_DEF);
+ /* a 0->1 change will update the internal val of osc */
+ val &= ~ALX_MISC_INTNLOSC_OPEN;
+ alx_write_mem32(hw, ALX_MISC, val);
+ alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
+ /* the hw automatically disables the OSC after calibration */
+ val2 = alx_read_mem32(hw, ALX_MSIC2);
+ val2 &= ~ALX_MSIC2_CALB_START;
+ alx_write_mem32(hw, ALX_MSIC2, val2);
+ alx_write_mem32(hw, ALX_MSIC2, val2 | ALX_MSIC2_CALB_START);
+ } else {
+ val &= ~ALX_MISC_INTNLOSC_OPEN;
+ /* disable isolate for rev A devices */
+ if (alx_is_rev_a(rev))
+ val &= ~ALX_MISC_ISO_EN;
+
+ alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
+ alx_write_mem32(hw, ALX_MISC, val);
+ }
+
+ udelay(20);
+}
+
+static int alx_stop_mac(struct alx_hw *hw)
+{
+ u32 rxq, txq, val;
+ u16 i;
+
+ rxq = alx_read_mem32(hw, ALX_RXQ0);
+ alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN);
+ txq = alx_read_mem32(hw, ALX_TXQ0);
+ alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN);
+
+ udelay(40);
+
+ hw->rx_ctrl &= ~(ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
+ alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+
+ for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
+ val = alx_read_mem32(hw, ALX_MAC_STS);
+ if (!(val & ALX_MAC_STS_IDLE))
+ return 0;
+ udelay(10);
+ }
+
+ return -ETIMEDOUT;
+}
+
+int alx_reset_mac(struct alx_hw *hw)
+{
+ u32 val, pmctrl;
+ int i, ret;
+ u8 rev;
+ bool a_cr;
+
+ pmctrl = 0;
+ rev = alx_hw_revision(hw);
+ a_cr = alx_is_rev_a(rev) && alx_hw_with_cr(hw);
+
+ /* disable all interrupts, RXQ/TXQ */
+ alx_write_mem32(hw, ALX_MSIX_MASK, 0xFFFFFFFF);
+ alx_write_mem32(hw, ALX_IMR, 0);
+ alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
+
+ ret = alx_stop_mac(hw);
+ if (ret)
+ return ret;
+
+ /* MAC reset workaround */
+ alx_write_mem32(hw, ALX_RFD_PIDX, 1);
+
+ /* disable L0s/L1 before MAC reset */
+ if (a_cr) {
+ pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
+ if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
+ alx_write_mem32(hw, ALX_PMCTRL,
+ pmctrl & ~(ALX_PMCTRL_L1_EN |
+ ALX_PMCTRL_L0S_EN));
+ }
+
+ /* reset whole mac safely */
+ val = alx_read_mem32(hw, ALX_MASTER);
+ alx_write_mem32(hw, ALX_MASTER,
+ val | ALX_MASTER_DMA_MAC_RST | ALX_MASTER_OOB_DIS);
+
+ /* make sure it is really idle */
+ udelay(10);
+ for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
+ val = alx_read_mem32(hw, ALX_RFD_PIDX);
+ if (val == 0)
+ break;
+ udelay(10);
+ }
+ for (; i < ALX_DMA_MAC_RST_TO; i++) {
+ val = alx_read_mem32(hw, ALX_MASTER);
+ if ((val & ALX_MASTER_DMA_MAC_RST) == 0)
+ break;
+ udelay(10);
+ }
+ if (i == ALX_DMA_MAC_RST_TO)
+ return -EIO;
+ udelay(10);
+
+ if (a_cr) {
+ alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS);
+ /* restore l0s / l1 */
+ if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
+ alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
+ }
+
+ alx_reset_osc(hw, rev);
+
+ /* clear internal OSC settings, let the hw switch the OSC by itself,
+ * and disable isolation for rev A devices
+ */
+ val = alx_read_mem32(hw, ALX_MISC3);
+ alx_write_mem32(hw, ALX_MISC3,
+ (val & ~ALX_MISC3_25M_BY_SW) |
+ ALX_MISC3_25M_NOTO_INTNL);
+ val = alx_read_mem32(hw, ALX_MISC);
+ val &= ~ALX_MISC_INTNLOSC_OPEN;
+ if (alx_is_rev_a(rev))
+ val &= ~ALX_MISC_ISO_EN;
+ alx_write_mem32(hw, ALX_MISC, val);
+ udelay(20);
+
+ /* driver control speed/duplex, hash-alg */
+ alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+
+ val = alx_read_mem32(hw, ALX_SERDES);
+ alx_write_mem32(hw, ALX_SERDES,
+ val | ALX_SERDES_MACCLK_SLWDWN |
+ ALX_SERDES_PHYCLK_SLWDWN);
+
+ return 0;
+}
+
+void alx_reset_phy(struct alx_hw *hw)
+{
+ int i;
+ u32 val;
+ u16 phy_val;
+
+ /* (DSP)reset PHY core */
+ val = alx_read_mem32(hw, ALX_PHY_CTRL);
+ val &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_IDDQ |
+ ALX_PHY_CTRL_GATE_25M | ALX_PHY_CTRL_POWER_DOWN |
+ ALX_PHY_CTRL_CLS);
+ val |= ALX_PHY_CTRL_RST_ANALOG;
+
+ val |= (ALX_PHY_CTRL_HIB_PULSE | ALX_PHY_CTRL_HIB_EN);
+ alx_write_mem32(hw, ALX_PHY_CTRL, val);
+ udelay(10);
+ alx_write_mem32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_DSPRST_OUT);
+
+ for (i = 0; i < ALX_PHY_CTRL_DSPRST_TO; i++)
+ udelay(10);
+
+ /* phy power saving & hib */
+ alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, ALX_LEGCYPS_DEF);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_SYSMODCTRL,
+ ALX_SYSMODCTRL_IECHOADJ_DEF);
+ alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_VDRVBIAS,
+ ALX_VDRVBIAS_DEF);
+
+ /* EEE advertisement */
+ val = alx_read_mem32(hw, ALX_LPI_CTRL);
+ alx_write_mem32(hw, ALX_LPI_CTRL, val & ~ALX_LPI_CTRL_EN);
+ alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_LOCAL_EEEADV, 0);
+
+ /* phy power saving */
+ alx_write_phy_dbg(hw, ALX_MIIDBG_TST10BTCFG, ALX_TST10BTCFG_DEF);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, ALX_SRDSYSMOD_DEF);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_TST100BTCFG, ALX_TST100BTCFG_DEF);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_ANACTRL, ALX_ANACTRL_DEF);
+ alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
+ phy_val & ~ALX_GREENCFG2_GATE_DFSE_EN);
+ /* rtl8139c, 120m issue */
+ alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_NLP78,
+ ALX_MIIEXT_NLP78_120M_DEF);
+ alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_S3DIG10,
+ ALX_MIIEXT_S3DIG10_DEF);
+
+ if (hw->lnk_patch) {
+ /* Turn off half amplitude */
+ alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
+ &phy_val);
+ alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
+ phy_val | ALX_CLDCTRL3_BP_CABLE1TH_DET_GT);
+ /* Turn off Green feature */
+ alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
+ phy_val | ALX_GREENCFG2_BP_GREEN);
+ /* Turn off half Bias */
+ alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
+ &phy_val);
+ alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
+ phy_val | ALX_CLDCTRL5_BP_VD_HLFBIAS);
+ }
+
+ /* set phy interrupt mask */
+ alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP | ALX_IER_LINK_DOWN);
+}
+
+#define ALX_PCI_CMD (PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
+
+void alx_reset_pcie(struct alx_hw *hw)
+{
+ u8 rev = alx_hw_revision(hw);
+ u32 val;
+ u16 val16;
+
+ /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
+ pci_read_config_word(hw->pdev, PCI_COMMAND, &val16);
+ if (!(val16 & ALX_PCI_CMD) || (val16 & PCI_COMMAND_INTX_DISABLE)) {
+ val16 = (val16 | ALX_PCI_CMD) & ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(hw->pdev, PCI_COMMAND, val16);
+ }
+
+ /* clear WoL setting/status */
+ val = alx_read_mem32(hw, ALX_WOL0);
+ alx_write_mem32(hw, ALX_WOL0, 0);
+
+ val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
+ alx_write_mem32(hw, ALX_PDLL_TRNS1, val & ~ALX_PDLL_TRNS1_D3PLLOFF_EN);
+
+ /* mask some pcie error bits */
+ val = alx_read_mem32(hw, ALX_UE_SVRT);
+ val &= ~(ALX_UE_SVRT_DLPROTERR | ALX_UE_SVRT_FCPROTERR);
+ alx_write_mem32(hw, ALX_UE_SVRT, val);
+
+ /* wol 25M & pclk */
+ val = alx_read_mem32(hw, ALX_MASTER);
+ if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) {
+ if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
+ (val & ALX_MASTER_PCLKSEL_SRDS) == 0)
+ alx_write_mem32(hw, ALX_MASTER,
+ val | ALX_MASTER_PCLKSEL_SRDS |
+ ALX_MASTER_WAKEN_25M);
+ } else {
+ if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
+ (val & ALX_MASTER_PCLKSEL_SRDS) != 0)
+ alx_write_mem32(hw, ALX_MASTER,
+ (val & ~ALX_MASTER_PCLKSEL_SRDS) |
+ ALX_MASTER_WAKEN_25M);
+ }
+
+ /* ASPM setting */
+ alx_enable_aspm(hw, true, true);
+
+ udelay(10);
+}
+
+void alx_start_mac(struct alx_hw *hw)
+{
+ u32 mac, txq, rxq;
+
+ rxq = alx_read_mem32(hw, ALX_RXQ0);
+ alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN);
+ txq = alx_read_mem32(hw, ALX_TXQ0);
+ alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN);
+
+ mac = hw->rx_ctrl;
+ if (hw->link_speed % 10 == DUPLEX_FULL)
+ mac |= ALX_MAC_CTRL_FULLD;
+ else
+ mac &= ~ALX_MAC_CTRL_FULLD;
+ ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
+ hw->link_speed >= SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 :
+ ALX_MAC_CTRL_SPEED_10_100);
+ mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN;
+ hw->rx_ctrl = mac;
+ alx_write_mem32(hw, ALX_MAC_CTRL, mac);
+}
+
+void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc)
+{
+ if (fc & ALX_FC_RX)
+ hw->rx_ctrl |= ALX_MAC_CTRL_RXFC_EN;
+ else
+ hw->rx_ctrl &= ~ALX_MAC_CTRL_RXFC_EN;
+
+ if (fc & ALX_FC_TX)
+ hw->rx_ctrl |= ALX_MAC_CTRL_TXFC_EN;
+ else
+ hw->rx_ctrl &= ~ALX_MAC_CTRL_TXFC_EN;
+
+ alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+}
+
+void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en)
+{
+ u32 pmctrl;
+ u8 rev = alx_hw_revision(hw);
+
+ pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
+
+ ALX_SET_FIELD(pmctrl, ALX_PMCTRL_LCKDET_TIMER,
+ ALX_PMCTRL_LCKDET_TIMER_DEF);
+ pmctrl |= ALX_PMCTRL_RCVR_WT_1US |
+ ALX_PMCTRL_L1_CLKSW_EN |
+ ALX_PMCTRL_L1_SRDSRX_PWD;
+ ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1REQ_TO, ALX_PMCTRL_L1REG_TO_DEF);
+ ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1_TIMER, ALX_PMCTRL_L1_TIMER_16US);
+ pmctrl &= ~(ALX_PMCTRL_L1_SRDS_EN |
+ ALX_PMCTRL_L1_SRDSPLL_EN |
+ ALX_PMCTRL_L1_BUFSRX_EN |
+ ALX_PMCTRL_SADLY_EN |
+ ALX_PMCTRL_HOTRST_WTEN|
+ ALX_PMCTRL_L0S_EN |
+ ALX_PMCTRL_L1_EN |
+ ALX_PMCTRL_ASPM_FCEN |
+ ALX_PMCTRL_TXL1_AFTER_L0S |
+ ALX_PMCTRL_RXL1_AFTER_L0S);
+ if (alx_is_rev_a(rev) && alx_hw_with_cr(hw))
+ pmctrl |= ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN;
+
+ if (l0s_en)
+ pmctrl |= (ALX_PMCTRL_L0S_EN | ALX_PMCTRL_ASPM_FCEN);
+ if (l1_en)
+ pmctrl |= (ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN);
+
+ alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
+}
+
+
+static u32 ethadv_to_hw_cfg(struct alx_hw *hw, u32 ethadv_cfg)
+{
+ u32 cfg = 0;
+
+ if (ethadv_cfg & ADVERTISED_Autoneg) {
+ cfg |= ALX_DRV_PHY_AUTO;
+ if (ethadv_cfg & ADVERTISED_10baseT_Half)
+ cfg |= ALX_DRV_PHY_10;
+ if (ethadv_cfg & ADVERTISED_10baseT_Full)
+ cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
+ if (ethadv_cfg & ADVERTISED_100baseT_Half)
+ cfg |= ALX_DRV_PHY_100;
+ if (ethadv_cfg & ADVERTISED_100baseT_Full)
+ cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
+ if (ethadv_cfg & ADVERTISED_1000baseT_Half)
+ cfg |= ALX_DRV_PHY_1000;
+ if (ethadv_cfg & ADVERTISED_1000baseT_Full)
+ cfg |= ALX_DRV_PHY_1000 | ALX_DRV_PHY_DUPLEX;
+ if (ethadv_cfg & ADVERTISED_Pause)
+ cfg |= ADVERTISE_PAUSE_CAP;
+ if (ethadv_cfg & ADVERTISED_Asym_Pause)
+ cfg |= ADVERTISE_PAUSE_ASYM;
+ } else {
+ switch (ethadv_cfg) {
+ case ADVERTISED_10baseT_Half:
+ cfg |= ALX_DRV_PHY_10;
+ break;
+ case ADVERTISED_100baseT_Half:
+ cfg |= ALX_DRV_PHY_100;
+ break;
+ case ADVERTISED_10baseT_Full:
+ cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
+ break;
+ case ADVERTISED_100baseT_Full:
+ cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
+ break;
+ }
+ }
+
+ return cfg;
+}
+
+int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl)
+{
+ u16 adv, giga, cr;
+ u32 val;
+ int err = 0;
+
+ alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, 0);
+ val = alx_read_mem32(hw, ALX_DRV);
+ ALX_SET_FIELD(val, ALX_DRV_PHY, 0);
+
+ if (ethadv & ADVERTISED_Autoneg) {
+ adv = ADVERTISE_CSMA;
+ adv |= ethtool_adv_to_mii_adv_t(ethadv);
+
+ if (flowctrl & ALX_FC_ANEG) {
+ if (flowctrl & ALX_FC_RX) {
+ adv |= ADVERTISED_Pause;
+ if (!(flowctrl & ALX_FC_TX))
+ adv |= ADVERTISED_Asym_Pause;
+ } else if (flowctrl & ALX_FC_TX) {
+ adv |= ADVERTISED_Asym_Pause;
+ }
+ }
+ giga = 0;
+ if (alx_hw_giga(hw))
+ giga = ethtool_adv_to_mii_ctrl1000_t(ethadv);
+
+ cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
+
+ if (alx_write_phy_reg(hw, MII_ADVERTISE, adv) ||
+ alx_write_phy_reg(hw, MII_CTRL1000, giga) ||
+ alx_write_phy_reg(hw, MII_BMCR, cr))
+ err = -EBUSY;
+ } else {
+ cr = BMCR_RESET;
+ if (ethadv == ADVERTISED_100baseT_Half ||
+ ethadv == ADVERTISED_100baseT_Full)
+ cr |= BMCR_SPEED100;
+ if (ethadv == ADVERTISED_10baseT_Full ||
+ ethadv == ADVERTISED_100baseT_Full)
+ cr |= BMCR_FULLDPLX;
+
+ err = alx_write_phy_reg(hw, MII_BMCR, cr);
+ }
+
+ if (!err) {
+ alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, ALX_PHY_INITED);
+ val |= ethadv_to_hw_cfg(hw, ethadv);
+ }
+
+ alx_write_mem32(hw, ALX_DRV, val);
+
+ return err;
+}
+
+
+void alx_post_phy_link(struct alx_hw *hw)
+{
+ u16 phy_val, len, agc;
+ u8 revid = alx_hw_revision(hw);
+ bool adj_th = revid == ALX_REV_B0;
+ int speed;
+
+ if (hw->link_speed == SPEED_UNKNOWN)
+ speed = SPEED_UNKNOWN;
+ else
+ speed = hw->link_speed - hw->link_speed % 10;
+
+ if (revid != ALX_REV_B0 && !alx_is_rev_a(revid))
+ return;
+
+ /* 1000BT/AZ, wrong cable length */
+ if (speed != SPEED_UNKNOWN) {
+ alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6,
+ &phy_val);
+ len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN);
+ alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val);
+ agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA);
+
+ if ((speed == SPEED_1000 &&
+ (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G ||
+ (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) ||
+ (speed == SPEED_100 &&
+ (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M ||
+ (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) {
+ alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
+ ALX_AZ_ANADECT_LONG);
+ alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+ &phy_val);
+ alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+ phy_val | ALX_AFE_10BT_100M_TH);
+ } else {
+ alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
+ ALX_AZ_ANADECT_DEF);
+ alx_read_phy_ext(hw, ALX_MIIEXT_ANEG,
+ ALX_MIIEXT_AFE, &phy_val);
+ alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+ phy_val & ~ALX_AFE_10BT_100M_TH);
+ }
+
+ /* threshold adjust */
+ if (adj_th && hw->lnk_patch) {
+ if (speed == SPEED_100) {
+ alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
+ ALX_MSE16DB_UP);
+ } else if (speed == SPEED_1000) {
+ /*
+ * Gigabit link threshold: raise the noise
+ * tolerance by 50%
+ */
+ alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
+ &phy_val);
+ ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
+ ALX_MSE20DB_TH_HI);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
+ phy_val);
+ }
+ }
+ } else {
+ alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+ &phy_val);
+ alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+ phy_val & ~ALX_AFE_10BT_100M_TH);
+
+ if (adj_th && hw->lnk_patch) {
+ alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
+ ALX_MSE16DB_DOWN);
+ alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val);
+ ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
+ ALX_MSE20DB_TH_DEF);
+ alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val);
+ }
+ }
+}
+
+
+/* NOTE:
+ * 1. the phy link must be established before calling this function
+ * 2. the wol options (pattern, magic, link, etc.) must be configured
+ *    before calling it.
+ */
+int alx_pre_suspend(struct alx_hw *hw, int speed)
+{
+ u32 master, mac, phy, val;
+ int err = 0;
+
+ master = alx_read_mem32(hw, ALX_MASTER);
+ master &= ~ALX_MASTER_PCLKSEL_SRDS;
+ mac = hw->rx_ctrl;
+ /* 10/100 half */
+ ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED, ALX_MAC_CTRL_SPEED_10_100);
+ mac &= ~(ALX_MAC_CTRL_FULLD | ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
+
+ phy = alx_read_mem32(hw, ALX_PHY_CTRL);
+ phy &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_CLS);
+ phy |= ALX_PHY_CTRL_RST_ANALOG | ALX_PHY_CTRL_HIB_PULSE |
+ ALX_PHY_CTRL_HIB_EN;
+
+ /* no wake-up activity configured */
+ if (!(hw->sleep_ctrl & ALX_SLEEP_ACTIVE)) {
+ err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
+ if (err)
+ return err;
+ phy |= ALX_PHY_CTRL_IDDQ | ALX_PHY_CTRL_POWER_DOWN;
+ } else {
+ if (hw->sleep_ctrl & (ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_CIFS))
+ mac |= ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_BRD_EN;
+ if (hw->sleep_ctrl & ALX_SLEEP_CIFS)
+ mac |= ALX_MAC_CTRL_TX_EN;
+ if (speed % 10 == DUPLEX_FULL)
+ mac |= ALX_MAC_CTRL_FULLD;
+ if (speed >= SPEED_1000)
+ ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
+ ALX_MAC_CTRL_SPEED_1000);
+ phy |= ALX_PHY_CTRL_DSPRST_OUT;
+ err = alx_write_phy_ext(hw, ALX_MIIEXT_ANEG,
+ ALX_MIIEXT_S3DIG10,
+ ALX_MIIEXT_S3DIG10_SL);
+ if (err)
+ return err;
+ }
+
+ alx_enable_osc(hw);
+ hw->rx_ctrl = mac;
+ alx_write_mem32(hw, ALX_MASTER, master);
+ alx_write_mem32(hw, ALX_MAC_CTRL, mac);
+ alx_write_mem32(hw, ALX_PHY_CTRL, phy);
+
+ /* set val of PDLL D3PLLOFF */
+ val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
+ val |= ALX_PDLL_TRNS1_D3PLLOFF_EN;
+ alx_write_mem32(hw, ALX_PDLL_TRNS1, val);
+
+ return 0;
+}
+
+bool alx_phy_configured(struct alx_hw *hw)
+{
+ u32 cfg, hw_cfg;
+
+ cfg = ethadv_to_hw_cfg(hw, hw->adv_cfg);
+ cfg = ALX_GET_FIELD(cfg, ALX_DRV_PHY);
+ hw_cfg = alx_get_phy_config(hw);
+
+ if (hw_cfg == ALX_DRV_PHY_UNKNOWN)
+ return false;
+
+ return cfg == hw_cfg;
+}
+
+int alx_get_phy_link(struct alx_hw *hw, int *speed)
+{
+ struct pci_dev *pdev = hw->pdev;
+ u16 bmsr, giga;
+ int err;
+
+ err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
+ if (err)
+ return err;
+
+ err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
+ if (err)
+ return err;
+
+ if (!(bmsr & BMSR_LSTATUS)) {
+ *speed = SPEED_UNKNOWN;
+ return 0;
+ }
+
+ /* speed/duplex result is saved in PHY Specific Status Register */
+ err = alx_read_phy_reg(hw, ALX_MII_GIGA_PSSR, &giga);
+ if (err)
+ return err;
+
+ if (!(giga & ALX_GIGA_PSSR_SPD_DPLX_RESOLVED))
+ goto wrong_speed;
+
+ switch (giga & ALX_GIGA_PSSR_SPEED) {
+ case ALX_GIGA_PSSR_1000MBS:
+ *speed = SPEED_1000;
+ break;
+ case ALX_GIGA_PSSR_100MBS:
+ *speed = SPEED_100;
+ break;
+ case ALX_GIGA_PSSR_10MBS:
+ *speed = SPEED_10;
+ break;
+ default:
+ goto wrong_speed;
+ }
+
+ *speed += (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+ return 1;
+
+wrong_speed:
+ dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga);
+ return -EINVAL;
+}
+
+int alx_clear_phy_intr(struct alx_hw *hw)
+{
+ u16 isr;
+
+ /* clear interrupt status by reading it */
+ return alx_read_phy_reg(hw, ALX_MII_ISR, &isr);
+}
+
+int alx_config_wol(struct alx_hw *hw)
+{
+ u32 wol = 0;
+ int err = 0;
+
+ /* turn on magic packet event */
+ if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
+ wol |= ALX_WOL0_MAGIC_EN | ALX_WOL0_PME_MAGIC_EN;
+
+ /* turn on link up event */
+ if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) {
+ wol |= ALX_WOL0_LINK_EN | ALX_WOL0_PME_LINK;
+ /* only a link-up event can wake the system */
+ err = alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP);
+ }
+ alx_write_mem32(hw, ALX_WOL0, wol);
+
+ return err;
+}
+
+void alx_disable_rss(struct alx_hw *hw)
+{
+ u32 ctrl = alx_read_mem32(hw, ALX_RXQ0);
+
+ ctrl &= ~ALX_RXQ0_RSS_HASH_EN;
+ alx_write_mem32(hw, ALX_RXQ0, ctrl);
+}
+
+void alx_configure_basic(struct alx_hw *hw)
+{
+ u32 val, raw_mtu, max_payload;
+ u16 val16;
+ u8 chip_rev = alx_hw_revision(hw);
+
+ alx_set_macaddr(hw, hw->mac_addr);
+
+ alx_write_mem32(hw, ALX_CLK_GATE, ALX_CLK_GATE_ALL);
+
+ /* idle timeout to switch clk_125M */
+ if (chip_rev >= ALX_REV_B0)
+ alx_write_mem32(hw, ALX_IDLE_DECISN_TIMER,
+ ALX_IDLE_DECISN_TIMER_DEF);
+
+ alx_write_mem32(hw, ALX_SMB_TIMER, hw->smb_timer * 500UL);
+
+ val = alx_read_mem32(hw, ALX_MASTER);
+ val |= ALX_MASTER_IRQMOD2_EN |
+ ALX_MASTER_IRQMOD1_EN |
+ ALX_MASTER_SYSALVTIMER_EN;
+ alx_write_mem32(hw, ALX_MASTER, val);
+ alx_write_mem32(hw, ALX_IRQ_MODU_TIMER,
+ (hw->imt >> 1) << ALX_IRQ_MODU_TIMER1_SHIFT);
+ /* intr re-trig timeout */
+ alx_write_mem32(hw, ALX_INT_RETRIG, ALX_INT_RETRIG_TO);
+ /* tpd threshold to trig int */
+ alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd);
+ alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt);
+
+ raw_mtu = hw->mtu + ETH_HLEN;
+ alx_write_mem32(hw, ALX_MTU, raw_mtu + 8);
+ if (raw_mtu > ALX_MTU_JUMBO_TH)
+ hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE;
+
+ if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH)
+ val = (raw_mtu + 8 + 7) >> 3;
+ else
+ val = ALX_TXQ1_JUMBO_TSO_TH >> 3;
+ alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN);
+
+ max_payload = pcie_get_readrq(hw->pdev) >> 8;
+ /*
+ * if the BIOS changed the default DMA max read request size,
+ * restore it to the default value
+ */
+ if (max_payload < ALX_DEV_CTRL_MAXRRS_MIN)
+ pcie_set_readrq(hw->pdev, 128 << ALX_DEV_CTRL_MAXRRS_MIN);
+
+ val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_TXQ0_TPD_BURSTPREF_SHIFT |
+ ALX_TXQ0_MODE_ENHANCE | ALX_TXQ0_LSO_8023_EN |
+ ALX_TXQ0_SUPT_IPOPT |
+ ALX_TXQ_TXF_BURST_PREF_DEF << ALX_TXQ0_TXF_BURST_PREF_SHIFT;
+ alx_write_mem32(hw, ALX_TXQ0, val);
+ val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q1_NUMPREF_SHIFT |
+ ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q2_NUMPREF_SHIFT |
+ ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q3_NUMPREF_SHIFT |
+ ALX_HQTPD_BURST_EN;
+ alx_write_mem32(hw, ALX_HQTPD, val);
+
+ /* rxq, flow control */
+ val = alx_read_mem32(hw, ALX_SRAM5);
+ val = ALX_GET_FIELD(val, ALX_SRAM_RXF_LEN) << 3;
+ if (val > ALX_SRAM_RXF_LEN_8K) {
+ val16 = ALX_MTU_STD_ALGN >> 3;
+ val = (val - ALX_RXQ2_RXF_FLOW_CTRL_RSVD) >> 3;
+ } else {
+ val16 = ALX_MTU_STD_ALGN >> 3;
+ val = (val - ALX_MTU_STD_ALGN) >> 3;
+ }
+ alx_write_mem32(hw, ALX_RXQ2,
+ val16 << ALX_RXQ2_RXF_XOFF_THRESH_SHIFT |
+ val << ALX_RXQ2_RXF_XON_THRESH_SHIFT);
+ val = ALX_RXQ0_NUM_RFD_PREF_DEF << ALX_RXQ0_NUM_RFD_PREF_SHIFT |
+ ALX_RXQ0_RSS_MODE_DIS << ALX_RXQ0_RSS_MODE_SHIFT |
+ ALX_RXQ0_IDT_TBL_SIZE_DEF << ALX_RXQ0_IDT_TBL_SIZE_SHIFT |
+ ALX_RXQ0_RSS_HSTYP_ALL | ALX_RXQ0_RSS_HASH_EN |
+ ALX_RXQ0_IPV6_PARSE_EN;
+
+ if (alx_hw_giga(hw))
+ ALX_SET_FIELD(val, ALX_RXQ0_ASPM_THRESH,
+ ALX_RXQ0_ASPM_THRESH_100M);
+
+ alx_write_mem32(hw, ALX_RXQ0, val);
+
+ val = alx_read_mem32(hw, ALX_DMA);
+ val = ALX_DMA_RORDER_MODE_OUT << ALX_DMA_RORDER_MODE_SHIFT |
+ ALX_DMA_RREQ_PRI_DATA |
+ max_payload << ALX_DMA_RREQ_BLEN_SHIFT |
+ ALX_DMA_WDLY_CNT_DEF << ALX_DMA_WDLY_CNT_SHIFT |
+ ALX_DMA_RDLY_CNT_DEF << ALX_DMA_RDLY_CNT_SHIFT |
+ (hw->dma_chnl - 1) << ALX_DMA_RCHNL_SEL_SHIFT;
+ alx_write_mem32(hw, ALX_DMA, val);
+
+ /* default multi-tx-q weights */
+ val = ALX_WRR_PRI_RESTRICT_NONE << ALX_WRR_PRI_SHIFT |
+ 4 << ALX_WRR_PRI0_SHIFT |
+ 4 << ALX_WRR_PRI1_SHIFT |
+ 4 << ALX_WRR_PRI2_SHIFT |
+ 4 << ALX_WRR_PRI3_SHIFT;
+ alx_write_mem32(hw, ALX_WRR, val);
+}
+
+static inline u32 alx_speed_to_ethadv(int speed)
+{
+ switch (speed) {
+ case SPEED_1000 + DUPLEX_FULL:
+ return ADVERTISED_1000baseT_Full;
+ case SPEED_100 + DUPLEX_FULL:
+ return ADVERTISED_100baseT_Full;
+ case SPEED_100 + DUPLEX_HALF:
+ return ADVERTISED_100baseT_Half;
+ case SPEED_10 + DUPLEX_FULL:
+ return ADVERTISED_10baseT_Full;
+ case SPEED_10 + DUPLEX_HALF:
+ return ADVERTISED_10baseT_Half;
+ default:
+ return 0;
+ }
+}
+
+int alx_select_powersaving_speed(struct alx_hw *hw, int *speed)
+{
+ int i, err, spd;
+ u16 lpa;
+
+ err = alx_get_phy_link(hw, &spd);
+ if (err < 0)
+ return err;
+
+ if (spd == SPEED_UNKNOWN)
+ return 0;
+
+ err = alx_read_phy_reg(hw, MII_LPA, &lpa);
+ if (err)
+ return err;
+
+ if (!(lpa & LPA_LPACK)) {
+ *speed = spd;
+ return 0;
+ }
+
+ if (lpa & LPA_10FULL)
+ *speed = SPEED_10 + DUPLEX_FULL;
+ else if (lpa & LPA_10HALF)
+ *speed = SPEED_10 + DUPLEX_HALF;
+ else if (lpa & LPA_100FULL)
+ *speed = SPEED_100 + DUPLEX_FULL;
+ else
+ *speed = SPEED_100 + DUPLEX_HALF;
+
+ if (*speed != spd) {
+ err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
+ if (err)
+ return err;
+ err = alx_setup_speed_duplex(hw,
+ alx_speed_to_ethadv(*speed) |
+ ADVERTISED_Autoneg,
+ ALX_FC_ANEG | ALX_FC_RX |
+ ALX_FC_TX);
+ if (err)
+ return err;
+
+ /* wait for linkup */
+ for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) {
+ int speed2;
+
+ msleep(100);
+
+ err = alx_get_phy_link(hw, &speed2);
+ if (err < 0)
+ return err;
+ if (speed2 != SPEED_UNKNOWN)
+ break;
+ }
+ if (i == ALX_MAX_SETUP_LNK_CYCLE)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+bool alx_get_phy_info(struct alx_hw *hw)
+{
+ u16 devs1, devs2;
+
+ if (alx_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id[0]) ||
+ alx_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id[1]))
+ return false;
+
+ /* since there is no PMA/PMD status 2 register, we can't
+ * use mdio45_probe() to find prtad and mmds;
+ * use a fixed MMD3 to get the mmds.
+ */
+ if (alx_read_phy_ext(hw, 3, MDIO_DEVS1, &devs1) ||
+ alx_read_phy_ext(hw, 3, MDIO_DEVS2, &devs2))
+ return false;
+ hw->mdio.mmds = devs1 | devs2 << 16;
+
+ return true;
+}
diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h
new file mode 100644
index 000000000000..65e723d2172a
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/hw.h
@@ -0,0 +1,499 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This file is free software: you may copy, redistribute and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ALX_HW_H_
+#define ALX_HW_H_
+#include <linux/types.h>
+#include <linux/mdio.h>
+#include <linux/pci.h>
+#include "reg.h"
+
+/* Transmit Packet Descriptor, contains 4 32-bit words.
+ *
+ * 31 16 0
+ * +----------------+----------------+
+ * | vlan-tag | buf length |
+ * +----------------+----------------+
+ * | Word 1 |
+ * +----------------+----------------+
+ * | Word 2: buf addr lo |
+ * +----------------+----------------+
+ * | Word 3: buf addr hi |
+ * +----------------+----------------+
+ *
+ * Word 2 and 3 combine to form a 64-bit buffer address
+ *
+ * Word 1 has three forms, depending on the state of bits 8/12/13:
+ * if bit 8 == '1', the layout is only for custom checksum offload.
+ * if bit 8 == '0' && bit 12 == '1' && bit 13 == '1', the *FIRST* descriptor
+ * for the skb is special for LSO v2: Word 2 becomes the total skb length
+ * and Word 3 is meaningless.
+ * otherwise, the layout is for a general skb, ip/tcp/udp checksum offload,
+ * or LSO (TSO) offload. (An illustrative word1 example follows the field
+ * definitions below.)
+ *
+ * Here is the depiction:
+ *
+ * 0-+ 0-+
+ * 1 | 1 |
+ * 2 | 2 |
+ * 3 | Payload offset 3 | L4 header offset
+ * 4 | (7:0) 4 | (7:0)
+ * 5 | 5 |
+ * 6 | 6 |
+ * 7-+ 7-+
+ * 8 Custom csum enable = 1 8 Custom csum enable = 0
+ * 9 General IPv4 checksum 9 General IPv4 checksum
+ * 10 General TCP checksum 10 General TCP checksum
+ * 11 General UDP checksum 11 General UDP checksum
+ * 12 Large Send Segment enable 12 Large Send Segment enable
+ * 13 Large Send Segment type 13 Large Send Segment type
+ * 14 VLAN tagged 14 VLAN tagged
+ * 15 Insert VLAN tag 15 Insert VLAN tag
+ * 16 IPv4 packet 16 IPv4 packet
+ * 17 Ethernet frame type 17 Ethernet frame type
+ * 18-+ 18-+
+ * 19 | 19 |
+ * 20 | 20 |
+ * 21 | Custom csum offset 21 |
+ * 22 | (25:18) 22 |
+ * 23 | 23 | MSS (30:18)
+ * 24 | 24 |
+ * 25-+ 25 |
+ * 26-+ 26 |
+ * 27 | 27 |
+ * 28 | Reserved 28 |
+ * 29 | 29 |
+ * 30-+ 30-+
+ * 31 End of packet 31 End of packet
+ */
+struct alx_txd {
+ __le16 len;
+ __le16 vlan_tag;
+ __le32 word1;
+ union {
+ __le64 addr;
+ struct {
+ __le32 pkt_len;
+ __le32 resvd;
+ } l;
+ } adrl;
+} __packed;
+
+/* tpd word 1 */
+#define TPD_CXSUMSTART_MASK 0x00FF
+#define TPD_CXSUMSTART_SHIFT 0
+#define TPD_L4HDROFFSET_MASK 0x00FF
+#define TPD_L4HDROFFSET_SHIFT 0
+#define TPD_CXSUM_EN_MASK 0x0001
+#define TPD_CXSUM_EN_SHIFT 8
+#define TPD_IP_XSUM_MASK 0x0001
+#define TPD_IP_XSUM_SHIFT 9
+#define TPD_TCP_XSUM_MASK 0x0001
+#define TPD_TCP_XSUM_SHIFT 10
+#define TPD_UDP_XSUM_MASK 0x0001
+#define TPD_UDP_XSUM_SHIFT 11
+#define TPD_LSO_EN_MASK 0x0001
+#define TPD_LSO_EN_SHIFT 12
+#define TPD_LSO_V2_MASK 0x0001
+#define TPD_LSO_V2_SHIFT 13
+#define TPD_VLTAGGED_MASK 0x0001
+#define TPD_VLTAGGED_SHIFT 14
+#define TPD_INS_VLTAG_MASK 0x0001
+#define TPD_INS_VLTAG_SHIFT 15
+#define TPD_IPV4_MASK 0x0001
+#define TPD_IPV4_SHIFT 16
+#define TPD_ETHTYPE_MASK 0x0001
+#define TPD_ETHTYPE_SHIFT 17
+#define TPD_CXSUMOFFSET_MASK 0x00FF
+#define TPD_CXSUMOFFSET_SHIFT 18
+#define TPD_MSS_MASK 0x1FFF
+#define TPD_MSS_SHIFT 18
+#define TPD_EOP_MASK 0x0001
+#define TPD_EOP_SHIFT 31
+
+#define DESC_GET(_x, _name) ((_x) >> _name##SHIFT & _name##MASK)
+
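+/* Illustrative only (not part of the hardware API): a TCP checksum
+ * offload descriptor would set the general TCP checksum bit and the
+ * L4 header offset in word1, e.g. (assuming the offset is counted
+ * from the start of the frame):
+ *
+ *	word1 = skb_transport_offset(skb) << TPD_L4HDROFFSET_SHIFT |
+ *		1 << TPD_TCP_XSUM_SHIFT;
+ *
+ * Fields are read back with DESC_GET(), e.g. DESC_GET(w, TPD_MSS_)
+ * expands to ((w) >> TPD_MSS_SHIFT & TPD_MSS_MASK).
+ */
+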
+/* Receive Free Descriptor */
+struct alx_rfd {
+ __le64 addr; /* data buffer address, length is
+ * declared in register --- every
+ * buffer has the same size
+ */
+} __packed;
+
+/* Receive Return Descriptor, contains 4 32-bit words.
+ *
+ * 31 16 0
+ * +----------------+----------------+
+ * | Word 0 |
+ * +----------------+----------------+
+ * | Word 1: RSS Hash value |
+ * +----------------+----------------+
+ * | Word 2 |
+ * +----------------+----------------+
+ * | Word 3 |
+ * +----------------+----------------+
+ *
+ * Word 0 depiction & Word 2 depiction:
+ *
+ * 0--+ 0--+
+ * 1 | 1 |
+ * 2 | 2 |
+ * 3 | 3 |
+ * 4 | 4 |
+ * 5 | 5 |
+ * 6 | 6 |
+ * 7 | IP payload checksum 7 | VLAN tag
+ * 8 | (15:0) 8 | (15:0)
+ * 9 | 9 |
+ * 10 | 10 |
+ * 11 | 11 |
+ * 12 | 12 |
+ * 13 | 13 |
+ * 14 | 14 |
+ * 15-+ 15-+
+ * 16-+ 16-+
+ * 17 | Number of RFDs 17 |
+ * 18 | (19:16) 18 |
+ * 19-+ 19 | Protocol ID
+ * 20-+ 20 | (23:16)
+ * 21 | 21 |
+ * 22 | 22 |
+ * 23 | 23-+
+ * 24 | 24 | Reserved
+ * 25 | Start index of RFD-ring 25-+
+ * 26 | (31:20) 26 | RSS Q-num (27:25)
+ * 27 | 27-+
+ * 28 | 28-+
+ * 29 | 29 | RSS Hash algorithm
+ * 30 | 30 | (31:28)
+ * 31-+ 31-+
+ *
+ * Word 3 depiction:
+ *
+ * 0--+
+ * 1 |
+ * 2 |
+ * 3 |
+ * 4 |
+ * 5 |
+ * 6 |
+ * 7 | Packet length (include FCS)
+ * 8 | (13:0)
+ * 9 |
+ * 10 |
+ * 11 |
+ * 12 |
+ * 13-+
+ * 14 L4 Header checksum error
+ * 15 IPv4 checksum error
+ * 16 VLAN tagged
+ * 17-+
+ * 18 | Protocol ID (19:17)
+ * 19-+
+ * 20 Receive error summary
+ * 21 FCS(CRC) error
+ * 22 Frame alignment error
+ * 23 Truncated packet
+ * 24 Runt packet
+ * 25 Incomplete packet due to insufficient rx-desc
+ * 26 Broadcast packet
+ * 27 Multicast packet
+ * 28 Ethernet type (EII or 802.3)
+ * 29 FIFO overflow
+ * 30 Length error (for 802.3, length field mismatch with actual len)
+ * 31 Updated; indicates to the driver that this RRD has been refreshed.
+ */
+struct alx_rrd {
+ __le32 word0;
+ __le32 rss_hash;
+ __le32 word2;
+ __le32 word3;
+} __packed;
+
+/* rrd word 0 */
+#define RRD_XSUM_MASK 0xFFFF
+#define RRD_XSUM_SHIFT 0
+#define RRD_NOR_MASK 0x000F
+#define RRD_NOR_SHIFT 16
+#define RRD_SI_MASK 0x0FFF
+#define RRD_SI_SHIFT 20
+
+/* rrd word 2 */
+#define RRD_VLTAG_MASK 0xFFFF
+#define RRD_VLTAG_SHIFT 0
+#define RRD_PID_MASK 0x00FF
+#define RRD_PID_SHIFT 16
+/* non-ip packet */
+#define RRD_PID_NONIP 0
+/* ipv4(only) */
+#define RRD_PID_IPV4 1
+/* tcp/ipv6 */
+#define RRD_PID_IPV6TCP 2
+/* tcp/ipv4 */
+#define RRD_PID_IPV4TCP 3
+/* udp/ipv6 */
+#define RRD_PID_IPV6UDP 4
+/* udp/ipv4 */
+#define RRD_PID_IPV4UDP 5
+/* ipv6(only) */
+#define RRD_PID_IPV6 6
+/* LLDP packet */
+#define RRD_PID_LLDP 7
+/* 1588 packet */
+#define RRD_PID_1588 8
+#define RRD_RSSQ_MASK 0x0007
+#define RRD_RSSQ_SHIFT 25
+#define RRD_RSSALG_MASK 0x000F
+#define RRD_RSSALG_SHIFT 28
+#define RRD_RSSALG_TCPV6 0x1
+#define RRD_RSSALG_IPV6 0x2
+#define RRD_RSSALG_TCPV4 0x4
+#define RRD_RSSALG_IPV4 0x8
+
+/* rrd word 3 */
+#define RRD_PKTLEN_MASK 0x3FFF
+#define RRD_PKTLEN_SHIFT 0
+#define RRD_ERR_L4_MASK 0x0001
+#define RRD_ERR_L4_SHIFT 14
+#define RRD_ERR_IPV4_MASK 0x0001
+#define RRD_ERR_IPV4_SHIFT 15
+#define RRD_VLTAGGED_MASK 0x0001
+#define RRD_VLTAGGED_SHIFT 16
+#define RRD_OLD_PID_MASK 0x0007
+#define RRD_OLD_PID_SHIFT 17
+#define RRD_ERR_RES_MASK 0x0001
+#define RRD_ERR_RES_SHIFT 20
+#define RRD_ERR_FCS_MASK 0x0001
+#define RRD_ERR_FCS_SHIFT 21
+#define RRD_ERR_FAE_MASK 0x0001
+#define RRD_ERR_FAE_SHIFT 22
+#define RRD_ERR_TRUNC_MASK 0x0001
+#define RRD_ERR_TRUNC_SHIFT 23
+#define RRD_ERR_RUNT_MASK 0x0001
+#define RRD_ERR_RUNT_SHIFT 24
+#define RRD_ERR_ICMP_MASK 0x0001
+#define RRD_ERR_ICMP_SHIFT 25
+#define RRD_BCAST_MASK 0x0001
+#define RRD_BCAST_SHIFT 26
+#define RRD_MCAST_MASK 0x0001
+#define RRD_MCAST_SHIFT 27
+#define RRD_ETHTYPE_MASK 0x0001
+#define RRD_ETHTYPE_SHIFT 28
+#define RRD_ERR_FIFOV_MASK 0x0001
+#define RRD_ERR_FIFOV_SHIFT 29
+#define RRD_ERR_LEN_MASK 0x0001
+#define RRD_ERR_LEN_SHIFT 30
+#define RRD_UPDATED_MASK 0x0001
+#define RRD_UPDATED_SHIFT 31
+
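+/* Illustrative RRD processing sketch (this mirrors alx_clean_rx_irq()):
+ *
+ *	if (rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)) {
+ *		len = ALX_GET_FIELD(le32_to_cpu(rrd->word3), RRD_PKTLEN)
+ *			- ETH_FCS_LEN;
+ *		... hand the frame to the stack, clear RRD_UPDATED and
+ *		advance the read index ...
+ *	}
+ */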
+
+#define ALX_MAX_SETUP_LNK_CYCLE 50
+
+/* for FlowControl */
+#define ALX_FC_RX 0x01
+#define ALX_FC_TX 0x02
+#define ALX_FC_ANEG 0x04
+
+/* for sleep control */
+#define ALX_SLEEP_WOL_PHY 0x00000001
+#define ALX_SLEEP_WOL_MAGIC 0x00000002
+#define ALX_SLEEP_CIFS 0x00000004
+#define ALX_SLEEP_ACTIVE (ALX_SLEEP_WOL_PHY | \
+ ALX_SLEEP_WOL_MAGIC | \
+ ALX_SLEEP_CIFS)
+
+/* for RSS hash type */
+#define ALX_RSS_HASH_TYPE_IPV4 0x1
+#define ALX_RSS_HASH_TYPE_IPV4_TCP 0x2
+#define ALX_RSS_HASH_TYPE_IPV6 0x4
+#define ALX_RSS_HASH_TYPE_IPV6_TCP 0x8
+#define ALX_RSS_HASH_TYPE_ALL (ALX_RSS_HASH_TYPE_IPV4 | \
+ ALX_RSS_HASH_TYPE_IPV4_TCP | \
+ ALX_RSS_HASH_TYPE_IPV6 | \
+ ALX_RSS_HASH_TYPE_IPV6_TCP)
+#define ALX_DEF_RXBUF_SIZE 1536
+#define ALX_MAX_JUMBO_PKT_SIZE (9*1024)
+#define ALX_MAX_TSO_PKT_SIZE (7*1024)
+#define ALX_MAX_FRAME_SIZE ALX_MAX_JUMBO_PKT_SIZE
+#define ALX_MIN_FRAME_SIZE 68
+#define ALX_RAW_MTU(_mtu) (_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
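+/* e.g. ALX_RAW_MTU(1500) = 1500 + 14 + 4 + 4 = 1522 bytes on the wire */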
+
+#define ALX_MAX_RX_QUEUES 8
+#define ALX_MAX_TX_QUEUES 4
+#define ALX_MAX_HANDLED_INTRS 5
+
+#define ALX_ISR_MISC (ALX_ISR_PCIE_LNKDOWN | \
+ ALX_ISR_DMAW | \
+ ALX_ISR_DMAR | \
+ ALX_ISR_SMB | \
+ ALX_ISR_MANU | \
+ ALX_ISR_TIMER)
+
+#define ALX_ISR_FATAL (ALX_ISR_PCIE_LNKDOWN | \
+ ALX_ISR_DMAW | ALX_ISR_DMAR)
+
+#define ALX_ISR_ALERT (ALX_ISR_RXF_OV | \
+ ALX_ISR_TXF_UR | \
+ ALX_ISR_RFD_UR)
+
+#define ALX_ISR_ALL_QUEUES (ALX_ISR_TX_Q0 | \
+ ALX_ISR_TX_Q1 | \
+ ALX_ISR_TX_Q2 | \
+ ALX_ISR_TX_Q3 | \
+ ALX_ISR_RX_Q0 | \
+ ALX_ISR_RX_Q1 | \
+ ALX_ISR_RX_Q2 | \
+ ALX_ISR_RX_Q3 | \
+ ALX_ISR_RX_Q4 | \
+ ALX_ISR_RX_Q5 | \
+ ALX_ISR_RX_Q6 | \
+ ALX_ISR_RX_Q7)
+
+/* maximum interrupt vectors for msix */
+#define ALX_MAX_MSIX_INTRS 16
+
+#define ALX_GET_FIELD(_data, _field) \
+ (((_data) >> _field ## _SHIFT) & _field ## _MASK)
+
+#define ALX_SET_FIELD(_data, _field, _value) do { \
+ (_data) &= ~(_field ## _MASK << _field ## _SHIFT); \
+ (_data) |= ((_value) & _field ## _MASK) << _field ## _SHIFT;\
+ } while (0)
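+
+/* For example (illustrative):
+ *	len = ALX_GET_FIELD(val, ALX_SRAM_RXF_LEN);
+ * expands to ((val >> ALX_SRAM_RXF_LEN_SHIFT) & ALX_SRAM_RXF_LEN_MASK), and
+ *	ALX_SET_FIELD(val, ALX_MAC_CTRL_SPEED, ALX_MAC_CTRL_SPEED_1000);
+ * clears the field in val and ORs in the new (masked) value.
+ */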
+
+struct alx_hw {
+ struct pci_dev *pdev;
+ u8 __iomem *hw_addr;
+
+ /* current & permanent mac addr */
+ u8 mac_addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+
+ u16 mtu;
+ u16 imt;
+ u8 dma_chnl;
+ u8 max_dma_chnl;
+ /* tpd threshold to trig INT */
+ u32 ith_tpd;
+ u32 rx_ctrl;
+ u32 mc_hash[2];
+
+ u32 smb_timer;
+ /* SPEED_* + DUPLEX_*, SPEED_UNKNOWN if link is down */
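+ /* e.g. SPEED_1000 + DUPLEX_FULL == 1001; the speed is recovered as
+ * (link_speed - link_speed % 10) and the duplex as (link_speed % 10)
+ */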
+ int link_speed;
+
+ /* auto-neg advertisement or force mode config */
+ u32 adv_cfg;
+ u8 flowctrl;
+
+ u32 sleep_ctrl;
+
+ spinlock_t mdio_lock;
+ struct mdio_if_info mdio;
+ u16 phy_id[2];
+
+ /* PHY link patch flag */
+ bool lnk_patch;
+};
+
+static inline int alx_hw_revision(struct alx_hw *hw)
+{
+ return hw->pdev->revision >> ALX_PCI_REVID_SHIFT;
+}
+
+static inline bool alx_hw_with_cr(struct alx_hw *hw)
+{
+ return hw->pdev->revision & 1;
+}
+
+static inline bool alx_hw_giga(struct alx_hw *hw)
+{
+ return hw->pdev->device & 1;
+}
+
+static inline void alx_write_mem8(struct alx_hw *hw, u32 reg, u8 val)
+{
+ writeb(val, hw->hw_addr + reg);
+}
+
+static inline void alx_write_mem16(struct alx_hw *hw, u32 reg, u16 val)
+{
+ writew(val, hw->hw_addr + reg);
+}
+
+static inline u16 alx_read_mem16(struct alx_hw *hw, u32 reg)
+{
+ return readw(hw->hw_addr + reg);
+}
+
+static inline void alx_write_mem32(struct alx_hw *hw, u32 reg, u32 val)
+{
+ writel(val, hw->hw_addr + reg);
+}
+
+static inline u32 alx_read_mem32(struct alx_hw *hw, u32 reg)
+{
+ return readl(hw->hw_addr + reg);
+}
+
+static inline void alx_post_write(struct alx_hw *hw)
+{
+ readl(hw->hw_addr);
+}
+
+int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr);
+void alx_reset_phy(struct alx_hw *hw);
+void alx_reset_pcie(struct alx_hw *hw);
+void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en);
+int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl);
+void alx_post_phy_link(struct alx_hw *hw);
+int alx_pre_suspend(struct alx_hw *hw, int speed);
+int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data);
+int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data);
+int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata);
+int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data);
+int alx_get_phy_link(struct alx_hw *hw, int *speed);
+int alx_clear_phy_intr(struct alx_hw *hw);
+int alx_config_wol(struct alx_hw *hw);
+void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc);
+void alx_start_mac(struct alx_hw *hw);
+int alx_reset_mac(struct alx_hw *hw);
+void alx_set_macaddr(struct alx_hw *hw, const u8 *addr);
+bool alx_phy_configured(struct alx_hw *hw);
+void alx_configure_basic(struct alx_hw *hw);
+void alx_disable_rss(struct alx_hw *hw);
+int alx_select_powersaving_speed(struct alx_hw *hw, int *speed);
+bool alx_get_phy_info(struct alx_hw *hw);
+
+#endif
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
new file mode 100644
index 000000000000..418de8b13165
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -0,0 +1,1625 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This file is free software: you may copy, redistribute and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
+#include <linux/mdio.h>
+#include <linux/aer.h>
+#include <linux/bitops.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <net/ip6_checksum.h>
+#include <linux/crc32.h>
+#include "alx.h"
+#include "hw.h"
+#include "reg.h"
+
+const char alx_drv_name[] = "alx";
+
+
+static void alx_free_txbuf(struct alx_priv *alx, int entry)
+{
+ struct alx_buffer *txb = &alx->txq.bufs[entry];
+
+ if (dma_unmap_len(txb, size)) {
+ dma_unmap_single(&alx->hw.pdev->dev,
+ dma_unmap_addr(txb, dma),
+ dma_unmap_len(txb, size),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(txb, size, 0);
+ }
+
+ if (txb->skb) {
+ dev_kfree_skb_any(txb->skb);
+ txb->skb = NULL;
+ }
+}
+
+static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
+{
+ struct alx_rx_queue *rxq = &alx->rxq;
+ struct sk_buff *skb;
+ struct alx_buffer *cur_buf;
+ dma_addr_t dma;
+ u16 cur, next, count = 0;
+
+ next = cur = rxq->write_idx;
+ if (++next == alx->rx_ringsz)
+ next = 0;
+ cur_buf = &rxq->bufs[cur];
+
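+ /* fill every free slot between write_idx and read_idx with a
+ * freshly mapped skb; stop early if allocation or mapping fails
+ */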
+ while (!cur_buf->skb && next != rxq->read_idx) {
+ struct alx_rfd *rfd = &rxq->rfd[cur];
+
+ skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+ if (!skb)
+ break;
+ dma = dma_map_single(&alx->hw.pdev->dev,
+ skb->data, alx->rxbuf_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
+ dev_kfree_skb(skb);
+ break;
+ }
+
+ /* Unfortunately, RX descriptor buffers must be 4-byte
+ * aligned, so we can't use IP alignment.
+ */
+ if (WARN_ON(dma & 3)) {
+ dev_kfree_skb(skb);
+ break;
+ }
+
+ cur_buf->skb = skb;
+ dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
+ dma_unmap_addr_set(cur_buf, dma, dma);
+ rfd->addr = cpu_to_le64(dma);
+
+ cur = next;
+ if (++next == alx->rx_ringsz)
+ next = 0;
+ cur_buf = &rxq->bufs[cur];
+ count++;
+ }
+
+ if (count) {
+ /* flush all updates before updating hardware */
+ wmb();
+ rxq->write_idx = cur;
+ alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
+ }
+
+ return count;
+}
+
+static inline int alx_tpd_avail(struct alx_priv *alx)
+{
+ struct alx_tx_queue *txq = &alx->txq;
+
+ if (txq->write_idx >= txq->read_idx)
+ return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1;
+ return txq->read_idx - txq->write_idx - 1;
+}
+
+static bool alx_clean_tx_irq(struct alx_priv *alx)
+{
+ struct alx_tx_queue *txq = &alx->txq;
+ u16 hw_read_idx, sw_read_idx;
+ unsigned int total_bytes = 0, total_packets = 0;
+ int budget = ALX_DEFAULT_TX_WORK;
+
+ sw_read_idx = txq->read_idx;
+ hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX);
+
+ if (sw_read_idx != hw_read_idx) {
+ while (sw_read_idx != hw_read_idx && budget > 0) {
+ struct sk_buff *skb;
+
+ skb = txq->bufs[sw_read_idx].skb;
+ if (skb) {
+ total_bytes += skb->len;
+ total_packets++;
+ budget--;
+ }
+
+ alx_free_txbuf(alx, sw_read_idx);
+
+ if (++sw_read_idx == alx->tx_ringsz)
+ sw_read_idx = 0;
+ }
+ txq->read_idx = sw_read_idx;
+
+ netdev_completed_queue(alx->dev, total_packets, total_bytes);
+ }
+
+ if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) &&
+ alx_tpd_avail(alx) > alx->tx_ringsz/4)
+ netif_wake_queue(alx->dev);
+
+ return sw_read_idx == hw_read_idx;
+}
+
+static void alx_schedule_link_check(struct alx_priv *alx)
+{
+ schedule_work(&alx->link_check_wk);
+}
+
+static void alx_schedule_reset(struct alx_priv *alx)
+{
+ schedule_work(&alx->reset_wk);
+}
+
+static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
+{
+ struct alx_rx_queue *rxq = &alx->rxq;
+ struct alx_rrd *rrd;
+ struct alx_buffer *rxb;
+ struct sk_buff *skb;
+ u16 length, rfd_cleaned = 0;
+
+ while (budget > 0) {
+ rrd = &rxq->rrd[rxq->rrd_read_idx];
+ if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
+ break;
+ rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);
+
+ if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
+ RRD_SI) != rxq->read_idx ||
+ ALX_GET_FIELD(le32_to_cpu(rrd->word0),
+ RRD_NOR) != 1) {
+ alx_schedule_reset(alx);
+ return 0;
+ }
+
+ rxb = &rxq->bufs[rxq->read_idx];
+ dma_unmap_single(&alx->hw.pdev->dev,
+ dma_unmap_addr(rxb, dma),
+ dma_unmap_len(rxb, size),
+ DMA_FROM_DEVICE);
+ dma_unmap_len_set(rxb, size, 0);
+ skb = rxb->skb;
+ rxb->skb = NULL;
+
+ if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
+ rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
+ rrd->word3 = 0;
+ dev_kfree_skb_any(skb);
+ goto next_pkt;
+ }
+
+ length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
+ RRD_PKTLEN) - ETH_FCS_LEN;
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, alx->dev);
+
+ skb_checksum_none_assert(skb);
+ if (alx->dev->features & NETIF_F_RXCSUM &&
+ !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
+ cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
+ switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
+ RRD_PID)) {
+ case RRD_PID_IPV6UDP:
+ case RRD_PID_IPV4UDP:
+ case RRD_PID_IPV4TCP:
+ case RRD_PID_IPV6TCP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ }
+ }
+
+ napi_gro_receive(&alx->napi, skb);
+ budget--;
+
+next_pkt:
+ if (++rxq->read_idx == alx->rx_ringsz)
+ rxq->read_idx = 0;
+ if (++rxq->rrd_read_idx == alx->rx_ringsz)
+ rxq->rrd_read_idx = 0;
+
+ if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
+ rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
+ }
+
+ if (rfd_cleaned)
+ alx_refill_rx_ring(alx, GFP_ATOMIC);
+
+ return budget > 0;
+}
+
+static int alx_poll(struct napi_struct *napi, int budget)
+{
+ struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
+ struct alx_hw *hw = &alx->hw;
+ bool complete = true;
+ unsigned long flags;
+
+ complete = alx_clean_tx_irq(alx) &&
+ alx_clean_rx_irq(alx, budget);
+
+ if (!complete)
+ return 1;
+
+ napi_complete(&alx->napi);
+
+ /* enable interrupt */
+ spin_lock_irqsave(&alx->irq_lock, flags);
+ alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
+ alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+ spin_unlock_irqrestore(&alx->irq_lock, flags);
+
+ alx_post_write(hw);
+
+ return 0;
+}
+
+static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
+{
+ struct alx_hw *hw = &alx->hw;
+ bool write_int_mask = false;
+
+ spin_lock(&alx->irq_lock);
+
+ /* ACK interrupt */
+ alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
+ intr &= alx->int_mask;
+
+ if (intr & ALX_ISR_FATAL) {
+ netif_warn(alx, hw, alx->dev,
+ "fatal interrupt 0x%x, resetting\n", intr);
+ alx_schedule_reset(alx);
+ goto out;
+ }
+
+ if (intr & ALX_ISR_ALERT)
+ netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);
+
+ if (intr & ALX_ISR_PHY) {
+ /* mask the PHY interrupt here: it is driven by the PHY's internal
+ * status, and can only be cleared once that internal status has
+ * been cleared by the link check worker.
+ */
+ alx->int_mask &= ~ALX_ISR_PHY;
+ write_int_mask = true;
+ alx_schedule_link_check(alx);
+ }
+
+ if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
+ napi_schedule(&alx->napi);
+ /* mask RX/TX interrupts; they are re-enabled when NAPI polling completes */
+ alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
+ write_int_mask = true;
+ }
+
+ if (write_int_mask)
+ alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+
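+ /* clear the ALX_ISR_DIS bit set above so the chip may raise
+ * interrupts again
+ */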
+ alx_write_mem32(hw, ALX_ISR, 0);
+
+ out:
+ spin_unlock(&alx->irq_lock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t alx_intr_msi(int irq, void *data)
+{
+ struct alx_priv *alx = data;
+
+ return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
+}
+
+static irqreturn_t alx_intr_legacy(int irq, void *data)
+{
+ struct alx_priv *alx = data;
+ struct alx_hw *hw = &alx->hw;
+ u32 intr;
+
+ intr = alx_read_mem32(hw, ALX_ISR);
+
+ if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
+ return IRQ_NONE;
+
+ return alx_intr_handle(alx, intr);
+}
+
+static void alx_init_ring_ptrs(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+ u32 addr_hi = ((u64)alx->descmem.dma) >> 32;
+
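+ /* the descriptor rings come from one DMA allocation, so RX and TX
+ * share the same upper 32 address bits programmed below
+ */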
+ alx->rxq.read_idx = 0;
+ alx->rxq.write_idx = 0;
+ alx->rxq.rrd_read_idx = 0;
+ alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
+ alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma);
+ alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
+ alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma);
+ alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
+ alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);
+
+ alx->txq.read_idx = 0;
+ alx->txq.write_idx = 0;
+ alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
+ alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma);
+ alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);
+
+ /* load these pointers into the chip */
+ alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
+}
+
+static void alx_free_txring_buf(struct alx_priv *alx)
+{
+ struct alx_tx_queue *txq = &alx->txq;
+ int i;
+
+ if (!txq->bufs)
+ return;
+
+ for (i = 0; i < alx->tx_ringsz; i++)
+ alx_free_txbuf(alx, i);
+
+ memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer));
+ memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd));
+ txq->write_idx = 0;
+ txq->read_idx = 0;
+
+ netdev_reset_queue(alx->dev);
+}
+
+static void alx_free_rxring_buf(struct alx_priv *alx)
+{
+ struct alx_rx_queue *rxq = &alx->rxq;
+ struct alx_buffer *cur_buf;
+ u16 i;
+
+ if (rxq == NULL)
+ return;
+
+ for (i = 0; i < alx->rx_ringsz; i++) {
+ cur_buf = rxq->bufs + i;
+ if (cur_buf->skb) {
+ dma_unmap_single(&alx->hw.pdev->dev,
+ dma_unmap_addr(cur_buf, dma),
+ dma_unmap_len(cur_buf, size),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(cur_buf->skb);
+ cur_buf->skb = NULL;
+ dma_unmap_len_set(cur_buf, size, 0);
+ dma_unmap_addr_set(cur_buf, dma, 0);
+ }
+ }
+
+ rxq->write_idx = 0;
+ rxq->read_idx = 0;
+ rxq->rrd_read_idx = 0;
+}
+
+static void alx_free_buffers(struct alx_priv *alx)
+{
+ alx_free_txring_buf(alx);
+ alx_free_rxring_buf(alx);
+}
+
+static int alx_reinit_rings(struct alx_priv *alx)
+{
+ alx_free_buffers(alx);
+
+ alx_init_ring_ptrs(alx);
+
+ if (!alx_refill_rx_ring(alx, GFP_KERNEL))
+ return -ENOMEM;
+
+ return 0;
+}
+
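+/* Hash a multicast address into the two 32-bit hash table registers:
+ * bit 31 of the Ethernet CRC selects the register, bits 30:26 select
+ * the bit within it.
+ */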
+static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
+{
+ u32 crc32, bit, reg;
+
+ crc32 = ether_crc(ETH_ALEN, addr);
+ reg = (crc32 >> 31) & 0x1;
+ bit = (crc32 >> 26) & 0x1F;
+
+ mc_hash[reg] |= BIT(bit);
+}
+
+static void __alx_set_rx_mode(struct net_device *netdev)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+ struct netdev_hw_addr *ha;
+ u32 mc_hash[2] = {};
+
+ if (!(netdev->flags & IFF_ALLMULTI)) {
+ netdev_for_each_mc_addr(ha, netdev)
+ alx_add_mc_addr(hw, ha->addr, mc_hash);
+
+ alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
+ alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
+ }
+
+ hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
+ if (netdev->flags & IFF_PROMISC)
+ hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
+ if (netdev->flags & IFF_ALLMULTI)
+ hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;
+
+ alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+}
+
+static void alx_set_rx_mode(struct net_device *netdev)
+{
+ __alx_set_rx_mode(netdev);
+}
+
+static int alx_set_mac_address(struct net_device *netdev, void *data)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+ struct sockaddr *addr = data;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (netdev->addr_assign_type & NET_ADDR_RANDOM)
+ netdev->addr_assign_type ^= NET_ADDR_RANDOM;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
+ alx_set_macaddr(hw, hw->mac_addr);
+
+ return 0;
+}
+
+static int alx_alloc_descriptors(struct alx_priv *alx)
+{
+ alx->txq.bufs = kcalloc(alx->tx_ringsz,
+ sizeof(struct alx_buffer),
+ GFP_KERNEL);
+ if (!alx->txq.bufs)
+ return -ENOMEM;
+
+ alx->rxq.bufs = kcalloc(alx->rx_ringsz,
+ sizeof(struct alx_buffer),
+ GFP_KERNEL);
+ if (!alx->rxq.bufs)
+ goto out_free;
+
+ /* physical tx/rx ring descriptors
+ *
+ * Allocate them as a single chunk because they must not cross a
+ * 4G boundary (hardware has a single register for high 32 bits
+ * of addresses only)
+ */
+ alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz +
+ sizeof(struct alx_rrd) * alx->rx_ringsz +
+ sizeof(struct alx_rfd) * alx->rx_ringsz;
+ alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
+ alx->descmem.size,
+ &alx->descmem.dma,
+ GFP_KERNEL);
+ if (!alx->descmem.virt)
+ goto out_free;
+
+ alx->txq.tpd = (void *)alx->descmem.virt;
+ alx->txq.tpd_dma = alx->descmem.dma;
+
+ /* alignment requirement for next block */
+ BUILD_BUG_ON(sizeof(struct alx_txd) % 8);
+
+ alx->rxq.rrd =
+ (void *)((u8 *)alx->descmem.virt +
+ sizeof(struct alx_txd) * alx->tx_ringsz);
+ alx->rxq.rrd_dma = alx->descmem.dma +
+ sizeof(struct alx_txd) * alx->tx_ringsz;
+
+ /* alignment requirement for next block */
+ BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);
+
+ alx->rxq.rfd =
+ (void *)((u8 *)alx->descmem.virt +
+ sizeof(struct alx_txd) * alx->tx_ringsz +
+ sizeof(struct alx_rrd) * alx->rx_ringsz);
+ alx->rxq.rfd_dma = alx->descmem.dma +
+ sizeof(struct alx_txd) * alx->tx_ringsz +
+ sizeof(struct alx_rrd) * alx->rx_ringsz;
+
+ return 0;
+out_free:
+ kfree(alx->txq.bufs);
+ kfree(alx->rxq.bufs);
+ return -ENOMEM;
+}
+
+static int alx_alloc_rings(struct alx_priv *alx)
+{
+ int err;
+
+ err = alx_alloc_descriptors(alx);
+ if (err)
+ return err;
+
+ alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
+ alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
+
+ netif_napi_add(alx->dev, &alx->napi, alx_poll, 64);
+
+ alx_reinit_rings(alx);
+ return 0;
+}
+
+static void alx_free_rings(struct alx_priv *alx)
+{
+ netif_napi_del(&alx->napi);
+ alx_free_buffers(alx);
+
+ kfree(alx->txq.bufs);
+ kfree(alx->rxq.bufs);
+
+ dma_free_coherent(&alx->hw.pdev->dev,
+ alx->descmem.size,
+ alx->descmem.virt,
+ alx->descmem.dma);
+}
+
+static void alx_config_vector_mapping(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+
+ alx_write_mem32(hw, ALX_MSI_MAP_TBL1, 0);
+ alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
+ alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
+}
+
+static void alx_irq_enable(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+
+ /* level-1 interrupt switch */
+ alx_write_mem32(hw, ALX_ISR, 0);
+ alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+ alx_post_write(hw);
+}
+
+static void alx_irq_disable(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+
+ alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
+ alx_write_mem32(hw, ALX_IMR, 0);
+ alx_post_write(hw);
+
+ synchronize_irq(alx->hw.pdev->irq);
+}
+
+static int alx_request_irq(struct alx_priv *alx)
+{
+ struct pci_dev *pdev = alx->hw.pdev;
+ struct alx_hw *hw = &alx->hw;
+ int err;
+ u32 msi_ctrl;
+
+ msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;
+
+ if (!pci_enable_msi(alx->hw.pdev)) {
+ alx->msi = true;
+
+ alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
+ msi_ctrl | ALX_MSI_MASK_SEL_LINE);
+ err = request_irq(pdev->irq, alx_intr_msi, 0,
+ alx->dev->name, alx);
+ if (!err)
+ goto out;
+ /* fall back to legacy interrupt */
+ pci_disable_msi(alx->hw.pdev);
+ }
+
+ alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
+ err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
+ alx->dev->name, alx);
+out:
+ if (!err)
+ alx_config_vector_mapping(alx);
+ return err;
+}
+
+static void alx_free_irq(struct alx_priv *alx)
+{
+ struct pci_dev *pdev = alx->hw.pdev;
+
+ free_irq(pdev->irq, alx);
+
+ if (alx->msi) {
+ pci_disable_msi(alx->hw.pdev);
+ alx->msi = false;
+ }
+}
+
+static int alx_identify_hw(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+ int rev = alx_hw_revision(hw);
+
+ if (rev > ALX_REV_C0)
+ return -EINVAL;
+
+ hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;
+
+ return 0;
+}
+
+static int alx_init_sw(struct alx_priv *alx)
+{
+ struct pci_dev *pdev = alx->hw.pdev;
+ struct alx_hw *hw = &alx->hw;
+ int err;
+
+ err = alx_identify_hw(alx);
+ if (err) {
+ dev_err(&pdev->dev, "unrecognized chip, aborting\n");
+ return err;
+ }
+
+ alx->hw.lnk_patch =
+ pdev->device == ALX_DEV_ID_AR8161 &&
+ pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
+ pdev->subsystem_device == 0x0091 &&
+ pdev->revision == 0;
+
+ hw->smb_timer = 400;
+ hw->mtu = alx->dev->mtu;
+ alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8);
+ alx->tx_ringsz = 256;
+ alx->rx_ringsz = 512;
+ hw->sleep_ctrl = ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_WOL_PHY;
+ hw->imt = 200;
+ alx->int_mask = ALX_ISR_MISC;
+ hw->dma_chnl = hw->max_dma_chnl;
+ hw->ith_tpd = alx->tx_ringsz / 3;
+ hw->link_speed = SPEED_UNKNOWN;
+ hw->adv_cfg = ADVERTISED_Autoneg |
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_1000baseT_Full;
+ hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;
+
+ hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
+ ALX_MAC_CTRL_MHASH_ALG_HI5B |
+ ALX_MAC_CTRL_BRD_EN |
+ ALX_MAC_CTRL_PCRCE |
+ ALX_MAC_CTRL_CRCE |
+ ALX_MAC_CTRL_RXFC_EN |
+ ALX_MAC_CTRL_TXFC_EN |
+ 7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;
+
+ return err;
+}
+
+
+static netdev_features_t alx_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
+ features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+
+ return features;
+}
+
+static void alx_netif_stop(struct alx_priv *alx)
+{
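+ /* refresh trans_start so the TX watchdog does not fire while the
+ * queue is being stopped
+ */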
+ alx->dev->trans_start = jiffies;
+ if (netif_carrier_ok(alx->dev)) {
+ netif_carrier_off(alx->dev);
+ netif_tx_disable(alx->dev);
+ napi_disable(&alx->napi);
+ }
+}
+
+static void alx_halt(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+
+ alx_netif_stop(alx);
+ hw->link_speed = SPEED_UNKNOWN;
+
+ alx_reset_mac(hw);
+
+ /* disable l0s/l1 */
+ alx_enable_aspm(hw, false, false);
+ alx_irq_disable(alx);
+ alx_free_buffers(alx);
+}
+
+static void alx_configure(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+
+ alx_configure_basic(hw);
+ alx_disable_rss(hw);
+ __alx_set_rx_mode(alx->dev);
+
+ alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+}
+
+static void alx_activate(struct alx_priv *alx)
+{
+ /* hardware settings were lost, restore them */
+ alx_reinit_rings(alx);
+ alx_configure(alx);
+
+ /* clear old interrupts */
+ alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
+
+ alx_irq_enable(alx);
+
+ alx_schedule_link_check(alx);
+}
+
+static void alx_reinit(struct alx_priv *alx)
+{
+ ASSERT_RTNL();
+
+ alx_halt(alx);
+ alx_activate(alx);
+}
+
+static int alx_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+ if ((max_frame < ALX_MIN_FRAME_SIZE) ||
+ (max_frame > ALX_MAX_FRAME_SIZE))
+ return -EINVAL;
+
+ if (netdev->mtu == mtu)
+ return 0;
+
+ netdev->mtu = mtu;
+ alx->hw.mtu = mtu;
+ alx->rxbuf_size = mtu > ALX_DEF_RXBUF_SIZE ?
+ ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE;
+ netdev_update_features(netdev);
+ if (netif_running(netdev))
+ alx_reinit(alx);
+ return 0;
+}
+
+static void alx_netif_start(struct alx_priv *alx)
+{
+ netif_tx_wake_all_queues(alx->dev);
+ napi_enable(&alx->napi);
+ netif_carrier_on(alx->dev);
+}
+
+static int __alx_open(struct alx_priv *alx, bool resume)
+{
+ int err;
+
+ if (!resume)
+ netif_carrier_off(alx->dev);
+
+ err = alx_alloc_rings(alx);
+ if (err)
+ return err;
+
+ alx_configure(alx);
+
+ err = alx_request_irq(alx);
+ if (err)
+ goto out_free_rings;
+
+ /* clear old interrupts */
+ alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
+
+ alx_irq_enable(alx);
+
+ if (!resume)
+ netif_tx_start_all_queues(alx->dev);
+
+ alx_schedule_link_check(alx);
+ return 0;
+
+out_free_rings:
+ alx_free_rings(alx);
+ return err;
+}
+
+static void __alx_stop(struct alx_priv *alx)
+{
+ alx_halt(alx);
+ alx_free_irq(alx);
+ alx_free_rings(alx);
+}
+
+static const char *alx_speed_desc(u16 speed)
+{
+ switch (speed) {
+ case SPEED_1000 + DUPLEX_FULL:
+ return "1 Gbps Full";
+ case SPEED_100 + DUPLEX_FULL:
+ return "100 Mbps Full";
+ case SPEED_100 + DUPLEX_HALF:
+ return "100 Mbps Half";
+ case SPEED_10 + DUPLEX_FULL:
+ return "10 Mbps Full";
+ case SPEED_10 + DUPLEX_HALF:
+ return "10 Mbps Half";
+ default:
+ return "Unknown speed";
+ }
+}
+
+static void alx_check_link(struct alx_priv *alx)
+{
+ struct alx_hw *hw = &alx->hw;
+ unsigned long flags;
+ int speed, old_speed;
+ int err;
+
+ /* clear PHY internal interrupt status, otherwise the main
+ * interrupt status will be asserted forever
+ */
+ alx_clear_phy_intr(hw);
+
+ err = alx_get_phy_link(hw, &speed);
+ if (err < 0)
+ goto reset;
+
+ spin_lock_irqsave(&alx->irq_lock, flags);
+ alx->int_mask |= ALX_ISR_PHY;
+ alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+ spin_unlock_irqrestore(&alx->irq_lock, flags);
+
+ old_speed = hw->link_speed;
+
+ if (old_speed == speed)
+ return;
+ hw->link_speed = speed;
+
+ if (speed != SPEED_UNKNOWN) {
+ netif_info(alx, link, alx->dev,
+ "NIC Up: %s\n", alx_speed_desc(speed));
+ alx_post_phy_link(hw);
+ alx_enable_aspm(hw, true, true);
+ alx_start_mac(hw);
+
+ if (old_speed == SPEED_UNKNOWN)
+ alx_netif_start(alx);
+ } else {
+ /* link is now down */
+ alx_netif_stop(alx);
+ netif_info(alx, link, alx->dev, "Link Down\n");
+ err = alx_reset_mac(hw);
+ if (err)
+ goto reset;
+ alx_irq_disable(alx);
+
+ /* MAC reset causes all HW settings to be lost, restore all */
+ err = alx_reinit_rings(alx);
+ if (err)
+ goto reset;
+ alx_configure(alx);
+ alx_enable_aspm(hw, false, true);
+ alx_post_phy_link(hw);
+ alx_irq_enable(alx);
+ }
+
+ return;
+
+reset:
+ alx_schedule_reset(alx);
+}
+
+static int alx_open(struct net_device *netdev)
+{
+ return __alx_open(netdev_priv(netdev), false);
+}
+
+static int alx_stop(struct net_device *netdev)
+{
+ __alx_stop(netdev_priv(netdev));
+ return 0;
+}
+
+static int __alx_shutdown(struct pci_dev *pdev, bool *wol_en)
+{
+ struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct net_device *netdev = alx->dev;
+ struct alx_hw *hw = &alx->hw;
+ int err, speed;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __alx_stop(alx);
+
+#ifdef CONFIG_PM_SLEEP
+ err = pci_save_state(pdev);
+ if (err)
+ return err;
+#endif
+
+ err = alx_select_powersaving_speed(hw, &speed);
+ if (err)
+ return err;
+ err = alx_clear_phy_intr(hw);
+ if (err)
+ return err;
+ err = alx_pre_suspend(hw, speed);
+ if (err)
+ return err;
+ err = alx_config_wol(hw);
+ if (err)
+ return err;
+
+ *wol_en = false;
+ if (hw->sleep_ctrl & ALX_SLEEP_ACTIVE) {
+ netif_info(alx, wol, netdev,
+ "wol: ctrl=%X, speed=%X\n",
+ hw->sleep_ctrl, speed);
+ device_set_wakeup_enable(&pdev->dev, true);
+ *wol_en = true;
+ }
+
+ pci_disable_device(pdev);
+
+ return 0;
+}
+
+static void alx_shutdown(struct pci_dev *pdev)
+{
+ int err;
+ bool wol_en;
+
+ err = __alx_shutdown(pdev, &wol_en);
+ if (!err) {
+ pci_wake_from_d3(pdev, wol_en);
+ pci_set_power_state(pdev, PCI_D3hot);
+ } else {
+ dev_err(&pdev->dev, "shutdown fail %d\n", err);
+ }
+}
+
+static void alx_link_check(struct work_struct *work)
+{
+ struct alx_priv *alx;
+
+ alx = container_of(work, struct alx_priv, link_check_wk);
+
+ rtnl_lock();
+ alx_check_link(alx);
+ rtnl_unlock();
+}
+
+static void alx_reset(struct work_struct *work)
+{
+ struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);
+
+ rtnl_lock();
+ alx_reinit(alx);
+ rtnl_unlock();
+}
+
+static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
+{
+ u8 cso, css;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ cso = skb_checksum_start_offset(skb);
+ if (cso & 1)
+ return -EINVAL;
+
+ css = cso + skb->csum_offset;
+ first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
+ first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
+ first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);
+
+ return 0;
+}
+
+static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
+{
+ struct alx_tx_queue *txq = &alx->txq;
+ struct alx_txd *tpd, *first_tpd;
+ dma_addr_t dma;
+ int maplen, f, first_idx = txq->write_idx;
+
+ first_tpd = &txq->tpd[txq->write_idx];
+ tpd = first_tpd;
+
+ maplen = skb_headlen(skb);
+ dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&alx->hw.pdev->dev, dma))
+ goto err_dma;
+
+ dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
+ dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
+
+ tpd->adrl.addr = cpu_to_le64(dma);
+ tpd->len = cpu_to_le16(maplen);
+
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+ struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[f];
+
+ if (++txq->write_idx == alx->tx_ringsz)
+ txq->write_idx = 0;
+ tpd = &txq->tpd[txq->write_idx];
+
+ tpd->word1 = first_tpd->word1;
+
+ maplen = skb_frag_size(frag);
+ dma = skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0,
+ maplen, DMA_TO_DEVICE);
+ if (dma_mapping_error(&alx->hw.pdev->dev, dma))
+ goto err_dma;
+ dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
+ dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
+
+ tpd->adrl.addr = cpu_to_le64(dma);
+ tpd->len = cpu_to_le16(maplen);
+ }
+
+ /* last TPD, set EOP flag and store skb */
+ tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
+ txq->bufs[txq->write_idx].skb = skb;
+
+ if (++txq->write_idx == alx->tx_ringsz)
+ txq->write_idx = 0;
+
+ return 0;
+
+err_dma:
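+ /* unwind: free every descriptor already set up for this skb */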
+ f = first_idx;
+ while (f != txq->write_idx) {
+ alx_free_txbuf(alx, f);
+ if (++f == alx->tx_ringsz)
+ f = 0;
+ }
+ return -ENOMEM;
+}
+
+static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_tx_queue *txq = &alx->txq;
+ struct alx_txd *first;
+ int tpdreq = skb_shinfo(skb)->nr_frags + 1;
+
+ if (alx_tpd_avail(alx) < tpdreq) {
+ netif_stop_queue(alx->dev);
+ goto drop;
+ }
+
+ first = &txq->tpd[txq->write_idx];
+ memset(first, 0, sizeof(*first));
+
+ if (alx_tx_csum(skb, first))
+ goto drop;
+
+ if (alx_map_tx_skb(alx, skb) < 0)
+ goto drop;
+
+ netdev_sent_queue(alx->dev, skb->len);
+
+ /* flush updates before updating hardware */
+ wmb();
+ alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx);
+
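+ /* stop early while fewer than 1/8 of the descriptors remain free;
+ * alx_clean_tx_irq() wakes the queue again once more than a quarter
+ * of the ring is available
+ */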
+ if (alx_tpd_avail(alx) < alx->tx_ringsz/8)
+ netif_stop_queue(alx->dev);
+
+ return NETDEV_TX_OK;
+
+drop:
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static void alx_tx_timeout(struct net_device *dev)
+{
+ struct alx_priv *alx = netdev_priv(dev);
+
+ alx_schedule_reset(alx);
+}
+
+static int alx_mdio_read(struct net_device *netdev,
+ int prtad, int devad, u16 addr)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+ u16 val;
+ int err;
+
+ if (prtad != hw->mdio.prtad)
+ return -EINVAL;
+
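+ /* MDIO_DEVAD_NONE selects a plain clause-22 register access, any
+ * other devad goes through the extended (clause-45) registers
+ */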
+ if (devad == MDIO_DEVAD_NONE)
+ err = alx_read_phy_reg(hw, addr, &val);
+ else
+ err = alx_read_phy_ext(hw, devad, addr, &val);
+
+ if (err)
+ return err;
+ return val;
+}
+
+static int alx_mdio_write(struct net_device *netdev,
+ int prtad, int devad, u16 addr, u16 val)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+ struct alx_hw *hw = &alx->hw;
+
+ if (prtad != hw->mdio.prtad)
+ return -EINVAL;
+
+ if (devad == MDIO_DEVAD_NONE)
+ return alx_write_phy_reg(hw, addr, val);
+
+ return alx_write_phy_ext(hw, devad, addr, val);
+}
+
+static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void alx_poll_controller(struct net_device *netdev)
+{
+ struct alx_priv *alx = netdev_priv(netdev);
+
+ if (alx->msi)
+ alx_intr_msi(0, alx);
+ else
+ alx_intr_legacy(0, alx);
+}
+#endif
+
+static const struct net_device_ops alx_netdev_ops = {
+ .ndo_open = alx_open,
+ .ndo_stop = alx_stop,
+ .ndo_start_xmit = alx_start_xmit,
+ .ndo_set_rx_mode = alx_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = alx_set_mac_address,
+ .ndo_change_mtu = alx_change_mtu,
+ .ndo_do_ioctl = alx_ioctl,
+ .ndo_tx_timeout = alx_tx_timeout,
+ .ndo_fix_features = alx_fix_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = alx_poll_controller,
+#endif
+};
+
+static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct alx_priv *alx;
+ struct alx_hw *hw;
+ bool phy_configured;
+ int bars, pm_cap, err;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ /* The alx chip can DMA to 64-bit addresses, but it uses a single
+ * shared register for the high 32 bits, so only a single, aligned,
+ * 4 GB physical address range can be used for descriptors.
+ */
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
+ } else {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA config, aborting\n");
+ goto out_pci_disable;
+ }
+ }
+ }
+
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ err = pci_request_selected_regions(pdev, bars, alx_drv_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed(bars:%d)\n", bars);
+ goto out_pci_disable;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_set_master(pdev);
+
+ pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (pm_cap == 0) {
+ dev_err(&pdev->dev,
+ "Can't find power management capability, aborting\n");
+ err = -EIO;
+ goto out_pci_release;
+ }
+
+ err = pci_set_power_state(pdev, PCI_D0);
+ if (err)
+ goto out_pci_release;
+
+ netdev = alloc_etherdev(sizeof(*alx));
+ if (!netdev) {
+ err = -ENOMEM;
+ goto out_pci_release;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ alx = netdev_priv(netdev);
+ alx->dev = netdev;
+ alx->hw.pdev = pdev;
+ alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
+ NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
+ hw = &alx->hw;
+ pci_set_drvdata(pdev, alx);
+
+ hw->hw_addr = pci_ioremap_bar(pdev, 0);
+ if (!hw->hw_addr) {
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ err = -EIO;
+ goto out_free_netdev;
+ }
+
+ netdev->netdev_ops = &alx_netdev_ops;
+ SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops);
+ netdev->irq = pdev->irq;
+ netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
+
+ if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
+ pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
+
+ err = alx_init_sw(alx);
+ if (err) {
+ dev_err(&pdev->dev, "net device private data init failed\n");
+ goto out_unmap;
+ }
+
+ alx_reset_pcie(hw);
+
+ phy_configured = alx_phy_configured(hw);
+
+ if (!phy_configured)
+ alx_reset_phy(hw);
+
+ err = alx_reset_mac(hw);
+ if (err) {
+ dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
+ goto out_unmap;
+ }
+
+ /* setup link to put it in a known good starting state */
+ if (!phy_configured) {
+ err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed to configure PHY speed/duplex (err=%d)\n",
+ err);
+ goto out_unmap;
+ }
+ }
+
+ netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+
+ if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
+ dev_warn(&pdev->dev,
+ "Invalid permanent address programmed, using random one\n");
+ eth_hw_addr_random(netdev);
+ memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
+ }
+
+ memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
+ memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
+ memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);
+
+ hw->mdio.prtad = 0;
+ hw->mdio.mmds = 0;
+ hw->mdio.dev = netdev;
+ hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
+ MDIO_SUPPORTS_C22 |
+ MDIO_EMULATE_C22;
+ hw->mdio.mdio_read = alx_mdio_read;
+ hw->mdio.mdio_write = alx_mdio_write;
+
+ if (!alx_get_phy_info(hw)) {
+ dev_err(&pdev->dev, "failed to identify PHY\n");
+ err = -EIO;
+ goto out_unmap;
+ }
+
+ INIT_WORK(&alx->link_check_wk, alx_link_check);
+ INIT_WORK(&alx->reset_wk, alx_reset);
+ spin_lock_init(&alx->hw.mdio_lock);
+ spin_lock_init(&alx->irq_lock);
+
+ netif_carrier_off(netdev);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "register netdevice failed\n");
+ goto out_unmap;
+ }
+
+ device_set_wakeup_enable(&pdev->dev, hw->sleep_ctrl);
+
+ netdev_info(netdev,
+ "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
+ netdev->dev_addr);
+
+ return 0;
+
+out_unmap:
+ iounmap(hw->hw_addr);
+out_free_netdev:
+ free_netdev(netdev);
+out_pci_release:
+ pci_release_selected_regions(pdev, bars);
+out_pci_disable:
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void alx_remove(struct pci_dev *pdev)
+{
+ struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct alx_hw *hw = &alx->hw;
+
+ cancel_work_sync(&alx->link_check_wk);
+ cancel_work_sync(&alx->reset_wk);
+
+ /* restore permanent mac address */
+ alx_set_macaddr(hw, hw->perm_addr);
+
+ unregister_netdev(alx->dev);
+ iounmap(hw->hw_addr);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ free_netdev(alx->dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int alx_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int err;
+ bool wol_en;
+
+ err = __alx_shutdown(pdev, &wol_en);
+ if (err) {
+ dev_err(&pdev->dev, "shutdown fail in suspend %d\n", err);
+ return err;
+ }
+
+ if (wol_en) {
+ pci_prepare_to_sleep(pdev);
+ } else {
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
+
+ return 0;
+}
+
+static int alx_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct net_device *netdev = alx->dev;
+ struct alx_hw *hw = &alx->hw;
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ hw->link_speed = SPEED_UNKNOWN;
+ alx->int_mask = ALX_ISR_MISC;
+
+ alx_reset_pcie(hw);
+ alx_reset_phy(hw);
+
+ err = alx_reset_mac(hw);
+ if (err) {
+ netif_err(alx, hw, alx->dev,
+ "resume:reset_mac fail %d\n", err);
+ return -EIO;
+ }
+
+ err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
+ if (err) {
+ netif_err(alx, hw, alx->dev,
+ "resume:setup_speed_duplex fail %d\n", err);
+ return -EIO;
+ }
+
+ if (netif_running(netdev)) {
+ err = __alx_open(alx, true);
+ if (err)
+ return err;
+ }
+
+ netif_device_attach(netdev);
+
+ return err;
+}
+#endif
+
+static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct net_device *netdev = alx->dev;
+ pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;
+
+ dev_info(&pdev->dev, "pci error detected\n");
+
+ rtnl_lock();
+
+ if (netif_running(netdev)) {
+ netif_device_detach(netdev);
+ alx_halt(alx);
+ }
+
+ if (state == pci_channel_io_perm_failure)
+ rc = PCI_ERS_RESULT_DISCONNECT;
+ else
+ pci_disable_device(pdev);
+
+ rtnl_unlock();
+
+ return rc;
+}
+
+static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
+{
+ struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct alx_hw *hw = &alx->hw;
+ pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+
+ dev_info(&pdev->dev, "pci error slot reset\n");
+
+ rtnl_lock();
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
+ goto out;
+ }
+
+ pci_set_master(pdev);
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ alx_reset_pcie(hw);
+ if (!alx_reset_mac(hw))
+ rc = PCI_ERS_RESULT_RECOVERED;
+out:
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ rtnl_unlock();
+
+ return rc;
+}
+
+static void alx_pci_error_resume(struct pci_dev *pdev)
+{
+ struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct net_device *netdev = alx->dev;
+
+ dev_info(&pdev->dev, "pci error resume\n");
+
+ rtnl_lock();
+
+ if (netif_running(netdev)) {
+ alx_activate(alx);
+ netif_device_attach(netdev);
+ }
+
+ rtnl_unlock();
+}
+
+static const struct pci_error_handlers alx_err_handlers = {
+ .error_detected = alx_pci_error_detected,
+ .slot_reset = alx_pci_error_slot_reset,
+ .resume = alx_pci_error_resume,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
+#define ALX_PM_OPS (&alx_pm_ops)
+#else
+#define ALX_PM_OPS NULL
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = {
+ { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
+ .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+ { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
+ .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+ { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
+ .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+ { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
+ { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
+ {}
+};
+
+static struct pci_driver alx_driver = {
+ .name = alx_drv_name,
+ .id_table = alx_pci_tbl,
+ .probe = alx_probe,
+ .remove = alx_remove,
+ .shutdown = alx_shutdown,
+ .err_handler = &alx_err_handlers,
+ .driver.pm = ALX_PM_OPS,
+};
+
+module_pci_driver(alx_driver);
+MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
+MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
+MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
+MODULE_DESCRIPTION(
+ "Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
new file mode 100644
index 000000000000..e4358c98bc4e
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -0,0 +1,810 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This file is free software: you may copy, redistribute and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ALX_REG_H
+#define ALX_REG_H
+
+#define ALX_DEV_ID_AR8161 0x1091
+#define ALX_DEV_ID_E2200 0xe091
+#define ALX_DEV_ID_AR8162 0x1090
+#define ALX_DEV_ID_AR8171 0x10A1
+#define ALX_DEV_ID_AR8172 0x10A0
+
+/* rev definition,
+ * bit(0): with xD support
+ * bit(1): with Card Reader function
+ * bit(7:2): real revision
+ */
+#define ALX_PCI_REVID_SHIFT 3
+#define ALX_REV_A0 0
+#define ALX_REV_A1 1
+#define ALX_REV_B0 2
+#define ALX_REV_C0 3
+
+#define ALX_DEV_CTRL 0x0060
+#define ALX_DEV_CTRL_MAXRRS_MIN 2
+
+#define ALX_MSIX_MASK 0x0090
+
+#define ALX_UE_SVRT 0x010C
+#define ALX_UE_SVRT_FCPROTERR BIT(13)
+#define ALX_UE_SVRT_DLPROTERR BIT(4)
+
+/* eeprom & flash load register */
+#define ALX_EFLD 0x0204
+#define ALX_EFLD_F_EXIST BIT(10)
+#define ALX_EFLD_E_EXIST BIT(9)
+#define ALX_EFLD_STAT BIT(5)
+#define ALX_EFLD_START BIT(0)
+
+/* eFuse load register */
+#define ALX_SLD 0x0218
+#define ALX_SLD_STAT BIT(12)
+#define ALX_SLD_START BIT(11)
+#define ALX_SLD_MAX_TO 100
+
+#define ALX_PDLL_TRNS1 0x1104
+#define ALX_PDLL_TRNS1_D3PLLOFF_EN BIT(11)
+
+#define ALX_PMCTRL 0x12F8
+#define ALX_PMCTRL_HOTRST_WTEN BIT(31)
+/* bit30: L0s/L1 controlled by MAC based on throughput (setting in 15A0) */
+#define ALX_PMCTRL_ASPM_FCEN BIT(30)
+#define ALX_PMCTRL_SADLY_EN BIT(29)
+#define ALX_PMCTRL_LCKDET_TIMER_MASK 0xF
+#define ALX_PMCTRL_LCKDET_TIMER_SHIFT 24
+#define ALX_PMCTRL_LCKDET_TIMER_DEF 0xC
+/* bit[23:20]: if pm_request_l1 time exceeds this value, enter L0s instead of L1 */
+#define ALX_PMCTRL_L1REQ_TO_MASK 0xF
+#define ALX_PMCTRL_L1REQ_TO_SHIFT 20
+#define ALX_PMCTRL_L1REG_TO_DEF 0xF
+#define ALX_PMCTRL_TXL1_AFTER_L0S BIT(19)
+#define ALX_PMCTRL_L1_TIMER_MASK 0x7
+#define ALX_PMCTRL_L1_TIMER_SHIFT 16
+#define ALX_PMCTRL_L1_TIMER_16US 4
+#define ALX_PMCTRL_RCVR_WT_1US BIT(15)
+/* bit13: enable pcie clk switch in L1 state */
+#define ALX_PMCTRL_L1_CLKSW_EN BIT(13)
+#define ALX_PMCTRL_L0S_EN BIT(12)
+#define ALX_PMCTRL_RXL1_AFTER_L0S BIT(11)
+#define ALX_PMCTRL_L1_BUFSRX_EN BIT(7)
+/* bit6: power down serdes RX */
+#define ALX_PMCTRL_L1_SRDSRX_PWD BIT(6)
+#define ALX_PMCTRL_L1_SRDSPLL_EN BIT(5)
+#define ALX_PMCTRL_L1_SRDS_EN BIT(4)
+#define ALX_PMCTRL_L1_EN BIT(3)
+
+/*******************************************************/
+/* following registers are mapped only to memory space */
+/*******************************************************/
+
+#define ALX_MASTER 0x1400
+/* bit12: 1: always select pclk from serdes, do not switch to 25M */
+#define ALX_MASTER_PCLKSEL_SRDS BIT(12)
+/* bit11: irq moduration for rx */
+#define ALX_MASTER_IRQMOD2_EN BIT(11)
+/* bit10: irq moduration for tx/rx */
+#define ALX_MASTER_IRQMOD1_EN BIT(10)
+#define ALX_MASTER_SYSALVTIMER_EN BIT(7)
+#define ALX_MASTER_OOB_DIS BIT(6)
+/* bit5: wakeup without pcie clk */
+#define ALX_MASTER_WAKEN_25M BIT(5)
+/* bit0: MAC & DMA reset */
+#define ALX_MASTER_DMA_MAC_RST BIT(0)
+#define ALX_DMA_MAC_RST_TO 50
+
+#define ALX_IRQ_MODU_TIMER 0x1408
+#define ALX_IRQ_MODU_TIMER1_MASK 0xFFFF
+#define ALX_IRQ_MODU_TIMER1_SHIFT 0
+
+#define ALX_PHY_CTRL 0x140C
+#define ALX_PHY_CTRL_100AB_EN BIT(17)
+/* bit14: affect MAC & PHY, go to low power sts */
+#define ALX_PHY_CTRL_POWER_DOWN BIT(14)
+/* bit13: 1:pll always ON, 0:can switch in lpw */
+#define ALX_PHY_CTRL_PLL_ON BIT(13)
+#define ALX_PHY_CTRL_RST_ANALOG BIT(12)
+#define ALX_PHY_CTRL_HIB_PULSE BIT(11)
+#define ALX_PHY_CTRL_HIB_EN BIT(10)
+#define ALX_PHY_CTRL_IDDQ BIT(7)
+#define ALX_PHY_CTRL_GATE_25M BIT(5)
+#define ALX_PHY_CTRL_LED_MODE BIT(2)
+/* bit0: out of dsp RST state */
+#define ALX_PHY_CTRL_DSPRST_OUT BIT(0)
+#define ALX_PHY_CTRL_DSPRST_TO 80
+#define ALX_PHY_CTRL_CLS (ALX_PHY_CTRL_LED_MODE | \
+ ALX_PHY_CTRL_100AB_EN | \
+ ALX_PHY_CTRL_PLL_ON)
+
+#define ALX_MAC_STS 0x1410
+#define ALX_MAC_STS_TXQ_BUSY BIT(3)
+#define ALX_MAC_STS_RXQ_BUSY BIT(2)
+#define ALX_MAC_STS_TXMAC_BUSY BIT(1)
+#define ALX_MAC_STS_RXMAC_BUSY BIT(0)
+#define ALX_MAC_STS_IDLE (ALX_MAC_STS_TXQ_BUSY | \
+ ALX_MAC_STS_RXQ_BUSY | \
+ ALX_MAC_STS_TXMAC_BUSY | \
+ ALX_MAC_STS_RXMAC_BUSY)
+
+#define ALX_MDIO 0x1414
+#define ALX_MDIO_MODE_EXT BIT(30)
+#define ALX_MDIO_BUSY BIT(27)
+#define ALX_MDIO_CLK_SEL_MASK 0x7
+#define ALX_MDIO_CLK_SEL_SHIFT 24
+#define ALX_MDIO_CLK_SEL_25MD4 0
+#define ALX_MDIO_CLK_SEL_25MD128 7
+#define ALX_MDIO_START BIT(23)
+#define ALX_MDIO_SPRES_PRMBL BIT(22)
+/* bit21: 1:read,0:write */
+#define ALX_MDIO_OP_READ BIT(21)
+#define ALX_MDIO_REG_MASK 0x1F
+#define ALX_MDIO_REG_SHIFT 16
+#define ALX_MDIO_DATA_MASK 0xFFFF
+#define ALX_MDIO_DATA_SHIFT 0
+#define ALX_MDIO_MAX_AC_TO 120
+
+#define ALX_MDIO_EXTN 0x1448
+#define ALX_MDIO_EXTN_DEVAD_MASK 0x1F
+#define ALX_MDIO_EXTN_DEVAD_SHIFT 16
+#define ALX_MDIO_EXTN_REG_MASK 0xFFFF
+#define ALX_MDIO_EXTN_REG_SHIFT 0
+
+#define ALX_SERDES 0x1424
+#define ALX_SERDES_PHYCLK_SLWDWN BIT(18)
+#define ALX_SERDES_MACCLK_SLWDWN BIT(17)
+
+#define ALX_LPI_CTRL 0x1440
+#define ALX_LPI_CTRL_EN BIT(0)
+
+/* for B0+, bit[13..] for C0+ */
+#define ALX_HRTBT_EXT_CTRL 0x1AD0
+#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_MASK 0x3F
+#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_SHIFT 24
+#define L1F_HRTBT_EXT_CTRL_SWOI_STARTUP_PKT_EN BIT(23)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_FRAGMENTED BIT(22)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_FRAGMENTED BIT(21)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_KEEPALIVE_EN BIT(20)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_HAS_VLAN BIT(19)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_8023 BIT(18)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_IPV6 BIT(17)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_KEEPALIVE_EN BIT(16)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_HAS_VLAN BIT(15)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_8023 BIT(14)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_IPV6 BIT(13)
+#define ALX_HRTBT_EXT_CTRL_NS_EN BIT(12)
+#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_MASK 0xFF
+#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_SHIFT 4
+#define ALX_HRTBT_EXT_CTRL_IS_8023 BIT(3)
+#define ALX_HRTBT_EXT_CTRL_IS_IPV6 BIT(2)
+#define ALX_HRTBT_EXT_CTRL_WAKEUP_EN BIT(1)
+#define ALX_HRTBT_EXT_CTRL_ARP_EN BIT(0)
+
+#define ALX_HRTBT_REM_IPV4_ADDR 0x1AD4
+#define ALX_HRTBT_HOST_IPV4_ADDR 0x1478
+#define ALX_HRTBT_REM_IPV6_ADDR3 0x1AD8
+#define ALX_HRTBT_REM_IPV6_ADDR2 0x1ADC
+#define ALX_HRTBT_REM_IPV6_ADDR1 0x1AE0
+#define ALX_HRTBT_REM_IPV6_ADDR0 0x1AE4
+
+/* 1B8C ~ 1B94 for C0+ */
+#define ALX_SWOI_ACER_CTRL 0x1B8C
+#define ALX_SWOI_ORIG_ACK_NAK_EN BIT(20)
+#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_MASK 0XFF
+#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_SHIFT 12
+#define ALX_SWOI_ORIG_ACK_ADDR_MASK 0XFFF
+#define ALX_SWOI_ORIG_ACK_ADDR_SHIFT 0
+
+#define ALX_SWOI_IOAC_CTRL_2 0x1B90
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_MASK 0xFF
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_SHIFT 24
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_MASK 0xFFF
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_SHIFT 12
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_MASK 0xFFF
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_SHIFT 0
+
+#define ALX_SWOI_IOAC_CTRL_3 0x1B94
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_MASK 0xFF
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_SHIFT 24
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_MASK 0xFFF
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_SHIFT 12
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_MASK 0xFFF
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_SHIFT 0
+
+/* for B0 */
+#define ALX_IDLE_DECISN_TIMER 0x1474
+/* 1ms */
+#define ALX_IDLE_DECISN_TIMER_DEF 0x400
+
+#define ALX_MAC_CTRL 0x1480
+#define ALX_MAC_CTRL_FAST_PAUSE BIT(31)
+#define ALX_MAC_CTRL_WOLSPED_SWEN BIT(30)
+/* bit29: 1: legacy (high 5 bits), 0: Marvell (low 5 bits) */
+#define ALX_MAC_CTRL_MHASH_ALG_HI5B BIT(29)
+#define ALX_MAC_CTRL_BRD_EN BIT(26)
+#define ALX_MAC_CTRL_MULTIALL_EN BIT(25)
+#define ALX_MAC_CTRL_SPEED_MASK 0x3
+#define ALX_MAC_CTRL_SPEED_SHIFT 20
+#define ALX_MAC_CTRL_SPEED_10_100 1
+#define ALX_MAC_CTRL_SPEED_1000 2
+#define ALX_MAC_CTRL_PROMISC_EN BIT(15)
+#define ALX_MAC_CTRL_VLANSTRIP BIT(14)
+#define ALX_MAC_CTRL_PRMBLEN_MASK 0xF
+#define ALX_MAC_CTRL_PRMBLEN_SHIFT 10
+#define ALX_MAC_CTRL_PCRCE BIT(7)
+#define ALX_MAC_CTRL_CRCE BIT(6)
+#define ALX_MAC_CTRL_FULLD BIT(5)
+#define ALX_MAC_CTRL_RXFC_EN BIT(3)
+#define ALX_MAC_CTRL_TXFC_EN BIT(2)
+#define ALX_MAC_CTRL_RX_EN BIT(1)
+#define ALX_MAC_CTRL_TX_EN BIT(0)
+
+#define ALX_STAD0 0x1488
+#define ALX_STAD1 0x148C
+
+#define ALX_HASH_TBL0 0x1490
+#define ALX_HASH_TBL1 0x1494
+
+#define ALX_MTU 0x149C
+#define ALX_MTU_JUMBO_TH 1514
+#define ALX_MTU_STD_ALGN 1536
+
+#define ALX_SRAM5 0x1524
+#define ALX_SRAM_RXF_LEN_MASK 0xFFF
+#define ALX_SRAM_RXF_LEN_SHIFT 0
+#define ALX_SRAM_RXF_LEN_8K (8*1024)
+
+#define ALX_SRAM9 0x1534
+#define ALX_SRAM_LOAD_PTR BIT(0)
+
+#define ALX_RX_BASE_ADDR_HI 0x1540
+
+#define ALX_TX_BASE_ADDR_HI 0x1544
+
+#define ALX_RFD_ADDR_LO 0x1550
+#define ALX_RFD_RING_SZ 0x1560
+#define ALX_RFD_BUF_SZ 0x1564
+
+#define ALX_RRD_ADDR_LO 0x1568
+#define ALX_RRD_RING_SZ 0x1578
+
+/* pri3: highest, pri0: lowest */
+#define ALX_TPD_PRI3_ADDR_LO 0x14E4
+#define ALX_TPD_PRI2_ADDR_LO 0x14E0
+#define ALX_TPD_PRI1_ADDR_LO 0x157C
+#define ALX_TPD_PRI0_ADDR_LO 0x1580
+
+/* producer index is 16bit */
+#define ALX_TPD_PRI3_PIDX 0x1618
+#define ALX_TPD_PRI2_PIDX 0x161A
+#define ALX_TPD_PRI1_PIDX 0x15F0
+#define ALX_TPD_PRI0_PIDX 0x15F2
+
+/* consumer index is 16bit */
+#define ALX_TPD_PRI3_CIDX 0x161C
+#define ALX_TPD_PRI2_CIDX 0x161E
+#define ALX_TPD_PRI1_CIDX 0x15F4
+#define ALX_TPD_PRI0_CIDX 0x15F6
+
+#define ALX_TPD_RING_SZ 0x1584
+
+#define ALX_TXQ0 0x1590
+#define ALX_TXQ0_TXF_BURST_PREF_MASK 0xFFFF
+#define ALX_TXQ0_TXF_BURST_PREF_SHIFT 16
+#define ALX_TXQ_TXF_BURST_PREF_DEF 0x200
+#define ALX_TXQ0_LSO_8023_EN BIT(7)
+#define ALX_TXQ0_MODE_ENHANCE BIT(6)
+#define ALX_TXQ0_EN BIT(5)
+#define ALX_TXQ0_SUPT_IPOPT BIT(4)
+#define ALX_TXQ0_TPD_BURSTPREF_MASK 0xF
+#define ALX_TXQ0_TPD_BURSTPREF_SHIFT 0
+#define ALX_TXQ_TPD_BURSTPREF_DEF 5
+
+#define ALX_TXQ1 0x1594
+/* bit11: drop packets whose length exceeds the RFD buffer size */
+#define ALX_TXQ1_ERRLGPKT_DROP_EN BIT(11)
+#define ALX_TXQ1_JUMBO_TSO_TH (7*1024)
+
+#define ALX_RXQ0 0x15A0
+#define ALX_RXQ0_EN BIT(31)
+#define ALX_RXQ0_RSS_HASH_EN BIT(29)
+#define ALX_RXQ0_RSS_MODE_MASK 0x3
+#define ALX_RXQ0_RSS_MODE_SHIFT 26
+#define ALX_RXQ0_RSS_MODE_DIS 0
+#define ALX_RXQ0_RSS_MODE_MQMI 3
+#define ALX_RXQ0_NUM_RFD_PREF_MASK 0x3F
+#define ALX_RXQ0_NUM_RFD_PREF_SHIFT 20
+#define ALX_RXQ0_NUM_RFD_PREF_DEF 8
+#define ALX_RXQ0_IDT_TBL_SIZE_MASK 0x1FF
+#define ALX_RXQ0_IDT_TBL_SIZE_SHIFT 8
+#define ALX_RXQ0_IDT_TBL_SIZE_DEF 0x100
+#define ALX_RXQ0_IDT_TBL_SIZE_NORMAL 128
+#define ALX_RXQ0_IPV6_PARSE_EN BIT(7)
+#define ALX_RXQ0_RSS_HSTYP_MASK 0xF
+#define ALX_RXQ0_RSS_HSTYP_SHIFT 2
+#define ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN BIT(5)
+#define ALX_RXQ0_RSS_HSTYP_IPV6_EN BIT(4)
+#define ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN BIT(3)
+#define ALX_RXQ0_RSS_HSTYP_IPV4_EN BIT(2)
+#define ALX_RXQ0_RSS_HSTYP_ALL (ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN | \
+ ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN | \
+ ALX_RXQ0_RSS_HSTYP_IPV6_EN | \
+ ALX_RXQ0_RSS_HSTYP_IPV4_EN)
+#define ALX_RXQ0_ASPM_THRESH_MASK 0x3
+#define ALX_RXQ0_ASPM_THRESH_SHIFT 0
+#define ALX_RXQ0_ASPM_THRESH_100M 3
+
+#define ALX_RXQ2 0x15A8
+#define ALX_RXQ2_RXF_XOFF_THRESH_MASK 0xFFF
+#define ALX_RXQ2_RXF_XOFF_THRESH_SHIFT 16
+#define ALX_RXQ2_RXF_XON_THRESH_MASK 0xFFF
+#define ALX_RXQ2_RXF_XON_THRESH_SHIFT 0
+/* Size = tx-packet(1522) + IPG(12) + SOF(8) + 64(Pause) + IPG(12) + SOF(8) +
+ * rx-packet(1522) + delay-of-link(64)
+ * = 3212.
+ */
+#define ALX_RXQ2_RXF_FLOW_CTRL_RSVD 3212
+
+#define ALX_DMA 0x15C0
+#define ALX_DMA_RCHNL_SEL_MASK 0x3
+#define ALX_DMA_RCHNL_SEL_SHIFT 26
+#define ALX_DMA_WDLY_CNT_MASK 0xF
+#define ALX_DMA_WDLY_CNT_SHIFT 16
+#define ALX_DMA_WDLY_CNT_DEF 4
+#define ALX_DMA_RDLY_CNT_MASK 0x1F
+#define ALX_DMA_RDLY_CNT_SHIFT 11
+#define ALX_DMA_RDLY_CNT_DEF 15
+/* bit10: 0:tpd with pri, 1: data */
+#define ALX_DMA_RREQ_PRI_DATA BIT(10)
+#define ALX_DMA_RREQ_BLEN_MASK 0x7
+#define ALX_DMA_RREQ_BLEN_SHIFT 4
+#define ALX_DMA_RORDER_MODE_MASK 0x7
+#define ALX_DMA_RORDER_MODE_SHIFT 0
+#define ALX_DMA_RORDER_MODE_OUT 4
+
+#define ALX_WOL0 0x14A0
+#define ALX_WOL0_PME_LINK BIT(5)
+#define ALX_WOL0_LINK_EN BIT(4)
+#define ALX_WOL0_PME_MAGIC_EN BIT(3)
+#define ALX_WOL0_MAGIC_EN BIT(2)
+
+#define ALX_RFD_PIDX 0x15E0
+
+#define ALX_RFD_CIDX 0x15F8
+
+/* MIB */
+#define ALX_MIB_BASE 0x1700
+#define ALX_MIB_RX_OK (ALX_MIB_BASE + 0)
+#define ALX_MIB_RX_ERRADDR (ALX_MIB_BASE + 92)
+#define ALX_MIB_TX_OK (ALX_MIB_BASE + 96)
+#define ALX_MIB_TX_MCCNT (ALX_MIB_BASE + 192)
+
+#define ALX_RX_STATS_BIN ALX_MIB_RX_OK
+#define ALX_RX_STATS_END ALX_MIB_RX_ERRADDR
+#define ALX_TX_STATS_BIN ALX_MIB_TX_OK
+#define ALX_TX_STATS_END ALX_MIB_TX_MCCNT
+
+#define ALX_ISR 0x1600
+#define ALX_ISR_DIS BIT(31)
+#define ALX_ISR_RX_Q7 BIT(30)
+#define ALX_ISR_RX_Q6 BIT(29)
+#define ALX_ISR_RX_Q5 BIT(28)
+#define ALX_ISR_RX_Q4 BIT(27)
+#define ALX_ISR_PCIE_LNKDOWN BIT(26)
+#define ALX_ISR_RX_Q3 BIT(19)
+#define ALX_ISR_RX_Q2 BIT(18)
+#define ALX_ISR_RX_Q1 BIT(17)
+#define ALX_ISR_RX_Q0 BIT(16)
+#define ALX_ISR_TX_Q0 BIT(15)
+#define ALX_ISR_PHY BIT(12)
+#define ALX_ISR_DMAW BIT(10)
+#define ALX_ISR_DMAR BIT(9)
+#define ALX_ISR_TXF_UR BIT(8)
+#define ALX_ISR_TX_Q3 BIT(7)
+#define ALX_ISR_TX_Q2 BIT(6)
+#define ALX_ISR_TX_Q1 BIT(5)
+#define ALX_ISR_RFD_UR BIT(4)
+#define ALX_ISR_RXF_OV BIT(3)
+#define ALX_ISR_MANU BIT(2)
+#define ALX_ISR_TIMER BIT(1)
+#define ALX_ISR_SMB BIT(0)
+
+#define ALX_IMR 0x1604
+
+/* re-send assert message if SW does not respond */
+#define ALX_INT_RETRIG 0x1608
+/* 40ms */
+#define ALX_INT_RETRIG_TO 20000
+
+#define ALX_SMB_TIMER 0x15C4
+
+#define ALX_TINT_TPD_THRSHLD 0x15C8
+
+#define ALX_TINT_TIMER 0x15CC
+
+#define ALX_CLK_GATE 0x1814
+#define ALX_CLK_GATE_RXMAC BIT(5)
+#define ALX_CLK_GATE_TXMAC BIT(4)
+#define ALX_CLK_GATE_RXQ BIT(3)
+#define ALX_CLK_GATE_TXQ BIT(2)
+#define ALX_CLK_GATE_DMAR BIT(1)
+#define ALX_CLK_GATE_DMAW BIT(0)
+#define ALX_CLK_GATE_ALL (ALX_CLK_GATE_RXMAC | \
+ ALX_CLK_GATE_TXMAC | \
+ ALX_CLK_GATE_RXQ | \
+ ALX_CLK_GATE_TXQ | \
+ ALX_CLK_GATE_DMAR | \
+ ALX_CLK_GATE_DMAW)
+
+/* interop between drivers */
+#define ALX_DRV 0x1804
+#define ALX_DRV_PHY_AUTO BIT(28)
+#define ALX_DRV_PHY_1000 BIT(27)
+#define ALX_DRV_PHY_100 BIT(26)
+#define ALX_DRV_PHY_10 BIT(25)
+#define ALX_DRV_PHY_DUPLEX BIT(24)
+/* bit23: adv Pause */
+#define ALX_DRV_PHY_PAUSE BIT(23)
+/* bit22: adv Asym Pause */
+#define ALX_DRV_PHY_MASK 0xFF
+#define ALX_DRV_PHY_SHIFT 21
+#define ALX_DRV_PHY_UNKNOWN 0
+
+/* flag indicating the PHY has been initialized */
+#define ALX_PHY_INITED 0x003F
+
+/* reg 1830 ~ 186C for C0+, 16 bit map patterns and wake packet detection */
+#define ALX_WOL_CTRL2 0x1830
+#define ALX_WOL_CTRL2_DATA_STORE BIT(3)
+#define ALX_WOL_CTRL2_PTRN_EVT BIT(2)
+#define ALX_WOL_CTRL2_PME_PTRN_EN BIT(1)
+#define ALX_WOL_CTRL2_PTRN_EN BIT(0)
+
+#define ALX_WOL_CTRL3 0x1834
+#define ALX_WOL_CTRL3_PTRN_ADDR_MASK 0xFFFFF
+#define ALX_WOL_CTRL3_PTRN_ADDR_SHIFT 0
+
+#define ALX_WOL_CTRL4 0x1838
+#define ALX_WOL_CTRL4_PT15_MATCH BIT(31)
+#define ALX_WOL_CTRL4_PT14_MATCH BIT(30)
+#define ALX_WOL_CTRL4_PT13_MATCH BIT(29)
+#define ALX_WOL_CTRL4_PT12_MATCH BIT(28)
+#define ALX_WOL_CTRL4_PT11_MATCH BIT(27)
+#define ALX_WOL_CTRL4_PT10_MATCH BIT(26)
+#define ALX_WOL_CTRL4_PT9_MATCH BIT(25)
+#define ALX_WOL_CTRL4_PT8_MATCH BIT(24)
+#define ALX_WOL_CTRL4_PT7_MATCH BIT(23)
+#define ALX_WOL_CTRL4_PT6_MATCH BIT(22)
+#define ALX_WOL_CTRL4_PT5_MATCH BIT(21)
+#define ALX_WOL_CTRL4_PT4_MATCH BIT(20)
+#define ALX_WOL_CTRL4_PT3_MATCH BIT(19)
+#define ALX_WOL_CTRL4_PT2_MATCH BIT(18)
+#define ALX_WOL_CTRL4_PT1_MATCH BIT(17)
+#define ALX_WOL_CTRL4_PT0_MATCH BIT(16)
+#define ALX_WOL_CTRL4_PT15_EN BIT(15)
+#define ALX_WOL_CTRL4_PT14_EN BIT(14)
+#define ALX_WOL_CTRL4_PT13_EN BIT(13)
+#define ALX_WOL_CTRL4_PT12_EN BIT(12)
+#define ALX_WOL_CTRL4_PT11_EN BIT(11)
+#define ALX_WOL_CTRL4_PT10_EN BIT(10)
+#define ALX_WOL_CTRL4_PT9_EN BIT(9)
+#define ALX_WOL_CTRL4_PT8_EN BIT(8)
+#define ALX_WOL_CTRL4_PT7_EN BIT(7)
+#define ALX_WOL_CTRL4_PT6_EN BIT(6)
+#define ALX_WOL_CTRL4_PT5_EN BIT(5)
+#define ALX_WOL_CTRL4_PT4_EN BIT(4)
+#define ALX_WOL_CTRL4_PT3_EN BIT(3)
+#define ALX_WOL_CTRL4_PT2_EN BIT(2)
+#define ALX_WOL_CTRL4_PT1_EN BIT(1)
+#define ALX_WOL_CTRL4_PT0_EN BIT(0)
+
+#define ALX_WOL_CTRL5 0x183C
+#define ALX_WOL_CTRL5_PT3_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT3_LEN_SHIFT 24
+#define ALX_WOL_CTRL5_PT2_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT2_LEN_SHIFT 16
+#define ALX_WOL_CTRL5_PT1_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT1_LEN_SHIFT 8
+#define ALX_WOL_CTRL5_PT0_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT0_LEN_SHIFT 0
+
+#define ALX_WOL_CTRL6 0x1840
+#define ALX_WOL_CTRL5_PT7_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT7_LEN_SHIFT 24
+#define ALX_WOL_CTRL5_PT6_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT6_LEN_SHIFT 16
+#define ALX_WOL_CTRL5_PT5_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT5_LEN_SHIFT 8
+#define ALX_WOL_CTRL5_PT4_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT4_LEN_SHIFT 0
+
+#define ALX_WOL_CTRL7 0x1844
+#define ALX_WOL_CTRL5_PT11_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT11_LEN_SHIFT 24
+#define ALX_WOL_CTRL5_PT10_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT10_LEN_SHIFT 16
+#define ALX_WOL_CTRL5_PT9_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT9_LEN_SHIFT 8
+#define ALX_WOL_CTRL5_PT8_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT8_LEN_SHIFT 0
+
+#define ALX_WOL_CTRL8 0x1848
+#define ALX_WOL_CTRL5_PT15_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT15_LEN_SHIFT 24
+#define ALX_WOL_CTRL5_PT14_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT14_LEN_SHIFT 16
+#define ALX_WOL_CTRL5_PT13_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT13_LEN_SHIFT 8
+#define ALX_WOL_CTRL5_PT12_LEN_MASK 0xFF
+#define ALX_WOL_CTRL5_PT12_LEN_SHIFT 0
+
+#define ALX_ACER_FIXED_PTN0 0x1850
+#define ALX_ACER_FIXED_PTN0_MASK 0xFFFFFFFF
+#define ALX_ACER_FIXED_PTN0_SHIFT 0
+
+#define ALX_ACER_FIXED_PTN1 0x1854
+#define ALX_ACER_FIXED_PTN1_MASK 0xFFFF
+#define ALX_ACER_FIXED_PTN1_SHIFT 0
+
+#define ALX_ACER_RANDOM_NUM0 0x1858
+#define ALX_ACER_RANDOM_NUM0_MASK 0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM0_SHIFT 0
+
+#define ALX_ACER_RANDOM_NUM1 0x185C
+#define ALX_ACER_RANDOM_NUM1_MASK 0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM1_SHIFT 0
+
+#define ALX_ACER_RANDOM_NUM2 0x1860
+#define ALX_ACER_RANDOM_NUM2_MASK 0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM2_SHIFT 0
+
+#define ALX_ACER_RANDOM_NUM3 0x1864
+#define ALX_ACER_RANDOM_NUM3_MASK 0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM3_SHIFT 0
+
+#define ALX_ACER_MAGIC 0x1868
+#define ALX_ACER_MAGIC_EN BIT(31)
+#define ALX_ACER_MAGIC_PME_EN BIT(30)
+#define ALX_ACER_MAGIC_MATCH BIT(29)
+#define ALX_ACER_MAGIC_FF_CHECK BIT(10)
+#define ALX_ACER_MAGIC_RAN_LEN_MASK 0x1F
+#define ALX_ACER_MAGIC_RAN_LEN_SHIFT 5
+#define ALX_ACER_MAGIC_FIX_LEN_MASK 0x1F
+#define ALX_ACER_MAGIC_FIX_LEN_SHIFT 0
+
+#define ALX_ACER_TIMER 0x186C
+#define ALX_ACER_TIMER_EN BIT(31)
+#define ALX_ACER_TIMER_PME_EN BIT(30)
+#define ALX_ACER_TIMER_MATCH BIT(29)
+#define ALX_ACER_TIMER_THRES_MASK 0x1FFFF
+#define ALX_ACER_TIMER_THRES_SHIFT 0
+#define ALX_ACER_TIMER_THRES_DEF 1
+
+/* RSS definitions */
+#define ALX_RSS_KEY0 0x14B0
+#define ALX_RSS_KEY1 0x14B4
+#define ALX_RSS_KEY2 0x14B8
+#define ALX_RSS_KEY3 0x14BC
+#define ALX_RSS_KEY4 0x14C0
+#define ALX_RSS_KEY5 0x14C4
+#define ALX_RSS_KEY6 0x14C8
+#define ALX_RSS_KEY7 0x14CC
+#define ALX_RSS_KEY8 0x14D0
+#define ALX_RSS_KEY9 0x14D4
+
+#define ALX_RSS_IDT_TBL0 0x1B00
+
+#define ALX_MSI_MAP_TBL1 0x15D0
+#define ALX_MSI_MAP_TBL1_TXQ1_SHIFT 20
+#define ALX_MSI_MAP_TBL1_TXQ0_SHIFT 16
+#define ALX_MSI_MAP_TBL1_RXQ3_SHIFT 12
+#define ALX_MSI_MAP_TBL1_RXQ2_SHIFT 8
+#define ALX_MSI_MAP_TBL1_RXQ1_SHIFT 4
+#define ALX_MSI_MAP_TBL1_RXQ0_SHIFT 0
+
+#define ALX_MSI_MAP_TBL2 0x15D8
+#define ALX_MSI_MAP_TBL2_TXQ3_SHIFT 20
+#define ALX_MSI_MAP_TBL2_TXQ2_SHIFT 16
+#define ALX_MSI_MAP_TBL2_RXQ7_SHIFT 12
+#define ALX_MSI_MAP_TBL2_RXQ6_SHIFT 8
+#define ALX_MSI_MAP_TBL2_RXQ5_SHIFT 4
+#define ALX_MSI_MAP_TBL2_RXQ4_SHIFT 0
+
+#define ALX_MSI_ID_MAP 0x15D4
+
+#define ALX_MSI_RETRANS_TIMER 0x1920
+/* bit16: 1:line,0:standard */
+#define ALX_MSI_MASK_SEL_LINE BIT(16)
+#define ALX_MSI_RETRANS_TM_MASK 0xFFFF
+#define ALX_MSI_RETRANS_TM_SHIFT 0
+
+/* CR DMA ctrl */
+
+/* TX QoS */
+#define ALX_WRR 0x1938
+#define ALX_WRR_PRI_MASK 0x3
+#define ALX_WRR_PRI_SHIFT 29
+#define ALX_WRR_PRI_RESTRICT_NONE 3
+#define ALX_WRR_PRI3_MASK 0x1F
+#define ALX_WRR_PRI3_SHIFT 24
+#define ALX_WRR_PRI2_MASK 0x1F
+#define ALX_WRR_PRI2_SHIFT 16
+#define ALX_WRR_PRI1_MASK 0x1F
+#define ALX_WRR_PRI1_SHIFT 8
+#define ALX_WRR_PRI0_MASK 0x1F
+#define ALX_WRR_PRI0_SHIFT 0
+
+#define ALX_HQTPD 0x193C
+#define ALX_HQTPD_BURST_EN BIT(31)
+#define ALX_HQTPD_Q3_NUMPREF_MASK 0xF
+#define ALX_HQTPD_Q3_NUMPREF_SHIFT 8
+#define ALX_HQTPD_Q2_NUMPREF_MASK 0xF
+#define ALX_HQTPD_Q2_NUMPREF_SHIFT 4
+#define ALX_HQTPD_Q1_NUMPREF_MASK 0xF
+#define ALX_HQTPD_Q1_NUMPREF_SHIFT 0
+
+#define ALX_MISC 0x19C0
+#define ALX_MISC_PSW_OCP_MASK 0x7
+#define ALX_MISC_PSW_OCP_SHIFT 21
+#define ALX_MISC_PSW_OCP_DEF 0x7
+#define ALX_MISC_ISO_EN BIT(12)
+#define ALX_MISC_INTNLOSC_OPEN BIT(3)
+
+#define ALX_MSIC2 0x19C8
+#define ALX_MSIC2_CALB_START BIT(0)
+
+#define ALX_MISC3 0x19CC
+/* bit1: 1:Software control 25M */
+#define ALX_MISC3_25M_BY_SW BIT(1)
+/* bit0: 25M switch to intnl OSC */
+#define ALX_MISC3_25M_NOTO_INTNL BIT(0)
+
+/* MSIX tbl in memory space */
+#define ALX_MSIX_ENTRY_BASE 0x2000
+
+/********************* PHY regs definition ***************************/
+
+/* PHY Specific Status Register */
+#define ALX_MII_GIGA_PSSR 0x11
+#define ALX_GIGA_PSSR_SPD_DPLX_RESOLVED 0x0800
+#define ALX_GIGA_PSSR_DPLX 0x2000
+#define ALX_GIGA_PSSR_SPEED 0xC000
+#define ALX_GIGA_PSSR_10MBS 0x0000
+#define ALX_GIGA_PSSR_100MBS 0x4000
+#define ALX_GIGA_PSSR_1000MBS 0x8000
+
+/* PHY Interrupt Enable Register */
+#define ALX_MII_IER 0x12
+#define ALX_IER_LINK_UP 0x0400
+#define ALX_IER_LINK_DOWN 0x0800
+
+/* PHY Interrupt Status Register */
+#define ALX_MII_ISR 0x13
+
+#define ALX_MII_DBG_ADDR 0x1D
+#define ALX_MII_DBG_DATA 0x1E
+
+/***************************** debug port *************************************/
+
+#define ALX_MIIDBG_ANACTRL 0x00
+#define ALX_ANACTRL_DEF 0x02EF
+
+#define ALX_MIIDBG_SYSMODCTRL 0x04
+/* enable half bias */
+#define ALX_SYSMODCTRL_IECHOADJ_DEF 0xBB8B
+
+#define ALX_MIIDBG_SRDSYSMOD 0x05
+#define ALX_SRDSYSMOD_DEEMP_EN 0x0040
+#define ALX_SRDSYSMOD_DEF 0x2C46
+
+#define ALX_MIIDBG_HIBNEG 0x0B
+#define ALX_HIBNEG_PSHIB_EN 0x8000
+#define ALX_HIBNEG_HIB_PSE 0x1000
+#define ALX_HIBNEG_DEF 0xBC40
+#define ALX_HIBNEG_NOHIB (ALX_HIBNEG_DEF & \
+ ~(ALX_HIBNEG_PSHIB_EN | ALX_HIBNEG_HIB_PSE))
+
+#define ALX_MIIDBG_TST10BTCFG 0x12
+#define ALX_TST10BTCFG_DEF 0x4C04
+
+#define ALX_MIIDBG_AZ_ANADECT 0x15
+#define ALX_AZ_ANADECT_DEF 0x3220
+#define ALX_AZ_ANADECT_LONG 0x3210
+
+#define ALX_MIIDBG_MSE16DB 0x18
+#define ALX_MSE16DB_UP 0x05EA
+#define ALX_MSE16DB_DOWN 0x02EA
+
+#define ALX_MIIDBG_MSE20DB 0x1C
+#define ALX_MSE20DB_TH_MASK 0x7F
+#define ALX_MSE20DB_TH_SHIFT 2
+#define ALX_MSE20DB_TH_DEF 0x2E
+#define ALX_MSE20DB_TH_HI 0x54
+
+#define ALX_MIIDBG_AGC 0x23
+#define ALX_AGC_2_VGA_MASK 0x3FU
+#define ALX_AGC_2_VGA_SHIFT 8
+#define ALX_AGC_LONG1G_LIMT 40
+#define ALX_AGC_LONG100M_LIMT 44
+
+#define ALX_MIIDBG_LEGCYPS 0x29
+#define ALX_LEGCYPS_EN 0x8000
+#define ALX_LEGCYPS_DEF 0x129D
+
+#define ALX_MIIDBG_TST100BTCFG 0x36
+#define ALX_TST100BTCFG_DEF 0xE12C
+
+#define ALX_MIIDBG_GREENCFG 0x3B
+#define ALX_GREENCFG_DEF 0x7078
+
+#define ALX_MIIDBG_GREENCFG2 0x3D
+#define ALX_GREENCFG2_BP_GREEN 0x8000
+#define ALX_GREENCFG2_GATE_DFSE_EN 0x0080
+
+/******* dev 3 *********/
+#define ALX_MIIEXT_PCS 3
+
+#define ALX_MIIEXT_CLDCTRL3 0x8003
+#define ALX_CLDCTRL3_BP_CABLE1TH_DET_GT 0x8000
+
+#define ALX_MIIEXT_CLDCTRL5 0x8005
+#define ALX_CLDCTRL5_BP_VD_HLFBIAS 0x4000
+
+#define ALX_MIIEXT_CLDCTRL6 0x8006
+#define ALX_CLDCTRL6_CAB_LEN_MASK 0xFF
+#define ALX_CLDCTRL6_CAB_LEN_SHIFT 0
+#define ALX_CLDCTRL6_CAB_LEN_SHORT1G 116
+#define ALX_CLDCTRL6_CAB_LEN_SHORT100M 152
+
+#define ALX_MIIEXT_VDRVBIAS 0x8062
+#define ALX_VDRVBIAS_DEF 0x3
+
+/********* dev 7 **********/
+#define ALX_MIIEXT_ANEG 7
+
+#define ALX_MIIEXT_LOCAL_EEEADV 0x3C
+#define ALX_LOCAL_EEEADV_1000BT 0x0004
+#define ALX_LOCAL_EEEADV_100BT 0x0002
+
+#define ALX_MIIEXT_AFE 0x801A
+#define ALX_AFE_10BT_100M_TH 0x0040
+
+#define ALX_MIIEXT_S3DIG10 0x8023
+/* bit0: 1:bypass 10BT rx fifo, 0:original 10BT rx */
+#define ALX_MIIEXT_S3DIG10_SL 0x0001
+#define ALX_MIIEXT_S3DIG10_DEF 0
+
+#define ALX_MIIEXT_NLP78 0x8027
+#define ALX_MIIEXT_NLP78_120M_DEF 0x8A05
+
+#endif
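
The ALX register block above follows the usual mask/shift convention for multi-bit fields (e.g. ALX_WRR_PRI0_MASK paired with ALX_WRR_PRI0_SHIFT, where the mask is unshifted). As a rough sketch only -- the rd32()/wr32() accessors and the helpers below are illustrative stand-ins, not the driver's actual API -- a field is read and updated like this:

#include <linux/types.h>

/* Illustrative helpers for an unshifted MASK plus a SHIFT, as defined above. */
static inline u32 alx_field_get(u32 reg_val, u32 mask, u32 shift)
{
        return (reg_val >> shift) & mask;
}

static inline u32 alx_field_set(u32 reg_val, u32 mask, u32 shift, u32 val)
{
        reg_val &= ~(mask << shift);                      /* clear the field */
        return reg_val | ((val & mask) << shift);         /* insert new value */
}

/* Example usage (rd32()/wr32() are hypothetical register accessors):
 *      u32 wrr = rd32(hw, ALX_WRR);
 *      wrr = alx_field_set(wrr, ALX_WRR_PRI_MASK, ALX_WRR_PRI_SHIFT,
 *                          ALX_WRR_PRI_RESTRICT_NONE);
 *      wr32(hw, ALX_WRR, wrr);
 */
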
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 297fc138fa15..986df04fdcb3 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1790,6 +1790,9 @@ static int tg3_poll_fw(struct tg3 *tp)
int i;
u32 val;
+ if (tg3_flag(tp, NO_FWARE_REPORTED))
+ return 0;
+
if (tg3_flag(tp, IS_SSB_CORE)) {
/* We don't use firmware. */
return 0;
@@ -10475,6 +10478,13 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
*/
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
+ /* The chip may have just been powered on. If so, the boot code may still
+ * be running initialization. Wait for it to finish to avoid races in
+ * accessing the hardware.
+ */
+ tg3_enable_register_access(tp);
+ tg3_poll_fw(tp);
+
tg3_switch_clocks(tp);
tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 6e8bc9d88c41..94d957d203a6 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -244,7 +244,7 @@ bnad_debugfs_lseek(struct file *file, loff_t offset, int orig)
file->f_pos += offset;
break;
case 2:
- file->f_pos = debug->buffer_len - offset;
+ file->f_pos = debug->buffer_len + offset;
break;
default:
return -EINVAL;
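
The same SEEK_END sign error fixed here recurs in the bfa, fnic and lpfc debugfs lseek handlers later in this series: with whence == 2 the (usually negative) offset must be added to the buffer length, not subtracted from it. A minimal sketch of the canonical handling, with a hypothetical demo_buffer_len standing in for the driver's buffer size (newer kernels can simply delegate to fixed_size_llseek()):

#include <linux/fs.h>

static loff_t demo_buffer_len;  /* size of the debugfs buffer (illustrative) */

static loff_t demo_debugfs_lseek(struct file *file, loff_t offset, int whence)
{
        loff_t pos;

        switch (whence) {
        case 0:                         /* SEEK_SET */
                pos = offset;
                break;
        case 1:                         /* SEEK_CUR */
                pos = file->f_pos + offset;
                break;
        case 2:                         /* SEEK_END: offset is relative to the end */
                pos = demo_buffer_len + offset;
                break;
        default:
                return -EINVAL;
        }
        if (pos < 0 || pos > demo_buffer_len)
                return -EINVAL;
        file->f_pos = pos;
        return pos;
}
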
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index 28a5e425fecf..92306b320840 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -76,6 +76,12 @@ int tulip_refill_rx(struct net_device *dev)
mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&tp->pdev->dev, mapping)) {
+ dev_kfree_skb(skb);
+ tp->rx_buffers[entry].skb = NULL;
+ break;
+ }
+
tp->rx_buffers[entry].mapping = mapping;
tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
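
The tulip change applies the general rule that a streaming DMA mapping can fail and must be checked before the address is handed to the hardware. A self-contained sketch of the pattern (demo_map_rx_skb() is a hypothetical helper, not tulip code):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int demo_map_rx_skb(struct device *dev, struct sk_buff *skb,
                           unsigned int len, dma_addr_t *mapping)
{
        *mapping = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *mapping))
                return -ENOMEM;         /* never hand an unmapped buffer to the NIC */
        return 0;
}

/* Caller side, mirroring the fix above:
 *      if (demo_map_rx_skb(&pdev->dev, skb, PKT_BUF_SZ, &mapping)) {
 *              dev_kfree_skb(skb);
 *              break;
 *      }
 */
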
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 9aef457dacbb..98efc29eaa55 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4259,6 +4259,9 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
netdev->features |= NETIF_F_HIGHDMA;
} else {
status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (!status)
+ status = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (status) {
dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
goto free_netdev;
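
The benet fix makes the 32-bit fallback path set the coherent DMA mask as well as the streaming mask, so coherent allocations obey the same addressing limit. A sketch of the full fallback sequence (demo_set_dma_masks() is illustrative; later kernels wrap the pair of calls in dma_set_mask_and_coherent()):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int demo_set_dma_masks(struct pci_dev *pdev)
{
        int err;

        err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!err)
                err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                /* fall back to 32-bit DMA, keeping both masks consistent */
                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (!err)
                        err = dma_set_coherent_mask(&pdev->dev,
                                                    DMA_BIT_MASK(32));
        }
        return err;
}
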
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index a2eadc070328..8cb600ccaf1d 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -722,8 +722,8 @@ static int sh_eth_check_reset(struct net_device *ndev)
mdelay(1);
cnt--;
}
- if (cnt < 0) {
- pr_err("Device reset fail\n");
+ if (cnt <= 0) {
+ pr_err("Device reset failed\n");
ret = -ETIMEDOUT;
}
return ret;
@@ -1260,16 +1260,23 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
desc_status = edmac_to_cpu(mdp, rxdesc->status);
pkt_len = rxdesc->frame_length;
-#if defined(CONFIG_ARCH_R8A7740)
- desc_status >>= 16;
-#endif
-
if (--boguscnt < 0)
break;
if (!(desc_status & RDFEND))
ndev->stats.rx_length_errors++;
+#if defined(CONFIG_ARCH_R8A7740)
+ /*
+ * On almost all GETHER/ETHER controllers, the Receive Frame State
+ * (RFS) bits in Receive Descriptor 0 occupy bits 9 to 0. On the
+ * R8A7740's GETHER, however, the RFS bits occupy bits 25 to 16,
+ * so the driver needs to shift the descriptor status right
+ * by 16.
+ */
+ desc_status >>= 16;
+#endif
+
if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
RD_RFS5 | RD_RFS6 | RD_RFS10)) {
ndev->stats.rx_errors++;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 618446ae1ec1..ee919ca8b8a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1899,7 +1899,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
#ifdef STMMAC_XMIT_DEBUG
if (netif_msg_pktdata(priv)) {
- pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d"
+ pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d",
__func__, (priv->cur_tx % txsize),
(priv->dirty_tx % txsize), entry, first, nfrags);
if (priv->extend_desc)
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index a45f64eef870..101b037a7dce 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1681,7 +1681,7 @@ static int cpsw_probe(struct platform_device *pdev)
priv->rx_packet_max = max(rx_packet_max, 128);
priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
priv->irq_enabled = true;
- if (!ndev) {
+ if (!priv->cpts) {
pr_err("error allocating cpts\n");
goto clean_ndev_ret;
}
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 12aec173564c..c47f0dbcebb5 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -449,10 +449,9 @@ static int davinci_mdio_suspend(struct device *dev)
__raw_writel(ctrl, &data->regs->control);
wait_for_idle(data);
- pm_runtime_put_sync(data->dev);
-
data->suspended = true;
spin_unlock(&data->lock);
+ pm_runtime_put_sync(data->dev);
return 0;
}
@@ -460,15 +459,12 @@ static int davinci_mdio_suspend(struct device *dev)
static int davinci_mdio_resume(struct device *dev)
{
struct davinci_mdio_data *data = dev_get_drvdata(dev);
- u32 ctrl;
- spin_lock(&data->lock);
pm_runtime_get_sync(data->dev);
+ spin_lock(&data->lock);
/* restart the scan state machine */
- ctrl = __raw_readl(&data->regs->control);
- ctrl |= CONTROL_ENABLE;
- __raw_writel(ctrl, &data->regs->control);
+ __davinci_mdio_reset(data);
data->suspended = false;
spin_unlock(&data->lock);
@@ -477,8 +473,8 @@ static int davinci_mdio_resume(struct device *dev)
}
static const struct dev_pm_ops davinci_mdio_pm_ops = {
- .suspend = davinci_mdio_suspend,
- .resume = davinci_mdio_resume,
+ .suspend_late = davinci_mdio_suspend,
+ .resume_early = davinci_mdio_resume,
};
static const struct of_device_id davinci_mdio_of_mtable[] = {
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index ab2307b5d9a7..4dccead586be 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -285,7 +285,9 @@ int netvsc_recv_callback(struct hv_device *device_obj,
skb->protocol = eth_type_trans(skb, net);
skb->ip_summed = CHECKSUM_NONE;
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), packet->vlan_tci);
+ if (packet->vlan_tci & VLAN_TAG_PRESENT)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ packet->vlan_tci);
net->stats.rx_packets++;
net->stats.rx_bytes += packet->total_data_buflen;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index edfddc5f61b4..d811b06e2ccd 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -853,18 +853,24 @@ static int macvlan_changelink(struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct macvlan_dev *vlan = netdev_priv(dev);
- if (data && data[IFLA_MACVLAN_MODE])
- vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
+
if (data && data[IFLA_MACVLAN_FLAGS]) {
__u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
-
- if (promisc && (flags & MACVLAN_FLAG_NOPROMISC))
- dev_set_promiscuity(vlan->lowerdev, -1);
- else if (promisc && !(flags & MACVLAN_FLAG_NOPROMISC))
- dev_set_promiscuity(vlan->lowerdev, 1);
+ if (vlan->port->passthru && promisc) {
+ int err;
+
+ if (flags & MACVLAN_FLAG_NOPROMISC)
+ err = dev_set_promiscuity(vlan->lowerdev, -1);
+ else
+ err = dev_set_promiscuity(vlan->lowerdev, 1);
+ if (err < 0)
+ return err;
+ }
vlan->flags = flags;
}
+ if (data && data[IFLA_MACVLAN_MODE])
+ vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
return 0;
}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index e46fef38685b..bff7e0b0b4e7 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1111,8 +1111,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
}
port->index = -1;
- team_port_enable(team, port);
list_add_tail_rcu(&port->list, &team->port_list);
+ team_port_enable(team, port);
__team_compute_features(team);
__team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
__team_options_change_check(team);
diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c
index 5ca14d463ba7..7f032e211343 100644
--- a/drivers/net/team/team_mode_random.c
+++ b/drivers/net/team/team_mode_random.c
@@ -28,6 +28,8 @@ static bool rnd_transmit(struct team *team, struct sk_buff *skb)
port_index = random_N(team->en_port_count);
port = team_get_port_by_index_rcu(team, port_index);
+ if (unlikely(!port))
+ goto drop;
port = team_get_first_port_txable_rcu(team, port);
if (unlikely(!port))
goto drop;
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index 90311c7375fb..53665850b59e 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -33,6 +33,8 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb)
port_index = team_num_to_port_index(team,
rr_priv(team)->sent_packets++);
port = team_get_port_by_index_rcu(team, port_index);
+ if (unlikely(!port))
+ goto drop;
port = team_get_first_port_txable_rcu(team, port);
if (unlikely(!port))
goto drop;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a34427065460..cea2fe4e9812 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -352,7 +352,7 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
u32 numqueues = 0;
rcu_read_lock();
- numqueues = tun->numqueues;
+ numqueues = ACCESS_ONCE(tun->numqueues);
txq = skb_get_rxhash(skb);
if (txq) {
@@ -2157,6 +2157,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
INIT_LIST_HEAD(&tfile->next);
+ sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
+
return 0;
}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 078795fe6e31..04ee044dde51 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -627,6 +627,12 @@ static const struct usb_device_id products [] = {
.driver_info = 0,
},
+/* Huawei E1820 - handled by qmi_wwan */
+{
+ USB_DEVICE_INTERFACE_NUMBER(HUAWEI_VENDOR_ID, 0x14ac, 1),
+ .driver_info = 0,
+},
+
/* Realtek RTL8152 Based USB 2.0 Ethernet Adapters */
#if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE)
{
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 86adfa0a912e..d095d0d3056b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -519,6 +519,7 @@ static const struct usb_device_id products[] = {
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
+ {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
{QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
{QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 8111565c35fc..f6dce13c8f89 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -603,18 +603,22 @@ skip:
/* Watch incoming packets to learn mapping between Ethernet address
* and Tunnel endpoint.
+ * Return true if packet is bogus and should be dropped.
*/
-static void vxlan_snoop(struct net_device *dev,
+static bool vxlan_snoop(struct net_device *dev,
__be32 src_ip, const u8 *src_mac)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
- int err;
f = vxlan_find_mac(vxlan, src_mac);
if (likely(f)) {
if (likely(f->remote.remote_ip == src_ip))
- return;
+ return false;
+
+ /* Don't migrate static entries, drop packets */
+ if (f->state & NUD_NOARP)
+ return true;
if (net_ratelimit())
netdev_info(dev,
@@ -626,14 +630,19 @@ static void vxlan_snoop(struct net_device *dev,
} else {
/* learned new entry */
spin_lock(&vxlan->hash_lock);
- err = vxlan_fdb_create(vxlan, src_mac, src_ip,
- NUD_REACHABLE,
- NLM_F_EXCL|NLM_F_CREATE,
- vxlan->dst_port,
- vxlan->default_dst.remote_vni,
- 0, NTF_SELF);
+
+ /* close off race between vxlan_flush and incoming packets */
+ if (netif_running(dev))
+ vxlan_fdb_create(vxlan, src_mac, src_ip,
+ NUD_REACHABLE,
+ NLM_F_EXCL|NLM_F_CREATE,
+ vxlan->dst_port,
+ vxlan->default_dst.remote_vni,
+ 0, NTF_SELF);
spin_unlock(&vxlan->hash_lock);
}
+
+ return false;
}
@@ -766,8 +775,9 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
vxlan->dev->dev_addr) == 0)
goto drop;
- if (vxlan->flags & VXLAN_F_LEARN)
- vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);
+ if ((vxlan->flags & VXLAN_F_LEARN) &&
+ vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source))
+ goto drop;
__skb_tunnel_rx(skb, vxlan->dev);
skb_reset_network_header(skb);
@@ -1190,9 +1200,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
struct sk_buff *skb1;
skb1 = skb_clone(skb, GFP_ATOMIC);
- rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
- if (rc == NETDEV_TX_OK)
- rc = rc1;
+ if (skb1) {
+ rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
+ if (rc == NETDEV_TX_OK)
+ rc = rc1;
+ }
}
rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc);
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 3b078515b422..760ab3fe09e2 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -84,13 +84,17 @@ config ATH9K_DFS_CERTIFIED
developed. At this point enabling this option won't do anything
except increase code size.
-config ATH9K_RATE_CONTROL
+config ATH9K_LEGACY_RATE_CONTROL
bool "Atheros ath9k rate control"
depends on ATH9K
- default y
+ default n
---help---
Say Y, if you want to use the ath9k specific rate control
- module instead of minstrel_ht.
+ module instead of minstrel_ht. Be warned that the ath9k RC has
+ various known issues, and that minstrel is a more robust algorithm.
+ Note that even if this option is selected, "ath9k_rate_control"
+ has to be passed to mac80211 using the module parameter,
+ ieee80211_default_rc_algo.
config ATH9K_HTC
tristate "Atheros HTC based wireless cards support"
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 2ad8f9474ba1..75ee9e7704ce 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -8,7 +8,7 @@ ath9k-y += beacon.o \
antenna.o
ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
-ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
+ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o
ath9k-$(CONFIG_ATH9K_PCI) += pci.o
ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index db5ffada2217..7546b9a7dcbf 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -958,11 +958,11 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
{0x0000a074, 0x00000000},
{0x0000a078, 0x00000000},
{0x0000a07c, 0x00000000},
- {0x0000a080, 0x1a1a1a1a},
- {0x0000a084, 0x1a1a1a1a},
- {0x0000a088, 0x1a1a1a1a},
- {0x0000a08c, 0x1a1a1a1a},
- {0x0000a090, 0x171a1a1a},
+ {0x0000a080, 0x22222229},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x171d1d1d},
{0x0000a094, 0x11111717},
{0x0000a098, 0x00030311},
{0x0000a09c, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index daba841808cf..389ee1b59976 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -792,8 +792,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->iface_combinations = if_comb;
hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
- if (AR_SREV_5416(sc->sc_ah))
- hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
@@ -831,10 +830,6 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
sc->ant_rx = hw->wiphy->available_antennas_rx;
sc->ant_tx = hw->wiphy->available_antennas_tx;
-#ifdef CONFIG_ATH9K_RATE_CONTROL
- hw->rate_control_algorithm = "ath9k_rate_control";
-#endif
-
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
&sc->sbands[IEEE80211_BAND_2GHZ];
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 267dbfcfaa96..b9a87383cb43 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -231,7 +231,7 @@ static inline void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
}
#endif
-#ifdef CONFIG_ATH9K_RATE_CONTROL
+#ifdef CONFIG_ATH9K_LEGACY_RATE_CONTROL
int ath_rate_control_register(void);
void ath_rate_control_unregister(void);
#else
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 6dd07e2ec595..a95b77ab360e 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2458,7 +2458,7 @@ static void b43_request_firmware(struct work_struct *work)
for (i = 0; i < B43_NR_FWTYPES; i++) {
errmsg = ctx->errors[i];
if (strlen(errmsg))
- b43err(dev->wl, errmsg);
+ b43err(dev->wl, "%s", errmsg);
}
b43_print_fw_helptext(dev->wl, 1);
goto out;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index b98f2235978e..2c593570497c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -930,6 +930,10 @@ fail:
brcmf_fws_del_interface(ifp);
brcmf_fws_deinit(drvr);
}
+ if (drvr->iflist[0]) {
+ free_netdev(ifp->ndev);
+ drvr->iflist[0] = NULL;
+ }
if (p2p_ifp) {
free_netdev(p2p_ifp->ndev);
drvr->iflist[1] = NULL;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 28e7aeedd184..9fd6f2fef11b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -3074,21 +3074,8 @@ static void brcms_b_antsel_set(struct brcms_hardware *wlc_hw, u32 antsel_avail)
*/
static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
{
- /* disallow PS when one of the following global conditions meets */
- if (!wlc->pub->associated)
- return false;
-
- /* disallow PS when one of these meets when not scanning */
- if (wlc->filter_flags & FIF_PROMISC_IN_BSS)
- return false;
-
- if (wlc->bsscfg->type == BRCMS_TYPE_AP)
- return false;
-
- if (wlc->bsscfg->type == BRCMS_TYPE_ADHOC)
- return false;
-
- return true;
+ /* not supporting PS so always return false for now */
+ return false;
}
static void brcms_c_statsupd(struct brcms_c_info *wlc)
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index c9f197d9ca1e..fe31590a51b2 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -816,6 +816,7 @@ out:
rs_sta->last_txrate_idx = idx;
info->control.rates[0].idx = rs_sta->last_txrate_idx;
}
+ info->control.rates[0].count = 1;
D_RATE("leave: %d\n", idx);
}
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index 1fc0b227e120..ed3c42a63a43 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -2268,7 +2268,7 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
info->control.rates[0].flags = 0;
}
info->control.rates[0].idx = rate_idx;
-
+ info->control.rates[0].count = 1;
}
static void *
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index f8246f2d88f9..4caaf52986a4 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -1832,16 +1832,16 @@ u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval);
__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
u32 beacon_interval);
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
extern const struct dev_pm_ops il_pm_ops;
#define IL_LEGACY_PM_OPS (&il_pm_ops)
-#else /* !CONFIG_PM */
+#else /* !CONFIG_PM_SLEEP */
#define IL_LEGACY_PM_OPS NULL
-#endif /* !CONFIG_PM */
+#endif /* !CONFIG_PM_SLEEP */
/*****************************************************
* Error Handling Debugging
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 94314a8e1029..8fe76dc4f373 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -2799,7 +2799,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
info->control.rates[0].flags = 0;
}
info->control.rates[0].idx = rate_idx;
-
+ info->control.rates[0].count = 1;
}
static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 707446fa00bd..cd1ad0019185 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1378,7 +1378,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
struct iwl_chain_noise_data *data = &priv->chain_noise_data;
int ret;
- if (!(priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED))
+ if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
return;
if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 4f886133639a..2f690e578b5c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1000,10 +1000,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
*/
if (load_module) {
err = request_module("%s", op->name);
+#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
if (err)
IWL_ERR(drv,
"failed to load module %s (error %d), is dynamic loading enabled?\n",
op->name, err);
+#endif
}
return;
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index d6beec765d65..31587a318f8b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -2652,6 +2652,7 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
info->control.rates[0].flags = 0;
}
info->control.rates[0].idx = rate_idx;
+ info->control.rates[0].count = 1;
}
static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index a830eb6cc411..b9ba4e71ea4a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -180,7 +180,8 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
return;
} else if (ieee80211_is_back_req(fc)) {
- tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
+ tx_cmd->tx_flags |=
+ cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
}
/* HT rate doesn't make sense for a non data frame */
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 753b5682d53f..a5f9875cfd6e 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -26,10 +26,17 @@
static struct dentry *mwifiex_dfs_dir;
static char *bss_modes[] = {
- "Unknown",
- "Ad-hoc",
- "Managed",
- "Auto"
+ "UNSPECIFIED",
+ "ADHOC",
+ "STATION",
+ "AP",
+ "AP_VLAN",
+ "WDS",
+ "MONITOR",
+ "MESH_POINT",
+ "P2P_CLIENT",
+ "P2P_GO",
+ "P2P_DEVICE",
};
/* size/addr for mwifiex_debug_info */
@@ -200,7 +207,12 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
p += sprintf(p, "driver_version = %s", fmt);
p += sprintf(p, "\nverext = %s", priv->version_str);
p += sprintf(p, "\ninterface_name=\"%s\"\n", netdev->name);
- p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]);
+
+ if (info.bss_mode >= ARRAY_SIZE(bss_modes))
+ p += sprintf(p, "bss_mode=\"%d\"\n", info.bss_mode);
+ else
+ p += sprintf(p, "bss_mode=\"%s\"\n", bss_modes[info.bss_mode]);
+
p += sprintf(p, "media_state=\"%s\"\n",
(!priv->media_connected ? "Disconnected" : "Connected"));
p += sprintf(p, "mac_address=\"%pM\"\n", netdev->dev_addr);
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index ead3a3e746a2..3aa30ddcbfea 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -3027,19 +3027,26 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
* TODO: we do not use the +6 dBm option, so as not to increase power
* beyond the regulatory limit; however, this could be utilized for
* devices with CAPABILITY_POWER_LIMIT.
+ *
+ * TODO: add different temperature compensation code for RT3290 & RT5390
+ * to allow the use of BBP_R1 on those chips.
*/
- rt2800_bbp_read(rt2x00dev, 1, &r1);
- if (delta <= -12) {
- power_ctrl = 2;
- delta += 12;
- } else if (delta <= -6) {
- power_ctrl = 1;
- delta += 6;
- } else {
- power_ctrl = 0;
+ if (!rt2x00_rt(rt2x00dev, RT3290) &&
+ !rt2x00_rt(rt2x00dev, RT5390)) {
+ rt2800_bbp_read(rt2x00dev, 1, &r1);
+ if (delta <= -12) {
+ power_ctrl = 2;
+ delta += 12;
+ } else if (delta <= -6) {
+ power_ctrl = 1;
+ delta += 6;
+ } else {
+ power_ctrl = 0;
+ }
+ rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
+ rt2800_bbp_write(rt2x00dev, 1, r1);
}
- rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
- rt2800_bbp_write(rt2x00dev, 1, r1);
+
offset = TX_PWR_CFG_0;
for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) {
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 999ffc12578b..c97e9d327331 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -764,6 +764,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
"can't alloc skb for rx\n");
goto done;
}
+ kmemleak_not_leak(new_skb);
pci_unmap_single(rtlpci->pdev,
*((dma_addr_t *) skb->cb),
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 3d0498e69c8c..189ba124a8c6 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1973,26 +1973,35 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
}
-void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- u8 rssi_level)
+static void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- u32 ratr_value = (u32) mac->basic_rates;
- u8 *mcsrate = mac->mcs;
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 ratr_value;
u8 ratr_index = 0;
u8 nmode = mac->ht_enable;
- u8 mimo_ps = 1;
- u16 shortgi_rate = 0;
- u32 tmp_ratr_value = 0;
+ u8 mimo_ps = IEEE80211_SMPS_OFF;
+ u16 shortgi_rate;
+ u32 tmp_ratr_value;
u8 curtxbw_40mhz = mac->bw_40;
- u8 curshortgi_40mhz = mac->sgi_40;
- u8 curshortgi_20mhz = mac->sgi_20;
+ u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ 1 : 0;
+ u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ 1 : 0;
enum wireless_mode wirelessmode = mac->mode;
- ratr_value |= ((*(u16 *) (mcsrate))) << 12;
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ ratr_value = sta->supp_rates[1] << 4;
+ else
+ ratr_value = sta->supp_rates[0];
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ ratr_value = 0xfff;
+
+ ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
+ sta->ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
if (ratr_value & 0x0000000c)
@@ -2006,7 +2015,7 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
case WIRELESS_MODE_N_24G:
case WIRELESS_MODE_N_5G:
nmode = 1;
- if (mimo_ps == 0) {
+ if (mimo_ps == IEEE80211_SMPS_STATIC) {
ratr_value &= 0x0007F005;
} else {
u32 ratr_mask;
@@ -2016,8 +2025,7 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
ratr_mask = 0x000ff005;
else
ratr_mask = 0x0f0ff005;
- if (curtxbw_40mhz)
- ratr_mask |= 0x00000010;
+
ratr_value &= ratr_mask;
}
break;
@@ -2026,41 +2034,74 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
ratr_value &= 0x000ff0ff;
else
ratr_value &= 0x0f0ff0ff;
+
break;
}
+
ratr_value &= 0x0FFFFFFF;
- if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) ||
- (!curtxbw_40mhz && curshortgi_20mhz))) {
+
+ if (nmode && ((curtxbw_40mhz &&
+ curshortgi_40mhz) || (!curtxbw_40mhz &&
+ curshortgi_20mhz))) {
+
ratr_value |= 0x10000000;
tmp_ratr_value = (ratr_value >> 12);
+
for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
if ((1 << shortgi_rate) & tmp_ratr_value)
break;
}
+
shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
- (shortgi_rate << 4) | (shortgi_rate);
+ (shortgi_rate << 4) | (shortgi_rate);
}
+
rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
+
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
+ rtl_read_dword(rtlpriv, REG_ARFR0));
}
-void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
+static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- u32 ratr_bitmap = (u32) mac->basic_rates;
- u8 *p_mcsrate = mac->mcs;
- u8 ratr_index = 0;
- u8 curtxbw_40mhz = mac->bw_40;
- u8 curshortgi_40mhz = mac->sgi_40;
- u8 curshortgi_20mhz = mac->sgi_20;
- enum wireless_mode wirelessmode = mac->mode;
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_sta_info *sta_entry = NULL;
+ u32 ratr_bitmap;
+ u8 ratr_index;
+ u8 curtxbw_40mhz = (sta->bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
+ u8 curshortgi_40mhz = curtxbw_40mhz &&
+ (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
+ 1 : 0;
+ u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
+ 1 : 0;
+ enum wireless_mode wirelessmode = 0;
bool shortgi = false;
u8 rate_mask[5];
u8 macid = 0;
- u8 mimops = 1;
-
- ratr_bitmap |= (p_mcsrate[1] << 20) | (p_mcsrate[0] << 12);
+ u8 mimo_ps = IEEE80211_SMPS_OFF;
+
+ sta_entry = (struct rtl_sta_info *) sta->drv_priv;
+ wirelessmode = sta_entry->wireless_mode;
+ if (mac->opmode == NL80211_IFTYPE_STATION ||
+ mac->opmode == NL80211_IFTYPE_MESH_POINT)
+ curtxbw_40mhz = mac->bw_40;
+ else if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC)
+ macid = sta->aid + 1;
+
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ ratr_bitmap = sta->supp_rates[1] << 4;
+ else
+ ratr_bitmap = sta->supp_rates[0];
+ if (mac->opmode == NL80211_IFTYPE_ADHOC)
+ ratr_bitmap = 0xfff;
+ ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
+ sta->ht_cap.mcs.rx_mask[0] << 12);
switch (wirelessmode) {
case WIRELESS_MODE_B:
ratr_index = RATR_INX_WIRELESS_B;
@@ -2071,6 +2112,7 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
break;
case WIRELESS_MODE_G:
ratr_index = RATR_INX_WIRELESS_GB;
+
if (rssi_level == 1)
ratr_bitmap &= 0x00000f00;
else if (rssi_level == 2)
@@ -2085,7 +2127,8 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
case WIRELESS_MODE_N_24G:
case WIRELESS_MODE_N_5G:
ratr_index = RATR_INX_WIRELESS_NGB;
- if (mimops == 0) {
+
+ if (mimo_ps == IEEE80211_SMPS_STATIC) {
if (rssi_level == 1)
ratr_bitmap &= 0x00070000;
else if (rssi_level == 2)
@@ -2128,8 +2171,10 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
}
}
}
+
if ((curtxbw_40mhz && curshortgi_40mhz) ||
(!curtxbw_40mhz && curshortgi_20mhz)) {
+
if (macid == 0)
shortgi = true;
else if (macid == 1)
@@ -2138,21 +2183,42 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
break;
default:
ratr_index = RATR_INX_WIRELESS_NGB;
+
if (rtlphy->rf_type == RF_1T2R)
ratr_bitmap &= 0x000ff0ff;
else
ratr_bitmap &= 0x0f0ff0ff;
break;
}
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "ratr_bitmap :%x\n",
- ratr_bitmap);
- *(u32 *)&rate_mask = ((ratr_bitmap & 0x0fffffff) |
- ratr_index << 28);
+ sta_entry->ratr_index = ratr_index;
+
+ RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
+ "ratr_bitmap :%x\n", ratr_bitmap);
+ *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
+ (ratr_index << 28);
rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
"Rate_index:%x, ratr_val:%x, %5phC\n",
ratr_index, ratr_bitmap, rate_mask);
- rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
+ memcpy(rtlpriv->rate_mask, rate_mask, 5);
+ /* rtl92c_fill_h2c_cmd() does USB I/O and would trigger a
+ * "scheduling while atomic" bug if called directly */
+ schedule_work(&rtlpriv->works.fill_h2c_cmd);
+
+ if (macid != 0)
+ sta_entry->ratr_index = ratr_index;
+}
+
+void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->dm.useramask)
+ rtl92cu_update_hal_rate_mask(hw, sta, rssi_level);
+ else
+ rtl92cu_update_hal_rate_table(hw, sta);
}
void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
index f41a3aa4a26f..8e3ec1e25644 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
@@ -98,10 +98,6 @@ void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw,
u32 add_msr, u32 rm_msr);
void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
-void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- u8 rssi_level);
-void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level);
void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw);
bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 85b6bdb163c0..da4f587199ee 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -289,14 +289,30 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
macaddr = cam_const_broad;
entry_id = key_index;
} else {
+ if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_MESH_POINT) {
+ entry_id = rtl_cam_get_free_entry(hw,
+ p_macaddr);
+ if (entry_id >= TOTAL_CAM_ENTRY) {
+ RT_TRACE(rtlpriv, COMP_SEC,
+ DBG_EMERG,
+ "Can not find free hw security cam entry\n");
+ return;
+ }
+ } else {
+ entry_id = CAM_PAIRWISE_KEY_POSITION;
+ }
+
key_index = PAIRWISE_KEYIDX;
- entry_id = CAM_PAIRWISE_KEY_POSITION;
is_pairwise = true;
}
}
if (rtlpriv->sec.key_len[key_index] == 0) {
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
"delete one entry\n");
+ if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_MESH_POINT)
+ rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 938b1e670b93..826f085c29dd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -106,8 +106,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
.update_interrupt_mask = rtl92cu_update_interrupt_mask,
.get_hw_reg = rtl92cu_get_hw_reg,
.set_hw_reg = rtl92cu_set_hw_reg,
- .update_rate_tbl = rtl92cu_update_hal_rate_table,
- .update_rate_mask = rtl92cu_update_hal_rate_mask,
+ .update_rate_tbl = rtl92cu_update_hal_rate_tbl,
.fill_tx_desc = rtl92cu_tx_fill_desc,
.fill_fake_txdesc = rtl92cu_fill_fake_txdesc,
.fill_tx_cmddesc = rtl92cu_tx_fill_cmddesc,
@@ -137,6 +136,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
.phy_lc_calibrate = _rtl92cu_phy_lc_calibrate,
.phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback,
.dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower,
+ .fill_h2c_cmd = rtl92c_fill_h2c_cmd,
};
static struct rtl_mod_params rtl92cu_mod_params = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
index a1310abd0d54..262e1e4c6e5b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
@@ -49,5 +49,8 @@ bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 regaddr, u32 bitmask);
void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level);
#endif
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 76732b0cd221..a3532e077871 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -824,6 +824,7 @@ static void rtl_usb_stop(struct ieee80211_hw *hw)
/* should after adapter start and interrupt enable. */
set_hal_stop(rtlhal);
+ cancel_work_sync(&rtlpriv->works.fill_h2c_cmd);
/* Enable software */
SET_USB_STOP(rtlusb);
rtl_usb_deinit(hw);
@@ -1026,6 +1027,16 @@ static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw,
return false;
}
+static void rtl_fill_h2c_cmd_work_callback(struct work_struct *work)
+{
+ struct rtl_works *rtlworks =
+ container_of(work, struct rtl_works, fill_h2c_cmd);
+ struct ieee80211_hw *hw = rtlworks->hw;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->cfg->ops->fill_h2c_cmd(hw, H2C_RA_MASK, 5, rtlpriv->rate_mask);
+}
+
static struct rtl_intf_ops rtl_usb_ops = {
.adapter_start = rtl_usb_start,
.adapter_stop = rtl_usb_stop,
@@ -1057,6 +1068,8 @@ int rtl_usb_probe(struct usb_interface *intf,
/* this spin lock must be initialized early */
spin_lock_init(&rtlpriv->locks.usb_lock);
+ INIT_WORK(&rtlpriv->works.fill_h2c_cmd,
+ rtl_fill_h2c_cmd_work_callback);
rtlpriv->usb_data_index = 0;
init_completion(&rtlpriv->firmware_loading_complete);
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 44328baa6389..cc03e7c87cbe 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1736,6 +1736,8 @@ struct rtl_hal_ops {
void (*bt_wifi_media_status_notify) (struct ieee80211_hw *hw,
bool mstate);
void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw);
+ void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id,
+ u32 cmd_len, u8 *p_cmdbuffer);
};
struct rtl_intf_ops {
@@ -1869,6 +1871,7 @@ struct rtl_works {
struct delayed_work fwevt_wq;
struct work_struct lps_change_work;
+ struct work_struct fill_h2c_cmd;
};
struct rtl_debug {
@@ -2048,6 +2051,7 @@ struct rtl_priv {
};
};
bool enter_ps; /* true when entering PS */
+ u8 rate_mask[5];
/*This must be the last item so
that it points to the data allocated
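
The hw.c comment above explains why the rate-mask update no longer calls rtl92c_fill_h2c_cmd() directly: that call performs USB I/O, which may sleep, while the rate update can run in atomic context. The command bytes are therefore stashed in rtlpriv->rate_mask and sent from the fill_h2c_cmd work item wired up in usb.c. A minimal, generic sketch of that defer-to-process-context pattern (all demo_* names are illustrative, not taken from the driver):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/string.h>
#include <linux/printk.h>

struct demo_dev {
        u8 cmd_buf[5];                  /* snapshot taken in atomic context */
        struct work_struct cmd_work;
};

static void demo_cmd_work(struct work_struct *work)
{
        struct demo_dev *d = container_of(work, struct demo_dev, cmd_work);

        /* Process context: the USB I/O (which may sleep) would go here. */
        pr_info("sending H2C %5phC\n", d->cmd_buf);
}

static void demo_init(struct demo_dev *d)
{
        INIT_WORK(&d->cmd_work, demo_cmd_work);
}

/* Safe from atomic context: only copies the data and queues the work. */
static void demo_queue_cmd(struct demo_dev *d, const u8 *cmd)
{
        memcpy(d->cmd_buf, cmd, sizeof(d->cmd_buf));
        schedule_work(&d->cmd_work);
}

/* On teardown, call cancel_work_sync(&d->cmd_work) before freeing d,
 * exactly as rtl_usb_stop() does for works.fill_h2c_cmd above. */
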
diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c
index affdb3ec6225..4a0bbb13806b 100644
--- a/drivers/net/wireless/ti/wl12xx/scan.c
+++ b/drivers/net/wireless/ti/wl12xx/scan.c
@@ -310,7 +310,7 @@ static void wl12xx_adjust_channels(struct wl1271_cmd_sched_scan_config *cmd,
memcpy(cmd->channels_2, cmd_channels->channels_2,
sizeof(cmd->channels_2));
memcpy(cmd->channels_5, cmd_channels->channels_5,
- sizeof(cmd->channels_2));
+ sizeof(cmd->channels_5));
/* channels_4 are not supported, so no need to copy them */
}
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
index 222d03540200..9e5484a73667 100644
--- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -36,12 +36,12 @@
#define WL127X_IFTYPE_SR_VER 3
#define WL127X_MAJOR_SR_VER 10
#define WL127X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE
-#define WL127X_MINOR_SR_VER 115
+#define WL127X_MINOR_SR_VER 133
/* minimum multi-role FW version for wl127x */
#define WL127X_IFTYPE_MR_VER 5
#define WL127X_MAJOR_MR_VER 7
#define WL127X_SUBTYPE_MR_VER WLCORE_FW_VER_IGNORE
-#define WL127X_MINOR_MR_VER 115
+#define WL127X_MINOR_MR_VER 42
/* FW chip version for wl128x */
#define WL128X_CHIP_VER 7
@@ -49,7 +49,7 @@
#define WL128X_IFTYPE_SR_VER 3
#define WL128X_MAJOR_SR_VER 10
#define WL128X_SUBTYPE_SR_VER WLCORE_FW_VER_IGNORE
-#define WL128X_MINOR_SR_VER 115
+#define WL128X_MINOR_SR_VER 133
/* minimum multi-role FW version for wl128x */
#define WL128X_IFTYPE_MR_VER 5
#define WL128X_MAJOR_MR_VER 7
diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c
index 09d944505ac0..2b642f8c9266 100644
--- a/drivers/net/wireless/ti/wl18xx/scan.c
+++ b/drivers/net/wireless/ti/wl18xx/scan.c
@@ -34,7 +34,7 @@ static void wl18xx_adjust_channels(struct wl18xx_cmd_scan_params *cmd,
memcpy(cmd->channels_2, cmd_channels->channels_2,
sizeof(cmd->channels_2));
memcpy(cmd->channels_5, cmd_channels->channels_5,
- sizeof(cmd->channels_2));
+ sizeof(cmd->channels_5));
/* channels_4 are not supported, so no need to copy them */
}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 82576fffb452..a0b50ad2ef31 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -778,12 +778,13 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
sco->meta_slots_used);
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
- if (ret && list_empty(&vif->notify_list))
- list_add_tail(&vif->notify_list, &notify);
xenvif_notify_tx_completion(vif);
- xenvif_put(vif);
+ if (ret && list_empty(&vif->notify_list))
+ list_add_tail(&vif->notify_list, &notify);
+ else
+ xenvif_put(vif);
npo.meta_cons += sco->meta_slots_used;
dev_kfree_skb(skb);
}
@@ -791,6 +792,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
notify_remote_via_irq(vif->rx_irq);
list_del_init(&vif->notify_list);
+ xenvif_put(vif);
}
/* More work to do? */
diff --git a/drivers/of/base.c b/drivers/of/base.c
index f53b992f060a..a6f584a7f4a1 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -192,14 +192,15 @@ EXPORT_SYMBOL(of_find_property);
struct device_node *of_find_all_nodes(struct device_node *prev)
{
struct device_node *np;
+ unsigned long flags;
- raw_spin_lock(&devtree_lock);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
np = prev ? prev->allnext : of_allnodes;
for (; np != NULL; np = np->allnext)
if (of_node_get(np))
break;
of_node_put(prev);
- raw_spin_unlock(&devtree_lock);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
EXPORT_SYMBOL(of_find_all_nodes);
@@ -421,8 +422,9 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
struct device_node *prev)
{
struct device_node *next;
+ unsigned long flags;
- raw_spin_lock(&devtree_lock);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
next = prev ? prev->sibling : node->child;
for (; next; next = next->sibling) {
if (!__of_device_is_available(next))
@@ -431,7 +433,7 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
break;
}
of_node_put(prev);
- raw_spin_unlock(&devtree_lock);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
return next;
}
EXPORT_SYMBOL(of_get_next_available_child);
@@ -735,13 +737,14 @@ EXPORT_SYMBOL_GPL(of_modalias_node);
struct device_node *of_find_node_by_phandle(phandle handle)
{
struct device_node *np;
+ unsigned long flags;
- raw_spin_lock(&devtree_lock);
+ raw_spin_lock_irqsave(&devtree_lock, flags);
for (np = of_allnodes; np; np = np->allnext)
if (np->phandle == handle)
break;
of_node_get(np);
- raw_spin_unlock(&devtree_lock);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
index 791a6719d8a9..8cd90e7e945a 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
@@ -2357,27 +2357,48 @@ static const unsigned int sdhi3_wp_mux[] = {
};
/* - USB0 ------------------------------------------------------------------- */
static const unsigned int usb0_pins[] = {
- /* OVC */
- 150, 154,
+ /* PENC */
+ 154,
};
static const unsigned int usb0_mux[] = {
- USB_OVC0_MARK, USB_PENC0_MARK,
+ USB_PENC0_MARK,
+};
+static const unsigned int usb0_ovc_pins[] = {
+ /* USB_OVC */
+ 150
+};
+static const unsigned int usb0_ovc_mux[] = {
+ USB_OVC0_MARK,
};
/* - USB1 ------------------------------------------------------------------- */
static const unsigned int usb1_pins[] = {
- /* OVC */
- 152, 155,
+ /* PENC */
+ 155,
};
static const unsigned int usb1_mux[] = {
- USB_OVC1_MARK, USB_PENC1_MARK,
+ USB_PENC1_MARK,
+};
+static const unsigned int usb1_ovc_pins[] = {
+ /* USB_OVC */
+ 152,
+};
+static const unsigned int usb1_ovc_mux[] = {
+ USB_OVC1_MARK,
};
/* - USB2 ------------------------------------------------------------------- */
static const unsigned int usb2_pins[] = {
- /* OVC, PENC */
- 125, 156,
+ /* PENC */
+ 156,
};
static const unsigned int usb2_mux[] = {
- USB_OVC2_MARK, USB_PENC2_MARK,
+ USB_PENC2_MARK,
+};
+static const unsigned int usb2_ovc_pins[] = {
+ /* USB_OVC */
+ 125,
+};
+static const unsigned int usb2_ovc_mux[] = {
+ USB_OVC2_MARK,
};
static const struct sh_pfc_pin_group pinmux_groups[] = {
@@ -2501,8 +2522,11 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(sdhi3_cd),
SH_PFC_PIN_GROUP(sdhi3_wp),
SH_PFC_PIN_GROUP(usb0),
+ SH_PFC_PIN_GROUP(usb0_ovc),
SH_PFC_PIN_GROUP(usb1),
+ SH_PFC_PIN_GROUP(usb1_ovc),
SH_PFC_PIN_GROUP(usb2),
+ SH_PFC_PIN_GROUP(usb2_ovc),
};
static const char * const du0_groups[] = {
@@ -2683,14 +2707,17 @@ static const char * const sdhi3_groups[] = {
static const char * const usb0_groups[] = {
"usb0",
+ "usb0_ovc",
};
static const char * const usb1_groups[] = {
"usb1",
+ "usb1_ovc",
};
static const char * const usb2_groups[] = {
"usb2",
+ "usb2_ovc",
};
static const struct sh_pfc_function pinmux_functions[] = {
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 8df0c5a21be2..d111c8687f9b 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -703,7 +703,7 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
}
rfkill_init_sw_state(gps_rfkill,
hp_wmi_get_sw_state(HPWMI_GPS));
- rfkill_set_hw_state(bluetooth_rfkill,
+ rfkill_set_hw_state(gps_rfkill,
hp_wmi_get_hw_state(HPWMI_GPS));
err = rfkill_register(gps_rfkill);
if (err)
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 0eab77b22340..f296f3f7db9b 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -25,6 +25,7 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/interrupt.h>
+#include <linux/spinlock.h>
#include <linux/ioctl.h>
#include <linux/completion.h>
#include <linux/io.h>
@@ -42,10 +43,65 @@
#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */
+struct at91_rtc_config {
+ bool use_shadow_imr;
+};
+
+static const struct at91_rtc_config *at91_rtc_config;
static DECLARE_COMPLETION(at91_rtc_updated);
static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
static void __iomem *at91_rtc_regs;
static int irq;
+static DEFINE_SPINLOCK(at91_rtc_lock);
+static u32 at91_rtc_shadow_imr;
+
+static void at91_rtc_write_ier(u32 mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&at91_rtc_lock, flags);
+ at91_rtc_shadow_imr |= mask;
+ at91_rtc_write(AT91_RTC_IER, mask);
+ spin_unlock_irqrestore(&at91_rtc_lock, flags);
+}
+
+static void at91_rtc_write_idr(u32 mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&at91_rtc_lock, flags);
+ at91_rtc_write(AT91_RTC_IDR, mask);
+ /*
+ * A register read back (of any RTC register) is needed to make sure
+ * the IDR write has reached the peripheral before the shadow
+ * mask is updated.
+ *
+ * Note that there is still a possibility that the mask is updated
+ * before interrupts have actually been disabled in hardware. The only
+ * way to be certain would be to poll the IMR-register, which is
+ * the very register we are trying to emulate. The register read back
+ * is a reasonable heuristic.
+ */
+ at91_rtc_read(AT91_RTC_SR);
+ at91_rtc_shadow_imr &= ~mask;
+ spin_unlock_irqrestore(&at91_rtc_lock, flags);
+}
+
+static u32 at91_rtc_read_imr(void)
+{
+ unsigned long flags;
+ u32 mask;
+
+ if (at91_rtc_config->use_shadow_imr) {
+ spin_lock_irqsave(&at91_rtc_lock, flags);
+ mask = at91_rtc_shadow_imr;
+ spin_unlock_irqrestore(&at91_rtc_lock, flags);
+ } else {
+ mask = at91_rtc_read(AT91_RTC_IMR);
+ }
+
+ return mask;
+}
/*
* Decode time/date into rtc_time structure
@@ -110,9 +166,9 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
cr = at91_rtc_read(AT91_RTC_CR);
at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
- at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD);
+ at91_rtc_write_ier(AT91_RTC_ACKUPD);
wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */
- at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD);
+ at91_rtc_write_idr(AT91_RTC_ACKUPD);
at91_rtc_write(AT91_RTC_TIMR,
bin2bcd(tm->tm_sec) << 0
@@ -144,7 +200,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
tm->tm_year = at91_alarm_year - 1900;
- alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
+ alrm->enabled = (at91_rtc_read_imr() & AT91_RTC_ALARM)
? 1 : 0;
dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -169,7 +225,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
tm.tm_min = alrm->time.tm_min;
tm.tm_sec = alrm->time.tm_sec;
- at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+ at91_rtc_write_idr(AT91_RTC_ALARM);
at91_rtc_write(AT91_RTC_TIMALR,
bin2bcd(tm.tm_sec) << 0
| bin2bcd(tm.tm_min) << 8
@@ -182,7 +238,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
if (alrm->enabled) {
at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
- at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
+ at91_rtc_write_ier(AT91_RTC_ALARM);
}
dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -198,9 +254,9 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
if (enabled) {
at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
- at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
+ at91_rtc_write_ier(AT91_RTC_ALARM);
} else
- at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+ at91_rtc_write_idr(AT91_RTC_ALARM);
return 0;
}
@@ -209,7 +265,7 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
*/
static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
{
- unsigned long imr = at91_rtc_read(AT91_RTC_IMR);
+ unsigned long imr = at91_rtc_read_imr();
seq_printf(seq, "update_IRQ\t: %s\n",
(imr & AT91_RTC_ACKUPD) ? "yes" : "no");
@@ -229,7 +285,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
unsigned int rtsr;
unsigned long events = 0;
- rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR);
+ rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr();
if (rtsr) { /* this interrupt is shared! Is it ours? */
if (rtsr & AT91_RTC_ALARM)
events |= (RTC_AF | RTC_IRQF);
@@ -250,6 +306,43 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
return IRQ_NONE; /* not handled */
}
+static const struct at91_rtc_config at91rm9200_config = {
+};
+
+static const struct at91_rtc_config at91sam9x5_config = {
+ .use_shadow_imr = true,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id at91_rtc_dt_ids[] = {
+ {
+ .compatible = "atmel,at91rm9200-rtc",
+ .data = &at91rm9200_config,
+ }, {
+ .compatible = "atmel,at91sam9x5-rtc",
+ .data = &at91sam9x5_config,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids);
+#endif
+
+static const struct at91_rtc_config *
+at91_rtc_get_config(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+
+ if (pdev->dev.of_node) {
+ match = of_match_node(at91_rtc_dt_ids, pdev->dev.of_node);
+ if (!match)
+ return NULL;
+ return (const struct at91_rtc_config *)match->data;
+ }
+
+ return &at91rm9200_config;
+}
+
static const struct rtc_class_ops at91_rtc_ops = {
.read_time = at91_rtc_readtime,
.set_time = at91_rtc_settime,
@@ -268,6 +361,10 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
struct resource *regs;
int ret = 0;
+ at91_rtc_config = at91_rtc_get_config(pdev);
+ if (!at91_rtc_config)
+ return -ENODEV;
+
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
dev_err(&pdev->dev, "no mmio resource defined\n");
@@ -290,7 +387,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
at91_rtc_write(AT91_RTC_MR, 0); /* 24 hour mode */
/* Disable all interrupts */
- at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
+ at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM |
AT91_RTC_SECEV | AT91_RTC_TIMEV |
AT91_RTC_CALEV);
@@ -335,7 +432,7 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
struct rtc_device *rtc = platform_get_drvdata(pdev);
/* Disable all interrupts */
- at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
+ at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM |
AT91_RTC_SECEV | AT91_RTC_TIMEV |
AT91_RTC_CALEV);
free_irq(irq, pdev);
@@ -358,13 +455,13 @@ static int at91_rtc_suspend(struct device *dev)
/* this IRQ is shared with DBGU and other hardware which isn't
* necessarily doing PM like we are...
*/
- at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR)
+ at91_rtc_imr = at91_rtc_read_imr()
& (AT91_RTC_ALARM|AT91_RTC_SECEV);
if (at91_rtc_imr) {
if (device_may_wakeup(dev))
enable_irq_wake(irq);
else
- at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr);
+ at91_rtc_write_idr(at91_rtc_imr);
}
return 0;
}
@@ -375,7 +472,7 @@ static int at91_rtc_resume(struct device *dev)
if (device_may_wakeup(dev))
disable_irq_wake(irq);
else
- at91_rtc_write(AT91_RTC_IER, at91_rtc_imr);
+ at91_rtc_write_ier(at91_rtc_imr);
}
return 0;
}
@@ -383,12 +480,6 @@ static int at91_rtc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume);
-static const struct of_device_id at91_rtc_dt_ids[] = {
- { .compatible = "atmel,at91rm9200-rtc" },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, at91_rtc_dt_ids);
-
static struct platform_driver at91_rtc_driver = {
.remove = __exit_p(at91_rtc_remove),
.driver = {
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index cc5bea9c4b1c..f1cb706445c7 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -854,6 +854,9 @@ static int cmos_resume(struct device *dev)
}
spin_lock_irq(&rtc_lock);
+ if (device_may_wakeup(dev))
+ hpet_rtc_timer_init();
+
do {
CMOS_WRITE(tmp, RTC_CONTROL);
hpet_set_rtc_irq_bit(tmp & RTC_IRQMASK);
@@ -869,7 +872,6 @@ static int cmos_resume(struct device *dev)
rtc_update_irq(cmos->rtc, 1, mask);
tmp &= ~RTC_AIE;
hpet_mask_rtc_irq_bit(RTC_AIE);
- hpet_rtc_timer_init();
} while (mask & RTC_AIE);
spin_unlock_irq(&rtc_lock);
}
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index 459c2ffc95a6..426901cef14f 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -273,6 +273,8 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
return ret;
}
+ device_init_wakeup(&pdev->dev, 1);
+
platform_set_drvdata(pdev, rtc);
rtc->rtc = devm_rtc_device_register(&pdev->dev, dev_name(&pdev->dev),
&tps6586x_rtc_ops, THIS_MODULE);
@@ -292,7 +294,6 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
goto fail_rtc_register;
}
disable_irq(rtc->irq);
- device_set_wakeup_capable(&pdev->dev, 1);
return 0;
fail_rtc_register:
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 8751a5240c99..b2eab34f38d9 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -524,6 +524,7 @@ static int twl_rtc_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, rtc);
+ device_init_wakeup(&pdev->dev, 1);
return 0;
out2:
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 4ffa66c87ea5..9ca3996f65b2 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -2040,6 +2040,7 @@ static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
netiucv_setup_netdevice);
if (!dev)
return NULL;
+ rtnl_lock();
if (dev_alloc_name(dev, dev->name) < 0)
goto out_netdev;
@@ -2061,6 +2062,7 @@ static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
out_fsm:
kfree_fsm(privptr->fsm);
out_netdev:
+ rtnl_unlock();
free_netdev(dev);
return NULL;
}
@@ -2100,6 +2102,7 @@ static ssize_t conn_write(struct device_driver *drv,
rc = netiucv_register_device(dev);
if (rc) {
+ rtnl_unlock();
IUCV_DBF_TEXT_(setup, 2,
"ret %d from netiucv_register_device\n", rc);
goto out_free_ndev;
@@ -2109,7 +2112,8 @@ static ssize_t conn_write(struct device_driver *drv,
priv = netdev_priv(dev);
SET_NETDEV_DEV(dev, priv->dev);
- rc = register_netdev(dev);
+ rc = register_netdevice(dev);
+ rtnl_unlock();
if (rc)
goto out_unreg;
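
For context on the netiucv change above: register_netdev() takes the RTNL internally, while register_netdevice() relies on the caller already holding it, which is what lets the driver keep the lock across name allocation and registration. A minimal sketch of that pattern follows; the helper name is made up and not part of the driver.

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Illustrative only: allocate a name and register while holding the RTNL. */
static int sketch_register_under_rtnl(struct net_device *dev)
{
	int rc;

	rtnl_lock();
	rc = dev_alloc_name(dev, dev->name);	/* name stays reserved under RTNL */
	if (rc < 0)
		goto out;
	rc = register_netdevice(dev);		/* caller must hold the RTNL */
out:
	rtnl_unlock();
	return rc;
}
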
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 439c012be763..b63d534192e3 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -186,7 +186,7 @@ bfad_debugfs_lseek(struct file *file, loff_t offset, int orig)
file->f_pos += offset;
break;
case 2:
- file->f_pos = debug->buffer_len - offset;
+ file->f_pos = debug->buffer_len + offset;
break;
default:
return -EINVAL;
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index adc1f7f471f5..85e1ffd0e5c5 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -174,7 +174,7 @@ static loff_t fnic_trace_debugfs_lseek(struct file *file,
pos = file->f_pos + offset;
break;
case 2:
- pos = fnic_dbg_prt->buffer_len - offset;
+ pos = fnic_dbg_prt->buffer_len + offset;
}
return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ?
-EINVAL : (file->f_pos = pos);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index f63f5ff7f274..f525ecb7a9c6 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1178,7 +1178,7 @@ lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
pos = file->f_pos + off;
break;
case 2:
- pos = debug->len - off;
+ pos = debug->len + off;
}
return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
}
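
The three debugfs lseek hunks above (bfad, fnic, lpfc) converge on the same SEEK_END convention: the new position is the buffer length plus the caller's offset, which is normally zero or negative, not the length minus it. A small illustrative helper with a made-up name and an explicit buffer_len parameter, not taken from any of the drivers:

#include <linux/fs.h>
#include <linux/errno.h>

static loff_t sketch_debugfs_lseek(struct file *file, loff_t offset,
				   int whence, loff_t buffer_len)
{
	loff_t pos;

	switch (whence) {
	case SEEK_SET:
		pos = offset;
		break;
	case SEEK_CUR:
		pos = file->f_pos + offset;
		break;
	case SEEK_END:
		pos = buffer_len + offset;	/* offset is normally <= 0 */
		break;
	default:
		return -EINVAL;
	}
	if (pos < 0 || pos > buffer_len)
		return -EINVAL;
	file->f_pos = pos;
	return pos;
}
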
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 60cfae51c713..eab593eaaafa 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -89,7 +89,7 @@ static int hspi_status_check_timeout(struct hspi_priv *hspi, u32 mask, u32 val)
if ((mask & hspi_read(hspi, SPSR)) == val)
return 0;
- msleep(20);
+ udelay(10);
}
dev_err(hspi->dev, "timeout\n");
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 35f60bd252dd..637d728fbeb5 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1487,7 +1487,7 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
return 0;
err_spi_register_master:
- free_irq(board_dat->pdev->irq, board_dat);
+ free_irq(board_dat->pdev->irq, data);
err_request_irq:
pch_spi_free_resources(board_dat, data);
err_spi_get_resources:
@@ -1667,6 +1667,7 @@ static int pch_spi_probe(struct pci_dev *pdev,
pd_dev = platform_device_alloc("pch-spi", i);
if (!pd_dev) {
dev_err(&pdev->dev, "platform_device_alloc failed\n");
+ retval = -ENOMEM;
goto err_platform_device;
}
pd_dev_save->pd_save[i] = pd_dev;
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index e1d769607425..34d18dcfa0db 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -267,7 +267,6 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
{
struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
u32 ipif_ier;
- u16 cr;
/* We get here with transmitter inhibited */
@@ -276,7 +275,6 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
xspi->remaining_bytes = t->len;
INIT_COMPLETION(xspi->done);
- xilinx_spi_fill_tx_fifo(xspi);
/* Enable the transmit empty interrupt, which we use to determine
* progress on the transmission.
@@ -285,12 +283,41 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY,
xspi->regs + XIPIF_V123B_IIER_OFFSET);
- /* Start the transfer by not inhibiting the transmitter any longer */
- cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) &
- ~XSPI_CR_TRANS_INHIBIT;
- xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+ for (;;) {
+ u16 cr;
+ u8 sr;
+
+ xilinx_spi_fill_tx_fifo(xspi);
+
+ /* Start the transfer by not inhibiting the transmitter any
+ * longer
+ */
+ cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) &
+ ~XSPI_CR_TRANS_INHIBIT;
+ xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+
+ wait_for_completion(&xspi->done);
+
+ /* A transmit has just completed. Process received data and
+ * check for more data to transmit. Always inhibit the
+ * transmitter while the Isr refills the transmit register/FIFO,
+ * or make sure it is stopped if we're done.
+ */
+ cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
+ xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
+ xspi->regs + XSPI_CR_OFFSET);
+
+ /* Read out all the data from the Rx FIFO */
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
+ while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
+ xspi->rx_fn(xspi);
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
+ }
- wait_for_completion(&xspi->done);
+ /* See if there is more data to send */
+ if (xspi->remaining_bytes <= 0)
+ break;
+ }
/* Disable the transmit empty interrupt */
xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET);
@@ -314,38 +341,7 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET);
if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */
- u16 cr;
- u8 sr;
-
- /* A transmit has just completed. Process received data and
- * check for more data to transmit. Always inhibit the
- * transmitter while the Isr refills the transmit register/FIFO,
- * or make sure it is stopped if we're done.
- */
- cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
- xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
- xspi->regs + XSPI_CR_OFFSET);
-
- /* Read out all the data from the Rx FIFO */
- sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
- while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
- xspi->rx_fn(xspi);
- sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
- }
-
- /* See if there is more data to send */
- if (xspi->remaining_bytes > 0) {
- xilinx_spi_fill_tx_fifo(xspi);
- /* Start the transfer by not inhibiting the
- * transmitter any longer
- */
- xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
- } else {
- /* No more data to send.
- * Indicate the transfer is completed.
- */
- complete(&xspi->done);
- }
+ complete(&xspi->done);
}
return IRQ_HANDLED;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 49b098bedf9b..475c9c114689 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -276,8 +276,9 @@ static void ci_role_work(struct work_struct *work)
ci_role_stop(ci);
ci_role_start(ci, role);
- enable_irq(ci->irq);
}
+
+ enable_irq(ci->irq);
}
static irqreturn_t ci_irq(int irq, void *data)
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 519ead2443c5..b501346484ae 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1678,8 +1678,11 @@ static int udc_start(struct ci13xxx *ci)
ci->gadget.ep0 = &ci->ep0in->ep;
- if (ci->global_phy)
+ if (ci->global_phy) {
ci->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
+ if (IS_ERR(ci->transceiver))
+ ci->transceiver = NULL;
+ }
if (ci->platdata->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
if (ci->transceiver == NULL) {
@@ -1694,7 +1697,7 @@ static int udc_start(struct ci13xxx *ci)
goto put_transceiver;
}
- if (!IS_ERR_OR_NULL(ci->transceiver)) {
+ if (ci->transceiver) {
retval = otg_set_peripheral(ci->transceiver->otg,
&ci->gadget);
if (retval)
@@ -1711,7 +1714,7 @@ static int udc_start(struct ci13xxx *ci)
return retval;
remove_trans:
- if (!IS_ERR_OR_NULL(ci->transceiver)) {
+ if (ci->transceiver) {
otg_set_peripheral(ci->transceiver->otg, NULL);
if (ci->global_phy)
usb_put_phy(ci->transceiver);
@@ -1719,7 +1722,7 @@ remove_trans:
dev_err(dev, "error = %i\n", retval);
put_transceiver:
- if (!IS_ERR_OR_NULL(ci->transceiver) && ci->global_phy)
+ if (ci->transceiver && ci->global_phy)
usb_put_phy(ci->transceiver);
destroy_eps:
destroy_eps(ci);
@@ -1747,7 +1750,7 @@ static void udc_stop(struct ci13xxx *ci)
dma_pool_destroy(ci->td_pool);
dma_pool_destroy(ci->qh_pool);
- if (!IS_ERR_OR_NULL(ci->transceiver)) {
+ if (ci->transceiver) {
otg_set_peripheral(ci->transceiver->otg, NULL);
if (ci->global_phy)
usb_put_phy(ci->transceiver);
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 090b411d893f..7d8dd5aad236 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -165,11 +165,12 @@ static void f81232_set_termios(struct tty_struct *tty,
/* FIXME - Stubbed out for now */
/* Don't change anything if nothing has changed */
- if (!tty_termios_hw_change(&tty->termios, old_termios))
+ if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
return;
/* Do the real work here... */
- tty_termios_copy_hw(&tty->termios, old_termios);
+ if (old_termios)
+ tty_termios_copy_hw(&tty->termios, old_termios);
}
static int f81232_tiocmget(struct tty_struct *tty)
@@ -187,12 +188,11 @@ static int f81232_tiocmset(struct tty_struct *tty,
static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port)
{
- struct ktermios tmp_termios;
int result;
/* Setup termios */
if (tty)
- f81232_set_termios(tty, port, &tmp_termios);
+ f81232_set_termios(tty, port, NULL);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 7151659367a0..048cd44d51b1 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -284,7 +284,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
serial settings even to the same values as before. Thus
we actually need to filter in this specific case */
- if (!tty_termios_hw_change(&tty->termios, old_termios))
+ if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
return;
cflag = tty->termios.c_cflag;
@@ -293,7 +293,8 @@ static void pl2303_set_termios(struct tty_struct *tty,
if (!buf) {
dev_err(&port->dev, "%s - out of memory.\n", __func__);
/* Report back no change occurred */
- tty->termios = *old_termios;
+ if (old_termios)
+ tty->termios = *old_termios;
return;
}
@@ -433,7 +434,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
control = priv->line_control;
if ((cflag & CBAUD) == B0)
priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
- else if ((old_termios->c_cflag & CBAUD) == B0)
+ else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
if (control != priv->line_control) {
control = priv->line_control;
@@ -492,7 +493,6 @@ static void pl2303_close(struct usb_serial_port *port)
static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
{
- struct ktermios tmp_termios;
struct usb_serial *serial = port->serial;
struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
int result;
@@ -508,7 +508,7 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
/* Setup termios */
if (tty)
- pl2303_set_termios(tty, port, &tmp_termios);
+ pl2303_set_termios(tty, port, NULL);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index cf3df793c2b7..ddf6c47137dc 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -291,7 +291,6 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
unsigned int cflag = tty->termios.c_cflag;
- unsigned int old_cflag = old_termios->c_cflag;
unsigned short uartdata;
unsigned char buf[2] = {0, 0};
int baud;
@@ -299,15 +298,15 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
u8 control;
/* check that they really want us to change something */
- if (!tty_termios_hw_change(&tty->termios, old_termios))
+ if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
return;
/* set DTR/RTS active */
spin_lock_irqsave(&priv->lock, flags);
control = priv->line_control;
- if ((old_cflag & CBAUD) == B0) {
+ if (old_termios && (old_termios->c_cflag & CBAUD) == B0) {
priv->line_control |= MCR_DTR;
- if (!(old_cflag & CRTSCTS))
+ if (!(old_termios->c_cflag & CRTSCTS))
priv->line_control |= MCR_RTS;
}
if (control != priv->line_control) {
@@ -394,7 +393,6 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
{
- struct ktermios tmp_termios;
struct usb_serial *serial = port->serial;
struct spcp8x5_private *priv = usb_get_serial_port_data(port);
int ret;
@@ -411,7 +409,7 @@ static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
spcp8x5_set_ctrl_line(port, priv->line_control);
if (tty)
- spcp8x5_set_termios(tty, port, &tmp_termios);
+ spcp8x5_set_termios(tty, port, NULL);
port->port.drain_delay = 256;
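
The f81232, pl2303 and spcp8x5 hunks above all adopt the same convention: open() now calls set_termios() with old_termios == NULL, so the handler must treat NULL as "no previous state" rather than dereferencing it. A hedged sketch of that shape; the function below is illustrative and not taken from any of the drivers:

#include <linux/tty.h>
#include <linux/usb/serial.h>

static void sketch_set_termios(struct tty_struct *tty,
			       struct usb_serial_port *port,
			       struct ktermios *old_termios)
{
	/* "Nothing changed" is only meaningful when previous state exists. */
	if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
		return;

	/* ... program the hardware from tty->termios here ... */

	/* Transitions out of B0 can only be detected against old settings. */
	if (old_termios && (old_termios->c_cflag & CBAUD) == B0) {
		/* e.g. reassert DTR/RTS here */
	}
}
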
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index acb7121a9316..6d78736563de 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -1360,7 +1360,7 @@ static const struct file_operations vfio_device_fops = {
*/
static char *vfio_devnode(struct device *dev, umode_t *mode)
{
- if (MINOR(dev->devt) == 0)
+ if (mode && (MINOR(dev->devt) == 0))
*mode = S_IRUGO | S_IWUGO;
return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 2b51e2336aa2..f80d3dd41d8c 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -155,14 +155,11 @@ static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
static void vhost_net_clear_ubuf_info(struct vhost_net *n)
{
-
- bool zcopy;
int i;
- for (i = 0; i < n->dev.nvqs; ++i) {
- zcopy = vhost_net_zcopy_mask & (0x1 << i);
- if (zcopy)
- kfree(n->vqs[i].ubuf_info);
+ for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
+ kfree(n->vqs[i].ubuf_info);
+ n->vqs[i].ubuf_info = NULL;
}
}
@@ -171,7 +168,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
bool zcopy;
int i;
- for (i = 0; i < n->dev.nvqs; ++i) {
+ for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
zcopy = vhost_net_zcopy_mask & (0x1 << i);
if (!zcopy)
continue;
@@ -183,12 +180,7 @@ int vhost_net_set_ubuf_info(struct vhost_net *n)
return 0;
err:
- while (i--) {
- zcopy = vhost_net_zcopy_mask & (0x1 << i);
- if (!zcopy)
- continue;
- kfree(n->vqs[i].ubuf_info);
- }
+ vhost_net_clear_ubuf_info(n);
return -ENOMEM;
}
@@ -196,12 +188,12 @@ void vhost_net_vq_reset(struct vhost_net *n)
{
int i;
+ vhost_net_clear_ubuf_info(n);
+
for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
n->vqs[i].done_idx = 0;
n->vqs[i].upend_idx = 0;
n->vqs[i].ubufs = NULL;
- kfree(n->vqs[i].ubuf_info);
- n->vqs[i].ubuf_info = NULL;
n->vqs[i].vhost_hlen = 0;
n->vqs[i].sock_hlen = 0;
}
@@ -436,7 +428,8 @@ static void handle_tx(struct vhost_net *net)
kref_get(&ubufs->kref);
}
nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
- }
+ } else
+ msg.msg_control = NULL;
/* TODO: Check specific error and bomb out unless ENOBUFS? */
err = sock->ops->sendmsg(NULL, sock, &msg, len);
if (unlikely(err < 0)) {
@@ -1053,6 +1046,10 @@ static long vhost_net_set_owner(struct vhost_net *n)
int r;
mutex_lock(&n->dev.mutex);
+ if (vhost_dev_has_owner(&n->dev)) {
+ r = -EBUSY;
+ goto out;
+ }
r = vhost_net_set_ubuf_info(n);
if (r)
goto out;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index beee7f5787e6..60aa5ad09a2f 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -344,13 +344,19 @@ static int vhost_attach_cgroups(struct vhost_dev *dev)
}
/* Caller should have device mutex */
+bool vhost_dev_has_owner(struct vhost_dev *dev)
+{
+ return dev->mm;
+}
+
+/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
struct task_struct *worker;
int err;
/* Is there an owner already? */
- if (dev->mm) {
+ if (vhost_dev_has_owner(dev)) {
err = -EBUSY;
goto err_mm;
}
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index a7ad63592987..64adcf99ff33 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -133,6 +133,7 @@ struct vhost_dev {
long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
long vhost_dev_set_owner(struct vhost_dev *dev);
+bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_memory *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *);
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index cc072c66c766..0f0493c63371 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -379,10 +379,10 @@ static int xen_tmem_init(void)
#ifdef CONFIG_FRONTSWAP
if (tmem_enabled && frontswap) {
char *s = "";
- struct frontswap_ops *old_ops =
- frontswap_register_ops(&tmem_frontswap_ops);
+ struct frontswap_ops *old_ops;
tmem_frontswap_poolid = -1;
+ old_ops = frontswap_register_ops(&tmem_frontswap_ops);
if (IS_ERR(old_ops) || old_ops) {
if (IS_ERR(old_ops))
return PTR_ERR(old_ops);