Diffstat (limited to 'drivers')
 drivers/acpi/blacklist.c | 8
 drivers/acpi/pci_root.c | 16
 drivers/ata/ahci.c | 28
 drivers/ata/libahci.c | 78
 drivers/ata/sata_rcar.c | 15
 drivers/base/power/domain.c | 42
 drivers/block/zram/zram_drv.c | 3
 drivers/char/hw_random/pseries-rng.c | 11
 drivers/char/virtio_console.c | 4
 drivers/cpufreq/cpufreq-dt.c | 4
 drivers/cpufreq/cpufreq.c | 3
 drivers/crypto/caam/key_gen.c | 29
 drivers/crypto/qat/qat_common/adf_accel_devices.h | 3
 drivers/crypto/qat/qat_common/adf_transport.c | 12
 drivers/crypto/qat/qat_common/qat_algs.c | 7
 drivers/crypto/qat/qat_common/qat_crypto.c | 8
 drivers/crypto/qat/qat_dh895xcc/adf_admin.c | 2
 drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 32
 drivers/crypto/qat/qat_dh895xcc/adf_isr.c | 2
 drivers/firewire/core-cdev.c | 3
 drivers/gpu/drm/exynos/exynos_drm_drv.c | 49
 drivers/gpu/drm/exynos/exynos_drm_g2d.c | 9
 drivers/gpu/drm/i915/i915_drv.c | 10
 drivers/gpu/drm/i915/i915_gem_gtt.c | 16
 drivers/gpu/drm/i915/i915_gem_tiling.c | 19
 drivers/gpu/drm/i915/intel_panel.c | 17
 drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c | 16
 drivers/gpu/drm/nouveau/nv50_display.c | 25
 drivers/gpu/drm/radeon/atom.c | 11
 drivers/gpu/drm/radeon/atom.h | 2
 drivers/gpu/drm/radeon/atombios_dp.c | 4
 drivers/gpu/drm/radeon/atombios_i2c.c | 4
 drivers/gpu/drm/radeon/cik.c | 7
 drivers/gpu/drm/radeon/cik_sdma.c | 21
 drivers/gpu/drm/radeon/evergreen.c | 4
 drivers/gpu/drm/radeon/r100.c | 3
 drivers/gpu/drm/radeon/r600_dma.c | 20
 drivers/gpu/drm/radeon/radeon_device.c | 1
 drivers/gpu/drm/radeon/rs600.c | 3
 drivers/gpu/drm/radeon/rs690.c | 3
 drivers/gpu/drm/radeon/rv515.c | 3
 drivers/gpu/drm/radeon/si.c | 3
 drivers/gpu/drm/tegra/dc.c | 9
 drivers/hid/hid-core.c | 1
 drivers/hid/hid-ids.h | 1
 drivers/hid/usbhid/hid-quirks.c | 1
 drivers/hwmon/fam15h_power.c | 2
 drivers/hwmon/ibmpowernv.c | 6
 drivers/hwmon/pwm-fan.c | 13
 drivers/input/misc/twl4030-pwrbutton.c | 1
 drivers/input/mouse/alps.c | 28
 drivers/input/mouse/elantech.c | 56
 drivers/input/mouse/synaptics.c | 5
 drivers/iommu/Kconfig | 25
 drivers/iommu/Makefile | 2
 drivers/iommu/amd_iommu.c | 3
 drivers/iommu/arm-smmu.c | 134
 drivers/iommu/dmar.c | 532
 drivers/iommu/exynos-iommu.c | 1
 drivers/iommu/intel-iommu.c | 308
 drivers/iommu/intel_irq_remapping.c | 249
 drivers/iommu/iommu.c | 60
 drivers/iommu/ipmmu-vmsa.c | 3
 drivers/iommu/msm_iommu.c | 4
 drivers/iommu/msm_iommu_dev.c | 10
 drivers/iommu/omap-iommu-debug.c | 242
 drivers/iommu/omap-iommu.c | 313
 drivers/iommu/omap-iommu.h | 98
 drivers/iommu/omap-iommu2.c | 337
 drivers/iommu/rockchip-iommu.c | 1038
 drivers/iommu/shmobile-iommu.c | 1
 drivers/iommu/tegra-smmu.c | 1
 drivers/md/dm-bufio.c | 12
 drivers/md/dm-raid.c | 17
 drivers/md/dm-stripe.c | 4
 drivers/md/dm-thin.c | 16
 drivers/md/md.c | 4
 drivers/md/persistent-data/dm-btree-internal.h | 6
 drivers/md/persistent-data/dm-btree-spine.c | 2
 drivers/md/persistent-data/dm-btree.c | 24
 drivers/mfd/max77693.c | 14
 drivers/mfd/rtsx_pcr.c | 2
 drivers/mfd/stmpe.h | 2
 drivers/mfd/twl4030-power.c | 52
 drivers/mfd/viperboard.c | 5
 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 18
 drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 4
 drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 11
 drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 5
 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c | 7
 drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 7
 drivers/net/ethernet/broadcom/bcmsysport.c | 13
 drivers/net/ethernet/broadcom/genet/bcmgenet.c | 11
 drivers/net/ethernet/broadcom/genet/bcmgenet.h | 3
 drivers/net/ethernet/broadcom/genet/bcmmii.c | 9
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 31
 drivers/net/ethernet/chelsio/cxgb4/sge.c | 30
 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 51
 drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 10
 drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 8
 drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 136
 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 2
 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 28
 drivers/net/ethernet/cisco/enic/enic_main.c | 20
 drivers/net/ethernet/freescale/fec_main.c | 39
 drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 4
 drivers/net/ethernet/marvell/mv643xx_eth.c | 18
 drivers/net/ethernet/marvell/mvpp2.c | 27
 drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 22
 drivers/net/ethernet/mellanox/mlx5/core/eq.c | 7
 drivers/net/ethernet/mellanox/mlx5/core/main.c | 4
 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 3
 drivers/net/ethernet/qualcomm/Kconfig | 3
 drivers/net/ethernet/sfc/ef10.c | 3
 drivers/net/ethernet/smsc/smc91x.c | 20
 drivers/net/ethernet/smsc/smsc911x.c | 61
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 52
 drivers/net/ethernet/sun/sunhme.c | 62
 drivers/net/ethernet/ti/cpsw_ale.c | 1
 drivers/net/ethernet/ti/cpts.c | 2
 drivers/net/macvtap.c | 2
 drivers/net/phy/dp83640.c | 4
 drivers/net/phy/phy.c | 36
 drivers/net/ppp/ppp_generic.c | 40
 drivers/net/tun.c | 28
 drivers/net/usb/asix_devices.c | 14
 drivers/net/vxlan.c | 31
 drivers/net/wireless/iwlwifi/mvm/fw.c | 10
 drivers/net/wireless/iwlwifi/mvm/mac80211.c | 1
 drivers/net/wireless/iwlwifi/mvm/mvm.h | 1
 drivers/net/wireless/iwlwifi/mvm/ops.c | 12
 drivers/net/wireless/iwlwifi/pcie/trans.c | 4
 drivers/net/wireless/mac80211_hwsim.c | 4
 drivers/platform/x86/Kconfig | 1
 drivers/platform/x86/hp_accel.c | 44
 drivers/power/ab8500_fg.c | 17
 drivers/power/bq2415x_charger.c | 23
 drivers/power/charger-manager.c | 164
 drivers/power/power_supply_core.c | 3
 drivers/s390/kvm/virtio_ccw.c | 1
 drivers/scsi/bnx2fc/bnx2fc_els.c | 2
 drivers/scsi/bnx2fc/bnx2fc_io.c | 19
 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 15
 drivers/scsi/cxgbi/libcxgbi.c | 18
 drivers/scsi/device_handler/scsi_dh_alua.c | 7
 drivers/scsi/megaraid/megaraid_sas_base.c | 2
 drivers/scsi/scsi_error.c | 20
 drivers/thermal/imx_thermal.c | 45
 drivers/thermal/int340x_thermal/int3403_thermal.c | 8
 drivers/thermal/samsung/exynos_tmu_data.c | 1
 drivers/thermal/samsung/exynos_tmu_data.h | 1
 151 files changed, 3898 insertions(+), 1616 deletions(-)
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index ed122e17636e..7556e7c4a055 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -290,6 +290,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3446"),
},
},
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Dell Vostro 3546",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3546"),
+ },
+ },
/*
* BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index cd4de7e038ea..c6bcb8c719d8 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -33,6 +33,7 @@
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/pci-aspm.h>
+#include <linux/dmar.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/dmi.h>
@@ -525,6 +526,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
struct acpi_pci_root *root;
acpi_handle handle = device->handle;
int no_aspm = 0, clear_aspm = 0;
+ bool hotadd = system_state != SYSTEM_BOOTING;
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
@@ -571,6 +573,11 @@ static int acpi_pci_root_add(struct acpi_device *device,
strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;
+ if (hotadd && dmar_device_add(handle)) {
+ result = -ENXIO;
+ goto end;
+ }
+
pr_info(PREFIX "%s [%s] (domain %04x %pR)\n",
acpi_device_name(device), acpi_device_bid(device),
root->segment, &root->secondary);
@@ -597,7 +604,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
root->segment, (unsigned int)root->secondary.start);
device->driver_data = NULL;
result = -ENODEV;
- goto end;
+ goto remove_dmar;
}
if (clear_aspm) {
@@ -611,7 +618,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true);
- if (system_state != SYSTEM_BOOTING) {
+ if (hotadd) {
pcibios_resource_survey_bus(root->bus);
pci_assign_unassigned_root_bus_resources(root->bus);
}
@@ -621,6 +628,9 @@ static int acpi_pci_root_add(struct acpi_device *device,
pci_unlock_rescan_remove();
return 1;
+remove_dmar:
+ if (hotadd)
+ dmar_device_remove(handle);
end:
kfree(root);
return result;
@@ -639,6 +649,8 @@ static void acpi_pci_root_remove(struct acpi_device *device)
pci_remove_root_bus(root->bus);
+ dmar_device_remove(device->handle);
+
pci_unlock_rescan_remove();
kfree(root);
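
The pci_root change above wires dmar_device_add() into the hot-add path and unwinds it through a dedicated label on failure. A minimal sketch of that layered goto-unwind idiom; step_a()/step_b()/undo_step_a() are hypothetical stand-ins for dmar_device_add(), the bus scan, and dmar_device_remove():

#include <linux/errno.h>
#include <linux/types.h>

/* Each failure path jumps to the label that undoes exactly the steps
 * that have succeeded so far -- conditional steps get conditional undo.
 */
static int step_a(void) { return 0; }	/* e.g. dmar_device_add() */
static int step_b(void) { return 0; }	/* e.g. scanning the root bus */
static void undo_step_a(void) { }	/* e.g. dmar_device_remove() */

static int example_add(bool hotadd)
{
	int result;

	if (hotadd && step_a())
		return -ENXIO;

	result = step_b();
	if (result)
		goto undo_a;

	return 0;

undo_a:
	if (hotadd)
		undo_step_a();
	return result;
}
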
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5f039f191067..e45f83789809 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -60,6 +60,7 @@ enum board_ids {
/* board IDs by feature in alphabetical order */
board_ahci,
board_ahci_ign_iferr,
+ board_ahci_nomsi,
board_ahci_noncq,
board_ahci_nosntf,
board_ahci_yes_fbs,
@@ -121,6 +122,13 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_nomsi] = {
+ AHCI_HFLAGS (AHCI_HFLAG_NO_MSI),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
[board_ahci_noncq] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
.flags = AHCI_FLAG_COMMON,
@@ -313,6 +321,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
{ PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
{ PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
+ { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -475,10 +488,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
/*
- * Samsung SSDs found on some macbooks. NCQ times out.
- * https://bugzilla.kernel.org/show_bug.cgi?id=60731
+ * Samsung SSDs found on some macbooks. NCQ times out if MSI is
+ * enabled. https://bugzilla.kernel.org/show_bug.cgi?id=60731
*/
- { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq },
+ { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
/* Enmotus */
{ PCI_DEVICE(0x1c44, 0x8000), board_ahci },
@@ -514,12 +527,9 @@ MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
{
- unsigned int force_port_map = 0;
- unsigned int mask_port_map = 0;
-
if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
dev_info(&pdev->dev, "JMB361 has only one port\n");
- force_port_map = 1;
+ hpriv->force_port_map = 1;
}
/*
@@ -529,9 +539,9 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
*/
if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
if (pdev->device == 0x6121)
- mask_port_map = 0x3;
+ hpriv->mask_port_map = 0x3;
else
- mask_port_map = 0xf;
+ hpriv->mask_port_map = 0xf;
dev_info(&pdev->dev,
"Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
}
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 5eb61c9e63da..97683e45ab04 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1778,16 +1778,15 @@ static void ahci_handle_port_interrupt(struct ata_port *ap,
}
}
-static void ahci_update_intr_status(struct ata_port *ap)
+static void ahci_port_intr(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
- struct ahci_port_priv *pp = ap->private_data;
u32 status;
status = readl(port_mmio + PORT_IRQ_STAT);
writel(status, port_mmio + PORT_IRQ_STAT);
- atomic_or(status, &pp->intr_status);
+ ahci_handle_port_interrupt(ap, port_mmio, status);
}
static irqreturn_t ahci_port_thread_fn(int irq, void *dev_instance)
@@ -1808,34 +1807,6 @@ static irqreturn_t ahci_port_thread_fn(int irq, void *dev_instance)
return IRQ_HANDLED;
}
-irqreturn_t ahci_thread_fn(int irq, void *dev_instance)
-{
- struct ata_host *host = dev_instance;
- struct ahci_host_priv *hpriv = host->private_data;
- u32 irq_masked = hpriv->port_map;
- unsigned int i;
-
- for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap;
-
- if (!(irq_masked & (1 << i)))
- continue;
-
- ap = host->ports[i];
- if (ap) {
- ahci_port_thread_fn(irq, ap);
- VPRINTK("port %u\n", i);
- } else {
- VPRINTK("port %u (no irq)\n", i);
- if (ata_ratelimit())
- dev_warn(host->dev,
- "interrupt on disabled port %u\n", i);
- }
- }
-
- return IRQ_HANDLED;
-}
-
static irqreturn_t ahci_multi_irqs_intr(int irq, void *dev_instance)
{
struct ata_port *ap = dev_instance;
@@ -1875,6 +1846,8 @@ static irqreturn_t ahci_single_irq_intr(int irq, void *dev_instance)
irq_masked = irq_stat & hpriv->port_map;
+ spin_lock(&host->lock);
+
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap;
@@ -1883,7 +1856,7 @@ static irqreturn_t ahci_single_irq_intr(int irq, void *dev_instance)
ap = host->ports[i];
if (ap) {
- ahci_update_intr_status(ap);
+ ahci_port_intr(ap);
VPRINTK("port %u\n", i);
} else {
VPRINTK("port %u (no irq)\n", i);
@@ -1906,9 +1879,11 @@ static irqreturn_t ahci_single_irq_intr(int irq, void *dev_instance)
*/
writel(irq_stat, mmio + HOST_IRQ_STAT);
+ spin_unlock(&host->lock);
+
VPRINTK("EXIT\n");
- return handled ? IRQ_WAKE_THREAD : IRQ_NONE;
+ return IRQ_RETVAL(handled);
}
unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
@@ -2320,8 +2295,13 @@ static int ahci_port_start(struct ata_port *ap)
*/
pp->intr_mask = DEF_PORT_IRQ;
- spin_lock_init(&pp->lock);
- ap->lock = &pp->lock;
+ /*
+ * Switch to per-port locking in case each port has its own MSI vector.
+ */
+ if ((hpriv->flags & AHCI_HFLAG_MULTI_MSI)) {
+ spin_lock_init(&pp->lock);
+ ap->lock = &pp->lock;
+ }
ap->private_data = pp;
@@ -2482,31 +2462,6 @@ out_free_irqs:
return rc;
}
-static int ahci_host_activate_single_irq(struct ata_host *host, int irq,
- struct scsi_host_template *sht)
-{
- int i, rc;
-
- rc = ata_host_start(host);
- if (rc)
- return rc;
-
- rc = devm_request_threaded_irq(host->dev, irq, ahci_single_irq_intr,
- ahci_thread_fn, IRQF_SHARED,
- dev_driver_string(host->dev), host);
- if (rc)
- return rc;
-
- for (i = 0; i < host->n_ports; i++)
- ata_port_desc(host->ports[i], "irq %d", irq);
-
- rc = ata_host_register(host, sht);
- if (rc)
- devm_free_irq(host->dev, irq, host);
-
- return rc;
-}
-
/**
* ahci_host_activate - start AHCI host, request IRQs and register it
* @host: target ATA host
@@ -2532,7 +2487,8 @@ int ahci_host_activate(struct ata_host *host, int irq,
if (hpriv->flags & AHCI_HFLAG_MULTI_MSI)
rc = ahci_host_activate_multi_irqs(host, irq, sht);
else
- rc = ahci_host_activate_single_irq(host, irq, sht);
+ rc = ata_host_activate(host, irq, ahci_single_irq_intr,
+ IRQF_SHARED, sht);
return rc;
}
EXPORT_SYMBOL_GPL(ahci_host_activate);
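
The libahci rework above drops the threaded single-IRQ path and services every port from one hard-IRQ handler under the host lock. A minimal sketch of the resulting handler shape; host_lock and the elided port loop are stand-ins, not the real libahci structures:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(host_lock);	/* stands in for ata_host->lock */

/* All port work happens in hard-IRQ context under the host lock,
 * rather than being deferred to a threaded handler.
 */
static irqreturn_t example_single_irq(int irq, void *dev_instance)
{
	unsigned int handled = 0;

	spin_lock(&host_lock);
	/* read the global IRQ status and service each pending port... */
	handled = 1;
	spin_unlock(&host_lock);

	return IRQ_RETVAL(handled);
}
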
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 61eb6d77dac7..ea1fbc1d4c5f 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -146,6 +146,7 @@
enum sata_rcar_type {
RCAR_GEN1_SATA,
RCAR_GEN2_SATA,
+ RCAR_R8A7790_ES1_SATA,
};
struct sata_rcar_priv {
@@ -763,6 +764,9 @@ static void sata_rcar_setup_port(struct ata_host *host)
ap->udma_mask = ATA_UDMA6;
ap->flags |= ATA_FLAG_SATA;
+ if (priv->type == RCAR_R8A7790_ES1_SATA)
+ ap->flags |= ATA_FLAG_NO_DIPM;
+
ioaddr->cmd_addr = base + SDATA_REG;
ioaddr->ctl_addr = base + SSDEVCON_REG;
ioaddr->scr_addr = base + SCRSSTS_REG;
@@ -792,6 +796,7 @@ static void sata_rcar_init_controller(struct ata_host *host)
sata_rcar_gen1_phy_init(priv);
break;
case RCAR_GEN2_SATA:
+ case RCAR_R8A7790_ES1_SATA:
sata_rcar_gen2_phy_init(priv);
break;
default:
@@ -838,9 +843,17 @@ static struct of_device_id sata_rcar_match[] = {
.data = (void *)RCAR_GEN2_SATA
},
{
+ .compatible = "renesas,sata-r8a7790-es1",
+ .data = (void *)RCAR_R8A7790_ES1_SATA
+ },
+ {
.compatible = "renesas,sata-r8a7791",
.data = (void *)RCAR_GEN2_SATA
},
+ {
+ .compatible = "renesas,sata-r8a7793",
+ .data = (void *)RCAR_GEN2_SATA
+ },
{ },
};
MODULE_DEVICE_TABLE(of, sata_rcar_match);
@@ -849,7 +862,9 @@ static const struct platform_device_id sata_rcar_id_table[] = {
{ "sata_rcar", RCAR_GEN1_SATA }, /* Deprecated by "sata-r8a7779" */
{ "sata-r8a7779", RCAR_GEN1_SATA },
{ "sata-r8a7790", RCAR_GEN2_SATA },
+ { "sata-r8a7790-es1", RCAR_R8A7790_ES1_SATA },
{ "sata-r8a7791", RCAR_GEN2_SATA },
+ { "sata-r8a7793", RCAR_GEN2_SATA },
{ },
};
MODULE_DEVICE_TABLE(platform, sata_rcar_id_table);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 40bc2f4072cc..fb83d4acd400 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -361,9 +361,19 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
struct device *dev = pdd->dev;
int ret = 0;
- if (gpd_data->need_restore)
+ if (gpd_data->need_restore > 0)
return 0;
+ /*
+ * If the value of the need_restore flag is still unknown at this point,
+ * we trust that pm_genpd_poweroff() has verified that the device is
+ * already runtime PM suspended.
+ */
+ if (gpd_data->need_restore < 0) {
+ gpd_data->need_restore = 1;
+ return 0;
+ }
+
mutex_unlock(&genpd->lock);
genpd_start_dev(genpd, dev);
@@ -373,7 +383,7 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
mutex_lock(&genpd->lock);
if (!ret)
- gpd_data->need_restore = true;
+ gpd_data->need_restore = 1;
return ret;
}
@@ -389,12 +399,17 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
{
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
struct device *dev = pdd->dev;
- bool need_restore = gpd_data->need_restore;
+ int need_restore = gpd_data->need_restore;
- gpd_data->need_restore = false;
+ gpd_data->need_restore = 0;
mutex_unlock(&genpd->lock);
genpd_start_dev(genpd, dev);
+
+ /*
+ * Call genpd_restore_dev() for recently added devices too (need_restore
+ * is negative then).
+ */
if (need_restore)
genpd_restore_dev(genpd, dev);
@@ -603,6 +618,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
static int pm_genpd_runtime_suspend(struct device *dev)
{
struct generic_pm_domain *genpd;
+ struct generic_pm_domain_data *gpd_data;
bool (*stop_ok)(struct device *__dev);
int ret;
@@ -628,6 +644,16 @@ static int pm_genpd_runtime_suspend(struct device *dev)
return 0;
mutex_lock(&genpd->lock);
+
+ /*
+ * If we have an unknown state of the need_restore flag, it means none
+ * of the runtime PM callbacks has been invoked yet. Let's update the
+ * flag to reflect that the current state is active.
+ */
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ if (gpd_data->need_restore < 0)
+ gpd_data->need_restore = 0;
+
genpd->in_progress++;
pm_genpd_poweroff(genpd);
genpd->in_progress--;
@@ -1437,12 +1463,12 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
spin_unlock_irq(&dev->power.lock);
if (genpd->attach_dev)
- genpd->attach_dev(dev);
+ genpd->attach_dev(genpd, dev);
mutex_lock(&gpd_data->lock);
gpd_data->base.dev = dev;
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
- gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
+ gpd_data->need_restore = -1;
gpd_data->td.constraint_changed = true;
gpd_data->td.effective_constraint_ns = -1;
mutex_unlock(&gpd_data->lock);
@@ -1499,7 +1525,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
genpd->max_off_time_changed = true;
if (genpd->detach_dev)
- genpd->detach_dev(dev);
+ genpd->detach_dev(genpd, dev);
spin_lock_irq(&dev->power.lock);
@@ -1546,7 +1572,7 @@ void pm_genpd_dev_need_restore(struct device *dev, bool val)
psd = dev_to_psd(dev);
if (psd && psd->domain_data)
- to_gpd_data(psd->domain_data)->need_restore = val;
+ to_gpd_data(psd->domain_data)->need_restore = val ? 1 : 0;
spin_unlock_irqrestore(&dev->power.lock, flags);
}
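
The genpd patch above widens need_restore from a bool to a tri-state int. A minimal sketch of the convention it introduces; example_save_device() is hypothetical and only mirrors the control flow of __pm_genpd_save_device():

/* Tri-state need_restore convention:
 *   -1  unknown -- no runtime PM callback has run for the device yet
 *    0  active  -- nothing to restore on the next power-up
 *    1  saved   -- state must be restored on the next power-up
 */
static int example_save_device(int *need_restore)
{
	if (*need_restore > 0)
		return 0;	/* already saved, nothing to do */

	if (*need_restore < 0) {
		/* unknown: trust that the device is runtime-suspended */
		*need_restore = 1;
		return 0;
	}

	/* 0: run the real save callback here, then flag the restore */
	*need_restore = 1;
	return 0;
}
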
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 2ad0b5bce44b..3920ee45aa59 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -560,7 +560,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
}
if (page_zero_filled(uncmem)) {
- kunmap_atomic(user_mem);
+ if (user_mem)
+ kunmap_atomic(user_mem);
/* Free memory associated with this sector now. */
bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
zram_free_page(zram, index);
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index 6226aa08c36a..bcf86f91800a 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -25,18 +25,21 @@
#include <asm/vio.h>
-static int pseries_rng_data_read(struct hwrng *rng, u32 *data)
+static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
+ u64 buffer[PLPAR_HCALL_BUFSIZE];
+ size_t size = max < 8 ? max : 8;
int rc;
- rc = plpar_hcall(H_RANDOM, (unsigned long *)data);
+ rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer);
if (rc != H_SUCCESS) {
pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
return -EIO;
}
+ memcpy(data, buffer, size);
/* The hypervisor interface returns 64 bits */
- return 8;
+ return size;
}
/**
@@ -55,7 +58,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev)
static struct hwrng pseries_rng = {
.name = KBUILD_MODNAME,
- .data_read = pseries_rng_data_read,
+ .read = pseries_rng_read,
};
static int __init pseries_rng_probe(struct vio_dev *dev,
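
The pseries-rng conversion above moves from the legacy .data_read hook to the byte-oriented .read interface of struct hwrng. A minimal sketch of a driver on that interface; my_hw_read_u64() is a hypothetical device accessor, not a real API:

#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/string.h>

static u64 my_hw_read_u64(void)
{
	return 0xdeadbeefcafef00dULL;	/* placeholder entropy source */
}

/* .read fills up to @max bytes of @data and returns how many bytes
 * were produced, or a negative errno -- never write past @max.
 */
static int example_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	u64 val = my_hw_read_u64();
	size_t size = min_t(size_t, max, sizeof(val));

	memcpy(data, &val, size);
	return size;
}

static struct hwrng example_rng = {
	.name = "example-rng",
	.read = example_rng_read,
};
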
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index bfa640023e64..cf7a561fad7c 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1449,8 +1449,6 @@ static int add_port(struct ports_device *portdev, u32 id)
spin_lock_init(&port->outvq_lock);
init_waitqueue_head(&port->waitqueue);
- virtio_device_ready(portdev->vdev);
-
/* Fill the in_vq with buffers so the host can send us data. */
nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
if (!nr_added_bufs) {
@@ -2026,6 +2024,8 @@ static int virtcons_probe(struct virtio_device *vdev)
spin_lock_init(&portdev->ports_lock);
INIT_LIST_HEAD(&portdev->ports);
+ virtio_device_ready(portdev->vdev);
+
if (multiport) {
unsigned int nr_added_bufs;
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 23aaf40cf37f..f657c571b18e 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -166,8 +166,8 @@ try_again:
if (ret == -EPROBE_DEFER)
dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
else
- dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", ret,
- cpu);
+ dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
+ ret);
} else {
*cdev = cpu_dev;
*creg = cpu_reg;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 644b54e1e7d1..4473eba1d6b0 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1022,7 +1022,8 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- policy->governor = NULL;
+ if (policy)
+ policy->governor = NULL;
return policy;
}
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 871703c49d2c..e1eaf4ff9762 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -48,23 +48,29 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
u32 *desc;
struct split_key_result result;
dma_addr_t dma_addr_in, dma_addr_out;
- int ret = 0;
+ int ret = -ENOMEM;
desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
- return -ENOMEM;
+ return ret;
}
- init_job_desc(desc, 0);
-
dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, dma_addr_in)) {
dev_err(jrdev, "unable to map key input memory\n");
- kfree(desc);
- return -ENOMEM;
+ goto out_free;
}
+
+ dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, dma_addr_out)) {
+ dev_err(jrdev, "unable to map key output memory\n");
+ goto out_unmap_in;
+ }
+
+ init_job_desc(desc, 0);
append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
/* Sets MDHA up into an HMAC-INIT */
@@ -81,13 +87,6 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
* FIFO_STORE with the explicit split-key content store
* (0x26 output type)
*/
- dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(jrdev, dma_addr_out)) {
- dev_err(jrdev, "unable to map key output memory\n");
- kfree(desc);
- return -ENOMEM;
- }
append_fifo_store(desc, dma_addr_out, split_key_len,
LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
@@ -115,10 +114,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
DMA_FROM_DEVICE);
+out_unmap_in:
dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
-
+out_free:
kfree(desc);
-
return ret;
}
EXPORT_SYMBOL(gen_split_key);
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 9282381b03ce..fe7b3f06f6e6 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -198,8 +198,7 @@ struct adf_accel_dev {
struct dentry *debugfs_dir;
struct list_head list;
struct module *owner;
- uint8_t accel_id;
- uint8_t numa_node;
struct adf_accel_pci accel_pci_dev;
+ uint8_t accel_id;
} __packed;
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 5f3fa45348b4..9dd2cb72a4e8 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -419,9 +419,10 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
ring = &bank->rings[i];
if (hw_data->tx_rings_mask & (1 << i)) {
- ring->inflights = kzalloc_node(sizeof(atomic_t),
- GFP_KERNEL,
- accel_dev->numa_node);
+ ring->inflights =
+ kzalloc_node(sizeof(atomic_t),
+ GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
if (!ring->inflights)
goto err;
} else {
@@ -469,13 +470,14 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
int i, ret;
etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
- accel_dev->numa_node);
+ dev_to_node(&GET_DEV(accel_dev)));
if (!etr_data)
return -ENOMEM;
num_banks = GET_MAX_BANKS(accel_dev);
size = num_banks * sizeof(struct adf_etr_bank_data);
- etr_data->banks = kzalloc_node(size, GFP_KERNEL, accel_dev->numa_node);
+ etr_data->banks = kzalloc_node(size, GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
if (!etr_data->banks) {
ret = -ENOMEM;
goto err_bank;
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index f2e2f158cfbe..9e9619cd4a79 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -596,7 +596,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
if (unlikely(!n))
return -EINVAL;
- bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node);
+ bufl = kmalloc_node(sz, GFP_ATOMIC,
+ dev_to_node(&GET_DEV(inst->accel_dev)));
if (unlikely(!bufl))
return -ENOMEM;
@@ -605,6 +606,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
goto err;
for_each_sg(assoc, sg, assoc_n, i) {
+ if (!sg->length)
+ continue;
bufl->bufers[bufs].addr = dma_map_single(dev,
sg_virt(sg),
sg->length,
@@ -640,7 +643,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
struct qat_alg_buf *bufers;
buflout = kmalloc_node(sz, GFP_ATOMIC,
- inst->accel_dev->numa_node);
+ dev_to_node(&GET_DEV(inst->accel_dev)));
if (unlikely(!buflout))
goto err;
bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index 0d59bcb50de1..828f2a686aab 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -109,12 +109,14 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
list_for_each(itr, adf_devmgr_get_head()) {
accel_dev = list_entry(itr, struct adf_accel_dev, list);
- if (accel_dev->numa_node == node && adf_dev_started(accel_dev))
+ if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
+ dev_to_node(&GET_DEV(accel_dev)) < 0)
+ && adf_dev_started(accel_dev))
break;
accel_dev = NULL;
}
if (!accel_dev) {
- pr_err("QAT: Could not find device on give node\n");
+ pr_err("QAT: Could not find device on node %d\n", node);
accel_dev = adf_devmgr_get_first();
}
if (!accel_dev || !adf_dev_started(accel_dev))
@@ -164,7 +166,7 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
for (i = 0; i < num_inst; i++) {
inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
- accel_dev->numa_node);
+ dev_to_node(&GET_DEV(accel_dev)));
if (!inst)
goto err;
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
index 978d6c56639d..53c491b59f07 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
@@ -108,7 +108,7 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
uint64_t reg_val;
admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
- accel_dev->numa_node);
+ dev_to_node(&GET_DEV(accel_dev)));
if (!admin)
return -ENOMEM;
admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 0d0435a41be9..948f66be262b 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -119,21 +119,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
kfree(accel_dev);
}
-static uint8_t adf_get_dev_node_id(struct pci_dev *pdev)
-{
- unsigned int bus_per_cpu = 0;
- struct cpuinfo_x86 *c = &cpu_data(num_online_cpus() - 1);
-
- if (!c->phys_proc_id)
- return 0;
-
- bus_per_cpu = 256 / (c->phys_proc_id + 1);
-
- if (bus_per_cpu != 0)
- return pdev->bus->number / bus_per_cpu;
- return 0;
-}
-
static int qat_dev_start(struct adf_accel_dev *accel_dev)
{
int cpus = num_online_cpus();
@@ -235,7 +220,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
void __iomem *pmisc_bar_addr = NULL;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- uint8_t node;
int ret;
switch (ent->device) {
@@ -246,12 +230,19 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
}
- node = adf_get_dev_node_id(pdev);
- accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node);
+ if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+ /* If the accelerator is connected to a node with no memory
+ * there is no point in using the accelerator since the remote
+ * memory transaction will be very slow. */
+ dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+ return -EINVAL;
+ }
+
+ accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
if (!accel_dev)
return -ENOMEM;
- accel_dev->numa_node = node;
INIT_LIST_HEAD(&accel_dev->crypto_list);
/* Add accel device to accel table.
@@ -264,7 +255,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
accel_dev->owner = THIS_MODULE;
/* Allocate and configure device configuration structure */
- hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, node);
+ hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+ dev_to_node(&pdev->dev));
if (!hw_data) {
ret = -ENOMEM;
goto out_err;
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
index 67ec61e51185..d96ee21b9b77 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
@@ -168,7 +168,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
uint32_t msix_num_entries = hw_data->num_banks + 1;
entries = kzalloc_node(msix_num_entries * sizeof(*entries),
- GFP_KERNEL, accel_dev->numa_node);
+ GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
if (!entries)
return -ENOMEM;
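
The QAT changes above replace the driver's hand-computed numa_node with dev_to_node() on the underlying device. A minimal sketch of that allocation pattern for a generic PCI driver:

#include <linux/pci.h>
#include <linux/slab.h>

/* Allocate per-device data on the NUMA node the device itself reports.
 * dev_to_node() may return NUMA_NO_NODE (-1); kzalloc_node() then
 * simply falls back to an unconstrained allocation.
 */
static void *alloc_dev_local(struct pci_dev *pdev, size_t size)
{
	return kzalloc_node(size, GFP_KERNEL, dev_to_node(&pdev->dev));
}
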
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 5d997a33907e..2a3973a7c441 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1637,8 +1637,7 @@ static int dispatch_ioctl(struct client *client,
_IOC_SIZE(cmd) > sizeof(buffer))
return -ENOTTY;
- if (_IOC_DIR(cmd) == _IOC_READ)
- memset(&buffer, 0, _IOC_SIZE(cmd));
+ memset(&buffer, 0, sizeof(buffer));
if (_IOC_DIR(cmd) & _IOC_WRITE)
if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index c57466edf45b..e5c4c6c8c967 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -495,6 +495,12 @@ static struct component_match *exynos_drm_match_add(struct device *dev)
mutex_lock(&drm_component_lock);
+	/* Do not retry probing if no kms driver has been registered. */
+ if (list_empty(&drm_component_list)) {
+ mutex_unlock(&drm_component_lock);
+ return ERR_PTR(-ENODEV);
+ }
+
list_for_each_entry(cdev, &drm_component_list, list) {
/*
* Add components to master only in case that crtc and
@@ -585,10 +591,21 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
goto err_unregister_mixer_drv;
#endif
+ match = exynos_drm_match_add(&pdev->dev);
+ if (IS_ERR(match)) {
+ ret = PTR_ERR(match);
+ goto err_unregister_hdmi_drv;
+ }
+
+ ret = component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
+ match);
+ if (ret < 0)
+ goto err_unregister_hdmi_drv;
+
#ifdef CONFIG_DRM_EXYNOS_G2D
ret = platform_driver_register(&g2d_driver);
if (ret < 0)
- goto err_unregister_hdmi_drv;
+ goto err_del_component_master;
#endif
#ifdef CONFIG_DRM_EXYNOS_FIMC
@@ -619,23 +636,9 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
goto err_unregister_ipp_drv;
#endif
- match = exynos_drm_match_add(&pdev->dev);
- if (IS_ERR(match)) {
- ret = PTR_ERR(match);
- goto err_unregister_resources;
- }
-
- ret = component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
- match);
- if (ret < 0)
- goto err_unregister_resources;
-
return ret;
-err_unregister_resources:
-
#ifdef CONFIG_DRM_EXYNOS_IPP
- exynos_platform_device_ipp_unregister();
err_unregister_ipp_drv:
platform_driver_unregister(&ipp_driver);
err_unregister_gsc_drv:
@@ -658,9 +661,11 @@ err_unregister_g2d_drv:
#ifdef CONFIG_DRM_EXYNOS_G2D
platform_driver_unregister(&g2d_driver);
-err_unregister_hdmi_drv:
+err_del_component_master:
#endif
+ component_master_del(&pdev->dev, &exynos_drm_ops);
+err_unregister_hdmi_drv:
#ifdef CONFIG_DRM_EXYNOS_HDMI
platform_driver_unregister(&hdmi_driver);
err_unregister_mixer_drv:
@@ -741,6 +746,18 @@ static int exynos_drm_init(void)
{
int ret;
+ /*
+ * Register device object only in case of Exynos SoC.
+ *
+	 * The code below temporarily resolves an infinite loop issue incurred
+	 * by the Exynos drm driver when using a multi-platform kernel.
+	 * It will be replaced with a more generic solution later.
+ */
+ if (!of_machine_is_compatible("samsung,exynos3") &&
+ !of_machine_is_compatible("samsung,exynos4") &&
+ !of_machine_is_compatible("samsung,exynos5"))
+ return -ENODEV;
+
exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
NULL, 0);
if (IS_ERR(exynos_drm_pdev))
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index df7a77d3eff8..6ff8599f6cbf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -302,9 +302,12 @@ static void g2d_fini_cmdlist(struct g2d_data *g2d)
struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
kfree(g2d->cmdlist_node);
- dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
- g2d->cmdlist_pool_virt,
- g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
+
+ if (g2d->cmdlist_pool_virt && g2d->cmdlist_pool) {
+ dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+ g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
+ }
}
static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 055d5e7fbf12..2318b4c7a8f8 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -986,6 +986,15 @@ static int i915_pm_freeze(struct device *dev)
return i915_drm_freeze(drm_dev);
}
+static int i915_pm_freeze_late(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_i915_private *dev_priv = drm_dev->dev_private;
+
+ return intel_suspend_complete(dev_priv);
+}
+
static int i915_pm_thaw_early(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -1570,6 +1579,7 @@ static const struct dev_pm_ops i915_pm_ops = {
.resume_early = i915_pm_resume_early,
.resume = i915_pm_resume,
.freeze = i915_pm_freeze,
+ .freeze_late = i915_pm_freeze_late,
.thaw_early = i915_pm_thaw_early,
.thaw = i915_pm_thaw,
.poweroff = i915_pm_poweroff,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b672b843fd5e..728938f02341 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1902,6 +1902,22 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+ if (!USES_PPGTT(dev_priv->dev))
+ /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
+ * so RTL will always use the value corresponding to
+ * pat_sel = 000".
+ * So let's disable cache for GGTT to avoid screen corruptions.
+ * MOCS still can be used though.
+ * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
+ * before this patch, i.e. the same uncached + snooping access
+ * like on gen6/7 seems to be in effect.
+ * - So this just fixes blitter/render access. Again it looks
+ * like it's not just uncached access, but uncached + snooping.
+ * So we can still hold onto all our assumptions wrt cpu
+ * clflushing on LLC machines.
+ */
+ pat = GEN8_PPAT(0, GEN8_PPAT_UC);
+
/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
* write would work. */
I915_WRITE(GEN8_PRIVATE_PAT, pat);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 2cefb597df6d..2b1eaa29ada4 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -364,22 +364,9 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
* has to also include the unfenced register the GPU uses
* whilst executing a fenced command for an untiled object.
*/
-
- obj->map_and_fenceable =
- !i915_gem_obj_ggtt_bound(obj) ||
- (i915_gem_obj_ggtt_offset(obj) +
- obj->base.size <= dev_priv->gtt.mappable_end &&
- i915_gem_object_fence_ok(obj, args->tiling_mode));
-
- /* Rebind if we need a change of alignment */
- if (!obj->map_and_fenceable) {
- u32 unfenced_align =
- i915_gem_get_gtt_alignment(dev, obj->base.size,
- args->tiling_mode,
- false);
- if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
- ret = i915_gem_object_ggtt_unbind(obj);
- }
+ if (obj->map_and_fenceable &&
+ !i915_gem_object_fence_ok(obj, args->tiling_mode))
+ ret = i915_gem_object_ggtt_unbind(obj);
if (ret == 0) {
obj->fence_dirty =
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 0e018cb49147..41b3be217493 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -1098,12 +1098,25 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
+ int min;
WARN_ON(panel->backlight.max == 0);
+ /*
+ * XXX: If the vbt value is 255, it makes min equal to max, which leads
+ * to problems. There are such machines out there. Either our
+ * interpretation is wrong or the vbt has bogus data. Or both. Safeguard
+ * against this by letting the minimum be at most (arbitrarily chosen)
+ * 25% of the max.
+ */
+ min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
+ if (min != dev_priv->vbt.backlight.min_brightness) {
+ DRM_DEBUG_KMS("clamping VBT min backlight %d/255 to %d/255\n",
+ dev_priv->vbt.backlight.min_brightness, min);
+ }
+
/* vbt value is a coefficient in range [0..255] */
- return scale(dev_priv->vbt.backlight.min_brightness, 0, 255,
- 0, panel->backlight.max);
+ return scale(min, 0, 255, 0, panel->backlight.max);
}
static int bdw_setup_backlight(struct intel_connector *connector)
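
To make the backlight fix above concrete: scale(v, 0, 255, 0, max) is a linear mapping, so a bogus VBT minimum of 255 previously produced min == max and collapsed the brightness range to a single value. Clamping the VBT coefficient to at most 64 caps the computed minimum at scale(64, 0, 255, 0, max), roughly 25% of max, which is the arbitrary safeguard the comment describes.
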
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c
index a16024a74771..fde42e4d1b56 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c
@@ -27,6 +27,20 @@ struct gk20a_fb_priv {
};
static int
+gk20a_fb_init(struct nouveau_object *object)
+{
+ struct gk20a_fb_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_fb_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
+ return 0;
+}
+
+static int
gk20a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -48,7 +62,7 @@ gk20a_fb_oclass = &(struct nouveau_fb_impl) {
.base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = gk20a_fb_ctor,
.dtor = _nouveau_fb_dtor,
- .init = _nouveau_fb_init,
+ .init = gk20a_fb_init,
.fini = _nouveau_fb_fini,
},
.memtype = nvc0_fb_memtype_valid,
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index ae873d1a8d46..eb8b36714fa1 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -791,6 +791,22 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
}
static int
+nv50_crtc_set_raster_vblank_dmi(struct nouveau_crtc *nv_crtc, u32 usec)
+{
+ struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ u32 *push;
+
+ push = evo_wait(mast, 8);
+ if (!push)
+ return -ENOMEM;
+
+ evo_mthd(push, 0x0828 + (nv_crtc->index * 0x400), 1);
+ evo_data(push, usec);
+ evo_kick(push, mast);
+ return 0;
+}
+
+static int
nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
{
struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
@@ -1104,14 +1120,14 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2);
evo_data(push, 0x00800000 | mode->clock);
evo_data(push, (ilace == 2) ? 2 : 0);
- evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 8);
+ evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6);
evo_data(push, 0x00000000);
evo_data(push, (vactive << 16) | hactive);
evo_data(push, ( vsynce << 16) | hsynce);
evo_data(push, (vblanke << 16) | hblanke);
evo_data(push, (vblanks << 16) | hblanks);
evo_data(push, (vblan2e << 16) | vblan2s);
- evo_data(push, vblankus);
+ evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1);
evo_data(push, 0x00000000);
evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2);
evo_data(push, 0x00000311);
@@ -1141,6 +1157,11 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
nv_connector = nouveau_crtc_connector_get(nv_crtc);
nv50_crtc_set_dither(nv_crtc, false);
nv50_crtc_set_scale(nv_crtc, false);
+
+ /* G94 only accepts this after setting scale */
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA)
+ nv50_crtc_set_raster_vblank_dmi(nv_crtc, vblankus);
+
nv50_crtc_set_color_vibrance(nv_crtc, false);
nv50_crtc_set_image(nv_crtc, crtc->primary->fb, x, y, false);
return 0;
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 15da7ef344a4..ec1593a6a561 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1217,7 +1217,7 @@ free:
return ret;
}
-int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t * params)
{
int r;
@@ -1238,6 +1238,15 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
return r;
}
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+{
+ int r;
+ mutex_lock(&ctx->scratch_mutex);
+ r = atom_execute_table_scratch_unlocked(ctx, index, params);
+ mutex_unlock(&ctx->scratch_mutex);
+ return r;
+}
+
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
static void atom_index_iio(struct atom_context *ctx, int base)
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index feba6b8d36b3..6d014ddb6b78 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -125,6 +125,7 @@ struct card_info {
struct atom_context {
struct card_info *card;
struct mutex mutex;
+ struct mutex scratch_mutex;
void *bios;
uint32_t cmd_table, data_table;
uint16_t *iio;
@@ -145,6 +146,7 @@ extern int atom_debug;
struct atom_context *atom_parse(struct card_info *, void *);
int atom_execute_table(struct atom_context *, int, uint32_t *);
+int atom_execute_table_scratch_unlocked(struct atom_context *, int, uint32_t *);
int atom_asic_init(struct atom_context *);
void atom_destroy(struct atom_context *);
bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 95d5d4ab3335..11ba9d21b89b 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -100,6 +100,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
memset(&args, 0, sizeof(args));
mutex_lock(&chan->mutex);
+ mutex_lock(&rdev->mode_info.atom_context->scratch_mutex);
base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
@@ -113,7 +114,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
if (ASIC_IS_DCE4(rdev))
args.v2.ucHPD_ID = chan->rec.hpd;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args);
*ack = args.v1.ucReplyStatus;
@@ -147,6 +148,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
r = recv_bytes;
done:
+ mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex);
mutex_unlock(&chan->mutex);
return r;
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index 9c570fb15b8c..4157780585a0 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -48,6 +48,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
memset(&args, 0, sizeof(args));
mutex_lock(&chan->mutex);
+ mutex_lock(&rdev->mode_info.atom_context->scratch_mutex);
base = (unsigned char *)rdev->mode_info.atom_context->scratch;
@@ -82,7 +83,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
args.ucSlaveAddr = slave_addr << 1;
args.ucLineNumber = chan->rec.i2c_id;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args);
/* error */
if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
@@ -95,6 +96,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
radeon_atom_copy_swap(buf, base, num, false);
done:
+ mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex);
mutex_unlock(&chan->mutex);
return r;
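
The atom changes split the table executor into a locked wrapper plus an _scratch_unlocked variant, so the DP and I2C paths can take scratch_mutex themselves and call the unlocked variant without recursive locking. A minimal sketch of that wrapper pattern; do_work*() are hypothetical stand-ins for atom_execute_table*():

#include <linux/mutex.h>

static DEFINE_MUTEX(scratch_mutex);

/* Callers that already hold scratch_mutex use this variant directly. */
static int do_work_scratch_unlocked(void)
{
	/* touch the shared scratch area here */
	return 0;
}

/* Convenience wrapper for callers that do not hold the lock. */
static int do_work(void)
{
	int r;

	mutex_lock(&scratch_mutex);
	r = do_work_scratch_unlocked();
	mutex_unlock(&scratch_mutex);
	return r;
}
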
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 377afa504d2b..89c01fa6dd8e 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -4313,8 +4313,8 @@ static int cik_cp_gfx_start(struct radeon_device *rdev)
/* init the CE partitions. CE only used for gfx on CIK */
radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
- radeon_ring_write(ring, 0xc000);
- radeon_ring_write(ring, 0xc000);
+ radeon_ring_write(ring, 0x8000);
+ radeon_ring_write(ring, 0x8000);
/* setup clear context state */
radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
@@ -9447,6 +9447,9 @@ void dce8_bandwidth_update(struct radeon_device *rdev)
u32 num_heads = 0, lb_size;
int i;
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
radeon_update_display_priority(rdev);
for (i = 0; i < rdev->num_crtc; i++) {
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 4e8432d07f15..d748963af08b 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -667,17 +667,20 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
struct radeon_ib ib;
unsigned i;
+ unsigned index;
int r;
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
u32 tmp = 0;
+ u64 gpu_addr;
- if (!ptr) {
- DRM_ERROR("invalid vram scratch pointer\n");
- return -EINVAL;
- }
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ index = R600_WB_DMA_RING_TEST_OFFSET;
+ else
+ index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
+
+ gpu_addr = rdev->wb.gpu_addr + index;
tmp = 0xCAFEDEAD;
- writel(tmp, ptr);
+ rdev->wb.wb[index/4] = cpu_to_le32(tmp);
r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
if (r) {
@@ -686,8 +689,8 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
}
ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
- ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr);
+ ib.ptr[1] = lower_32_bits(gpu_addr);
+ ib.ptr[2] = upper_32_bits(gpu_addr);
ib.ptr[3] = 1;
ib.ptr[4] = 0xDEADBEEF;
ib.length_dw = 5;
@@ -704,7 +707,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = readl(ptr);
+ tmp = le32_to_cpu(rdev->wb.wb[index/4]);
if (tmp == 0xDEADBEEF)
break;
DRM_UDELAY(1);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f37d39d2bbbc..85995b4e3338 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2345,6 +2345,9 @@ void evergreen_bandwidth_update(struct radeon_device *rdev)
u32 num_heads = 0, lb_size;
int i;
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
radeon_update_display_priority(rdev);
for (i = 0; i < rdev->num_crtc; i++) {
@@ -2552,6 +2555,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 10f8be0ee173..b53b31a7b76f 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3207,6 +3207,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
uint32_t pixel_bytes1 = 0;
uint32_t pixel_bytes2 = 0;
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
radeon_update_display_priority(rdev);
if (rdev->mode_info.crtcs[0]->base.enabled) {
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index aabc343b9a8f..cf0df45d455e 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -338,17 +338,17 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
struct radeon_ib ib;
unsigned i;
+ unsigned index;
int r;
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
u32 tmp = 0;
+ u64 gpu_addr;
- if (!ptr) {
- DRM_ERROR("invalid vram scratch pointer\n");
- return -EINVAL;
- }
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ index = R600_WB_DMA_RING_TEST_OFFSET;
+ else
+ index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
- tmp = 0xCAFEDEAD;
- writel(tmp, ptr);
+ gpu_addr = rdev->wb.gpu_addr + index;
r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
if (r) {
@@ -357,8 +357,8 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
}
ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
- ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
- ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
+ ib.ptr[1] = lower_32_bits(gpu_addr);
+ ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
ib.ptr[3] = 0xDEADBEEF;
ib.length_dw = 4;
@@ -374,7 +374,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = readl(ptr);
+ tmp = le32_to_cpu(rdev->wb.wb[index/4]);
if (tmp == 0xDEADBEEF)
break;
DRM_UDELAY(1);
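
Both DMA IB tests above switch from readl() on a VRAM scratch mapping to the GPU writeback (WB) slots in system memory, which remain valid even when VRAM scratch is not set up. A minimal sketch of the seed-and-poll pattern; wb[] stands in for rdev->wb.wb and the IB submission is elided:

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>

static __le32 wb[256];	/* hypothetical CPU view of the WB buffer */

static bool example_wb_poll(unsigned int index, unsigned int timeout_us)
{
	unsigned int i;

	wb[index / 4] = cpu_to_le32(0xCAFEDEAD);	/* seed value */
	/* submit an IB that writes 0xDEADBEEF to wb[index / 4]... */

	for (i = 0; i < timeout_us; i++) {
		if (le32_to_cpu(wb[index / 4]) == 0xDEADBEEF)
			return true;
		udelay(1);
	}
	return false;
}
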
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index ea2676954dde..995a8b1770dd 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -952,6 +952,7 @@ int radeon_atombios_init(struct radeon_device *rdev)
}
mutex_init(&rdev->mode_info.atom_context->mutex);
+ mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
atom_allocate_fb_scratch(rdev->mode_info.atom_context);
return 0;
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5f6db4629aaa..9acb1c3c005b 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -879,6 +879,9 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
/* FIXME: implement full support */
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
radeon_update_display_priority(rdev);
if (rdev->mode_info.crtcs[0]->base.enabled)
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 3462b64369bf..0a2d36e81108 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -579,6 +579,9 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
radeon_update_display_priority(rdev);
if (rdev->mode_info.crtcs[0]->base.enabled)
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 8a477bf1fdb3..c55d653aaf5f 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -1277,6 +1277,9 @@ void rv515_bandwidth_update(struct radeon_device *rdev)
struct drm_display_mode *mode0 = NULL;
struct drm_display_mode *mode1 = NULL;
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
radeon_update_display_priority(rdev);
if (rdev->mode_info.crtcs[0]->base.enabled)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index eeea5b6a1775..7d5083dc4acb 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2384,6 +2384,9 @@ void dce6_bandwidth_update(struct radeon_device *rdev)
u32 num_heads = 0, lb_size;
int i;
+ if (!rdev->mode_info.mode_config_initialized)
+ return;
+
radeon_update_display_priority(rdev);
for (i = 0; i < rdev->num_crtc; i++) {
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 6553fd238685..054a79f143ae 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -736,7 +736,6 @@ static const struct drm_crtc_funcs tegra_crtc_funcs = {
static void tegra_crtc_disable(struct drm_crtc *crtc)
{
- struct tegra_dc *dc = to_tegra_dc(crtc);
struct drm_device *drm = crtc->dev;
struct drm_plane *plane;
@@ -752,7 +751,7 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
}
}
- drm_vblank_off(drm, dc->pipe);
+ drm_crtc_vblank_off(crtc);
}
static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -841,8 +840,6 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
u32 value;
int err;
- drm_vblank_pre_modeset(crtc->dev, dc->pipe);
-
err = tegra_crtc_setup_clk(crtc, mode);
if (err) {
dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err);
@@ -896,6 +893,8 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc)
unsigned int syncpt;
unsigned long value;
+ drm_crtc_vblank_off(crtc);
+
/* hardware initialization */
reset_control_deassert(dc->rst);
usleep_range(10000, 20000);
@@ -943,7 +942,7 @@ static void tegra_crtc_commit(struct drm_crtc *crtc)
value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
- drm_vblank_post_modeset(crtc->dev, dc->pipe);
+ drm_crtc_vblank_on(crtc);
}
static void tegra_crtc_load_lut(struct drm_crtc *crtc)
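
The three hunks above convert the driver from the legacy pipe-indexed drm_vblank_off()/drm_vblank_pre_modeset()/drm_vblank_post_modeset() calls to the CRTC-based helpers. The usual pairing, sketched below with a schematic driver (drm_crtc_vblank_off()/on() are the real DRM helpers; the surrounding functions are assumptions):

/* Schematic pairing: vblank is turned off before a CRTC is disabled or
 * reprogrammed, and back on once it is scanning out again. */
static void sketch_crtc_prepare(struct drm_crtc *crtc)
{
        drm_crtc_vblank_off(crtc);      /* quiesce vblank while reprogramming */
        /* ... hardware reset / clock setup ... */
}

static void sketch_crtc_commit(struct drm_crtc *crtc)
{
        /* ... latch the new configuration ... */
        drm_crtc_vblank_on(crtc);       /* vblank events are trustworthy again */
}
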
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 73bd9e2e42bc..3402033fa52a 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1659,6 +1659,7 @@ void hid_disconnect(struct hid_device *hdev)
hdev->hiddev_disconnect(hdev);
if (hdev->claimed & HID_CLAIMED_HIDRAW)
hidraw_disconnect(hdev);
+ hdev->claimed = 0;
}
EXPORT_SYMBOL_GPL(hid_disconnect);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index e23ab8b30626..7c863738e419 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -299,6 +299,7 @@
#define USB_VENDOR_ID_ELAN 0x04f3
#define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089
#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b
+#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103 0x0103
#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f
#define USB_VENDOR_ID_ELECOM 0x056e
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 5014bb567b29..552671ee7c5d 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -72,6 +72,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index fcdbde4ec692..3057dfc7e3bc 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -234,7 +234,7 @@ static const struct pci_device_id fam15h_power_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
{}
};
MODULE_DEVICE_TABLE(pci, fam15h_power_id_table);
diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
index d2bf2c97ae70..6a30eeea94be 100644
--- a/drivers/hwmon/ibmpowernv.c
+++ b/drivers/hwmon/ibmpowernv.c
@@ -181,7 +181,7 @@ static int __init populate_attr_groups(struct platform_device *pdev)
opal = of_find_node_by_path("/ibm,opal/sensors");
if (!opal) {
- dev_err(&pdev->dev, "Opal node 'sensors' not found\n");
+ dev_dbg(&pdev->dev, "Opal node 'sensors' not found\n");
return -ENODEV;
}
@@ -335,7 +335,9 @@ static int __init ibmpowernv_init(void)
err = platform_driver_probe(&ibmpowernv_driver, ibmpowernv_probe);
if (err) {
- pr_err("Platfrom driver probe failed\n");
+ if (err != -ENODEV)
+ pr_err("Platform driver probe failed (%d)\n", err);
+
goto exit_device_del;
}
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 823c877a1ec0..1991d9032c38 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -161,10 +161,17 @@ static int pwm_fan_suspend(struct device *dev)
static int pwm_fan_resume(struct device *dev)
{
struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
+ unsigned long duty;
+ int ret;
- if (ctx->pwm_value)
- return pwm_enable(ctx->pwm);
- return 0;
+ if (ctx->pwm_value == 0)
+ return 0;
+
+ duty = DIV_ROUND_UP(ctx->pwm_value * (ctx->pwm->period - 1), MAX_PWM);
+ ret = pwm_config(ctx->pwm, duty, ctx->pwm->period);
+ if (ret)
+ return ret;
+ return pwm_enable(ctx->pwm);
}
#endif
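
Resume now reprograms the duty cycle before re-enabling the PWM, since the controller may have lost its state across suspend. The duty is the sysfs value (0..MAX_PWM) scaled onto the period, rounded up. A standalone model of the arithmetic (the example numbers are assumptions):

#include <stdio.h>

#define MAX_PWM 255UL

/* userspace stand-in for the kernel's DIV_ROUND_UP() */
static unsigned long div_round_up(unsigned long n, unsigned long d)
{
        return (n + d - 1) / d;
}

int main(void)
{
        unsigned long pwm_value = 128;     /* sysfs value, 0..255 */
        unsigned long period    = 10000;   /* ns, hypothetical period */
        unsigned long duty = div_round_up(pwm_value * (period - 1), MAX_PWM);

        printf("duty = %lu ns of %lu ns\n", duty, period);  /* 5020 of 10000 */
        return 0;
}
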
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
index fb3b63b2f85c..8400a1a34d87 100644
--- a/drivers/input/misc/twl4030-pwrbutton.c
+++ b/drivers/input/misc/twl4030-pwrbutton.c
@@ -85,6 +85,7 @@ static int twl4030_pwrbutton_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, pwr);
+ device_init_wakeup(&pdev->dev, true);
return 0;
}
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 2b0ae8cc8e51..d125a019383f 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1156,7 +1156,13 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
- if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */
+ /*
+ * Check if we are dealing with a bare PS/2 packet, presumably from
+ * a device connected to the external PS/2 port. Because bare PS/2
+ * protocol does not have enough constant bits to self-synchronize
+ * properly we only do this if the device is fully synchronized.
+ */
+ if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
if (psmouse->pktcnt == 3) {
alps_report_bare_ps2_packet(psmouse, psmouse->packet,
true);
@@ -1180,12 +1186,27 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
}
/* Bytes 2 - pktsize should have 0 in the highest bit */
- if ((priv->proto_version < ALPS_PROTO_V5) &&
+ if (priv->proto_version < ALPS_PROTO_V5 &&
psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize &&
(psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
psmouse->pktcnt - 1,
psmouse->packet[psmouse->pktcnt - 1]);
+
+ if (priv->proto_version == ALPS_PROTO_V3 &&
+ psmouse->pktcnt == psmouse->pktsize) {
+ /*
+ * Some Dell boxes, such as Latitude E6440 or E7440
+ * with closed lid, quite often smash last byte of
+ * otherwise valid packet with 0xff. Given that the
+ * next packet is very likely to be valid let's
+ * report PSMOUSE_FULL_PACKET but not process data,
+ * rather than reporting PSMOUSE_BAD_DATA and
+ * filling the logs.
+ */
+ return PSMOUSE_FULL_PACKET;
+ }
+
return PSMOUSE_BAD_DATA;
}
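
The two hunks above tighten packet validation: bare PS/2 packets from the pass-through port are only trusted while the device is fully synchronized, and a V3 packet whose final byte was smashed to 0xff is swallowed without declaring the stream bad. A condensed standalone model of that decision (schematic names, not the psmouse API):

enum verdict { GOOD_DATA, FULL_PACKET_DROP, BAD_DATA };

static enum verdict check_byte(int out_of_sync_cnt, int proto_version,
                               int pktcnt, int pktsize,
                               const unsigned char *packet)
{
        /* Bare PS/2 packets are only trusted while fully synchronized. */
        if (!out_of_sync_cnt && (packet[0] & 0xc8) == 0x08)
                return GOOD_DATA;

        /* Bytes 2..pktsize must have bit 7 clear... */
        if (proto_version < 5 && pktcnt >= 2 && pktcnt <= pktsize &&
            (packet[pktcnt - 1] & 0x80)) {
                /* ...except a V3 packet whose last byte was smashed to
                 * 0xff: drop it quietly instead of forcing a resync. */
                if (proto_version == 3 && pktcnt == pktsize)
                        return FULL_PACKET_DROP;
                return BAD_DATA;
        }
        return GOOD_DATA;
}
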
@@ -2389,6 +2410,9 @@ int alps_init(struct psmouse *psmouse)
/* We are having trouble resyncing ALPS touchpads so disable it for now */
psmouse->resync_time = 0;
+ /* Allow 2 invalid packets without resetting device */
+ psmouse->resetafter = psmouse->pktsize * 2;
+
return 0;
init_fail:
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 06fc6e76ffbe..3fcb6b3cb0bd 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -563,6 +563,7 @@ static void elantech_input_sync_v4(struct psmouse *psmouse)
} else {
input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
+ input_report_key(dev, BTN_MIDDLE, packet[0] & 0x04);
}
input_mt_report_pointer_emulation(dev, true);
@@ -792,6 +793,9 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
unsigned char packet_type = packet[3] & 0x03;
bool sanity_check;
+ if ((packet[3] & 0x0f) == 0x06)
+ return PACKET_TRACKPOINT;
+
/*
* Sanity check based on the constant bits of a packet.
* The constant bits change depending on the value of
@@ -877,10 +881,19 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse)
case 4:
packet_type = elantech_packet_check_v4(psmouse);
- if (packet_type == PACKET_UNKNOWN)
+ switch (packet_type) {
+ case PACKET_UNKNOWN:
return PSMOUSE_BAD_DATA;
- elantech_report_absolute_v4(psmouse, packet_type);
+ case PACKET_TRACKPOINT:
+ elantech_report_trackpoint(psmouse, packet_type);
+ break;
+
+ default:
+ elantech_report_absolute_v4(psmouse, packet_type);
+ break;
+ }
+
break;
}
@@ -1120,6 +1133,22 @@ static void elantech_set_buttonpad_prop(struct psmouse *psmouse)
}
/*
+ * Some hw_version 4 models do have a middle button
+ */
+static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+ {
+ /* Fujitsu H730 has a middle button */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
+ },
+ },
+#endif
+ { }
+};
+
+/*
* Set the appropriate event bits for the input subsystem
*/
static int elantech_set_input_params(struct psmouse *psmouse)
@@ -1138,6 +1167,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
__clear_bit(EV_REL, dev->evbit);
__set_bit(BTN_LEFT, dev->keybit);
+ if (dmi_check_system(elantech_dmi_has_middle_button))
+ __set_bit(BTN_MIDDLE, dev->keybit);
__set_bit(BTN_RIGHT, dev->keybit);
__set_bit(BTN_TOUCH, dev->keybit);
@@ -1299,6 +1330,7 @@ ELANTECH_INT_ATTR(reg_25, 0x25);
ELANTECH_INT_ATTR(reg_26, 0x26);
ELANTECH_INT_ATTR(debug, 0);
ELANTECH_INT_ATTR(paritycheck, 0);
+ELANTECH_INT_ATTR(crc_enabled, 0);
static struct attribute *elantech_attrs[] = {
&psmouse_attr_reg_07.dattr.attr,
@@ -1313,6 +1345,7 @@ static struct attribute *elantech_attrs[] = {
&psmouse_attr_reg_26.dattr.attr,
&psmouse_attr_debug.dattr.attr,
&psmouse_attr_paritycheck.dattr.attr,
+ &psmouse_attr_crc_enabled.dattr.attr,
NULL
};
@@ -1439,6 +1472,22 @@ static int elantech_reconnect(struct psmouse *psmouse)
}
/*
+ * Some hw_version 4 models do not work with crc_disabled
+ */
+static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+ {
+ /* Fujitsu H730 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
+ },
+ },
+#endif
+ { }
+};
+
+/*
* Some hw_version 3 models go into error state when we try to set
* bit 3 and/or bit 1 of r10.
*/
@@ -1513,7 +1562,8 @@ static int elantech_set_properties(struct elantech_data *etd)
* The signatures of v3 and v4 packets change depending on the
* value of this hardware flag.
*/
- etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000);
+ etd->crc_enabled = (etd->fw_version & 0x4000) == 0x4000 ||
+ dmi_check_system(elantech_dmi_force_crc_enabled);
/* Enable real hardware resolution on hw_version 3 ? */
etd->set_hw_resolution = !dmi_check_system(no_hw_res_dmi_table);
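
crc_enabled is now the OR of the firmware flag bit and a DMI quirk, so boards like the Fujitsu H730 that report the wrong firmware bit still get CRC-style packet signatures. As a standalone predicate (dmi_match stands in for dmi_check_system()):

#include <stdbool.h>

static bool crc_enabled(unsigned int fw_version, bool dmi_match)
{
        /* firmware bit 0x4000 or a DMI override forces CRC mode */
        return (fw_version & 0x4000) == 0x4000 || dmi_match;
}
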
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 9031a0a28ea4..2a7a9174c702 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -135,8 +135,8 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
1232, 5710, 1156, 4696
},
{
- (const char * const []){"LEN0034", "LEN0036", "LEN2002",
- "LEN2004", NULL},
+ (const char * const []){"LEN0034", "LEN0036", "LEN0039",
+ "LEN2002", "LEN2004", NULL},
1024, 5112, 2024, 4832
},
{
@@ -163,6 +163,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN0036", /* T440 */
"LEN0037",
"LEN0038",
+ "LEN0039", /* T440s */
"LEN0041",
"LEN0042", /* Yoga */
"LEN0045",
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index dd5112265cc9..22f15f9951e7 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -144,13 +144,26 @@ config OMAP_IOMMU
select IOMMU_API
config OMAP_IOMMU_DEBUG
- tristate "Export OMAP IOMMU internals in DebugFS"
- depends on OMAP_IOMMU && DEBUG_FS
- help
- Select this to see extensive information about
- the internal state of OMAP IOMMU in debugfs.
+ bool "Export OMAP IOMMU internals in DebugFS"
+ depends on OMAP_IOMMU && DEBUG_FS
+ ---help---
+ Select this to see extensive information about
+ the internal state of OMAP IOMMU in debugfs.
+
+ Say N unless you know you need this.
- Say N unless you know you need this.
+config ROCKCHIP_IOMMU
+ bool "Rockchip IOMMU Support"
+ depends on ARM
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ select IOMMU_API
+ select ARM_DMA_USE_IOMMU
+ help
+ Support for IOMMUs found on Rockchip rk32xx SOCs.
+ These IOMMUs allow virtualization of the address space used by most
+ cores within the multimedia subsystem.
+ Say Y here if you are using a Rockchip SoC that includes an IOMMU
+ device.
config TEGRA_IOMMU_GART
bool "Tegra GART IOMMU Support"
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 16edef74b8ee..7b976f294a69 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -11,8 +11,8 @@ obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
-obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o
obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
+obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 505a9adac2d5..b205f76d7129 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3411,6 +3411,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
return true;
case IOMMU_CAP_INTR_REMAP:
return (irq_remapping_enabled == 1);
+ case IOMMU_CAP_NOEXEC:
+ return false;
}
return false;
@@ -3424,6 +3426,7 @@ static const struct iommu_ops amd_iommu_ops = {
.detach_dev = amd_iommu_detach_device,
.map = amd_iommu_map,
.unmap = amd_iommu_unmap,
+ .map_sg = default_iommu_map_sg,
.iova_to_phys = amd_iommu_iova_to_phys,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
};
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 60558f794922..b8aac1389a96 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -404,9 +404,16 @@ struct arm_smmu_cfg {
#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx)
#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1)
+enum arm_smmu_domain_stage {
+ ARM_SMMU_DOMAIN_S1 = 0,
+ ARM_SMMU_DOMAIN_S2,
+ ARM_SMMU_DOMAIN_NESTED,
+};
+
struct arm_smmu_domain {
struct arm_smmu_device *smmu;
struct arm_smmu_cfg cfg;
+ enum arm_smmu_domain_stage stage;
spinlock_t lock;
};
@@ -906,19 +913,46 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (smmu_domain->smmu)
goto out_unlock;
- if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
+ /*
+ * Mapping the requested stage onto what we support is surprisingly
+ * complicated, mainly because the spec allows S1+S2 SMMUs without
+ * support for nested translation. That means we end up with the
+ * following table:
+ *
+ * Requested Supported Actual
+ * S1 N S1
+ * S1 S1+S2 S1
+ * S1 S2 S2
+ * S1 S1 S1
+ * N N N
+ * N S1+S2 S2
+ * N S2 S2
+ * N S1 S1
+ *
+ * Note that you can't actually request stage-2 mappings.
+ */
+ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
+ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+
+ switch (smmu_domain->stage) {
+ case ARM_SMMU_DOMAIN_S1:
+ cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
+ start = smmu->num_s2_context_banks;
+ break;
+ case ARM_SMMU_DOMAIN_NESTED:
/*
* We will likely want to change this if/when KVM gets
* involved.
*/
- cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
- start = smmu->num_s2_context_banks;
- } else if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) {
- cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
- start = smmu->num_s2_context_banks;
- } else {
+ case ARM_SMMU_DOMAIN_S2:
cfg->cbar = CBAR_TYPE_S2_TRANS;
start = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_unlock;
}
ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
@@ -1281,7 +1315,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
unsigned long pfn, int prot, int stage)
{
pte_t *pte, *start;
- pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;
+ pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
if (pmd_none(*pmd)) {
/* Allocate a new set of tables */
@@ -1315,10 +1349,11 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
pteval |= ARM_SMMU_PTE_MEMATTR_NC;
}
+ if (prot & IOMMU_NOEXEC)
+ pteval |= ARM_SMMU_PTE_XN;
+
/* If no access, create a faulting entry to avoid TLB fills */
- if (prot & IOMMU_EXEC)
- pteval &= ~ARM_SMMU_PTE_XN;
- else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
+ if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
pteval &= ~ARM_SMMU_PTE_PAGE;
pteval |= ARM_SMMU_PTE_SH_IS;
@@ -1568,6 +1603,8 @@ static bool arm_smmu_capable(enum iommu_cap cap)
return true;
case IOMMU_CAP_INTR_REMAP:
return true; /* MSIs are just memory writes */
+ case IOMMU_CAP_NOEXEC:
+ return true;
default:
return false;
}
@@ -1644,20 +1681,57 @@ static void arm_smmu_remove_device(struct device *dev)
iommu_group_remove_device(dev);
}
+static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+ return 0;
+ default:
+ return -ENODEV;
+ }
+}
+
+static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ struct arm_smmu_domain *smmu_domain = domain->priv;
+
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ if (smmu_domain->smmu)
+ return -EPERM;
+ if (*(int *)data)
+ smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+ else
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+
+ return 0;
+ default:
+ return -ENODEV;
+ }
+}
+
static const struct iommu_ops arm_smmu_ops = {
- .capable = arm_smmu_capable,
- .domain_init = arm_smmu_domain_init,
- .domain_destroy = arm_smmu_domain_destroy,
- .attach_dev = arm_smmu_attach_dev,
- .detach_dev = arm_smmu_detach_dev,
- .map = arm_smmu_map,
- .unmap = arm_smmu_unmap,
- .iova_to_phys = arm_smmu_iova_to_phys,
- .add_device = arm_smmu_add_device,
- .remove_device = arm_smmu_remove_device,
- .pgsize_bitmap = (SECTION_SIZE |
- ARM_SMMU_PTE_CONT_SIZE |
- PAGE_SIZE),
+ .capable = arm_smmu_capable,
+ .domain_init = arm_smmu_domain_init,
+ .domain_destroy = arm_smmu_domain_destroy,
+ .attach_dev = arm_smmu_attach_dev,
+ .detach_dev = arm_smmu_detach_dev,
+ .map = arm_smmu_map,
+ .unmap = arm_smmu_unmap,
+ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = arm_smmu_iova_to_phys,
+ .add_device = arm_smmu_add_device,
+ .remove_device = arm_smmu_remove_device,
+ .domain_get_attr = arm_smmu_domain_get_attr,
+ .domain_set_attr = arm_smmu_domain_set_attr,
+ .pgsize_bitmap = (SECTION_SIZE |
+ ARM_SMMU_PTE_CONT_SIZE |
+ PAGE_SIZE),
};
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
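
With the domain_{get,set}_attr hooks wired into arm_smmu_ops, a caller such as VFIO can request nested (stage-1 over stage-2) translation through the generic attribute API before the first device attach; after attach the driver returns -EPERM. A hedged kernel-internal sketch (iommu_domain_set_attr() and DOMAIN_ATTR_NESTING are the generic API, the wrapper is schematic):

static int sketch_request_nesting(struct iommu_domain *domain)
{
        int nesting = 1;

        /* Must run before the first attach; the SMMU rejects it after. */
        return iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting);
}
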
@@ -2072,8 +2146,20 @@ static struct platform_driver arm_smmu_driver = {
static int __init arm_smmu_init(void)
{
+ struct device_node *np;
int ret;
+ /*
+ * Play nice with systems that don't have an ARM SMMU by checking that
+ * an ARM SMMU exists in the system before proceeding with the driver
+ * and IOMMU bus operation registration.
+ */
+ np = of_find_matching_node(NULL, arm_smmu_of_match);
+ if (!np)
+ return 0;
+
+ of_node_put(np);
+
ret = platform_driver_register(&arm_smmu_driver);
if (ret)
return ret;
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index c5c61cabd6e3..9847613085e1 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -44,6 +44,14 @@
#include "irq_remapping.h"
+typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
+struct dmar_res_callback {
+ dmar_res_handler_t cb[ACPI_DMAR_TYPE_RESERVED];
+ void *arg[ACPI_DMAR_TYPE_RESERVED];
+ bool ignore_unhandled;
+ bool print_entry;
+};
+
/*
* Assumptions:
 * 1) The hotplug framework guarantees that a DMAR unit will be hot-added
@@ -62,11 +70,12 @@ LIST_HEAD(dmar_drhd_units);
struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static int dmar_dev_scope_status = 1;
+static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
-static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
+static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
/*
* add INCLUDE_ALL at the tail, so scan the list will find it at
@@ -344,24 +353,45 @@ static struct notifier_block dmar_pci_bus_nb = {
.priority = INT_MIN,
};
+static struct dmar_drhd_unit *
+dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
+{
+ struct dmar_drhd_unit *dmaru;
+
+ list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
+ if (dmaru->segment == drhd->segment &&
+ dmaru->reg_base_addr == drhd->address)
+ return dmaru;
+
+ return NULL;
+}
+
/**
* dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
* structure which uniquely represent one DMA remapping hardware unit
* present in the platform
*/
-static int __init
-dmar_parse_one_drhd(struct acpi_dmar_header *header)
+static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_hardware_unit *drhd;
struct dmar_drhd_unit *dmaru;
int ret = 0;
drhd = (struct acpi_dmar_hardware_unit *)header;
- dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
+ dmaru = dmar_find_dmaru(drhd);
+ if (dmaru)
+ goto out;
+
+ dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
if (!dmaru)
return -ENOMEM;
- dmaru->hdr = header;
+ /*
+	 * If the header was allocated from slab by the ACPI _DSM method, we
+	 * need to copy the content because the buffer will be freed on return.
+ */
+ dmaru->hdr = (void *)(dmaru + 1);
+ memcpy(dmaru->hdr, header, header->length);
dmaru->reg_base_addr = drhd->address;
dmaru->segment = drhd->segment;
dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
@@ -381,6 +411,11 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
return ret;
}
dmar_register_drhd_unit(dmaru);
+
+out:
+ if (arg)
+ (*(int *)arg)++;
+
return 0;
}
@@ -393,7 +428,8 @@ static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
kfree(dmaru);
}
-static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
+static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
+ void *arg)
{
struct acpi_dmar_andd *andd = (void *)header;
@@ -414,8 +450,7 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
}
#ifdef CONFIG_ACPI_NUMA
-static int __init
-dmar_parse_one_rhsa(struct acpi_dmar_header *header)
+static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_rhsa *rhsa;
struct dmar_drhd_unit *drhd;
@@ -442,6 +477,8 @@ dmar_parse_one_rhsa(struct acpi_dmar_header *header)
return 0;
}
+#else
+#define dmar_parse_one_rhsa dmar_res_noop
#endif
static void __init
@@ -503,6 +540,52 @@ static int __init dmar_table_detect(void)
return (ACPI_SUCCESS(status) ? 1 : 0);
}
+static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
+ size_t len, struct dmar_res_callback *cb)
+{
+ int ret = 0;
+ struct acpi_dmar_header *iter, *next;
+ struct acpi_dmar_header *end = ((void *)start) + len;
+
+ for (iter = start; iter < end && ret == 0; iter = next) {
+ next = (void *)iter + iter->length;
+ if (iter->length == 0) {
+ /* Avoid looping forever on bad ACPI tables */
+ pr_debug(FW_BUG "Invalid 0-length structure\n");
+ break;
+ } else if (next > end) {
+ /* Avoid passing table end */
+ pr_warn(FW_BUG "record passes table end\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (cb->print_entry)
+ dmar_table_print_dmar_entry(iter);
+
+ if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
+ /* continue for forward compatibility */
+ pr_debug("Unknown DMAR structure type %d\n",
+ iter->type);
+ } else if (cb->cb[iter->type]) {
+ ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
+ } else if (!cb->ignore_unhandled) {
+ pr_warn("No handler for DMAR structure type %d\n",
+ iter->type);
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
+ struct dmar_res_callback *cb)
+{
+ return dmar_walk_remapping_entries((void *)(dmar + 1),
+ dmar->header.length - sizeof(*dmar), cb);
+}
+
/**
* parse_dmar_table - parses the DMA reporting table
*/
@@ -510,9 +593,18 @@ static int __init
parse_dmar_table(void)
{
struct acpi_table_dmar *dmar;
- struct acpi_dmar_header *entry_header;
int ret = 0;
int drhd_count = 0;
+ struct dmar_res_callback cb = {
+ .print_entry = true,
+ .ignore_unhandled = true,
+ .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
+ .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
+ .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
+ .cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
+ .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
+ .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
+ };
/*
* Do it again, earlier dmar_tbl mapping could be mapped with
@@ -536,51 +628,10 @@ parse_dmar_table(void)
}
pr_info("Host address width %d\n", dmar->width + 1);
-
- entry_header = (struct acpi_dmar_header *)(dmar + 1);
- while (((unsigned long)entry_header) <
- (((unsigned long)dmar) + dmar_tbl->length)) {
- /* Avoid looping forever on bad ACPI tables */
- if (entry_header->length == 0) {
- pr_warn("Invalid 0-length structure\n");
- ret = -EINVAL;
- break;
- }
-
- dmar_table_print_dmar_entry(entry_header);
-
- switch (entry_header->type) {
- case ACPI_DMAR_TYPE_HARDWARE_UNIT:
- drhd_count++;
- ret = dmar_parse_one_drhd(entry_header);
- break;
- case ACPI_DMAR_TYPE_RESERVED_MEMORY:
- ret = dmar_parse_one_rmrr(entry_header);
- break;
- case ACPI_DMAR_TYPE_ROOT_ATS:
- ret = dmar_parse_one_atsr(entry_header);
- break;
- case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
-#ifdef CONFIG_ACPI_NUMA
- ret = dmar_parse_one_rhsa(entry_header);
-#endif
- break;
- case ACPI_DMAR_TYPE_NAMESPACE:
- ret = dmar_parse_one_andd(entry_header);
- break;
- default:
- pr_warn("Unknown DMAR structure type %d\n",
- entry_header->type);
- ret = 0; /* for forward compatibility */
- break;
- }
- if (ret)
- break;
-
- entry_header = ((void *)entry_header + entry_header->length);
- }
- if (drhd_count == 0)
+ ret = dmar_walk_dmar_table(dmar, &cb);
+ if (ret == 0 && drhd_count == 0)
pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
+
return ret;
}
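
parse_dmar_table() now drives a single generic walker with a per-type callback table instead of an open-coded switch, which is what lets the hotplug code further down reuse the same walker over _DSM buffers. A standalone model of the dispatch:

#include <stdbool.h>

typedef int (*res_handler_t)(int type, void *arg);

struct res_callback {
        res_handler_t cb[4];            /* indexed by structure type */
        void *arg[4];
        bool ignore_unhandled;
};

static int dispatch_one(const struct res_callback *c, int type)
{
        if (type >= 4)
                return 0;               /* unknown type: skip, stay forward compatible */
        if (c->cb[type])
                return c->cb[type](type, c->arg[type]);
        return c->ignore_unhandled ? 0 : -1;
}
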
@@ -778,76 +829,68 @@ static void warn_invalid_dmar(u64 addr, const char *message)
dmi_get_system_info(DMI_PRODUCT_VERSION));
}
-static int __init check_zero_address(void)
+static int __ref
+dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
- struct acpi_table_dmar *dmar;
- struct acpi_dmar_header *entry_header;
struct acpi_dmar_hardware_unit *drhd;
+ void __iomem *addr;
+ u64 cap, ecap;
- dmar = (struct acpi_table_dmar *)dmar_tbl;
- entry_header = (struct acpi_dmar_header *)(dmar + 1);
-
- while (((unsigned long)entry_header) <
- (((unsigned long)dmar) + dmar_tbl->length)) {
- /* Avoid looping forever on bad ACPI tables */
- if (entry_header->length == 0) {
- pr_warn("Invalid 0-length structure\n");
- return 0;
- }
+ drhd = (void *)entry;
+ if (!drhd->address) {
+ warn_invalid_dmar(0, "");
+ return -EINVAL;
+ }
- if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
- void __iomem *addr;
- u64 cap, ecap;
+ if (arg)
+ addr = ioremap(drhd->address, VTD_PAGE_SIZE);
+ else
+ addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
+ if (!addr) {
+ pr_warn("IOMMU: can't validate: %llx\n", drhd->address);
+ return -EINVAL;
+ }
- drhd = (void *)entry_header;
- if (!drhd->address) {
- warn_invalid_dmar(0, "");
- goto failed;
- }
+ cap = dmar_readq(addr + DMAR_CAP_REG);
+ ecap = dmar_readq(addr + DMAR_ECAP_REG);
- addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
- if (!addr ) {
- printk("IOMMU: can't validate: %llx\n", drhd->address);
- goto failed;
- }
- cap = dmar_readq(addr + DMAR_CAP_REG);
- ecap = dmar_readq(addr + DMAR_ECAP_REG);
- early_iounmap(addr, VTD_PAGE_SIZE);
- if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
- warn_invalid_dmar(drhd->address,
- " returns all ones");
- goto failed;
- }
- }
+ if (arg)
+ iounmap(addr);
+ else
+ early_iounmap(addr, VTD_PAGE_SIZE);
- entry_header = ((void *)entry_header + entry_header->length);
+ if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
+ warn_invalid_dmar(drhd->address, " returns all ones");
+ return -EINVAL;
}
- return 1;
-failed:
return 0;
}
int __init detect_intel_iommu(void)
{
int ret;
+ struct dmar_res_callback validate_drhd_cb = {
+ .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
+ .ignore_unhandled = true,
+ };
down_write(&dmar_global_lock);
ret = dmar_table_detect();
if (ret)
- ret = check_zero_address();
- {
- if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
- iommu_detected = 1;
- /* Make sure ACS will be enabled */
- pci_request_acs();
- }
+ ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
+ &validate_drhd_cb);
+ if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
+ iommu_detected = 1;
+ /* Make sure ACS will be enabled */
+ pci_request_acs();
+ }
#ifdef CONFIG_X86
- if (ret)
- x86_init.iommu.iommu_init = intel_iommu_init;
+ if (ret)
+ x86_init.iommu.iommu_init = intel_iommu_init;
#endif
- }
+
early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
dmar_tbl = NULL;
up_write(&dmar_global_lock);
@@ -931,11 +974,32 @@ out:
return err;
}
+static int dmar_alloc_seq_id(struct intel_iommu *iommu)
+{
+ iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
+ DMAR_UNITS_SUPPORTED);
+ if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
+ iommu->seq_id = -1;
+ } else {
+ set_bit(iommu->seq_id, dmar_seq_ids);
+ sprintf(iommu->name, "dmar%d", iommu->seq_id);
+ }
+
+ return iommu->seq_id;
+}
+
+static void dmar_free_seq_id(struct intel_iommu *iommu)
+{
+ if (iommu->seq_id >= 0) {
+ clear_bit(iommu->seq_id, dmar_seq_ids);
+ iommu->seq_id = -1;
+ }
+}
+
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
u32 ver, sts;
- static int iommu_allocated = 0;
int agaw = 0;
int msagaw = 0;
int err;
@@ -949,13 +1013,16 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
if (!iommu)
return -ENOMEM;
- iommu->seq_id = iommu_allocated++;
- sprintf (iommu->name, "dmar%d", iommu->seq_id);
+ if (dmar_alloc_seq_id(iommu) < 0) {
+ pr_err("IOMMU: failed to allocate seq_id\n");
+ err = -ENOSPC;
+ goto error;
+ }
err = map_iommu(iommu, drhd->reg_base_addr);
if (err) {
pr_err("IOMMU: failed to map %s\n", iommu->name);
- goto error;
+ goto error_free_seq_id;
}
err = -EINVAL;
@@ -1005,9 +1072,11 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
return 0;
- err_unmap:
+err_unmap:
unmap_iommu(iommu);
- error:
+error_free_seq_id:
+ dmar_free_seq_id(iommu);
+error:
kfree(iommu);
return err;
}
@@ -1031,6 +1100,7 @@ static void free_iommu(struct intel_iommu *iommu)
if (iommu->reg)
unmap_iommu(iommu);
+ dmar_free_seq_id(iommu);
kfree(iommu);
}
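
The seq_id bitmap replaces a monotonically increasing counter so that an ID freed by hot-remove can be handed out again on the next hot-add. A standalone model of the allocator (64 slots for the model; the real bound is DMAR_UNITS_SUPPORTED):

static unsigned long long used;         /* one bit per sequence ID */

static int alloc_id(void)
{
        for (int id = 0; id < 64; id++)
                if (!(used & (1ULL << id))) {
                        used |= 1ULL << id;
                        return id;      /* lowest free ID, reusable after free */
                }
        return -1;                      /* all IDs in use */
}

static void free_id(int id)
{
        if (id >= 0)
                used &= ~(1ULL << id);
}
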
@@ -1661,12 +1731,17 @@ int __init dmar_ir_support(void)
return dmar->flags & 0x1;
}
+/* Check whether DMAR units are in use */
+static inline bool dmar_in_use(void)
+{
+ return irq_remapping_enabled || intel_iommu_enabled;
+}
+
static int __init dmar_free_unused_resources(void)
{
struct dmar_drhd_unit *dmaru, *dmaru_n;
- /* DMAR units are in use */
- if (irq_remapping_enabled || intel_iommu_enabled)
+ if (dmar_in_use())
return 0;
if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
@@ -1684,3 +1759,242 @@ static int __init dmar_free_unused_resources(void)
late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);
+
+/*
+ * DMAR Hotplug Support
+ * For more details, please refer to Intel(R) Virtualization Technology
+ * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
+ * "Remapping Hardware Unit Hot Plug".
+ */
+static u8 dmar_hp_uuid[] = {
+ /* 0000 */ 0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
+ /* 0008 */ 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
+};
+
+/*
+ * Currently there's only one revision and BIOS will not check the revision id,
+ * so use 0 for safety.
+ */
+#define DMAR_DSM_REV_ID 0
+#define DMAR_DSM_FUNC_DRHD 1
+#define DMAR_DSM_FUNC_ATSR 2
+#define DMAR_DSM_FUNC_RHSA 3
+
+static inline bool dmar_detect_dsm(acpi_handle handle, int func)
+{
+ return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
+}
+
+static int dmar_walk_dsm_resource(acpi_handle handle, int func,
+ dmar_res_handler_t handler, void *arg)
+{
+ int ret = -ENODEV;
+ union acpi_object *obj;
+ struct acpi_dmar_header *start;
+ struct dmar_res_callback callback;
+ static int res_type[] = {
+ [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
+ [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
+ [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
+ };
+
+ if (!dmar_detect_dsm(handle, func))
+ return 0;
+
+ obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
+ func, NULL, ACPI_TYPE_BUFFER);
+ if (!obj)
+ return -ENODEV;
+
+ memset(&callback, 0, sizeof(callback));
+ callback.cb[res_type[func]] = handler;
+ callback.arg[res_type[func]] = arg;
+ start = (struct acpi_dmar_header *)obj->buffer.pointer;
+ ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
+
+ ACPI_FREE(obj);
+
+ return ret;
+}
+
+static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
+{
+ int ret;
+ struct dmar_drhd_unit *dmaru;
+
+ dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
+ if (!dmaru)
+ return -ENODEV;
+
+ ret = dmar_ir_hotplug(dmaru, true);
+ if (ret == 0)
+ ret = dmar_iommu_hotplug(dmaru, true);
+
+ return ret;
+}
+
+static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
+{
+ int i, ret;
+ struct device *dev;
+ struct dmar_drhd_unit *dmaru;
+
+ dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
+ if (!dmaru)
+ return 0;
+
+ /*
+ * All PCI devices managed by this unit should have been destroyed.
+ */
+ if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt)
+ for_each_active_dev_scope(dmaru->devices,
+ dmaru->devices_cnt, i, dev)
+ return -EBUSY;
+
+ ret = dmar_ir_hotplug(dmaru, false);
+ if (ret == 0)
+ ret = dmar_iommu_hotplug(dmaru, false);
+
+ return ret;
+}
+
+static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
+{
+ struct dmar_drhd_unit *dmaru;
+
+ dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
+ if (dmaru) {
+ list_del_rcu(&dmaru->list);
+ synchronize_rcu();
+ dmar_free_drhd(dmaru);
+ }
+
+ return 0;
+}
+
+static int dmar_hotplug_insert(acpi_handle handle)
+{
+ int ret;
+ int drhd_count = 0;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_validate_one_drhd, (void *)1);
+ if (ret)
+ goto out;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_parse_one_drhd, (void *)&drhd_count);
+ if (ret == 0 && drhd_count == 0) {
+ pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
+ goto out;
+ } else if (ret) {
+ goto release_drhd;
+ }
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
+ &dmar_parse_one_rhsa, NULL);
+ if (ret)
+ goto release_drhd;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
+ &dmar_parse_one_atsr, NULL);
+ if (ret)
+ goto release_atsr;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_add_drhd, NULL);
+ if (!ret)
+ return 0;
+
+ dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_remove_drhd, NULL);
+release_atsr:
+ dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
+ &dmar_release_one_atsr, NULL);
+release_drhd:
+ dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_release_drhd, NULL);
+out:
+ return ret;
+}
+
+static int dmar_hotplug_remove(acpi_handle handle)
+{
+ int ret;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
+ &dmar_check_one_atsr, NULL);
+ if (ret)
+ return ret;
+
+ ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_remove_drhd, NULL);
+ if (ret == 0) {
+ WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
+ &dmar_release_one_atsr, NULL));
+ WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_release_drhd, NULL));
+ } else {
+ dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
+ &dmar_hp_add_drhd, NULL);
+ }
+
+ return ret;
+}
+
+static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
+ void *context, void **retval)
+{
+ acpi_handle *phdl = retval;
+
+ if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
+ *phdl = handle;
+ return AE_CTRL_TERMINATE;
+ }
+
+ return AE_OK;
+}
+
+static int dmar_device_hotplug(acpi_handle handle, bool insert)
+{
+ int ret;
+ acpi_handle tmp = NULL;
+ acpi_status status;
+
+ if (!dmar_in_use())
+ return 0;
+
+ if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
+ tmp = handle;
+ } else {
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
+ ACPI_UINT32_MAX,
+ dmar_get_dsm_handle,
+ NULL, NULL, &tmp);
+ if (ACPI_FAILURE(status)) {
+ pr_warn("Failed to locate _DSM method.\n");
+ return -ENXIO;
+ }
+ }
+ if (tmp == NULL)
+ return 0;
+
+ down_write(&dmar_global_lock);
+ if (insert)
+ ret = dmar_hotplug_insert(tmp);
+ else
+ ret = dmar_hotplug_remove(tmp);
+ up_write(&dmar_global_lock);
+
+ return ret;
+}
+
+int dmar_device_add(acpi_handle handle)
+{
+ return dmar_device_hotplug(handle, true);
+}
+
+int dmar_device_remove(acpi_handle handle)
+{
+ return dmar_device_hotplug(handle, false);
+}
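
dmar_device_add()/dmar_device_remove() are the entry points the ACPI hotplug path is expected to call with the handle of the hot-plugged object; dmar_device_hotplug() then locates the _DSM-carrying handle itself if needed. A hedged sketch of a caller (the notifier wrapper is schematic, not an existing kernel function):

static void sketch_dmar_hp_notify(acpi_handle handle, bool add)
{
        int ret = add ? dmar_device_add(handle)
                      : dmar_device_remove(handle);

        if (ret)
                pr_warn("DMAR hotplug %s failed: %d\n",
                        add ? "add" : "remove", ret);
}
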
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 74233186f6f7..28372b85d8da 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1178,6 +1178,7 @@ static const struct iommu_ops exynos_iommu_ops = {
.detach_dev = exynos_iommu_detach_device,
.map = exynos_iommu_map,
.unmap = exynos_iommu_unmap,
+ .map_sg = default_iommu_map_sg,
.iova_to_phys = exynos_iommu_iova_to_phys,
.add_device = exynos_iommu_add_device,
.remove_device = exynos_iommu_remove_device,
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a27d6cb1a793..1232336b960e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -195,6 +195,7 @@ static inline void set_root_present(struct root_entry *root)
}
static inline void set_root_value(struct root_entry *root, unsigned long value)
{
+ root->val &= ~VTD_PAGE_MASK;
root->val |= value & VTD_PAGE_MASK;
}
@@ -247,6 +248,7 @@ static inline void context_set_translation_type(struct context_entry *context,
static inline void context_set_address_root(struct context_entry *context,
unsigned long value)
{
+ context->lo &= ~VTD_PAGE_MASK;
context->lo |= value & VTD_PAGE_MASK;
}
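
Both setters above now clear the old address bits before OR-ing in the new page address; without the mask-out, reprogramming a live root or context entry would merge the old and new addresses. A standalone demonstration (a local PAGE_MASK stands in for VTD_PAGE_MASK):

#include <stdio.h>
#include <stdint.h>

#define PAGE_MASK (~0xfffULL)

int main(void)
{
        uint64_t entry = 0x1000 | 0x1;          /* old address + present bit */

        uint64_t buggy = entry;
        buggy |= 0x2000 & PAGE_MASK;            /* old way: 0x3001, corrupt */

        uint64_t fixed = entry;
        fixed &= ~PAGE_MASK;                    /* new way: clear old address */
        fixed |= 0x2000 & PAGE_MASK;            /* 0x2001, correct */

        printf("buggy=%#llx fixed=%#llx\n",
               (unsigned long long)buggy, (unsigned long long)fixed);
        return 0;
}
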
@@ -328,17 +330,10 @@ static int hw_pass_through = 1;
/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
-/* define the limit of IOMMUs supported in each domain */
-#ifdef CONFIG_X86
-# define IOMMU_UNITS_SUPPORTED MAX_IO_APICS
-#else
-# define IOMMU_UNITS_SUPPORTED 64
-#endif
-
struct dmar_domain {
int id; /* domain id */
int nid; /* node id */
- DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED);
+ DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
/* bitmap of iommus this domain uses */
struct list_head devices; /* all devices' list */
@@ -1132,8 +1127,11 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
unsigned long flags;
root = (struct root_entry *)alloc_pgtable_page(iommu->node);
- if (!root)
+ if (!root) {
+ pr_err("IOMMU: allocating root entry for %s failed\n",
+ iommu->name);
return -ENOMEM;
+ }
__iommu_flush_cache(iommu, root, ROOT_SIZE);
@@ -1473,7 +1471,7 @@ static int iommu_init_domains(struct intel_iommu *iommu)
return 0;
}
-static void free_dmar_iommu(struct intel_iommu *iommu)
+static void disable_dmar_iommu(struct intel_iommu *iommu)
{
struct dmar_domain *domain;
int i;
@@ -1497,11 +1495,16 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
+}
- kfree(iommu->domains);
- kfree(iommu->domain_ids);
- iommu->domains = NULL;
- iommu->domain_ids = NULL;
+static void free_dmar_iommu(struct intel_iommu *iommu)
+{
+ if ((iommu->domains) && (iommu->domain_ids)) {
+ kfree(iommu->domains);
+ kfree(iommu->domain_ids);
+ iommu->domains = NULL;
+ iommu->domain_ids = NULL;
+ }
g_iommus[iommu->seq_id] = NULL;
@@ -1983,7 +1986,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
{
struct dma_pte *first_pte = NULL, *pte = NULL;
phys_addr_t uninitialized_var(pteval);
- unsigned long sg_res;
+ unsigned long sg_res = 0;
unsigned int largepage_lvl = 0;
unsigned long lvl_pages = 0;
@@ -1994,10 +1997,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
- if (sg)
- sg_res = 0;
- else {
- sg_res = nr_pages + 1;
+ if (!sg) {
+ sg_res = nr_pages;
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
}
@@ -2708,6 +2709,41 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
return 0;
}
+static void intel_iommu_init_qi(struct intel_iommu *iommu)
+{
+ /*
+	 * Start from a sane IOMMU hardware state.
+	 * If queued invalidation was already initialized by us
+	 * (for example, while enabling interrupt remapping) then
+	 * things are already rolling from a sane state.
+ */
+ if (!iommu->qi) {
+ /*
+ * Clear any previous faults.
+ */
+ dmar_fault(-1, iommu);
+ /*
+ * Disable queued invalidation if supported and already enabled
+ * before OS handover.
+ */
+ dmar_disable_qi(iommu);
+ }
+
+ if (dmar_enable_qi(iommu)) {
+ /*
+ * Queued Invalidate not enabled, use Register Based Invalidate
+ */
+ iommu->flush.flush_context = __iommu_flush_context;
+ iommu->flush.flush_iotlb = __iommu_flush_iotlb;
+ pr_info("IOMMU: %s using Register based invalidation\n",
+ iommu->name);
+ } else {
+ iommu->flush.flush_context = qi_flush_context;
+ iommu->flush.flush_iotlb = qi_flush_iotlb;
+ pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
+ }
+}
+
static int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
@@ -2728,14 +2764,18 @@ static int __init init_dmars(void)
* threaded kernel __init code path all other access are read
* only
*/
- if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) {
+ if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
g_num_of_iommus++;
continue;
}
printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
- IOMMU_UNITS_SUPPORTED);
+ DMAR_UNITS_SUPPORTED);
}
+ /* Preallocate enough resources for IOMMU hot-addition */
+ if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
+ g_num_of_iommus = DMAR_UNITS_SUPPORTED;
+
g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
GFP_KERNEL);
if (!g_iommus) {
@@ -2764,58 +2804,14 @@ static int __init init_dmars(void)
* among all IOMMU's. Need to Split it later.
*/
ret = iommu_alloc_root_entry(iommu);
- if (ret) {
- printk(KERN_ERR "IOMMU: allocate root entry failed\n");
+ if (ret)
goto free_iommu;
- }
if (!ecap_pass_through(iommu->ecap))
hw_pass_through = 0;
}
- /*
- * Start from the sane iommu hardware state.
- */
- for_each_active_iommu(iommu, drhd) {
- /*
- * If the queued invalidation is already initialized by us
- * (for example, while enabling interrupt-remapping) then
- * we got the things already rolling from a sane state.
- */
- if (iommu->qi)
- continue;
-
- /*
- * Clear any previous faults.
- */
- dmar_fault(-1, iommu);
- /*
- * Disable queued invalidation if supported and already enabled
- * before OS handover.
- */
- dmar_disable_qi(iommu);
- }
-
- for_each_active_iommu(iommu, drhd) {
- if (dmar_enable_qi(iommu)) {
- /*
- * Queued Invalidate not enabled, use Register Based
- * Invalidate
- */
- iommu->flush.flush_context = __iommu_flush_context;
- iommu->flush.flush_iotlb = __iommu_flush_iotlb;
- printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
- "invalidation\n",
- iommu->seq_id,
- (unsigned long long)drhd->reg_base_addr);
- } else {
- iommu->flush.flush_context = qi_flush_context;
- iommu->flush.flush_iotlb = qi_flush_iotlb;
- printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
- "invalidation\n",
- iommu->seq_id,
- (unsigned long long)drhd->reg_base_addr);
- }
- }
+ for_each_active_iommu(iommu, drhd)
+ intel_iommu_init_qi(iommu);
if (iommu_pass_through)
iommu_identity_mapping |= IDENTMAP_ALL;
@@ -2901,8 +2897,10 @@ static int __init init_dmars(void)
return 0;
free_iommu:
- for_each_active_iommu(iommu, drhd)
+ for_each_active_iommu(iommu, drhd) {
+ disable_dmar_iommu(iommu);
free_dmar_iommu(iommu);
+ }
kfree(deferred_flush);
free_g_iommus:
kfree(g_iommus);
@@ -3682,7 +3680,7 @@ static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_PM */
-int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
+int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_reserved_memory *rmrr;
struct dmar_rmrr_unit *rmrru;
@@ -3708,17 +3706,48 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
return 0;
}
-int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
+static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
+{
+ struct dmar_atsr_unit *atsru;
+ struct acpi_dmar_atsr *tmp;
+
+ list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
+ tmp = (struct acpi_dmar_atsr *)atsru->hdr;
+ if (atsr->segment != tmp->segment)
+ continue;
+ if (atsr->header.length != tmp->header.length)
+ continue;
+ if (memcmp(atsr, tmp, atsr->header.length) == 0)
+ return atsru;
+ }
+
+ return NULL;
+}
+
+int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
+ if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
+ return 0;
+
atsr = container_of(hdr, struct acpi_dmar_atsr, header);
- atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
+ atsru = dmar_find_atsr(atsr);
+ if (atsru)
+ return 0;
+
+ atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
if (!atsru)
return -ENOMEM;
- atsru->hdr = hdr;
+ /*
+	 * If the memory was allocated from slab by the ACPI _DSM method, we
+	 * need to copy the content because the buffer will be freed on
+	 * return.
+ */
+ atsru->hdr = (void *)(atsru + 1);
+ memcpy(atsru->hdr, hdr, hdr->length);
atsru->include_all = atsr->flags & 0x1;
if (!atsru->include_all) {
atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
@@ -3741,6 +3770,138 @@ static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
kfree(atsru);
}
+int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
+{
+ struct acpi_dmar_atsr *atsr;
+ struct dmar_atsr_unit *atsru;
+
+ atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+ atsru = dmar_find_atsr(atsr);
+ if (atsru) {
+ list_del_rcu(&atsru->list);
+ synchronize_rcu();
+ intel_iommu_free_atsr(atsru);
+ }
+
+ return 0;
+}
+
+int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
+{
+ int i;
+ struct device *dev;
+ struct acpi_dmar_atsr *atsr;
+ struct dmar_atsr_unit *atsru;
+
+ atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+ atsru = dmar_find_atsr(atsr);
+ if (!atsru)
+ return 0;
+
+ if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
+ for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
+ i, dev)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
+{
+ int sp, ret = 0;
+ struct intel_iommu *iommu = dmaru->iommu;
+
+ if (g_iommus[iommu->seq_id])
+ return 0;
+
+ if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
+ pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
+ iommu->name);
+ return -ENXIO;
+ }
+ if (!ecap_sc_support(iommu->ecap) &&
+ domain_update_iommu_snooping(iommu)) {
+ pr_warn("IOMMU: %s doesn't support snooping.\n",
+ iommu->name);
+ return -ENXIO;
+ }
+ sp = domain_update_iommu_superpage(iommu) - 1;
+ if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
+ pr_warn("IOMMU: %s doesn't support large page.\n",
+ iommu->name);
+ return -ENXIO;
+ }
+
+ /*
+ * Disable translation if already enabled prior to OS handover.
+ */
+ if (iommu->gcmd & DMA_GCMD_TE)
+ iommu_disable_translation(iommu);
+
+ g_iommus[iommu->seq_id] = iommu;
+ ret = iommu_init_domains(iommu);
+ if (ret == 0)
+ ret = iommu_alloc_root_entry(iommu);
+ if (ret)
+ goto out;
+
+ if (dmaru->ignored) {
+ /*
+ * we always have to disable PMRs or DMA may fail on this device
+ */
+ if (force_on)
+ iommu_disable_protect_mem_regions(iommu);
+ return 0;
+ }
+
+ intel_iommu_init_qi(iommu);
+ iommu_flush_write_buffer(iommu);
+ ret = dmar_set_interrupt(iommu);
+ if (ret)
+ goto disable_iommu;
+
+ iommu_set_root_entry(iommu);
+ iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+ iommu_enable_translation(iommu);
+
+ if (si_domain) {
+ ret = iommu_attach_domain(si_domain, iommu);
+ if (ret < 0 || si_domain->id != ret)
+ goto disable_iommu;
+ domain_attach_iommu(si_domain, iommu);
+ }
+
+ iommu_disable_protect_mem_regions(iommu);
+ return 0;
+
+disable_iommu:
+ disable_dmar_iommu(iommu);
+out:
+ free_dmar_iommu(iommu);
+ return ret;
+}
+
+int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
+{
+ int ret = 0;
+ struct intel_iommu *iommu = dmaru->iommu;
+
+ if (!intel_iommu_enabled)
+ return 0;
+ if (iommu == NULL)
+ return -EINVAL;
+
+ if (insert) {
+ ret = intel_iommu_add(dmaru);
+ } else {
+ disable_dmar_iommu(iommu);
+ free_dmar_iommu(iommu);
+ }
+
+ return ret;
+}
+
static void intel_iommu_free_dmars(void)
{
struct dmar_rmrr_unit *rmrru, *rmrr_n;
@@ -4467,6 +4628,7 @@ static const struct iommu_ops intel_iommu_ops = {
.detach_dev = intel_iommu_detach_device,
.map = intel_iommu_map,
.unmap = intel_iommu_unmap,
+ .map_sg = default_iommu_map_sg,
.iova_to_phys = intel_iommu_iova_to_phys,
.add_device = intel_iommu_add_device,
.remove_device = intel_iommu_remove_device,
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 7c80661b35c1..27541d440849 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -36,7 +36,6 @@ struct hpet_scope {
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
-static int ir_ioapic_num, ir_hpet_num;
/*
* Lock ordering:
@@ -206,7 +205,7 @@ static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
int i;
for (i = 0; i < MAX_HPET_TBS; i++)
- if (ir_hpet[i].id == hpet_id)
+ if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
return ir_hpet[i].iommu;
return NULL;
}
@@ -216,7 +215,7 @@ static struct intel_iommu *map_ioapic_to_ir(int apic)
int i;
for (i = 0; i < MAX_IO_APICS; i++)
- if (ir_ioapic[i].id == apic)
+ if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
return ir_ioapic[i].iommu;
return NULL;
}
@@ -325,7 +324,7 @@ static int set_ioapic_sid(struct irte *irte, int apic)
down_read(&dmar_global_lock);
for (i = 0; i < MAX_IO_APICS; i++) {
- if (ir_ioapic[i].id == apic) {
+ if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
break;
}
@@ -352,7 +351,7 @@ static int set_hpet_sid(struct irte *irte, u8 id)
down_read(&dmar_global_lock);
for (i = 0; i < MAX_HPET_TBS; i++) {
- if (ir_hpet[i].id == id) {
+ if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
break;
}
@@ -473,17 +472,17 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
-
-static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
+static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
struct ir_table *ir_table;
struct page *pages;
unsigned long *bitmap;
- ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
- GFP_ATOMIC);
+ if (iommu->ir_table)
+ return 0;
- if (!iommu->ir_table)
+ ir_table = kzalloc(sizeof(struct ir_table), GFP_ATOMIC);
+ if (!ir_table)
return -ENOMEM;
pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
@@ -492,24 +491,37 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
if (!pages) {
pr_err("IR%d: failed to allocate pages of order %d\n",
iommu->seq_id, INTR_REMAP_PAGE_ORDER);
- kfree(iommu->ir_table);
- return -ENOMEM;
+ goto out_free_table;
}
bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
sizeof(long), GFP_ATOMIC);
if (bitmap == NULL) {
pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
- __free_pages(pages, INTR_REMAP_PAGE_ORDER);
- kfree(ir_table);
- return -ENOMEM;
+ goto out_free_pages;
}
ir_table->base = page_address(pages);
ir_table->bitmap = bitmap;
-
- iommu_set_irq_remapping(iommu, mode);
+ iommu->ir_table = ir_table;
return 0;
+
+out_free_pages:
+ __free_pages(pages, INTR_REMAP_PAGE_ORDER);
+out_free_table:
+ kfree(ir_table);
+ return -ENOMEM;
+}
+
+static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
+{
+ if (iommu && iommu->ir_table) {
+ free_pages((unsigned long)iommu->ir_table->base,
+ INTR_REMAP_PAGE_ORDER);
+ kfree(iommu->ir_table->bitmap);
+ kfree(iommu->ir_table);
+ iommu->ir_table = NULL;
+ }
}
/*
@@ -666,9 +678,10 @@ static int __init intel_enable_irq_remapping(void)
if (!ecap_ir_support(iommu->ecap))
continue;
- if (intel_setup_irq_remapping(iommu, eim))
+ if (intel_setup_irq_remapping(iommu))
goto error;
+ iommu_set_irq_remapping(iommu, eim);
setup = 1;
}
@@ -689,9 +702,11 @@ static int __init intel_enable_irq_remapping(void)
return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
error:
- /*
- * handle error condition gracefully here!
- */
+ for_each_iommu(iommu, drhd)
+ if (ecap_ir_support(iommu->ecap)) {
+ iommu_disable_irq_remapping(iommu);
+ intel_teardown_irq_remapping(iommu);
+ }
if (x2apic_present)
pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
@@ -699,12 +714,13 @@ error:
return -1;
}
-static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
- struct intel_iommu *iommu)
+static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
+ struct intel_iommu *iommu,
+ struct acpi_dmar_hardware_unit *drhd)
{
struct acpi_dmar_pci_path *path;
u8 bus;
- int count;
+ int count, free = -1;
bus = scope->bus;
path = (struct acpi_dmar_pci_path *)(scope + 1);
@@ -720,19 +736,36 @@ static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
PCI_SECONDARY_BUS);
path++;
}
- ir_hpet[ir_hpet_num].bus = bus;
- ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function);
- ir_hpet[ir_hpet_num].iommu = iommu;
- ir_hpet[ir_hpet_num].id = scope->enumeration_id;
- ir_hpet_num++;
+
+ for (count = 0; count < MAX_HPET_TBS; count++) {
+ if (ir_hpet[count].iommu == iommu &&
+ ir_hpet[count].id == scope->enumeration_id)
+ return 0;
+ else if (ir_hpet[count].iommu == NULL && free == -1)
+ free = count;
+ }
+ if (free == -1) {
+ pr_warn("Exceeded Max HPET blocks\n");
+ return -ENOSPC;
+ }
+
+ ir_hpet[free].iommu = iommu;
+ ir_hpet[free].id = scope->enumeration_id;
+ ir_hpet[free].bus = bus;
+ ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
+ pr_info("HPET id %d under DRHD base 0x%Lx\n",
+ scope->enumeration_id, drhd->address);
+
+ return 0;
}
-static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
- struct intel_iommu *iommu)
+static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
+ struct intel_iommu *iommu,
+ struct acpi_dmar_hardware_unit *drhd)
{
struct acpi_dmar_pci_path *path;
u8 bus;
- int count;
+ int count, free = -1;
bus = scope->bus;
path = (struct acpi_dmar_pci_path *)(scope + 1);
@@ -749,54 +782,63 @@ static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
path++;
}
- ir_ioapic[ir_ioapic_num].bus = bus;
- ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function);
- ir_ioapic[ir_ioapic_num].iommu = iommu;
- ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
- ir_ioapic_num++;
+ for (count = 0; count < MAX_IO_APICS; count++) {
+ if (ir_ioapic[count].iommu == iommu &&
+ ir_ioapic[count].id == scope->enumeration_id)
+ return 0;
+ else if (ir_ioapic[count].iommu == NULL && free == -1)
+ free = count;
+ }
+ if (free == -1) {
+ pr_warn("Exceeded Max IO APICS\n");
+ return -ENOSPC;
+ }
+
+ ir_ioapic[free].bus = bus;
+ ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
+ ir_ioapic[free].iommu = iommu;
+ ir_ioapic[free].id = scope->enumeration_id;
+ pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
+ scope->enumeration_id, drhd->address, iommu->seq_id);
+
+ return 0;
}
static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
struct intel_iommu *iommu)
{
+ int ret = 0;
struct acpi_dmar_hardware_unit *drhd;
struct acpi_dmar_device_scope *scope;
void *start, *end;
drhd = (struct acpi_dmar_hardware_unit *)header;
-
start = (void *)(drhd + 1);
end = ((void *)drhd) + header->length;
- while (start < end) {
+ while (start < end && ret == 0) {
scope = start;
- if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
- if (ir_ioapic_num == MAX_IO_APICS) {
- printk(KERN_WARNING "Exceeded Max IO APICS\n");
- return -1;
- }
-
- printk(KERN_INFO "IOAPIC id %d under DRHD base "
- " 0x%Lx IOMMU %d\n", scope->enumeration_id,
- drhd->address, iommu->seq_id);
+ if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
+ ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
+ else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
+ ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
+ start += scope->length;
+ }
- ir_parse_one_ioapic_scope(scope, iommu);
- } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
- if (ir_hpet_num == MAX_HPET_TBS) {
- printk(KERN_WARNING "Exceeded Max HPET blocks\n");
- return -1;
- }
+ return ret;
+}
- printk(KERN_INFO "HPET id %d under DRHD base"
- " 0x%Lx\n", scope->enumeration_id,
- drhd->address);
+static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
+{
+ int i;
- ir_parse_one_hpet_scope(scope, iommu);
- }
- start += scope->length;
- }
+ for (i = 0; i < MAX_HPET_TBS; i++)
+ if (ir_hpet[i].iommu == iommu)
+ ir_hpet[i].iommu = NULL;
- return 0;
+ for (i = 0; i < MAX_IO_APICS; i++)
+ if (ir_ioapic[i].iommu == iommu)
+ ir_ioapic[i].iommu = NULL;
}
/*
@@ -1171,3 +1213,86 @@ struct irq_remap_ops intel_irq_remap_ops = {
.msi_setup_irq = intel_msi_setup_irq,
.alloc_hpet_msi = intel_alloc_hpet_msi,
};
+
+/*
+ * Support of Interrupt Remapping Unit Hotplug
+ */
+static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
+{
+ int ret;
+ int eim = x2apic_enabled();
+
+ if (eim && !ecap_eim_support(iommu->ecap)) {
+ pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
+ iommu->reg_phys, iommu->ecap);
+ return -ENODEV;
+ }
+
+ if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
+ pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
+ iommu->reg_phys);
+ return -ENODEV;
+ }
+
+ /* TODO: check all IOAPICs are covered by IOMMU */
+
+ /* Setup Interrupt-remapping now. */
+ ret = intel_setup_irq_remapping(iommu);
+ if (ret) {
+ pr_err("DRHD %Lx: failed to allocate resource\n",
+ iommu->reg_phys);
+ ir_remove_ioapic_hpet_scope(iommu);
+ return ret;
+ }
+
+ if (!iommu->qi) {
+ /* Clear previous faults. */
+ dmar_fault(-1, iommu);
+ iommu_disable_irq_remapping(iommu);
+ dmar_disable_qi(iommu);
+ }
+
+ /* Enable queued invalidation */
+ ret = dmar_enable_qi(iommu);
+ if (!ret) {
+ iommu_set_irq_remapping(iommu, eim);
+ } else {
+ pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n",
+ iommu->reg_phys, iommu->ecap, ret);
+ intel_teardown_irq_remapping(iommu);
+ ir_remove_ioapic_hpet_scope(iommu);
+ }
+
+ return ret;
+}
+
+int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
+{
+ int ret = 0;
+ struct intel_iommu *iommu = dmaru->iommu;
+
+ if (!irq_remapping_enabled)
+ return 0;
+ if (iommu == NULL)
+ return -EINVAL;
+ if (!ecap_ir_support(iommu->ecap))
+ return 0;
+
+ if (insert) {
+ if (!iommu->ir_table)
+ ret = dmar_ir_add(dmaru, iommu);
+ } else {
+ if (iommu->ir_table) {
+ if (!bitmap_empty(iommu->ir_table->bitmap,
+ INTR_REMAP_TABLE_ENTRIES)) {
+ ret = -EBUSY;
+ } else {
+ iommu_disable_irq_remapping(iommu);
+ intel_teardown_irq_remapping(iommu);
+ ir_remove_ioapic_hpet_scope(iommu);
+ }
+ }
+ }
+
+ return ret;
+}
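
A minimal caller sketch (not part of the patch) of the semantics of dmar_ir_hotplug() above; the wrapper function and its name are illustrative, not the kernel's actual hot-plug call chain. On removal, -EBUSY signals that interrupt remap table entries are still allocated and the unit cannot be torn down yet.

/* Hypothetical wrapper; only dmar_ir_hotplug() comes from the patch. */
static int example_drhd_event(struct dmar_drhd_unit *dmaru, bool add)
{
	int ret = dmar_ir_hotplug(dmaru, add);

	if (ret == -EBUSY)
		pr_warn("DRHD removal deferred: IR table entries still in use\n");

	return ret;
}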
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index ed8b04867b1f..1bd63352ab17 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -818,7 +818,15 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
kfree(nb);
return err;
}
- return bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
+
+ err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
+ if (err) {
+ bus_unregister_notifier(bus, nb);
+ kfree(nb);
+ return err;
+ }
+
+ return 0;
}
/**
@@ -836,13 +844,19 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
*/
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
+ int err;
+
if (bus->iommu_ops != NULL)
return -EBUSY;
bus->iommu_ops = ops;
/* Do IOMMU specific setup for this bus-type */
- return iommu_bus_init(bus, ops);
+ err = iommu_bus_init(bus, ops);
+ if (err)
+ bus->iommu_ops = NULL;
+
+ return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
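
For reference, a usage sketch of the hardened bus_set_iommu(): this mirrors the platform-bus registration the OMAP driver performs later in this series; example_iommu_ops is a placeholder for a driver's own iommu_ops, not a real symbol.

static const struct iommu_ops example_iommu_ops;	/* placeholder ops */

static int __init example_iommu_init(void)
{
	int ret = bus_set_iommu(&platform_bus_type, &example_iommu_ops);

	/*
	 * On failure bus->iommu_ops is now rolled back to NULL, so a
	 * later retry (possibly with different ops) no longer hits the
	 * -EBUSY check with half-initialized state.
	 */
	return ret;
}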
@@ -1124,6 +1138,48 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
}
EXPORT_SYMBOL_GPL(iommu_unmap);
+size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot)
+{
+ struct scatterlist *s;
+ size_t mapped = 0;
+ unsigned int i, min_pagesz;
+ int ret;
+
+ if (unlikely(domain->ops->pgsize_bitmap == 0UL))
+ return 0;
+
+ min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+ for_each_sg(sg, s, nents, i) {
+ phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+
+ /*
+ * We are mapping on IOMMU page boundaries, so offset within
+ * the page must be 0. However, the IOMMU may support pages
+ * smaller than PAGE_SIZE, so s->offset may still represent
+ * an offset of that boundary within the CPU page.
+ */
+ if (!IS_ALIGNED(s->offset, min_pagesz))
+ goto out_err;
+
+ ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
+ if (ret)
+ goto out_err;
+
+ mapped += s->length;
+ }
+
+ return mapped;
+
+out_err:
+ /* undo mappings already done */
+ iommu_unmap(domain, iova, mapped);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(default_iommu_map_sg);
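
A caller-side sketch, assuming a domain whose driver wires .map_sg = default_iommu_map_sg as the drivers below do; the helper name and two-page setup are illustrative. Note the all-or-nothing contract: on any failure the partial mappings are undone and 0 is returned.

static int example_map_two_pages(struct iommu_domain *domain,
				 unsigned long iova,
				 struct page *p0, struct page *p1)
{
	struct scatterlist sg[2];
	size_t mapped;

	sg_init_table(sg, 2);
	sg_set_page(&sg[0], p0, PAGE_SIZE, 0);
	sg_set_page(&sg[1], p1, PAGE_SIZE, 0);

	mapped = domain->ops->map_sg(domain, iova, sg, 2,
				     IOMMU_READ | IOMMU_WRITE);

	/* 0 means failure; partial mappings were already unmapped */
	return mapped == 2 * PAGE_SIZE ? 0 : -ENOMEM;
}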
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot)
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 7dab5cbcc775..99effbb17191 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1127,6 +1127,7 @@ static const struct iommu_ops ipmmu_ops = {
.detach_dev = ipmmu_detach_device,
.map = ipmmu_map,
.unmap = ipmmu_unmap,
+ .map_sg = default_iommu_map_sg,
.iova_to_phys = ipmmu_iova_to_phys,
.add_device = ipmmu_add_device,
.remove_device = ipmmu_remove_device,
@@ -1184,7 +1185,7 @@ static int ipmmu_probe(struct platform_device *pdev)
dev_name(&pdev->dev), mmu);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
- return irq;
+ return ret;
}
ipmmu_device_reset(mmu);
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 6e3dcc289d59..e1b05379ca0e 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -73,8 +73,7 @@ fail:
static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{
- if (drvdata->clk)
- clk_disable(drvdata->clk);
+ clk_disable(drvdata->clk);
clk_disable(drvdata->pclk);
}
@@ -681,6 +680,7 @@ static const struct iommu_ops msm_iommu_ops = {
.detach_dev = msm_iommu_detach_dev,
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
+ .map_sg = default_iommu_map_sg,
.iova_to_phys = msm_iommu_iova_to_phys,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
};
diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c
index 61def7cb5263..b6d01f97e537 100644
--- a/drivers/iommu/msm_iommu_dev.c
+++ b/drivers/iommu/msm_iommu_dev.c
@@ -131,7 +131,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
struct clk *iommu_clk;
struct clk *iommu_pclk;
struct msm_iommu_drvdata *drvdata;
- struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data;
+ struct msm_iommu_dev *iommu_dev = dev_get_platdata(&pdev->dev);
void __iomem *regs_base;
int ret, irq, par;
@@ -224,8 +224,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, drvdata);
- if (iommu_clk)
- clk_disable(iommu_clk);
+ clk_disable(iommu_clk);
clk_disable(iommu_pclk);
@@ -264,7 +263,7 @@ static int msm_iommu_remove(struct platform_device *pdev)
static int msm_iommu_ctx_probe(struct platform_device *pdev)
{
- struct msm_iommu_ctx_dev *c = pdev->dev.platform_data;
+ struct msm_iommu_ctx_dev *c = dev_get_platdata(&pdev->dev);
struct msm_iommu_drvdata *drvdata;
struct msm_iommu_ctx_drvdata *ctx_drvdata;
int i, ret;
@@ -323,8 +322,7 @@ static int msm_iommu_ctx_probe(struct platform_device *pdev)
SET_NSCFG(drvdata->base, mid, 3);
}
- if (drvdata->clk)
- clk_disable(drvdata->clk);
+ clk_disable(drvdata->clk);
clk_disable(drvdata->pclk);
dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num);
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index 531658d17333..f3d20a2039d2 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -10,45 +10,35 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
#include <linux/err.h>
-#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
-#include <linux/platform_device.h>
#include <linux/debugfs.h>
-#include <linux/omap-iommu.h>
#include <linux/platform_data/iommu-omap.h>
#include "omap-iopgtable.h"
#include "omap-iommu.h"
-#define MAXCOLUMN 100 /* for short messages */
-
static DEFINE_MUTEX(iommu_debug_lock);
static struct dentry *iommu_debug_root;
-static ssize_t debug_read_ver(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
+static inline bool is_omap_iommu_detached(struct omap_iommu *obj)
{
- u32 ver = omap_iommu_arch_version();
- char buf[MAXCOLUMN], *p = buf;
-
- p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf , ver & 0xf);
-
- return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+ return !obj->domain;
}
static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct device *dev = file->private_data;
- struct omap_iommu *obj = dev_to_omap_iommu(dev);
+ struct omap_iommu *obj = file->private_data;
char *p, *buf;
ssize_t bytes;
+ if (is_omap_iommu_detached(obj))
+ return -EPERM;
+
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -68,11 +58,13 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct device *dev = file->private_data;
- struct omap_iommu *obj = dev_to_omap_iommu(dev);
+ struct omap_iommu *obj = file->private_data;
char *p, *buf;
ssize_t bytes, rest;
+ if (is_omap_iommu_detached(obj))
+ return -EPERM;
+
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -93,133 +85,69 @@ static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
return bytes;
}
-static ssize_t debug_write_pagetable(struct file *file,
- const char __user *userbuf, size_t count, loff_t *ppos)
+static void dump_ioptable(struct seq_file *s)
{
- struct iotlb_entry e;
- struct cr_regs cr;
- int err;
- struct device *dev = file->private_data;
- struct omap_iommu *obj = dev_to_omap_iommu(dev);
- char buf[MAXCOLUMN], *p = buf;
-
- count = min(count, sizeof(buf));
-
- mutex_lock(&iommu_debug_lock);
- if (copy_from_user(p, userbuf, count)) {
- mutex_unlock(&iommu_debug_lock);
- return -EFAULT;
- }
-
- sscanf(p, "%x %x", &cr.cam, &cr.ram);
- if (!cr.cam || !cr.ram) {
- mutex_unlock(&iommu_debug_lock);
- return -EINVAL;
- }
-
- omap_iotlb_cr_to_e(&cr, &e);
- err = omap_iopgtable_store_entry(obj, &e);
- if (err)
- dev_err(obj->dev, "%s: fail to store cr\n", __func__);
-
- mutex_unlock(&iommu_debug_lock);
- return count;
-}
-
-#define dump_ioptable_entry_one(lv, da, val) \
- ({ \
- int __err = 0; \
- ssize_t bytes; \
- const int maxcol = 22; \
- const char *str = "%d: %08x %08x\n"; \
- bytes = snprintf(p, maxcol, str, lv, da, val); \
- p += bytes; \
- len -= bytes; \
- if (len < maxcol) \
- __err = -ENOMEM; \
- __err; \
- })
-
-static ssize_t dump_ioptable(struct omap_iommu *obj, char *buf, ssize_t len)
-{
- int i;
- u32 *iopgd;
- char *p = buf;
+ int i, j;
+ u32 da;
+ u32 *iopgd, *iopte;
+ struct omap_iommu *obj = s->private;
spin_lock(&obj->page_table_lock);
iopgd = iopgd_offset(obj, 0);
for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) {
- int j, err;
- u32 *iopte;
- u32 da;
-
if (!*iopgd)
continue;
if (!(*iopgd & IOPGD_TABLE)) {
da = i << IOPGD_SHIFT;
-
- err = dump_ioptable_entry_one(1, da, *iopgd);
- if (err)
- goto out;
+ seq_printf(s, "1: 0x%08x 0x%08x\n", da, *iopgd);
continue;
}
iopte = iopte_offset(iopgd, 0);
-
for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) {
if (!*iopte)
continue;
da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT);
- err = dump_ioptable_entry_one(2, da, *iopgd);
- if (err)
- goto out;
+ seq_printf(s, "2: 0x%08x 0x%08x\n", da, *iopte);
}
}
-out:
- spin_unlock(&obj->page_table_lock);
- return p - buf;
+ spin_unlock(&obj->page_table_lock);
}
-static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
+static int debug_read_pagetable(struct seq_file *s, void *data)
{
- struct device *dev = file->private_data;
- struct omap_iommu *obj = dev_to_omap_iommu(dev);
- char *p, *buf;
- size_t bytes;
+ struct omap_iommu *obj = s->private;
- buf = (char *)__get_free_page(GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- p = buf;
-
- p += sprintf(p, "L: %8s %8s\n", "da:", "pa:");
- p += sprintf(p, "-----------------------------------------\n");
+ if (is_omap_iommu_detached(obj))
+ return -EPERM;
mutex_lock(&iommu_debug_lock);
- bytes = PAGE_SIZE - (p - buf);
- p += dump_ioptable(obj, p, bytes);
-
- bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+ seq_printf(s, "L: %8s %8s\n", "da:", "pte:");
+ seq_puts(s, "--------------------------\n");
+ dump_ioptable(s);
mutex_unlock(&iommu_debug_lock);
- free_page((unsigned long)buf);
- return bytes;
+ return 0;
}
-#define DEBUG_FOPS(name) \
- static const struct file_operations debug_##name##_fops = { \
- .open = simple_open, \
- .read = debug_read_##name, \
- .write = debug_write_##name, \
- .llseek = generic_file_llseek, \
- };
+#define DEBUG_SEQ_FOPS_RO(name) \
+ static int debug_open_##name(struct inode *inode, struct file *file) \
+ { \
+ return single_open(file, debug_read_##name, inode->i_private); \
+ } \
+ \
+ static const struct file_operations debug_##name##_fops = { \
+ .open = debug_open_##name, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ }
#define DEBUG_FOPS_RO(name) \
static const struct file_operations debug_##name##_fops = { \
@@ -228,103 +156,63 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
.llseek = generic_file_llseek, \
};
-DEBUG_FOPS_RO(ver);
DEBUG_FOPS_RO(regs);
DEBUG_FOPS_RO(tlb);
-DEBUG_FOPS(pagetable);
+DEBUG_SEQ_FOPS_RO(pagetable);
#define __DEBUG_ADD_FILE(attr, mode) \
{ \
struct dentry *dent; \
- dent = debugfs_create_file(#attr, mode, parent, \
- dev, &debug_##attr##_fops); \
+ dent = debugfs_create_file(#attr, mode, obj->debug_dir, \
+ obj, &debug_##attr##_fops); \
if (!dent) \
- return -ENOMEM; \
+ goto err; \
}
-#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600)
#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)
-static int iommu_debug_register(struct device *dev, void *data)
+void omap_iommu_debugfs_add(struct omap_iommu *obj)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_iommu *obj = platform_get_drvdata(pdev);
- struct omap_iommu_arch_data *arch_data;
- struct dentry *d, *parent;
-
- if (!obj || !obj->dev)
- return -EINVAL;
-
- arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
- if (!arch_data)
- return -ENOMEM;
-
- arch_data->iommu_dev = obj;
+ struct dentry *d;
- dev->archdata.iommu = arch_data;
+ if (!iommu_debug_root)
+ return;
- d = debugfs_create_dir(obj->name, iommu_debug_root);
- if (!d)
- goto nomem;
- parent = d;
+ obj->debug_dir = debugfs_create_dir(obj->name, iommu_debug_root);
+ if (!obj->debug_dir)
+ return;
- d = debugfs_create_u8("nr_tlb_entries", 400, parent,
+ d = debugfs_create_u8("nr_tlb_entries", 0400, obj->debug_dir,
(u8 *)&obj->nr_tlb_entries);
if (!d)
- goto nomem;
+ return;
- DEBUG_ADD_FILE_RO(ver);
DEBUG_ADD_FILE_RO(regs);
DEBUG_ADD_FILE_RO(tlb);
- DEBUG_ADD_FILE(pagetable);
+ DEBUG_ADD_FILE_RO(pagetable);
- return 0;
+ return;
-nomem:
- kfree(arch_data);
- return -ENOMEM;
+err:
+ debugfs_remove_recursive(obj->debug_dir);
}
-static int iommu_debug_unregister(struct device *dev, void *data)
+void omap_iommu_debugfs_remove(struct omap_iommu *obj)
{
- if (!dev->archdata.iommu)
- return 0;
-
- kfree(dev->archdata.iommu);
+ if (!obj->debug_dir)
+ return;
- dev->archdata.iommu = NULL;
-
- return 0;
+ debugfs_remove_recursive(obj->debug_dir);
}
-static int __init iommu_debug_init(void)
+void __init omap_iommu_debugfs_init(void)
{
- struct dentry *d;
- int err;
-
- d = debugfs_create_dir("iommu", NULL);
- if (!d)
- return -ENOMEM;
- iommu_debug_root = d;
-
- err = omap_foreach_iommu_device(d, iommu_debug_register);
- if (err)
- goto err_out;
- return 0;
-
-err_out:
- debugfs_remove_recursive(iommu_debug_root);
- return err;
+ iommu_debug_root = debugfs_create_dir("omap_iommu", NULL);
+ if (!iommu_debug_root)
+ pr_err("can't create debugfs dir\n");
}
-module_init(iommu_debug_init)
-static void __exit iommu_debugfs_exit(void)
+void __exit omap_iommu_debugfs_exit(void)
{
- debugfs_remove_recursive(iommu_debug_root);
- omap_foreach_iommu_device(NULL, iommu_debug_unregister);
+ debugfs_remove(iommu_debug_root);
}
-module_exit(iommu_debugfs_exit)
-
-MODULE_DESCRIPTION("omap iommu: debugfs interface");
-MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 36278870e84a..bbb7dcef02d3 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -76,53 +76,23 @@ struct iotlb_lock {
short vict;
};
-/* accommodate the difference between omap1 and omap2/3 */
-static const struct iommu_functions *arch_iommu;
-
static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
/**
- * omap_install_iommu_arch - Install archtecure specific iommu functions
- * @ops: a pointer to architecture specific iommu functions
- *
- * There are several kind of iommu algorithm(tlb, pagetable) among
- * omap series. This interface installs such an iommu algorighm.
- **/
-int omap_install_iommu_arch(const struct iommu_functions *ops)
-{
- if (arch_iommu)
- return -EBUSY;
-
- arch_iommu = ops;
- return 0;
-}
-EXPORT_SYMBOL_GPL(omap_install_iommu_arch);
-
-/**
- * omap_uninstall_iommu_arch - Uninstall archtecure specific iommu functions
- * @ops: a pointer to architecture specific iommu functions
- *
- * This interface uninstalls the iommu algorighm installed previously.
- **/
-void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
-{
- if (arch_iommu != ops)
- pr_err("%s: not your arch\n", __func__);
-
- arch_iommu = NULL;
-}
-EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
-
-/**
* omap_iommu_save_ctx - Save registers for pm off-mode support
* @dev: client device
**/
void omap_iommu_save_ctx(struct device *dev)
{
struct omap_iommu *obj = dev_to_omap_iommu(dev);
+ u32 *p = obj->ctx;
+ int i;
- arch_iommu->save_ctx(obj);
+ for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
+ p[i] = iommu_read_reg(obj, i * sizeof(u32));
+ dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
+ }
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
@@ -133,28 +103,74 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
void omap_iommu_restore_ctx(struct device *dev)
{
struct omap_iommu *obj = dev_to_omap_iommu(dev);
+ u32 *p = obj->ctx;
+ int i;
- arch_iommu->restore_ctx(obj);
+ for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
+ iommu_write_reg(obj, p[i], i * sizeof(u32));
+ dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
+ }
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
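
A hypothetical client-side sketch of the now open-coded save/restore pair: a driver whose IOMMU sits in a power domain that loses context in off-mode brackets the transition with these calls. The suspend/resume hook names are illustrative.

static int example_client_suspend(struct device *dev)
{
	omap_iommu_save_ctx(dev);	/* snapshot MMU registers to obj->ctx */
	return 0;
}

static int example_client_resume(struct device *dev)
{
	omap_iommu_restore_ctx(dev);	/* replay the snapshot after power-up */
	return 0;
}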
-/**
- * omap_iommu_arch_version - Return running iommu arch version
- **/
-u32 omap_iommu_arch_version(void)
+static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
- return arch_iommu->version;
+ u32 l = iommu_read_reg(obj, MMU_CNTL);
+
+ if (on)
+ iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
+ else
+ iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);
+
+ l &= ~MMU_CNTL_MASK;
+ if (on)
+ l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
+ else
+ l |= (MMU_CNTL_MMU_EN);
+
+ iommu_write_reg(obj, l, MMU_CNTL);
+}
+
+static int omap2_iommu_enable(struct omap_iommu *obj)
+{
+ u32 l, pa;
+
+ if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
+ return -EINVAL;
+
+ pa = virt_to_phys(obj->iopgd);
+ if (!IS_ALIGNED(pa, SZ_16K))
+ return -EINVAL;
+
+ l = iommu_read_reg(obj, MMU_REVISION);
+ dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
+ (l >> 4) & 0xf, l & 0xf);
+
+ iommu_write_reg(obj, pa, MMU_TTB);
+
+ if (obj->has_bus_err_back)
+ iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
+
+ __iommu_set_twl(obj, true);
+
+ return 0;
+}
+
+static void omap2_iommu_disable(struct omap_iommu *obj)
+{
+ u32 l = iommu_read_reg(obj, MMU_CNTL);
+
+ l &= ~MMU_CNTL_MASK;
+ iommu_write_reg(obj, l, MMU_CNTL);
+
+ dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}
-EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
static int iommu_enable(struct omap_iommu *obj)
{
int err;
struct platform_device *pdev = to_platform_device(obj->dev);
- struct iommu_platform_data *pdata = pdev->dev.platform_data;
-
- if (!arch_iommu)
- return -ENODEV;
+ struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
if (pdata && pdata->deassert_reset) {
err = pdata->deassert_reset(pdev, pdata->reset_name);
@@ -166,7 +182,7 @@ static int iommu_enable(struct omap_iommu *obj)
pm_runtime_get_sync(obj->dev);
- err = arch_iommu->enable(obj);
+ err = omap2_iommu_enable(obj);
return err;
}
@@ -174,9 +190,9 @@ static int iommu_enable(struct omap_iommu *obj)
static void iommu_disable(struct omap_iommu *obj)
{
struct platform_device *pdev = to_platform_device(obj->dev);
- struct iommu_platform_data *pdata = pdev->dev.platform_data;
+ struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
- arch_iommu->disable(obj);
+ omap2_iommu_disable(obj);
pm_runtime_put_sync(obj->dev);
@@ -187,44 +203,51 @@ static void iommu_disable(struct omap_iommu *obj)
/*
* TLB operations
*/
-void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
-{
- BUG_ON(!cr || !e);
-
- arch_iommu->cr_to_e(cr, e);
-}
-EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);
-
static inline int iotlb_cr_valid(struct cr_regs *cr)
{
if (!cr)
return -EINVAL;
- return arch_iommu->cr_valid(cr);
-}
-
-static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
- struct iotlb_entry *e)
-{
- if (!e)
- return NULL;
-
- return arch_iommu->alloc_cr(obj, e);
+ return cr->cam & MMU_CAM_V;
}
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
- return arch_iommu->cr_to_virt(cr);
+ u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
+ u32 mask = get_cam_va_mask(cr->cam & page_size);
+
+ return cr->cam & mask;
}
static u32 get_iopte_attr(struct iotlb_entry *e)
{
- return arch_iommu->get_pte_attr(e);
+ u32 attr;
+
+ attr = e->mixed << 5;
+ attr |= e->endian;
+ attr |= e->elsz >> 3;
+ attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
+ (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
+ return attr;
}
static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
- return arch_iommu->fault_isr(obj, da);
+ u32 status, fault_addr;
+
+ status = iommu_read_reg(obj, MMU_IRQSTATUS);
+ status &= MMU_IRQ_MASK;
+ if (!status) {
+ *da = 0;
+ return 0;
+ }
+
+ fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
+ *da = fault_addr;
+
+ iommu_write_reg(obj, status, MMU_IRQSTATUS);
+
+ return status;
}
static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
@@ -250,31 +273,19 @@ static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
- arch_iommu->tlb_read_cr(obj, cr);
+ cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
+ cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}
static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
- arch_iommu->tlb_load_cr(obj, cr);
+ iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
+ iommu_write_reg(obj, cr->ram, MMU_RAM);
iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
iommu_write_reg(obj, 1, MMU_LD_TLB);
}
-/**
- * iotlb_dump_cr - Dump an iommu tlb entry into buf
- * @obj: target iommu
- * @cr: contents of cam and ram register
- * @buf: output buffer
- **/
-static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
- char *buf)
-{
- BUG_ON(!cr || !buf);
-
- return arch_iommu->dump_cr(obj, cr, buf);
-}
-
/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
@@ -289,12 +300,36 @@ static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
return cr;
}
+#ifdef PREFETCH_IOTLB
+static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
+ struct iotlb_entry *e)
+{
+ struct cr_regs *cr;
+
+ if (!e)
+ return NULL;
+
+ if (e->da & ~(get_cam_va_mask(e->pgsz))) {
+ dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
+ e->da);
+ return ERR_PTR(-EINVAL);
+ }
+
+ cr = kmalloc(sizeof(*cr), GFP_KERNEL);
+ if (!cr)
+ return ERR_PTR(-ENOMEM);
+
+ cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
+ cr->ram = e->pa | e->endian | e->elsz | e->mixed;
+
+ return cr;
+}
+
/**
* load_iotlb_entry - Set an iommu tlb entry
* @obj: target iommu
* @e: an iommu tlb entry info
**/
-#ifdef PREFETCH_IOTLB
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
int err = 0;
@@ -423,7 +458,45 @@ static void flush_iotlb_all(struct omap_iommu *obj)
pm_runtime_put_sync(obj->dev);
}
-#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)
+#ifdef CONFIG_OMAP_IOMMU_DEBUG
+
+#define pr_reg(name) \
+ do { \
+ ssize_t bytes; \
+ const char *str = "%20s: %08x\n"; \
+ const int maxcol = 32; \
+ bytes = snprintf(p, maxcol, str, __stringify(name), \
+ iommu_read_reg(obj, MMU_##name)); \
+ p += bytes; \
+ len -= bytes; \
+ if (len < maxcol) \
+ goto out; \
+ } while (0)
+
+static ssize_t
+omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
+{
+ char *p = buf;
+
+ pr_reg(REVISION);
+ pr_reg(IRQSTATUS);
+ pr_reg(IRQENABLE);
+ pr_reg(WALKING_ST);
+ pr_reg(CNTL);
+ pr_reg(FAULT_AD);
+ pr_reg(TTB);
+ pr_reg(LOCK);
+ pr_reg(LD_TLB);
+ pr_reg(CAM);
+ pr_reg(RAM);
+ pr_reg(GFLUSH);
+ pr_reg(FLUSH_ENTRY);
+ pr_reg(READ_CAM);
+ pr_reg(READ_RAM);
+ pr_reg(EMU_FAULT_AD);
+out:
+ return p - buf;
+}
ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
@@ -432,13 +505,12 @@ ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
pm_runtime_get_sync(obj->dev);
- bytes = arch_iommu->dump_ctx(obj, buf, bytes);
+ bytes = omap2_iommu_dump_ctx(obj, buf, bytes);
pm_runtime_put_sync(obj->dev);
return bytes;
}
-EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);
static int
__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
@@ -464,6 +536,24 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
}
/**
+ * iotlb_dump_cr - Dump an iommu tlb entry into buf
+ * @obj: target iommu
+ * @cr: contents of cam and ram register
+ * @buf: output buffer
+ **/
+static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
+ char *buf)
+{
+ char *p = buf;
+
+ /* FIXME: Need more detail analysis of cam/ram */
+ p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
+ (cr->cam & MMU_CAM_P) ? 1 : 0);
+
+ return p - buf;
+}
+
+/**
* omap_dump_tlb_entries - dump cr arrays to given buffer
* @obj: target iommu
* @buf: output buffer
@@ -488,16 +578,8 @@ size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
return p - buf;
}
-EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);
-
-int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
-{
- return driver_for_each_device(&omap_iommu_driver.driver,
- NULL, data, fn);
-}
-EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);
-#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */
+#endif /* CONFIG_OMAP_IOMMU_DEBUG */
/*
* H/W pagetable operations
@@ -680,7 +762,8 @@ iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
* @obj: target iommu
* @e: an iommu tlb entry info
**/
-int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
+static int
+omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
int err;
@@ -690,7 +773,6 @@ int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
prefetch_iotlb_entry(obj, e);
return err;
}
-EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);
/**
* iopgtable_lookup_entry - Lookup an iommu pte entry
@@ -819,8 +901,9 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
u32 *iopgd, *iopte;
struct omap_iommu *obj = data;
struct iommu_domain *domain = obj->domain;
+ struct omap_iommu_domain *omap_domain = domain->priv;
- if (!obj->refcount)
+ if (!omap_domain->iommu_dev)
return IRQ_NONE;
errs = iommu_report_fault(obj, &da);
@@ -880,13 +963,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
spin_lock(&obj->iommu_lock);
- /* an iommu device can only be attached once */
- if (++obj->refcount > 1) {
- dev_err(dev, "%s: already attached!\n", obj->name);
- err = -EBUSY;
- goto err_enable;
- }
-
obj->iopgd = iopgd;
err = iommu_enable(obj);
if (err)
@@ -899,7 +975,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
return obj;
err_enable:
- obj->refcount--;
spin_unlock(&obj->iommu_lock);
return ERR_PTR(err);
}
@@ -915,9 +990,7 @@ static void omap_iommu_detach(struct omap_iommu *obj)
spin_lock(&obj->iommu_lock);
- if (--obj->refcount == 0)
- iommu_disable(obj);
-
+ iommu_disable(obj);
obj->iopgd = NULL;
spin_unlock(&obj->iommu_lock);
@@ -934,7 +1007,7 @@ static int omap_iommu_probe(struct platform_device *pdev)
int irq;
struct omap_iommu *obj;
struct resource *res;
- struct iommu_platform_data *pdata = pdev->dev.platform_data;
+ struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *of = pdev->dev.of_node;
obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
@@ -981,6 +1054,8 @@ static int omap_iommu_probe(struct platform_device *pdev)
pm_runtime_irq_safe(obj->dev);
pm_runtime_enable(obj->dev);
+ omap_iommu_debugfs_add(obj);
+
dev_info(&pdev->dev, "%s registered\n", obj->name);
return 0;
}
@@ -990,6 +1065,7 @@ static int omap_iommu_remove(struct platform_device *pdev)
struct omap_iommu *obj = platform_get_drvdata(pdev);
iopgtable_clear_entry_all(obj);
+ omap_iommu_debugfs_remove(obj);
pm_runtime_disable(obj->dev);
@@ -1026,7 +1102,6 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
e->da = da;
e->pa = pa;
e->valid = MMU_CAM_V;
- /* FIXME: add OMAP1 support */
e->pgsz = pgsz;
e->endian = MMU_RAM_ENDIAN_LITTLE;
e->elsz = MMU_RAM_ELSZ_8;
@@ -1131,6 +1206,7 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
omap_domain->dev = NULL;
+ oiommu->domain = NULL;
}
static void omap_iommu_detach_dev(struct iommu_domain *domain,
@@ -1288,6 +1364,7 @@ static const struct iommu_ops omap_iommu_ops = {
.detach_dev = omap_iommu_detach_dev,
.map = omap_iommu_map,
.unmap = omap_iommu_unmap,
+ .map_sg = default_iommu_map_sg,
.iova_to_phys = omap_iommu_iova_to_phys,
.add_device = omap_iommu_add_device,
.remove_device = omap_iommu_remove_device,
@@ -1308,6 +1385,8 @@ static int __init omap_iommu_init(void)
bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
+ omap_iommu_debugfs_init();
+
return platform_driver_register(&omap_iommu_driver);
}
/* must be ready before omap3isp is probed */
@@ -1318,6 +1397,8 @@ static void __exit omap_iommu_exit(void)
kmem_cache_destroy(iopte_cachep);
platform_driver_unregister(&omap_iommu_driver);
+
+ omap_iommu_debugfs_exit();
}
module_exit(omap_iommu_exit);
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index 4f1b68c08c15..d736630df3c8 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -10,9 +10,8 @@
* published by the Free Software Foundation.
*/
-#if defined(CONFIG_ARCH_OMAP1)
-#error "iommu for this processor not implemented yet"
-#endif
+#ifndef _OMAP_IOMMU_H
+#define _OMAP_IOMMU_H
struct iotlb_entry {
u32 da;
@@ -30,10 +29,9 @@ struct omap_iommu {
const char *name;
void __iomem *regbase;
struct device *dev;
- void *isr_priv;
struct iommu_domain *domain;
+ struct dentry *debug_dir;
- unsigned int refcount;
spinlock_t iommu_lock; /* global for this whole object */
/*
@@ -67,34 +65,6 @@ struct cr_regs {
};
};
-/* architecture specific functions */
-struct iommu_functions {
- unsigned long version;
-
- int (*enable)(struct omap_iommu *obj);
- void (*disable)(struct omap_iommu *obj);
- void (*set_twl)(struct omap_iommu *obj, bool on);
- u32 (*fault_isr)(struct omap_iommu *obj, u32 *ra);
-
- void (*tlb_read_cr)(struct omap_iommu *obj, struct cr_regs *cr);
- void (*tlb_load_cr)(struct omap_iommu *obj, struct cr_regs *cr);
-
- struct cr_regs *(*alloc_cr)(struct omap_iommu *obj,
- struct iotlb_entry *e);
- int (*cr_valid)(struct cr_regs *cr);
- u32 (*cr_to_virt)(struct cr_regs *cr);
- void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e);
- ssize_t (*dump_cr)(struct omap_iommu *obj, struct cr_regs *cr,
- char *buf);
-
- u32 (*get_pte_attr)(struct iotlb_entry *e);
-
- void (*save_ctx)(struct omap_iommu *obj);
- void (*restore_ctx)(struct omap_iommu *obj);
- ssize_t (*dump_ctx)(struct omap_iommu *obj, char *buf, ssize_t len);
-};
-
-#ifdef CONFIG_IOMMU_API
/**
* dev_to_omap_iommu() - retrieves an omap iommu object from a user device
* @dev: iommu client device
@@ -105,7 +75,6 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
return arch_data->iommu_dev;
}
-#endif
/*
* MMU Register offsets
@@ -133,6 +102,28 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
/*
* MMU Register bit definitions
*/
+/* IRQSTATUS & IRQENABLE */
+#define MMU_IRQ_MULTIHITFAULT (1 << 4)
+#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
+#define MMU_IRQ_EMUMISS (1 << 2)
+#define MMU_IRQ_TRANSLATIONFAULT (1 << 1)
+#define MMU_IRQ_TLBMISS (1 << 0)
+
+#define __MMU_IRQ_FAULT \
+ (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
+#define MMU_IRQ_MASK \
+ (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
+#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
+#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)
+
+/* MMU_CNTL */
+#define MMU_CNTL_SHIFT 1
+#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT)
+#define MMU_CNTL_EML_TLB (1 << 3)
+#define MMU_CNTL_TWL_EN (1 << 2)
+#define MMU_CNTL_MMU_EN (1 << 1)
+
+/* CAM */
#define MMU_CAM_VATAG_SHIFT 12
#define MMU_CAM_VATAG_MASK \
((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT)
@@ -144,6 +135,7 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
#define MMU_CAM_PGSZ_4K (2 << 0)
#define MMU_CAM_PGSZ_16M (3 << 0)
+/* RAM */
#define MMU_RAM_PADDR_SHIFT 12
#define MMU_RAM_PADDR_MASK \
((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT)
@@ -165,6 +157,12 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
#define MMU_GP_REG_BUS_ERR_BACK_EN 0x1
+#define get_cam_va_mask(pgsz) \
+ (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \
+ ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \
+ ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \
+ ((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0)
+
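A worked illustration (assumed values, not from the patch) of how get_cam_va_mask() feeds the alignment check in iotlb_alloc_cr() earlier in this series: for a 1 MiB entry only bits 31:20 may be set in the device address.

u32 mask = get_cam_va_mask(MMU_CAM_PGSZ_1M);	/* 0xfff00000 */

/* da 0x80100000 is 1 MiB aligned: (da & ~mask) == 0, accepted  */
/* da 0x80123000 is not:           (da & ~mask) == 0x23000, rejected */
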
/*
* utilities for super page(16MB, 1MB, 64KB and 4KB)
*/
@@ -192,27 +190,25 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
/*
* global functions
*/
-extern u32 omap_iommu_arch_version(void);
-
-extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e);
-
-extern int
-omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e);
-
-extern void omap_iommu_save_ctx(struct device *dev);
-extern void omap_iommu_restore_ctx(struct device *dev);
-
-extern int omap_foreach_iommu_device(void *data,
- int (*fn)(struct device *, void *));
-
-extern int omap_install_iommu_arch(const struct iommu_functions *ops);
-extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops);
-
+#ifdef CONFIG_OMAP_IOMMU_DEBUG
extern ssize_t
omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
extern size_t
omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);
+void omap_iommu_debugfs_init(void);
+void omap_iommu_debugfs_exit(void);
+
+void omap_iommu_debugfs_add(struct omap_iommu *obj);
+void omap_iommu_debugfs_remove(struct omap_iommu *obj);
+#else
+static inline void omap_iommu_debugfs_init(void) { }
+static inline void omap_iommu_debugfs_exit(void) { }
+
+static inline void omap_iommu_debugfs_add(struct omap_iommu *obj) { }
+static inline void omap_iommu_debugfs_remove(struct omap_iommu *obj) { }
+#endif
+
/*
* register accessors
*/
@@ -225,3 +221,5 @@ static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs)
{
__raw_writel(val, obj->regbase + offs);
}
+
+#endif /* _OMAP_IOMMU_H */
diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c
deleted file mode 100644
index 5e1ea3b0bf16..000000000000
--- a/drivers/iommu/omap-iommu2.c
+++ /dev/null
@@ -1,337 +0,0 @@
-/*
- * omap iommu: omap2/3 architecture specific functions
- *
- * Copyright (C) 2008-2009 Nokia Corporation
- *
- * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
- * Paul Mundt and Toshihiro Kobayashi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/err.h>
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/omap-iommu.h>
-#include <linux/slab.h>
-#include <linux/stringify.h>
-#include <linux/platform_data/iommu-omap.h>
-
-#include "omap-iommu.h"
-
-/*
- * omap2 architecture specific register bit definitions
- */
-#define IOMMU_ARCH_VERSION 0x00000011
-
-/* IRQSTATUS & IRQENABLE */
-#define MMU_IRQ_MULTIHITFAULT (1 << 4)
-#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
-#define MMU_IRQ_EMUMISS (1 << 2)
-#define MMU_IRQ_TRANSLATIONFAULT (1 << 1)
-#define MMU_IRQ_TLBMISS (1 << 0)
-
-#define __MMU_IRQ_FAULT \
- (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
-#define MMU_IRQ_MASK \
- (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
-#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
-#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)
-
-/* MMU_CNTL */
-#define MMU_CNTL_SHIFT 1
-#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT)
-#define MMU_CNTL_EML_TLB (1 << 3)
-#define MMU_CNTL_TWL_EN (1 << 2)
-#define MMU_CNTL_MMU_EN (1 << 1)
-
-#define get_cam_va_mask(pgsz) \
- (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \
- ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \
- ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \
- ((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0)
-
-/* IOMMU errors */
-#define OMAP_IOMMU_ERR_TLB_MISS (1 << 0)
-#define OMAP_IOMMU_ERR_TRANS_FAULT (1 << 1)
-#define OMAP_IOMMU_ERR_EMU_MISS (1 << 2)
-#define OMAP_IOMMU_ERR_TBLWALK_FAULT (1 << 3)
-#define OMAP_IOMMU_ERR_MULTIHIT_FAULT (1 << 4)
-
-static void __iommu_set_twl(struct omap_iommu *obj, bool on)
-{
- u32 l = iommu_read_reg(obj, MMU_CNTL);
-
- if (on)
- iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
- else
- iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);
-
- l &= ~MMU_CNTL_MASK;
- if (on)
- l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
- else
- l |= (MMU_CNTL_MMU_EN);
-
- iommu_write_reg(obj, l, MMU_CNTL);
-}
-
-
-static int omap2_iommu_enable(struct omap_iommu *obj)
-{
- u32 l, pa;
-
- if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
- return -EINVAL;
-
- pa = virt_to_phys(obj->iopgd);
- if (!IS_ALIGNED(pa, SZ_16K))
- return -EINVAL;
-
- l = iommu_read_reg(obj, MMU_REVISION);
- dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
- (l >> 4) & 0xf, l & 0xf);
-
- iommu_write_reg(obj, pa, MMU_TTB);
-
- if (obj->has_bus_err_back)
- iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
-
- __iommu_set_twl(obj, true);
-
- return 0;
-}
-
-static void omap2_iommu_disable(struct omap_iommu *obj)
-{
- u32 l = iommu_read_reg(obj, MMU_CNTL);
-
- l &= ~MMU_CNTL_MASK;
- iommu_write_reg(obj, l, MMU_CNTL);
-
- dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
-}
-
-static void omap2_iommu_set_twl(struct omap_iommu *obj, bool on)
-{
- __iommu_set_twl(obj, false);
-}
-
-static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra)
-{
- u32 stat, da;
- u32 errs = 0;
-
- stat = iommu_read_reg(obj, MMU_IRQSTATUS);
- stat &= MMU_IRQ_MASK;
- if (!stat) {
- *ra = 0;
- return 0;
- }
-
- da = iommu_read_reg(obj, MMU_FAULT_AD);
- *ra = da;
-
- if (stat & MMU_IRQ_TLBMISS)
- errs |= OMAP_IOMMU_ERR_TLB_MISS;
- if (stat & MMU_IRQ_TRANSLATIONFAULT)
- errs |= OMAP_IOMMU_ERR_TRANS_FAULT;
- if (stat & MMU_IRQ_EMUMISS)
- errs |= OMAP_IOMMU_ERR_EMU_MISS;
- if (stat & MMU_IRQ_TABLEWALKFAULT)
- errs |= OMAP_IOMMU_ERR_TBLWALK_FAULT;
- if (stat & MMU_IRQ_MULTIHITFAULT)
- errs |= OMAP_IOMMU_ERR_MULTIHIT_FAULT;
- iommu_write_reg(obj, stat, MMU_IRQSTATUS);
-
- return errs;
-}
-
-static void omap2_tlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
-{
- cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
- cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
-}
-
-static void omap2_tlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
-{
- iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
- iommu_write_reg(obj, cr->ram, MMU_RAM);
-}
-
-static u32 omap2_cr_to_virt(struct cr_regs *cr)
-{
- u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
- u32 mask = get_cam_va_mask(cr->cam & page_size);
-
- return cr->cam & mask;
-}
-
-static struct cr_regs *omap2_alloc_cr(struct omap_iommu *obj,
- struct iotlb_entry *e)
-{
- struct cr_regs *cr;
-
- if (e->da & ~(get_cam_va_mask(e->pgsz))) {
- dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
- e->da);
- return ERR_PTR(-EINVAL);
- }
-
- cr = kmalloc(sizeof(*cr), GFP_KERNEL);
- if (!cr)
- return ERR_PTR(-ENOMEM);
-
- cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
- cr->ram = e->pa | e->endian | e->elsz | e->mixed;
-
- return cr;
-}
-
-static inline int omap2_cr_valid(struct cr_regs *cr)
-{
- return cr->cam & MMU_CAM_V;
-}
-
-static u32 omap2_get_pte_attr(struct iotlb_entry *e)
-{
- u32 attr;
-
- attr = e->mixed << 5;
- attr |= e->endian;
- attr |= e->elsz >> 3;
- attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
- (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
- return attr;
-}
-
-static ssize_t
-omap2_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, char *buf)
-{
- char *p = buf;
-
- /* FIXME: Need more detail analysis of cam/ram */
- p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
- (cr->cam & MMU_CAM_P) ? 1 : 0);
-
- return p - buf;
-}
-
-#define pr_reg(name) \
- do { \
- ssize_t bytes; \
- const char *str = "%20s: %08x\n"; \
- const int maxcol = 32; \
- bytes = snprintf(p, maxcol, str, __stringify(name), \
- iommu_read_reg(obj, MMU_##name)); \
- p += bytes; \
- len -= bytes; \
- if (len < maxcol) \
- goto out; \
- } while (0)
-
-static ssize_t
-omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
-{
- char *p = buf;
-
- pr_reg(REVISION);
- pr_reg(IRQSTATUS);
- pr_reg(IRQENABLE);
- pr_reg(WALKING_ST);
- pr_reg(CNTL);
- pr_reg(FAULT_AD);
- pr_reg(TTB);
- pr_reg(LOCK);
- pr_reg(LD_TLB);
- pr_reg(CAM);
- pr_reg(RAM);
- pr_reg(GFLUSH);
- pr_reg(FLUSH_ENTRY);
- pr_reg(READ_CAM);
- pr_reg(READ_RAM);
- pr_reg(EMU_FAULT_AD);
-out:
- return p - buf;
-}
-
-static void omap2_iommu_save_ctx(struct omap_iommu *obj)
-{
- int i;
- u32 *p = obj->ctx;
-
- for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
- p[i] = iommu_read_reg(obj, i * sizeof(u32));
- dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
- }
-
- BUG_ON(p[0] != IOMMU_ARCH_VERSION);
-}
-
-static void omap2_iommu_restore_ctx(struct omap_iommu *obj)
-{
- int i;
- u32 *p = obj->ctx;
-
- for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
- iommu_write_reg(obj, p[i], i * sizeof(u32));
- dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
- }
-
- BUG_ON(p[0] != IOMMU_ARCH_VERSION);
-}
-
-static void omap2_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
-{
- e->da = cr->cam & MMU_CAM_VATAG_MASK;
- e->pa = cr->ram & MMU_RAM_PADDR_MASK;
- e->valid = cr->cam & MMU_CAM_V;
- e->pgsz = cr->cam & MMU_CAM_PGSZ_MASK;
- e->endian = cr->ram & MMU_RAM_ENDIAN_MASK;
- e->elsz = cr->ram & MMU_RAM_ELSZ_MASK;
- e->mixed = cr->ram & MMU_RAM_MIXED;
-}
-
-static const struct iommu_functions omap2_iommu_ops = {
- .version = IOMMU_ARCH_VERSION,
-
- .enable = omap2_iommu_enable,
- .disable = omap2_iommu_disable,
- .set_twl = omap2_iommu_set_twl,
- .fault_isr = omap2_iommu_fault_isr,
-
- .tlb_read_cr = omap2_tlb_read_cr,
- .tlb_load_cr = omap2_tlb_load_cr,
-
- .cr_to_e = omap2_cr_to_e,
- .cr_to_virt = omap2_cr_to_virt,
- .alloc_cr = omap2_alloc_cr,
- .cr_valid = omap2_cr_valid,
- .dump_cr = omap2_dump_cr,
-
- .get_pte_attr = omap2_get_pte_attr,
-
- .save_ctx = omap2_iommu_save_ctx,
- .restore_ctx = omap2_iommu_restore_ctx,
- .dump_ctx = omap2_iommu_dump_ctx,
-};
-
-static int __init omap2_iommu_init(void)
-{
- return omap_install_iommu_arch(&omap2_iommu_ops);
-}
-module_init(omap2_iommu_init);
-
-static void __exit omap2_iommu_exit(void)
-{
- omap_uninstall_iommu_arch(&omap2_iommu_ops);
-}
-module_exit(omap2_iommu_exit);
-
-MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
-MODULE_DESCRIPTION("omap iommu: omap2/3 architecture specific functions");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
new file mode 100644
index 000000000000..b2023af384b9
--- /dev/null
+++ b/drivers/iommu/rockchip-iommu.c
@@ -0,0 +1,1038 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/* MMU register offsets */
+#define RK_MMU_DTE_ADDR 0x00 /* Directory table address */
+#define RK_MMU_STATUS 0x04
+#define RK_MMU_COMMAND 0x08
+#define RK_MMU_PAGE_FAULT_ADDR 0x0C /* IOVA of last page fault */
+#define RK_MMU_ZAP_ONE_LINE 0x10 /* Shootdown one IOTLB entry */
+#define RK_MMU_INT_RAWSTAT 0x14 /* IRQ status ignoring mask */
+#define RK_MMU_INT_CLEAR 0x18 /* Acknowledge and re-arm irq */
+#define RK_MMU_INT_MASK 0x1C /* IRQ enable */
+#define RK_MMU_INT_STATUS 0x20 /* IRQ status after masking */
+#define RK_MMU_AUTO_GATING 0x24
+
+#define DTE_ADDR_DUMMY 0xCAFEBABE
+#define FORCE_RESET_TIMEOUT 100 /* ms */
+
+/* RK_MMU_STATUS fields */
+#define RK_MMU_STATUS_PAGING_ENABLED BIT(0)
+#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE BIT(1)
+#define RK_MMU_STATUS_STALL_ACTIVE BIT(2)
+#define RK_MMU_STATUS_IDLE BIT(3)
+#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4)
+#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5)
+#define RK_MMU_STATUS_STALL_NOT_ACTIVE BIT(31)
+
+/* RK_MMU_COMMAND command values */
+#define RK_MMU_CMD_ENABLE_PAGING 0 /* Enable memory translation */
+#define RK_MMU_CMD_DISABLE_PAGING 1 /* Disable memory translation */
+#define RK_MMU_CMD_ENABLE_STALL 2 /* Stall paging to allow other cmds */
+#define RK_MMU_CMD_DISABLE_STALL 3 /* Stop stalling; re-enables paging */
+#define RK_MMU_CMD_ZAP_CACHE 4 /* Shoot down entire IOTLB */
+#define RK_MMU_CMD_PAGE_FAULT_DONE 5 /* Clear page fault */
+#define RK_MMU_CMD_FORCE_RESET 6 /* Reset all registers */
+
+/* RK_MMU_INT_* register fields */
+#define RK_MMU_IRQ_PAGE_FAULT 0x01 /* page fault */
+#define RK_MMU_IRQ_BUS_ERROR 0x02 /* bus read error */
+#define RK_MMU_IRQ_MASK (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)
+
+#define NUM_DT_ENTRIES 1024
+#define NUM_PT_ENTRIES 1024
+
+#define SPAGE_ORDER 12
+#define SPAGE_SIZE (1 << SPAGE_ORDER)
+
+ /*
+ * Support mapping any size that fits in one page table:
+ * 4 KiB to 4 MiB
+ */
+#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
+
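How the bitmap encodes this range (an illustration, not part of the patch): bit k set in pgsize_bitmap advertises support for 2^k-byte mappings, and 0x007ff000 has bits 12 through 22 set.

unsigned long bitmap = RK_IOMMU_PGSIZE_BITMAP;

unsigned int min_sz = 1 << __ffs(bitmap);	/* 0x001000  = 4 KiB */
unsigned int max_sz = 1 << __fls(bitmap);	/* 0x400000 = 4 MiB */
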
+#define IOMMU_REG_POLL_COUNT_FAST 1000
+
+struct rk_iommu_domain {
+ struct list_head iommus;
+ u32 *dt; /* page directory table */
+ spinlock_t iommus_lock; /* lock for iommus list */
+ spinlock_t dt_lock; /* lock for modifying page directory table */
+};
+
+struct rk_iommu {
+ struct device *dev;
+ void __iomem *base;
+ int irq;
+ struct list_head node; /* entry in rk_iommu_domain.iommus */
+ struct iommu_domain *domain; /* domain to which iommu is attached */
+};
+
+static inline void rk_table_flush(u32 *va, unsigned int count)
+{
+ phys_addr_t pa_start = virt_to_phys(va);
+ phys_addr_t pa_end = virt_to_phys(va + count);
+ size_t size = pa_end - pa_start;
+
+ __cpuc_flush_dcache_area(va, size);
+ outer_flush_range(pa_start, pa_end);
+}
+
+/*
+ * Inspired by _wait_for in intel_drv.h
+ * This is NOT safe for use in interrupt context.
+ *
+ * Note that it's important that we check the condition again after having
+ * timed out, since the timeout could be due to preemption or similar and
+ * we've never had a chance to check the condition before the timeout.
+ */
+#define rk_wait_for(COND, MS) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
+ int ret__ = 0; \
+ while (!(COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = (COND) ? 0 : -ETIMEDOUT; \
+ break; \
+ } \
+ usleep_range(50, 100); \
+ } \
+ ret__; \
+})
+
+/*
+ * The Rockchip rk3288 iommu uses a 2-level page table.
+ * The first level is the "Directory Table" (DT).
+ * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
+ * to a "Page Table".
+ * The second level consists of up to 1024 Page Tables (PTs).
+ * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
+ * a 4 KB page of physical memory.
+ *
+ * The DT and each PT fit in a single 4 KB page (4 bytes * 1024 entries).
+ * Each iommu device has a MMU_DTE_ADDR register that contains the physical
+ * address of the start of the DT page.
+ *
+ * The structure of the page table is as follows:
+ *
+ * DT
+ * MMU_DTE_ADDR -> +-----+
+ * | |
+ * +-----+ PT
+ * | DTE | -> +-----+
+ * +-----+ | | Memory
+ * | | +-----+ Page
+ * | | | PTE | -> +-----+
+ * +-----+ +-----+ | |
+ * | | | |
+ * | | | |
+ * +-----+ | |
+ * | |
+ * | |
+ * +-----+
+ */
+
+/*
+ * Each DTE has a PT address and a valid bit:
+ * +---------------------+-----------+-+
+ * | PT address | Reserved |V|
+ * +---------------------+-----------+-+
+ * 31:12 - PT address (PTs always start on a 4 KB boundary)
+ * 11: 1 - Reserved
+ * 0 - 1 if PT @ PT address is valid
+ */
+#define RK_DTE_PT_ADDRESS_MASK 0xfffff000
+#define RK_DTE_PT_VALID BIT(0)
+
+static inline phys_addr_t rk_dte_pt_address(u32 dte)
+{
+ return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
+}
+
+static inline bool rk_dte_is_pt_valid(u32 dte)
+{
+ return dte & RK_DTE_PT_VALID;
+}
+
+static u32 rk_mk_dte(u32 *pt)
+{
+ phys_addr_t pt_phys = virt_to_phys(pt);
+ return (pt_phys & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
+}
+
+/*
+ * Each PTE has a Page address, some flags and a valid bit:
+ * +---------------------+---+-------+-+
+ * | Page address |Rsv| Flags |V|
+ * +---------------------+---+-------+-+
+ * 31:12 - Page address (Pages always start on a 4 KB boundary)
+ * 11: 9 - Reserved
+ * 8: 1 - Flags
+ * 8 - Read allocate - allocate cache space on read misses
+ * 7 - Read cache - enable cache & prefetch of data
+ * 6 - Write buffer - enable delaying writes on their way to memory
+ * 5 - Write allocate - allocate cache space on write misses
+ * 4 - Write cache - different writes can be merged together
+ * 3 - Override cache attributes
+ * if 1, bits 4-8 control cache attributes
+ * if 0, the system bus defaults are used
+ * 2 - Writable
+ * 1 - Readable
+ * 0 - 1 if Page @ Page address is valid
+ */
+#define RK_PTE_PAGE_ADDRESS_MASK 0xfffff000
+#define RK_PTE_PAGE_FLAGS_MASK 0x000001fe
+#define RK_PTE_PAGE_WRITABLE BIT(2)
+#define RK_PTE_PAGE_READABLE BIT(1)
+#define RK_PTE_PAGE_VALID BIT(0)
+
+static inline phys_addr_t rk_pte_page_address(u32 pte)
+{
+ return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
+}
+
+static inline bool rk_pte_is_page_valid(u32 pte)
+{
+ return pte & RK_PTE_PAGE_VALID;
+}
+
+/* TODO: set cache flags per prot IOMMU_CACHE */
+static u32 rk_mk_pte(phys_addr_t page, int prot)
+{
+ u32 flags = 0;
+ flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
+ flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
+ page &= RK_PTE_PAGE_ADDRESS_MASK;
+ return page | flags | RK_PTE_PAGE_VALID;
+}
+
+static u32 rk_mk_pte_invalid(u32 pte)
+{
+ return pte & ~RK_PTE_PAGE_VALID;
+}
+
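A small worked example of the PTE helpers (values assumed for illustration): mapping physical page 0x80000000 read-write sets the two permission flags plus the valid bit, and invalidation clears only the valid bit so the flags stay inspectable.

u32 pte = rk_mk_pte(0x80000000, IOMMU_READ | IOMMU_WRITE);
/* pte == 0x80000007: page address | readable | writable | valid */

pte = rk_mk_pte_invalid(pte);
/* pte == 0x80000006: valid bit cleared, flags preserved */
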
+/*
+ * rk3288 iova (IOMMU Virtual Address) format
+ * 31 22.21 12.11 0
+ * +-----------+-----------+-------------+
+ * | DTE index | PTE index | Page offset |
+ * +-----------+-----------+-------------+
+ * 31:22 - DTE index - index of DTE in DT
+ * 21:12 - PTE index - index of PTE in PT @ DTE.pt_address
+ * 11: 0 - Page offset - offset into page @ PTE.page_address
+ */
+#define RK_IOVA_DTE_MASK 0xffc00000
+#define RK_IOVA_DTE_SHIFT 22
+#define RK_IOVA_PTE_MASK 0x003ff000
+#define RK_IOVA_PTE_SHIFT 12
+#define RK_IOVA_PAGE_MASK 0x00000fff
+#define RK_IOVA_PAGE_SHIFT 0
+
+static u32 rk_iova_dte_index(dma_addr_t iova)
+{
+ return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
+}
+
+static u32 rk_iova_pte_index(dma_addr_t iova)
+{
+ return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
+}
+
+static u32 rk_iova_page_offset(dma_addr_t iova)
+{
+ return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
+}
+
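Putting these helpers together, a software walk of the same layout (an illustrative sketch mirroring rk_iommu_iova_to_phys() further below, minus locking). For example, iova 0x12345678 decomposes into dte_index 0x048, pte_index 0x345 and page_offset 0x678.

static phys_addr_t example_walk(u32 *dt, dma_addr_t iova)
{
	u32 dte = dt[rk_iova_dte_index(iova)];
	u32 *pt, pte;

	if (!rk_dte_is_pt_valid(dte))
		return 0;	/* no page table for this 4 MiB range */

	pt = (u32 *)phys_to_virt(rk_dte_pt_address(dte));
	pte = pt[rk_iova_pte_index(iova)];
	if (!rk_pte_is_page_valid(pte))
		return 0;	/* page not mapped */

	return rk_pte_page_address(pte) + rk_iova_page_offset(iova);
}
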
+static u32 rk_iommu_read(struct rk_iommu *iommu, u32 offset)
+{
+ return readl(iommu->base + offset);
+}
+
+static void rk_iommu_write(struct rk_iommu *iommu, u32 offset, u32 value)
+{
+ writel(value, iommu->base + offset);
+}
+
+static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
+{
+ writel(command, iommu->base + RK_MMU_COMMAND);
+}
+
+static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova,
+ size_t size)
+{
+ dma_addr_t iova_end = iova + size;
+ /*
+ * TODO(djkurtz): Figure out when it is more efficient to shootdown the
+ * entire iotlb rather than iterate over individual iovas.
+ */
+ for (; iova < iova_end; iova += SPAGE_SIZE)
+ rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova);
+}
+
+static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
+{
+ return rk_iommu_read(iommu, RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE;
+}
+
+static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
+{
+ return rk_iommu_read(iommu, RK_MMU_STATUS) &
+ RK_MMU_STATUS_PAGING_ENABLED;
+}
+
+static int rk_iommu_enable_stall(struct rk_iommu *iommu)
+{
+ int ret;
+
+ if (rk_iommu_is_stall_active(iommu))
+ return 0;
+
+ /* Stall can only be enabled if paging is enabled */
+ if (!rk_iommu_is_paging_enabled(iommu))
+ return 0;
+
+ rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
+
+ ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
+ if (ret)
+ dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
+ rk_iommu_read(iommu, RK_MMU_STATUS));
+
+ return ret;
+}
+
+static int rk_iommu_disable_stall(struct rk_iommu *iommu)
+{
+ int ret;
+
+ if (!rk_iommu_is_stall_active(iommu))
+ return 0;
+
+ rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
+
+ ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
+ if (ret)
+ dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
+ rk_iommu_read(iommu, RK_MMU_STATUS));
+
+ return ret;
+}
+
+static int rk_iommu_enable_paging(struct rk_iommu *iommu)
+{
+ int ret;
+
+ if (rk_iommu_is_paging_enabled(iommu))
+ return 0;
+
+ rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
+
+ ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
+ if (ret)
+ dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
+ rk_iommu_read(iommu, RK_MMU_STATUS));
+
+ return ret;
+}
+
+static int rk_iommu_disable_paging(struct rk_iommu *iommu)
+{
+ int ret;
+
+ if (!rk_iommu_is_paging_enabled(iommu))
+ return 0;
+
+ rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
+
+ ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
+ if (ret)
+ dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
+ rk_iommu_read(iommu, RK_MMU_STATUS));
+
+ return ret;
+}
+
+static int rk_iommu_force_reset(struct rk_iommu *iommu)
+{
+ int ret;
+ u32 dte_addr;
+
+ /*
+ * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
+ * and verifying that upper 5 nybbles are read back.
+ */
+ rk_iommu_write(iommu, RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
+
+ dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
+ if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
+ dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
+ return -EFAULT;
+ }
+
+ rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
+
+ ret = rk_wait_for(rk_iommu_read(iommu, RK_MMU_DTE_ADDR) == 0x00000000,
+ FORCE_RESET_TIMEOUT);
+ if (ret)
+ dev_err(iommu->dev, "FORCE_RESET command timed out\n");
+
+ return ret;
+}
+
+static void log_iova(struct rk_iommu *iommu, dma_addr_t iova)
+{
+ u32 dte_index, pte_index, page_offset;
+ u32 mmu_dte_addr;
+ phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
+ u32 *dte_addr;
+ u32 dte;
+ phys_addr_t pte_addr_phys = 0;
+ u32 *pte_addr = NULL;
+ u32 pte = 0;
+ phys_addr_t page_addr_phys = 0;
+ u32 page_flags = 0;
+
+ dte_index = rk_iova_dte_index(iova);
+ pte_index = rk_iova_pte_index(iova);
+ page_offset = rk_iova_page_offset(iova);
+
+ mmu_dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
+ mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
+
+ dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
+ dte_addr = phys_to_virt(dte_addr_phys);
+ dte = *dte_addr;
+
+ if (!rk_dte_is_pt_valid(dte))
+ goto print_it;
+
+ pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
+ pte_addr = phys_to_virt(pte_addr_phys);
+ pte = *pte_addr;
+
+ if (!rk_pte_is_page_valid(pte))
+ goto print_it;
+
+ page_addr_phys = rk_pte_page_address(pte) + page_offset;
+ page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;
+
+print_it:
+ dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
+ &iova, dte_index, pte_index, page_offset);
+ dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
+ &mmu_dte_addr_phys, &dte_addr_phys, dte,
+ rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
+ rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
+}
+
+static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
+{
+ struct rk_iommu *iommu = dev_id;
+ u32 status;
+ u32 int_status;
+ dma_addr_t iova;
+
+ int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS);
+ if (int_status == 0)
+ return IRQ_NONE;
+
+ iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR);
+
+ if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
+ int flags;
+
+ status = rk_iommu_read(iommu, RK_MMU_STATUS);
+ flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
+ IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
+
+ dev_err(iommu->dev, "Page fault at %pad of type %s\n",
+ &iova,
+ (flags == IOMMU_FAULT_WRITE) ? "write" : "read");
+
+ log_iova(iommu, iova);
+
+ /*
+ * Report page fault to any installed handlers.
+ * Ignore the return code, though, since we always zap cache
+ * and clear the page fault anyway.
+ */
+ if (iommu->domain)
+ report_iommu_fault(iommu->domain, iommu->dev, iova,
+ flags);
+ else
+ dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
+
+ rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
+ rk_iommu_command(iommu, RK_MMU_CMD_PAGE_FAULT_DONE);
+ }
+
+ if (int_status & RK_MMU_IRQ_BUS_ERROR)
+ dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
+
+ if (int_status & ~RK_MMU_IRQ_MASK)
+ dev_err(iommu->dev, "unexpected int_status: %#08x\n",
+ int_status);
+
+ rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status);
+
+ return IRQ_HANDLED;
+}
+
+static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ unsigned long flags;
+ phys_addr_t pt_phys, phys = 0;
+ u32 dte, pte;
+ u32 *page_table;
+
+ spin_lock_irqsave(&rk_domain->dt_lock, flags);
+
+ dte = rk_domain->dt[rk_iova_dte_index(iova)];
+ if (!rk_dte_is_pt_valid(dte))
+ goto out;
+
+ pt_phys = rk_dte_pt_address(dte);
+ page_table = (u32 *)phys_to_virt(pt_phys);
+ pte = page_table[rk_iova_pte_index(iova)];
+ if (!rk_pte_is_page_valid(pte))
+ goto out;
+
+ phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
+out:
+ spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+
+ return phys;
+}
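
A caller normally reaches this handler through the generic IOMMU API; a minimal usage sketch (not part of this patch):

    /* Returns 0 for an unmapped iova, matching the handler above. */
    phys_addr_t phys = iommu_iova_to_phys(domain, iova);
    if (!phys)
            pr_debug("iova %pad is not mapped\n", &iova);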
+
+static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
+ dma_addr_t iova, size_t size)
+{
+ struct list_head *pos;
+ unsigned long flags;
+
+ /* Shoot down this iova range on all iommus using this domain */
+ spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+ list_for_each(pos, &rk_domain->iommus) {
+ struct rk_iommu *iommu;
+ iommu = list_entry(pos, struct rk_iommu, node);
+ rk_iommu_zap_lines(iommu, iova, size);
+ }
+ spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+}
+
+static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
+ dma_addr_t iova)
+{
+ u32 *page_table, *dte_addr;
+ u32 dte;
+ phys_addr_t pt_phys;
+
+ assert_spin_locked(&rk_domain->dt_lock);
+
+ dte_addr = &rk_domain->dt[rk_iova_dte_index(iova)];
+ dte = *dte_addr;
+ if (rk_dte_is_pt_valid(dte))
+ goto done;
+
+ page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ if (!page_table)
+ return ERR_PTR(-ENOMEM);
+
+ dte = rk_mk_dte(page_table);
+ *dte_addr = dte;
+
+ rk_table_flush(page_table, NUM_PT_ENTRIES);
+ rk_table_flush(dte_addr, 1);
+
+ /*
+ * Zap the first iova of the newly allocated page table so the iommu
+ * evicts any stale value of this dte from its iotlb.
+ */
+ rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
+
+done:
+ pt_phys = rk_dte_pt_address(dte);
+ return (u32 *)phys_to_virt(pt_phys);
+}
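
rk_mk_dte() itself is defined earlier in the file; as a hedged sketch of the likely encoding (inferred from RK_DTE_PT_ADDRESS_MASK and the valid-bit checks visible above, not quoted from the patch), a dte packs the page table's physical address with a valid flag in bit 0:

    /* Assumed encoding: bits 31:12 hold the pt physical address, bit 0
     * marks the entry valid; rk_dte_pt_address() masks bit 0 back off.
     */
    static inline u32 rk_mk_dte_sketch(u32 *pt)
    {
            phys_addr_t pt_phys = virt_to_phys(pt);
            return ((u32)pt_phys & 0xfffff000) | 0x1;
    }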
+
+static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
+ u32 *pte_addr, dma_addr_t iova, size_t size)
+{
+ unsigned int pte_count;
+ unsigned int pte_total = size / SPAGE_SIZE;
+
+ assert_spin_locked(&rk_domain->dt_lock);
+
+ for (pte_count = 0; pte_count < pte_total; pte_count++) {
+ u32 pte = pte_addr[pte_count];
+ if (!rk_pte_is_page_valid(pte))
+ break;
+
+ pte_addr[pte_count] = rk_mk_pte_invalid(pte);
+ }
+
+ rk_table_flush(pte_addr, pte_count);
+
+ return pte_count * SPAGE_SIZE;
+}
+
+static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
+ dma_addr_t iova, phys_addr_t paddr, size_t size,
+ int prot)
+{
+ unsigned int pte_count;
+ unsigned int pte_total = size / SPAGE_SIZE;
+ phys_addr_t page_phys;
+
+ assert_spin_locked(&rk_domain->dt_lock);
+
+ for (pte_count = 0; pte_count < pte_total; pte_count++) {
+ u32 pte = pte_addr[pte_count];
+
+ if (rk_pte_is_page_valid(pte))
+ goto unwind;
+
+ pte_addr[pte_count] = rk_mk_pte(paddr, prot);
+
+ paddr += SPAGE_SIZE;
+ }
+
+ rk_table_flush(pte_addr, pte_count);
+
+ return 0;
+unwind:
+ /* Unmap the range of iovas that we just mapped */
+ rk_iommu_unmap_iova(rk_domain, pte_addr, iova, pte_count * SPAGE_SIZE);
+
+ iova += pte_count * SPAGE_SIZE;
+ page_phys = rk_pte_page_address(pte_addr[pte_count]);
+ pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
+ &iova, &page_phys, &paddr, prot);
+
+ return -EADDRINUSE;
+}
+
+static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ unsigned long flags;
+ dma_addr_t iova = (dma_addr_t)_iova;
+ u32 *page_table, *pte_addr;
+ int ret;
+
+ spin_lock_irqsave(&rk_domain->dt_lock, flags);
+
+ /*
+ * pgsize_bitmap specifies iova sizes that fit in one page table
+ * (1024 4-KiB pages = 4 MiB).
+ * So size always satisfies 4096 <= size <= 4194304.
+ * Since iommu_map() guarantees that both iova and size will be
+ * aligned, we will always only be mapping from a single dte here.
+ */
+ page_table = rk_dte_get_page_table(rk_domain, iova);
+ if (IS_ERR(page_table)) {
+ spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+ return PTR_ERR(page_table);
+ }
+
+ pte_addr = &page_table[rk_iova_pte_index(iova)];
+ ret = rk_iommu_map_iova(rk_domain, pte_addr, iova, paddr, size, prot);
+ spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+
+ return ret;
+}
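
Because iommu_map() only passes sizes drawn from pgsize_bitmap and guarantees the iova is aligned to that size, the mapped range can never straddle two dtes; a standalone sketch of the invariant the comment relies on:

    /* Sketch: with 4 KiB <= size <= 4 MiB and iova aligned to size, the
     * first and last mapped page share one dte index (bits 31:22).
     */
    static int spans_single_dte(u32 iova, u32 size)
    {
            return (iova >> 22) == ((iova + size - 1) >> 22);
    }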
+
+static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
+ size_t size)
+{
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ unsigned long flags;
+ dma_addr_t iova = (dma_addr_t)_iova;
+ phys_addr_t pt_phys;
+ u32 dte;
+ u32 *pte_addr;
+ size_t unmap_size;
+
+ spin_lock_irqsave(&rk_domain->dt_lock, flags);
+
+ /*
+ * pgsize_bitmap specifies iova sizes that fit in one page table
+ * (1024 4-KiB pages = 4 MiB).
+ * So size always satisfies 4096 <= size <= 4194304.
+ * Since iommu_unmap() guarantees that both iova and size will be
+ * aligned, we will always only be unmapping from a single dte here.
+ */
+ dte = rk_domain->dt[rk_iova_dte_index(iova)];
+ /* Just return 0 if iova is unmapped */
+ if (!rk_dte_is_pt_valid(dte)) {
+ spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+ return 0;
+ }
+
+ pt_phys = rk_dte_pt_address(dte);
+ pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
+ unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, iova, size);
+
+ spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+
+ /* Shootdown iotlb entries for iova range that was just unmapped */
+ rk_iommu_zap_iova(rk_domain, iova, unmap_size);
+
+ return unmap_size;
+}
+
+static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
+{
+ struct iommu_group *group;
+ struct device *iommu_dev;
+ struct rk_iommu *rk_iommu;
+
+ group = iommu_group_get(dev);
+ if (!group)
+ return NULL;
+ iommu_dev = iommu_group_get_iommudata(group);
+ rk_iommu = dev_get_drvdata(iommu_dev);
+ iommu_group_put(group);
+
+ return rk_iommu;
+}
+
+static int rk_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct rk_iommu *iommu;
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ unsigned long flags;
+ int ret;
+ phys_addr_t dte_addr;
+
+ /*
+ * Allow 'virtual devices' (e.g., drm) to attach to domain.
+ * Such a device does not belong to an iommu group.
+ */
+ iommu = rk_iommu_from_dev(dev);
+ if (!iommu)
+ return 0;
+
+ ret = rk_iommu_enable_stall(iommu);
+ if (ret)
+ return ret;
+
+ ret = rk_iommu_force_reset(iommu);
+ if (ret)
+ return ret;
+
+ iommu->domain = domain;
+
+ ret = devm_request_irq(dev, iommu->irq, rk_iommu_irq,
+ IRQF_SHARED, dev_name(dev), iommu);
+ if (ret)
+ return ret;
+
+ dte_addr = virt_to_phys(rk_domain->dt);
+ rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr);
+ rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
+ rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
+
+ ret = rk_iommu_enable_paging(iommu);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+ list_add_tail(&iommu->node, &rk_domain->iommus);
+ spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+
+ dev_info(dev, "Attached to iommu domain\n");
+
+ rk_iommu_disable_stall(iommu);
+
+ return 0;
+}
+
+static void rk_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct rk_iommu *iommu;
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ unsigned long flags;
+
+ /* Allow 'virtual devices' (e.g., drm) to detach from domain */
+ iommu = rk_iommu_from_dev(dev);
+ if (!iommu)
+ return;
+
+ spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+ list_del_init(&iommu->node);
+ spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+
+ /* Ignore error while disabling, just keep going */
+ rk_iommu_enable_stall(iommu);
+ rk_iommu_disable_paging(iommu);
+ rk_iommu_write(iommu, RK_MMU_INT_MASK, 0);
+ rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0);
+ rk_iommu_disable_stall(iommu);
+
+ devm_free_irq(dev, iommu->irq, iommu);
+
+ iommu->domain = NULL;
+
+ dev_info(dev, "Detached from iommu domain\n");
+}
+
+static int rk_iommu_domain_init(struct iommu_domain *domain)
+{
+ struct rk_iommu_domain *rk_domain;
+
+ rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
+ if (!rk_domain)
+ return -ENOMEM;
+
+ /*
+ * rk32xx iommus use a two-level pagetable.
+ * Each level 1 (dt) and level 2 (pt) table has 1024 4-byte entries.
+ * Allocate one 4 KiB page for each table.
+ */
+ rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
+ if (!rk_domain->dt)
+ goto err_dt;
+
+ rk_table_flush(rk_domain->dt, NUM_DT_ENTRIES);
+
+ spin_lock_init(&rk_domain->iommus_lock);
+ spin_lock_init(&rk_domain->dt_lock);
+ INIT_LIST_HEAD(&rk_domain->iommus);
+
+ domain->priv = rk_domain;
+
+ return 0;
+err_dt:
+ kfree(rk_domain);
+ return -ENOMEM;
+}
+
+static void rk_iommu_domain_destroy(struct iommu_domain *domain)
+{
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ int i;
+
+ WARN_ON(!list_empty(&rk_domain->iommus));
+
+ for (i = 0; i < NUM_DT_ENTRIES; i++) {
+ u32 dte = rk_domain->dt[i];
+ if (rk_dte_is_pt_valid(dte)) {
+ phys_addr_t pt_phys = rk_dte_pt_address(dte);
+ u32 *page_table = phys_to_virt(pt_phys);
+ free_page((unsigned long)page_table);
+ }
+ }
+
+ free_page((unsigned long)rk_domain->dt);
+ kfree(domain->priv);
+ domain->priv = NULL;
+}
+
+static bool rk_iommu_is_dev_iommu_master(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ /*
+ * An iommu master has an iommus property containing a list of phandles
+ * to iommu nodes, each with an #iommu-cells property with value 0.
+ */
+ ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
+ return (ret > 0);
+}
+
+static int rk_iommu_group_set_iommudata(struct iommu_group *group,
+ struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct platform_device *pd;
+ int ret;
+ struct of_phandle_args args;
+
+ /*
+ * An iommu master has an iommus property containing a list of phandles
+ * to iommu nodes, each with an #iommu-cells property with value 0.
+ */
+ ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
+ &args);
+ if (ret) {
+ dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n",
+ np->full_name, ret);
+ return ret;
+ }
+ if (args.args_count != 0) {
+ dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n",
+ args.np->full_name, args.args_count);
+ return -EINVAL;
+ }
+
+ pd = of_find_device_by_node(args.np);
+ of_node_put(args.np);
+ if (!pd) {
+ dev_err(dev, "iommu %s not found\n", args.np->full_name);
+ return -EPROBE_DEFER;
+ }
+
+ /* TODO(djkurtz): handle multiple slave iommus for a single master */
+ iommu_group_set_iommudata(group, &pd->dev, NULL);
+
+ return 0;
+}
+
+static int rk_iommu_add_device(struct device *dev)
+{
+ struct iommu_group *group;
+ int ret;
+
+ if (!rk_iommu_is_dev_iommu_master(dev))
+ return -ENODEV;
+
+ group = iommu_group_get(dev);
+ if (!group) {
+ group = iommu_group_alloc();
+ if (IS_ERR(group)) {
+ dev_err(dev, "Failed to allocate IOMMU group\n");
+ return PTR_ERR(group);
+ }
+ }
+
+ ret = iommu_group_add_device(group, dev);
+ if (ret)
+ goto err_put_group;
+
+ ret = rk_iommu_group_set_iommudata(group, dev);
+ if (ret)
+ goto err_remove_device;
+
+ iommu_group_put(group);
+
+ return 0;
+
+err_remove_device:
+ iommu_group_remove_device(dev);
+err_put_group:
+ iommu_group_put(group);
+ return ret;
+}
+
+static void rk_iommu_remove_device(struct device *dev)
+{
+ if (!rk_iommu_is_dev_iommu_master(dev))
+ return;
+
+ iommu_group_remove_device(dev);
+}
+
+static const struct iommu_ops rk_iommu_ops = {
+ .domain_init = rk_iommu_domain_init,
+ .domain_destroy = rk_iommu_domain_destroy,
+ .attach_dev = rk_iommu_attach_device,
+ .detach_dev = rk_iommu_detach_device,
+ .map = rk_iommu_map,
+ .unmap = rk_iommu_unmap,
+ .add_device = rk_iommu_add_device,
+ .remove_device = rk_iommu_remove_device,
+ .iova_to_phys = rk_iommu_iova_to_phys,
+ .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
+};
+
+static int rk_iommu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rk_iommu *iommu;
+ struct resource *res;
+
+ iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
+ if (!iommu)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, iommu);
+ iommu->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iommu->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(iommu->base))
+ return PTR_ERR(iommu->base);
+
+ iommu->irq = platform_get_irq(pdev, 0);
+ if (iommu->irq < 0) {
+ dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int rk_iommu_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id rk_iommu_dt_ids[] = {
+ { .compatible = "rockchip,iommu" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);
+#endif
+
+static struct platform_driver rk_iommu_driver = {
+ .probe = rk_iommu_probe,
+ .remove = rk_iommu_remove,
+ .driver = {
+ .name = "rk_iommu",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(rk_iommu_dt_ids),
+ },
+};
+
+static int __init rk_iommu_init(void)
+{
+ int ret;
+
+ ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&rk_iommu_driver);
+}
+static void __exit rk_iommu_exit(void)
+{
+ platform_driver_unregister(&rk_iommu_driver);
+}
+
+subsys_initcall(rk_iommu_init);
+module_exit(rk_iommu_exit);
+
+MODULE_DESCRIPTION("IOMMU API for Rockchip");
+MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
+MODULE_ALIAS("platform:rockchip-iommu");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c
index 1333e6fb3405..f1b00774e4de 100644
--- a/drivers/iommu/shmobile-iommu.c
+++ b/drivers/iommu/shmobile-iommu.c
@@ -361,6 +361,7 @@ static const struct iommu_ops shmobile_iommu_ops = {
.detach_dev = shmobile_iommu_detach_device,
.map = shmobile_iommu_map,
.unmap = shmobile_iommu_unmap,
+ .map_sg = default_iommu_map_sg,
.iova_to_phys = shmobile_iommu_iova_to_phys,
.add_device = shmobile_iommu_add_device,
.pgsize_bitmap = SZ_1M | SZ_64K | SZ_4K,
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 3afdf43f732a..73e845a66925 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -955,6 +955,7 @@ static const struct iommu_ops smmu_iommu_ops = {
.detach_dev = smmu_iommu_detach_dev,
.map = smmu_iommu_map,
.unmap = smmu_iommu_unmap,
+ .map_sg = default_iommu_map_sg,
.iova_to_phys = smmu_iommu_iova_to_phys,
.pgsize_bitmap = SMMU_IOMMU_PGSIZES,
};
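
default_iommu_map_sg() is added to the IOMMU core elsewhere in this series; a rough approximation of what these drivers are opting into (a sketch, not the series' exact code) maps each scatterlist entry with iommu_map() and unwinds on failure:

    /* Sketch: map each sg entry in turn; on error, unmap whatever was
     * already mapped and report 0 bytes mapped.
     */
    static size_t map_sg_sketch(struct iommu_domain *domain, unsigned long iova,
                                struct scatterlist *sg, unsigned int nents,
                                int prot)
    {
            struct scatterlist *s;
            size_t mapped = 0;
            unsigned int i;

            for_each_sg(sg, s, nents, i) {
                    phys_addr_t phys = page_to_phys(sg_page(s));

                    if (iommu_map(domain, iova + mapped, phys, s->length, prot))
                            goto undo;
                    mapped += s->length;
            }
            return mapped;
    undo:
            iommu_unmap(domain, iova, mapped);
            return 0;
    }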
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 825ca1f87639..afe79719ea32 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1434,9 +1434,9 @@ static void drop_buffers(struct dm_bufio_client *c)
/*
* Test if the buffer is unused and too old, and commit it.
- * At if noio is set, we must not do any I/O because we hold
- * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted to
- * different bufio client.
+ * And if GFP_NOFS is used, we must not do any I/O because we hold
+ * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
+ * rerouted to different bufio client.
*/
static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
unsigned long max_jiffies)
@@ -1444,7 +1444,7 @@ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
if (jiffies - b->last_accessed < max_jiffies)
return 0;
- if (!(gfp & __GFP_IO)) {
+ if (!(gfp & __GFP_FS)) {
if (test_bit(B_READING, &b->state) ||
test_bit(B_WRITING, &b->state) ||
test_bit(B_DIRTY, &b->state))
@@ -1486,7 +1486,7 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
unsigned long freed;
c = container_of(shrink, struct dm_bufio_client, shrinker);
- if (sc->gfp_mask & __GFP_IO)
+ if (sc->gfp_mask & __GFP_FS)
dm_bufio_lock(c);
else if (!dm_bufio_trylock(c))
return SHRINK_STOP;
@@ -1503,7 +1503,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
unsigned long count;
c = container_of(shrink, struct dm_bufio_client, shrinker);
- if (sc->gfp_mask & __GFP_IO)
+ if (sc->gfp_mask & __GFP_FS)
dm_bufio_lock(c);
else if (!dm_bufio_trylock(c))
return 0;
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 4857fa4a5484..07c0fa0fa284 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -789,8 +789,7 @@ struct dm_raid_superblock {
__le32 layout;
__le32 stripe_sectors;
- __u8 pad[452]; /* Round struct to 512 bytes. */
- /* Always set to 0 when writing. */
+ /* Remainder of a logical block is zero-filled when writing (see super_sync()). */
} __packed;
static int read_disk_sb(struct md_rdev *rdev, int size)
@@ -827,7 +826,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
test_bit(Faulty, &(rs->dev[i].rdev.flags)))
failed_devices |= (1ULL << i);
- memset(sb, 0, sizeof(*sb));
+ memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
sb->magic = cpu_to_le32(DM_RAID_MAGIC);
sb->features = cpu_to_le32(0); /* No features yet */
@@ -862,7 +861,11 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
uint64_t events_sb, events_refsb;
rdev->sb_start = 0;
- rdev->sb_size = sizeof(*sb);
+ rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
+ if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
+ DMERR("superblock size of a logical block is no longer valid");
+ return -EINVAL;
+ }
ret = read_disk_sb(rdev, rdev->sb_size);
if (ret)
@@ -1169,8 +1172,12 @@ static void configure_discard_support(struct dm_target *ti, struct raid_set *rs)
raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);
for (i = 0; i < rs->md.raid_disks; i++) {
- struct request_queue *q = bdev_get_queue(rs->dev[i].rdev.bdev);
+ struct request_queue *q;
+
+ if (!rs->dev[i].rdev.bdev)
+ continue;
+ q = bdev_get_queue(rs->dev[i].rdev.bdev);
if (!q || !blk_queue_discard(q))
return;
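
The memset change in super_sync() above leans on struct pointer arithmetic: sb + 1 points one whole struct past sb, so only the tail of the logical block is zeroed and the freshly written fields are left intact. The equivalent byte arithmetic, spelled out:

    /* Equivalent form: 'sb + 1' == (char *)sb + sizeof(*sb), so this
     * clears bytes sizeof(*sb)..rdev->sb_size-1 of the logical block.
     */
    memset((char *)sb + sizeof(*sb), 0, rdev->sb_size - sizeof(*sb));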
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index d1600d2aa2e2..f8b37d4c05d8 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -159,8 +159,10 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
sc->stripes_shift = __ffs(stripes);
r = dm_set_target_max_io_len(ti, chunk_size);
- if (r)
+ if (r) {
+ kfree(sc);
return r;
+ }
ti->num_flush_bios = stripes;
ti->num_discard_bios = stripes;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 4843801173fe..0f86d802b533 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1936,6 +1936,14 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
+ /*
+ * We must hold the virtual cell before doing the lookup, otherwise
+ * there's a race with discard.
+ */
+ build_virtual_key(tc->td, block, &key);
+ if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
+ return DM_MAPIO_SUBMITTED;
+
r = dm_thin_find_block(td, block, 0, &result);
/*
@@ -1959,13 +1967,10 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
* shared flag will be set in their case.
*/
thin_defer_bio(tc, bio);
+ cell_defer_no_holder_no_free(tc, &cell1);
return DM_MAPIO_SUBMITTED;
}
- build_virtual_key(tc->td, block, &key);
- if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
- return DM_MAPIO_SUBMITTED;
-
build_data_key(tc->td, result.block, &key);
if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
cell_defer_no_holder_no_free(tc, &cell1);
@@ -1986,6 +1991,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
* of doing so.
*/
handle_unserviceable_bio(tc->pool, bio);
+ cell_defer_no_holder_no_free(tc, &cell1);
return DM_MAPIO_SUBMITTED;
}
/* fall through */
@@ -1996,6 +2002,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
* provide the hint to load the metadata into cache.
*/
thin_defer_bio(tc, bio);
+ cell_defer_no_holder_no_free(tc, &cell1);
return DM_MAPIO_SUBMITTED;
default:
@@ -2005,6 +2012,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
* pool is switched to fail-io mode.
*/
bio_io_error(bio);
+ cell_defer_no_holder_no_free(tc, &cell1);
return DM_MAPIO_SUBMITTED;
}
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4dfa15da9cb8..9233c71138f1 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5121,6 +5121,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
printk("md: %s still in use.\n",mdname(mddev));
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
err = -EBUSY;
@@ -5135,6 +5136,8 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
sysfs_notify_dirent_safe(mddev->sysfs_state);
err = 0;
}
@@ -5178,6 +5181,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
mutex_unlock(&mddev->open_mutex);
if (did_freeze) {
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
}
return -EBUSY;
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index 37d367bb9aa8..bf2b80d5c470 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -42,6 +42,12 @@ struct btree_node {
} __packed;
+/*
+ * Locks a block using the btree node validator.
+ */
+int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
+ struct dm_block **result);
+
void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
struct dm_btree_value_type *vt);
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index cf9fd676ae44..1b5e13ec7f96 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -92,7 +92,7 @@ struct dm_block_validator btree_node_validator = {
/*----------------------------------------------------------------*/
-static int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
+int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
struct dm_block **result)
{
return dm_tm_read_lock(info->tm, b, &btree_node_validator, result);
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 416060c25709..200ac12a1d40 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -847,22 +847,26 @@ EXPORT_SYMBOL_GPL(dm_btree_find_lowest_key);
* FIXME: We shouldn't use a recursive algorithm when we have limited stack
* space. Also this only works for single level trees.
*/
-static int walk_node(struct ro_spine *s, dm_block_t block,
+static int walk_node(struct dm_btree_info *info, dm_block_t block,
int (*fn)(void *context, uint64_t *keys, void *leaf),
void *context)
{
int r;
unsigned i, nr;
+ struct dm_block *node;
struct btree_node *n;
uint64_t keys;
- r = ro_step(s, block);
- n = ro_node(s);
+ r = bn_read_lock(info, block, &node);
+ if (r)
+ return r;
+
+ n = dm_block_data(node);
nr = le32_to_cpu(n->header.nr_entries);
for (i = 0; i < nr; i++) {
if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
- r = walk_node(s, value64(n, i), fn, context);
+ r = walk_node(info, value64(n, i), fn, context);
if (r)
goto out;
} else {
@@ -874,7 +878,7 @@ static int walk_node(struct ro_spine *s, dm_block_t block,
}
out:
- ro_pop(s);
+ dm_tm_unlock(info->tm, node);
return r;
}
@@ -882,15 +886,7 @@ int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
int (*fn)(void *context, uint64_t *keys, void *leaf),
void *context)
{
- int r;
- struct ro_spine spine;
-
BUG_ON(info->levels > 1);
-
- init_ro_spine(&spine, info);
- r = walk_node(&spine, root, fn, context);
- exit_ro_spine(&spine);
-
- return r;
+ return walk_node(info, root, fn, context);
}
EXPORT_SYMBOL_GPL(dm_btree_walk);
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index cf008f45968c..711773e8e64b 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -240,7 +240,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
goto err_irq_charger;
}
- ret = regmap_add_irq_chip(max77693->regmap, max77693->irq,
+ ret = regmap_add_irq_chip(max77693->regmap_muic, max77693->irq,
IRQF_ONESHOT | IRQF_SHARED |
IRQF_TRIGGER_FALLING, 0,
&max77693_muic_irq_chip,
@@ -250,6 +250,17 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
goto err_irq_muic;
}
+ /* Unmask interrupts from all blocks in interrupt source register */
+ ret = regmap_update_bits(max77693->regmap,
+ MAX77693_PMIC_REG_INTSRC_MASK,
+ SRC_IRQ_ALL, (unsigned int)~SRC_IRQ_ALL);
+ if (ret < 0) {
+ dev_err(max77693->dev,
+ "Could not unmask interrupts in INTSRC: %d\n",
+ ret);
+ goto err_intsrc;
+ }
+
pm_runtime_set_active(max77693->dev);
ret = mfd_add_devices(max77693->dev, -1, max77693_devs,
@@ -261,6 +272,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
err_mfd:
mfd_remove_devices(max77693->dev);
+err_intsrc:
regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic);
err_irq_muic:
regmap_del_irq_chip(max77693->irq, max77693->irq_data_charger);
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index f2643c221d34..30f7ca89a0e6 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -947,6 +947,7 @@ static void rtsx_pci_idle_work(struct work_struct *work)
mutex_unlock(&pcr->pcr_mutex);
}
+#ifdef CONFIG_PM
static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
{
if (pcr->ops->turn_off_led)
@@ -961,6 +962,7 @@ static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
if (pcr->ops->force_power_down)
pcr->ops->force_power_down(pcr, pm_state);
}
+#endif
static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h
index 2d045f26f193..bee0abf82040 100644
--- a/drivers/mfd/stmpe.h
+++ b/drivers/mfd/stmpe.h
@@ -269,7 +269,7 @@ int stmpe_remove(struct stmpe *stmpe);
#define STMPE24XX_REG_CHIP_ID 0x80
#define STMPE24XX_REG_IEGPIOR_LSB 0x18
#define STMPE24XX_REG_ISGPIOR_MSB 0x19
-#define STMPE24XX_REG_GPMR_LSB 0xA5
+#define STMPE24XX_REG_GPMR_LSB 0xA4
#define STMPE24XX_REG_GPSR_LSB 0x85
#define STMPE24XX_REG_GPCR_LSB 0x88
#define STMPE24XX_REG_GPDR_LSB 0x8B
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index cf92a6d1c532..50f9091bcd38 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -44,6 +44,15 @@ static u8 twl4030_start_script_address = 0x2b;
#define PWR_DEVSLP BIT(1)
#define PWR_DEVOFF BIT(0)
+/* Register bits for CFG_P1_TRANSITION (also for P2 and P3) */
+#define STARTON_SWBUG BIT(7) /* Start on watchdog */
+#define STARTON_VBUS BIT(5) /* Start on VBUS */
+#define STARTON_VBAT BIT(4) /* Start on battery insert */
+#define STARTON_RTC BIT(3) /* Start on RTC */
+#define STARTON_USB BIT(2) /* Start on USB host */
+#define STARTON_CHG BIT(1) /* Start on charger */
+#define STARTON_PWON BIT(0) /* Start on PWRON button */
+
#define SEQ_OFFSYNC (1 << 0)
#define PHY_TO_OFF_PM_MASTER(p) (p - 0x36)
@@ -606,6 +615,44 @@ twl4030_power_configure_resources(const struct twl4030_power_data *pdata)
return 0;
}
+static int twl4030_starton_mask_and_set(u8 bitmask, u8 bitvalues)
+{
+ u8 regs[3] = { TWL4030_PM_MASTER_CFG_P1_TRANSITION,
+ TWL4030_PM_MASTER_CFG_P2_TRANSITION,
+ TWL4030_PM_MASTER_CFG_P3_TRANSITION, };
+ u8 val;
+ int i, err;
+
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
+ TWL4030_PM_MASTER_PROTECT_KEY);
+ if (err)
+ goto relock;
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
+ TWL4030_PM_MASTER_KEY_CFG2,
+ TWL4030_PM_MASTER_PROTECT_KEY);
+ if (err)
+ goto relock;
+
+ for (i = 0; i < sizeof(regs); i++) {
+ err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER,
+ &val, regs[i]);
+ if (err)
+ break;
+ val = (~bitmask & val) | (bitmask & bitvalues);
+ err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
+ val, regs[i]);
+ if (err)
+ break;
+ }
+
+ if (err)
+ pr_err("TWL4030 Register access failed: %i\n", err);
+
+relock:
+ return twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
+ TWL4030_PM_MASTER_PROTECT_KEY);
+}
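
The update loop in twl4030_starton_mask_and_set() is a classic masked read-modify-write: bits outside bitmask are preserved, bits inside it are taken from bitvalues. A self-contained sketch:

    /* Sketch: keep bits outside 'bitmask', take bits inside it from
     * 'bitvalues'.
     */
    static u8 mask_and_set(u8 val, u8 bitmask, u8 bitvalues)
    {
            return (val & ~bitmask) | (bitvalues & bitmask);
    }
    /* e.g. mask_and_set(0xff, STARTON_VBUS | STARTON_CHG, 0) == 0xdd:
     * only the VBUS (bit 5) and charger (bit 1) start bits are cleared.
     */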
+
/*
* In master mode, start the power off sequence.
* After a successful execution, TWL shuts down the power to the SoC
@@ -615,6 +662,11 @@ void twl4030_power_off(void)
{
int err;
+ /* Disable start on charger or VBUS as it can break poweroff */
+ err = twl4030_starton_mask_and_set(STARTON_VBUS | STARTON_CHG, 0);
+ if (err)
+ pr_err("TWL4030 Unable to configure start-up\n");
+
err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, PWR_DEVOFF,
TWL4030_PM_MASTER_P1_SW_EVENTS);
if (err)
diff --git a/drivers/mfd/viperboard.c b/drivers/mfd/viperboard.c
index e00f5340ed87..3c2b8f9e3c84 100644
--- a/drivers/mfd/viperboard.c
+++ b/drivers/mfd/viperboard.c
@@ -93,8 +93,9 @@ static int vprbrd_probe(struct usb_interface *interface,
version >> 8, version & 0xff,
vb->usb_dev->bus->busnum, vb->usb_dev->devnum);
- ret = mfd_add_devices(&interface->dev, -1, vprbrd_devs,
- ARRAY_SIZE(vprbrd_devs), NULL, 0, NULL);
+ ret = mfd_add_devices(&interface->dev, PLATFORM_DEVID_AUTO,
+ vprbrd_devs, ARRAY_SIZE(vprbrd_devs), NULL, 0,
+ NULL);
if (ret != 0) {
dev_err(&interface->dev, "Failed to add mfd devices to core.");
goto error;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 63ea1941e973..7ba83ffb08ac 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -575,10 +575,24 @@ static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}
-static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
+{
+ if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
+ return false;
+
+ if (ioread32(p->ring_csr_addr + SRST_ADDR))
+ return false;
+
+ return true;
+}
+
+static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
u32 val;
+ if (!xgene_ring_mgr_init(pdata))
+ return -ENODEV;
+
clk_prepare_enable(pdata->clk);
clk_disable_unprepare(pdata->clk);
clk_prepare_enable(pdata->clk);
@@ -590,6 +604,8 @@ static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
val |= SCAN_AUTO_INCR;
MGMT_CLOCK_SEL_SET(&val, 1);
xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);
+
+ return 0;
}
static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 38558584080e..ec45f3256f0e 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -104,6 +104,9 @@ enum xgene_enet_rm {
#define BLOCK_ETH_MAC_OFFSET 0x0000
#define BLOCK_ETH_MAC_CSR_OFFSET 0x2800
+#define CLKEN_ADDR 0xc208
+#define SRST_ADDR 0xc200
+
#define MAC_ADDR_REG_OFFSET 0x00
#define MAC_COMMAND_REG_OFFSET 0x04
#define MAC_WRITE_REG_OFFSET 0x08
@@ -318,6 +321,7 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata);
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata);
+bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
extern struct xgene_mac_ops xgene_gmac_ops;
extern struct xgene_port_ops xgene_gport_ops;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 3c208cc6f6bb..123669696184 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -639,9 +639,9 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
struct device *dev = ndev_to_dev(ndev);
struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
struct xgene_enet_desc_ring *buf_pool = NULL;
- u8 cpu_bufnum = 0, eth_bufnum = 0;
- u8 bp_bufnum = 0x20;
- u16 ring_id, ring_num = 0;
+ u8 cpu_bufnum = 0, eth_bufnum = START_ETH_BUFNUM;
+ u8 bp_bufnum = START_BP_BUFNUM;
+ u16 ring_id, ring_num = START_RING_NUM;
int ret;
/* allocate rx descriptor ring */
@@ -852,7 +852,9 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
u16 dst_ring_num;
int ret;
- pdata->port_ops->reset(pdata);
+ ret = pdata->port_ops->reset(pdata);
+ if (ret)
+ return ret;
ret = xgene_enet_create_desc_rings(ndev);
if (ret) {
@@ -954,6 +956,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
return ret;
err:
+ unregister_netdev(ndev);
free_netdev(ndev);
return ret;
}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 874e5a01161f..f9958fae6ffd 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -38,6 +38,9 @@
#define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
#define NUM_PKT_BUF 64
#define NUM_BUFPOOL 32
+#define START_ETH_BUFNUM 2
+#define START_BP_BUFNUM 0x22
+#define START_RING_NUM 8
#define PHY_POLL_LINK_ON (10 * HZ)
#define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5)
@@ -83,7 +86,7 @@ struct xgene_mac_ops {
};
struct xgene_port_ops {
- void (*reset)(struct xgene_enet_pdata *pdata);
+ int (*reset)(struct xgene_enet_pdata *pdata);
void (*cle_bypass)(struct xgene_enet_pdata *pdata,
u32 dst_ring_num, u16 bufpool_id);
void (*shutdown)(struct xgene_enet_pdata *pdata);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index c22f32622fa9..f5d4f68c288c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -311,14 +311,19 @@ static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
xgene_sgmac_rxtx(p, TX_EN, false);
}
-static void xgene_enet_reset(struct xgene_enet_pdata *p)
+static int xgene_enet_reset(struct xgene_enet_pdata *p)
{
+ if (!xgene_ring_mgr_init(p))
+ return -ENODEV;
+
clk_prepare_enable(p->clk);
clk_disable_unprepare(p->clk);
clk_prepare_enable(p->clk);
xgene_enet_ecc_init(p);
xgene_enet_config_ring_if_assoc(p);
+
+ return 0;
}
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 67d07206b3c7..a18a9d1f1143 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -252,14 +252,19 @@ static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
}
-static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
+ if (!xgene_ring_mgr_init(pdata))
+ return -ENODEV;
+
clk_prepare_enable(pdata->clk);
clk_disable_unprepare(pdata->clk);
clk_prepare_enable(pdata->clk);
xgene_enet_ecc_init(pdata);
xgene_enet_config_ring_if_assoc(pdata);
+
+ return 0;
}
static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 3a6778a667f4..531bb7c57531 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1110,7 +1110,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
/* We just need one DMA descriptor which is DMA-able, since writing to
* the port will allocate a new descriptor in its internal linked-list
*/
- p = dma_zalloc_coherent(kdev, 1, &ring->desc_dma, GFP_KERNEL);
+ p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
+ GFP_KERNEL);
if (!p) {
netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
return -ENOMEM;
@@ -1174,6 +1175,13 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
if (!(reg & TDMA_DISABLED))
netdev_warn(priv->netdev, "TDMA not stopped!\n");
+ /* ring->cbs is the last allocation in bcm_sysport_init_tx_ring that
+ * can fail, so checking this pointer tells us whether the TX ring was
+ * fully initialized or not.
+ */
+ if (!ring->cbs)
+ return;
+
napi_disable(&ring->napi);
netif_napi_del(&ring->napi);
@@ -1183,7 +1191,8 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
ring->cbs = NULL;
if (ring->desc_dma) {
- dma_free_coherent(kdev, 1, ring->desc_cpu, ring->desc_dma);
+ dma_free_coherent(kdev, sizeof(struct dma_desc),
+ ring->desc_cpu, ring->desc_dma);
ring->desc_dma = 0;
}
ring->size = 0;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index fdc9ec09e453..da1a2500c91c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2140,6 +2140,12 @@ static int bcmgenet_open(struct net_device *dev)
goto err_irq0;
}
+ /* Re-configure the port multiplexer towards the PHY device */
+ bcmgenet_mii_config(priv->dev, false);
+
+ phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
+ priv->phy_interface);
+
bcmgenet_netif_start(dev);
return 0;
@@ -2184,6 +2190,9 @@ static int bcmgenet_close(struct net_device *dev)
bcmgenet_netif_stop(dev);
+ /* Really kill the PHY state machine and disconnect from it */
+ phy_disconnect(priv->phydev);
+
/* Disable MAC receive */
umac_enable_set(priv, CMD_RX_EN, false);
@@ -2685,7 +2694,7 @@ static int bcmgenet_resume(struct device *d)
phy_init_hw(priv->phydev);
/* Speed settings must be restored */
- bcmgenet_mii_config(priv->dev);
+ bcmgenet_mii_config(priv->dev, false);
/* disable ethernet MAC while updating its registers */
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index dbf524ea3b19..31b2da5f9b82 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -617,9 +617,10 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
/* MDIO routines */
int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_config(struct net_device *dev);
+int bcmgenet_mii_config(struct net_device *dev, bool init);
void bcmgenet_mii_exit(struct net_device *dev);
void bcmgenet_mii_reset(struct net_device *dev);
+void bcmgenet_mii_setup(struct net_device *dev);
/* Wake-on-LAN routines */
void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 9ff799a9f801..933cd7e7cd33 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -77,7 +77,7 @@ static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
/* setup netdev link state when PHY link status change and
* update UMAC and RGMII block when link up
*/
-static void bcmgenet_mii_setup(struct net_device *dev)
+void bcmgenet_mii_setup(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = priv->phydev;
@@ -211,7 +211,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
}
-int bcmgenet_mii_config(struct net_device *dev)
+int bcmgenet_mii_config(struct net_device *dev, bool init)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = priv->phydev;
@@ -298,7 +298,8 @@ int bcmgenet_mii_config(struct net_device *dev)
return -EINVAL;
}
- dev_info(kdev, "configuring instance for %s\n", phy_name);
+ if (init)
+ dev_info(kdev, "configuring instance for %s\n", phy_name);
return 0;
}
@@ -350,7 +351,7 @@ static int bcmgenet_mii_probe(struct net_device *dev)
* PHY speed which is needed for bcmgenet_mii_config() to configure
* things appropriately.
*/
- ret = bcmgenet_mii_config(dev);
+ ret = bcmgenet_mii_config(dev, true);
if (ret) {
phy_disconnect(priv->phydev);
return ret;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 6fe300e316c3..cca604994003 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -79,8 +79,9 @@ static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
app.protocol = dcb->app_priority[i].protocolid;
if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
+ app.priority = dcb->app_priority[i].user_prio_map;
app.selector = dcb->app_priority[i].sel_field + 1;
- err = dcb_ieee_setapp(dev, &app);
+ err = dcb_ieee_delapp(dev, &app);
} else {
app.selector = !!(dcb->app_priority[i].sel_field);
err = dcb_setapp(dev, &app);
@@ -122,7 +123,11 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
case CXGB4_DCB_INPUT_FW_ENABLED: {
/* we're going to use Firmware DCB */
dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
- dcb->supported = CXGB4_DCBX_FW_SUPPORT;
+ dcb->supported = DCB_CAP_DCBX_LLD_MANAGED;
+ if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE)
+ dcb->supported |= DCB_CAP_DCBX_VER_IEEE;
+ else
+ dcb->supported |= DCB_CAP_DCBX_VER_CEE;
break;
}
@@ -436,14 +441,17 @@ static void cxgb4_getpgtccfg(struct net_device *dev, int tc,
*up_tc_map = (1 << tc);
/* prio_type is link strict */
- *prio_type = 0x2;
+ if (*pgid != 0xF)
+ *prio_type = 0x2;
}
static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc,
u8 *prio_type, u8 *pgid, u8 *bw_per,
u8 *up_tc_map)
{
- return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 1);
+ /* tc 0 is written at MSB position */
+ return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
+ up_tc_map, 1);
}
@@ -451,7 +459,9 @@ static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc,
u8 *prio_type, u8 *pgid, u8 *bw_per,
u8 *up_tc_map)
{
- return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 0);
+ /* tc 0 is written at MSB position */
+ return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
+ up_tc_map, 0);
}
static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
@@ -461,6 +471,7 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
struct fw_port_cmd pcmd;
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = pi->adapter;
+ int fw_tc = 7 - tc;
u32 _pgid;
int err;
@@ -479,8 +490,8 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
}
_pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
- _pgid &= ~(0xF << (tc * 4));
- _pgid |= pgid << (tc * 4);
+ _pgid &= ~(0xF << (fw_tc * 4));
+ _pgid |= pgid << (fw_tc * 4);
pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid);
INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
@@ -593,7 +604,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg)
priority >= CXGB4_MAX_PRIORITY)
*pfccfg = 0;
else
- *pfccfg = (pi->dcb.pfcen >> priority) & 1;
+ *pfccfg = (pi->dcb.pfcen >> (7 - priority)) & 1;
}
/* Enable/disable Priority Pause Frames for the specified Traffic Class
@@ -618,9 +629,9 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen;
if (pfccfg)
- pcmd.u.dcb.pfc.pfcen |= (1 << priority);
+ pcmd.u.dcb.pfc.pfcen |= (1 << (7 - priority));
else
- pcmd.u.dcb.pfc.pfcen &= (~(1 << priority));
+ pcmd.u.dcb.pfc.pfcen &= (~(1 << (7 - priority)));
err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
if (err != FW_PORT_DCB_CFG_SUCCESS) {
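
The recurring 7 - priority / 7 - tc mirroring in this file exists because the firmware stores priority 0 in the most-significant bit (or nibble) of each field. A tiny worked example:

    /* Sketch: host priority p lives at firmware bit (7 - p), so
     * enabling PFC on host priority 1 sets bit 6.
     */
    u8 pfcen = 0;
    int priority = 1;
    pfcen |= 1 << (7 - priority);    /* pfcen == 0x40 */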
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 5e1b314e11af..39f2b13e66c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2914,7 +2914,8 @@ static int t4_sge_init_hard(struct adapter *adap)
int t4_sge_init(struct adapter *adap)
{
struct sge *s = &adap->sge;
- u32 sge_control, sge_conm_ctrl;
+ u32 sge_control, sge_control2, sge_conm_ctrl;
+ unsigned int ingpadboundary, ingpackboundary;
int ret, egress_threshold;
/*
@@ -2924,8 +2925,31 @@ int t4_sge_init(struct adapter *adap)
sge_control = t4_read_reg(adap, SGE_CONTROL);
s->pktshift = PKTSHIFT_GET(sge_control);
s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
- s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
- X_INGPADBOUNDARY_SHIFT);
+
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately. The actual Ingress Packet Data alignment boundary
+ * within Packed Buffer Mode is the maximum of these two
+ * specifications.
+ */
+ ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) +
+ X_INGPADBOUNDARY_SHIFT);
+ if (is_t4(adap->params.chip)) {
+ s->fl_align = ingpadboundary;
+ } else {
+ /* T5 has a different interpretation of one of the PCIe Packing
+ * Boundary values.
+ */
+ sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
+ ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
+ if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+ ingpackboundary = 16;
+ else
+ ingpackboundary = 1 << (ingpackboundary +
+ INGPACKBOUNDARY_SHIFT_X);
+
+ s->fl_align = max(ingpadboundary, ingpackboundary);
+ }
if (adap->flags & USING_SOFT_PARAMS)
ret = t4_sge_init_soft(adap);
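
To make the new T5 path concrete, a worked example with illustrative register field values (the shift macros are the ones this series defines in t4_regs.h):

    /* Sketch: pad field 0 -> 1 << (0 + 5) = 32 bytes; pack field 1 is
     * not the 16-byte special case, so 1 << (1 + 5) = 64 bytes; the
     * resulting alignment is the larger of the two.
     */
    unsigned int pad  = 1u << (0 + X_INGPADBOUNDARY_SHIFT);      /* 32 */
    unsigned int pack = 1u << (1 + INGPACKBOUNDARY_SHIFT_X);     /* 64 */
    unsigned int fl_align = max(pad, pack);                      /* 64 */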
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index a9d9d74e4f09..163a2a14948c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3129,12 +3129,51 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
HOSTPAGESIZEPF6(sge_hps) |
HOSTPAGESIZEPF7(sge_hps));
- t4_set_reg_field(adap, SGE_CONTROL,
- INGPADBOUNDARY_MASK |
- EGRSTATUSPAGESIZE_MASK,
- INGPADBOUNDARY(fl_align_log - 5) |
- EGRSTATUSPAGESIZE(stat_len != 64));
-
+ if (is_t4(adap->params.chip)) {
+ t4_set_reg_field(adap, SGE_CONTROL,
+ INGPADBOUNDARY_MASK |
+ EGRSTATUSPAGESIZE_MASK,
+ INGPADBOUNDARY(fl_align_log - 5) |
+ EGRSTATUSPAGESIZE(stat_len != 64));
+ } else {
+ /* T5 introduced the separation of the Free List Padding and
+ * Packing Boundaries. Thus, we can select a smaller Padding
+ * Boundary to avoid uselessly chewing up PCIe Link and Memory
+ * Bandwidth, and use a Packing Boundary which is large enough
+ * to avoid false sharing between CPUs, etc.
+ *
+ * For the PCI Link, the smaller the Padding Boundary the
+ * better. For the Memory Controller, a smaller Padding
+ * Boundary is better until we cross under the Memory Line
+ * Size (the minimum unit of transfer to/from Memory). If we
+ * have a Padding Boundary which is smaller than the Memory
+ * Line Size, that'll involve a Read-Modify-Write cycle on the
+ * Memory Controller which is never good. For T5 the smallest
+ * Padding Boundary which we can select is 32 bytes which is
+ * larger than any known Memory Controller Line Size so we'll
+ * use that.
+ *
+ * T5 has a different interpretation of the "0" value for the
+ * Packing Boundary. This corresponds to 16 bytes instead of
+ * the expected 32 bytes. We never have a Packing Boundary
+ * less than 32 bytes so we can't use that special value but
+ * on the other hand, if we wanted 32 bytes, the best we can
+ * really do is 64 bytes.
+ */
+ if (fl_align <= 32) {
+ fl_align = 64;
+ fl_align_log = 6;
+ }
+ t4_set_reg_field(adap, SGE_CONTROL,
+ INGPADBOUNDARY_MASK |
+ EGRSTATUSPAGESIZE_MASK,
+ INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) |
+ EGRSTATUSPAGESIZE(stat_len != 64));
+ t4_set_reg_field(adap, SGE_CONTROL2_A,
+ INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
+ INGPACKBOUNDARY_V(fl_align_log -
+ INGPACKBOUNDARY_SHIFT_X));
+ }
/*
* Adjust various SGE Free List Host Buffer Sizes.
*
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index a1024db5dc13..8d2de1006b08 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -95,6 +95,7 @@
#define X_INGPADBOUNDARY_SHIFT 5
#define SGE_CONTROL 0x1008
+#define SGE_CONTROL2_A 0x1124
#define DCASYSTYPE 0x00080000U
#define RXPKTCPLMODE_MASK 0x00040000U
#define RXPKTCPLMODE_SHIFT 18
@@ -106,6 +107,7 @@
#define PKTSHIFT_SHIFT 10
#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT)
#define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT)
+#define INGPCIEBOUNDARY_32B_X 0
#define INGPCIEBOUNDARY_MASK 0x00000380U
#define INGPCIEBOUNDARY_SHIFT 7
#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT)
@@ -114,6 +116,14 @@
#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT)
#define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \
>> INGPADBOUNDARY_SHIFT)
+#define INGPACKBOUNDARY_16B_X 0
+#define INGPACKBOUNDARY_SHIFT_X 5
+
+#define INGPACKBOUNDARY_S 16
+#define INGPACKBOUNDARY_M 0x7U
+#define INGPACKBOUNDARY_V(x) ((x) << INGPACKBOUNDARY_S)
+#define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \
+ & INGPACKBOUNDARY_M)
#define EGRPCIEBOUNDARY_MASK 0x0000000eU
#define EGRPCIEBOUNDARY_SHIFT 1
#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 68eaa9c88c7d..3d06e77d7121 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -299,6 +299,14 @@ struct sge {
u16 timer_val[SGE_NTIMERS]; /* interrupt holdoff timer array */
u8 counter_val[SGE_NCOUNTERS]; /* interrupt RX threshold array */
+ /* Decoded Adapter Parameters.
+ */
+ u32 fl_pg_order; /* large page allocation size */
+ u32 stat_len; /* length of status page at ring end */
+ u32 pktshift; /* padding between CPL & packet data */
+ u32 fl_align; /* response queue message alignment */
+ u32 fl_starve_thres; /* Free List starvation threshold */
+
/*
* Reverse maps from Absolute Queue IDs to associated queue pointers.
* The absolute Queue IDs are in a compact range which start at a
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 85036e6b42c4..fdd078d7d82c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -51,14 +51,6 @@
#include "../cxgb4/t4_msg.h"
/*
- * Decoded Adapter Parameters.
- */
-static u32 FL_PG_ORDER; /* large page allocation size */
-static u32 STAT_LEN; /* length of status page at ring end */
-static u32 PKTSHIFT; /* padding between CPL and packet data */
-static u32 FL_ALIGN; /* response queue message alignment */
-
-/*
* Constants ...
*/
enum {
@@ -102,12 +94,6 @@ enum {
MAX_TIMER_TX_RECLAIM = 100,
/*
- * An FL with <= FL_STARVE_THRES buffers is starving and a periodic
- * timer will attempt to refill it.
- */
- FL_STARVE_THRES = 4,
-
- /*
* Suspend an Ethernet TX queue with fewer available descriptors than
* this. We always want to have room for a maximum sized packet:
* inline immediate data + MAX_SKB_FRAGS. This is the same as
@@ -264,15 +250,19 @@ static inline unsigned int fl_cap(const struct sge_fl *fl)
/**
* fl_starving - return whether a Free List is starving.
+ * @adapter: pointer to the adapter
* @fl: the Free List
*
* Tests specified Free List to see whether the number of buffers
* available to the hardware has fallen below our "starvation"
* threshold.
*/
-static inline bool fl_starving(const struct sge_fl *fl)
+static inline bool fl_starving(const struct adapter *adapter,
+ const struct sge_fl *fl)
{
- return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
+ const struct sge *s = &adapter->sge;
+
+ return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}
/**
@@ -457,13 +447,16 @@ static inline void reclaim_completed_tx(struct adapter *adapter,
/**
* get_buf_size - return the size of an RX Free List buffer.
+ * @adapter: pointer to the associated adapter
* @sdesc: pointer to the software buffer descriptor
*/
-static inline int get_buf_size(const struct rx_sw_desc *sdesc)
+static inline int get_buf_size(const struct adapter *adapter,
+ const struct rx_sw_desc *sdesc)
{
- return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
- ? (PAGE_SIZE << FL_PG_ORDER)
- : PAGE_SIZE;
+ const struct sge *s = &adapter->sge;
+
+ return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
+ ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
}
/**
@@ -483,7 +476,8 @@ static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
if (is_buf_mapped(sdesc))
dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
- get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+ get_buf_size(adapter, sdesc),
+ PCI_DMA_FROMDEVICE);
put_page(sdesc->page);
sdesc->page = NULL;
if (++fl->cidx == fl->size)
@@ -511,7 +505,8 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
if (is_buf_mapped(sdesc))
dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
- get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+ get_buf_size(adapter, sdesc),
+ PCI_DMA_FROMDEVICE);
sdesc->page = NULL;
if (++fl->cidx == fl->size)
fl->cidx = 0;
@@ -589,6 +584,7 @@ static inline void poison_buf(struct page *page, size_t sz)
static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
int n, gfp_t gfp)
{
+ struct sge *s = &adapter->sge;
struct page *page;
dma_addr_t dma_addr;
unsigned int cred = fl->avail;
@@ -608,12 +604,12 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
* If we don't support large pages, drop directly into the small page
* allocation code.
*/
- if (FL_PG_ORDER == 0)
+ if (s->fl_pg_order == 0)
goto alloc_small_pages;
while (n) {
page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
- FL_PG_ORDER);
+ s->fl_pg_order);
if (unlikely(!page)) {
/*
* We've failed in our attempt to allocate a "large
@@ -623,10 +619,10 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
fl->large_alloc_failed++;
break;
}
- poison_buf(page, PAGE_SIZE << FL_PG_ORDER);
+ poison_buf(page, PAGE_SIZE << s->fl_pg_order);
dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
- PAGE_SIZE << FL_PG_ORDER,
+ PAGE_SIZE << s->fl_pg_order,
PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
/*
@@ -637,7 +633,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
* because DMA mapping resources are typically
 * critical resources once they become scarce.
*/
- __free_pages(page, FL_PG_ORDER);
+ __free_pages(page, s->fl_pg_order);
goto out;
}
dma_addr |= RX_LARGE_BUF;
@@ -693,7 +689,7 @@ out:
fl->pend_cred += cred;
ring_fl_db(adapter, fl);
- if (unlikely(fl_starving(fl))) {
+ if (unlikely(fl_starving(adapter, fl))) {
smp_wmb();
set_bit(fl->cntxt_id, adapter->sge.starving_fl);
}
@@ -1468,6 +1464,8 @@ static void t4vf_pktgl_free(const struct pkt_gl *gl)
static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
const struct cpl_rx_pkt *pkt)
{
+ struct adapter *adapter = rxq->rspq.adapter;
+ struct sge *s = &adapter->sge;
int ret;
struct sk_buff *skb;
@@ -1478,8 +1476,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
return;
}
- copy_frags(skb, gl, PKTSHIFT);
- skb->len = gl->tot_len - PKTSHIFT;
+ copy_frags(skb, gl, s->pktshift);
+ skb->len = gl->tot_len - s->pktshift;
skb->data_len = skb->len;
skb->truesize += skb->data_len;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1516,6 +1514,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
(rspq->netdev->features & NETIF_F_RXCSUM);
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+ struct adapter *adapter = rspq->adapter;
+ struct sge *s = &adapter->sge;
/*
* If this is a good TCP packet and we have Generic Receive Offload
@@ -1537,7 +1537,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
rxq->stats.rx_drops++;
return 0;
}
- __skb_pull(skb, PKTSHIFT);
+ __skb_pull(skb, s->pktshift);
skb->protocol = eth_type_trans(skb, rspq->netdev);
skb_record_rx_queue(skb, rspq->idx);
rxq->stats.pkts++;
@@ -1648,6 +1648,8 @@ static inline void rspq_next(struct sge_rspq *rspq)
static int process_responses(struct sge_rspq *rspq, int budget)
{
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+ struct adapter *adapter = rspq->adapter;
+ struct sge *s = &adapter->sge;
int budget_left = budget;
while (likely(budget_left)) {
@@ -1697,7 +1699,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
BUG_ON(frag >= MAX_SKB_FRAGS);
BUG_ON(rxq->fl.avail == 0);
sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
- bufsz = get_buf_size(sdesc);
+ bufsz = get_buf_size(adapter, sdesc);
fp->page = sdesc->page;
fp->offset = rspq->offset;
fp->size = min(bufsz, len);
@@ -1726,7 +1728,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
*/
ret = rspq->handler(rspq, rspq->cur_desc, &gl);
if (likely(ret == 0))
- rspq->offset += ALIGN(fp->size, FL_ALIGN);
+ rspq->offset += ALIGN(fp->size, s->fl_align);
else
restore_rx_bufs(&gl, &rxq->fl, frag);
} else if (likely(rsp_type == RSP_TYPE_CPL)) {
@@ -1963,7 +1965,7 @@ static void sge_rx_timer_cb(unsigned long data)
* schedule napi but the FL is no longer starving.
* No biggie.
*/
- if (fl_starving(fl)) {
+ if (fl_starving(adapter, fl)) {
struct sge_eth_rxq *rxq;
rxq = container_of(fl, struct sge_eth_rxq, fl);
@@ -2047,6 +2049,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
int intr_dest,
struct sge_fl *fl, rspq_handler_t hnd)
{
+ struct sge *s = &adapter->sge;
struct port_info *pi = netdev_priv(dev);
struct fw_iq_cmd cmd, rpl;
int ret, iqandst, flsz = 0;
@@ -2117,7 +2120,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
sizeof(__be64), sizeof(struct rx_sw_desc),
- &fl->addr, &fl->sdesc, STAT_LEN);
+ &fl->addr, &fl->sdesc, s->stat_len);
if (!fl->desc) {
ret = -ENOMEM;
goto err;
@@ -2129,7 +2132,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
* free list ring) in Egress Queue Units.
*/
flsz = (fl->size / FL_PER_EQ_UNIT +
- STAT_LEN / EQ_UNIT);
+ s->stat_len / EQ_UNIT);
/*
* Fill in all the relevant firmware Ingress Queue Command
@@ -2217,6 +2220,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
struct net_device *dev, struct netdev_queue *devq,
unsigned int iqid)
{
+ struct sge *s = &adapter->sge;
int ret, nentries;
struct fw_eq_eth_cmd cmd, rpl;
struct port_info *pi = netdev_priv(dev);
@@ -2225,7 +2229,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
* Calculate the size of the hardware TX Queue (including the Status
* Page on the end of the TX Queue) in units of TX Descriptors.
*/
- nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
/*
* Allocate the hardware ring for the TX ring (with space for its
@@ -2234,7 +2238,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
sizeof(struct tx_desc),
sizeof(struct tx_sw_desc),
- &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+ &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
if (!txq->q.desc)
return -ENOMEM;
@@ -2307,8 +2311,10 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
*/
static void free_txq(struct adapter *adapter, struct sge_txq *tq)
{
+ struct sge *s = &adapter->sge;
+
dma_free_coherent(adapter->pdev_dev,
- tq->size * sizeof(*tq->desc) + STAT_LEN,
+ tq->size * sizeof(*tq->desc) + s->stat_len,
tq->desc, tq->phys_addr);
tq->cntxt_id = 0;
tq->sdesc = NULL;
@@ -2322,6 +2328,7 @@ static void free_txq(struct adapter *adapter, struct sge_txq *tq)
static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
struct sge_fl *fl)
{
+ struct sge *s = &adapter->sge;
unsigned int flid = fl ? fl->cntxt_id : 0xffff;
t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
@@ -2337,7 +2344,7 @@ static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
if (fl) {
free_rx_bufs(adapter, fl, fl->avail);
dma_free_coherent(adapter->pdev_dev,
- fl->size * sizeof(*fl->desc) + STAT_LEN,
+ fl->size * sizeof(*fl->desc) + s->stat_len,
fl->desc, fl->addr);
kfree(fl->sdesc);
fl->sdesc = NULL;
@@ -2423,6 +2430,7 @@ int t4vf_sge_init(struct adapter *adapter)
u32 fl0 = sge_params->sge_fl_buffer_size[0];
u32 fl1 = sge_params->sge_fl_buffer_size[1];
struct sge *s = &adapter->sge;
+ unsigned int ingpadboundary, ingpackboundary;
/*
* Start by vetting the basic SGE parameters which have been set up by
@@ -2443,12 +2451,48 @@ int t4vf_sge_init(struct adapter *adapter)
* Now translate the adapter parameters into our internal forms.
*/
if (fl1)
- FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
- STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
- ? 128 : 64);
- PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
- FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
- SGE_INGPADBOUNDARY_SHIFT);
+ s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+ s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
+ ? 128 : 64);
+ s->pktshift = PKTSHIFT_GET(sge_params->sge_control);
+
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately. The actual Ingress Packet Data alignment boundary
+ * within Packed Buffer Mode is the maximum of these two
+ * specifications. (Note that it makes no real practical sense to
+	 * have the Padding Boundary be larger than the Packing Boundary but you
+	 * could set the chip up that way and, in fact, legacy T4 code would
+	 * end up doing this because it would initialize the Padding Boundary and
+ * leave the Packing Boundary initialized to 0 (16 bytes).)
+ */
+ ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
+ X_INGPADBOUNDARY_SHIFT);
+ if (is_t4(adapter->params.chip)) {
+ s->fl_align = ingpadboundary;
+ } else {
+ /* T5 has a different interpretation of one of the PCIe Packing
+ * Boundary values.
+ */
+ ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2);
+ if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+ ingpackboundary = 16;
+ else
+ ingpackboundary = 1 << (ingpackboundary +
+ INGPACKBOUNDARY_SHIFT_X);
+
+ s->fl_align = max(ingpadboundary, ingpackboundary);
+ }
+
+ /* A FL with <= fl_starve_thres buffers is starving and a periodic
+ * timer will attempt to refill it. This needs to be larger than the
+ * SGE's Egress Congestion Threshold. If it isn't, then we can get
+ * stuck waiting for new packets while the SGE is waiting for us to
+ * give it more Free List entries. (Note that the SGE's Egress
+ * Congestion Threshold is in units of 2 Free List pointers.)
+ */
+ s->fl_starve_thres
+ = EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1;
/*
* Set up tasklet timers.
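As a companion to the hunks above, here is a standalone sketch of the arithmetic the new t4vf_sge_init() performs when deriving fl_align and fl_starve_thres. The field values and shift amounts below are illustrative stand-ins, not the real register encodings.

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	/* Hypothetical decoded register fields -- not real encodings. */
	unsigned int pad_field = 0;	/* INGPADBOUNDARY: 0 -> 32 bytes   */
	unsigned int pack_field = 1;	/* INGPACKBOUNDARY on a T5 part    */
	unsigned int egrthreshold = 64;	/* EGRTHRESHOLD from SGE_CONM_CTRL */

	unsigned int ingpadboundary = 1u << (pad_field + 5);
	unsigned int ingpackboundary = pack_field == 0
				       ? 16 : 1u << (pack_field + 5);

	/* T4 would use ingpadboundary alone; T5 takes the maximum. */
	unsigned int fl_align = MAX(ingpadboundary, ingpackboundary);

	/* The congestion threshold counts pairs of FL pointers, so the
	 * starvation threshold is doubled and bumped by one to sit
	 * strictly above it.
	 */
	unsigned int fl_starve_thres = egrthreshold * 2 + 1;

	printf("fl_align=%u fl_starve_thres=%u\n", fl_align, fl_starve_thres);
	return 0;
}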
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 95df61dcb4ce..4b6a6d14d86d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -134,11 +134,13 @@ struct dev_params {
*/
struct sge_params {
u32 sge_control; /* padding, boundaries, lengths, etc. */
+ u32 sge_control2; /* T5: more of the same */
u32 sge_host_page_size; /* RDMA page sizes */
u32 sge_queues_per_page; /* RDMA queues/page */
u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */
u32 sge_fl_buffer_size[16]; /* free list buffer sizes */
u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */
+ u32 sge_congestion_control; /* congestion thresholds, etc. */
u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */
u32 sge_timer_value_2_and_3;
u32 sge_timer_value_4_and_5;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index e984fdc48ba2..1e896b923234 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -468,12 +468,38 @@ int t4vf_get_sge_params(struct adapter *adapter)
sge_params->sge_timer_value_2_and_3 = vals[5];
sge_params->sge_timer_value_4_and_5 = vals[6];
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+	 * separately with the Padding Boundary in SGE_CONTROL and the Packing
+	 * Boundary in SGE_CONTROL2.  So for T5 and later we need to grab
+	 * SGE_CONTROL2 in order to determine how ingress packet data will be
+	 * laid out in Packed Buffer Mode.  Unfortunately, older versions of
+	 * the firmware won't let us retrieve SGE_CONTROL2, so if we get a
+	 * failure grabbing it we throw an error since we can't figure out the
+ * right value.
+ */
+ if (!is_t4(adapter->params.chip)) {
+ params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_CONTROL2_A));
+ v = t4vf_query_params(adapter, 1, params, vals);
+ if (v != FW_SUCCESS) {
+ dev_err(adapter->pdev_dev,
+ "Unable to get SGE Control2; "
+ "probably old firmware.\n");
+ return v;
+ }
+ sge_params->sge_control2 = vals[0];
+ }
+
params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD));
- v = t4vf_query_params(adapter, 1, params, vals);
+ params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ FW_PARAMS_PARAM_XYZ(SGE_CONM_CTRL));
+ v = t4vf_query_params(adapter, 2, params, vals);
if (v)
return v;
sge_params->sge_ingress_rx_threshold = vals[0];
+ sge_params->sge_congestion_control = vals[1];
return 0;
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 180e53fa628f..73cf1653a4a3 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -940,18 +940,8 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
struct vnic_rq_buf *buf = rq->to_use;
if (buf->os_buf) {
- buf = buf->next;
- rq->to_use = buf;
- rq->ring.desc_avail--;
- if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
- /* Adding write memory barrier prevents compiler and/or
- * CPU reordering, thus avoiding descriptor posting
- * before descriptor is initialized. Otherwise, hardware
- * can read stale descriptor fields.
- */
- wmb();
- iowrite32(buf->index, &rq->ctrl->posted_index);
- }
+ enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
+ buf->len);
return 0;
}
@@ -1037,7 +1027,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
enic->rq_truncated_pkts++;
}
+ pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
+ buf->os_buf = NULL;
return;
}
@@ -1088,7 +1081,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
/* Buffer overflow
*/
+ pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
+ buf->os_buf = NULL;
}
}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 50a851db2852..3dca494797bd 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -298,6 +298,16 @@ static void *swap_buffer(void *bufaddr, int len)
return bufaddr;
}
+static void swap_buffer2(void *dst_buf, void *src_buf, int len)
+{
+ int i;
+ unsigned int *src = src_buf;
+ unsigned int *dst = dst_buf;
+
+ for (i = 0; i < len; i += 4, src++, dst++)
+ *dst = swab32p(src);
+}
+
static void fec_dump(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1307,7 +1317,7 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
}
static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
- struct bufdesc *bdp, u32 length)
+ struct bufdesc *bdp, u32 length, bool swap)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct sk_buff *new_skb;
@@ -1322,7 +1332,10 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
FEC_ENET_RX_FRSIZE - fep->rx_align,
DMA_FROM_DEVICE);
- memcpy(new_skb->data, (*skb)->data, length);
+ if (!swap)
+ memcpy(new_skb->data, (*skb)->data, length);
+ else
+ swap_buffer2(new_skb->data, (*skb)->data, length);
*skb = new_skb;
return true;
@@ -1352,6 +1365,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
u16 vlan_tag;
int index = 0;
bool is_copybreak;
+ bool need_swap = id_entry->driver_data & FEC_QUIRK_SWAP_FRAME;
#ifdef CONFIG_M532x
flush_cache_all();
@@ -1415,7 +1429,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
* include that when passing upstream as it messes up
* bridging applications.
*/
- is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4);
+ is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
+ need_swap);
if (!is_copybreak) {
skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
if (unlikely(!skb_new)) {
@@ -1430,7 +1445,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
prefetch(skb->data - NET_IP_ALIGN);
skb_put(skb, pkt_len - 4);
data = skb->data;
- if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ if (!is_copybreak && need_swap)
swap_buffer(data, pkt_len);
/* Extract the enhanced buffer descriptor */
@@ -3343,12 +3358,11 @@ static int __maybe_unused fec_suspend(struct device *dev)
netif_device_detach(ndev);
netif_tx_unlock_bh(ndev);
fec_stop(ndev);
+ fec_enet_clk_enable(ndev, false);
+ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
}
rtnl_unlock();
- fec_enet_clk_enable(ndev, false);
- pinctrl_pm_select_sleep_state(&fep->pdev->dev);
-
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
@@ -3367,13 +3381,14 @@ static int __maybe_unused fec_resume(struct device *dev)
return ret;
}
- pinctrl_pm_select_default_state(&fep->pdev->dev);
- ret = fec_enet_clk_enable(ndev, true);
- if (ret)
- goto failed_clk;
-
rtnl_lock();
if (netif_running(ndev)) {
+ pinctrl_pm_select_default_state(&fep->pdev->dev);
+ ret = fec_enet_clk_enable(ndev, true);
+ if (ret) {
+ rtnl_unlock();
+ goto failed_clk;
+ }
fec_restart(ndev);
netif_tx_lock_bh(ndev);
netif_device_attach(ndev);
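The swap_buffer2() helper added above byte-swaps each 32-bit word while copying, so the copybreak receive path no longer needs a copy followed by an in-place swap_buffer() pass. A userspace sketch of the same idea, with a plain-C stand-in for the kernel's swab32p():

#include <stdio.h>
#include <stdint.h>

static uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00) |
	       ((x << 8) & 0x00ff0000) | (x << 24);
}

/* copy 'len' bytes (a multiple of 4) while swapping each word */
static void swap_copy(void *dst_buf, const void *src_buf, int len)
{
	const uint32_t *src = src_buf;
	uint32_t *dst = dst_buf;
	int i;

	for (i = 0; i < len; i += 4)
		*dst++ = swab32(*src++);
}

int main(void)
{
	uint32_t src[2] = { 0x11223344, 0xaabbccdd };
	uint32_t dst[2];

	swap_copy(dst, src, sizeof(src));
	printf("%08x %08x\n", dst[0], dst[1]);	/* 44332211 ddccbbaa */
	return 0;
}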
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index d47b19f27c35..28b81ae09b5a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -635,7 +635,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
**/
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
{
- s32 status;
u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
bool autoneg = false;
ixgbe_link_speed speed;
@@ -700,8 +699,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
hw->phy.ops.write_reg(hw, MDIO_CTRL1,
MDIO_MMD_AN, autoneg_reg);
-
- return status;
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index b151a949f352..d44560d1d268 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1047,7 +1047,6 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
int tx_index;
struct tx_desc *desc;
u32 cmd_sts;
- struct sk_buff *skb;
tx_index = txq->tx_used_desc;
desc = &txq->tx_desc_area[tx_index];
@@ -1066,19 +1065,22 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
reclaimed++;
txq->tx_desc_count--;
- skb = NULL;
- if (cmd_sts & TX_LAST_DESC)
- skb = __skb_dequeue(&txq->tx_skb);
+ if (!IS_TSO_HEADER(txq, desc->buf_ptr))
+ dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
+ desc->byte_cnt, DMA_TO_DEVICE);
+
+ if (cmd_sts & TX_ENABLE_INTERRUPT) {
+ struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
+
+ if (!WARN_ON(!skb))
+ dev_kfree_skb(skb);
+ }
if (cmd_sts & ERROR_SUMMARY) {
netdev_info(mp->dev, "tx error\n");
mp->dev->stats.tx_errors++;
}
- if (!IS_TSO_HEADER(txq, desc->buf_ptr))
- dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
- desc->byte_cnt, DMA_TO_DEVICE);
- dev_kfree_skb(skb);
}
__netif_tx_unlock_bh(nq);
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index ece83f101526..fdf3e382e464 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -1692,6 +1692,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
{
struct mvpp2_prs_entry *pe;
int tid_aux, tid;
+ int ret = 0;
pe = mvpp2_prs_vlan_find(priv, tpid, ai);
@@ -1723,8 +1724,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
break;
}
- if (tid <= tid_aux)
- return -EINVAL;
+ if (tid <= tid_aux) {
+ ret = -EINVAL;
+ goto error;
+ }
memset(pe, 0 , sizeof(struct mvpp2_prs_entry));
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
@@ -1756,9 +1759,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
mvpp2_prs_hw_write(priv, pe);
+error:
kfree(pe);
- return 0;
+ return ret;
}
/* Get first free double vlan ai number */
@@ -1821,7 +1825,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
unsigned int port_map)
{
struct mvpp2_prs_entry *pe;
- int tid_aux, tid, ai;
+ int tid_aux, tid, ai, ret = 0;
pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
@@ -1838,8 +1842,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
/* Set ai value for new double vlan entry */
ai = mvpp2_prs_double_vlan_ai_free_get(priv);
- if (ai < 0)
- return ai;
+ if (ai < 0) {
+ ret = ai;
+ goto error;
+ }
/* Get first single/triple vlan tid */
for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
@@ -1859,8 +1865,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
break;
}
- if (tid >= tid_aux)
- return -ERANGE;
+ if (tid >= tid_aux) {
+ ret = -ERANGE;
+ goto error;
+ }
memset(pe, 0, sizeof(struct mvpp2_prs_entry));
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
@@ -1887,8 +1895,9 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
mvpp2_prs_tcam_port_map_set(pe, port_map);
mvpp2_prs_hw_write(priv, pe);
+error:
kfree(pe);
- return 0;
+ return ret;
}
/* IPv4 header parsing for fragmentation and L4 offset */
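Both mvpp2 hunks above convert early returns into jumps to a single cleanup label so the parser entry allocated at the top of the function is freed on every path. A minimal illustration of the pattern, with generic names standing in for the driver's:

#include <stdlib.h>

struct entry { int id; };

static int add_entry(int want_fail)
{
	struct entry *e = malloc(sizeof(*e));
	int ret = 0;

	if (!e)
		return -1;

	if (want_fail) {	/* stands in for the -EINVAL/-ERANGE paths */
		ret = -1;
		goto error;
	}

	e->id = 42;		/* stands in for programming the entry */
error:
	free(e);		/* reached on success and failure alike */
	return ret;
}

int main(void)
{
	add_entry(0);
	return add_entry(1) == -1 ? 0 : 1;
}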
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f3032fec8fce..02266e3de514 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2281,8 +2281,16 @@ static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
VXLAN_STEER_BY_OUTER_MAC, 1);
out:
- if (ret)
+ if (ret) {
en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
+ return;
+ }
+
+ /* set offloads */
+ priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
+ priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
}
static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2290,6 +2298,11 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
int ret;
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
vxlan_del_task);
+ /* unset offloads */
+ priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
+ priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
+ priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2568,13 +2581,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
dev->priv_flags |= IFF_UNICAST_FLT;
- if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
- dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
- NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
- dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
- dev->features |= NETIF_F_GSO_UDP_TUNNEL;
- }
-
mdev->pndev[port] = dev;
netif_carrier_off(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index a278238a2db6..ad2c96a02a53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -374,15 +374,14 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
name, pci_name(dev->pdev));
eq->eqn = out.eq_number;
+ eq->irqn = vecidx;
+ eq->dev = dev;
+ eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
eq->name, eq);
if (err)
goto err_eq;
- eq->irqn = vecidx;
- eq->dev = dev;
- eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
-
err = mlx5_debug_eq_add(dev, eq);
if (err)
goto err_irq;
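The mlx5 reordering matters because a shared interrupt line may fire the moment request_irq() returns, so every field the handler reads has to be initialized first. A toy model of the hazard, with registration reduced to a direct call:

#include <stdio.h>

struct eq { int irqn; const char *doorbell; };

static void handler(struct eq *eq)
{
	printf("irq %d, doorbell %s\n", eq->irqn, eq->doorbell);
}

static void register_irq(void (*h)(struct eq *), struct eq *eq)
{
	h(eq);	/* like a real IRQ, may run immediately on registration */
}

int main(void)
{
	struct eq eq;

	eq.irqn = 3;			/* initialize state first ...  */
	eq.doorbell = "mapped";
	register_irq(handler, &eq);	/* ... then expose the handler */
	return 0;
}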
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 3d8e8e489b2d..71b10b210792 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -864,14 +864,14 @@ static int init_one(struct pci_dev *pdev,
dev->profile = &profile[prof_sel];
dev->event = mlx5_core_event;
+ INIT_LIST_HEAD(&priv->ctx_list);
+ spin_lock_init(&priv->ctx_lock);
err = mlx5_dev_init(dev, pdev);
if (err) {
dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err);
goto out;
}
- INIT_LIST_HEAD(&priv->ctx_list);
- spin_lock_init(&priv->ctx_lock);
err = mlx5_register_device(dev);
if (err) {
dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 0b2a1ccd276d..613037584d08 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2762,7 +2762,8 @@ netxen_fw_poll_work(struct work_struct *work)
if (test_bit(__NX_RESETTING, &adapter->state))
goto reschedule;
- if (test_bit(__NX_DEV_UP, &adapter->state)) {
+ if (test_bit(__NX_DEV_UP, &adapter->state) &&
+ !(adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)) {
if (!adapter->has_link_events) {
netxen_nic_handle_phy_intr(adapter);
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index f3a47147937d..9a49f42ac2ba 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -5,7 +5,6 @@
config NET_VENDOR_QUALCOMM
bool "Qualcomm devices"
default y
- depends on SPI_MASTER && OF_GPIO
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -20,7 +19,7 @@ if NET_VENDOR_QUALCOMM
config QCA7000
tristate "Qualcomm Atheros QCA7000 support"
- depends on SPI_MASTER && OF_GPIO
+ depends on SPI_MASTER && OF
---help---
This SPI protocol driver supports the Qualcomm Atheros QCA7000.
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 002d4cdc319f..a77f05ce8325 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -180,7 +180,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
EFX_MAX_CHANNELS,
resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
(EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
- BUG_ON(efx->max_channels == 0);
+ if (WARN_ON(efx->max_channels == 0))
+ return -EIO;
nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
if (!nic_data)
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 2c62208077fe..6cc3cf6f17c8 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2243,9 +2243,10 @@ static int smc_drv_probe(struct platform_device *pdev)
const struct of_device_id *match = NULL;
struct smc_local *lp;
struct net_device *ndev;
- struct resource *res, *ires;
+ struct resource *res;
unsigned int __iomem *addr;
unsigned long irq_flags = SMC_IRQ_FLAGS;
+ unsigned long irq_resflags;
int ret;
ndev = alloc_etherdev(sizeof(struct smc_local));
@@ -2337,16 +2338,19 @@ static int smc_drv_probe(struct platform_device *pdev)
goto out_free_netdev;
}
- ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!ires) {
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq <= 0) {
ret = -ENODEV;
goto out_release_io;
}
-
- ndev->irq = ires->start;
-
- if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
- irq_flags = ires->flags & IRQF_TRIGGER_MASK;
+ /*
+ * If this platform does not specify any special irqflags, or if
+ * the resource supplies a trigger, override the irqflags with
+ * the trigger flags from the resource.
+ */
+ irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
+ if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
+ irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
ret = smc_request_attrib(pdev, ndev);
if (ret)
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index affb29da353e..77ed74561e5f 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1342,6 +1342,42 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
spin_unlock(&pdata->mac_lock);
}
+static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
+{
+ int rc = 0;
+
+ if (!pdata->phy_dev)
+ return rc;
+
+	/* If the internal PHY is in General Power-Down mode, everything except
+	 * the management interface is powered down and stays that way as long
+	 * as PHY register bit 0.11 is HIGH.
+	 *
+	 * In that case, clear bit 0.11 so the PHY powers up and we can access
+	 * the PHY registers.
+ */
+ rc = phy_read(pdata->phy_dev, MII_BMCR);
+ if (rc < 0) {
+ SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
+ return rc;
+ }
+
+	/* If the PHY general power-down bit is not set, it is not necessary
+	 * to disable the general power-down mode.
+ */
+ if (rc & BMCR_PDOWN) {
+ rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN);
+ if (rc < 0) {
+ SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
+ return rc;
+ }
+
+ usleep_range(1000, 1500);
+ }
+
+ return 0;
+}
+
static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
{
int rc = 0;
@@ -1356,12 +1392,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
return rc;
}
- /*
- * If energy is detected the PHY is already awake so is not necessary
- * to disable the energy detect power-down mode.
- */
- if ((rc & MII_LAN83C185_EDPWRDOWN) &&
- !(rc & MII_LAN83C185_ENERGYON)) {
+ /* Only disable if energy detect mode is already enabled */
+ if (rc & MII_LAN83C185_EDPWRDOWN) {
/* Disable energy detect mode for this SMSC Transceivers */
rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
rc & (~MII_LAN83C185_EDPWRDOWN));
@@ -1370,8 +1402,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
return rc;
}
-
- mdelay(1);
+			/* Allow the PHY to wake up */
+ mdelay(2);
}
return 0;
@@ -1393,7 +1425,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
/* Only enable if energy detect mode is already disabled */
if (!(rc & MII_LAN83C185_EDPWRDOWN)) {
- mdelay(100);
/* Enable energy detect mode for this SMSC Transceivers */
rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
rc | MII_LAN83C185_EDPWRDOWN);
@@ -1402,8 +1433,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
return rc;
}
-
- mdelay(1);
}
return 0;
}
@@ -1415,6 +1444,16 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
int ret;
/*
+ * Make sure to power-up the PHY chip before doing a reset, otherwise
+ * the reset fails.
+ */
+ ret = smsc911x_phy_general_power_up(pdata);
+ if (ret) {
+ SMSC_WARN(pdata, drv, "Failed to power-up the PHY chip");
+ return ret;
+ }
+
+ /*
 * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that
 * is initialized in an Energy Detect Power-Down mode that prevents
 * the MAC chip from being software reset. So we have to wake up the PHY
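A sketch of the read-modify-write that the new smsc911x_phy_general_power_up() performs; a mock register stands in for the MII accessors, and BMCR_PDOWN is the standard bit 0.11:

#include <stdio.h>

#define BMCR_PDOWN 0x0800	/* standard MII power-down bit (0.11) */

static int mii_reg = 0x0800 | 0x0100;	/* pretend: powered down */

static int phy_read(void) { return mii_reg; }
static void phy_write(int v) { mii_reg = v; }

int main(void)
{
	int rc = phy_read();

	if (rc & BMCR_PDOWN) {
		phy_write(rc & ~BMCR_PDOWN);
		/* the driver sleeps ~1ms here so the PHY can power up */
	}
	printf("BMCR now 0x%04x\n", phy_read());
	return 0;
}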
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6f77a46c7e2c..18c46bb0f3bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -276,6 +276,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
bool stmmac_eee_init(struct stmmac_priv *priv)
{
char *phy_bus_name = priv->plat->phy_bus_name;
+ unsigned long flags;
bool ret = false;
/* Using PCS we cannot deal with the phy registers at this stage
@@ -300,6 +301,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
* changed).
 * In that case the driver disables its own timers.
*/
+ spin_lock_irqsave(&priv->lock, flags);
if (priv->eee_active) {
pr_debug("stmmac: disable EEE\n");
del_timer_sync(&priv->eee_ctrl_timer);
@@ -307,9 +309,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
tx_lpi_timer);
}
priv->eee_active = 0;
+ spin_unlock_irqrestore(&priv->lock, flags);
goto out;
}
/* Activate the EEE and start timers */
+ spin_lock_irqsave(&priv->lock, flags);
if (!priv->eee_active) {
priv->eee_active = 1;
init_timer(&priv->eee_ctrl_timer);
@@ -325,9 +329,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
/* Set HW EEE according to the speed */
priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
- pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
-
ret = true;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
}
out:
return ret;
@@ -760,12 +765,12 @@ static void stmmac_adjust_link(struct net_device *dev)
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
/* At this stage, it could be needed to setup the EEE or adjust some
* MAC related HW registers.
*/
priv->eee_enabled = stmmac_eee_init(priv);
-
- spin_unlock_irqrestore(&priv->lock, flags);
}
/**
@@ -959,12 +964,12 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
}
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
- int i)
+ int i, gfp_t flags)
{
struct sk_buff *skb;
skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
- GFP_KERNEL);
+ flags);
if (!skb) {
pr_err("%s: Rx init fails; skb is NULL\n", __func__);
return -ENOMEM;
@@ -1006,7 +1011,7 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
 * and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int init_dma_desc_rings(struct net_device *dev)
+static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
int i;
struct stmmac_priv *priv = netdev_priv(dev);
@@ -1041,7 +1046,7 @@ static int init_dma_desc_rings(struct net_device *dev)
else
p = priv->dma_rx + i;
- ret = stmmac_init_rx_buffers(priv, p, i);
+ ret = stmmac_init_rx_buffers(priv, p, i, flags);
if (ret)
goto err_init_rx_buffers;
@@ -1647,11 +1652,6 @@ static int stmmac_hw_setup(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
- ret = init_dma_desc_rings(dev);
- if (ret < 0) {
- pr_err("%s: DMA descriptors initialization failed\n", __func__);
- return ret;
- }
/* DMA initialization and SW reset */
ret = stmmac_init_dma_engine(priv);
if (ret < 0) {
@@ -1705,10 +1705,6 @@ static int stmmac_hw_setup(struct net_device *dev)
}
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
- priv->eee_enabled = stmmac_eee_init(priv);
-
- stmmac_init_tx_coalesce(priv);
-
if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
priv->rx_riwt = MAX_DMA_RIWT;
priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
@@ -1761,12 +1757,20 @@ static int stmmac_open(struct net_device *dev)
goto dma_desc_error;
}
+ ret = init_dma_desc_rings(dev, GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("%s: DMA descriptors initialization failed\n", __func__);
+ goto init_error;
+ }
+
ret = stmmac_hw_setup(dev);
if (ret < 0) {
pr_err("%s: Hw setup failed\n", __func__);
goto init_error;
}
+ stmmac_init_tx_coalesce(priv);
+
if (priv->phydev)
phy_start(priv->phydev);
@@ -1894,7 +1898,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int nopaged_len = skb_headlen(skb);
unsigned int enh_desc = priv->plat->enh_desc;
+ spin_lock(&priv->tx_lock);
+
if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
+ spin_unlock(&priv->tx_lock);
if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev);
/* This is a hard error, log it. */
@@ -1903,8 +1910,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- spin_lock(&priv->tx_lock);
-
if (priv->tx_path_in_lpi_mode)
stmmac_disable_eee_mode(priv);
@@ -2025,6 +2030,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
dma_map_err:
+ spin_unlock(&priv->tx_lock);
dev_err(priv->device, "Tx dma map failed\n");
dev_kfree_skb(skb);
priv->dev->stats.tx_dropped++;
@@ -2281,9 +2287,7 @@ static void stmmac_set_rx_mode(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- spin_lock(&priv->lock);
priv->hw->mac->set_filter(priv->hw, dev);
- spin_unlock(&priv->lock);
}
/**
@@ -2950,7 +2954,7 @@ int stmmac_suspend(struct net_device *ndev)
stmmac_set_mac(priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
- clk_disable_unprepare(priv->stmmac_clk);
+ clk_disable(priv->stmmac_clk);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -2982,7 +2986,7 @@ int stmmac_resume(struct net_device *ndev)
} else {
pinctrl_pm_select_default_state(priv->device);
/* enable the clk prevously disabled */
- clk_prepare_enable(priv->stmmac_clk);
+ clk_enable(priv->stmmac_clk);
/* reset the phy so that it's ready */
if (priv->mii)
stmmac_mdio_reset(priv->mii);
@@ -2990,7 +2994,9 @@ int stmmac_resume(struct net_device *ndev)
netif_device_attach(ndev);
+ init_dma_desc_rings(ndev, GFP_ATOMIC);
stmmac_hw_setup(ndev);
+ stmmac_init_tx_coalesce(priv);
napi_enable(&priv->napi);
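Several stmmac hunks above share one motivation: stmmac_eee_init() now takes priv->lock itself, so callers such as stmmac_adjust_link() must drop that (non-recursive) lock before the call. A pthread model of the self-deadlock being avoided:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void eee_init(void)
{
	pthread_mutex_lock(&lock);	/* callee takes the lock itself */
	/* ... toggle EEE state ... */
	pthread_mutex_unlock(&lock);
}

static void adjust_link(void)
{
	pthread_mutex_lock(&lock);
	/* ... update link state ... */
	pthread_mutex_unlock(&lock);	/* release before calling out */

	eee_init();
}

int main(void)
{
	adjust_link();
	printf("no self-deadlock\n");
	return 0;
}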
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 72c8525d5457..9c014803b03b 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1262,6 +1262,7 @@ static void happy_meal_init_rings(struct happy_meal *hp)
HMD(("init rxring, "));
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb;
+ u32 mapping;
skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
if (!skb) {
@@ -1272,10 +1273,16 @@ static void happy_meal_init_rings(struct happy_meal *hp)
/* Because we reserve afterwards. */
skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+ mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(hp->dma_dev, mapping)) {
+ dev_kfree_skb_any(skb);
+ hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
+ continue;
+ }
hme_write_rxd(hp, &hb->happy_meal_rxd[i],
(RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
- dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
- DMA_FROM_DEVICE));
+ mapping);
skb_reserve(skb, RX_OFFSET);
}
@@ -2020,6 +2027,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
skb = hp->rx_skbs[elem];
if (len > RX_COPY_THRESHOLD) {
struct sk_buff *new_skb;
+ u32 mapping;
/* Now refill the entry, if we can. */
new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
@@ -2027,13 +2035,21 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
drops++;
goto drop_it;
}
+ skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+ mapping = dma_map_single(hp->dma_dev, new_skb->data,
+ RX_BUF_ALLOC_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
+ dev_kfree_skb_any(new_skb);
+ drops++;
+ goto drop_it;
+ }
+
dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
hp->rx_skbs[elem] = new_skb;
- skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
hme_write_rxd(hp, this,
(RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
- dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE,
- DMA_FROM_DEVICE));
+ mapping);
skb_reserve(new_skb, RX_OFFSET);
/* Trim the original skb for the netif. */
@@ -2248,6 +2264,25 @@ static void happy_meal_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
+static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
+ u32 first_len, u32 first_entry, u32 entry)
+{
+ struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
+
+ dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
+
+ first_entry = NEXT_TX(first_entry);
+ while (first_entry != entry) {
+ struct happy_meal_txd *this = &txbase[first_entry];
+ u32 addr, len;
+
+ addr = hme_read_desc32(hp, &this->tx_addr);
+ len = hme_read_desc32(hp, &this->tx_flags);
+ len &= TXFLAG_SIZE;
+		dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);
+
+		first_entry = NEXT_TX(first_entry);
+	}
+}
+
static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -2284,6 +2319,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
len = skb->len;
mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
+ goto out_dma_error;
tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
(tx_flags | (len & TXFLAG_SIZE)),
@@ -2299,6 +2336,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
first_len = skb_headlen(skb);
first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
+ goto out_dma_error;
entry = NEXT_TX(entry);
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -2308,6 +2347,11 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
len = skb_frag_size(this_frag);
mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
0, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
+ unmap_partial_tx_skb(hp, first_mapping, first_len,
+ first_entry, entry);
+ goto out_dma_error;
+ }
this_txflags = tx_flags;
if (frag == skb_shinfo(skb)->nr_frags - 1)
this_txflags |= TXFLAG_EOP;
@@ -2333,6 +2377,14 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
return NETDEV_TX_OK;
+
+out_dma_error:
+ hp->tx_skbs[hp->tx_new] = NULL;
+ spin_unlock_irq(&hp->happy_lock);
+
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
}
static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
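unmap_partial_tx_skb() walks the TX ring from the slot after the head fragment up to (but not including) the slot whose mapping failed, releasing each mapping on the way. A toy walk, with a modulo standing in for the driver's NEXT_TX() wraparound:

#include <stdio.h>

#define RING_SIZE 8
#define NEXT_TX(i) (((i) + 1) % RING_SIZE)

int main(void)
{
	unsigned int first_entry = 6, entry = 2;	/* walk wraps past 7 */

	first_entry = NEXT_TX(first_entry);
	while (first_entry != entry) {
		printf("unmap slot %u\n", first_entry);	/* 7, 0, 1 */
		first_entry = NEXT_TX(first_entry);
	}
	return 0;
}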
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 3ae83879a75f..097ebe7077ac 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -785,7 +785,6 @@ int cpsw_ale_destroy(struct cpsw_ale *ale)
{
if (!ale)
return -EINVAL;
- cpsw_ale_stop(ale);
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
kfree(ale);
return 0;
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index ab92f67da035..4a4388b813ac 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -264,7 +264,7 @@ static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
switch (ptp_class & PTP_CLASS_PMASK) {
case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+ offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
break;
case PTP_CLASS_IPV6:
offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
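The one-line cpts fix (and the identical dp83640 fixes below) reads the IPv4 header-length nibble at the header's actual position, which differs from the start of the frame when VLAN tags precede it. A worked example with fabricated header bytes; the IPV4_HLEN() macro here mimics the ptp_classify.h convention of indexing 14 bytes past its argument:

#include <stdio.h>

#define ETH_HLEN 14
#define UDP_HLEN 8
#define IPV4_HLEN(p) (((p)[ETH_HLEN] & 0x0f) * 4)

int main(void)
{
	unsigned char frame[64] = { 0 };
	int offset = 4;			/* one VLAN tag already skipped */

	frame[offset + ETH_HLEN] = 0x45;	/* IPv4, IHL=5 -> 20 bytes */

	/* the fix: index from data + offset, not from data */
	offset += ETH_HLEN + IPV4_HLEN(frame + offset) + UDP_HLEN;
	printf("PTP header at offset %d\n", offset);	/* 46 */
	return 0;
}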
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 6f226de655a4..880cc090dc44 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -629,6 +629,8 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
vnet_hdr->csum_start = skb_checksum_start_offset(skb);
+ if (vlan_tx_tag_present(skb))
+ vnet_hdr->csum_start += VLAN_HLEN;
vnet_hdr->csum_offset = skb->csum_offset;
} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 2954052706e8..e22e602beef3 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -791,7 +791,7 @@ static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
switch (type & PTP_CLASS_PMASK) {
case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+ offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
break;
case PTP_CLASS_IPV6:
offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
@@ -934,7 +934,7 @@ static int is_sync(struct sk_buff *skb, int type)
switch (type & PTP_CLASS_PMASK) {
case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+ offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
break;
case PTP_CLASS_IPV6:
offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1dfffdc9dfc3..767cd110f496 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -352,6 +352,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *mii_data = if_mii(ifr);
u16 val = mii_data->val_in;
+ bool change_autoneg = false;
switch (cmd) {
case SIOCGMIIPHY:
@@ -367,22 +368,29 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
if (mii_data->phy_id == phydev->addr) {
switch (mii_data->reg_num) {
case MII_BMCR:
- if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0)
+ if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
+ if (phydev->autoneg == AUTONEG_ENABLE)
+ change_autoneg = true;
phydev->autoneg = AUTONEG_DISABLE;
- else
+ if (val & BMCR_FULLDPLX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+ if (val & BMCR_SPEED1000)
+ phydev->speed = SPEED_1000;
+ else if (val & BMCR_SPEED100)
+ phydev->speed = SPEED_100;
+				else
+					phydev->speed = SPEED_10;
+			} else {
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ change_autoneg = true;
phydev->autoneg = AUTONEG_ENABLE;
- if (!phydev->autoneg && (val & BMCR_FULLDPLX))
- phydev->duplex = DUPLEX_FULL;
- else
- phydev->duplex = DUPLEX_HALF;
- if (!phydev->autoneg && (val & BMCR_SPEED1000))
- phydev->speed = SPEED_1000;
- else if (!phydev->autoneg &&
- (val & BMCR_SPEED100))
- phydev->speed = SPEED_100;
+ }
break;
case MII_ADVERTISE:
- phydev->advertising = val;
+ phydev->advertising = mii_adv_to_ethtool_adv_t(val);
+ change_autoneg = true;
break;
default:
/* do nothing */
@@ -396,6 +404,10 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
if (mii_data->reg_num == MII_BMCR &&
val & BMCR_RESET)
return phy_init_hw(phydev);
+
+ if (change_autoneg)
+ return phy_start_aneg(phydev);
+
return 0;
case SIOCSHWTSTAMP:
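A standalone decode of the forced-mode BMCR bits, mirroring what the reworked SIOCSMIIREG path now records before restarting autonegotiation; the bit values are the standard MII definitions:

#include <stdio.h>

#define BMCR_FULLDPLX  0x0100
#define BMCR_SPEED1000 0x0040
#define BMCR_SPEED100  0x2000

int main(void)
{
	unsigned short val = BMCR_SPEED100 | BMCR_FULLDPLX;
	int speed = (val & BMCR_SPEED1000) ? 1000 :
		    (val & BMCR_SPEED100)  ? 100 : 10;

	printf("forced %d/%s\n", speed,
	       (val & BMCR_FULLDPLX) ? "full" : "half");
	return 0;
}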
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 68c3a3f4e0ab..794a47329368 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -755,23 +755,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
err = get_filter(argp, &code);
if (err >= 0) {
+ struct bpf_prog *pass_filter = NULL;
struct sock_fprog_kern fprog = {
.len = err,
.filter = code,
};
- ppp_lock(ppp);
- if (ppp->pass_filter) {
- bpf_prog_destroy(ppp->pass_filter);
- ppp->pass_filter = NULL;
+ err = 0;
+ if (fprog.filter)
+ err = bpf_prog_create(&pass_filter, &fprog);
+ if (!err) {
+ ppp_lock(ppp);
+ if (ppp->pass_filter)
+ bpf_prog_destroy(ppp->pass_filter);
+ ppp->pass_filter = pass_filter;
+ ppp_unlock(ppp);
}
- if (fprog.filter != NULL)
- err = bpf_prog_create(&ppp->pass_filter,
- &fprog);
- else
- err = 0;
kfree(code);
- ppp_unlock(ppp);
}
break;
}
@@ -781,23 +781,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
err = get_filter(argp, &code);
if (err >= 0) {
+ struct bpf_prog *active_filter = NULL;
struct sock_fprog_kern fprog = {
.len = err,
.filter = code,
};
- ppp_lock(ppp);
- if (ppp->active_filter) {
- bpf_prog_destroy(ppp->active_filter);
- ppp->active_filter = NULL;
+ err = 0;
+ if (fprog.filter)
+ err = bpf_prog_create(&active_filter, &fprog);
+ if (!err) {
+ ppp_lock(ppp);
+ if (ppp->active_filter)
+ bpf_prog_destroy(ppp->active_filter);
+ ppp->active_filter = active_filter;
+ ppp_unlock(ppp);
}
- if (fprog.filter != NULL)
- err = bpf_prog_create(&ppp->active_filter,
- &fprog);
- else
- err = 0;
kfree(code);
- ppp_unlock(ppp);
}
break;
}
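Both PPP filter ioctls now follow the same shape: build the new BPF program outside the lock, since bpf_prog_create() may sleep, then swap it in and destroy the old one under the lock. A userspace sketch of that prepare-then-publish pattern, with a pthread mutex standing in for ppp_lock():

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *cur_filter;

static int set_filter(int want)
{
	int *nf = malloc(sizeof(*nf));	/* may-sleep work, done unlocked */

	if (!nf)
		return -1;
	*nf = want;

	pthread_mutex_lock(&lock);
	free(cur_filter);		/* drop the old filter */
	cur_filter = nf;		/* publish the new one */
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	set_filter(1);
	set_filter(2);
	free(cur_filter);
	return 0;
}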
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7302398f0b1f..9dd3746994a4 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1235,12 +1235,20 @@ static ssize_t tun_put_user(struct tun_struct *tun,
struct tun_pi pi = { 0, skb->protocol };
ssize_t total = 0;
int vlan_offset = 0, copied;
+ int vlan_hlen = 0;
+ int vnet_hdr_sz = 0;
+
+ if (vlan_tx_tag_present(skb))
+ vlan_hlen = VLAN_HLEN;
+
+ if (tun->flags & TUN_VNET_HDR)
+ vnet_hdr_sz = tun->vnet_hdr_sz;
if (!(tun->flags & TUN_NO_PI)) {
if ((len -= sizeof(pi)) < 0)
return -EINVAL;
- if (len < skb->len) {
+ if (len < skb->len + vlan_hlen + vnet_hdr_sz) {
/* Packet will be stripped */
pi.flags |= TUN_PKT_STRIP;
}
@@ -1250,9 +1258,9 @@ static ssize_t tun_put_user(struct tun_struct *tun,
total += sizeof(pi);
}
- if (tun->flags & TUN_VNET_HDR) {
+ if (vnet_hdr_sz) {
struct virtio_net_hdr gso = { 0 }; /* no info leak */
- if ((len -= tun->vnet_hdr_sz) < 0)
+ if ((len -= vnet_hdr_sz) < 0)
return -EINVAL;
if (skb_is_gso(skb)) {
@@ -1284,7 +1292,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- gso.csum_start = skb_checksum_start_offset(skb);
+ gso.csum_start = skb_checksum_start_offset(skb) +
+ vlan_hlen;
gso.csum_offset = skb->csum_offset;
} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
@@ -1293,14 +1302,13 @@ static ssize_t tun_put_user(struct tun_struct *tun,
if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
sizeof(gso))))
return -EFAULT;
- total += tun->vnet_hdr_sz;
+ total += vnet_hdr_sz;
}
copied = total;
- total += skb->len;
- if (!vlan_tx_tag_present(skb)) {
- len = min_t(int, skb->len, len);
- } else {
+ len = min_t(int, skb->len + vlan_hlen, len);
+ total += skb->len + vlan_hlen;
+ if (vlan_hlen) {
int copy, ret;
struct {
__be16 h_vlan_proto;
@@ -1311,8 +1319,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
- len = min_t(int, skb->len + VLAN_HLEN, len);
- total += VLAN_HLEN;
copy = min_t(int, vlan_offset, len);
ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
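The tun_put_user() rework computes vlan_hlen and vnet_hdr_sz once, up front, so the TUN_PKT_STRIP decision and the copy-length arithmetic agree. A worked example with illustrative sizes:

#include <stdio.h>

#define VLAN_HLEN 4

int main(void)
{
	int skb_len = 1500;		/* payload as stored in the skb       */
	int vlan_hlen = VLAN_HLEN;	/* tag is re-inserted on copy-out     */
	int vnet_hdr_sz = 10;		/* e.g. sizeof(struct virtio_net_hdr) */
	int len = 1512;			/* what userspace asked to read       */

	int need = skb_len + vlan_hlen + vnet_hdr_sz;

	printf("need %d, have %d -> %s\n", need, len,
	       len < need ? "TUN_PKT_STRIP" : "fits");
	return 0;
}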
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 2c05f6cdb12f..816d511e34d3 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -465,19 +465,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
return ret;
}
- ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL);
- if (ret < 0)
- return ret;
-
- msleep(150);
-
- ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
- if (ret < 0)
- return ret;
-
- msleep(150);
-
- ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE);
+ ax88772_reset(dev);
/* Read PHYID register *AFTER* the PHY was reset properly */
phyid = asix_get_phyid(dev);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ca309820d39e..fa9dc45b75a6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -275,13 +275,15 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}
-/* Find VXLAN socket based on network namespace and UDP port */
-static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port)
+/* Find VXLAN socket based on network namespace, address family and UDP port */
+static struct vxlan_sock *vxlan_find_sock(struct net *net,
+ sa_family_t family, __be16 port)
{
struct vxlan_sock *vs;
hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
- if (inet_sk(vs->sock->sk)->inet_sport == port)
+ if (inet_sk(vs->sock->sk)->inet_sport == port &&
+ inet_sk(vs->sock->sk)->sk.sk_family == family)
return vs;
}
return NULL;
@@ -300,11 +302,12 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
}
/* Look up VNI in a per net namespace table */
-static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
+static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
+ sa_family_t family, __be16 port)
{
struct vxlan_sock *vs;
- vs = vxlan_find_sock(net, port);
+ vs = vxlan_find_sock(net, family, port);
if (!vs)
return NULL;
@@ -621,6 +624,8 @@ static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);
int err = -ENOSYS;
+ udp_tunnel_gro_complete(skb, nhoff);
+
eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr));
type = eh->h_proto;
@@ -1771,7 +1776,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct vxlan_dev *dst_vxlan;
ip_rt_put(rt);
- dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
+ dst_vxlan = vxlan_find_vni(vxlan->net, vni,
+ dst->sa.sa_family, dst_port);
if (!dst_vxlan)
goto tx_error;
vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1825,7 +1831,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct vxlan_dev *dst_vxlan;
dst_release(ndst);
- dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port);
+ dst_vxlan = vxlan_find_vni(vxlan->net, vni,
+ dst->sa.sa_family, dst_port);
if (!dst_vxlan)
goto tx_error;
vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1985,13 +1992,15 @@ static int vxlan_init(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_sock *vs;
+ bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
spin_lock(&vn->sock_lock);
- vs = vxlan_find_sock(vxlan->net, vxlan->dst_port);
+ vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
+ vxlan->dst_port);
if (vs) {
/* If we have a socket with same port already, reuse it */
atomic_inc(&vs->refcnt);
@@ -2382,6 +2391,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_sock *vs;
+ bool ipv6 = flags & VXLAN_F_IPV6;
vs = vxlan_socket_create(net, port, rcv, data, flags);
if (!IS_ERR(vs))
@@ -2391,7 +2401,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
return vs;
spin_lock(&vn->sock_lock);
- vs = vxlan_find_sock(net, port);
+ vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
if (vs) {
if (vs->rcv == rcv)
atomic_inc(&vs->refcnt);
@@ -2550,7 +2560,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
- if (vxlan_find_vni(net, vni, vxlan->dst_port)) {
+ if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET,
+ vxlan->dst_port)) {
pr_info("duplicate VNI %u\n", vni);
return -EEXIST;
}
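vxlan_find_sock() now matches on address family as well as port, because an IPv4 and an IPv6 VXLAN socket may legitimately share the same UDP port. A toy lookup showing the two-key match:

#include <stdio.h>

struct vsock { int family; unsigned short port; const char *name; };

static struct vsock socks[] = {
	{ 4, 4789, "v4" },
	{ 6, 4789, "v6" },	/* same port, different family */
};

static const char *find_sock(int family, unsigned short port)
{
	unsigned int i;

	for (i = 0; i < sizeof(socks) / sizeof(socks[0]); i++)
		if (socks[i].port == port && socks[i].family == family)
			return socks[i].name;
	return "none";
}

int main(void)
{
	printf("%s %s\n", find_sock(4, 4789), find_sock(6, 4789));
	return 0;
}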
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index e0d9f19650b0..eb03943f8463 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -284,7 +284,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
lockdep_assert_held(&mvm->mutex);
- if (WARN_ON_ONCE(mvm->init_ucode_complete))
+ if (WARN_ON_ONCE(mvm->init_ucode_complete || mvm->calibrating))
return 0;
iwl_init_notification_wait(&mvm->notif_wait,
@@ -334,6 +334,8 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
goto out;
}
+ mvm->calibrating = true;
+
/* Send TX valid antennas before triggering calibrations */
ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
if (ret)
@@ -358,11 +360,17 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
MVM_UCODE_CALIB_TIMEOUT);
if (!ret)
mvm->init_ucode_complete = true;
+
+ if (ret && iwl_mvm_is_radio_killed(mvm)) {
+ IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
+ ret = 1;
+ }
goto out;
error:
iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
+ mvm->calibrating = false;
if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
/* we want to debug INIT and we have no NVM - fake */
mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 585fe5b7100f..b62405865b25 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -788,6 +788,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
mvm->scan_status = IWL_MVM_SCAN_NONE;
mvm->ps_disabled = false;
+ mvm->calibrating = false;
/* just in case one was running */
ieee80211_remain_on_channel_expired(mvm->hw);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index b153ced7015b..845429c88cf4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -548,6 +548,7 @@ struct iwl_mvm {
enum iwl_ucode_type cur_ucode;
bool ucode_loaded;
bool init_ucode_complete;
+ bool calibrating;
u32 error_event_table;
u32 log_event_table;
u32 umac_error_event_table;
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 48cb25a93591..5b719ee8e789 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -424,6 +424,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
mvm->sf_state = SF_UNINIT;
mvm->low_latency_agg_frame_limit = 6;
+ mvm->cur_ucode = IWL_UCODE_INIT;
mutex_init(&mvm->mutex);
mutex_init(&mvm->d0i3_suspend_mutex);
@@ -752,6 +753,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ bool calibrating = ACCESS_ONCE(mvm->calibrating);
if (state)
set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
@@ -760,7 +762,15 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
- return state && mvm->cur_ucode != IWL_UCODE_INIT;
+ /* iwl_run_init_mvm_ucode is waiting for results, abort it */
+ if (calibrating)
+ iwl_abort_notification_waits(&mvm->notif_wait);
+
+ /*
+ * Stop the device if we run OPERATIONAL firmware or if we are in the
+ * middle of the calibrations.
+ */
+ return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating);
}
static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 3781b029e54a..160c3ebc48d0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -915,7 +915,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
* restart. So don't process again if the device is
* already dead.
*/
- if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+ IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
iwl_pcie_tx_stop(trans);
iwl_pcie_rx_stop(trans);
@@ -945,7 +946,6 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* clear all status bits */
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
clear_bit(STATUS_INT_ENABLED, &trans->status);
- clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
clear_bit(STATUS_TPOWER_PMI, &trans->status);
clear_bit(STATUS_RFKILL, &trans->status);
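The trans.c hunk folds the check and the clear of STATUS_DEVICE_ENABLED into one atomic test_and_clear_bit(), making the stop path idempotent under concurrency. The same idea in portable C11, with atomic_exchange standing in for the kernel's bit op (a sketch, not the driver code):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool device_enabled = true;

    static void stop_device(void)
    {
        /* atomic_exchange returns the previous value: exactly one
         * caller sees "true" and performs the teardown; any later
         * or concurrent caller sees "false" and skips it. */
        if (atomic_exchange(&device_enabled, false))
            puts("stopping tx/rx");
        else
            puts("already stopped, nothing to do");
    }

    int main(void)
    {
        stop_device();
        stop_device();   /* second call is a no-op */
        return 0;
    }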
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index babbdc1ce741..c9ad4cf1adfb 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1987,7 +1987,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
if (err != 0) {
printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n",
err);
- goto failed_hw;
+ goto failed_bind;
}
skb_queue_head_init(&data->pending);
@@ -2183,6 +2183,8 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
return idx;
failed_hw:
+ device_release_driver(data->dev);
+failed_bind:
device_unregister(data->dev);
failed_drvdata:
ieee80211_free_hw(hw);
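The hwsim fix adds a failed_bind label so device_release_driver() is only reached on paths where device_bind_driver() actually succeeded. A compact sketch of that ordered-unwind convention, with stubbed helpers whose names are made up for illustration:

    #include <stdio.h>

    static int  bind_driver(void)    { return 0; }   /* stub: succeeds */
    static int  start_hw(void)       { return -1; }  /* stub: fails */
    static void release_driver(void) { puts("release_driver"); }
    static void unregister_dev(void) { puts("unregister_dev"); }
    static void free_hw(void)        { puts("free_hw"); }

    static int create_radio(void)
    {
        /* ... device registered, hw allocated ... */
        if (bind_driver() != 0)
            goto failed_bind;    /* bind failed: must NOT release driver */
        if (start_hw() != 0)
            goto failed_hw;      /* bind succeeded: full unwind needed */
        return 0;

    /* labels undo work in reverse order of acquisition */
    failed_hw:
        release_driver();        /* only valid after a successful bind */
    failed_bind:
        unregister_dev();
        free_hw();
        return -1;
    }

    int main(void) { return create_radio() ? 1 : 0; }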
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 4dcfb7116a04..a2eabe6ff9ad 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -202,6 +202,7 @@ config TC1100_WMI
config HP_ACCEL
tristate "HP laptop accelerometer"
depends on INPUT && ACPI
+ depends on SERIO_I8042
select SENSORS_LIS3LV02D
select NEW_LEDS
select LEDS_CLASS
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 13e14ec1d3d7..6bec745b6b92 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -37,6 +37,8 @@
#include <linux/leds.h>
#include <linux/atomic.h>
#include <linux/acpi.h>
+#include <linux/i8042.h>
+#include <linux/serio.h>
#include "../../misc/lis3lv02d/lis3lv02d.h"
#define DRIVER_NAME "hp_accel"
@@ -73,6 +75,13 @@ static inline void delayed_sysfs_set(struct led_classdev *led_cdev,
/* HP-specific accelerometer driver ------------------------------------ */
+/* e0 25, e0 26, e0 27, e0 28 are scan codes that the accelerometer with acpi id
+ * HPQ6000 sends through the keyboard bus */
+#define ACCEL_1 0x25
+#define ACCEL_2 0x26
+#define ACCEL_3 0x27
+#define ACCEL_4 0x28
+
/* For automatic insertion of the module */
static const struct acpi_device_id lis3lv02d_device_ids[] = {
{"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
@@ -294,6 +303,35 @@ static void lis3lv02d_enum_resources(struct acpi_device *device)
printk(KERN_DEBUG DRIVER_NAME ": Error getting resources\n");
}
+static bool hp_accel_i8042_filter(unsigned char data, unsigned char str,
+ struct serio *port)
+{
+ static bool extended;
+
+ if (str & I8042_STR_AUXDATA)
+ return false;
+
+ if (data == 0xe0) {
+ extended = true;
+ return true;
+ } else if (unlikely(extended)) {
+ extended = false;
+
+ switch (data) {
+ case ACCEL_1:
+ case ACCEL_2:
+ case ACCEL_3:
+ case ACCEL_4:
+ return true;
+ default:
+ serio_interrupt(port, 0xe0, 0);
+ return false;
+ }
+ }
+
+ return false;
+}
+
static int lis3lv02d_add(struct acpi_device *device)
{
int ret;
@@ -326,6 +364,11 @@ static int lis3lv02d_add(struct acpi_device *device)
if (ret)
return ret;
+ /* filter to remove HPQ6000 accelerometer data
+ * from keyboard bus stream */
+ if (strstr(dev_name(&device->dev), "HPQ6000"))
+ i8042_install_filter(hp_accel_i8042_filter);
+
INIT_WORK(&hpled_led.work, delayed_set_status_worker);
ret = led_classdev_register(NULL, &hpled_led.led_classdev);
if (ret) {
@@ -343,6 +386,7 @@ static int lis3lv02d_remove(struct acpi_device *device)
if (!device)
return -EINVAL;
+ i8042_remove_filter(hp_accel_i8042_filter);
lis3lv02d_joystick_disable(&lis3_dev);
lis3lv02d_poweroff(&lis3_dev);
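The new filter buffers the 0xe0 prefix, drops the pair when the second byte is one of the four accelerometer codes, and re-injects the prefix via serio_interrupt() when it belongs to a real extended key. A standalone userspace model of that two-byte state machine, with replay() playing the role of serio_interrupt():

    #include <stdbool.h>
    #include <stdio.h>

    #define ACCEL_1 0x25
    #define ACCEL_2 0x26
    #define ACCEL_3 0x27
    #define ACCEL_4 0x28

    static void replay(unsigned char b) { printf("  replay %#x\n", b); }

    /* Returns true when the byte is accelerometer traffic to be dropped. */
    static bool accel_filter(unsigned char data)
    {
        static bool extended;

        if (data == 0xe0) {
            extended = true;     /* hold the prefix, decide on next byte */
            return true;
        }
        if (extended) {
            extended = false;
            switch (data) {
            case ACCEL_1: case ACCEL_2: case ACCEL_3: case ACCEL_4:
                return true;     /* e0 25..28: accelerometer, swallow */
            default:
                replay(0xe0);    /* real extended key: re-inject prefix */
                return false;
            }
        }
        return false;
    }

    int main(void)
    {
        unsigned char stream[] = { 0xe0, 0x25, 0xe0, 0x48, 0x1e };
        for (unsigned i = 0; i < sizeof(stream); i++)
            printf("%#x -> %s\n", stream[i],
                   accel_filter(stream[i]) ? "dropped" : "passed");
        return 0;
    }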
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
index 217da4b2ca86..99a78d365ceb 100644
--- a/drivers/power/ab8500_fg.c
+++ b/drivers/power/ab8500_fg.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/time.h>
+#include <linux/time64.h>
#include <linux/of.h>
#include <linux/completion.h>
#include <linux/mfd/core.h>
@@ -108,7 +109,7 @@ enum ab8500_fg_calibration_state {
struct ab8500_fg_avg_cap {
int avg;
int samples[NBR_AVG_SAMPLES];
- __kernel_time_t time_stamps[NBR_AVG_SAMPLES];
+ time64_t time_stamps[NBR_AVG_SAMPLES];
int pos;
int nbr_samples;
int sum;
@@ -386,15 +387,15 @@ static int ab8500_fg_is_low_curr(struct ab8500_fg *di, int curr)
*/
static int ab8500_fg_add_cap_sample(struct ab8500_fg *di, int sample)
{
- struct timespec ts;
+ struct timespec64 ts64;
struct ab8500_fg_avg_cap *avg = &di->avg_cap;
- getnstimeofday(&ts);
+ getnstimeofday64(&ts64);
do {
avg->sum += sample - avg->samples[avg->pos];
avg->samples[avg->pos] = sample;
- avg->time_stamps[avg->pos] = ts.tv_sec;
+ avg->time_stamps[avg->pos] = ts64.tv_sec;
avg->pos++;
if (avg->pos == NBR_AVG_SAMPLES)
@@ -407,7 +408,7 @@ static int ab8500_fg_add_cap_sample(struct ab8500_fg *di, int sample)
* Check the time stamp for each sample. If too old,
* replace with latest sample
*/
- } while (ts.tv_sec - VALID_CAPACITY_SEC > avg->time_stamps[avg->pos]);
+ } while (ts64.tv_sec - VALID_CAPACITY_SEC > avg->time_stamps[avg->pos]);
avg->avg = avg->sum / avg->nbr_samples;
@@ -446,14 +447,14 @@ static void ab8500_fg_clear_cap_samples(struct ab8500_fg *di)
static void ab8500_fg_fill_cap_sample(struct ab8500_fg *di, int sample)
{
int i;
- struct timespec ts;
+ struct timespec64 ts64;
struct ab8500_fg_avg_cap *avg = &di->avg_cap;
- getnstimeofday(&ts);
+ getnstimeofday64(&ts64);
for (i = 0; i < NBR_AVG_SAMPLES; i++) {
avg->samples[i] = sample;
- avg->time_stamps[i] = ts.tv_sec;
+ avg->time_stamps[i] = ts64.tv_sec;
}
avg->pos = 0;
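These hunks migrate the fuel-gauge timestamps from __kernel_time_t/timespec to time64_t/timespec64 so second counts stay valid past January 2038 on 32-bit systems. The failure mode being avoided, in a few lines of portable C:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* 03:14:07 UTC, 19 Jan 2038: the last second a signed 32-bit
         * counter can represent. */
        int32_t t32 = INT32_MAX;
        int64_t t64 = (int64_t)t32 + 1;   /* time64_t keeps counting */

        printf("32-bit: %ld (next tick wraps negative)\n", (long)t32);
        printf("64-bit: %lld\n", (long long)t64);
        return 0;
    }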
diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c
index e384844a1ae1..1f49986fc605 100644
--- a/drivers/power/bq2415x_charger.c
+++ b/drivers/power/bq2415x_charger.c
@@ -1579,8 +1579,15 @@ static int bq2415x_probe(struct i2c_client *client,
if (np) {
bq->notify_psy = power_supply_get_by_phandle(np, "ti,usb-charger-detection");
- if (!bq->notify_psy)
- return -EPROBE_DEFER;
+ if (IS_ERR(bq->notify_psy)) {
+ dev_info(&client->dev,
+ "no 'ti,usb-charger-detection' property (err=%ld)\n",
+ PTR_ERR(bq->notify_psy));
+ bq->notify_psy = NULL;
+ } else if (!bq->notify_psy) {
+ ret = -EPROBE_DEFER;
+ goto error_2;
+ }
}
else if (pdata->notify_device)
bq->notify_psy = power_supply_get_by_name(pdata->notify_device);
@@ -1602,27 +1609,27 @@ static int bq2415x_probe(struct i2c_client *client,
ret = of_property_read_u32(np, "ti,current-limit",
&bq->init_data.current_limit);
if (ret)
- return ret;
+ goto error_2;
ret = of_property_read_u32(np, "ti,weak-battery-voltage",
&bq->init_data.weak_battery_voltage);
if (ret)
- return ret;
+ goto error_2;
ret = of_property_read_u32(np, "ti,battery-regulation-voltage",
&bq->init_data.battery_regulation_voltage);
if (ret)
- return ret;
+ goto error_2;
ret = of_property_read_u32(np, "ti,charge-current",
&bq->init_data.charge_current);
if (ret)
- return ret;
+ goto error_2;
ret = of_property_read_u32(np, "ti,termination-current",
&bq->init_data.termination_current);
if (ret)
- return ret;
+ goto error_2;
ret = of_property_read_u32(np, "ti,resistor-sense",
&bq->init_data.resistor_sense);
if (ret)
- return ret;
+ goto error_2;
} else {
memcpy(&bq->init_data, pdata, sizeof(bq->init_data));
}
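power_supply_get_by_phandle() distinguishes three outcomes: an ERR_PTR when the DT property is absent or malformed, NULL when the referenced supply has not registered yet, and a valid pointer otherwise; the probe now handles each differently. A userspace sketch of the kernel's error-pointer encoding and the resulting decision tree (probe() here is a toy, not the driver function):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Kernel-style error pointers: the top 4095 addresses encode -errno. */
    static inline void *ERR_PTR(long err)      { return (void *)err; }
    static inline long  PTR_ERR(const void *p) { return (long)p; }
    static inline int   IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static int probe(void *psy)
    {
        if (IS_ERR(psy)) {
            /* property missing/bad: proceed without notification */
            printf("no charger-detection property (err=%ld)\n", PTR_ERR(psy));
            psy = NULL;
        } else if (!psy) {
            /* supply exists but not registered yet: retry probe later */
            return -EPROBE_DEFER;
        }
        return 0;   /* psy is either valid or deliberately NULL */
    }

    int main(void)
    {
        printf("ret=%d\n", probe(ERR_PTR(-ENODEV)));
        printf("ret=%d\n", probe(NULL));
        return 0;
    }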
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 7098a1ce2d3c..ef8094a61f1e 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -97,6 +97,7 @@ static struct charger_global_desc *g_desc; /* init with setup_charger_manager */
static bool is_batt_present(struct charger_manager *cm)
{
union power_supply_propval val;
+ struct power_supply *psy;
bool present = false;
int i, ret;
@@ -107,16 +108,27 @@ static bool is_batt_present(struct charger_manager *cm)
case CM_NO_BATTERY:
break;
case CM_FUEL_GAUGE:
- ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ psy = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
+ if (!psy)
+ break;
+
+ ret = psy->get_property(psy,
POWER_SUPPLY_PROP_PRESENT, &val);
if (ret == 0 && val.intval)
present = true;
break;
case CM_CHARGER_STAT:
- for (i = 0; cm->charger_stat[i]; i++) {
- ret = cm->charger_stat[i]->get_property(
- cm->charger_stat[i],
- POWER_SUPPLY_PROP_PRESENT, &val);
+ for (i = 0; cm->desc->psy_charger_stat[i]; i++) {
+ psy = power_supply_get_by_name(
+ cm->desc->psy_charger_stat[i]);
+ if (!psy) {
+ dev_err(cm->dev, "Cannot find power supply \"%s\"\n",
+ cm->desc->psy_charger_stat[i]);
+ continue;
+ }
+
+ ret = psy->get_property(psy, POWER_SUPPLY_PROP_PRESENT,
+ &val);
if (ret == 0 && val.intval) {
present = true;
break;
@@ -139,14 +151,20 @@ static bool is_batt_present(struct charger_manager *cm)
static bool is_ext_pwr_online(struct charger_manager *cm)
{
union power_supply_propval val;
+ struct power_supply *psy;
bool online = false;
int i, ret;
/* If at least one of them has one, it's yes. */
- for (i = 0; cm->charger_stat[i]; i++) {
- ret = cm->charger_stat[i]->get_property(
- cm->charger_stat[i],
- POWER_SUPPLY_PROP_ONLINE, &val);
+ for (i = 0; cm->desc->psy_charger_stat[i]; i++) {
+ psy = power_supply_get_by_name(cm->desc->psy_charger_stat[i]);
+ if (!psy) {
+ dev_err(cm->dev, "Cannot find power supply \"%s\"\n",
+ cm->desc->psy_charger_stat[i]);
+ continue;
+ }
+
+ ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val);
if (ret == 0 && val.intval) {
online = true;
break;
@@ -167,12 +185,14 @@ static bool is_ext_pwr_online(struct charger_manager *cm)
static int get_batt_uV(struct charger_manager *cm, int *uV)
{
union power_supply_propval val;
+ struct power_supply *fuel_gauge;
int ret;
- if (!cm->fuel_gauge)
+ fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
+ if (!fuel_gauge)
return -ENODEV;
- ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ ret = fuel_gauge->get_property(fuel_gauge,
POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
if (ret)
return ret;
@@ -189,6 +209,7 @@ static bool is_charging(struct charger_manager *cm)
{
int i, ret;
bool charging = false;
+ struct power_supply *psy;
union power_supply_propval val;
/* If there is no battery, it cannot be charged */
@@ -196,17 +217,22 @@ static bool is_charging(struct charger_manager *cm)
return false;
/* If at least one of the charger is charging, return yes */
- for (i = 0; cm->charger_stat[i]; i++) {
+ for (i = 0; cm->desc->psy_charger_stat[i]; i++) {
/* 1. The charger should not be DISABLED */
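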
if (cm->emergency_stop)
continue;
if (!cm->charger_enabled)
continue;
+ psy = power_supply_get_by_name(cm->desc->psy_charger_stat[i]);
+ if (!psy) {
+ dev_err(cm->dev, "Cannot find power supply \"%s\"\n",
+ cm->desc->psy_charger_stat[i]);
+ continue;
+ }
+
/* 2. The charger should be online (ext-power) */
- ret = cm->charger_stat[i]->get_property(
- cm->charger_stat[i],
- POWER_SUPPLY_PROP_ONLINE, &val);
+ ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val);
if (ret) {
dev_warn(cm->dev, "Cannot read ONLINE value from %s\n",
cm->desc->psy_charger_stat[i]);
@@ -219,9 +245,7 @@ static bool is_charging(struct charger_manager *cm)
* 3. The charger should not be FULL, DISCHARGING,
* or NOT_CHARGING.
*/
- ret = cm->charger_stat[i]->get_property(
- cm->charger_stat[i],
- POWER_SUPPLY_PROP_STATUS, &val);
+ ret = psy->get_property(psy, POWER_SUPPLY_PROP_STATUS, &val);
if (ret) {
dev_warn(cm->dev, "Cannot read STATUS value from %s\n",
cm->desc->psy_charger_stat[i]);
@@ -248,6 +272,7 @@ static bool is_full_charged(struct charger_manager *cm)
{
struct charger_desc *desc = cm->desc;
union power_supply_propval val;
+ struct power_supply *fuel_gauge;
int ret = 0;
int uV;
@@ -255,11 +280,15 @@ static bool is_full_charged(struct charger_manager *cm)
if (!is_batt_present(cm))
return false;
- if (cm->fuel_gauge && desc->fullbatt_full_capacity > 0) {
+ fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
+ if (!fuel_gauge)
+ return false;
+
+ if (desc->fullbatt_full_capacity > 0) {
val.intval = 0;
/* Not full if capacity of fuel gauge isn't full */
- ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ ret = fuel_gauge->get_property(fuel_gauge,
POWER_SUPPLY_PROP_CHARGE_FULL, &val);
if (!ret && val.intval > desc->fullbatt_full_capacity)
return true;
@@ -273,10 +302,10 @@ static bool is_full_charged(struct charger_manager *cm)
}
/* Full, if the capacity is more than fullbatt_soc */
- if (cm->fuel_gauge && desc->fullbatt_soc > 0) {
+ if (desc->fullbatt_soc > 0) {
val.intval = 0;
- ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ ret = fuel_gauge->get_property(fuel_gauge,
POWER_SUPPLY_PROP_CAPACITY, &val);
if (!ret && val.intval >= desc->fullbatt_soc)
return true;
@@ -551,6 +580,20 @@ static int check_charging_duration(struct charger_manager *cm)
return ret;
}
+static int cm_get_battery_temperature_by_psy(struct charger_manager *cm,
+ int *temp)
+{
+ struct power_supply *fuel_gauge;
+
+ fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
+ if (!fuel_gauge)
+ return -ENODEV;
+
+ return fuel_gauge->get_property(fuel_gauge,
+ POWER_SUPPLY_PROP_TEMP,
+ (union power_supply_propval *)temp);
+}
+
static int cm_get_battery_temperature(struct charger_manager *cm,
int *temp)
{
@@ -560,15 +603,18 @@ static int cm_get_battery_temperature(struct charger_manager *cm,
return -ENODEV;
#ifdef CONFIG_THERMAL
- ret = thermal_zone_get_temp(cm->tzd_batt, (unsigned long *)temp);
- if (!ret)
- /* Calibrate temperature unit */
- *temp /= 100;
-#else
- ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
- POWER_SUPPLY_PROP_TEMP,
- (union power_supply_propval *)temp);
+ if (cm->tzd_batt) {
+ ret = thermal_zone_get_temp(cm->tzd_batt, (unsigned long *)temp);
+ if (!ret)
+ /* Calibrate temperature unit */
+ *temp /= 100;
+ } else
#endif
+ {
+ /* if-else continued from CONFIG_THERMAL */
+ ret = cm_get_battery_temperature_by_psy(cm, temp);
+ }
+
return ret;
}
@@ -827,6 +873,7 @@ static int charger_get_property(struct power_supply *psy,
struct charger_manager *cm = container_of(psy,
struct charger_manager, charger_psy);
struct charger_desc *desc = cm->desc;
+ struct power_supply *fuel_gauge;
int ret = 0;
int uV;
@@ -857,14 +904,20 @@ static int charger_get_property(struct power_supply *psy,
ret = get_batt_uV(cm, &val->intval);
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
- ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
+ if (!fuel_gauge) {
+ ret = -ENODEV;
+ break;
+ }
+ ret = fuel_gauge->get_property(fuel_gauge,
POWER_SUPPLY_PROP_CURRENT_NOW, val);
break;
case POWER_SUPPLY_PROP_TEMP:
case POWER_SUPPLY_PROP_TEMP_AMBIENT:
return cm_get_battery_temperature(cm, &val->intval);
case POWER_SUPPLY_PROP_CAPACITY:
- if (!cm->fuel_gauge) {
+ fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
+ if (!fuel_gauge) {
ret = -ENODEV;
break;
}
@@ -875,7 +928,7 @@ static int charger_get_property(struct power_supply *psy,
break;
}
- ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ ret = fuel_gauge->get_property(fuel_gauge,
POWER_SUPPLY_PROP_CAPACITY, val);
if (ret)
break;
@@ -924,7 +977,14 @@ static int charger_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CHARGE_NOW:
if (is_charging(cm)) {
- ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ fuel_gauge = power_supply_get_by_name(
+ cm->desc->psy_fuel_gauge);
+ if (!fuel_gauge) {
+ ret = -ENODEV;
+ break;
+ }
+
+ ret = fuel_gauge->get_property(fuel_gauge,
POWER_SUPPLY_PROP_CHARGE_NOW,
val);
if (ret) {
@@ -970,6 +1030,7 @@ static struct power_supply psy_default = {
.properties = default_charger_props,
.num_properties = ARRAY_SIZE(default_charger_props),
.get_property = charger_get_property,
+ .no_thermal = true,
};
/**
@@ -1485,14 +1546,15 @@ err:
return ret;
}
-static int cm_init_thermal_data(struct charger_manager *cm)
+static int cm_init_thermal_data(struct charger_manager *cm,
+ struct power_supply *fuel_gauge)
{
struct charger_desc *desc = cm->desc;
union power_supply_propval val;
int ret;
/* Verify whether fuel gauge provides battery temperature */
- ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ ret = fuel_gauge->get_property(fuel_gauge,
POWER_SUPPLY_PROP_TEMP, &val);
if (!ret) {
@@ -1502,8 +1564,6 @@ static int cm_init_thermal_data(struct charger_manager *cm)
cm->desc->measure_battery_temp = true;
}
#ifdef CONFIG_THERMAL
- cm->tzd_batt = cm->fuel_gauge->tzd;
-
if (ret && desc->thermal_zone) {
cm->tzd_batt =
thermal_zone_get_zone_by_name(desc->thermal_zone);
@@ -1666,6 +1726,7 @@ static int charger_manager_probe(struct platform_device *pdev)
int ret = 0, i = 0;
int j = 0;
union power_supply_propval val;
+ struct power_supply *fuel_gauge;
if (g_desc && !rtc_dev && g_desc->rtc_name) {
rtc_dev = rtc_class_open(g_desc->rtc_name);
@@ -1729,23 +1790,20 @@ static int charger_manager_probe(struct platform_device *pdev)
while (desc->psy_charger_stat[i])
i++;
- cm->charger_stat = devm_kzalloc(&pdev->dev,
- sizeof(struct power_supply *) * i, GFP_KERNEL);
- if (!cm->charger_stat)
- return -ENOMEM;
-
+ /* Check if charger's supplies are present at probe */
for (i = 0; desc->psy_charger_stat[i]; i++) {
- cm->charger_stat[i] = power_supply_get_by_name(
- desc->psy_charger_stat[i]);
- if (!cm->charger_stat[i]) {
+ struct power_supply *psy;
+
+ psy = power_supply_get_by_name(desc->psy_charger_stat[i]);
+ if (!psy) {
dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n",
desc->psy_charger_stat[i]);
return -ENODEV;
}
}
- cm->fuel_gauge = power_supply_get_by_name(desc->psy_fuel_gauge);
- if (!cm->fuel_gauge) {
+ fuel_gauge = power_supply_get_by_name(desc->psy_fuel_gauge);
+ if (!fuel_gauge) {
dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n",
desc->psy_fuel_gauge);
return -ENODEV;
@@ -1788,13 +1846,13 @@ static int charger_manager_probe(struct platform_device *pdev)
cm->charger_psy.num_properties = psy_default.num_properties;
/* Find which optional psy-properties are available */
- if (!cm->fuel_gauge->get_property(cm->fuel_gauge,
+ if (!fuel_gauge->get_property(fuel_gauge,
POWER_SUPPLY_PROP_CHARGE_NOW, &val)) {
cm->charger_psy.properties[cm->charger_psy.num_properties] =
POWER_SUPPLY_PROP_CHARGE_NOW;
cm->charger_psy.num_properties++;
}
- if (!cm->fuel_gauge->get_property(cm->fuel_gauge,
+ if (!fuel_gauge->get_property(fuel_gauge,
POWER_SUPPLY_PROP_CURRENT_NOW,
&val)) {
cm->charger_psy.properties[cm->charger_psy.num_properties] =
@@ -1802,7 +1860,7 @@ static int charger_manager_probe(struct platform_device *pdev)
cm->charger_psy.num_properties++;
}
- ret = cm_init_thermal_data(cm);
+ ret = cm_init_thermal_data(cm, fuel_gauge);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize thermal data\n");
cm->desc->measure_battery_temp = false;
@@ -2066,8 +2124,8 @@ static bool find_power_supply(struct charger_manager *cm,
int i;
bool found = false;
- for (i = 0; cm->charger_stat[i]; i++) {
- if (psy == cm->charger_stat[i]) {
+ for (i = 0; cm->desc->psy_charger_stat[i]; i++) {
+ if (!strcmp(psy->name, cm->desc->psy_charger_stat[i])) {
found = true;
break;
}
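Throughout charger-manager the cached struct power_supply pointers are replaced with power_supply_get_by_name() lookups at the point of use, so a supply that unregisters becomes a failed lookup rather than a dangling pointer. The pattern reduced to a name-keyed registry — everything below is illustrative:

    #include <stdio.h>
    #include <string.h>

    struct psy { const char *name; int online; };

    static struct psy registry[] = { { "main-charger", 1 } };

    static struct psy *get_by_name(const char *name)
    {
        for (unsigned i = 0; i < sizeof(registry)/sizeof(registry[0]); i++)
            if (!strcmp(registry[i].name, name))
                return &registry[i];
        return NULL;   /* supply gone (or never registered) */
    }

    static int is_ext_pwr_online(const char *const *names)
    {
        for (int i = 0; names[i]; i++) {
            struct psy *psy = get_by_name(names[i]);  /* look up per call */
            if (!psy) {
                fprintf(stderr, "cannot find \"%s\"\n", names[i]);
                continue;                             /* skip, don't crash */
            }
            if (psy->online)
                return 1;
        }
        return 0;
    }

    int main(void)
    {
        const char *names[] = { "usb-charger", "main-charger", NULL };
        printf("online=%d\n", is_ext_pwr_online(names));
        return 0;
    }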
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 6cb7fe5c022d..694e8cddd5c1 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -417,6 +417,9 @@ static int psy_register_thermal(struct power_supply *psy)
{
int i;
+ if (psy->no_thermal)
+ return 0;
+
/* Register battery zone device psy reports temperature */
for (i = 0; i < psy->num_properties; i++) {
if (psy->properties[i] == POWER_SUPPLY_PROP_TEMP) {
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index 6cbe6ef3c889..bda52f18e967 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -888,7 +888,6 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
int i;
struct virtqueue *vq;
- struct virtio_driver *drv;
if (!vcdev)
return;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index ca75c7ca2559..ef355c13ccc4 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -480,9 +480,7 @@ void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
bnx2fc_initiate_cleanup(orig_io_req);
/* Post a new IO req with the same sc_cmd */
BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
- spin_unlock_bh(&tgt->tgt_lock);
rc = bnx2fc_post_io_req(tgt, new_io_req);
- spin_lock_bh(&tgt->tgt_lock);
if (!rc)
goto free_frame;
BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 0679782d9d15..5b99844ef6bf 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1894,18 +1894,24 @@ int bnx2fc_queuecommand(struct Scsi_Host *host,
goto exit_qcmd;
}
}
+
+ spin_lock_bh(&tgt->tgt_lock);
+
io_req = bnx2fc_cmd_alloc(tgt);
if (!io_req) {
rc = SCSI_MLQUEUE_HOST_BUSY;
- goto exit_qcmd;
+ goto exit_qcmd_tgtlock;
}
io_req->sc_cmd = sc_cmd;
if (bnx2fc_post_io_req(tgt, io_req)) {
printk(KERN_ERR PFX "Unable to post io_req\n");
rc = SCSI_MLQUEUE_HOST_BUSY;
- goto exit_qcmd;
+ goto exit_qcmd_tgtlock;
}
+
+exit_qcmd_tgtlock:
+ spin_unlock_bh(&tgt->tgt_lock);
exit_qcmd:
return rc;
}
@@ -2020,6 +2026,8 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
int task_idx, index;
u16 xid;
+ /* bnx2fc_post_io_req() is called with the tgt_lock held */
+
/* Initialize rest of io_req fields */
io_req->cmd_type = BNX2FC_SCSI_CMD;
io_req->port = port;
@@ -2047,9 +2055,7 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
/* Build buffer descriptor list for firmware from sg list */
if (bnx2fc_build_bd_list_from_sg(io_req)) {
printk(KERN_ERR PFX "BD list creation failed\n");
- spin_lock_bh(&tgt->tgt_lock);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
- spin_unlock_bh(&tgt->tgt_lock);
return -EAGAIN;
}
@@ -2061,19 +2067,15 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
task = &(task_page[index]);
bnx2fc_init_task(io_req, task);
- spin_lock_bh(&tgt->tgt_lock);
-
if (tgt->flush_in_prog) {
printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
kref_put(&io_req->refcount, bnx2fc_cmd_release);
- spin_unlock_bh(&tgt->tgt_lock);
return -EAGAIN;
}
if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
printk(KERN_ERR PFX "Session not ready...post_io\n");
kref_put(&io_req->refcount, bnx2fc_cmd_release);
- spin_unlock_bh(&tgt->tgt_lock);
return -EAGAIN;
}
@@ -2091,6 +2093,5 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
/* Ring doorbell */
bnx2fc_ring_doorbell(tgt);
- spin_unlock_bh(&tgt->tgt_lock);
return 0;
}
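This change hoists tgt_lock acquisition out of bnx2fc_post_io_req() into bnx2fc_queuecommand(), so command allocation and posting share one critical section and the callee's error paths no longer drop and retake the lock around kref_put(). The shape of the refactor with a pthread mutex and stubbed helpers:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t tgt_lock = PTHREAD_MUTEX_INITIALIZER;

    static int alloc_cmd(void) { return 1; }   /* stub: allocation ok */

    static int post_io_req(void)
    {
        /* Called with tgt_lock held: error paths can simply return,
         * no unlock/relock dance is needed around cleanup anymore. */
        return 0;
    }

    static int queuecommand(void)
    {
        int rc = 0;

        pthread_mutex_lock(&tgt_lock);         /* one critical section */
        if (!alloc_cmd() || post_io_req() != 0)
            rc = -1;                           /* "host busy" */
        pthread_mutex_unlock(&tgt_lock);

        return rc;
    }

    int main(void) { printf("rc=%d\n", queuecommand()); return 0; }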
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 3e0a0d315f72..81bb3bd7909d 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -936,20 +936,23 @@ static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
cxgbi_sock_get(csk);
spin_lock_bh(&csk->lock);
- if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
- cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
- cxgbi_sock_set_state(csk, CTP_ABORTING);
- goto done;
+ cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
+
+ if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
+ send_tx_flowc_wr(csk);
+ cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}
- cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
+ cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
+ cxgbi_sock_set_state(csk, CTP_ABORTING);
+
send_abort_rpl(csk, rst_status);
if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
csk->err = abort_status_to_errno(csk, req->status, &rst_status);
cxgbi_sock_closed(csk);
}
-done:
+
spin_unlock_bh(&csk->lock);
cxgbi_sock_put(csk);
rel_skb:
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 674d498b46ab..13d869a92248 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -905,18 +905,16 @@ void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk)
{
cxgbi_sock_get(csk);
spin_lock_bh(&csk->lock);
+
+ cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
- if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_RCVD))
- cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
- else {
- cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_RCVD);
- cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
- if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
- pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
- csk, csk->state, csk->flags, csk->tid);
- cxgbi_sock_closed(csk);
- }
+ cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
+ if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
+ pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
+ csk, csk->state, csk->flags, csk->tid);
+ cxgbi_sock_closed(csk);
}
+
spin_unlock_bh(&csk->lock);
cxgbi_sock_put(csk);
}
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index e99507ed0e3c..fd78bdc53528 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -474,6 +474,13 @@ static int alua_check_sense(struct scsi_device *sdev,
* LUN Not Ready -- Offline
*/
return SUCCESS;
+ if (sdev->allow_restart &&
+ sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x02)
+ /*
+ * if the device is not started, we need to wake
+ * the error handler to start the motor
+ */
+ return FAILED;
break;
case UNIT_ATTENTION:
if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index f6a69a3b1b3f..5640ad1c8214 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -4453,7 +4453,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->msixentry[i].entry = i;
i = pci_enable_msix_range(instance->pdev, instance->msixentry,
1, instance->msix_vectors);
- if (i)
+ if (i > 0)
instance->msix_vectors = i;
else
instance->msix_vectors = 0;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 9a6f8468225f..bc5ff6ff9c79 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -459,14 +459,6 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
if (! scsi_command_normalize_sense(scmd, &sshdr))
return FAILED; /* no valid sense data */
- if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
- /*
- * nasty: for mid-layer issued TURs, we need to return the
- * actual sense data without any recovery attempt. For eh
- * issued ones, we need to try to recover and interpret
- */
- return SUCCESS;
-
scsi_report_sense(sdev, &sshdr);
if (scsi_sense_is_deferred(&sshdr))
@@ -482,6 +474,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
/* handler does not care. Drop down to default handling */
}
+ if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
+ /*
+ * nasty: for mid-layer issued TURs, we need to return the
+ * actual sense data without any recovery attempt. For eh
+ * issued ones, we need to try to recover and interpret
+ */
+ return SUCCESS;
+
/*
* Previous logic looked for FILEMARK, EOM or ILI which are
* mainly associated with tapes and returned SUCCESS.
@@ -2001,8 +2001,10 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
* is no point trying to lock the door of an off-line device.
*/
shost_for_each_device(sdev, shost) {
- if (scsi_device_online(sdev) && sdev->locked)
+ if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) {
scsi_eh_lock_door(sdev);
+ sdev->was_reset = 0;
+ }
}
/*
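The reorder moves the mid-layer TEST UNIT READY short-circuit below scsi_report_sense(), so sense data is reported on every path before any early return. The general shape, heavily simplified with stand-in names:

    #include <stdio.h>

    enum { SUCCESS, FAILED };

    static void report_sense(void) { puts("sense reported"); }
    static int  midlayer_tur = 1;   /* stub condition */

    static int check_sense(void)
    {
        /* side effect first: every path below now records the sense */
        report_sense();

        if (midlayer_tur)
            return SUCCESS;   /* early return, after reporting */

        /* ... full recovery/interpretation path ... */
        return FAILED;
    }

    int main(void) { return check_sense(); }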
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 461bf3d033a0..5a1f1070b702 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -459,6 +459,10 @@ static int imx_thermal_probe(struct platform_device *pdev)
int measure_freq;
int ret;
+ if (!cpufreq_get_current_driver()) {
+ dev_dbg(&pdev->dev, "no cpufreq driver!");
+ return -EPROBE_DEFER;
+ }
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -521,6 +525,30 @@ static int imx_thermal_probe(struct platform_device *pdev)
return ret;
}
+ data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(data->thermal_clk)) {
+ ret = PTR_ERR(data->thermal_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to get thermal clk: %d\n", ret);
+ cpufreq_cooling_unregister(data->cdev);
+ return ret;
+ }
+
+ /*
+ * Thermal sensor needs clk on to get correct value, normally
+ * we should enable its clk before taking measurement and disable
+ * clk after measurement is done, but if alarm function is enabled,
+ * hardware will auto measure the temperature periodically, so we
+ * need to keep the clk always on for alarm function.
+ */
+ ret = clk_prepare_enable(data->thermal_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
+ cpufreq_cooling_unregister(data->cdev);
+ return ret;
+ }
+
data->tz = thermal_zone_device_register("imx_thermal_zone",
IMX_TRIP_NUM,
BIT(IMX_TRIP_PASSIVE), data,
@@ -531,26 +559,11 @@ static int imx_thermal_probe(struct platform_device *pdev)
ret = PTR_ERR(data->tz);
dev_err(&pdev->dev,
"failed to register thermal zone device %d\n", ret);
+ clk_disable_unprepare(data->thermal_clk);
cpufreq_cooling_unregister(data->cdev);
return ret;
}
- data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(data->thermal_clk)) {
- dev_warn(&pdev->dev, "failed to get thermal clk!\n");
- } else {
- /*
- * Thermal sensor needs clk on to get correct value, normally
- * we should enable its clk before taking measurement and disable
- * clk after measurement is done, but if alarm function is enabled,
- * hardware will auto measure the temperature periodically, so we
- * need to keep the clk always on for alarm function.
- */
- ret = clk_prepare_enable(data->thermal_clk);
- if (ret)
- dev_warn(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
- }
-
/* Enable measurements at ~ 10 Hz */
regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ);
measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */
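The probe now gets and enables the sensor clock before registering the thermal zone, since registration triggers an immediate measurement that needs the clock running, and each failure path unwinds in reverse order. A sketch of that ordering with stand-in helpers:

    #include <stdio.h>

    static int  clk_enable(void)         { puts("clk on");  return 0; }
    static void clk_disable(void)        { puts("clk off"); }
    static int  register_zone(void)      { puts("zone registered"); return -1; }
    static void cooling_unregister(void) { puts("cooling unregistered"); }

    static int probe(void)
    {
        if (clk_enable() != 0) {        /* 1. clock first */
            cooling_unregister();
            return -1;
        }
        if (register_zone() != 0) {     /* 2. zone needs a live clock */
            clk_disable();              /* unwind in reverse order */
            cooling_unregister();
            return -1;
        }
        return 0;
    }

    int main(void) { return probe() ? 1 : 0; }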
diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c
index d20dba986f0f..6e9fb62eb817 100644
--- a/drivers/thermal/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3403_thermal.c
@@ -92,7 +92,13 @@ static int sys_get_trip_hyst(struct thermal_zone_device *tzone,
if (ACPI_FAILURE(status))
return -EIO;
- *temp = DECI_KELVIN_TO_MILLI_CELSIUS(hyst, KELVIN_OFFSET);
+ /*
+ * Thermal hysteresis represents a temperature difference.
+ * Kelvin and Celsius have the same degree size, so the
+ * conversion here from tenths of a degree Kelvin to
+ * milli-Celsius is just a multiplication by 100.
+ */
+ *temp = hyst * 100;
return 0;
}
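The removed macro applied the 273.15 K offset, which is wrong here: hysteresis is a temperature difference, and a difference of one tenth of a kelvin equals one tenth of a degree Celsius, so only the scale factor of 100 remains. The arithmetic, worked out:

    #include <stdio.h>

    int main(void)
    {
        unsigned long hyst_deci_kelvin = 20;   /* ACPI value: 2.0 K band */

        /* 1 dK = 0.1 K = 0.1 degC (as a difference) = 100 milli-degC */
        unsigned long hyst_milli_celsius = hyst_deci_kelvin * 100;

        printf("%lu dK -> %lu milli-degC\n",
               hyst_deci_kelvin, hyst_milli_celsius);
        return 0;
    }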
diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c
index 2683d2897e90..1724f6cdaef8 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.c
+++ b/drivers/thermal/samsung/exynos_tmu_data.c
@@ -264,7 +264,6 @@ struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
static const struct exynos_tmu_registers exynos5260_tmu_registers = {
.triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
.tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
- .tmu_ctrl = EXYNOS_TMU_REG_CONTROL1,
.therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
.therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
.therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
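The exynos5260 register table initialized .tmu_ctrl twice; with C designated initializers the later entry silently overrides the earlier one, which is how the stray EXYNOS_TMU_REG_CONTROL1 value masked the correct register. GCC's -Woverride-init (enabled by -Wextra) flags exactly this:

    #include <stdio.h>

    struct regs { unsigned tmu_ctrl; unsigned inten; };

    /* compile with: gcc -Wall -Wextra -Woverride-init dup.c */
    static const struct regs r = {
        .tmu_ctrl = 0x20,
        .tmu_ctrl = 0x24,   /* overrides the line above, no error by default */
        .inten    = 0xC0,
    };

    int main(void)
    {
        printf("tmu_ctrl = %#x\n", r.tmu_ctrl);   /* prints 0x24 */
        return 0;
    }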
diff --git a/drivers/thermal/samsung/exynos_tmu_data.h b/drivers/thermal/samsung/exynos_tmu_data.h
index 65e2ea6a9579..63de598c9c2c 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.h
+++ b/drivers/thermal/samsung/exynos_tmu_data.h
@@ -75,7 +75,6 @@
#define EXYNOS_MAX_TRIGGER_PER_REG 4
/* Exynos5260 specific */
-#define EXYNOS_TMU_REG_CONTROL1 0x24
#define EXYNOS5260_TMU_REG_INTEN 0xC0
#define EXYNOS5260_TMU_REG_INTSTAT 0xC4
#define EXYNOS5260_TMU_REG_INTCLEAR 0xC8