Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/apei/erst.c | 31
-rw-r--r--  drivers/base/core.c | 6
-rw-r--r--  drivers/block/cciss.c | 6
-rw-r--r--  drivers/block/loop.c | 4
-rw-r--r--  drivers/block/rbd.c | 101
-rw-r--r--  drivers/block/swim3.c | 362
-rw-r--r--  drivers/bluetooth/Kconfig | 6
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.c | 15
-rw-r--r--  drivers/bluetooth/btusb.c | 3
-rw-r--r--  drivers/firmware/efivars.c | 12
-rw-r--r--  drivers/firmware/iscsi_ibft.c | 42
-rw-r--r--  drivers/firmware/iscsi_ibft_find.c | 26
-rw-r--r--  drivers/firmware/sigma.c | 81
-rw-r--r--  drivers/gpio/Makefile | 2
-rw-r--r--  drivers/gpio/gpio-da9052.c | 21
-rw-r--r--  drivers/gpio/gpio-ml-ioh.c | 32
-rw-r--r--  drivers/gpio/gpio-mpc8xxx.c | 18
-rw-r--r--  drivers/gpio/gpio-pl061.c | 4
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 27
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.c | 62
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.h | 21
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_connector.c | 78
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 76
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.h | 25
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 13
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.c | 83
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.h | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 66
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 44
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 71
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 89
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 43
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 19
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 89
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 173
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 36
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvd0_display.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 35
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 149
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 29
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 31
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_acpi.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 11
-rw-r--r--  drivers/hid/hid-core.c | 2
-rw-r--r--  drivers/hid/hid-ids.h | 2
-rw-r--r--  drivers/hwmon/ad7314.c | 1
-rw-r--r--  drivers/hwmon/ads7871.c | 1
-rw-r--r--  drivers/hwmon/exynos4_tmu.c | 12
-rw-r--r--  drivers/hwmon/gpio-fan.c | 13
-rw-r--r--  drivers/hwmon/jz4740-hwmon.c | 16
-rw-r--r--  drivers/hwmon/ntc_thermistor.c | 14
-rw-r--r--  drivers/hwmon/s3c-hwmon.c | 13
-rw-r--r--  drivers/hwmon/sch5627.c | 13
-rw-r--r--  drivers/hwmon/sch5636.c | 13
-rw-r--r--  drivers/hwmon/twl4030-madc-hwmon.c | 14
-rw-r--r--  drivers/hwmon/ultra45_env.c | 13
-rw-r--r--  drivers/hwmon/wm831x-hwmon.c | 12
-rw-r--r--  drivers/hwmon/wm8350-hwmon.c | 12
-rw-r--r--  drivers/i2c/busses/i2c-eg20t.c | 22
-rw-r--r--  drivers/i2c/busses/i2c-nuc900.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-omap.c | 11
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c | 3
-rw-r--r--  drivers/infiniband/core/addr.c | 9
-rw-r--r--  drivers/infiniband/core/cma.c | 6
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c | 4
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 10
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 6
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c | 6
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c | 4
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c | 18
-rw-r--r--  drivers/infiniband/hw/qib/qib_qsfp.c | 12
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 13
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 20
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 13
-rw-r--r--  drivers/input/misc/cma3000_d0x.c | 4
-rw-r--r--  drivers/input/mouse/synaptics.c | 11
-rw-r--r--  drivers/input/tablet/wacom_wac.c | 4
-rw-r--r--  drivers/iommu/intel-iommu.c | 7
-rw-r--r--  drivers/iommu/intr_remapping.c | 2
-rw-r--r--  drivers/isdn/divert/divert_procfs.c | 6
-rw-r--r--  drivers/isdn/i4l/isdn_net.c | 3
-rw-r--r--  drivers/md/bitmap.c | 4
-rw-r--r--  drivers/md/md.c | 27
-rw-r--r--  drivers/md/raid5.c | 8
-rw-r--r--  drivers/mmc/card/block.c | 8
-rw-r--r--  drivers/mmc/core/core.c | 98
-rw-r--r--  drivers/mmc/core/mmc.c | 12
-rw-r--r--  drivers/mmc/host/mxcmmc.c | 1
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 7
-rw-r--r--  drivers/mmc/host/sdhci-cns3xxx.c | 1
-rw-r--r--  drivers/mmc/host/sdhci-s3c.c | 2
-rw-r--r--  drivers/mmc/host/sh_mmcif.c | 2
-rw-r--r--  drivers/mmc/host/tmio_mmc_pio.c | 2
-rw-r--r--  drivers/net/arcnet/Kconfig | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 33
-rw-r--r--  drivers/net/can/sja1000/peak_pci.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 39
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 1
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/fec.c | 11
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c | 53
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea.h | 4
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 6
-rw-r--r--  drivers/net/ethernet/ibm/iseries_veth.c | 2
-rw-r--r--  drivers/net/ethernet/jme.c | 113
-rw-r--r--  drivers/net/ethernet/jme.h | 19
-rw-r--r--  drivers/net/ethernet/pasemi/Makefile | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge.h | 8
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 53
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 14
-rw-r--r--  drivers/net/ethernet/tile/tilepro.c | 8
-rw-r--r--  drivers/net/phy/Kconfig | 2
-rw-r--r--  drivers/net/ppp/pptp.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-1000.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rxon.c | 36
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-sta.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 34
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 22
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-shared.h | 4
-rw-r--r--  drivers/net/wireless/p54/p54spi.c | 5
-rw-r--r--  drivers/net/wireless/prism54/isl_ioctl.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/ps.c | 17
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/phy.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/phy.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192de/phy.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192se/phy.c | 2
-rw-r--r--  drivers/net/xen-netback/netback.c | 4
-rw-r--r--  drivers/of/irq.c | 15
-rw-r--r--  drivers/oprofile/oprof.c | 29
-rw-r--r--  drivers/oprofile/timer_int.c | 1
-rw-r--r--  drivers/pci/ats.c | 1
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 30
-rw-r--r--  drivers/pci/iov.c | 7
-rw-r--r--  drivers/pci/pci.c | 9
-rw-r--r--  drivers/platform/x86/toshiba_acpi.c | 21
-rw-r--r--  drivers/power/intel_mid_battery.c | 12
-rw-r--r--  drivers/ptp/ptp_clock.c | 4
-rw-r--r--  drivers/rapidio/devices/tsi721.c | 41
-rw-r--r--  drivers/rapidio/devices/tsi721.h | 2
-rw-r--r--  drivers/regulator/aat2870-regulator.c | 2
-rw-r--r--  drivers/regulator/core.c | 2
-rw-r--r--  drivers/regulator/twl-regulator.c | 46
-rw-r--r--  drivers/rtc/class.c | 10
-rw-r--r--  drivers/rtc/interface.c | 44
-rw-r--r--  drivers/rtc/rtc-s3c.c | 2
-rw-r--r--  drivers/s390/cio/chsc.c | 7
-rw-r--r--  drivers/s390/cio/cio.h | 5
-rw-r--r--  drivers/s390/cio/css.c | 104
-rw-r--r--  drivers/s390/cio/device.c | 4
-rw-r--r--  drivers/s390/cio/device_fsm.c | 30
-rw-r--r--  drivers/s390/cio/device_ops.c | 20
-rw-r--r--  drivers/s390/cio/io_sch.h | 5
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 2
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 4
-rw-r--r--  drivers/sbus/char/bbc_i2c.c | 27
-rw-r--r--  drivers/sbus/char/display7seg.c | 13
-rw-r--r--  drivers/sbus/char/envctrl.c | 12
-rw-r--r--  drivers/sbus/char/flash.c | 12
-rw-r--r--  drivers/sbus/char/uctrl.c | 12
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c | 5
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 116
-rw-r--r--  drivers/scsi/fcoe/fcoe_ctlr.c | 4
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 27
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 14
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 25
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 42
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 86
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 55
-rw-r--r--  drivers/scsi/qla4xxx/ql4_fw.h | 8
-rw-r--r--  drivers/scsi/qla4xxx/ql4_glbl.h | 16
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 243
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 11
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 1084
-rw-r--r--  drivers/scsi/qla4xxx/ql4_version.h | 2
-rw-r--r--  drivers/spi/Kconfig | 4
-rw-r--r--  drivers/spi/spi-ath79.c | 1
-rw-r--r--  drivers/spi/spi-gpio.c | 4
-rw-r--r--  drivers/spi/spi-nuc900.c | 3
-rw-r--r--  drivers/ssb/driver_pcicore.c | 8
-rw-r--r--  drivers/staging/comedi/comedi_fops.c | 96
-rw-r--r--  drivers/staging/comedi/drivers/usbduxsigma.c | 7
-rw-r--r--  drivers/staging/rtl8712/usb_intf.c | 1
-rw-r--r--  drivers/staging/rts_pstor/rtsx.c | 1
-rw-r--r--  drivers/staging/tidspbridge/core/dsp-clock.c | 15
-rw-r--r--  drivers/staging/tidspbridge/rmgr/drv_interface.c | 4
-rw-r--r--  drivers/staging/usbip/vhci_rx.c | 10
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 26
-rw-r--r--  drivers/target/iscsi/iscsi_target_auth.c | 6
-rw-r--r--  drivers/target/iscsi/iscsi_target_core.h | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl1.c | 3
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 13
-rw-r--r--  drivers/target/iscsi/iscsi_target_nego.c | 3
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 41
-rw-r--r--  drivers/target/target_core_alua.c | 27
-rw-r--r--  drivers/target/target_core_cdb.c | 20
-rw-r--r--  drivers/target/target_core_configfs.c | 11
-rw-r--r--  drivers/target/target_core_device.c | 30
-rw-r--r--  drivers/target/target_core_file.c | 20
-rw-r--r--  drivers/target/target_core_iblock.c | 16
-rw-r--r--  drivers/target/target_core_pr.c | 240
-rw-r--r--  drivers/target/target_core_pscsi.c | 28
-rw-r--r--  drivers/target/target_core_rd.c | 258
-rw-r--r--  drivers/target/target_core_tmr.c | 4
-rw-r--r--  drivers/target/target_core_transport.c | 260
-rw-r--r--  drivers/target/tcm_fc/tfc_cmd.c | 2
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c | 3
-rw-r--r--  drivers/usb/class/cdc-acm.c | 10
-rw-r--r--  drivers/usb/gadget/amd5536udc.c | 2
-rw-r--r--  drivers/usb/gadget/f_mass_storage.c | 1
-rw-r--r--  drivers/usb/gadget/f_serial.c | 4
-rw-r--r--  drivers/usb/gadget/fsl_mxc_udc.c | 3
-rw-r--r--  drivers/usb/gadget/fsl_qe_udc.c | 3
-rw-r--r--  drivers/usb/gadget/fsl_udc_core.c | 75
-rw-r--r--  drivers/usb/gadget/fsl_usb2_udc.h | 10
-rw-r--r--  drivers/usb/gadget/m66592-udc.c | 2
-rw-r--r--  drivers/usb/gadget/net2280.c | 2
-rw-r--r--  drivers/usb/gadget/r8a66597-udc.c | 2
-rw-r--r--  drivers/usb/gadget/s3c-hsotg.c | 4
-rw-r--r--  drivers/usb/gadget/s3c-hsudc.c | 3
-rw-r--r--  drivers/usb/host/ehci-sched.c | 9
-rw-r--r--  drivers/usb/host/whci/qset.c | 2
-rw-r--r--  drivers/usb/host/xhci.c | 5
-rw-r--r--  drivers/usb/musb/musb_core.c | 6
-rw-r--r--  drivers/usb/musb/musb_gadget.c | 2
-rw-r--r--  drivers/usb/renesas_usbhs/mod.c | 2
-rw-r--r--  drivers/usb/renesas_usbhs/mod_gadget.c | 47
-rw-r--r--  drivers/usb/renesas_usbhs/mod_host.c | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 1
-rw-r--r--  drivers/usb/serial/option.c | 9
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 7
-rw-r--r--  drivers/xen/swiotlb-xen.c | 4
269 files changed, 4674 insertions, 2333 deletions
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 127408069ca7..631b9477b99c 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -932,7 +932,8 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
static int erst_open_pstore(struct pstore_info *psi);
static int erst_close_pstore(struct pstore_info *psi);
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi);
+ struct timespec *time, char **buf,
+ struct pstore_info *psi);
static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part,
size_t size, struct pstore_info *psi);
static int erst_clearer(enum pstore_type_id type, u64 id,
@@ -986,17 +987,23 @@ static int erst_close_pstore(struct pstore_info *psi)
}
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi)
+ struct timespec *time, char **buf,
+ struct pstore_info *psi)
{
int rc;
ssize_t len = 0;
u64 record_id;
- struct cper_pstore_record *rcd = (struct cper_pstore_record *)
- (erst_info.buf - sizeof(*rcd));
+ struct cper_pstore_record *rcd;
+ size_t rcd_len = sizeof(*rcd) + erst_info.bufsize;
if (erst_disable)
return -ENODEV;
+ rcd = kmalloc(rcd_len, GFP_KERNEL);
+ if (!rcd) {
+ rc = -ENOMEM;
+ goto out;
+ }
skip:
rc = erst_get_record_id_next(&reader_pos, &record_id);
if (rc)
@@ -1004,22 +1011,27 @@ skip:
/* no more record */
if (record_id == APEI_ERST_INVALID_RECORD_ID) {
- rc = -1;
+ rc = -EINVAL;
goto out;
}
- len = erst_read(record_id, &rcd->hdr, sizeof(*rcd) +
- erst_info.bufsize);
+ len = erst_read(record_id, &rcd->hdr, rcd_len);
/* The record may be cleared by others, try read next record */
if (len == -ENOENT)
goto skip;
- else if (len < 0) {
- rc = -1;
+ else if (len < sizeof(*rcd)) {
+ rc = -EIO;
goto out;
}
if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0)
goto skip;
+ *buf = kmalloc(len, GFP_KERNEL);
+ if (*buf == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ memcpy(*buf, rcd->data, len - sizeof(*rcd));
*id = record_id;
if (uuid_le_cmp(rcd->sec_hdr.section_type,
CPER_SECTION_TYPE_DMESG) == 0)
@@ -1037,6 +1049,7 @@ skip:
time->tv_nsec = 0;
out:
+ kfree(rcd);
return (rc < 0) ? rc : (len - sizeof(*rcd));
}
diff --git a/drivers/base/core.c b/drivers/base/core.c
index d8b3d89db043..919daa7cd5b1 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1743,8 +1743,10 @@ void device_shutdown(void)
*/
list_del_init(&dev->kobj.entry);
spin_unlock(&devices_kset->list_lock);
- /* Disable all device's runtime power management */
- pm_runtime_disable(dev);
+
+ /* Don't allow any more runtime suspends */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_barrier(dev);
if (dev->bus && dev->bus->shutdown) {
dev_dbg(dev, "shutdown\n");
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 8004ac30a7a8..587cce57adae 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2601,6 +2601,8 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
c->Request.Timeout = 0;
c->Request.CDB[0] = BMIC_WRITE;
c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+ c->Request.CDB[7] = (size >> 8) & 0xFF;
+ c->Request.CDB[8] = size & 0xFF;
break;
case TEST_UNIT_READY:
c->Request.CDBLen = 6;
@@ -4880,7 +4882,7 @@ static int cciss_request_irq(ctlr_info_t *h,
{
if (h->msix_vector || h->msi_vector) {
if (!request_irq(h->intr[h->intr_mode], msixhandler,
- IRQF_DISABLED, h->devname, h))
+ 0, h->devname, h))
return 0;
dev_err(&h->pdev->dev, "Unable to get msi irq %d"
" for %s\n", h->intr[h->intr_mode],
@@ -4889,7 +4891,7 @@ static int cciss_request_irq(ctlr_info_t *h,
}
if (!request_irq(h->intr[h->intr_mode], intxhandler,
- IRQF_DISABLED, h->devname, h))
+ IRQF_SHARED, h->devname, h))
return 0;
dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
h->intr[h->intr_mode], h->devname);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 68b205a9338f..1e888c9e85b3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -422,7 +422,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
/*
* We use punch hole to reclaim the free space used by the
- * image a.k.a. discard. However we do support discard if
+ * image a.k.a. discard. However we do not support discard if
* encryption is enabled, because it may give an attacker
* useful information.
*/
@@ -797,7 +797,7 @@ static void loop_config_discard(struct loop_device *lo)
}
q->limits.discard_granularity = inode->i_sb->s_blocksize;
- q->limits.discard_alignment = inode->i_sb->s_blocksize;
+ q->limits.discard_alignment = 0;
q->limits.max_discard_sectors = UINT_MAX >> 9;
q->limits.discard_zeroes_data = 1;
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 65cc424359b0..148ab944378d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -183,10 +183,6 @@ static LIST_HEAD(rbd_client_list); /* clients */
static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
static void rbd_dev_release(struct device *dev);
-static ssize_t rbd_snap_rollback(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t size);
static ssize_t rbd_snap_add(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -461,6 +457,10 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
u32 snap_count = le32_to_cpu(ondisk->snap_count);
int ret = -ENOMEM;
+ if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) {
+ return -ENXIO;
+ }
+
init_rwsem(&header->snap_rwsem);
header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
@@ -1356,32 +1356,6 @@ fail:
}
/*
- * Request sync osd rollback
- */
-static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
- u64 snapid,
- const char *obj)
-{
- struct ceph_osd_req_op *ops;
- int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0);
- if (ret < 0)
- return ret;
-
- ops[0].snap.snapid = snapid;
-
- ret = rbd_req_sync_op(dev, NULL,
- CEPH_NOSNAP,
- 0,
- CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
- ops,
- 1, obj, 0, 0, NULL, NULL, NULL);
-
- rbd_destroy_ops(ops);
-
- return ret;
-}
-
-/*
* Request sync osd read
*/
static int rbd_req_sync_exec(struct rbd_device *dev,
@@ -1610,8 +1584,13 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
goto out_dh;
rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
- if (rc < 0)
+ if (rc < 0) {
+ if (rc == -ENXIO) {
+ pr_warning("unrecognized header format"
+ " for image %s", rbd_dev->obj);
+ }
goto out_dh;
+ }
if (snap_count != header->total_snaps) {
snap_count = header->total_snaps;
@@ -1882,7 +1861,6 @@ static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
-static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
static struct attribute *rbd_attrs[] = {
&dev_attr_size.attr,
@@ -1893,7 +1871,6 @@ static struct attribute *rbd_attrs[] = {
&dev_attr_current_snap.attr,
&dev_attr_refresh.attr,
&dev_attr_create_snap.attr,
- &dev_attr_rollback_snap.attr,
NULL
};
@@ -2424,64 +2401,6 @@ err_unlock:
return ret;
}
-static ssize_t rbd_snap_rollback(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct rbd_device *rbd_dev = dev_to_rbd(dev);
- int ret;
- u64 snapid;
- u64 cur_ofs;
- char *seg_name = NULL;
- char *snap_name = kmalloc(count + 1, GFP_KERNEL);
- ret = -ENOMEM;
- if (!snap_name)
- return ret;
-
- /* parse snaps add command */
- snprintf(snap_name, count, "%s", buf);
- seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
- if (!seg_name)
- goto done;
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
- if (ret < 0)
- goto done_unlock;
-
- dout("snapid=%lld\n", snapid);
-
- cur_ofs = 0;
- while (cur_ofs < rbd_dev->header.image_size) {
- cur_ofs += rbd_get_segment(&rbd_dev->header,
- rbd_dev->obj,
- cur_ofs, (u64)-1,
- seg_name, NULL);
- dout("seg_name=%s\n", seg_name);
-
- ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name);
- if (ret < 0)
- pr_warning("could not roll back obj %s err=%d\n",
- seg_name, ret);
- }
-
- ret = __rbd_update_snaps(rbd_dev);
- if (ret < 0)
- goto done_unlock;
-
- ret = count;
-
-done_unlock:
- mutex_unlock(&ctl_mutex);
-done:
- kfree(seg_name);
- kfree(snap_name);
-
- return ret;
-}
-
static struct bus_attribute rbd_bus_attrs[] = {
__ATTR(add, S_IWUSR, NULL, rbd_add),
__ATTR(remove, S_IWUSR, NULL, rbd_remove),
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index ae3e167e17ad..89ddab127e33 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -16,6 +16,8 @@
* handle GCR disks
*/
+#undef DEBUG
+
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -36,13 +38,11 @@
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
-static DEFINE_MUTEX(swim3_mutex);
-static struct request_queue *swim3_queue;
-static struct gendisk *disks[2];
-static struct request *fd_req;
-
#define MAX_FLOPPIES 2
+static DEFINE_MUTEX(swim3_mutex);
+static struct gendisk *disks[MAX_FLOPPIES];
+
enum swim_state {
idle,
locating,
@@ -177,7 +177,6 @@ struct swim3 {
struct floppy_state {
enum swim_state state;
- spinlock_t lock;
struct swim3 __iomem *swim3; /* hardware registers */
struct dbdma_regs __iomem *dma; /* DMA controller registers */
int swim3_intr; /* interrupt number for SWIM3 */
@@ -204,8 +203,20 @@ struct floppy_state {
int wanted;
struct macio_dev *mdev;
char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
+ int index;
+ struct request *cur_req;
};
+#define swim3_err(fmt, arg...) dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_warn(fmt, arg...) dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_info(fmt, arg...) dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+
+#ifdef DEBUG
+#define swim3_dbg(fmt, arg...) dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#else
+#define swim3_dbg(fmt, arg...) do { } while(0)
+#endif
+
static struct floppy_state floppy_states[MAX_FLOPPIES];
static int floppy_count = 0;
static DEFINE_SPINLOCK(swim3_lock);
@@ -224,17 +235,8 @@ static unsigned short write_postamble[] = {
0, 0, 0, 0, 0, 0
};
-static void swim3_select(struct floppy_state *fs, int sel);
-static void swim3_action(struct floppy_state *fs, int action);
-static int swim3_readbit(struct floppy_state *fs, int bit);
-static void do_fd_request(struct request_queue * q);
-static void start_request(struct floppy_state *fs);
-static void set_timeout(struct floppy_state *fs, int nticks,
- void (*proc)(unsigned long));
-static void scan_track(struct floppy_state *fs);
static void seek_track(struct floppy_state *fs, int n);
static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
-static void setup_transfer(struct floppy_state *fs);
static void act(struct floppy_state *fs);
static void scan_timeout(unsigned long data);
static void seek_timeout(unsigned long data);
@@ -254,18 +256,21 @@ static unsigned int floppy_check_events(struct gendisk *disk,
unsigned int clearing);
static int floppy_revalidate(struct gendisk *disk);
-static bool swim3_end_request(int err, unsigned int nr_bytes)
+static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
{
- if (__blk_end_request(fd_req, err, nr_bytes))
- return true;
+ struct request *req = fs->cur_req;
+ int rc;
- fd_req = NULL;
- return false;
-}
+ swim3_dbg(" end request, err=%d nr_bytes=%d, cur_req=%p\n",
+ err, nr_bytes, req);
-static bool swim3_end_request_cur(int err)
-{
- return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
+ if (err)
+ nr_bytes = blk_rq_cur_bytes(req);
+ rc = __blk_end_request(req, err, nr_bytes);
+ if (rc)
+ return true;
+ fs->cur_req = NULL;
+ return false;
}
static void swim3_select(struct floppy_state *fs, int sel)
@@ -303,50 +308,53 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
return (stat & DATA) == 0;
}
-static void do_fd_request(struct request_queue * q)
-{
- int i;
-
- for(i=0; i<floppy_count; i++) {
- struct floppy_state *fs = &floppy_states[i];
- if (fs->mdev->media_bay &&
- check_media_bay(fs->mdev->media_bay) != MB_FD)
- continue;
- start_request(fs);
- }
-}
-
static void start_request(struct floppy_state *fs)
{
struct request *req;
unsigned long x;
+ swim3_dbg("start request, initial state=%d\n", fs->state);
+
if (fs->state == idle && fs->wanted) {
fs->state = available;
wake_up(&fs->wait);
return;
}
while (fs->state == idle) {
- if (!fd_req) {
- fd_req = blk_fetch_request(swim3_queue);
- if (!fd_req)
+ swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
+ if (!fs->cur_req) {
+ fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
+ swim3_dbg(" fetched request %p\n", fs->cur_req);
+ if (!fs->cur_req)
break;
}
- req = fd_req;
-#if 0
- printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
- req->rq_disk->disk_name, req->cmd,
- (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
- printk(" errors=%d current_nr_sectors=%u\n",
- req->errors, blk_rq_cur_sectors(req));
+ req = fs->cur_req;
+
+ if (fs->mdev->media_bay &&
+ check_media_bay(fs->mdev->media_bay) != MB_FD) {
+ swim3_dbg("%s", " media bay absent, dropping req\n");
+ swim3_end_request(fs, -ENODEV, 0);
+ continue;
+ }
+
+#if 0 /* This is really too verbose */
+ swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
+ req->rq_disk->disk_name, req->cmd,
+ (long)blk_rq_pos(req), blk_rq_sectors(req),
+ req->buffer);
+ swim3_dbg(" errors=%d current_nr_sectors=%u\n",
+ req->errors, blk_rq_cur_sectors(req));
#endif
if (blk_rq_pos(req) >= fs->total_secs) {
- swim3_end_request_cur(-EIO);
+ swim3_dbg(" pos out of bounds (%ld, max is %ld)\n",
+ (long)blk_rq_pos(req), (long)fs->total_secs);
+ swim3_end_request(fs, -EIO, 0);
continue;
}
if (fs->ejected) {
- swim3_end_request_cur(-EIO);
+ swim3_dbg("%s", " disk ejected\n");
+ swim3_end_request(fs, -EIO, 0);
continue;
}
@@ -354,7 +362,8 @@ static void start_request(struct floppy_state *fs)
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot) {
- swim3_end_request_cur(-EIO);
+ swim3_dbg("%s", " try to write, disk write protected\n");
+ swim3_end_request(fs, -EIO, 0);
continue;
}
}
@@ -369,7 +378,6 @@ static void start_request(struct floppy_state *fs)
x = ((long)blk_rq_pos(req)) % fs->secpercyl;
fs->head = x / fs->secpertrack;
fs->req_sector = x % fs->secpertrack + 1;
- fd_req = req;
fs->state = do_transfer;
fs->retries = 0;
@@ -377,12 +385,14 @@ static void start_request(struct floppy_state *fs)
}
}
+static void do_fd_request(struct request_queue * q)
+{
+ start_request(q->queuedata);
+}
+
static void set_timeout(struct floppy_state *fs, int nticks,
void (*proc)(unsigned long))
{
- unsigned long flags;
-
- spin_lock_irqsave(&fs->lock, flags);
if (fs->timeout_pending)
del_timer(&fs->timeout);
fs->timeout.expires = jiffies + nticks;
@@ -390,7 +400,6 @@ static void set_timeout(struct floppy_state *fs, int nticks,
fs->timeout.data = (unsigned long) fs;
add_timer(&fs->timeout);
fs->timeout_pending = 1;
- spin_unlock_irqrestore(&fs->lock, flags);
}
static inline void scan_track(struct floppy_state *fs)
@@ -442,40 +451,45 @@ static inline void setup_transfer(struct floppy_state *fs)
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_cmd *cp = fs->dma_cmd;
struct dbdma_regs __iomem *dr = fs->dma;
+ struct request *req = fs->cur_req;
- if (blk_rq_cur_sectors(fd_req) <= 0) {
- printk(KERN_ERR "swim3: transfer 0 sectors?\n");
+ if (blk_rq_cur_sectors(req) <= 0) {
+ swim3_warn("%s", "Transfer 0 sectors ?\n");
return;
}
- if (rq_data_dir(fd_req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
n = 1;
else {
n = fs->secpertrack - fs->req_sector + 1;
- if (n > blk_rq_cur_sectors(fd_req))
- n = blk_rq_cur_sectors(fd_req);
+ if (n > blk_rq_cur_sectors(req))
+ n = blk_rq_cur_sectors(req);
}
+
+ swim3_dbg(" setup xfer at sect %d (of %d) head %d for %d\n",
+ fs->req_sector, fs->secpertrack, fs->head, n);
+
fs->scount = n;
swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
out_8(&sw->sector, fs->req_sector);
out_8(&sw->nsect, n);
out_8(&sw->gap3, 0);
out_le32(&dr->cmdptr, virt_to_bus(cp));
- if (rq_data_dir(fd_req) == WRITE) {
+ if (rq_data_dir(req) == WRITE) {
/* Set up 3 dma commands: write preamble, data, postamble */
init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
++cp;
- init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512);
+ init_dma(cp, OUTPUT_MORE, req->buffer, 512);
++cp;
init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
} else {
- init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512);
+ init_dma(cp, INPUT_LAST, req->buffer, n * 512);
}
++cp;
out_le16(&cp->command, DBDMA_STOP);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
in_8(&sw->error);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
- if (rq_data_dir(fd_req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
out_8(&sw->control_bis, WRITE_SECTORS);
in_8(&sw->intr);
out_le32(&dr->control, (RUN << 16) | RUN);
@@ -488,12 +502,16 @@ static inline void setup_transfer(struct floppy_state *fs)
static void act(struct floppy_state *fs)
{
for (;;) {
+ swim3_dbg(" act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
+ fs->state, fs->req_cyl, fs->cur_cyl);
+
switch (fs->state) {
case idle:
return; /* XXX shouldn't get here */
case locating:
if (swim3_readbit(fs, TRACK_ZERO)) {
+ swim3_dbg("%s", " locate track 0\n");
fs->cur_cyl = 0;
if (fs->req_cyl == 0)
fs->state = do_transfer;
@@ -511,7 +529,7 @@ static void act(struct floppy_state *fs)
break;
}
if (fs->req_cyl == fs->cur_cyl) {
- printk("whoops, seeking 0\n");
+ swim3_warn("%s", "Whoops, seeking 0\n");
fs->state = do_transfer;
break;
}
@@ -527,7 +545,9 @@ static void act(struct floppy_state *fs)
case do_transfer:
if (fs->cur_cyl != fs->req_cyl) {
if (fs->retries > 5) {
- swim3_end_request_cur(-EIO);
+ swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
+ fs->req_cyl, fs->cur_cyl);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
return;
}
@@ -542,7 +562,7 @@ static void act(struct floppy_state *fs)
return;
default:
- printk(KERN_ERR"swim3: unknown state %d\n", fs->state);
+ swim3_err("Unknown state %d\n", fs->state);
return;
}
}
@@ -552,59 +572,75 @@ static void scan_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
+ unsigned long flags;
+
+ swim3_dbg("* scan timeout, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
fs->cur_cyl = -1;
if (fs->retries > 5) {
- swim3_end_request_cur(-EIO);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
} else {
fs->state = jogging;
act(fs);
}
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static void seek_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
+ unsigned long flags;
+
+ swim3_dbg("* seek timeout, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_SEEK);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
- printk(KERN_ERR "swim3: seek timeout\n");
- swim3_end_request_cur(-EIO);
+ swim3_err("%s", "Seek timeout\n");
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static void settle_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
+ unsigned long flags;
+
+ swim3_dbg("* settle timeout, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
if (swim3_readbit(fs, SEEK_COMPLETE)) {
out_8(&sw->select, RELAX);
fs->state = locating;
act(fs);
- return;
+ goto unlock;
}
out_8(&sw->select, RELAX);
if (fs->settle_time < 2*HZ) {
++fs->settle_time;
set_timeout(fs, 1, settle_timeout);
- return;
+ goto unlock;
}
- printk(KERN_ERR "swim3: seek settle timeout\n");
- swim3_end_request_cur(-EIO);
+ swim3_err("%s", "Seek settle timeout\n");
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
+ unlock:
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static void xfer_timeout(unsigned long data)
@@ -612,8 +648,12 @@ static void xfer_timeout(unsigned long data)
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_regs __iomem *dr = fs->dma;
+ unsigned long flags;
int n;
+ swim3_dbg("* xfer timeout, state=%d\n", fs->state);
+
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_le32(&dr->control, RUN << 16);
/* We must wait a bit for dbdma to stop */
@@ -622,12 +662,13 @@ static void xfer_timeout(unsigned long data)
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
out_8(&sw->select, RELAX);
- printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
- (rq_data_dir(fd_req)==WRITE? "writ": "read"),
- (long)blk_rq_pos(fd_req));
- swim3_end_request_cur(-EIO);
+ swim3_err("Timeout %sing sector %ld\n",
+ (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
+ (long)blk_rq_pos(fs->cur_req));
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static irqreturn_t swim3_interrupt(int irq, void *dev_id)
@@ -638,12 +679,17 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
int stat, resid;
struct dbdma_regs __iomem *dr;
struct dbdma_cmd *cp;
+ unsigned long flags;
+ struct request *req = fs->cur_req;
+
+ swim3_dbg("* interrupt, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
intr = in_8(&sw->intr);
err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
if ((intr & ERROR_INTR) && fs->state != do_transfer)
- printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
- fs->state, rq_data_dir(fd_req), intr, err);
+ swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
+ fs->state, rq_data_dir(req), intr, err);
switch (fs->state) {
case locating:
if (intr & SEEN_SECTOR) {
@@ -653,10 +699,10 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
del_timer(&fs->timeout);
fs->timeout_pending = 0;
if (sw->ctrack == 0xff) {
- printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
+ swim3_err("%s", "Seen sector but cyl=ff?\n");
fs->cur_cyl = -1;
if (fs->retries > 5) {
- swim3_end_request_cur(-EIO);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
} else {
@@ -668,8 +714,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
fs->cur_cyl = sw->ctrack;
fs->cur_sector = sw->csect;
if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
- printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
- fs->expect_cyl, fs->cur_cyl);
+ swim3_err("Expected cyl %d, got %d\n",
+ fs->expect_cyl, fs->cur_cyl);
fs->state = do_transfer;
act(fs);
}
@@ -704,7 +750,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
fs->timeout_pending = 0;
dr = fs->dma;
cp = fs->dma_cmd;
- if (rq_data_dir(fd_req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
++cp;
/*
* Check that the main data transfer has finished.
@@ -729,31 +775,32 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
if (intr & ERROR_INTR) {
n = fs->scount - 1 - resid / 512;
if (n > 0) {
- blk_update_request(fd_req, 0, n << 9);
+ blk_update_request(req, 0, n << 9);
fs->req_sector += n;
}
if (fs->retries < 5) {
++fs->retries;
act(fs);
} else {
- printk("swim3: error %sing block %ld (err=%x)\n",
- rq_data_dir(fd_req) == WRITE? "writ": "read",
- (long)blk_rq_pos(fd_req), err);
- swim3_end_request_cur(-EIO);
+ swim3_err("Error %sing block %ld (err=%x)\n",
+ rq_data_dir(req) == WRITE? "writ": "read",
+ (long)blk_rq_pos(req), err);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
}
} else {
if ((stat & ACTIVE) == 0 || resid != 0) {
/* musta been an error */
- printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
- printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n",
- fs->state, rq_data_dir(fd_req), intr, err);
- swim3_end_request_cur(-EIO);
+ swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
+ swim3_err(" state=%d, dir=%x, intr=%x, err=%x\n",
+ fs->state, rq_data_dir(req), intr, err);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
break;
}
- if (swim3_end_request(0, fs->scount << 9)) {
+ fs->retries = 0;
+ if (swim3_end_request(fs, 0, fs->scount << 9)) {
fs->req_sector += fs->scount;
if (fs->req_sector > fs->secpertrack) {
fs->req_sector -= fs->secpertrack;
@@ -770,8 +817,9 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
start_request(fs);
break;
default:
- printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
+ swim3_err("Don't know what to do in state %d\n", fs->state);
}
+ spin_unlock_irqrestore(&swim3_lock, flags);
return IRQ_HANDLED;
}
@@ -781,26 +829,31 @@ static void fd_dma_interrupt(int irq, void *dev_id)
}
*/
+/* Called under the mutex to grab exclusive access to a drive */
static int grab_drive(struct floppy_state *fs, enum swim_state state,
int interruptible)
{
unsigned long flags;
- spin_lock_irqsave(&fs->lock, flags);
- if (fs->state != idle) {
+ swim3_dbg("%s", "-> grab drive\n");
+
+ spin_lock_irqsave(&swim3_lock, flags);
+ if (fs->state != idle && fs->state != available) {
++fs->wanted;
while (fs->state != available) {
+ spin_unlock_irqrestore(&swim3_lock, flags);
if (interruptible && signal_pending(current)) {
--fs->wanted;
- spin_unlock_irqrestore(&fs->lock, flags);
return -EINTR;
}
interruptible_sleep_on(&fs->wait);
+ spin_lock_irqsave(&swim3_lock, flags);
}
--fs->wanted;
}
fs->state = state;
- spin_unlock_irqrestore(&fs->lock, flags);
+ spin_unlock_irqrestore(&swim3_lock, flags);
+
return 0;
}
@@ -808,10 +861,12 @@ static void release_drive(struct floppy_state *fs)
{
unsigned long flags;
- spin_lock_irqsave(&fs->lock, flags);
+ swim3_dbg("%s", "-> release drive\n");
+
+ spin_lock_irqsave(&swim3_lock, flags);
fs->state = idle;
start_request(fs);
- spin_unlock_irqrestore(&fs->lock, flags);
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static int fd_eject(struct floppy_state *fs)
@@ -966,6 +1021,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
{
struct floppy_state *fs = disk->private_data;
struct swim3 __iomem *sw = fs->swim3;
+
mutex_lock(&swim3_mutex);
if (fs->ref_count > 0 && --fs->ref_count == 0) {
swim3_action(fs, MOTOR_OFF);
@@ -1031,30 +1087,48 @@ static const struct block_device_operations floppy_fops = {
.revalidate_disk= floppy_revalidate,
};
+static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
+{
+ struct floppy_state *fs = macio_get_drvdata(mdev);
+ struct swim3 __iomem *sw = fs->swim3;
+
+ if (!fs)
+ return;
+ if (mb_state != MB_FD)
+ return;
+
+ /* Clear state */
+ out_8(&sw->intr_enable, 0);
+ in_8(&sw->intr);
+ in_8(&sw->error);
+}
+
static int swim3_add_device(struct macio_dev *mdev, int index)
{
struct device_node *swim = mdev->ofdev.dev.of_node;
struct floppy_state *fs = &floppy_states[index];
int rc = -EBUSY;
+ /* Do this first for message macros */
+ memset(fs, 0, sizeof(*fs));
+ fs->mdev = mdev;
+ fs->index = index;
+
/* Check & Request resources */
if (macio_resource_count(mdev) < 2) {
- printk(KERN_WARNING "ifd%d: no address for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "No address in device-tree\n");
return -ENXIO;
}
- if (macio_irq_count(mdev) < 2) {
- printk(KERN_WARNING "fd%d: no intrs for device %s\n",
- index, swim->full_name);
+ if (macio_irq_count(mdev) < 1) {
+ swim3_err("%s", "No interrupt in device-tree\n");
+ return -ENXIO;
}
if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
- printk(KERN_ERR "fd%d: can't request mmio resource for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Can't request mmio resource\n");
return -EBUSY;
}
if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
- printk(KERN_ERR "fd%d: can't request dma resource for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Can't request dma resource\n");
macio_release_resource(mdev, 0);
return -EBUSY;
}
@@ -1063,22 +1137,18 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
if (mdev->media_bay == NULL)
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
- memset(fs, 0, sizeof(*fs));
- spin_lock_init(&fs->lock);
fs->state = idle;
fs->swim3 = (struct swim3 __iomem *)
ioremap(macio_resource_start(mdev, 0), 0x200);
if (fs->swim3 == NULL) {
- printk("fd%d: couldn't map registers for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Couldn't map mmio registers\n");
rc = -ENOMEM;
goto out_release;
}
fs->dma = (struct dbdma_regs __iomem *)
ioremap(macio_resource_start(mdev, 1), 0x200);
if (fs->dma == NULL) {
- printk("fd%d: couldn't map DMA for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Couldn't map dma registers\n");
iounmap(fs->swim3);
rc = -ENOMEM;
goto out_release;
@@ -1090,31 +1160,25 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
fs->secpercyl = 36;
fs->secpertrack = 18;
fs->total_secs = 2880;
- fs->mdev = mdev;
init_waitqueue_head(&fs->wait);
fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
+ if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
+ swim3_mb_event(mdev, MB_FD);
+
if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
- printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n",
- index, fs->swim3_intr, swim->full_name);
+ swim3_err("%s", "Couldn't request interrupt\n");
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
goto out_unmap;
return -EBUSY;
}
-/*
- if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
- printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
- fs->dma_intr);
- return -EBUSY;
- }
-*/
init_timer(&fs->timeout);
- printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
+ swim3_info("SWIM3 floppy controller %s\n",
mdev->media_bay ? "in media bay" : "");
return 0;
@@ -1132,41 +1196,42 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
{
- int i, rc;
struct gendisk *disk;
+ int index, rc;
+
+ index = floppy_count++;
+ if (index >= MAX_FLOPPIES)
+ return -ENXIO;
/* Add the drive */
- rc = swim3_add_device(mdev, floppy_count);
+ rc = swim3_add_device(mdev, index);
if (rc)
return rc;
+ /* Now register that disk. Same comment about failure handling */
+ disk = disks[index] = alloc_disk(1);
+ if (disk == NULL)
+ return -ENOMEM;
+ disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
+ if (disk->queue == NULL) {
+ put_disk(disk);
+ return -ENOMEM;
+ }
+ disk->queue->queuedata = &floppy_states[index];
- /* Now create the queue if not there yet */
- if (swim3_queue == NULL) {
+ if (index == 0) {
/* If we failed, there isn't much we can do as the driver is still
* too dumb to remove the device, just bail out
*/
if (register_blkdev(FLOPPY_MAJOR, "fd"))
return 0;
- swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
- if (swim3_queue == NULL) {
- unregister_blkdev(FLOPPY_MAJOR, "fd");
- return 0;
- }
}
- /* Now register that disk. Same comment about failure handling */
- i = floppy_count++;
- disk = disks[i] = alloc_disk(1);
- if (disk == NULL)
- return 0;
-
disk->major = FLOPPY_MAJOR;
- disk->first_minor = i;
+ disk->first_minor = index;
disk->fops = &floppy_fops;
- disk->private_data = &floppy_states[i];
- disk->queue = swim3_queue;
+ disk->private_data = &floppy_states[index];
disk->flags |= GENHD_FL_REMOVABLE;
- sprintf(disk->disk_name, "fd%d", i);
+ sprintf(disk->disk_name, "fd%d", index);
set_capacity(disk, 2880);
add_disk(disk);
@@ -1194,6 +1259,9 @@ static struct macio_driver swim3_driver =
.of_match_table = swim3_match,
},
.probe = swim3_attach,
+#ifdef CONFIG_PMAC_MEDIABAY
+ .mediabay_event = swim3_mb_event,
+#endif
#if 0
.suspend = swim3_suspend,
.resume = swim3_resume,
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 11b41fd40c27..5ccf142ef0b8 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -188,7 +188,7 @@ config BT_MRVL
The core driver to support Marvell Bluetooth devices.
This driver is required if you want to support
- Marvell Bluetooth devices, such as 8688/8787.
+ Marvell Bluetooth devices, such as 8688/8787/8797.
Say Y here to compile Marvell Bluetooth driver
into the kernel or say M to compile it as module.
@@ -201,8 +201,8 @@ config BT_MRVL_SDIO
The driver for Marvell Bluetooth chipsets with SDIO interface.
This driver is required if you want to use Marvell Bluetooth
- devices with SDIO interface. Currently SD8688/SD8787 chipsets are
- supported.
+ devices with SDIO interface. Currently SD8688/SD8787/SD8797
+ chipsets are supported.
Say Y here to compile support for Marvell BT-over-SDIO driver
into the kernel or say M to compile it as module.
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 9ef48167e2cf..27b74b0d547b 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -65,7 +65,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = {
.io_port_1 = 0x01,
.io_port_2 = 0x02,
};
-static const struct btmrvl_sdio_card_reg btmrvl_reg_8787 = {
+static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
.cfg = 0x00,
.host_int_mask = 0x02,
.host_intstatus = 0x03,
@@ -92,7 +92,14 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
.helper = NULL,
.firmware = "mrvl/sd8787_uapsta.bin",
- .reg = &btmrvl_reg_8787,
+ .reg = &btmrvl_reg_87xx,
+ .sd_blksz_fw_dl = 256,
+};
+
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
+ .helper = NULL,
+ .firmware = "mrvl/sd8797_uapsta.bin",
+ .reg = &btmrvl_reg_87xx,
.sd_blksz_fw_dl = 256,
};
@@ -103,6 +110,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8787 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
+ /* Marvell SD8797 Bluetooth device */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
+ .driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
{ } /* Terminating entry */
};
@@ -1076,3 +1086,4 @@ MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE("sd8688_helper.bin");
MODULE_FIRMWARE("sd8688.bin");
MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index fe4ebc375b3d..eabc437ce500 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -777,9 +777,8 @@ skip_waking:
usb_mark_last_busy(data->udev);
}
- usb_free_urb(urb);
-
done:
+ usb_free_urb(urb);
return err;
}
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 8370f72d87ff..b0a81173a268 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -457,7 +457,8 @@ static int efi_pstore_close(struct pstore_info *psi)
}
static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
- struct timespec *timespec, struct pstore_info *psi)
+ struct timespec *timespec,
+ char **buf, struct pstore_info *psi)
{
efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
struct efivars *efivars = psi->data;
@@ -478,7 +479,11 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
timespec->tv_nsec = 0;
get_var_data_locked(efivars, &efivars->walk_entry->var);
size = efivars->walk_entry->var.DataSize;
- memcpy(psi->buf, efivars->walk_entry->var.Data, size);
+ *buf = kmalloc(size, GFP_KERNEL);
+ if (*buf == NULL)
+ return -ENOMEM;
+ memcpy(*buf, efivars->walk_entry->var.Data,
+ size);
efivars->walk_entry = list_entry(efivars->walk_entry->list.next,
struct efivar_entry, list);
return size;
@@ -576,7 +581,8 @@ static int efi_pstore_close(struct pstore_info *psi)
}
static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
- struct timespec *time, struct pstore_info *psi)
+ struct timespec *timespec,
+ char **buf, struct pstore_info *psi)
{
return -1;
}
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index c811cb107904..2cce44a1d7d0 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -746,6 +746,37 @@ static void __exit ibft_exit(void)
ibft_cleanup();
}
+#ifdef CONFIG_ACPI
+static const struct {
+ char *sign;
+} ibft_signs[] = {
+ /*
+ * One spec says "IBFT", the other says "iBFT". We have to check
+ * for both.
+ */
+ { ACPI_SIG_IBFT },
+ { "iBFT" },
+};
+
+static void __init acpi_find_ibft_region(void)
+{
+ int i;
+ struct acpi_table_header *table = NULL;
+
+ if (acpi_disabled)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
+ acpi_get_table(ibft_signs[i].sign, 0, &table);
+ ibft_addr = (struct acpi_table_ibft *)table;
+ }
+}
+#else
+static void __init acpi_find_ibft_region(void)
+{
+}
+#endif
+
/*
* ibft_init() - creates sysfs tree entries for the iBFT data.
*/
@@ -753,9 +784,16 @@ static int __init ibft_init(void)
{
int rc = 0;
+ /*
+ As on UEFI systems the setup_arch()/find_ibft_region()
+ is called before ACPI tables are parsed and it only does
+ legacy finding.
+ */
+ if (!ibft_addr)
+ acpi_find_ibft_region();
+
if (ibft_addr) {
- printk(KERN_INFO "iBFT detected at 0x%llx.\n",
- (u64)isa_virt_to_bus(ibft_addr));
+ pr_info("iBFT detected.\n");
rc = ibft_check_device();
if (rc)
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index bfe723266fd8..4da4eb9ae926 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -45,13 +45,6 @@ EXPORT_SYMBOL_GPL(ibft_addr);
static const struct {
char *sign;
} ibft_signs[] = {
-#ifdef CONFIG_ACPI
- /*
- * One spec says "IBFT", the other says "iBFT". We have to check
- * for both.
- */
- { ACPI_SIG_IBFT },
-#endif
{ "iBFT" },
{ "BIFT" }, /* Broadcom iSCSI Offload */
};
@@ -62,14 +55,6 @@ static const struct {
#define VGA_MEM 0xA0000 /* VGA buffer */
#define VGA_SIZE 0x20000 /* 128kB */
-#ifdef CONFIG_ACPI
-static int __init acpi_find_ibft(struct acpi_table_header *header)
-{
- ibft_addr = (struct acpi_table_ibft *)header;
- return 0;
-}
-#endif /* CONFIG_ACPI */
-
static int __init find_ibft_in_mem(void)
{
unsigned long pos;
@@ -94,6 +79,7 @@ static int __init find_ibft_in_mem(void)
* the table cannot be valid. */
if (pos + len <= (IBFT_END-1)) {
ibft_addr = (struct acpi_table_ibft *)virt;
+ pr_info("iBFT found at 0x%lx.\n", pos);
goto done;
}
}
@@ -108,20 +94,12 @@ done:
*/
unsigned long __init find_ibft_region(unsigned long *sizep)
{
-#ifdef CONFIG_ACPI
- int i;
-#endif
ibft_addr = NULL;
-#ifdef CONFIG_ACPI
- for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++)
- acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft);
-#endif /* CONFIG_ACPI */
-
/* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
* only use ACPI for this */
- if (!ibft_addr && !efi_enabled)
+ if (!efi_enabled)
find_ibft_in_mem();
if (ibft_addr) {
diff --git a/drivers/firmware/sigma.c b/drivers/firmware/sigma.c
index f10fc521951b..1eedb6f7fdab 100644
--- a/drivers/firmware/sigma.c
+++ b/drivers/firmware/sigma.c
@@ -14,13 +14,34 @@
#include <linux/module.h>
#include <linux/sigma.h>
-/* Return: 0==OK, <0==error, =1 ==no more actions */
+static size_t sigma_action_size(struct sigma_action *sa)
+{
+ size_t payload = 0;
+
+ switch (sa->instr) {
+ case SIGMA_ACTION_WRITEXBYTES:
+ case SIGMA_ACTION_WRITESINGLE:
+ case SIGMA_ACTION_WRITESAFELOAD:
+ payload = sigma_action_len(sa);
+ break;
+ default:
+ break;
+ }
+
+ payload = ALIGN(payload, 2);
+
+ return payload + sizeof(struct sigma_action);
+}
+
+/*
+ * Returns a negative error value in case of an error, 0 if processing of
+ * the firmware should be stopped after this action, 1 otherwise.
+ */
static int
-process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
+process_sigma_action(struct i2c_client *client, struct sigma_action *sa)
{
- struct sigma_action *sa = (void *)(ssfw->fw->data + ssfw->pos);
size_t len = sigma_action_len(sa);
- int ret = 0;
+ int ret;
pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__,
sa->instr, sa->addr, len);
@@ -29,44 +50,50 @@ process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
case SIGMA_ACTION_WRITEXBYTES:
case SIGMA_ACTION_WRITESINGLE:
case SIGMA_ACTION_WRITESAFELOAD:
- if (ssfw->fw->size < ssfw->pos + len)
- return -EINVAL;
ret = i2c_master_send(client, (void *)&sa->addr, len);
if (ret < 0)
return -EINVAL;
break;
-
case SIGMA_ACTION_DELAY:
- ret = 0;
udelay(len);
len = 0;
break;
-
case SIGMA_ACTION_END:
- return 1;
-
+ return 0;
default:
return -EINVAL;
}
- /* when arrive here ret=0 or sent data */
- ssfw->pos += sigma_action_size(sa, len);
- return ssfw->pos == ssfw->fw->size;
+ return 1;
}
static int
process_sigma_actions(struct i2c_client *client, struct sigma_firmware *ssfw)
{
- pr_debug("%s: processing %p\n", __func__, ssfw);
+ struct sigma_action *sa;
+ size_t size;
+ int ret;
+
+ while (ssfw->pos + sizeof(*sa) <= ssfw->fw->size) {
+ sa = (struct sigma_action *)(ssfw->fw->data + ssfw->pos);
+
+ size = sigma_action_size(sa);
+ ssfw->pos += size;
+ if (ssfw->pos > ssfw->fw->size || size == 0)
+ break;
+
+ ret = process_sigma_action(client, sa);
- while (1) {
- int ret = process_sigma_action(client, ssfw);
pr_debug("%s: action returned %i\n", __func__, ret);
- if (ret == 1)
- return 0;
- else if (ret)
+
+ if (ret <= 0)
return ret;
}
+
+ if (ssfw->pos != ssfw->fw->size)
+ return -EINVAL;
+
+ return 0;
}
int process_sigma_firmware(struct i2c_client *client, const char *name)
@@ -89,16 +116,24 @@ int process_sigma_firmware(struct i2c_client *client, const char *name)
/* then verify the header */
ret = -EINVAL;
- if (fw->size < sizeof(*ssfw_head))
+
+ /*
+ * Reject too small or unreasonable large files. The upper limit has been
+ * chosen a bit arbitrarily, but it should be enough for all practical
+ * purposes and having the limit makes it easier to avoid integer
+ * overflows later in the loading process.
+ */
+ if (fw->size < sizeof(*ssfw_head) || fw->size >= 0x4000000)
goto done;
ssfw_head = (void *)fw->data;
if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic)))
goto done;
- crc = crc32(0, fw->data, fw->size);
+ crc = crc32(0, fw->data + sizeof(*ssfw_head),
+ fw->size - sizeof(*ssfw_head));
pr_debug("%s: crc=%x\n", __func__, crc);
- if (crc != ssfw_head->crc)
+ if (crc != le32_to_cpu(ssfw_head->crc))
goto done;
ssfw.pos = sizeof(*ssfw_head);
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index dbcb0bcfd8da..4e018d6a7639 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o
obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
-obj-$(CONFIG_MACH_KS8695) += gpio-ks8695.o
+obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
obj-$(CONFIG_GPIO_LANGWELL) += gpio-langwell.o
obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 038f5eb8b13d..f8ce29ef9f88 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -22,7 +22,6 @@
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>
#include <linux/mfd/da9052/pdata.h>
-#include <linux/mfd/da9052/gpio.h>
#define DA9052_INPUT 1
#define DA9052_OUTPUT_OPENDRAIN 2
@@ -43,6 +42,9 @@
#define DA9052_GPIO_MASK_UPPER_NIBBLE 0xF0
#define DA9052_GPIO_MASK_LOWER_NIBBLE 0x0F
#define DA9052_GPIO_NIBBLE_SHIFT 4
+#define DA9052_IRQ_GPI0 16
+#define DA9052_GPIO_ODD_SHIFT 7
+#define DA9052_GPIO_EVEN_SHIFT 3
struct da9052_gpio {
struct da9052 *da9052;
@@ -104,33 +106,26 @@ static int da9052_gpio_get(struct gpio_chip *gc, unsigned offset)
static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
{
struct da9052_gpio *gpio = to_da9052_gpio(gc);
- unsigned char register_value = 0;
int ret;
if (da9052_gpio_port_odd(offset)) {
- if (value) {
- register_value = DA9052_GPIO_ODD_PORT_MODE;
ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
DA9052_GPIO_0_1_REG,
DA9052_GPIO_ODD_PORT_MODE,
- register_value);
+ value << DA9052_GPIO_ODD_SHIFT);
if (ret != 0)
dev_err(gpio->da9052->dev,
"Failed to updated gpio odd reg,%d",
ret);
- }
} else {
- if (value) {
- register_value = DA9052_GPIO_EVEN_PORT_MODE;
ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
DA9052_GPIO_0_1_REG,
DA9052_GPIO_EVEN_PORT_MODE,
- register_value);
+ value << DA9052_GPIO_EVEN_SHIFT);
if (ret != 0)
dev_err(gpio->da9052->dev,
"Failed to updated gpio even reg,%d",
ret);
- }
}
}
@@ -201,9 +196,9 @@ static struct gpio_chip reference_gp __devinitdata = {
.direction_input = da9052_gpio_direction_input,
.direction_output = da9052_gpio_direction_output,
.to_irq = da9052_gpio_to_irq,
- .can_sleep = 1;
- .ngpio = 16;
- .base = -1;
+ .can_sleep = 1,
+ .ngpio = 16,
+ .base = -1,
};
static int __devinit da9052_gpio_probe(struct platform_device *pdev)
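Aside: the da9052 change above folds the value into a masked register update, with the value shifted into the field selected by the mask. A plain-C sketch of that read-modify-write pattern, with an in-memory stand-in for the real da9052_reg_update() I/O path:

    #include <stdint.h>

    /* only the bits selected by mask change; value is pre-shifted into place */
    static uint8_t reg_update(uint8_t old, uint8_t mask, uint8_t value)
    {
        return (old & ~mask) | (value & mask);
    }

    /* e.g. reg_update(reg, DA9052_GPIO_ODD_PORT_MODE, value << DA9052_GPIO_ODD_SHIFT) */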
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index ea8e73869250..461958fc2264 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -332,6 +332,34 @@ static void ioh_irq_mask(struct irq_data *d)
&chip->reg->regs[chip->ch].imask);
}
+static void ioh_irq_disable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct ioh_gpio *chip = gc->private;
+ unsigned long flags;
+ u32 ien;
+
+ spin_lock_irqsave(&chip->spinlock, flags);
+ ien = ioread32(&chip->reg->regs[chip->ch].ien);
+ ien &= ~(1 << (d->irq - chip->irq_base));
+ iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
+static void ioh_irq_enable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct ioh_gpio *chip = gc->private;
+ unsigned long flags;
+ u32 ien;
+
+ spin_lock_irqsave(&chip->spinlock, flags);
+ ien = ioread32(&chip->reg->regs[chip->ch].ien);
+ ien |= 1 << (d->irq - chip->irq_base);
+ iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
{
struct ioh_gpio *chip = dev_id;
@@ -339,7 +367,7 @@ static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
int i, j;
int ret = IRQ_NONE;
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 8; i++, chip++) {
reg_val = ioread32(&chip->reg->regs[i].istatus);
for (j = 0; j < num_ports[i]; j++) {
if (reg_val & BIT(j)) {
@@ -370,6 +398,8 @@ static __devinit void ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
ct->chip.irq_mask = ioh_irq_mask;
ct->chip.irq_unmask = ioh_irq_unmask;
ct->chip.irq_set_type = ioh_irq_type;
+ ct->chip.irq_disable = ioh_irq_disable;
+ ct->chip.irq_enable = ioh_irq_enable;
irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);
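Aside: the new ioh_irq_enable()/ioh_irq_disable() callbacks are a lock-protected read-modify-write of the interrupt-enable register. A standalone sketch of the same shape, with a plain variable and a pthread mutex standing in for the ien register and chip->spinlock:

    #include <stdint.h>
    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t ien;             /* stand-in for the hardware enable register */

    static void irq_enable_bit(unsigned int bit, int enable)
    {
        pthread_mutex_lock(&lock);
        if (enable)
            ien |= 1u << bit;
        else
            ien &= ~(1u << bit);
        pthread_mutex_unlock(&lock);
    }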
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index ec3fcf0a7e12..5cd04b65c556 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -132,6 +132,15 @@ static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val
return 0;
}
+static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ /* GPIO 28..31 are input only on MPC5121 */
+ if (gpio >= 28)
+ return -EINVAL;
+
+ return mpc8xxx_gpio_dir_out(gc, gpio, val);
+}
+
static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@@ -340,11 +349,10 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
mm_gc->save_regs = mpc8xxx_gpio_save_regs;
gc->ngpio = MPC8XXX_GPIO_PINS;
gc->direction_input = mpc8xxx_gpio_dir_in;
- gc->direction_output = mpc8xxx_gpio_dir_out;
- if (of_device_is_compatible(np, "fsl,mpc8572-gpio"))
- gc->get = mpc8572_gpio_get;
- else
- gc->get = mpc8xxx_gpio_get;
+ gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
+ mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
+ gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
+ mpc8572_gpio_get : mpc8xxx_gpio_get;
gc->set = mpc8xxx_gpio_set;
gc->to_irq = mpc8xxx_gpio_to_irq;
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 093c90bd3c1d..4102f63230fd 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -238,10 +238,6 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
int ret, irq, i;
static DECLARE_BITMAP(init_irq, NR_IRQS);
- pdata = dev->dev.platform_data;
- if (pdata == NULL)
- return -ENODEV;
-
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 3969f7553fe7..d2619d72cece 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -456,6 +456,30 @@ done:
EXPORT_SYMBOL(drm_crtc_helper_set_mode);
+static int
+drm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ /* Decouple all encoders and their attached connectors from this crtc */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ connector->encoder = NULL;
+ }
+ }
+
+ drm_helper_disable_unused_functions(dev);
+ return 0;
+}
+
/**
* drm_crtc_helper_set_config - set a new config from userspace
* @crtc: CRTC to setup
@@ -510,8 +534,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
(int)set->num_connectors, set->x, set->y);
} else {
DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
- set->mode = NULL;
- set->num_connectors = 0;
+ return drm_crtc_helper_disable(set->crtc);
}
dev = set->crtc->dev;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 6f8afea94fc9..2bb07bca511a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -27,82 +27,84 @@
#include "drm.h"
#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
-static DEFINE_MUTEX(exynos_drm_buf_lock);
-
static int lowlevel_buffer_allocate(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry)
+ struct exynos_drm_gem_buf *buffer)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
- entry->vaddr = dma_alloc_writecombine(dev->dev, entry->size,
- (dma_addr_t *)&entry->paddr, GFP_KERNEL);
- if (!entry->paddr) {
+ buffer->kvaddr = dma_alloc_writecombine(dev->dev, buffer->size,
+ &buffer->dma_addr, GFP_KERNEL);
+ if (!buffer->kvaddr) {
DRM_ERROR("failed to allocate buffer.\n");
return -ENOMEM;
}
- DRM_DEBUG_KMS("allocated : vaddr(0x%x), paddr(0x%x), size(0x%x)\n",
- (unsigned int)entry->vaddr, entry->paddr, entry->size);
+ DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
+ (unsigned long)buffer->kvaddr,
+ (unsigned long)buffer->dma_addr,
+ buffer->size);
return 0;
}
static void lowlevel_buffer_deallocate(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry)
+ struct exynos_drm_gem_buf *buffer)
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
- if (entry->paddr && entry->vaddr && entry->size)
- dma_free_writecombine(dev->dev, entry->size, entry->vaddr,
- entry->paddr);
+ if (buffer->dma_addr && buffer->size)
+ dma_free_writecombine(dev->dev, buffer->size, buffer->kvaddr,
+ (dma_addr_t)buffer->dma_addr);
else
- DRM_DEBUG_KMS("entry data is null.\n");
+ DRM_DEBUG_KMS("buffer data are invalid.\n");
}
-struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
unsigned int size)
{
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
DRM_DEBUG_KMS("%s.\n", __FILE__);
+ DRM_DEBUG_KMS("desired size = 0x%x\n", size);
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
- DRM_ERROR("failed to allocate exynos_drm_buf_entry.\n");
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
return ERR_PTR(-ENOMEM);
}
- entry->size = size;
+ buffer->size = size;
/*
* allocate memory region with size and set the memory information
- * to vaddr and paddr of a entry object.
+ * to vaddr and dma_addr of a buffer object.
*/
- if (lowlevel_buffer_allocate(dev, entry) < 0) {
- kfree(entry);
- entry = NULL;
+ if (lowlevel_buffer_allocate(dev, buffer) < 0) {
+ kfree(buffer);
+ buffer = NULL;
return ERR_PTR(-ENOMEM);
}
- return entry;
+ return buffer;
}
void exynos_drm_buf_destroy(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry)
+ struct exynos_drm_gem_buf *buffer)
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
- if (!entry) {
- DRM_DEBUG_KMS("entry is null.\n");
+ if (!buffer) {
+ DRM_DEBUG_KMS("buffer is null.\n");
return;
}
- lowlevel_buffer_deallocate(dev, entry);
+ lowlevel_buffer_deallocate(dev, buffer);
- kfree(entry);
- entry = NULL;
+ kfree(buffer);
+ buffer = NULL;
}
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 045d59eab01a..6e91f9caa5db 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -26,28 +26,15 @@
#ifndef _EXYNOS_DRM_BUF_H_
#define _EXYNOS_DRM_BUF_H_
-/*
- * exynos drm buffer entry structure.
- *
- * @paddr: physical address of allocated memory.
- * @vaddr: kernel virtual address of allocated memory.
- * @size: size of allocated memory.
- */
-struct exynos_drm_buf_entry {
- dma_addr_t paddr;
- void __iomem *vaddr;
- unsigned int size;
-};
-
/* allocate physical memory. */
-struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
unsigned int size);
-/* get physical memory information of a drm framebuffer. */
-struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
+/* get memory information of a drm framebuffer. */
+struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
/* remove allocated physical memory. */
void exynos_drm_buf_destroy(struct drm_device *dev,
- struct exynos_drm_buf_entry *entry);
+ struct exynos_drm_gem_buf *buffer);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 985d9e768728..d620b0784257 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -37,6 +37,8 @@
struct exynos_drm_connector {
struct drm_connector drm_connector;
+ uint32_t encoder_id;
+ struct exynos_drm_manager *manager;
};
/* convert exynos_video_timings to drm_display_mode */
@@ -47,6 +49,7 @@ convert_to_display_mode(struct drm_display_mode *mode,
DRM_DEBUG_KMS("%s\n", __FILE__);
mode->clock = timing->pixclock / 1000;
+ mode->vrefresh = timing->refresh;
mode->hdisplay = timing->xres;
mode->hsync_start = mode->hdisplay + timing->left_margin;
@@ -57,6 +60,12 @@ convert_to_display_mode(struct drm_display_mode *mode,
mode->vsync_start = mode->vdisplay + timing->upper_margin;
mode->vsync_end = mode->vsync_start + timing->vsync_len;
mode->vtotal = mode->vsync_end + timing->lower_margin;
+
+ if (timing->vmode & FB_VMODE_INTERLACED)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+ if (timing->vmode & FB_VMODE_DOUBLE)
+ mode->flags |= DRM_MODE_FLAG_DBLSCAN;
}
/* convert drm_display_mode to exynos_video_timings */
@@ -69,7 +78,7 @@ convert_to_video_timing(struct fb_videomode *timing,
memset(timing, 0, sizeof(*timing));
timing->pixclock = mode->clock * 1000;
- timing->refresh = mode->vrefresh;
+ timing->refresh = drm_mode_vrefresh(mode);
timing->xres = mode->hdisplay;
timing->left_margin = mode->hsync_start - mode->hdisplay;
@@ -92,15 +101,16 @@ convert_to_video_timing(struct fb_videomode *timing,
static int exynos_drm_connector_get_modes(struct drm_connector *connector)
{
- struct exynos_drm_manager *manager =
- exynos_drm_get_manager(connector->encoder);
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_manager *manager = exynos_connector->manager;
+ struct exynos_drm_display_ops *display_ops = manager->display_ops;
unsigned int count;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (!display) {
- DRM_DEBUG_KMS("display is null.\n");
+ if (!display_ops) {
+ DRM_DEBUG_KMS("display_ops is null.\n");
return 0;
}
@@ -112,7 +122,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
* P.S. in case of lcd panel, count is always 1 if success
* because lcd panel has only one mode.
*/
- if (display->get_edid) {
+ if (display_ops->get_edid) {
int ret;
void *edid;
@@ -122,7 +132,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
return 0;
}
- ret = display->get_edid(manager->dev, connector,
+ ret = display_ops->get_edid(manager->dev, connector,
edid, MAX_EDID);
if (ret < 0) {
DRM_ERROR("failed to get edid data.\n");
@@ -140,8 +150,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
struct drm_display_mode *mode = drm_mode_create(connector->dev);
struct fb_videomode *timing;
- if (display->get_timing)
- timing = display->get_timing(manager->dev);
+ if (display_ops->get_timing)
+ timing = display_ops->get_timing(manager->dev);
else {
drm_mode_destroy(connector->dev, mode);
return 0;
@@ -162,9 +172,10 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct exynos_drm_manager *manager =
- exynos_drm_get_manager(connector->encoder);
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_manager *manager = exynos_connector->manager;
+ struct exynos_drm_display_ops *display_ops = manager->display_ops;
struct fb_videomode timing;
int ret = MODE_BAD;
@@ -172,8 +183,8 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
convert_to_video_timing(&timing, mode);
- if (display && display->check_timing)
- if (!display->check_timing(manager->dev, (void *)&timing))
+ if (display_ops && display_ops->check_timing)
+ if (!display_ops->check_timing(manager->dev, (void *)&timing))
ret = MODE_OK;
return ret;
@@ -181,9 +192,25 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
DRM_DEBUG_KMS("%s\n", __FILE__);
- return connector->encoder;
+ obj = drm_mode_object_find(dev, exynos_connector->encoder_id,
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown ENCODER ID %d\n",
+ exynos_connector->encoder_id);
+ return NULL;
+ }
+
+ encoder = obj_to_encoder(obj);
+
+ return encoder;
}
static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
@@ -196,15 +223,17 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
static enum drm_connector_status
exynos_drm_connector_detect(struct drm_connector *connector, bool force)
{
- struct exynos_drm_manager *manager =
- exynos_drm_get_manager(connector->encoder);
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_connector *exynos_connector =
+ to_exynos_connector(connector);
+ struct exynos_drm_manager *manager = exynos_connector->manager;
+ struct exynos_drm_display_ops *display_ops =
+ manager->display_ops;
enum drm_connector_status status = connector_status_disconnected;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (display && display->is_connected) {
- if (display->is_connected(manager->dev))
+ if (display_ops && display_ops->is_connected) {
+ if (display_ops->is_connected(manager->dev))
status = connector_status_connected;
else
status = connector_status_disconnected;
@@ -251,9 +280,11 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
connector = &exynos_connector->drm_connector;
- switch (manager->display->type) {
+ switch (manager->display_ops->type) {
case EXYNOS_DISPLAY_TYPE_HDMI:
type = DRM_MODE_CONNECTOR_HDMIA;
+ connector->interlace_allowed = true;
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
break;
default:
type = DRM_MODE_CONNECTOR_Unknown;
@@ -267,7 +298,10 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
if (err)
goto err_connector;
+ exynos_connector->encoder_id = encoder->base.id;
+ exynos_connector->manager = manager;
connector->encoder = encoder;
+
err = drm_mode_connector_attach_encoder(connector, encoder);
if (err) {
DRM_ERROR("failed to attach a connector to a encoder\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 9337e5e2dbb6..ee43cc220853 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -29,36 +29,17 @@
#include "drmP.h"
#include "drm_crtc_helper.h"
+#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_encoder.h"
+#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\
drm_crtc)
/*
- * Exynos specific crtc postion structure.
- *
- * @fb_x: offset x on a framebuffer to be displyed
- * - the unit is screen coordinates.
- * @fb_y: offset y on a framebuffer to be displayed
- * - the unit is screen coordinates.
- * @crtc_x: offset x on hardware screen.
- * @crtc_y: offset y on hardware screen.
- * @crtc_w: width of hardware screen.
- * @crtc_h: height of hardware screen.
- */
-struct exynos_drm_crtc_pos {
- unsigned int fb_x;
- unsigned int fb_y;
- unsigned int crtc_x;
- unsigned int crtc_y;
- unsigned int crtc_w;
- unsigned int crtc_h;
-};
-
-/*
* Exynos specific crtc structure.
*
* @drm_crtc: crtc object.
@@ -85,30 +66,31 @@ static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
exynos_drm_fn_encoder(crtc, overlay,
exynos_drm_encoder_crtc_mode_set);
- exynos_drm_fn_encoder(crtc, NULL, exynos_drm_encoder_crtc_commit);
+ exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+ exynos_drm_encoder_crtc_commit);
}
-static int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
- struct drm_framebuffer *fb,
- struct drm_display_mode *mode,
- struct exynos_drm_crtc_pos *pos)
+int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+ struct drm_framebuffer *fb,
+ struct drm_display_mode *mode,
+ struct exynos_drm_crtc_pos *pos)
{
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
unsigned int actual_w;
unsigned int actual_h;
- entry = exynos_drm_fb_get_buf(fb);
- if (!entry) {
- DRM_LOG_KMS("entry is null.\n");
+ buffer = exynos_drm_fb_get_buf(fb);
+ if (!buffer) {
+ DRM_LOG_KMS("buffer is null.\n");
return -EFAULT;
}
- overlay->paddr = entry->paddr;
- overlay->vaddr = entry->vaddr;
+ overlay->dma_addr = buffer->dma_addr;
+ overlay->vaddr = buffer->kvaddr;
- DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n",
+ DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
(unsigned long)overlay->vaddr,
- (unsigned long)overlay->paddr);
+ (unsigned long)overlay->dma_addr);
actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
@@ -171,9 +153,26 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)
static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
{
- DRM_DEBUG_KMS("%s\n", __FILE__);
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- /* TODO */
+ DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+ exynos_drm_encoder_crtc_commit);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ /* TODO */
+ exynos_drm_fn_encoder(crtc, NULL,
+ exynos_drm_encoder_crtc_disable);
+ break;
+ default:
+ DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+ break;
+ }
}
static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
@@ -185,9 +184,12 @@ static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
DRM_DEBUG_KMS("%s\n", __FILE__);
- /* drm framework doesn't check NULL. */
+ exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+ exynos_drm_encoder_crtc_commit);
}
static bool
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index c584042d6d2c..25f72a62cb88 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -35,4 +35,29 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
+/*
+ * Exynos specific crtc position structure.
+ *
+ * @fb_x: offset x on a framebuffer to be displayed
+ * - the unit is screen coordinates.
+ * @fb_y: offset y on a framebuffer to be displayed
+ * - the unit is screen coordinates.
+ * @crtc_x: offset x on hardware screen.
+ * @crtc_y: offset y on hardware screen.
+ * @crtc_w: width of hardware screen.
+ * @crtc_h: height of hardware screen.
+ */
+struct exynos_drm_crtc_pos {
+ unsigned int fb_x;
+ unsigned int fb_y;
+ unsigned int crtc_x;
+ unsigned int crtc_y;
+ unsigned int crtc_w;
+ unsigned int crtc_h;
+};
+
+int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+ struct drm_framebuffer *fb,
+ struct drm_display_mode *mode,
+ struct exynos_drm_crtc_pos *pos);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 83810cbe3c17..53e2216de61d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -27,6 +27,7 @@
#include "drmP.h"
#include "drm.h"
+#include "drm_crtc_helper.h"
#include <drm/exynos_drm.h>
@@ -61,6 +62,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
drm_mode_config_init(dev);
+ /* init kms poll for handling hpd */
+ drm_kms_helper_poll_init(dev);
+
exynos_drm_mode_config_init(dev);
/*
@@ -116,6 +120,7 @@ static int exynos_drm_unload(struct drm_device *dev)
exynos_drm_fbdev_fini(dev);
exynos_drm_device_unregister(dev);
drm_vblank_cleanup(dev);
+ drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
kfree(dev->dev_private);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index c03683f2ae72..5e02e6ecc2e0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -29,6 +29,7 @@
#ifndef _EXYNOS_DRM_DRV_H_
#define _EXYNOS_DRM_DRV_H_
+#include <linux/module.h>
#include "drm.h"
#define MAX_CRTC 2
@@ -79,8 +80,8 @@ struct exynos_drm_overlay_ops {
* @scan_flag: interlace or progressive way.
* (it could be DRM_MODE_FLAG_*)
* @bpp: pixel size.(in bit)
- * @paddr: bus(accessed by dma) physical memory address to this overlay
- * and this is physically continuous.
+ * @dma_addr: bus (accessed by dma) address of the memory region allocated
+ * for an overlay.
* @vaddr: virtual memory address to this overlay.
* @default_win: a window to be enabled.
* @color_key: color key on or off.
@@ -108,7 +109,7 @@ struct exynos_drm_overlay {
unsigned int scan_flag;
unsigned int bpp;
unsigned int pitch;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
void __iomem *vaddr;
bool default_win;
@@ -130,7 +131,7 @@ struct exynos_drm_overlay {
* @check_timing: check if timing is valid or not.
* @power_on: display device on or off.
*/
-struct exynos_drm_display {
+struct exynos_drm_display_ops {
enum exynos_drm_output_type type;
bool (*is_connected)(struct device *dev);
int (*get_edid)(struct device *dev, struct drm_connector *connector,
@@ -146,12 +147,14 @@ struct exynos_drm_display {
* @mode_set: convert drm_display_mode to hw specific display mode and
* would be called by encoder->mode_set().
* @commit: set current hw specific display mode to hw.
+ * @disable: disable hardware specific display mode.
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
*/
struct exynos_drm_manager_ops {
void (*mode_set)(struct device *subdrv_dev, void *mode);
void (*commit)(struct device *subdrv_dev);
+ void (*disable)(struct device *subdrv_dev);
int (*enable_vblank)(struct device *subdrv_dev);
void (*disable_vblank)(struct device *subdrv_dev);
};
@@ -178,7 +181,7 @@ struct exynos_drm_manager {
int pipe;
struct exynos_drm_manager_ops *ops;
struct exynos_drm_overlay_ops *overlay_ops;
- struct exynos_drm_display *display;
+ struct exynos_drm_display_ops *display_ops;
};
/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 7cf6fa86a67e..153061415baf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -53,15 +53,36 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+ struct exynos_drm_manager_ops *manager_ops = manager->ops;
DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ if (manager_ops && manager_ops->commit)
+ manager_ops->commit(manager->dev);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ /* TODO */
+ if (manager_ops && manager_ops->disable)
+ manager_ops->disable(manager->dev);
+ break;
+ default:
+ DRM_ERROR("unspecified mode %d\n", mode);
+ break;
+ }
+
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
- struct exynos_drm_display *display = manager->display;
+ struct exynos_drm_display_ops *display_ops =
+ manager->display_ops;
- if (display && display->power_on)
- display->power_on(manager->dev, mode);
+ DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
+ connector->base.id, mode);
+ if (display_ops && display_ops->power_on)
+ display_ops->power_on(manager->dev, mode);
}
}
}
@@ -116,15 +137,11 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
{
struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
struct exynos_drm_manager_ops *manager_ops = manager->ops;
- struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
DRM_DEBUG_KMS("%s\n", __FILE__);
if (manager_ops && manager_ops->commit)
manager_ops->commit(manager->dev);
-
- if (overlay_ops && overlay_ops->commit)
- overlay_ops->commit(manager->dev);
}
static struct drm_crtc *
@@ -208,10 +225,23 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
{
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
+ struct exynos_drm_private *private = dev->dev_private;
+ struct exynos_drm_manager *manager;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc != crtc)
- continue;
+ /*
+ * if crtc is detached from encoder, check pipe,
+ * otherwise check crtc attached to encoder
+ */
+ if (!encoder->crtc) {
+ manager = to_exynos_encoder(encoder)->manager;
+ if (manager->pipe < 0 ||
+ private->crtc[manager->pipe] != crtc)
+ continue;
+ } else {
+ if (encoder->crtc != crtc)
+ continue;
+ }
fn(encoder, data);
}
@@ -250,8 +280,18 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
struct exynos_drm_manager *manager =
to_exynos_encoder(encoder)->manager;
struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+ int crtc = *(int *)data;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /*
+ * when crtc is detached from encoder, this pipe is used
+ * to select manager operation
+ */
+ manager->pipe = crtc;
- overlay_ops->commit(manager->dev);
+ if (overlay_ops && overlay_ops->commit)
+ overlay_ops->commit(manager->dev);
}
void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
@@ -261,7 +301,28 @@ void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
struct exynos_drm_overlay *overlay = data;
- overlay_ops->mode_set(manager->dev, overlay);
+ if (overlay_ops && overlay_ops->mode_set)
+ overlay_ops->mode_set(manager->dev, overlay);
+}
+
+void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data)
+{
+ struct exynos_drm_manager *manager =
+ to_exynos_encoder(encoder)->manager;
+ struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (overlay_ops && overlay_ops->disable)
+ overlay_ops->disable(manager->dev);
+
+ /*
+ * the crtc has already been detached from the encoder and the last
+ * step of the detach has completed, so clear the pipe from the
+ * manager to prevent repeated calls
+ */
+ if (!encoder->crtc)
+ manager->pipe = -1;
}
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 5ecd645d06a9..a22acfbf0e4e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -41,5 +41,6 @@ void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 48d29cfd5240..5bf4a1ac7f82 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -29,7 +29,9 @@
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_buf.h"
#include "exynos_drm_gem.h"
@@ -41,14 +43,14 @@
*
* @fb: drm framebuffer object.
* @exynos_gem_obj: exynos specific gem object containing a gem object.
- * @entry: pointer to exynos drm buffer entry object.
- * - containing only the information to physically continuous memory
- * region allocated at default framebuffer creation.
+ * @buffer: pointer to exynos_drm_gem_buffer object.
+ * - contains the memory information for the memory region allocated
+ * at default framebuffer creation.
*/
struct exynos_drm_fb {
struct drm_framebuffer fb;
struct exynos_drm_gem_obj *exynos_gem_obj;
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
};
static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
@@ -63,8 +65,8 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
* default framebuffer has no gem object so
* a buffer of the default framebuffer should be released at here.
*/
- if (!exynos_fb->exynos_gem_obj && exynos_fb->entry)
- exynos_drm_buf_destroy(fb->dev, exynos_fb->entry);
+ if (!exynos_fb->exynos_gem_obj && exynos_fb->buffer)
+ exynos_drm_buf_destroy(fb->dev, exynos_fb->buffer);
kfree(exynos_fb);
exynos_fb = NULL;
@@ -143,29 +145,29 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
*/
if (!mode_cmd->handle) {
if (!file_priv) {
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
/*
* in case that file_priv is NULL, it allocates
* only buffer and this buffer would be used
* for default framebuffer.
*/
- entry = exynos_drm_buf_create(dev, size);
- if (IS_ERR(entry)) {
- ret = PTR_ERR(entry);
+ buffer = exynos_drm_buf_create(dev, size);
+ if (IS_ERR(buffer)) {
+ ret = PTR_ERR(buffer);
goto err_buffer;
}
- exynos_fb->entry = entry;
+ exynos_fb->buffer = buffer;
- DRM_LOG_KMS("default fb: paddr = 0x%lx, size = 0x%x\n",
- (unsigned long)entry->paddr, size);
+ DRM_LOG_KMS("default: dma_addr = 0x%lx, size = 0x%x\n",
+ (unsigned long)buffer->dma_addr, size);
goto out;
} else {
- exynos_gem_obj = exynos_drm_gem_create(file_priv, dev,
- size,
- &mode_cmd->handle);
+ exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
+ &mode_cmd->handle,
+ size);
if (IS_ERR(exynos_gem_obj)) {
ret = PTR_ERR(exynos_gem_obj);
goto err_buffer;
@@ -189,10 +191,10 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
* so that default framebuffer has no its own gem object,
* only its own buffer object.
*/
- exynos_fb->entry = exynos_gem_obj->entry;
+ exynos_fb->buffer = exynos_gem_obj->buffer;
- DRM_LOG_KMS("paddr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
- (unsigned long)exynos_fb->entry->paddr, size,
+ DRM_LOG_KMS("dma_addr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
+ (unsigned long)exynos_fb->buffer->dma_addr, size,
(unsigned int)&exynos_gem_obj->base);
out:
@@ -220,26 +222,36 @@ struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
return exynos_drm_fb_init(file_priv, dev, mode_cmd);
}
-struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
+struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
DRM_DEBUG_KMS("%s\n", __FILE__);
- entry = exynos_fb->entry;
- if (!entry)
+ buffer = exynos_fb->buffer;
+ if (!buffer)
return NULL;
- DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n",
- (unsigned long)entry->vaddr,
- (unsigned long)entry->paddr);
+ DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
+ (unsigned long)buffer->kvaddr,
+ (unsigned long)buffer->dma_addr);
- return entry;
+ return buffer;
+}
+
+static void exynos_drm_output_poll_changed(struct drm_device *dev)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *fb_helper = private->fb_helper;
+
+ if (fb_helper)
+ drm_fb_helper_hotplug_event(fb_helper);
}
static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_drm_fb_create,
+ .output_poll_changed = exynos_drm_output_poll_changed,
};
void exynos_drm_mode_config_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 1f4b3d1a7713..836f41008187 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -33,6 +33,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
+#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#define MAX_CONNECTOR 4
@@ -85,15 +86,13 @@ static struct fb_ops exynos_drm_fb_ops = {
};
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
- struct drm_framebuffer *fb,
- unsigned int fb_width,
- unsigned int fb_height)
+ struct drm_framebuffer *fb)
{
struct fb_info *fbi = helper->fbdev;
struct drm_device *dev = helper->dev;
struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper);
- struct exynos_drm_buf_entry *entry;
- unsigned int size = fb_width * fb_height * (fb->bits_per_pixel >> 3);
+ struct exynos_drm_gem_buf *buffer;
+ unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
unsigned long offset;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -101,20 +100,20 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
exynos_fb->fb = fb;
drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
- drm_fb_helper_fill_var(fbi, helper, fb_width, fb_height);
+ drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
- entry = exynos_drm_fb_get_buf(fb);
- if (!entry) {
- DRM_LOG_KMS("entry is null.\n");
+ buffer = exynos_drm_fb_get_buf(fb);
+ if (!buffer) {
+ DRM_LOG_KMS("buffer is null.\n");
return -EFAULT;
}
offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
offset += fbi->var.yoffset * fb->pitch;
- dev->mode_config.fb_base = entry->paddr;
- fbi->screen_base = entry->vaddr + offset;
- fbi->fix.smem_start = entry->paddr + offset;
+ dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
+ fbi->screen_base = buffer->kvaddr + offset;
+ fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
fbi->screen_size = size;
fbi->fix.smem_len = size;
@@ -171,8 +170,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
goto out;
}
- ret = exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width,
- sizes->fb_height);
+ ret = exynos_drm_fbdev_update(helper, helper->fb);
if (ret < 0)
fb_dealloc_cmap(&fbi->cmap);
@@ -235,8 +233,7 @@ static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper,
}
helper->fb = exynos_fbdev->fb;
- return exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width,
- sizes->fb_height);
+ return exynos_drm_fbdev_update(helper, helper->fb);
}
static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
@@ -405,6 +402,18 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
fb_helper = private->fb_helper;
if (fb_helper) {
+ struct list_head temp_list;
+
+ INIT_LIST_HEAD(&temp_list);
+
+ /*
+ * fb_helper is reinitialized but the kernel fb is reused,
+ * so kernel_fb_list needs to be backed up and restored
+ */
+ if (!list_empty(&fb_helper->kernel_fb_list))
+ list_replace_init(&fb_helper->kernel_fb_list,
+ &temp_list);
+
drm_fb_helper_fini(fb_helper);
ret = drm_fb_helper_init(dev, fb_helper,
@@ -414,6 +423,9 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
return ret;
}
+ if (!list_empty(&temp_list))
+ list_replace(&temp_list, &fb_helper->kernel_fb_list);
+
ret = drm_fb_helper_single_add_all_connectors(fb_helper);
if (ret < 0) {
DRM_ERROR("failed to add fb helper to connectors\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 4659c88cdd9b..db3b3d9e731d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -64,7 +64,7 @@ struct fimd_win_data {
unsigned int fb_width;
unsigned int fb_height;
unsigned int bpp;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
void __iomem *vaddr;
unsigned int buf_offsize;
unsigned int line_size; /* bytes */
@@ -124,7 +124,7 @@ static int fimd_display_power_on(struct device *dev, int mode)
return 0;
}
-static struct exynos_drm_display fimd_display = {
+static struct exynos_drm_display_ops fimd_display_ops = {
.type = EXYNOS_DISPLAY_TYPE_LCD,
.is_connected = fimd_display_is_connected,
.get_timing = fimd_get_timing,
@@ -177,6 +177,40 @@ static void fimd_commit(struct device *dev)
writel(val, ctx->regs + VIDCON0);
}
+static void fimd_disable(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+ struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+ struct drm_device *drm_dev = subdrv->drm_dev;
+ struct exynos_drm_manager *manager = &subdrv->manager;
+ u32 val;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /* fimd dma off */
+ val = readl(ctx->regs + VIDCON0);
+ val &= ~(VIDCON0_ENVID | VIDCON0_ENVID_F);
+ writel(val, ctx->regs + VIDCON0);
+
+ /*
+ * if vblank is still enabled when dma is turned off,
+ * disable the vsync interrupt.
+ */
+ if (drm_dev->vblank_enabled[manager->pipe] &&
+ atomic_read(&drm_dev->vblank_refcount[manager->pipe])) {
+ drm_vblank_put(drm_dev, manager->pipe);
+
+ /*
+ * if vblank_disable_allowed is 0, disable the vsync
+ * interrupt right now; otherwise the vsync interrupt
+ * will be disabled by the drm timer once the current
+ * process gives up ownership of the vblank event.
+ */
+ if (!drm_dev->vblank_disable_allowed)
+ drm_vblank_off(drm_dev, manager->pipe);
+ }
+}
+
static int fimd_enable_vblank(struct device *dev)
{
struct fimd_context *ctx = get_fimd_context(dev);
@@ -220,6 +254,7 @@ static void fimd_disable_vblank(struct device *dev)
static struct exynos_drm_manager_ops fimd_manager_ops = {
.commit = fimd_commit,
+ .disable = fimd_disable,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
};
@@ -251,7 +286,7 @@ static void fimd_win_mode_set(struct device *dev,
win_data->ovl_height = overlay->crtc_height;
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
- win_data->paddr = overlay->paddr + offset;
+ win_data->dma_addr = overlay->dma_addr + offset;
win_data->vaddr = overlay->vaddr + offset;
win_data->bpp = overlay->bpp;
win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
@@ -263,7 +298,7 @@ static void fimd_win_mode_set(struct device *dev,
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
- (unsigned long)win_data->paddr,
+ (unsigned long)win_data->dma_addr,
(unsigned long)win_data->vaddr);
DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
overlay->fb_width, overlay->crtc_width);
@@ -376,16 +411,16 @@ static void fimd_win_commit(struct device *dev)
writel(val, ctx->regs + SHADOWCON);
/* buffer start address */
- val = win_data->paddr;
+ val = (unsigned long)win_data->dma_addr;
writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
/* buffer end address */
size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
- val = win_data->paddr + size;
+ val = (unsigned long)(win_data->dma_addr + size);
writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
- (unsigned long)win_data->paddr, val, size);
+ (unsigned long)win_data->dma_addr, val, size);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
@@ -447,7 +482,6 @@ static void fimd_win_commit(struct device *dev)
static void fimd_win_disable(struct device *dev)
{
struct fimd_context *ctx = get_fimd_context(dev);
- struct fimd_win_data *win_data;
int win = ctx->default_win;
u32 val;
@@ -456,8 +490,6 @@ static void fimd_win_disable(struct device *dev)
if (win < 0 || win > WINDOWS_NR)
return;
- win_data = &ctx->win_data[win];
-
/* protect windows */
val = readl(ctx->regs + SHADOWCON);
val |= SHADOWCON_WINx_PROTECT(win);
@@ -528,6 +560,16 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
/* VSYNC interrupt */
writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
+ /*
+ * when vblank_disable_allowed is 1, manager->pipe can already be -1
+ * here: the disable callback does not turn the vsync interrupt off,
+ * so a vsync interrupt can still arrive at this point. the interrupt
+ * will be disabled by the timer handler later.
+ */
+ if (manager->pipe == -1)
+ return IRQ_HANDLED;
+
drm_handle_vblank(drm_dev, manager->pipe);
fimd_finish_pageflip(drm_dev, manager->pipe);
@@ -548,13 +590,6 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
*/
drm_dev->irq_enabled = 1;
- /*
- * with vblank_disable_allowed = 1, vblank interrupt will be disabled
- * by drm timer once a current process gives up ownership of
- * vblank event.(drm_vblank_put function was called)
- */
- drm_dev->vblank_disable_allowed = 1;
-
return 0;
}
@@ -731,7 +766,7 @@ static int __devinit fimd_probe(struct platform_device *pdev)
subdrv->manager.pipe = -1;
subdrv->manager.ops = &fimd_manager_ops;
subdrv->manager.overlay_ops = &fimd_overlay_ops;
- subdrv->manager.display = &fimd_display;
+ subdrv->manager.display_ops = &fimd_display_ops;
subdrv->manager.dev = dev;
platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index a8e7a88906ed..aba0fe47f7ea 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -62,40 +62,28 @@ static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
}
-struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
- struct drm_device *dev, unsigned int size,
- unsigned int *handle)
+static struct exynos_drm_gem_obj
+ *exynos_drm_gem_init(struct drm_device *drm_dev,
+ struct drm_file *file_priv, unsigned int *handle,
+ unsigned int size)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
- struct exynos_drm_buf_entry *entry;
struct drm_gem_object *obj;
int ret;
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- size = roundup(size, PAGE_SIZE);
-
exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
if (!exynos_gem_obj) {
DRM_ERROR("failed to allocate exynos gem object.\n");
return ERR_PTR(-ENOMEM);
}
- /* allocate the new buffer object and memory region. */
- entry = exynos_drm_buf_create(dev, size);
- if (!entry) {
- kfree(exynos_gem_obj);
- return ERR_PTR(-ENOMEM);
- }
-
- exynos_gem_obj->entry = entry;
-
obj = &exynos_gem_obj->base;
- ret = drm_gem_object_init(dev, obj, size);
+ ret = drm_gem_object_init(drm_dev, obj, size);
if (ret < 0) {
- DRM_ERROR("failed to initailize gem object.\n");
- goto err_obj_init;
+ DRM_ERROR("failed to initialize gem object.\n");
+ ret = -EINVAL;
+ goto err_object_init;
}
DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
@@ -127,24 +115,50 @@ err_handle_create:
err_create_mmap_offset:
drm_gem_object_release(obj);
-err_obj_init:
- exynos_drm_buf_destroy(dev, exynos_gem_obj->entry);
-
+err_object_init:
kfree(exynos_gem_obj);
return ERR_PTR(ret);
}
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ unsigned int *handle, unsigned long size)
+{
+
+ struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
+ struct exynos_drm_gem_buf *buffer;
+
+ size = roundup(size, PAGE_SIZE);
+
+ DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
+
+ buffer = exynos_drm_buf_create(dev, size);
+ if (IS_ERR(buffer)) {
+ return ERR_CAST(buffer);
+ }
+
+ exynos_gem_obj = exynos_drm_gem_init(dev, file_priv, handle, size);
+ if (IS_ERR(exynos_gem_obj)) {
+ exynos_drm_buf_destroy(dev, buffer);
+ return exynos_gem_obj;
+ }
+
+ exynos_gem_obj->buffer = buffer;
+
+ return exynos_gem_obj;
+}
+
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_exynos_gem_create *args = data;
- struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
- DRM_DEBUG_KMS("%s : size = 0x%x\n", __FILE__, args->size);
+ DRM_DEBUG_KMS("%s\n", __FILE__);
- exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
- &args->handle);
+ exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
+ &args->handle, args->size);
if (IS_ERR(exynos_gem_obj))
return PTR_ERR(exynos_gem_obj);
@@ -175,7 +189,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
{
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
unsigned long pfn, vm_size;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -187,20 +201,20 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
vm_size = vma->vm_end - vma->vm_start;
/*
- * a entry contains information to physically continuous memory
+ * a buffer contains information about the physically contiguous memory
* allocated by user request or at framebuffer creation.
*/
- entry = exynos_gem_obj->entry;
+ buffer = exynos_gem_obj->buffer;
/* check if user-requested size is valid. */
- if (vm_size > entry->size)
+ if (vm_size > buffer->size)
return -EINVAL;
/*
* get page frame number to physical memory to be mapped
* to user space.
*/
- pfn = exynos_gem_obj->entry->paddr >> PAGE_SHIFT;
+ pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> PAGE_SHIFT;
DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
@@ -281,7 +295,7 @@ void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj)
exynos_gem_obj = to_exynos_gem_obj(gem_obj);
- exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->entry);
+ exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->buffer);
kfree(exynos_gem_obj);
}
@@ -302,8 +316,8 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
args->pitch = args->width * args->bpp >> 3;
args->size = args->pitch * args->height;
- exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
- &args->handle);
+ exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, &args->handle,
+ args->size);
if (IS_ERR(exynos_gem_obj))
return PTR_ERR(exynos_gem_obj);
@@ -360,7 +374,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
mutex_lock(&dev->struct_mutex);
- pfn = (exynos_gem_obj->entry->paddr >> PAGE_SHIFT) + page_offset;
+ pfn = (((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
+ PAGE_SHIFT) + page_offset;
ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index e5fc0148277b..ef8797334e6d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -30,13 +30,29 @@
struct exynos_drm_gem_obj, base)
/*
+ * exynos drm gem buffer structure.
+ *
+ * @kvaddr: kernel virtual address to allocated memory region.
+ * @dma_addr: bus address(accessed by dma) to allocated memory region.
+ * - this address could be physical address without IOMMU and
+ * device address with IOMMU.
+ * @size: size of allocated memory region.
+ */
+struct exynos_drm_gem_buf {
+ void __iomem *kvaddr;
+ dma_addr_t dma_addr;
+ unsigned long size;
+};
+
+/*
* exynos drm buffer structure.
*
* @base: a gem object.
* - a new handle to this gem object would be created
* by drm_gem_handle_create().
- * @entry: pointer to exynos drm buffer entry object.
- * - containing the information to physically
+ * @buffer: a pointer to exynos_drm_gem_buffer object.
+ * - contains the information about the memory region allocated
+ * by user request or at framebuffer creation.
* continuous memory region allocated by user request
* or at framebuffer creation.
*
@@ -45,13 +61,13 @@
*/
struct exynos_drm_gem_obj {
struct drm_gem_object base;
- struct exynos_drm_buf_entry *entry;
+ struct exynos_drm_gem_buf *buffer;
};
/* create a new buffer and get a new gem handle. */
-struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
- struct drm_device *dev, unsigned int size,
- unsigned int *handle);
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ unsigned int *handle, unsigned long size);
/*
* request gem object creation and buffer allocation as the size
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d09a6e02dc95..004b048c5192 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -62,6 +62,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
const struct intel_device_info *info = INTEL_INFO(dev);
seq_printf(m, "gen: %d\n", info->gen);
+ seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
B(is_mobile);
B(is_i85x);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a9533c54c93c..a9ae374861e7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1454,6 +1454,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
diff1 = now - dev_priv->last_time1;
+ /* Prevent division-by-zero if we are asking too fast.
+ * Also, we don't get interesting results if we are polling
+ * faster than once in 10ms, so just return the saved value
+ * in such cases.
+ */
+ if (diff1 <= 10)
+ return dev_priv->chipset_power;
+
count1 = I915_READ(DMIEC);
count2 = I915_READ(DDREC);
count3 = I915_READ(CSIEC);
@@ -1484,6 +1492,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
dev_priv->last_count1 = total_count;
dev_priv->last_time1 = now;
+ dev_priv->chipset_power = ret;
+
return ret;
}
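Aside: the guard added to i915_chipset_val() caches the last result and skips the counter math when called again within 10ms. A standalone sketch of that rate-limited sampling pattern, with illustrative names:

    #include <stdint.h>

    struct sampler {
        uint64_t last_ms;            /* time of the last real computation */
        unsigned long cached;        /* value computed at that time */
    };

    static unsigned long sample(struct sampler *s, uint64_t now_ms,
                                unsigned long (*recompute)(void))
    {
        /* polling faster than every 10ms: return the saved value and
         * avoid dividing by a near-zero time delta */
        if (now_ms - s->last_ms <= 10)
            return s->cached;

        s->cached = recompute();
        s->last_ms = now_ms;
        return s->cached;
    }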
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 15bfa9145d2b..a1103fc6597d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -58,15 +58,15 @@ module_param_named(powersave, i915_powersave, int, 0600);
MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
-unsigned int i915_semaphores __read_mostly = 0;
+int i915_semaphores __read_mostly = -1;
module_param_named(semaphores, i915_semaphores, int, 0600);
MODULE_PARM_DESC(semaphores,
- "Use semaphores for inter-ring sync (default: false)");
+ "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
-unsigned int i915_enable_rc6 __read_mostly = 0;
+int i915_enable_rc6 __read_mostly = -1;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
MODULE_PARM_DESC(i915_enable_rc6,
- "Enable power-saving render C-state 6 (default: true)");
+ "Enable power-saving render C-state 6 (default: -1 (use per-chip default)");
int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
@@ -328,7 +328,7 @@ void intel_detect_pch(struct drm_device *dev)
}
}
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
@@ -344,6 +344,22 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
udelay(10);
}
+void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+ int count;
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
+ udelay(10);
+
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+ POSTING_READ(FORCEWAKE_MT);
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
+ udelay(10);
+}
+
/*
* Generally this is called implicitly by the register read function. However,
* if some sequence requires the GT to not power down then this function should
@@ -356,15 +372,21 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
/* Forcewake is atomic in case we get in here without the lock */
if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
- __gen6_gt_force_wake_get(dev_priv);
+ dev_priv->display.force_wake_get(dev_priv);
}
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(FORCEWAKE);
}
+void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+ POSTING_READ(FORCEWAKE_MT);
+}
+
/*
* see gen6_gt_force_wake_get()
*/
@@ -373,7 +395,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
if (atomic_dec_and_test(&dev_priv->forcewake_count))
- __gen6_gt_force_wake_put(dev_priv);
+ dev_priv->display.force_wake_put(dev_priv);
}
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@ -903,8 +925,9 @@ MODULE_LICENSE("GPL and additional rights");
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE) && \
+ ((reg) != ECOBUS))
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4a9c1b979804..554bef7a3b9c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -107,6 +107,7 @@ struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
+struct drm_i915_private;
struct intel_opregion {
struct opregion_header *header;
@@ -221,6 +222,8 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
+ void (*force_wake_get)(struct drm_i915_private *dev_priv);
+ void (*force_wake_put)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -710,6 +713,7 @@ typedef struct drm_i915_private {
u64 last_count1;
unsigned long last_time1;
+ unsigned long chipset_power;
u64 last_count2;
struct timespec last_time2;
unsigned long gfx_power;
@@ -998,11 +1002,11 @@ extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
-extern unsigned int i915_semaphores __read_mostly;
+extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
-extern unsigned int i915_enable_rc6 __read_mostly;
+extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
@@ -1308,6 +1312,11 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
+extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
+
/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1352,8 +1361,9 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE) && \
+ ((reg) != ECOBUS))
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3693e83a97f3..c681dc149d2a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include <linux/dma_remapping.h>
struct change_domains {
uint32_t invalidate_domains;
@@ -746,6 +747,22 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
return 0;
}
+static bool
+intel_enable_semaphores(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6)
+ return 0;
+
+ if (i915_semaphores >= 0)
+ return i915_semaphores;
+
+ /* Enable semaphores on SNB when IO remapping is off */
+ if (INTEL_INFO(dev)->gen == 6)
+ return !intel_iommu_enabled;
+
+ return 1;
+}
+
static int
i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to)
@@ -758,7 +775,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
return 0;
/* XXX gpu semaphores are implicated in various hard hangs on SNB */
- if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
+ if (!intel_enable_semaphores(obj->base.dev))
return i915_gem_object_wait_rendering(obj);
idx = intel_ring_sync_index(from, to);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b080cc824001..a26d5b0a3690 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3303,10 +3303,10 @@
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
-#define TRANSCODER_A (0)
-#define TRANSCODER_B (1 << 30)
-#define TRANSCODER(pipe) ((pipe) << 30)
-#define TRANSCODER_MASK (1 << 30)
+#define TRANSCODER(pipe) ((pipe) << 30)
+#define TRANSCODER_CPT(pipe) ((pipe) << 29)
+#define TRANSCODER_MASK (1 << 30)
+#define TRANSCODER_MASK_CPT (3 << 29)
#define COLOR_FORMAT_8bpc (0)
#define COLOR_FORMAT_12bpc (3 << 26)
#define SDVOB_HOTPLUG_ENABLE (1 << 23)
@@ -3447,8 +3447,30 @@
#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24<<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a<<22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f<<22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30<<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36<<22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38<<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33<<22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00<<22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20<<22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02<<22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22<<22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23<<22)
+
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
+
#define FORCEWAKE 0xA18C
#define FORCEWAKE_ACK 0x130090
+#define FORCEWAKE_MT 0xa188 /* multi-threaded */
+#define FORCEWAKE_MT_ACK 0x130040
+#define ECOBUS 0xa180
+#define FORCEWAKE_MT_ENABLE (1<<5)
#define GT_FIFO_FREE_ENTRIES 0x120008
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e77a863a3833..d809b038ca88 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -38,8 +38,8 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "drm_dp_helper.h"
-
#include "drm_crtc_helper.h"
+#include <linux/dma_remapping.h>
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
@@ -4670,6 +4670,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
/**
* intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
* @crtc: CRTC structure
+ * @mode: requested mode
*
* A pipe may be connected to one or more outputs. Based on the depth of the
* attached framebuffer, choose a good color depth to use on the pipe.
@@ -4681,13 +4682,15 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
* HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
* Displays may support a restricted set as well, check EDID and clamp as
* appropriate.
+ * DP may want to dither down to 6bpc to fit larger modes
*
* RETURNS:
* Dithering requirement (i.e. false if display bpc and pipe bpc match,
* true if they don't match).
*/
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
- unsigned int *pipe_bpp)
+ unsigned int *pipe_bpp,
+ struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4758,6 +4761,11 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
}
}
+ if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
+ display_bpc = 6;
+ }
+
/*
* We could just drive the pipe at the highest bpc all the time and
* enable dithering as needed, but that costs bandwidth. So choose
@@ -5019,6 +5027,16 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_DOUBLE_WIDE;
}
+ /* default to 8bpc */
+ pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+ if (is_dp) {
+ if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ pipeconf |= PIPECONF_BPP_6 |
+ PIPECONF_DITHER_EN |
+ PIPECONF_DITHER_TYPE_SP;
+ }
+ }
+
dpll |= DPLL_VCO_ENABLE;
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
@@ -5480,7 +5498,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* determine panel color depth */
temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
- dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
+ dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
switch (pipe_bpp) {
case 18:
temp |= PIPE_6BPC;
@@ -7189,11 +7207,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
+ ret = drm_vblank_get(dev, intel_crtc->pipe);
+ if (ret)
+ goto free_work;
+
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
+ drm_vblank_put(dev, intel_crtc->pipe);
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
@@ -7212,10 +7235,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
crtc->fb = fb;
- ret = drm_vblank_get(dev, intel_crtc->pipe);
- if (ret)
- goto cleanup_objs;
-
work->pending_flip_obj = obj;
work->enable_stall_check = true;
@@ -7238,7 +7257,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_pending:
atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
-cleanup_objs:
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
@@ -7247,6 +7265,8 @@ cleanup_objs:
intel_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
+ drm_vblank_put(dev, intel_crtc->pipe);
+free_work:
kfree(work);
return ret;
@@ -7887,6 +7907,33 @@ void intel_init_emon(struct drm_device *dev)
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
+static bool intel_enable_rc6(struct drm_device *dev)
+{
+ /*
+ * Respect the kernel parameter if it is set
+ */
+ if (i915_enable_rc6 >= 0)
+ return i915_enable_rc6;
+
+ /*
+ * Disable RC6 on Ironlake
+ */
+ if (INTEL_INFO(dev)->gen == 5)
+ return 0;
+
+ /*
+ * Enable rc6 on Sandybridge if DMA remapping is disabled
+ */
+ if (INTEL_INFO(dev)->gen == 6) {
+ DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
+ intel_iommu_enabled ? "true" : "false",
+ !intel_iommu_enabled ? "en" : "dis");
+ return !intel_iommu_enabled;
+ }
+ DRM_DEBUG_DRIVER("RC6 enabled\n");
+ return 1;
+}
+
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -7923,7 +7970,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
- if (i915_enable_rc6)
+ if (intel_enable_rc6(dev_priv->dev))
rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
GEN6_RC_CTL_RC6_ENABLE;
@@ -8372,7 +8419,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
/* rc6 disabled by default due to repeated reports of hanging during
* boot and resume.
*/
- if (!i915_enable_rc6)
+ if (!intel_enable_rc6(dev))
return;
mutex_lock(&dev->struct_mutex);
@@ -8491,6 +8538,28 @@ static void intel_init_display(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
+
+ /* IVB configs may use multi-threaded forcewake */
+ if (IS_IVYBRIDGE(dev)) {
+ u32 ecobus;
+
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = I915_READ(ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ DRM_DEBUG_KMS("Using MT version of forcewake\n");
+ dev_priv->display.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->display.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ }
+ }
+
if (HAS_PCH_IBX(dev))
dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
else if (HAS_PCH_CPT(dev))
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4d0358fad937..92b041b66e49 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -208,13 +208,15 @@ intel_dp_link_clock(uint8_t link_bw)
*/
static int
-intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock)
+intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
{
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int bpp = 24;
- if (intel_crtc)
+ if (check_bpp)
+ bpp = check_bpp;
+ else if (intel_crtc)
bpp = intel_crtc->bpp;
return (pixel_clock * bpp + 9) / 10;
@@ -233,6 +235,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct intel_dp *intel_dp = intel_attached_dp(connector);
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
+ int max_rate, mode_rate;
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@@ -242,9 +245,17 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
- if (intel_dp_link_required(intel_dp, mode->clock)
- > intel_dp_max_data_rate(max_link_clock, max_lanes))
- return MODE_CLOCK_HIGH;
+ mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
+ max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+
+ if (mode_rate > max_rate) {
+ mode_rate = intel_dp_link_required(intel_dp,
+ mode->clock, 18);
+ if (mode_rate > max_rate)
+ return MODE_CLOCK_HIGH;
+ else
+ mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
+ }
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
@@ -362,8 +373,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* clock divider.
*/
if (is_cpu_edp(intel_dp)) {
- if (IS_GEN6(dev))
- aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
+ if (IS_GEN6(dev) || IS_GEN7(dev))
+ aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400 MHz */
else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
} else if (HAS_PCH_SPLIT(dev))
@@ -672,6 +683,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+ int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -689,7 +701,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
- if (intel_dp_link_required(intel_dp, mode->clock)
+ if (intel_dp_link_required(intel_dp, mode->clock, bpp)
<= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
@@ -817,10 +829,11 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
/*
- * There are three kinds of DP registers:
+ * There are four kinds of DP registers:
*
* IBX PCH
- * CPU
+ * SNB CPU
+ * IVB CPU
* CPT PCH
*
* IBX PCH and CPU are the same for almost everything,
@@ -873,7 +886,25 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
/* Split out the IBX/CPU vs CPT settings */
- if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+ if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+ if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ intel_dp->DP |= intel_crtc->pipe << 29;
+
+ /* don't omit the required setting for eDP */
+ intel_dp->DP |= DP_PLL_ENABLE;
+ if (adjusted_mode->clock < 200000)
+ intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+ else
+ intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
intel_dp->DP |= intel_dp->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1375,34 +1406,59 @@ static char *link_train_names[] = {
* These are source-specific values; current Intel hardware supports
* a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
*/
-#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
-#define I830_DP_VOLTAGE_MAX_CPT DP_TRAIN_VOLTAGE_SWING_1200
static uint8_t
-intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+intel_dp_voltage_max(struct intel_dp *intel_dp)
{
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_400:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_600:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_800:
- return DP_TRAIN_PRE_EMPHASIS_3_5;
- case DP_TRAIN_VOLTAGE_SWING_1200:
- default:
- return DP_TRAIN_PRE_EMPHASIS_0;
+ struct drm_device *dev = intel_dp->base.base.dev;
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
+ return DP_TRAIN_VOLTAGE_SWING_800;
+ else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ return DP_TRAIN_VOLTAGE_SWING_1200;
+ else
+ return DP_TRAIN_VOLTAGE_SWING_800;
+}
+
+static uint8_t
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
}
}
static void
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
- struct drm_device *dev = intel_dp->base.base.dev;
uint8_t v = 0;
uint8_t p = 0;
int lane;
uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
- int voltage_max;
+ uint8_t voltage_max;
+ uint8_t preemph_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
@@ -1414,15 +1470,13 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
p = this_p;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
- voltage_max = I830_DP_VOLTAGE_MAX_CPT;
- else
- voltage_max = I830_DP_VOLTAGE_MAX;
+ voltage_max = intel_dp_voltage_max(intel_dp);
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
- if (p >= intel_dp_pre_emphasis_max(v))
- p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+ if (p >= preemph_max)
+ p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] = v | p;
@@ -1494,6 +1548,37 @@ intel_gen6_edp_signal_levels(uint8_t train_set)
}
}
+/* Gen7's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen7_edp_signal_levels(uint8_t train_set)
+{
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_400MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_600MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_800MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
+
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+ "0x%x\n", signal_levels);
+ return EDP_LINK_TRAIN_500MV_0DB_IVB;
+ }
+}
+
static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
@@ -1599,7 +1684,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
@@ -1613,7 +1699,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
uint8_t link_status[DP_LINK_STATUS_SIZE];
uint32_t signal_levels;
- if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+ } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
@@ -1622,7 +1712,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
@@ -1703,7 +1793,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
}
- if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+ } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
@@ -1711,7 +1804,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
@@ -1752,7 +1845,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
++tries;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
@@ -1782,7 +1875,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
udelay(100);
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) {
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
} else {
@@ -1794,7 +1887,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
msleep(17);
if (is_edp(intel_dp)) {
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP |= DP_LINK_TRAIN_OFF_CPT;
else
DP |= DP_LINK_TRAIN_OFF;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index bd9a604b73da..a1b4343814e8 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -110,6 +110,7 @@
/* drm_display_mode->private_flags */
#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+#define INTEL_MODE_DP_FORCE_6BPC (0x10)
static inline void
intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 42f165a520de..e44191132ac4 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -715,6 +715,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Asus AT5NM10T-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+ },
+ },
{ } /* terminating entry */
};
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 21f60b7d69a3..04d79fd1dc9d 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -178,13 +178,10 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
max >>= 16;
} else {
- if (IS_PINEVIEW(dev)) {
+ if (INTEL_INFO(dev)->gen < 4)
max >>= 17;
- } else {
+ else
max >>= 16;
- if (INTEL_INFO(dev)->gen < 4)
- max &= ~1;
- }
if (is_backlight_combination_mode(dev))
max *= 0xff;
@@ -203,13 +200,12 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
} else {
val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- if (IS_PINEVIEW(dev))
+ if (INTEL_INFO(dev)->gen < 4)
val >>= 1;
if (is_backlight_combination_mode(dev)) {
u8 lbpc;
- val &= ~1;
pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
val *= lbpc;
}
@@ -246,11 +242,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
}
tmp = I915_READ(BLC_PWM_CTL);
- if (IS_PINEVIEW(dev)) {
- tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+ if (INTEL_INFO(dev)->gen < 4)
level <<= 1;
- } else
- tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+ tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_CTL, tmp | level);
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 3003fb25aefd..f7b9268df266 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -50,6 +50,7 @@
#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
static const char *tv_format_names[] = {
@@ -1086,8 +1087,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
}
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
- if (intel_crtc->pipe == 1)
- sdvox |= SDVO_PIPE_B_SELECT;
+
+ if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+ sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
+ else
+ sdvox |= TRANSCODER(intel_crtc->pipe);
+
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
@@ -1314,6 +1319,18 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
return status;
}
+static bool
+intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
+ struct edid *edid)
+{
+ bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool connector_is_digital = !!IS_DIGITAL(sdvo);
+
+ DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
+ connector_is_digital, monitor_is_digital);
+ return connector_is_digital == monitor_is_digital;
+}
+
static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
@@ -1358,10 +1375,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if (edid == NULL)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- if (edid->input & DRM_EDID_INPUT_DIGITAL)
- ret = connector_status_disconnected;
- else
+ if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
+ edid))
ret = connector_status_connected;
+ else
+ ret = connector_status_disconnected;
+
connector->display_info.raw_edid = NULL;
kfree(edid);
} else
@@ -1402,11 +1421,8 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
- bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
-
- if (connector_is_digital == monitor_is_digital) {
+ if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+ edid)) {
drm_mode_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index ddbabefb4273..b12fd2c80812 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -369,3 +369,48 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
spin_unlock_irqrestore(&dev->event_lock, flags);
return 0;
}
+
+int
+nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct nouveau_bo *bo;
+ int ret;
+
+ args->pitch = roundup(args->width * (args->bpp / 8), 256);
+ args->size = args->pitch * args->height;
+ args->size = roundup(args->size, PAGE_SIZE);
+
+ ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
+ drm_gem_object_unreference_unlocked(bo->gem);
+ return ret;
+}
+
+int
+nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file_priv, handle);
+}
+
+int
+nouveau_display_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *poffset)
+{
+ struct drm_gem_object *gem;
+
+ gem = drm_gem_object_lookup(dev, file_priv, handle);
+ if (gem) {
+ struct nouveau_bo *bo = gem->driver_private;
+ *poffset = bo->bo.addr_space_offset;
+ drm_gem_object_unreference_unlocked(gem);
+ return 0;
+ }
+
+ return -ENOENT;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 9f7bb1295262..9791d13c9e3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -433,6 +433,10 @@ static struct drm_driver driver = {
.gem_open_object = nouveau_gem_object_open,
.gem_close_object = nouveau_gem_object_close,
+ .dumb_create = nouveau_display_dumb_create,
+ .dumb_map_offset = nouveau_display_dumb_map_offset,
+ .dumb_destroy = nouveau_display_dumb_destroy,
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
#ifdef GIT_REVISION
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 29837da1098b..4c0be3a4ed88 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1418,6 +1418,12 @@ int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
int nouveau_finish_page_flip(struct nouveau_channel *,
struct nouveau_page_flip_state *);
+int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
+ struct drm_mode_create_dumb *args);
+int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
+ uint32_t handle, uint64_t *offset);
+int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
+ uint32_t handle);
/* nv10_gpio.c */
int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 02222c540aee..960c0ae0c0c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -680,7 +680,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
return ret;
}
- ret = drm_mm_init(&chan->ramin_heap, base, size);
+ ret = drm_mm_init(&chan->ramin_heap, base, size - base);
if (ret) {
NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
nouveau_gpuobj_ref(NULL, &chan->ramin);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index b75258a9fe44..c8a463b76c89 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -67,7 +67,10 @@ nouveau_sgdma_clear(struct ttm_backend *be)
pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
+ nvbe->unmap_pages = false;
}
+
+ nvbe->pages = NULL;
}
static void
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index d23ca00e7d62..06de250fe617 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -616,7 +616,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), mc;
- int i, crtc, or, type = OUTPUT_ANY;
+ int i, crtc, or = 0, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
disp->irq.dcb = NULL;
@@ -708,7 +708,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
struct dcb_entry *dcb;
- int i, crtc, or, type = OUTPUT_ANY;
+ int i, crtc, or = 0, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
dcb = disp->irq.dcb;
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index a74e501afd25..ecfafd70cf0e 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -381,6 +381,8 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
u8 tpnr[GPC_MAX];
int i, gpc, tpc;
+ nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
+
/*
* TP ROP UNKVAL(magic_not_rop_nr)
* 450: 4/0/0/0 2 3
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index 23d63b4b3d77..cb006a718e70 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -780,7 +780,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
continue;
if (nv_partner != nv_encoder &&
- nv_partner->dcb->or == nv_encoder->or) {
+ nv_partner->dcb->or == nv_encoder->dcb->or) {
if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
return;
break;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 87631fede1f8..2b97262e3ab1 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1107,9 +1107,40 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
return -EINVAL;
}
- if (tiling_flags & RADEON_TILING_MACRO)
+ if (tiling_flags & RADEON_TILING_MACRO) {
+ if (rdev->family >= CHIP_CAYMAN)
+ tmp = rdev->config.cayman.tile_config;
+ else
+ tmp = rdev->config.evergreen.tile_config;
+
+ switch ((tmp & 0xf0) >> 4) {
+ case 0: /* 4 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+ break;
+ case 1: /* 8 banks */
+ default:
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+ break;
+ case 2: /* 16 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+ break;
+ }
+
+ switch ((tmp & 0xf000) >> 12) {
+ case 0: /* 1KB rows */
+ default:
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB);
+ break;
+ case 1: /* 2KB rows */
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB);
+ break;
+ case 2: /* 4KB rows */
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB);
+ break;
+ }
+
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
- else if (tiling_flags & RADEON_TILING_MICRO)
+ } else if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
switch (radeon_crtc->crtc_id) {
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 1d603a3335db..5e00d1670aa9 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -82,6 +82,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
/* Lock the graphics update lock */
tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
@@ -99,7 +100,11 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
(u32)crtc_base);
/* Wait for update_pending to go high. */
- while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 38e1bda73d33..cd4590aae154 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -38,6 +38,7 @@ struct evergreen_cs_track {
u32 group_size;
u32 nbanks;
u32 npipes;
+ u32 row_size;
/* value we track */
u32 nsamples;
u32 cb_color_base_last[12];
@@ -77,6 +78,44 @@ struct evergreen_cs_track {
struct radeon_bo *db_s_write_bo;
};
+static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
+{
+ if (tiling_flags & RADEON_TILING_MACRO)
+ return ARRAY_2D_TILED_THIN1;
+ else if (tiling_flags & RADEON_TILING_MICRO)
+ return ARRAY_1D_TILED_THIN1;
+ else
+ return ARRAY_LINEAR_GENERAL;
+}
+
+static u32 evergreen_cs_get_num_banks(u32 nbanks)
+{
+ switch (nbanks) {
+ case 2:
+ return ADDR_SURF_2_BANK;
+ case 4:
+ return ADDR_SURF_4_BANK;
+ case 8:
+ default:
+ return ADDR_SURF_8_BANK;
+ case 16:
+ return ADDR_SURF_16_BANK;
+ }
+}
+
+static u32 evergreen_cs_get_tile_split(u32 row_size)
+{
+ switch (row_size) {
+ case 1:
+ default:
+ return ADDR_SURF_TILE_SPLIT_1KB;
+ case 2:
+ return ADDR_SURF_TILE_SPLIT_2KB;
+ case 4:
+ return ADDR_SURF_TILE_SPLIT_4KB;
+ }
+}
+
static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
int i;
@@ -490,12 +529,11 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
ib[idx] &= ~Z_ARRAY_MODE(0xf);
track->db_z_info &= ~Z_ARRAY_MODE(0xf);
+ ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else {
- ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
}
}
break;
@@ -618,13 +656,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- }
+ ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
}
break;
case CB_COLOR8_INFO:
@@ -640,13 +673,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- }
+ ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
}
break;
case CB_COLOR0_PITCH:
@@ -701,6 +729,16 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR9_ATTRIB:
case CB_COLOR10_ATTRIB:
case CB_COLOR11_ATTRIB:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+ }
break;
case CB_COLOR0_DIM:
case CB_COLOR1_DIM:
@@ -1318,10 +1356,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
if (!p->keep_tiling_flags) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ ib[idx+1+(i*8)+1] |=
+ TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx+1+(i*8)+6] |=
+ TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+ ib[idx+1+(i*8)+7] |=
+ TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ }
}
texture = reloc->robj;
/* tex mip base */
@@ -1422,6 +1464,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
{
struct radeon_cs_packet pkt;
struct evergreen_cs_track *track;
+ u32 tmp;
int r;
if (p->track == NULL) {
@@ -1430,9 +1473,63 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
if (track == NULL)
return -ENOMEM;
evergreen_cs_track_init(track);
- track->npipes = p->rdev->config.evergreen.tiling_npipes;
- track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
- track->group_size = p->rdev->config.evergreen.tiling_group_size;
+ if (p->rdev->family >= CHIP_CAYMAN)
+ tmp = p->rdev->config.cayman.tile_config;
+ else
+ tmp = p->rdev->config.evergreen.tile_config;
+
+ switch (tmp & 0xf) {
+ case 0:
+ track->npipes = 1;
+ break;
+ case 1:
+ default:
+ track->npipes = 2;
+ break;
+ case 2:
+ track->npipes = 4;
+ break;
+ case 3:
+ track->npipes = 8;
+ break;
+ }
+
+ switch ((tmp & 0xf0) >> 4) {
+ case 0:
+ track->nbanks = 4;
+ break;
+ case 1:
+ default:
+ track->nbanks = 8;
+ break;
+ case 2:
+ track->nbanks = 16;
+ break;
+ }
+
+ switch ((tmp & 0xf00) >> 8) {
+ case 0:
+ track->group_size = 256;
+ break;
+ case 1:
+ default:
+ track->group_size = 512;
+ break;
+ }
+
+ switch ((tmp & 0xf000) >> 12) {
+ case 0:
+ track->row_size = 1;
+ break;
+ case 1:
+ default:
+ track->row_size = 2;
+ break;
+ case 2:
+ track->row_size = 4;
+ break;
+ }
+
p->track = track;
}
do {
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index c781c92c3451..7d7f2155e34c 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -42,6 +42,17 @@
# define EVERGREEN_GRPH_DEPTH_8BPP 0
# define EVERGREEN_GRPH_DEPTH_16BPP 1
# define EVERGREEN_GRPH_DEPTH_32BPP 2
+# define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
+# define EVERGREEN_ADDR_SURF_2_BANK 0
+# define EVERGREEN_ADDR_SURF_4_BANK 1
+# define EVERGREEN_ADDR_SURF_8_BANK 2
+# define EVERGREEN_ADDR_SURF_16_BANK 3
+# define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
+# define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
/* 8 BPP */
# define EVERGREEN_GRPH_FORMAT_INDEXED 0
@@ -61,6 +72,24 @@
# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
# define EVERGREEN_GRPH_FORMAT_RGB111110 6
# define EVERGREEN_GRPH_FORMAT_BGR101111 7
+# define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
+# define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
+# define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index b937c49054d9..e00039e59a75 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -899,6 +899,10 @@
#define DB_HTILE_DATA_BASE 0x28014
#define DB_Z_INFO 0x28040
# define Z_ARRAY_MODE(x) ((x) << 4)
+# define DB_TILE_SPLIT(x) (((x) & 0x7) << 8)
+# define DB_NUM_BANKS(x) (((x) & 0x3) << 12)
+# define DB_BANK_WIDTH(x) (((x) & 0x3) << 16)
+# define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20)
#define DB_STENCIL_INFO 0x28044
#define DB_Z_READ_BASE 0x28048
#define DB_STENCIL_READ_BASE 0x2804c
@@ -951,6 +955,29 @@
# define CB_SF_EXPORT_FULL 0
# define CB_SF_EXPORT_NORM 1
#define CB_COLOR0_ATTRIB 0x28c74
+# define CB_TILE_SPLIT(x) (((x) & 0x7) << 5)
+# define ADDR_SURF_TILE_SPLIT_64B 0
+# define ADDR_SURF_TILE_SPLIT_128B 1
+# define ADDR_SURF_TILE_SPLIT_256B 2
+# define ADDR_SURF_TILE_SPLIT_512B 3
+# define ADDR_SURF_TILE_SPLIT_1KB 4
+# define ADDR_SURF_TILE_SPLIT_2KB 5
+# define ADDR_SURF_TILE_SPLIT_4KB 6
+# define CB_NUM_BANKS(x) (((x) & 0x3) << 10)
+# define ADDR_SURF_2_BANK 0
+# define ADDR_SURF_4_BANK 1
+# define ADDR_SURF_8_BANK 2
+# define ADDR_SURF_16_BANK 3
+# define CB_BANK_WIDTH(x) (((x) & 0x3) << 13)
+# define ADDR_SURF_BANK_WIDTH_1 0
+# define ADDR_SURF_BANK_WIDTH_2 1
+# define ADDR_SURF_BANK_WIDTH_4 2
+# define ADDR_SURF_BANK_WIDTH_8 3
+# define CB_BANK_HEIGHT(x) (((x) & 0x3) << 16)
+# define ADDR_SURF_BANK_HEIGHT_1 0
+# define ADDR_SURF_BANK_HEIGHT_2 1
+# define ADDR_SURF_BANK_HEIGHT_4 2
+# define ADDR_SURF_BANK_HEIGHT_8 3
#define CB_COLOR0_DIM 0x28c78
/* only CB0-7 blocks have these regs */
#define CB_COLOR0_CMASK 0x28c7c
@@ -1137,7 +1164,11 @@
# define SQ_SEL_1 5
#define SQ_TEX_RESOURCE_WORD5_0 0x30014
#define SQ_TEX_RESOURCE_WORD6_0 0x30018
+# define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29)
#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
+# define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8)
+# define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10)
+# define TEX_NUM_BANKS(x) (((x) & 0x3) << 16)
#define SQ_VTX_CONSTANT_WORD0_0 0x30000
#define SQ_VTX_CONSTANT_WORD1_0 0x30004
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ad158ea49901..bfc08f6320f8 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -187,13 +187,18 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+ int i;
/* Lock the graphics update lock */
/* update the scanout addresses */
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
/* Wait for update_pending to go high. */
- while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 3f6636bb2d7f..3516a6081dcf 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -35,7 +35,8 @@ static int radeon_atif_call(acpi_handle handle)
/* Fail only if calling the method fails and ATIF is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status));
+ DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
+ acpi_format_exception(status));
kfree(buffer.pointer);
return 1;
}
@@ -50,13 +51,13 @@ int radeon_acpi_init(struct radeon_device *rdev)
acpi_handle handle;
int ret;
- /* No need to proceed if we're sure that ATIF is not supported */
- if (!ASIC_IS_AVIVO(rdev) || !rdev->bios)
- return 0;
-
/* Get the device handle */
handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ /* No need to proceed if we're sure that ATIF is not supported */
+ if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
+ return 0;
+
/* Call the ATIF method */
ret = radeon_atif_call(handle);
if (ret)
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 06e413e6a920..4b27efa4405b 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -233,13 +233,12 @@ u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
- return true;
+ return radeon_encoder->encoder_id;
default:
- return false;
+ return ENCODER_OBJECT_ID_NONE;
}
}
-
- return false;
+ return ENCODER_OBJECT_ID_NONE;
}
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 481b99e89f65..b1053d640423 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -62,6 +62,7 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
/* Lock the graphics update lock */
tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -74,7 +75,11 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
(u32)crtc_base);
/* Wait for update_pending to go high. */
- while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index a983f410ab89..23ae1c60ab3d 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -47,6 +47,7 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
/* Lock the graphics update lock */
tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -66,7 +67,11 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
(u32)crtc_base);
/* Wait for update_pending to go high. */
- while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 3f6343502d1f..5ff561d4e0b4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -140,7 +140,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
goto out_clips;
}
- clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
if (clips == NULL) {
DRM_ERROR("Failed to allocate clip rect list.\n");
ret = -ENOMEM;
@@ -232,7 +232,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
goto out_clips;
}
- clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
if (clips == NULL) {
DRM_ERROR("Failed to allocate clip rect list.\n");
ret = -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 880e285d7578..37d40545ed77 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1809,7 +1809,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
}
rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
- rects = kzalloc(rects_size, GFP_KERNEL);
+ rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
+ GFP_KERNEL);
if (unlikely(!rects)) {
ret = -ENOMEM;
goto out_unlock;
@@ -1824,10 +1825,10 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
}
for (i = 0; i < arg->num_outputs; ++i) {
- if (rects->x < 0 ||
- rects->y < 0 ||
- rects->x + rects->w > mode_config->max_width ||
- rects->y + rects->h > mode_config->max_height) {
+ if (rects[i].x < 0 ||
+ rects[i].y < 0 ||
+ rects[i].x + rects[i].w > mode_config->max_width ||
+ rects[i].y + rects[i].h > mode_config->max_height) {
DRM_ERROR("Invalid GUI layout.\n");
ret = -EINVAL;
goto out_free;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 848a56c0279c..af353842f75f 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1771,8 +1771,8 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 06ce996b8b65..4a441a6f9967 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -266,7 +266,7 @@
#define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
#define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc
-#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0001
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
#define USB_VENDOR_ID_GLAB 0x06c2
#define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index 318e38e85376..5d760f3d21c2 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -160,7 +160,6 @@ MODULE_DEVICE_TABLE(spi, ad7314_id);
static struct spi_driver ad7314_driver = {
.driver = {
.name = "ad7314",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7314_probe,
diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c
index 52319340e182..04450f8bf5da 100644
--- a/drivers/hwmon/ads7871.c
+++ b/drivers/hwmon/ads7871.c
@@ -227,7 +227,6 @@ static int __devexit ads7871_remove(struct spi_device *spi)
static struct spi_driver ads7871_driver = {
.driver = {
.name = DEVICE_NAME,
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/hwmon/exynos4_tmu.c b/drivers/hwmon/exynos4_tmu.c
index faa0884f61f6..f2359a0093bd 100644
--- a/drivers/hwmon/exynos4_tmu.c
+++ b/drivers/hwmon/exynos4_tmu.c
@@ -506,17 +506,7 @@ static struct platform_driver exynos4_tmu_driver = {
.resume = exynos4_tmu_resume,
};
-static int __init exynos4_tmu_driver_init(void)
-{
- return platform_driver_register(&exynos4_tmu_driver);
-}
-module_init(exynos4_tmu_driver_init);
-
-static void __exit exynos4_tmu_driver_exit(void)
-{
- platform_driver_unregister(&exynos4_tmu_driver);
-}
-module_exit(exynos4_tmu_driver_exit);
+module_platform_driver(exynos4_tmu_driver);
MODULE_DESCRIPTION("EXYNOS4 TMU Driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 89aa9fb743af..9ba38f318ffb 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -539,18 +539,7 @@ static struct platform_driver gpio_fan_driver = {
},
};
-static int __init gpio_fan_init(void)
-{
- return platform_driver_register(&gpio_fan_driver);
-}
-
-static void __exit gpio_fan_exit(void)
-{
- platform_driver_unregister(&gpio_fan_driver);
-}
-
-module_init(gpio_fan_init);
-module_exit(gpio_fan_exit);
+module_platform_driver(gpio_fan_driver);
MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>");
MODULE_DESCRIPTION("GPIO FAN driver");
diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c
index fea292d43407..5253d23361d9 100644
--- a/drivers/hwmon/jz4740-hwmon.c
+++ b/drivers/hwmon/jz4740-hwmon.c
@@ -59,7 +59,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
{
struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
struct completion *completion = &hwmon->read_completion;
- unsigned long t;
+ long t;
unsigned long val;
int ret;
@@ -203,7 +203,7 @@ static int __devexit jz4740_hwmon_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver jz4740_hwmon_driver = {
+static struct platform_driver jz4740_hwmon_driver = {
.probe = jz4740_hwmon_probe,
.remove = __devexit_p(jz4740_hwmon_remove),
.driver = {
@@ -212,17 +212,7 @@ struct platform_driver jz4740_hwmon_driver = {
},
};
-static int __init jz4740_hwmon_init(void)
-{
- return platform_driver_register(&jz4740_hwmon_driver);
-}
-module_init(jz4740_hwmon_init);
-
-static void __exit jz4740_hwmon_exit(void)
-{
- platform_driver_unregister(&jz4740_hwmon_driver);
-}
-module_exit(jz4740_hwmon_exit);
+module_platform_driver(jz4740_hwmon_driver);
MODULE_DESCRIPTION("JZ4740 SoC HWMON driver");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index eab11615dced..9b382ec2c3bd 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -432,19 +432,7 @@ static struct platform_driver ntc_thermistor_driver = {
.id_table = ntc_thermistor_id,
};
-static int __init ntc_thermistor_init(void)
-{
- return platform_driver_register(&ntc_thermistor_driver);
-}
-
-module_init(ntc_thermistor_init);
-
-static void __exit ntc_thermistor_cleanup(void)
-{
- platform_driver_unregister(&ntc_thermistor_driver);
-}
-
-module_exit(ntc_thermistor_cleanup);
+module_platform_driver(ntc_thermistor_driver);
MODULE_DESCRIPTION("NTC Thermistor Driver");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index b39f52e2752a..f6c26d19f521 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -393,18 +393,7 @@ static struct platform_driver s3c_hwmon_driver = {
.remove = __devexit_p(s3c_hwmon_remove),
};
-static int __init s3c_hwmon_init(void)
-{
- return platform_driver_register(&s3c_hwmon_driver);
-}
-
-static void __exit s3c_hwmon_exit(void)
-{
- platform_driver_unregister(&s3c_hwmon_driver);
-}
-
-module_init(s3c_hwmon_init);
-module_exit(s3c_hwmon_exit);
+module_platform_driver(s3c_hwmon_driver);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("S3C ADC HWMon driver");
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index e3b5c6039c25..79b6dabe3161 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -590,19 +590,8 @@ static struct platform_driver sch5627_driver = {
.remove = sch5627_remove,
};
-static int __init sch5627_init(void)
-{
- return platform_driver_register(&sch5627_driver);
-}
-
-static void __exit sch5627_exit(void)
-{
- platform_driver_unregister(&sch5627_driver);
-}
+module_platform_driver(sch5627_driver);
MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver");
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
-
-module_init(sch5627_init);
-module_exit(sch5627_exit);
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 244407aa79fc..9d5236fb09b4 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -521,19 +521,8 @@ static struct platform_driver sch5636_driver = {
.remove = sch5636_remove,
};
-static int __init sch5636_init(void)
-{
- return platform_driver_register(&sch5636_driver);
-}
-
-static void __exit sch5636_exit(void)
-{
- platform_driver_unregister(&sch5636_driver);
-}
+module_platform_driver(sch5636_driver);
MODULE_DESCRIPTION("SMSC SCH5636 Hardware Monitoring Driver");
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
-
-module_init(sch5636_init);
-module_exit(sch5636_exit);
diff --git a/drivers/hwmon/twl4030-madc-hwmon.c b/drivers/hwmon/twl4030-madc-hwmon.c
index 57240740b161..0018c7dd0097 100644
--- a/drivers/hwmon/twl4030-madc-hwmon.c
+++ b/drivers/hwmon/twl4030-madc-hwmon.c
@@ -136,19 +136,7 @@ static struct platform_driver twl4030_madc_hwmon_driver = {
},
};
-static int __init twl4030_madc_hwmon_init(void)
-{
- return platform_driver_register(&twl4030_madc_hwmon_driver);
-}
-
-module_init(twl4030_madc_hwmon_init);
-
-static void __exit twl4030_madc_hwmon_exit(void)
-{
- platform_driver_unregister(&twl4030_madc_hwmon_driver);
-}
-
-module_exit(twl4030_madc_hwmon_exit);
+module_platform_driver(twl4030_madc_hwmon_driver);
MODULE_DESCRIPTION("TWL4030 ADC Hwmon driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c
index 3cd07bf42dca..b9a87e89bab4 100644
--- a/drivers/hwmon/ultra45_env.c
+++ b/drivers/hwmon/ultra45_env.c
@@ -309,15 +309,4 @@ static struct platform_driver env_driver = {
.remove = __devexit_p(env_remove),
};
-static int __init env_init(void)
-{
- return platform_driver_register(&env_driver);
-}
-
-static void __exit env_exit(void)
-{
- platform_driver_unregister(&env_driver);
-}
-
-module_init(env_init);
-module_exit(env_exit);
+module_platform_driver(env_driver);
diff --git a/drivers/hwmon/wm831x-hwmon.c b/drivers/hwmon/wm831x-hwmon.c
index 97b1f834a471..9b598ed26020 100644
--- a/drivers/hwmon/wm831x-hwmon.c
+++ b/drivers/hwmon/wm831x-hwmon.c
@@ -209,17 +209,7 @@ static struct platform_driver wm831x_hwmon_driver = {
},
};
-static int __init wm831x_hwmon_init(void)
-{
- return platform_driver_register(&wm831x_hwmon_driver);
-}
-module_init(wm831x_hwmon_init);
-
-static void __exit wm831x_hwmon_exit(void)
-{
- platform_driver_unregister(&wm831x_hwmon_driver);
-}
-module_exit(wm831x_hwmon_exit);
+module_platform_driver(wm831x_hwmon_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("WM831x Hardware Monitoring");
diff --git a/drivers/hwmon/wm8350-hwmon.c b/drivers/hwmon/wm8350-hwmon.c
index 13290595ca86..3ff67edbdc44 100644
--- a/drivers/hwmon/wm8350-hwmon.c
+++ b/drivers/hwmon/wm8350-hwmon.c
@@ -133,17 +133,7 @@ static struct platform_driver wm8350_hwmon_driver = {
},
};
-static int __init wm8350_hwmon_init(void)
-{
- return platform_driver_register(&wm8350_hwmon_driver);
-}
-module_init(wm8350_hwmon_init);
-
-static void __exit wm8350_hwmon_exit(void)
-{
- platform_driver_unregister(&wm8350_hwmon_driver);
-}
-module_exit(wm8350_hwmon_exit);
+module_platform_driver(wm8350_hwmon_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("WM8350 Hardware Monitoring");
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 8cebef49aeac..18936ac9d51c 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -893,6 +893,13 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
/* Set the number of I2C channel instance */
adap_info->ch_num = id->driver_data;
+ ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+ KBUILD_MODNAME, adap_info);
+ if (ret) {
+ pch_pci_err(pdev, "request_irq FAILED\n");
+ goto err_request_irq;
+ }
+
for (i = 0; i < adap_info->ch_num; i++) {
pch_adap = &adap_info->pch_data[i].pch_adapter;
adap_info->pch_i2c_suspended = false;
@@ -910,28 +917,23 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
pch_adap->dev.parent = &pdev->dev;
+ pch_i2c_init(&adap_info->pch_data[i]);
ret = i2c_add_adapter(pch_adap);
if (ret) {
pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
- goto err_i2c_add_adapter;
+ goto err_add_adapter;
}
-
- pch_i2c_init(&adap_info->pch_data[i]);
- }
- ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
- KBUILD_MODNAME, adap_info);
- if (ret) {
- pch_pci_err(pdev, "request_irq FAILED\n");
- goto err_i2c_add_adapter;
}
pci_set_drvdata(pdev, adap_info);
pch_pci_dbg(pdev, "returns %d.\n", ret);
return 0;
-err_i2c_add_adapter:
+err_add_adapter:
for (j = 0; j < i; j++)
i2c_del_adapter(&adap_info->pch_data[j].pch_adapter);
+ free_irq(pdev->irq, adap_info);
+err_request_irq:
pci_iounmap(pdev, base_addr);
err_pci_iomap:
pci_release_regions(pdev);
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
index 835e47b39bc2..03b615778887 100644
--- a/drivers/i2c/busses/i2c-nuc900.c
+++ b/drivers/i2c/busses/i2c-nuc900.c
@@ -593,7 +593,7 @@ static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
i2c->adap.algo_data = i2c;
i2c->adap.dev.parent = &pdev->dev;
- mfp_set_groupg(&pdev->dev);
+ mfp_set_groupg(&pdev->dev, NULL);
clk_get_rate(i2c->clk);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index a43d0023446a..fa23faa20f0e 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1047,13 +1047,14 @@ omap_i2c_probe(struct platform_device *pdev)
* size. This is to ensure that we can handle the status on int
* call back latencies.
*/
- if (dev->rev >= OMAP_I2C_REV_ON_3530_4430) {
- dev->fifo_size = 0;
+
+ dev->fifo_size = (dev->fifo_size / 2);
+
+ if (dev->rev >= OMAP_I2C_REV_ON_3530_4430)
dev->b_hw = 0; /* Disable hardware fixes */
- } else {
- dev->fifo_size = (dev->fifo_size / 2);
+ else
dev->b_hw = 1; /* Enable hardware fixes */
- }
+
/* calculate wakeup latency constraint for MPU */
if (dev->set_mpu_wkup_lat != NULL)
dev->latency = (1000000 * dev->fifo_size) /
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 2754cef86a06..4c1718081685 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -534,6 +534,7 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
/* first, try busy waiting briefly */
do {
+ cpu_relax();
iicstat = readl(i2c->regs + S3C2410_IICSTAT);
} while ((iicstat & S3C2410_IICSTAT_START) && --spins);
@@ -786,7 +787,7 @@ static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
#else
static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
{
- return -EINVAL;
+ return 0;
}
static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 691276bafd78..e9cf51b1343b 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -216,7 +216,9 @@ static int addr4_resolve(struct sockaddr_in *src_in,
neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
if (!neigh || !(neigh->nud_state & NUD_VALID)) {
+ rcu_read_lock();
neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
+ rcu_read_unlock();
ret = -ENODATA;
if (neigh)
goto release;
@@ -274,15 +276,16 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
goto put;
}
+ rcu_read_lock();
neigh = dst_get_neighbour(dst);
if (!neigh || !(neigh->nud_state & NUD_VALID)) {
if (neigh)
neigh_event_send(neigh, NULL);
ret = -ENODATA;
- goto put;
+ } else {
+ ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
}
-
- ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
+ rcu_read_unlock();
put:
dst_release(dst);
return ret;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 75ff821c0af0..d0d4aa9f4802 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2513,6 +2513,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
req.private_data_len = sizeof(struct cma_hdr) +
conn_param->private_data_len;
+ if (req.private_data_len < conn_param->private_data_len)
+ return -EINVAL;
+
req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
if (!req.private_data)
return -ENOMEM;
@@ -2562,6 +2565,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv->id.ps);
req.private_data_len = offset + conn_param->private_data_len;
+ if (req.private_data_len < conn_param->private_data_len)
+ return -EINVAL;
+
private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
if (!private_data)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index de6d0774e609..c88b12beef25 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1375,8 +1375,10 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
goto reject;
}
dst = &rt->dst;
+ rcu_read_lock();
neigh = dst_get_neighbour(dst);
l2t = t3_l2t_get(tdev, neigh, neigh->dev);
+ rcu_read_unlock();
if (!l2t) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
@@ -1946,10 +1948,12 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
ep->dst = &rt->dst;
+ rcu_read_lock();
neigh = dst_get_neighbour(ep->dst);
/* get a l2t entry */
ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev);
+ rcu_read_unlock();
if (!ep->l2t) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
err = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index b36cdac9c558..0747004313ad 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -542,8 +542,10 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
(mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
mpa->private_data_size = htons(ep->plen);
mpa->revision = mpa_rev_to_use;
- if (mpa_rev_to_use == 1)
+ if (mpa_rev_to_use == 1) {
ep->tried_with_mpa_v1 = 1;
+ ep->retry_with_mpa_v1 = 0;
+ }
if (mpa_rev_to_use == 2) {
mpa->private_data_size +=
@@ -1594,6 +1596,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
dst = &rt->dst;
+ rcu_read_lock();
neigh = dst_get_neighbour(dst);
if (neigh->dev->flags & IFF_LOOPBACK) {
pdev = ip_dev_find(&init_net, peer_ip);
@@ -1620,6 +1623,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
rss_qid = dev->rdev.lldi.rxq_ids[
cxgb4_port_idx(neigh->dev) * step];
}
+ rcu_read_unlock();
if (!l2t) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
@@ -1820,6 +1824,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
}
ep->dst = &rt->dst;
+ rcu_read_lock();
neigh = dst_get_neighbour(ep->dst);
/* get a l2t entry */
@@ -1856,6 +1861,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
cxgb4_port_idx(neigh->dev) * step];
}
+ rcu_read_unlock();
if (!ep->l2t) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
err = -ENOMEM;
@@ -2301,6 +2307,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
ep->dst = &rt->dst;
+ rcu_read_lock();
neigh = dst_get_neighbour(ep->dst);
/* get a l2t entry */
@@ -2339,6 +2346,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->retry_with_mpa_v1 = 0;
ep->tried_with_mpa_v1 = 0;
}
+ rcu_read_unlock();
if (!ep->l2t) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
err = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index f35a935267e7..0f1607c8325a 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -311,7 +311,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
while (ptr != cq->sw_pidx) {
cqe = &cq->sw_queue[ptr];
if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
- (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq))
+ (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
(*count)++;
if (++ptr == cq->size)
ptr = 0;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 77f3dbc0aaa1..18836cdf1e10 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1244,7 +1244,8 @@ err_reg:
err_counter:
for (; i; --i)
- mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
+ if (ibdev->counters[i - 1] != -1)
+ mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
err_map:
iounmap(ibdev->uar_map);
@@ -1275,7 +1276,8 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
}
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
- mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
+ if (ibdev->counters[p] != -1)
+ mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
mlx4_CLOSE_PORT(dev, p);
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index dfce9ea98a39..0a52d72371ee 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1377,9 +1377,11 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
neigh_release(neigh);
}
- if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID)))
+ if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) {
+ rcu_read_lock();
neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
-
+ rcu_read_unlock();
+ }
ip_rt_put(rt);
return rc;
}
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 574600ef5b42..a7403248d83d 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1285,7 +1285,7 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
ctxt_fp(fp) = rcd;
qib_stats.sps_ctxts++;
- dd->freectxts++;
+ dd->freectxts--;
ret = 0;
goto bail;
@@ -1794,7 +1794,7 @@ static int qib_close(struct inode *in, struct file *fp)
if (dd->pageshadow)
unlock_expected_tids(rcd);
qib_stats.sps_ctxts--;
- dd->freectxts--;
+ dd->freectxts++;
}
mutex_unlock(&qib_mutex);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5bd2162b95dc..1d5895941e19 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2307,19 +2307,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
SYM_LSB(IBCCtrlA_0, MaxPktLen);
ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
- /* initially come up waiting for TS1, without sending anything. */
- val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
- QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
-
- ppd->cpspec->ibcctrl_a = val;
/*
* Reset the PCS interface to the serdes (and also ibc, which is still
* in reset from above). Writes new value of ibcctrl_a as last step.
*/
qib_7322_mini_pcs_reset(ppd);
- qib_write_kreg(dd, kr_scratch, 0ULL);
- /* clear the linkinit cmds */
- ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
if (!ppd->cpspec->ibcctrl_b) {
unsigned lse = ppd->link_speed_enabled;
@@ -2385,6 +2377,14 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
set_vls(ppd);
+ /* initially come up DISABLED, without sending anything. */
+ val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
+ QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
+ qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
+ qib_write_kreg(dd, kr_scratch, 0ULL);
+ /* clear the linkinit cmds */
+ ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
+
/* be paranoid against later code motion, etc. */
spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
@@ -5241,7 +5241,7 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
off */
if (ppd->dd->flags & QIB_HAS_QSFP) {
qd->t_insert = get_jiffies_64();
- schedule_work(&qd->work);
+ queue_work(ib_wq, &qd->work);
}
spin_lock_irqsave(&ppd->sdma_lock, flags);
if (__qib_sdma_running(ppd))
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c
index e06c4ed383f1..fa71b1e666c5 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -480,18 +480,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
udelay(20); /* Generous RST dwell */
dd->f_gpio_mod(dd, mask, mask, mask);
- /* Spec says module can take up to two seconds! */
- mask = QSFP_GPIO_MOD_PRS_N;
- if (qd->ppd->hw_pidx)
- mask <<= QSFP_GPIO_PORT2_SHIFT;
-
- /* Do not try to wait here. Better to let event handle it */
- if (!qib_qsfp_mod_present(qd->ppd))
- goto bail;
- /* We see a module, but it may be unwise to look yet. Just schedule */
- qd->t_insert = get_jiffies_64();
- queue_work(ib_wq, &qd->work);
-bail:
return;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0ef9af94997d..4115be54ba3b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -57,21 +57,24 @@ struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
struct ib_pd *pd, struct ib_ah_attr *attr)
{
struct ipoib_ah *ah;
+ struct ib_ah *vah;
ah = kmalloc(sizeof *ah, GFP_KERNEL);
if (!ah)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ah->dev = dev;
ah->last_send = 0;
kref_init(&ah->ref);
- ah->ah = ib_create_ah(pd, attr);
- if (IS_ERR(ah->ah)) {
+ vah = ib_create_ah(pd, attr);
+ if (IS_ERR(vah)) {
kfree(ah);
- ah = NULL;
- } else
+ ah = (struct ipoib_ah *)vah;
+ } else {
+ ah->ah = vah;
ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
+ }
return ah;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7567b6000230..83695b48b010 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -432,7 +432,7 @@ static void path_rec_completion(int status,
spin_lock_irqsave(&priv->lock, flags);
- if (ah) {
+ if (!IS_ERR_OR_NULL(ah)) {
path->pathrec = *pathrec;
old_ah = path->ah;
@@ -555,6 +555,7 @@ static int path_rec_start(struct net_device *dev,
return 0;
}
+/* called with rcu_read_lock */
static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -636,6 +637,7 @@ err_drop:
spin_unlock_irqrestore(&priv->lock, flags);
}
+/* called with rcu_read_lock */
static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
@@ -720,13 +722,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct neighbour *n = NULL;
unsigned long flags;
+ rcu_read_lock();
if (likely(skb_dst(skb)))
n = dst_get_neighbour(skb_dst(skb));
if (likely(n)) {
if (unlikely(!*to_ipoib_neigh(n))) {
ipoib_path_lookup(skb, dev);
- return NETDEV_TX_OK;
+ goto unlock;
}
neigh = *to_ipoib_neigh(n);
@@ -749,17 +752,17 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
ipoib_neigh_free(dev, neigh);
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_path_lookup(skb, dev);
- return NETDEV_TX_OK;
+ goto unlock;
}
if (ipoib_cm_get(neigh)) {
if (ipoib_cm_up(neigh)) {
ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
- return NETDEV_TX_OK;
+ goto unlock;
}
} else if (neigh->ah) {
ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha));
- return NETDEV_TX_OK;
+ goto unlock;
}
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
@@ -793,13 +796,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
phdr->hwaddr + 4);
dev_kfree_skb_any(skb);
++dev->stats.tx_dropped;
- return NETDEV_TX_OK;
+ goto unlock;
}
unicast_arp_send(skb, dev, phdr);
}
}
-
+unlock:
+ rcu_read_unlock();
return NETDEV_TX_OK;
}
@@ -837,7 +841,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
dst = skb_dst(skb);
n = NULL;
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_raw(dst);
if ((!dst || !n) && daddr) {
struct ipoib_pseudoheader *phdr =
(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 1b7a97686356..873bff97e69e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -240,8 +240,11 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
av.grh.dgid = mcast->mcmember.mgid;
ah = ipoib_create_ah(dev, priv->pd, &av);
- if (!ah) {
- ipoib_warn(priv, "ib_address_create failed\n");
+ if (IS_ERR(ah)) {
+ ipoib_warn(priv, "ib_address_create failed %ld\n",
+ -PTR_ERR(ah));
+ /* use original error */
+ return PTR_ERR(ah);
} else {
spin_lock_irq(&priv->lock);
mcast->ah = ah;
@@ -266,7 +269,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
skb->dev = dev;
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_raw(dst);
if (!dst || !n) {
/* put pseudoheader back on for next time */
skb_push(skb, sizeof (struct ipoib_pseudoheader));
@@ -722,6 +725,8 @@ out:
if (mcast && mcast->ah) {
struct dst_entry *dst = skb_dst(skb);
struct neighbour *n = NULL;
+
+ rcu_read_lock();
if (dst)
n = dst_get_neighbour(dst);
if (n && !*to_ipoib_neigh(n)) {
@@ -734,7 +739,7 @@ out:
list_add_tail(&neigh->list, &mcast->neigh_list);
}
}
-
+ rcu_read_unlock();
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
return;
diff --git a/drivers/input/misc/cma3000_d0x.c b/drivers/input/misc/cma3000_d0x.c
index 80793f1608eb..06517e60e50c 100644
--- a/drivers/input/misc/cma3000_d0x.c
+++ b/drivers/input/misc/cma3000_d0x.c
@@ -115,8 +115,8 @@ static void decode_mg(struct cma3000_accl_data *data, int *datax,
static irqreturn_t cma3000_thread_irq(int irq, void *dev_id)
{
struct cma3000_accl_data *data = dev_id;
- int datax, datay, dataz;
- u8 ctrl, mode, range, intr_status;
+ int datax, datay, dataz, intr_status;
+ u8 ctrl, mode, range;
intr_status = CMA3000_READ(data, CMA3000_INTSTATUS, "interrupt status");
if (intr_status < 0)
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index c080b828e5dc..a6dcd18e9adf 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -24,6 +24,7 @@
*/
#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/input/mt.h>
#include <linux/serio.h>
@@ -1220,6 +1221,16 @@ static int synaptics_reconnect(struct psmouse *psmouse)
do {
psmouse_reset(psmouse);
+ if (retry) {
+ /*
+ * On some boxes, right after resuming, the touchpad
+ * needs some time to finish initializing (I assume
+ * it needs time to calibrate) and start responding
+ * to Synaptics-specific queries, so let's wait a
+ * bit.
+ */
+ ssleep(1);
+ }
error = synaptics_detect(psmouse, 0);
} while (error && ++retry < 3);
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index da0d8761e778..2ee47d01a3b4 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1470,6 +1470,9 @@ static const struct wacom_features wacom_features_0xE3 =
static const struct wacom_features wacom_features_0xE6 =
{ "Wacom ISDv4 E6", WACOM_PKGLEN_TPC2FG, 27760, 15694, 255,
0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xEC =
+ { "Wacom ISDv4 EC", WACOM_PKGLEN_GRAPHIRE, 25710, 14500, 255,
+ 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x47 =
{ "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023,
31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1611,6 +1614,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xE2) },
{ USB_DEVICE_WACOM(0xE3) },
{ USB_DEVICE_WACOM(0xE6) },
+ { USB_DEVICE_WACOM(0xEC) },
{ USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_LENOVO(0x6004) },
{ }
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index d1c17934d66f..31053a951c34 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -406,6 +406,9 @@ int dmar_disabled = 0;
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
+int intel_iommu_enabled = 0;
+EXPORT_SYMBOL_GPL(intel_iommu_enabled);
+
static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
@@ -3519,7 +3522,7 @@ found:
return 0;
}
-int dmar_parse_rmrr_atsr_dev(void)
+int __init dmar_parse_rmrr_atsr_dev(void)
{
struct dmar_rmrr_unit *rmrr, *rmrr_n;
struct dmar_atsr_unit *atsr, *atsr_n;
@@ -3642,6 +3645,8 @@ int __init intel_iommu_init(void)
bus_register_notifier(&pci_bus_type, &device_nb);
+ intel_iommu_enabled = 1;
+
return 0;
}
diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c
index 07c9f189f314..6777ca049471 100644
--- a/drivers/iommu/intr_remapping.c
+++ b/drivers/iommu/intr_remapping.c
@@ -773,7 +773,7 @@ int __init parse_ioapics_under_ir(void)
return ir_supported;
}
-int ir_dev_scope_init(void)
+int __init ir_dev_scope_init(void)
{
if (!intr_remapping_enabled)
return 0;
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 33ec9e467772..9021182c4b76 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -242,6 +242,12 @@ static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg)
case IIOCDOCFINT:
if (!divert_if.drv_to_name(dioctl.cf_ctrl.drvid))
return (-EINVAL); /* invalid driver */
+ if (strnlen(dioctl.cf_ctrl.msn, sizeof(dioctl.cf_ctrl.msn)) ==
+ sizeof(dioctl.cf_ctrl.msn))
+ return -EINVAL;
+ if (strnlen(dioctl.cf_ctrl.fwd_nr, sizeof(dioctl.cf_ctrl.fwd_nr)) ==
+ sizeof(dioctl.cf_ctrl.fwd_nr))
+ return -EINVAL;
if ((i = cf_command(dioctl.cf_ctrl.drvid,
(cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 0 : 2,
dioctl.cf_ctrl.cfproc,
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 1f73d7f7e024..2339d7396b9e 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -2756,6 +2756,9 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg)
char *c,
*e;
+ if (strnlen(cfg->drvid, sizeof(cfg->drvid)) ==
+ sizeof(cfg->drvid))
+ return -EINVAL;
drvidx = -1;
chidx = -1;
strcpy(drvid, cfg->drvid);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7878712721bf..b6907118283a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1106,10 +1106,12 @@ void bitmap_write_all(struct bitmap *bitmap)
*/
int i;
+ spin_lock_irq(&bitmap->lock);
for (i = 0; i < bitmap->file_pages; i++)
set_page_attr(bitmap, bitmap->filemap[i],
BITMAP_PAGE_NEEDWRITE);
bitmap->allclean = 0;
+ spin_unlock_irq(&bitmap->lock);
}
static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
@@ -1605,7 +1607,9 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
for (chunk = s; chunk <= e; chunk++) {
sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap);
bitmap_set_memory_bits(bitmap, sec, 1);
+ spin_lock_irq(&bitmap->lock);
bitmap_file_set_bit(bitmap, sec);
+ spin_unlock_irq(&bitmap->lock);
if (sec < bitmap->mddev->recovery_cp)
/* We are asserting that the array is dirty,
* so move the recovery_cp address back so
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 84acfe7d10e4..ee981737edfc 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -570,7 +570,7 @@ static void mddev_put(struct mddev *mddev)
mddev->ctime == 0 && !mddev->hold_active) {
/* Array is not configured at all, and not held active,
* so destroy it */
- list_del(&mddev->all_mddevs);
+ list_del_init(&mddev->all_mddevs);
bs = mddev->bio_set;
mddev->bio_set = NULL;
if (mddev->gendisk) {
@@ -2546,7 +2546,8 @@ state_show(struct md_rdev *rdev, char *page)
sep = ",";
}
if (test_bit(Blocked, &rdev->flags) ||
- rdev->badblocks.unacked_exist) {
+ (rdev->badblocks.unacked_exist
+ && !test_bit(Faulty, &rdev->flags))) {
len += sprintf(page+len, "%sblocked", sep);
sep = ",";
}
@@ -3788,6 +3789,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
if (err)
return err;
else {
+ if (mddev->hold_active == UNTIL_IOCTL)
+ mddev->hold_active = 0;
sysfs_notify_dirent_safe(mddev->sysfs_state);
return len;
}
@@ -4487,11 +4490,20 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
if (!entry->show)
return -EIO;
+ spin_lock(&all_mddevs_lock);
+ if (list_empty(&mddev->all_mddevs)) {
+ spin_unlock(&all_mddevs_lock);
+ return -EBUSY;
+ }
+ mddev_get(mddev);
+ spin_unlock(&all_mddevs_lock);
+
rv = mddev_lock(mddev);
if (!rv) {
rv = entry->show(mddev, page);
mddev_unlock(mddev);
}
+ mddev_put(mddev);
return rv;
}
@@ -4507,13 +4519,19 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ spin_lock(&all_mddevs_lock);
+ if (list_empty(&mddev->all_mddevs)) {
+ spin_unlock(&all_mddevs_lock);
+ return -EBUSY;
+ }
+ mddev_get(mddev);
+ spin_unlock(&all_mddevs_lock);
rv = mddev_lock(mddev);
- if (mddev->hold_active == UNTIL_IOCTL)
- mddev->hold_active = 0;
if (!rv) {
rv = entry->store(mddev, page, length);
mddev_unlock(mddev);
}
+ mddev_put(mddev);
return rv;
}
@@ -7840,6 +7858,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
s + rdev->data_offset, sectors, acknowledged);
if (rv) {
/* Make sure they get written out promptly */
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
md_wakeup_thread(rdev->mddev->thread);
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 297e26092178..31670f8d6b65 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3036,6 +3036,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (dev->written)
s->written++;
rdev = rcu_dereference(conf->disks[i].rdev);
+ if (rdev && test_bit(Faulty, &rdev->flags))
+ rdev = NULL;
if (rdev) {
is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
&first_bad, &bad_sectors);
@@ -3063,12 +3065,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
}
} else if (test_bit(In_sync, &rdev->flags))
set_bit(R5_Insync, &dev->flags);
- else if (!test_bit(Faulty, &rdev->flags)) {
+ else {
/* in sync if before recovery_offset */
if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
set_bit(R5_Insync, &dev->flags);
}
- if (test_bit(R5_WriteError, &dev->flags)) {
+ if (rdev && test_bit(R5_WriteError, &dev->flags)) {
clear_bit(R5_Insync, &dev->flags);
if (!test_bit(Faulty, &rdev->flags)) {
s->handle_bad_blocks = 1;
@@ -3076,7 +3078,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
} else
clear_bit(R5_WriteError, &dev->flags);
}
- if (test_bit(R5_MadeGood, &dev->flags)) {
+ if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
if (!test_bit(Faulty, &rdev->flags)) {
s->handle_bad_blocks = 1;
atomic_inc(&rdev->nr_pending);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index a1cb21f95302..1e0e27cbe987 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1606,6 +1606,14 @@ static const struct mmc_fixup blk_fixups[] =
MMC_QUIRK_BLK_NO_CMD23),
MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_BLK_NO_CMD23),
+
+ /*
+ * Some Micron MMC cards need a longer data read timeout than
+ * indicated in the CSD.
+ */
+ MMC_FIXUP(CID_NAME_ANY, 0x13, 0x200, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
+
END_FIXUP
};
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 5278ffb20e74..950b97d7412a 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -529,6 +529,18 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
data->timeout_clks = 0;
}
}
+
+ /*
+ * Some cards require a longer data read timeout than indicated in the CSD.
+ * Address this by setting the read timeout to a "reasonably high"
+ * value. For the cards tested, 300ms has proven enough. This value can
+ * be increased further if other problematic cards require it.
+ */
+ if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
+ data->timeout_ns = 300000000;
+ data->timeout_clks = 0;
+ }
+
/*
* Some cards need very high timeouts if driven in SPI mode.
* The worst observed timeout was 900ms after writing a
@@ -1213,6 +1225,46 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
mmc_host_clk_release(host);
}
+static void mmc_poweroff_notify(struct mmc_host *host)
+{
+ struct mmc_card *card;
+ unsigned int timeout;
+ unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
+ int err = 0;
+
+ card = host->card;
+
+ /*
+ * Send the power notify command only if the card
+ * is MMC and its notify state is powered ON
+ */
+ if (card && mmc_card_mmc(card) &&
+ (card->poweroff_notify_state == MMC_POWERED_ON)) {
+
+ if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
+ notify_type = EXT_CSD_POWER_OFF_SHORT;
+ timeout = card->ext_csd.generic_cmd6_time;
+ card->poweroff_notify_state = MMC_POWEROFF_SHORT;
+ } else {
+ notify_type = EXT_CSD_POWER_OFF_LONG;
+ timeout = card->ext_csd.power_off_longtime;
+ card->poweroff_notify_state = MMC_POWEROFF_LONG;
+ }
+
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_POWER_OFF_NOTIFICATION,
+ notify_type, timeout);
+
+ if (err && err != -EBADMSG)
+ pr_err("Device failed to respond within %d poweroff "
+ "time. Forcefully powering down the device\n",
+ timeout);
+
+ /* Set the card state to no notification after the poweroff */
+ card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
+ }
+}
+
/*
* Apply power to the MMC stack. This is a two-stage process.
* First, we enable power to the card without the clock running.
@@ -1269,42 +1321,12 @@ static void mmc_power_up(struct mmc_host *host)
void mmc_power_off(struct mmc_host *host)
{
- struct mmc_card *card;
- unsigned int notify_type;
- unsigned int timeout;
- int err;
-
mmc_host_clk_hold(host);
- card = host->card;
host->ios.clock = 0;
host->ios.vdd = 0;
- if (card && mmc_card_mmc(card) &&
- (card->poweroff_notify_state == MMC_POWERED_ON)) {
-
- if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
- notify_type = EXT_CSD_POWER_OFF_SHORT;
- timeout = card->ext_csd.generic_cmd6_time;
- card->poweroff_notify_state = MMC_POWEROFF_SHORT;
- } else {
- notify_type = EXT_CSD_POWER_OFF_LONG;
- timeout = card->ext_csd.power_off_longtime;
- card->poweroff_notify_state = MMC_POWEROFF_LONG;
- }
-
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_POWER_OFF_NOTIFICATION,
- notify_type, timeout);
-
- if (err && err != -EBADMSG)
- pr_err("Device failed to respond within %d poweroff "
- "time. Forcefully powering down the device\n",
- timeout);
-
- /* Set the card state to no notification after the poweroff */
- card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
- }
+ mmc_poweroff_notify(host);
/*
* Reset ocr mask to be the highest possible voltage supported for
@@ -2196,7 +2218,7 @@ int mmc_card_sleep(struct mmc_host *host)
mmc_bus_get(host);
- if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
+ if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
err = host->bus_ops->sleep(host);
mmc_bus_put(host);
@@ -2302,8 +2324,17 @@ int mmc_suspend_host(struct mmc_host *host)
* pre-claim the host.
*/
if (mmc_try_claim_host(host)) {
- if (host->bus_ops->suspend)
+ if (host->bus_ops->suspend) {
+ /*
+ * For eMMC 4.5 devices, send the notify command
+ * before sleep, because in sleep state eMMC 4.5
+ * devices respond only to the RESET and AWAKE commands
+ */
+ mmc_poweroff_notify(host);
err = host->bus_ops->suspend(host);
+ }
+ mmc_do_release_host(host);
+
if (err == -ENOSYS || !host->bus_ops->resume) {
/*
* We simply "remove" the card in this case.
@@ -2318,7 +2349,6 @@ int mmc_suspend_host(struct mmc_host *host)
host->pm_flags = 0;
err = 0;
}
- mmc_do_release_host(host);
} else {
err = -EBUSY;
}
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index dbf421a6279c..d240427c1246 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -876,17 +876,21 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
* set the notification byte in the ext_csd register of device
*/
if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
- (card->poweroff_notify_state == MMC_NO_POWER_NOTIFICATION)) {
+ (card->ext_csd.rev >= 6)) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_POWER_OFF_NOTIFICATION,
EXT_CSD_POWER_ON,
card->ext_csd.generic_cmd6_time);
if (err && err != -EBADMSG)
goto free_card;
- }
- if (!err)
- card->poweroff_notify_state = MMC_POWERED_ON;
+ /*
+ * err can be -EBADMSG or 0 here,
+ * so check for success before updating the flag
+ */
+ if (!err)
+ card->poweroff_notify_state = MMC_POWERED_ON;
+ }
/*
* Activate high speed (if supported)
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 325ea61e12d3..8e0fbe994047 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -732,6 +732,7 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
"failed to config DMA channel. Falling back to PIO\n");
dma_release_channel(host->dma);
host->do_dma = 0;
+ host->dma = NULL;
}
}
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 101cd31c8220..d5fe43d53c51 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1010,6 +1010,7 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
host->data->sg_len,
omap_hsmmc_get_dma_dir(host, host->data));
omap_free_dma(dma_ch);
+ host->data->host_cookie = 0;
}
host->data = NULL;
}
@@ -1575,8 +1576,10 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
struct mmc_data *data = mrq->data;
if (host->use_dma) {
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- omap_hsmmc_get_dma_dir(host, data));
+ if (data->host_cookie)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len,
+ omap_hsmmc_get_dma_dir(host, data));
data->host_cookie = 0;
}
}
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index 4b920b7621cf..87b6f079b6e0 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mmc/host.h>
+#include <linux/module.h>
#include <mach/cns3xxx.h>
#include "sdhci-pltfm.h"
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 3d00e722efc9..cb60c4197e0a 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -644,8 +644,6 @@ static int sdhci_s3c_resume(struct platform_device *dev)
static struct platform_driver sdhci_s3c_driver = {
.probe = sdhci_s3c_probe,
.remove = __devexit_p(sdhci_s3c_remove),
- .suspend = sdhci_s3c_suspend,
- .resume = sdhci_s3c_resume,
.driver = {
.owner = THIS_MODULE,
.name = "s3c-sdhci",
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 369366c8e205..d5505f3fe2a1 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -908,7 +908,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (host->power) {
pm_runtime_put(&host->pd->dev);
host->power = false;
- if (p->down_pwr)
+ if (p->down_pwr && ios->power_mode == MMC_POWER_OFF)
p->down_pwr(host->pd);
}
host->state = STATE_IDLE;
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index d85a60cda167..4208b3958069 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -798,7 +798,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
/* start bus clock */
tmio_mmc_clk_start(host);
} else if (ios->power_mode != MMC_POWER_UP) {
- if (host->set_pwr)
+ if (host->set_pwr && ios->power_mode == MMC_POWER_OFF)
host->set_pwr(host->pdev, 0);
if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
pdata->power) {
diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig
index a73d9dc80ff6..84fb6349a59a 100644
--- a/drivers/net/arcnet/Kconfig
+++ b/drivers/net/arcnet/Kconfig
@@ -4,7 +4,7 @@
menuconfig ARCNET
depends on NETDEVICES && (ISA || PCI || PCMCIA)
- bool "ARCnet support"
+ tristate "ARCnet support"
---help---
If you have a network card of this type, say Y and check out the
(arguably) beautiful poetry in
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b0c577256487..7f8756825b8a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2553,30 +2553,6 @@ re_arm:
}
}
-static __be32 bond_glean_dev_ip(struct net_device *dev)
-{
- struct in_device *idev;
- struct in_ifaddr *ifa;
- __be32 addr = 0;
-
- if (!dev)
- return 0;
-
- rcu_read_lock();
- idev = __in_dev_get_rcu(dev);
- if (!idev)
- goto out;
-
- ifa = idev->ifa_list;
- if (!ifa)
- goto out;
-
- addr = ifa->ifa_local;
-out:
- rcu_read_unlock();
- return addr;
-}
-
static int bond_has_this_ip(struct bonding *bond, __be32 ip)
{
struct vlan_entry *vlan;
@@ -3322,6 +3298,10 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
struct bonding *bond;
struct vlan_entry *vlan;
+ /* we only care about the primary address */
+ if (ifa->ifa_flags & IFA_F_SECONDARY)
+ return NOTIFY_DONE;
+
list_for_each_entry(bond, &bn->dev_list, bond_list) {
if (bond->dev == event_dev) {
switch (event) {
@@ -3329,7 +3309,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
bond->master_ip = ifa->ifa_local;
return NOTIFY_OK;
case NETDEV_DOWN:
- bond->master_ip = bond_glean_dev_ip(bond->dev);
+ bond->master_ip = 0;
return NOTIFY_OK;
default:
return NOTIFY_DONE;
@@ -3345,8 +3325,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
vlan->vlan_ip = ifa->ifa_local;
return NOTIFY_OK;
case NETDEV_DOWN:
- vlan->vlan_ip =
- bond_glean_dev_ip(vlan_dev);
+ vlan->vlan_ip = 0;
return NOTIFY_OK;
default:
return NOTIFY_DONE;
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 905bce0b3a43..2c7f5036f570 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -20,7 +20,6 @@
*/
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 4cf835dbc122..3fb66d09ece5 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -608,7 +608,7 @@ static void b44_tx(struct b44 *bp)
skb->len,
DMA_TO_DEVICE);
rp->skb = NULL;
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
}
bp->tx_cons = cons;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index bce203fa4b9e..882f48f0a03c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -10327,6 +10327,43 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
return 0;
}
+
+static void bnx2x_5461x_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ struct bnx2x *bp = params->bp;
+ u16 temp;
+
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_LED_SEL1);
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ &temp);
+ temp &= 0xff00;
+
+ DP(NETIF_MSG_LINK, "54618x set link led (mode=%x)\n", mode);
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ temp |= 0x00ee;
+ break;
+ case LED_MODE_OPER:
+ temp |= 0x0001;
+ break;
+ case LED_MODE_ON:
+ temp |= 0x00ff;
+ break;
+ default:
+ break;
+ }
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+ return;
+}
+
+
static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
@@ -11103,7 +11140,7 @@ static struct bnx2x_phy phy_54618se = {
.config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
.format_fw_ver = (format_fw_ver_t)NULL,
.hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
+ .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led,
.phy_specific_func = (phy_specific_func_t)NULL
};
/*****************************************************************/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index fc7bd0f23c0b..e58073ef33b4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -6990,6 +6990,7 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_REG_INTR_MASK 0x1b
#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
#define MDIO_REG_GPHY_SHADOW 0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL1 (0x0d << 10)
#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10)
#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15)
#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10)
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 438f4580bf66..2a22f5256353 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -613,7 +613,7 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
if (!dm->wake_state)
irq_set_irq_wake(dm->irq_wake, 1);
- else if (dm->wake_state & !opts)
+ else if (dm->wake_state && !opts)
irq_set_irq_wake(dm->irq_wake, 0);
}
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index c520cfd3b298..5272f9d4dda9 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -24,6 +24,7 @@ config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || ARCH_MXS)
+ default ARCH_MXC || ARCH_MXS if ARM
select PHYLIB
---help---
Say Y here if you want to use the built-in 10/100 Fast ethernet
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 1124ce0a1594..c136230d50bb 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -232,6 +232,7 @@ struct fec_enet_private {
struct platform_device *pdev;
int opened;
+ int dev_id;
/* Phylib and MDIO interface */
struct mii_bus *mii_bus;
@@ -837,7 +838,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
/* Adjust MAC if using macaddr */
if (iap == macaddr)
- ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
+ ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
}
/* ------------------------------------------------------------------------- */
@@ -953,7 +954,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
char mdio_bus_id[MII_BUS_ID_SIZE];
char phy_name[MII_BUS_ID_SIZE + 3];
int phy_id;
- int dev_id = fep->pdev->id;
+ int dev_id = fep->dev_id;
fep->phy_dev = NULL;
@@ -1031,7 +1032,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* mdio interface in board design, and need to be configured by
* fec0 mii_bus.
*/
- if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id > 0) {
+ if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
/* fec1 uses fec0 mii_bus */
fep->mii_bus = fec0_mii_bus;
return 0;
@@ -1063,7 +1064,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->mii_bus->read = fec_enet_mdio_read;
fep->mii_bus->write = fec_enet_mdio_write;
fep->mii_bus->reset = fec_enet_mdio_reset;
- snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
+ snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", fep->dev_id + 1);
fep->mii_bus->priv = fep;
fep->mii_bus->parent = &pdev->dev;
@@ -1521,6 +1522,7 @@ fec_probe(struct platform_device *pdev)
int i, irq, ret = 0;
struct resource *r;
const struct of_device_id *of_id;
+ static int dev_id;
of_id = of_match_device(fec_dt_ids, &pdev->dev);
if (of_id)
@@ -1548,6 +1550,7 @@ fec_probe(struct platform_device *pdev)
fep->hwp = ioremap(r->start, resource_size(r));
fep->pdev = pdev;
+ fep->dev_id = dev_id++;
if (!fep->hwp) {
ret = -ENOMEM;
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 52f4e8ad48e7..4d9f84b8ab97 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -183,28 +183,10 @@ void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
}
EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
-/* Scan the bus in reverse, looking for an empty spot */
-static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
-{
- int i;
-
- for (i = PHY_MAX_ADDR; i > 0; i--) {
- u32 phy_id;
-
- if (get_phy_id(new_bus, i, &phy_id))
- return -1;
-
- if (phy_id == 0xffffffff)
- break;
- }
-
- return i;
-}
-
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
{
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
struct gfar __iomem *enet_regs;
/*
@@ -220,15 +202,15 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct devi
} else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
of_device_is_compatible(np, "fsl,etsec2-tbi")) {
return of_iomap(np, 1);
- } else
- return NULL;
-}
+ }
#endif
+ return NULL;
+}
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
{
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
struct device_node *np = NULL;
int err = 0;
@@ -261,9 +243,10 @@ static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
return err;
else
return -EINVAL;
-}
+#else
+ return -ENODEV;
#endif
-
+}
static int fsl_pq_mdio_probe(struct platform_device *ofdev)
{
@@ -339,19 +322,13 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
of_device_is_compatible(np, "fsl,etsec2-mdio") ||
of_device_is_compatible(np, "fsl,etsec2-tbi") ||
of_device_is_compatible(np, "gianfar")) {
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
tbipa = get_gfar_tbipa(regs, np);
if (!tbipa) {
err = -EINVAL;
goto err_free_irqs;
}
-#else
- err = -ENODEV;
- goto err_free_irqs;
-#endif
} else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
of_device_is_compatible(np, "ucc_geth_phy")) {
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
u32 id;
static u32 mii_mng_master;
@@ -364,10 +341,6 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
mii_mng_master = id;
ucc_set_qe_mux_mii_mng(id - 1);
}
-#else
- err = -ENODEV;
- goto err_free_irqs;
-#endif
} else {
err = -ENODEV;
goto err_free_irqs;
@@ -386,16 +359,6 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
}
if (tbiaddr == -1) {
- out_be32(tbipa, 0);
-
- tbiaddr = fsl_pq_mdio_find_free(new_bus);
- }
-
- /*
- * We define TBIPA at 0 to be illegal, opting to fail for boards that
- * have PHYs at 1-31, rather than change tbipa and rescan.
- */
- if (tbiaddr == 0) {
err = -EBUSY;
goto err_free_irqs;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h
index 410d6a1984ed..6650068c996c 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea.h
@@ -61,9 +61,9 @@
#ifdef EHEA_SMALL_QUEUES
#define EHEA_MAX_CQE_COUNT 1023
#define EHEA_DEF_ENTRIES_SQ 1023
-#define EHEA_DEF_ENTRIES_RQ1 4095
+#define EHEA_DEF_ENTRIES_RQ1 1023
#define EHEA_DEF_ENTRIES_RQ2 1023
-#define EHEA_DEF_ENTRIES_RQ3 1023
+#define EHEA_DEF_ENTRIES_RQ3 511
#else
#define EHEA_MAX_CQE_COUNT 4080
#define EHEA_DEF_ENTRIES_SQ 4080
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 37b70f7052b6..bfeccbfde236 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -371,7 +371,8 @@ static void ehea_update_stats(struct work_struct *work)
out_herr:
free_page((unsigned long)cb2);
resched:
- schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+ schedule_delayed_work(&port->stats_work,
+ round_jiffies_relative(msecs_to_jiffies(1000)));
}
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
@@ -2434,7 +2435,8 @@ static int ehea_open(struct net_device *dev)
}
mutex_unlock(&port->port_lock);
- schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+ schedule_delayed_work(&port->stats_work,
+ round_jiffies_relative(msecs_to_jiffies(1000)));
return ret;
}
diff --git a/drivers/net/ethernet/ibm/iseries_veth.c b/drivers/net/ethernet/ibm/iseries_veth.c
index 4326681df382..acc31af6594a 100644
--- a/drivers/net/ethernet/ibm/iseries_veth.c
+++ b/drivers/net/ethernet/ibm/iseries_veth.c
@@ -1421,7 +1421,7 @@ static void veth_receive(struct veth_lpar_connection *cnx,
/* FIXME: do we need this? */
memset(local_list, 0, sizeof(local_list));
- memset(remote_list, 0, sizeof(VETH_MAX_FRAMES_PER_MSG));
+ memset(remote_list, 0, sizeof(remote_list));
/* a 0 address marks the end of the valid entries */
if (senddata->addr[startchunk] == 0)
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 7becff1f387d..76b84573566b 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1745,6 +1745,112 @@ jme_phy_off(struct jme_adapter *jme)
}
static int
+jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
+{
+ u32 phy_addr;
+
+ phy_addr = JM_PHY_SPEC_REG_READ | specreg;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+ phy_addr);
+ return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
+ JM_PHY_SPEC_DATA_REG);
+}
+
+static void
+jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
+{
+ u32 phy_addr;
+
+ phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
+ phy_data);
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+ phy_addr);
+}
+
+static int
+jme_phy_calibration(struct jme_adapter *jme)
+{
+ u32 ctrl1000, phy_data;
+
+ jme_phy_off(jme);
+ jme_phy_on(jme);
+ /* Enable PHY test mode 1 */
+ ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+ ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+ ctrl1000 |= PHY_GAD_TEST_MODE_1;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+
+ phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+ phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
+ phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
+ JM_PHY_EXT_COMM_2_CALI_ENABLE;
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+ msleep(20);
+ phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+ phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE |
+ JM_PHY_EXT_COMM_2_CALI_MODE_0 |
+ JM_PHY_EXT_COMM_2_CALI_LATCH);
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+
+ /* Disable PHY test mode */
+ ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+ ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+ return 0;
+}
+
+static int
+jme_phy_setEA(struct jme_adapter *jme)
+{
+ u32 phy_comm0 = 0, phy_comm1 = 0;
+ u8 nic_ctrl;
+
+ pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl);
+ if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE)
+ return 0;
+
+ switch (jme->pdev->device) {
+ case PCI_DEVICE_ID_JMICRON_JMC250:
+ if (((jme->chip_main_rev == 5) &&
+ ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+ (jme->chip_sub_rev == 3))) ||
+ (jme->chip_main_rev >= 6)) {
+ phy_comm0 = 0x008A;
+ phy_comm1 = 0x4109;
+ }
+ if ((jme->chip_main_rev == 3) &&
+ ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+ phy_comm0 = 0xE088;
+ break;
+ case PCI_DEVICE_ID_JMICRON_JMC260:
+ if (((jme->chip_main_rev == 5) &&
+ ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+ (jme->chip_sub_rev == 3))) ||
+ (jme->chip_main_rev >= 6)) {
+ phy_comm0 = 0x008A;
+ phy_comm1 = 0x4109;
+ }
+ if ((jme->chip_main_rev == 3) &&
+ ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+ phy_comm0 = 0xE088;
+ if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0))
+ phy_comm0 = 0x608A;
+ if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2))
+ phy_comm0 = 0x408A;
+ break;
+ default:
+ return -ENODEV;
+ }
+ if (phy_comm0)
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0);
+ if (phy_comm1)
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1);
+
+ return 0;
+}
+
+static int
jme_open(struct net_device *netdev)
{
struct jme_adapter *jme = netdev_priv(netdev);
@@ -1769,7 +1875,8 @@ jme_open(struct net_device *netdev)
jme_set_settings(netdev, &jme->old_ecmd);
else
jme_reset_phy_processor(jme);
-
+ jme_phy_calibration(jme);
+ jme_phy_setEA(jme);
jme_reset_link(jme);
return 0;
@@ -3184,7 +3291,8 @@ jme_resume(struct device *dev)
jme_set_settings(netdev, &jme->old_ecmd);
else
jme_reset_phy_processor(jme);
-
+ jme_phy_calibration(jme);
+ jme_phy_setEA(jme);
jme_start_irq(jme);
netif_device_attach(netdev);
@@ -3239,4 +3347,3 @@ MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
-
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 02ea27c1dcb5..4304072bd3c5 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -760,6 +760,25 @@ enum jme_rxmcs_bits {
RXMCS_CHECKSUM,
};
+/* Extern PHY common register 2 */
+
+#define PHY_GAD_TEST_MODE_1 0x00002000
+#define PHY_GAD_TEST_MODE_MSK 0x0000E000
+#define JM_PHY_SPEC_REG_READ 0x00004000
+#define JM_PHY_SPEC_REG_WRITE 0x00008000
+#define PHY_CALIBRATION_DELAY 20
+#define JM_PHY_SPEC_ADDR_REG 0x1E
+#define JM_PHY_SPEC_DATA_REG 0x1F
+
+#define JM_PHY_EXT_COMM_0_REG 0x30
+#define JM_PHY_EXT_COMM_1_REG 0x31
+#define JM_PHY_EXT_COMM_2_REG 0x32
+#define JM_PHY_EXT_COMM_2_CALI_ENABLE 0x01
+#define JM_PHY_EXT_COMM_2_CALI_MODE_0 0x02
+#define JM_PHY_EXT_COMM_2_CALI_LATCH 0x10
+#define PCI_PRIV_SHARE_NICCTRL 0xF5
+#define JME_FLAG_PHYEA_ENABLE 0x2
+
/*
* Wakeup Frame setup interface registers
*/
diff --git a/drivers/net/ethernet/pasemi/Makefile b/drivers/net/ethernet/pasemi/Makefile
index 05db5434bafc..90497ffb1ac3 100644
--- a/drivers/net/ethernet/pasemi/Makefile
+++ b/drivers/net/ethernet/pasemi/Makefile
@@ -2,4 +2,5 @@
# Makefile for the A Semi network device drivers.
#
-obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o pasemi_mac_ethtool.o
+obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
+pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 8731f79c9efc..b8478aab050e 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -58,10 +58,8 @@
#define TX_DESC_PER_IOCB 8
-/* The maximum number of frags we handle is based
- * on PAGE_SIZE...
- */
-#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */
+
+#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
#else /* all other page sizes */
#define TX_DESC_PER_OAL 0
@@ -1353,7 +1351,7 @@ struct tx_ring_desc {
struct ob_mac_iocb_req *queue_entry;
u32 index;
struct oal oal;
- struct map_list map[MAX_SKB_FRAGS + 1];
+ struct map_list map[MAX_SKB_FRAGS + 2];
int map_cnt;
struct tx_ring_desc *next;
};
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 6f06aa10f0d7..67bf07819992 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1183,11 +1183,13 @@ static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
return value;
}
-static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
+static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
- RTL_W16(IntrMask, 0x0000);
+ void __iomem *ioaddr = tp->mmio_addr;
- RTL_W16(IntrStatus, 0xffff);
+ RTL_W16(IntrMask, 0x0000);
+ RTL_W16(IntrStatus, tp->intr_event);
+ RTL_R8(ChipCmd);
}
static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
@@ -3933,8 +3935,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
break;
udelay(100);
}
-
- rtl8169_init_ring_indexes(tp);
}
static int __devinit
@@ -4339,7 +4339,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
void __iomem *ioaddr = tp->mmio_addr;
/* Disable interrupts */
- rtl8169_irq_mask_and_ack(ioaddr);
+ rtl8169_irq_mask_and_ack(tp);
rtl_rx_close(tp);
@@ -4885,8 +4885,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
RTL_W16(IntrMitigate, 0x5151);
/* Work around for RxFIFO overflow. */
- if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
- tp->mac_version == RTL_GIGA_MAC_VER_22) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
tp->intr_event |= RxFIFOOver | PCSTimeout;
tp->intr_event &= ~RxOverflow;
}
@@ -5076,6 +5075,11 @@ static void rtl_hw_start_8101(struct net_device *dev)
void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_30) {
+ tp->intr_event &= ~RxFIFOOver;
+ tp->napi_event &= ~RxFIFOOver;
+ }
+
if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
tp->mac_version == RTL_GIGA_MAC_VER_16) {
int cap = pci_pcie_cap(pdev);
@@ -5342,7 +5346,7 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev)
/* Wait for any pending NAPI task to complete */
napi_disable(&tp->napi);
- rtl8169_irq_mask_and_ack(ioaddr);
+ rtl8169_irq_mask_and_ack(tp);
tp->intr_mask = 0xffff;
RTL_W16(IntrMask, tp->intr_event);
@@ -5389,14 +5393,16 @@ static void rtl8169_reset_task(struct work_struct *work)
if (!netif_running(dev))
goto out_unlock;
+ rtl8169_hw_reset(tp);
+
rtl8169_wait_for_quiescence(dev);
for (i = 0; i < NUM_RX_DESC; i++)
rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
rtl8169_tx_clear(tp);
+ rtl8169_init_ring_indexes(tp);
- rtl8169_hw_reset(tp);
rtl_hw_start(dev);
netif_wake_queue(dev);
rtl8169_check_link_status(dev, tp, tp->mmio_addr);
@@ -5407,11 +5413,6 @@ out_unlock:
static void rtl8169_tx_timeout(struct net_device *dev)
{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- rtl8169_hw_reset(tp);
-
- /* Let's wait a bit while any (async) irq lands on */
rtl8169_schedule_work(dev, rtl8169_reset_task);
}
@@ -5804,6 +5805,10 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
*/
status = RTL_R16(IntrStatus);
while (status && status != 0xffff) {
+ status &= tp->intr_event;
+ if (!status)
+ break;
+
handled = 1;
/* Handle all of the error cases first. These will reset
@@ -5818,27 +5823,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
switch (tp->mac_version) {
/* Work around for rx fifo overflow */
case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_22:
- case RTL_GIGA_MAC_VER_26:
netif_stop_queue(dev);
rtl8169_tx_timeout(dev);
goto done;
- /* Testers needed. */
- case RTL_GIGA_MAC_VER_17:
- case RTL_GIGA_MAC_VER_19:
- case RTL_GIGA_MAC_VER_20:
- case RTL_GIGA_MAC_VER_21:
- case RTL_GIGA_MAC_VER_23:
- case RTL_GIGA_MAC_VER_24:
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- /* Experimental science. Pktgen proof. */
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_25:
- if (status == RxFIFOOver)
- goto done;
- break;
default:
break;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8ea770a89f25..72cd190b9c1a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -781,10 +781,15 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
- /* Do not manage MMC IRQ (FIXME) */
+ /* Mask MMC irq, counters are managed in SW and registers
+ * are cleared on each READ eventually. */
dwmac_mmc_intr_all_mask(priv->ioaddr);
- dwmac_mmc_ctrl(priv->ioaddr, mode);
- memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+
+ if (priv->dma_cap.rmon) {
+ dwmac_mmc_ctrl(priv->ioaddr, mode);
+ memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+ } else
+ pr_info(" No MAC Management Counters available");
}
static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
@@ -1012,8 +1017,7 @@ static int stmmac_open(struct net_device *dev)
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
priv->xstats.threshold = tc;
- if (priv->dma_cap.rmon)
- stmmac_mmc_setup(priv);
+ stmmac_mmc_setup(priv);
/* Start the ball rolling... */
DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 10826d8a2a2d..1187a1169eb2 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -926,7 +926,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
goto done;
/* Re-enable the ingress interrupt. */
- enable_percpu_irq(priv->intr_id);
+ enable_percpu_irq(priv->intr_id, 0);
/* HACK: Avoid the "rotting packet" problem (see above). */
if (qup->__packet_receive_read !=
@@ -1296,7 +1296,7 @@ static void tile_net_open_enable(void *dev_ptr)
info->napi_enabled = true;
/* Enable the ingress interrupt. */
- enable_percpu_irq(priv->intr_id);
+ enable_percpu_irq(priv->intr_id, 0);
}
@@ -1697,7 +1697,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
for (i = 0; i < sh->nr_frags; i++) {
skb_frag_t *f = &sh->frags[i];
- unsigned long pfn = page_to_pfn(f->page);
+ unsigned long pfn = page_to_pfn(skb_frag_page(f));
/* FIXME: Compute "hash_for_home" properly. */
/* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
@@ -1706,7 +1706,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
/* FIXME: Hmmm. */
if (!hash_default) {
void *va = pfn_to_kaddr(pfn) + f->page_offset;
- BUG_ON(PageHighMem(f->page));
+ BUG_ON(PageHighMem(skb_frag_page(f)));
finv_buffer_remote(va, f->size, 0);
}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index bb88e12101c7..a70244306c94 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig PHYLIB
- bool "PHY Device support and infrastructure"
+ tristate "PHY Device support and infrastructure"
depends on !S390
depends on NETDEVICES
help
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 89f829f5f725..f8a6853b692e 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -423,10 +423,8 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk);
opt->src_addr = sp->sa_addr.pptp;
- if (add_chan(po)) {
- release_sock(sk);
+ if (add_chan(po))
error = -EBUSY;
- }
release_sock(sk);
return error;
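The pptp_bind() hunk removes a double release_sock(): the -EBUSY path unlocked the socket and then fell through to the unconditional release_sock() below it. The corrected shape, sketched with a hypothetical failure predicate:

    #include <linux/errno.h>
    #include <net/sock.h>

    static int example_add_chan_failed(struct sock *sk);    /* hypothetical */

    static int example_bind(struct sock *sk)
    {
            int error = 0;

            lock_sock(sk);
            if (example_add_chan_failed(sk))
                    error = -EBUSY;
            release_sock(sk);       /* exactly one unlock on every path */

            return error;
    }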
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 93fbe6f40898..d2348a5a7809 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -286,7 +286,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
ath_start_ani(common);
}
- if (ath9k_hw_ops(ah)->antdiv_comb_conf_get && sc->ant_rx != 3) {
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3) {
struct ath_hw_antcomb_conf div_ant_conf;
u8 lna_conf;
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index e12b48c2cff6..dd008b0e6417 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -191,6 +191,7 @@ static struct iwl_base_params iwl1000_base_params = {
.chain_noise_scale = 1000,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 128,
+ .wd_disable = true,
};
static struct iwl_ht_params iwl1000_ht_params = {
.ht_greenfield_support = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index c511c98a89a8..f55fb2d1af52 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -364,6 +364,7 @@ static struct iwl_base_params iwl5000_base_params = {
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.no_idle_support = true,
+ .wd_disable = true,
};
static struct iwl_ht_params iwl5000_ht_params = {
.ht_greenfield_support = true,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 58a381c01c89..a7a6def40d05 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -528,6 +528,24 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
return 0;
}
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+ struct iwl_rxon_context *ctx)
+{
+ if (conf_is_ht40_minus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ctx->ht.is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ctx->ht.is_40mhz = true;
+ } else {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ctx->ht.is_40mhz = false;
+ }
+}
+
int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
{
struct iwl_priv *priv = hw->priv;
@@ -586,19 +604,11 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
ctx->ht.enabled = conf_is_ht(conf);
if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
+ /* if HT40 is used, it should not change
+ * after association except for channel switch */
+ if (iwl_is_associated_ctx(ctx) &&
+ !ctx->ht.is_40mhz)
+ iwlagn_config_ht40(conf, ctx);
} else
ctx->ht.is_40mhz = false;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index ed6283623932..4b2aa1da0953 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -1268,9 +1268,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-
if (sta)
addr = sta->addr;
else /* station mode case only */
@@ -1283,8 +1280,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
seq.tkip.iv32, p1k, CMD_SYNC);
break;
case WLAN_CIPHER_SUITE_CCMP:
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- /* fall through */
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index ccba69b7f8a7..bacc06c95e7a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2316,6 +2316,17 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_CCMP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ break;
+ default:
+ break;
+ }
+
/*
* We could program these keys into the hardware as well, but we
* don't expect much multicast traffic in IBSS and having keys
@@ -2599,21 +2610,9 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
/* Configure HT40 channels */
ctx->ht.enabled = conf_is_ht(conf);
- if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
- } else
+ if (ctx->ht.enabled)
+ iwlagn_config_ht40(conf, ctx);
+ else
ctx->ht.is_40mhz = false;
if ((le16_to_cpu(ctx->staging.channel) != ch))
@@ -3499,9 +3498,10 @@ MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
-module_param_named(wd_disable, iwlagn_mod_params.wd_disable, bool, S_IRUGO);
+module_param_named(wd_disable, iwlagn_mod_params.wd_disable, int, S_IRUGO);
MODULE_PARM_DESC(wd_disable,
- "Disable stuck queue watchdog timer (default: 0 [enabled])");
+ "Disable stuck queue watchdog timer 0=system default, "
+ "1=disable, 2=enable (default: 0)");
/*
* set bt_coex_active to true, uCode will do kill/defer
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 5b936ec1a541..3856abaea507 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -86,6 +86,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changes);
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+ struct iwl_rxon_context *ctx);
/* uCode */
int iwlagn_rx_calib_result(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 001fdf140abb..fcf54160e4ed 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1810,11 +1810,23 @@ void iwl_setup_watchdog(struct iwl_priv *priv)
{
unsigned int timeout = priv->cfg->base_params->wd_timeout;
- if (timeout && !iwlagn_mod_params.wd_disable)
- mod_timer(&priv->watchdog,
- jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
- else
- del_timer(&priv->watchdog);
+ if (!iwlagn_mod_params.wd_disable) {
+ /* use system default */
+ if (timeout && !priv->cfg->base_params->wd_disable)
+ mod_timer(&priv->watchdog,
+ jiffies +
+ msecs_to_jiffies(IWL_WD_TICK(timeout)));
+ else
+ del_timer(&priv->watchdog);
+ } else {
+ /* module parameter overwrites default configuration */
+ if (timeout && iwlagn_mod_params.wd_disable == 2)
+ mod_timer(&priv->watchdog,
+ jiffies +
+ msecs_to_jiffies(IWL_WD_TICK(timeout)));
+ else
+ del_timer(&priv->watchdog);
+ }
}
/**
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 137da3380704..f2fc288f3dd3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -113,6 +113,7 @@ struct iwl_lib_ops {
* @shadow_reg_enable: HW shadhow register bit
* @no_idle_support: do not support idle mode
* @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
+ * @wd_disable: disable watchdog timer
*/
struct iwl_base_params {
int eeprom_size;
@@ -134,6 +135,7 @@ struct iwl_base_params {
const bool shadow_reg_enable;
const bool no_idle_support;
const bool hd_v2;
+ const bool wd_disable;
};
/*
* @advanced_bt_coexist: support advanced bt coexist
diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-shared.h
index 1f7a93c67c45..14eaf37ce3b1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-shared.h
+++ b/drivers/net/wireless/iwlwifi/iwl-shared.h
@@ -120,7 +120,7 @@ extern struct iwl_mod_params iwlagn_mod_params;
* @restart_fw: restart firmware, default = 1
* @plcp_check: enable plcp health check, default = true
* @ack_check: disable ack health check, default = false
- * @wd_disable: enable stuck queue check, default = false
+ * @wd_disable: enable stuck queue check, default = 0
* @bt_coex_active: enable bt coex, default = true
* @led_mode: system default, default = 0
* @no_sleep_autoadjust: disable autoadjust, default = true
@@ -141,7 +141,7 @@ struct iwl_mod_params {
int restart_fw;
bool plcp_check;
bool ack_check;
- bool wd_disable;
+ int wd_disable;
bool bt_coex_active;
int led_mode;
bool no_sleep_autoadjust;
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index f18df82eeb92..78d0d6988553 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -588,8 +588,6 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
WARN_ON(priv->fw_state != FW_STATE_READY);
- cancel_work_sync(&priv->work);
-
p54spi_power_off(priv);
spin_lock_irqsave(&priv->tx_lock, flags);
INIT_LIST_HEAD(&priv->tx_pending);
@@ -597,6 +595,8 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
priv->fw_state = FW_STATE_OFF;
mutex_unlock(&priv->mutex);
+
+ cancel_work_sync(&priv->work);
}
static int __devinit p54spi_probe(struct spi_device *spi)
@@ -656,6 +656,7 @@ static int __devinit p54spi_probe(struct spi_device *spi)
init_completion(&priv->fw_comp);
INIT_LIST_HEAD(&priv->tx_pending);
mutex_init(&priv->mutex);
+ spin_lock_init(&priv->tx_lock);
SET_IEEE80211_DEV(hw, &spi->dev);
priv->common.open = p54spi_op_start;
priv->common.stop = p54spi_op_stop;
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index d97a2caf582b..bc2ba80c47bb 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -778,7 +778,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
dwrq->flags = 0;
dwrq->length = 0;
}
- essid->octets[essid->length] = '\0';
+ essid->octets[dwrq->length] = '\0';
memcpy(extra, essid->octets, dwrq->length);
kfree(essid);
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 3f183a15186e..1ba079dffb11 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -3771,7 +3771,7 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
/* Apparently the data is read from end to start */
rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
/* The returned value is in CPU order, but eeprom is le */
- rt2x00dev->eeprom[i] = cpu_to_le32(reg);
+ *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
*(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index db5262844543..55c8e50f45fd 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -395,7 +395,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
if (mac->link_state != MAC80211_LINKED)
return;
- spin_lock(&rtlpriv->locks.lps_lock);
+ spin_lock_irq(&rtlpriv->locks.lps_lock);
/* Idle for a while if we connect to AP a while ago. */
if (mac->cnt_after_linked >= 2) {
@@ -407,7 +407,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
}
}
- spin_unlock(&rtlpriv->locks.lps_lock);
+ spin_unlock_irq(&rtlpriv->locks.lps_lock);
}
/*Leave the leisure power save mode.*/
@@ -416,8 +416,9 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ unsigned long flags;
- spin_lock(&rtlpriv->locks.lps_lock);
+ spin_lock_irqsave(&rtlpriv->locks.lps_lock, flags);
if (ppsc->fwctrl_lps) {
if (ppsc->dot11_psmode != EACTIVE) {
@@ -438,7 +439,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
rtl_lps_set_psmode(hw, EACTIVE);
}
}
- spin_unlock(&rtlpriv->locks.lps_lock);
+ spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flags);
}
/* For sw LPS*/
@@ -539,9 +540,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
}
- spin_lock(&rtlpriv->locks.lps_lock);
+ spin_lock_irq(&rtlpriv->locks.lps_lock);
rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
- spin_unlock(&rtlpriv->locks.lps_lock);
+ spin_unlock_irq(&rtlpriv->locks.lps_lock);
}
void rtl_swlps_rfon_wq_callback(void *data)
@@ -574,9 +575,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
if (rtlpriv->link_info.busytraffic)
return;
- spin_lock(&rtlpriv->locks.lps_lock);
+ spin_lock_irq(&rtlpriv->locks.lps_lock);
rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
- spin_unlock(&rtlpriv->locks.lps_lock);
+ spin_unlock_irq(&rtlpriv->locks.lps_lock);
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
!RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 592a10ac5929..3b585aadabfc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -569,7 +569,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
case ERFSLEEP:{
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
ring = &pcipriv->dev.tx_ring[queue_id];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
index 72852900df84..e49cf2244c75 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
@@ -548,7 +548,7 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
case ERFSLEEP:
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
ring = &pcipriv->dev.tx_ring[queue_id];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 3ac7af1c5509..0883349e1c83 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -3374,7 +3374,7 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
case ERFSLEEP:
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index f27171af979c..f10ac1ad9087 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -602,7 +602,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
case ERFSLEEP:
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 0cb594c86090..15e332d08c8d 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1021,7 +1021,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
pending_idx = *((u16 *)skb->data);
xen_netbk_idx_release(netbk, pending_idx);
for (j = start; j < i; j++) {
- pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
+ pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
xen_netbk_idx_release(netbk, pending_idx);
}
@@ -1668,7 +1668,7 @@ static int __init netback_init(void)
"netback/%u", group);
if (IS_ERR(netbk->task)) {
- printk(KERN_ALERT "kthread_run() fails at netback\n");
+ printk(KERN_ALERT "kthread_create() fails at netback\n");
del_timer(&netbk->net_timer);
rc = PTR_ERR(netbk->task);
goto failed_init;
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 791270b8bd1c..0f0cfa3bca30 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -26,11 +26,6 @@
#include <linux/string.h>
#include <linux/slab.h>
-/* For archs that don't support NO_IRQ (such as x86), provide a dummy value */
-#ifndef NO_IRQ
-#define NO_IRQ 0
-#endif
-
/**
* irq_of_parse_and_map - Parse and map an interrupt into linux virq space
* @device: Device node of the device whose interrupt is to be mapped
@@ -44,7 +39,7 @@ unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
struct of_irq oirq;
if (of_irq_map_one(dev, index, &oirq))
- return NO_IRQ;
+ return 0;
return irq_create_of_mapping(oirq.controller, oirq.specifier,
oirq.size);
@@ -345,7 +340,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
/* Only dereference the resource if both the
* resource and the irq are valid. */
- if (r && irq != NO_IRQ) {
+ if (r && irq) {
r->start = r->end = irq;
r->flags = IORESOURCE_IRQ;
r->name = dev->full_name;
@@ -363,7 +358,7 @@ int of_irq_count(struct device_node *dev)
{
int nr = 0;
- while (of_irq_to_resource(dev, nr, NULL) != NO_IRQ)
+ while (of_irq_to_resource(dev, nr, NULL))
nr++;
return nr;
@@ -383,7 +378,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
int i;
for (i = 0; i < nr_irqs; i++, res++)
- if (of_irq_to_resource(dev, i, res) == NO_IRQ)
+ if (!of_irq_to_resource(dev, i, res))
break;
return i;
@@ -424,6 +419,8 @@ void __init of_irq_init(const struct of_device_id *matches)
desc->dev = np;
desc->interrupt_parent = of_irq_find_parent(np);
+ if (desc->interrupt_parent == np)
+ desc->interrupt_parent = NULL;
list_add_tail(&desc->list, &intc_desc_list);
}
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index dccd8636095c..f8c752e408a6 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -239,26 +239,45 @@ int oprofile_set_ulong(unsigned long *addr, unsigned long val)
return err;
}
+static int timer_mode;
+
static int __init oprofile_init(void)
{
int err;
+ /* always init architecture to setup backtrace support */
err = oprofile_arch_init(&oprofile_ops);
- if (err < 0 || timer) {
- printk(KERN_INFO "oprofile: using timer interrupt.\n");
+
+ timer_mode = err || timer; /* fall back to timer mode on errors */
+ if (timer_mode) {
+ if (!err)
+ oprofile_arch_exit();
err = oprofile_timer_init(&oprofile_ops);
if (err)
return err;
}
- return oprofilefs_register();
+
+ err = oprofilefs_register();
+ if (!err)
+ return 0;
+
+ /* failed */
+ if (timer_mode)
+ oprofile_timer_exit();
+ else
+ oprofile_arch_exit();
+
+ return err;
}
static void __exit oprofile_exit(void)
{
- oprofile_timer_exit();
oprofilefs_unregister();
- oprofile_arch_exit();
+ if (timer_mode)
+ oprofile_timer_exit();
+ else
+ oprofile_arch_exit();
}
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 3ef44624f510..878fba126582 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -110,6 +110,7 @@ int oprofile_timer_init(struct oprofile_operations *ops)
ops->start = oprofile_hrtimer_start;
ops->stop = oprofile_hrtimer_stop;
ops->cpu_type = "timer";
+ printk(KERN_INFO "oprofile: using timer interrupt.\n");
return 0;
}
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 7ec56fb0bd78..b0dd08e6a9da 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <linux/pci-ats.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include "pci.h"
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index fce1c54a0c8d..9ddf69e3bbef 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -132,6 +132,18 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
return AE_OK;
+ pdev = pbus->self;
+ if (pdev && pci_is_pcie(pdev)) {
+ tmp = acpi_find_root_bridge_handle(pdev);
+ if (tmp) {
+ struct acpi_pci_root *root = acpi_pci_find_root(tmp);
+
+ if (root && (root->osc_control_set &
+ OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
+ return AE_OK;
+ }
+ }
+
acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
device = (adr >> 16) & 0xffff;
function = adr & 0xffff;
@@ -213,7 +225,6 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
if (pdev) {
- pdev->current_state = PCI_D0;
slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
pci_dev_put(pdev);
}
@@ -459,17 +470,8 @@ static int add_bridge(acpi_handle handle)
{
acpi_status status;
unsigned long long tmp;
- struct acpi_pci_root *root;
acpi_handle dummy_handle;
- /*
- * We shouldn't use this bridge if PCIe native hotplug control has been
- * granted by the BIOS for it.
- */
- root = acpi_pci_find_root(handle);
- if (root && (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
- return -ENODEV;
-
/* if the bridge doesn't have _STA, we assume it is always there */
status = acpi_get_handle(handle, "_STA", &dummy_handle);
if (ACPI_SUCCESS(status)) {
@@ -1385,19 +1387,11 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type,
static acpi_status
find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
{
- struct acpi_pci_root *root;
int *count = (int *)context;
if (!acpi_is_root_bridge(handle))
return AE_OK;
- root = acpi_pci_find_root(handle);
- if (!root)
- return AE_OK;
-
- if (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
- return AE_OK;
-
(*count)++;
acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
handle_hotplug_event_bridge, NULL);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b82c155d7b37..1969a3ee3058 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -283,6 +283,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
struct resource *res;
struct pci_dev *pdev;
struct pci_sriov *iov = dev->sriov;
+ int bars = 0;
if (!nr_virtfn)
return 0;
@@ -307,6 +308,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ bars |= (1 << (i + PCI_IOV_RESOURCES));
res = dev->resource + PCI_IOV_RESOURCES + i;
if (res->parent)
nres++;
@@ -324,6 +326,11 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return -ENOMEM;
}
+ if (pci_enable_resources(dev, bars)) {
+ dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n");
+ return -ENOMEM;
+ }
+
if (iov->link != dev->devfn) {
pdev = pci_get_slot(dev->bus, iov->link);
if (!pdev)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6f45a73c6e9f..6d4a5319148d 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -664,6 +664,9 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
error = platform_pci_set_power_state(dev, state);
if (!error)
pci_update_current_state(dev, state);
+ /* Fall back to PCI_D0 if native PM is not supported */
+ if (!dev->pm_cap)
+ dev->current_state = PCI_D0;
} else {
error = -ENODEV;
/* Fall back to PCI_D0 if native PM is not supported */
@@ -1126,7 +1129,11 @@ static int __pci_enable_device_flags(struct pci_dev *dev,
if (atomic_add_return(1, &dev->enable_cnt) > 1)
return 0; /* already enabled */
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ /* only skip sriov related */
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++)
+ if (dev->resource[i].flags & flags)
+ bars |= (1 << i);
+ for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
if (dev->resource[i].flags & flags)
bars |= (1 << i);
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 13ef8c37471d..dcdc1f4a4624 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -121,6 +121,7 @@ struct toshiba_acpi_dev {
int illumination_supported:1;
int video_supported:1;
int fan_supported:1;
+ int system_event_supported:1;
struct mutex mutex;
};
@@ -724,7 +725,7 @@ static int keys_proc_show(struct seq_file *m, void *v)
u32 hci_result;
u32 value;
- if (!dev->key_event_valid) {
+ if (!dev->key_event_valid && dev->system_event_supported) {
hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
if (hci_result == HCI_SUCCESS) {
dev->key_event_valid = 1;
@@ -964,6 +965,8 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
/* enable event fifo */
hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
+ if (hci_result == HCI_SUCCESS)
+ dev->system_event_supported = 1;
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
@@ -1032,12 +1035,15 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
{
struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
u32 hci_result, value;
+ int retries = 3;
- if (event != 0x80)
+ if (!dev->system_event_supported || event != 0x80)
return;
+
do {
hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
- if (hci_result == HCI_SUCCESS) {
+ switch (hci_result) {
+ case HCI_SUCCESS:
if (value == 0x100)
continue;
/* act on key press; ignore key release */
@@ -1049,14 +1055,19 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
pr_info("Unknown key %x\n",
value);
}
- } else if (hci_result == HCI_NOT_SUPPORTED) {
+ break;
+ case HCI_NOT_SUPPORTED:
/* This is a workaround for an unresolved issue on
* some machines where system events sporadically
* become disabled. */
hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
pr_notice("Re-enabled hotkeys\n");
+ /* fall through */
+ default:
+ retries--;
+ break;
}
- } while (hci_result != HCI_EMPTY);
+ } while (retries && hci_result != HCI_EMPTY);
}
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c
index cffcb7c00b00..01fa671ec97f 100644
--- a/drivers/power/intel_mid_battery.c
+++ b/drivers/power/intel_mid_battery.c
@@ -61,7 +61,8 @@ MODULE_PARM_DESC(debug, "Flag to enable PMIC Battery debug messages.");
#define PMIC_BATT_CHR_SBATDET_MASK (1 << 5)
#define PMIC_BATT_CHR_SDCLMT_MASK (1 << 6)
#define PMIC_BATT_CHR_SUSBOVP_MASK (1 << 7)
-#define PMIC_BATT_CHR_EXCPT_MASK 0xC6
+#define PMIC_BATT_CHR_EXCPT_MASK 0x86
+
#define PMIC_BATT_ADC_ACCCHRG_MASK (1 << 31)
#define PMIC_BATT_ADC_ACCCHRGVAL_MASK 0x7FFFFFFF
@@ -304,11 +305,6 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT);
batt_exception = 1;
- } else if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
- pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
- pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
- pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
- batt_exception = 1;
} else if (r8 & PMIC_BATT_CHR_STEMP_MASK) {
pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT;
pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
@@ -316,6 +312,10 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
batt_exception = 1;
} else {
pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
+ if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
+ /* PMIC will change charging current automatically */
+ pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
+ }
}
}
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index cf3f9997546d..10451a15e828 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -101,7 +101,9 @@ static s32 scaled_ppm_to_ppb(long ppm)
static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
{
- return 1; /* always round timer functions to one nanosecond */
+ tp->tv_sec = 0;
+ tp->tv_nsec = 1;
+ return 0;
}
static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
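The ptp_clock_getres() change aligns the callback with the posix clock convention: report the resolution through the timespec argument and return 0 on success, rather than returning the resolution itself. The convention in isolation (not the full ptp_clock.c context):

    #include <linux/time.h>

    static int example_clock_getres(struct timespec *tp)
    {
            tp->tv_sec = 0;
            tp->tv_nsec = 1;        /* one-nanosecond resolution */
            return 0;               /* 0 on success, negative errno on error */
    }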
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 5225930a10cd..691b1ab1a3d0 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -851,14 +851,12 @@ static int tsi721_doorbell_init(struct tsi721_device *priv)
INIT_WORK(&priv->idb_work, tsi721_db_dpc);
/* Allocate buffer for inbound doorbells queue */
- priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
+ priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
&priv->idb_dma, GFP_KERNEL);
if (!priv->idb_base)
return -ENOMEM;
- memset(priv->idb_base, 0, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE);
-
dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
priv->idb_base, (unsigned long long)priv->idb_dma);
@@ -904,7 +902,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
*/
/* Allocate space for DMA descriptors */
- bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
+ bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
bd_num * sizeof(struct tsi721_dma_desc),
&bd_phys, GFP_KERNEL);
if (!bd_ptr)
@@ -913,8 +911,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
priv->bdma[chnum].bd_phys = bd_phys;
priv->bdma[chnum].bd_base = bd_ptr;
- memset(bd_ptr, 0, bd_num * sizeof(struct tsi721_dma_desc));
-
dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
bd_ptr, (unsigned long long)bd_phys);
@@ -922,7 +918,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
bd_num : TSI721_DMA_MINSTSSZ;
sts_size = roundup_pow_of_two(sts_size);
- sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
+ sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
sts_size * sizeof(struct tsi721_dma_sts),
&sts_phys, GFP_KERNEL);
if (!sts_ptr) {
@@ -938,8 +934,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
priv->bdma[chnum].sts_base = sts_ptr;
priv->bdma[chnum].sts_size = sts_size;
- memset(sts_ptr, 0, sts_size);
-
dev_dbg(&priv->pdev->dev,
"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
sts_ptr, (unsigned long long)sts_phys, sts_size);
@@ -1400,7 +1394,7 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
/* Outbound message descriptor status FIFO allocation */
priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
- priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
+ priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
priv->omsg_ring[mbox].sts_size *
sizeof(struct tsi721_dma_sts),
&priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
@@ -1412,9 +1406,6 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
goto out_desc;
}
- memset(priv->omsg_ring[mbox].sts_base, 0,
- entries * sizeof(struct tsi721_dma_sts));
-
/*
* Configure Outbound Messaging Engine
*/
@@ -2116,8 +2107,8 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
INIT_LIST_HEAD(&mport->dbells);
rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
- rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
- rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
+ rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
+ rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
strcpy(mport->name, "Tsi721 mport");
/* Hook up interrupt handler */
@@ -2163,7 +2154,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct tsi721_device *priv;
- int i;
+ int i, cap;
int err;
u32 regval;
@@ -2271,10 +2262,20 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
}
- /* Clear "no snoop" and "relaxed ordering" bits. */
- pci_read_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, &regval);
- regval &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN);
- pci_write_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, regval);
+ cap = pci_pcie_cap(pdev);
+ BUG_ON(cap == 0);
+
+ /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
+ pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &regval);
+ regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
+ PCI_EXP_DEVCTL_NOSNOOP_EN);
+ regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT;
+ pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval);
+
+ /* Adjust PCIe completion timeout. */
+ pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, &regval);
+ regval &= ~(0x0f);
+ pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2);
/*
* FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
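Several of the tsi721 hunks above replace a dma_alloc_coherent() + memset() pair with dma_zalloc_coherent(), which hands back already-zeroed coherent memory. A minimal sketch of the pattern, with placeholder names:

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    static void *alloc_desc_ring(struct device *dev, size_t size,
                                 dma_addr_t *phys)
    {
            /* Returned memory is already zeroed; no follow-up memset() needed. */
            return dma_zalloc_coherent(dev, size, phys, GFP_KERNEL);
    }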
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 58be4deb1402..822e54c394d5 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -72,6 +72,8 @@
#define TSI721_MSIXPBA_OFFSET 0x2a000
#define TSI721_PCIECFG_EPCTL 0x400
+#define MAX_READ_REQUEST_SZ_SHIFT 12
+
/*
* Event Management Registers
*/
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index 5abeb3ac3e8d..298c6c6a2795 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -160,7 +160,7 @@ static struct aat2870_regulator *aat2870_get_regulator(int id)
break;
}
- if (!ri)
+ if (i == ARRAY_SIZE(aat2870_regulators))
return NULL;
ri->enable_addr = AAT2870_LDO_EN;
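The aat2870 hunk works because the cursor variable still points at the last array entry when the for loop runs off the end, so only the loop index can distinguish "found" from "not found". The idiom with illustrative types:

    #include <linux/kernel.h>

    struct example_reg { int id; };

    static struct example_reg example_regs[4];

    static struct example_reg *example_find(int id)
    {
            struct example_reg *ri = NULL;
            int i;

            for (i = 0; i < ARRAY_SIZE(example_regs); i++) {
                    ri = &example_regs[i];
                    if (ri->id == id)
                            break;
            }

            /* ri is non-NULL even on a miss; the exhausted index is the signal. */
            if (i == ARRAY_SIZE(example_regs))
                    return NULL;

            return ri;
    }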
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 669d02160221..938398f3e869 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2799,8 +2799,8 @@ void regulator_unregister(struct regulator_dev *rdev)
list_del(&rdev->list);
if (rdev->supply)
regulator_put(rdev->supply);
- device_unregister(&rdev->dev);
kfree(rdev->constraints);
+ device_unregister(&rdev->dev);
mutex_unlock(&regulator_list_mutex);
}
EXPORT_SYMBOL_GPL(regulator_unregister);
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index ee8747f4fa08..11cc308d66e9 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -71,6 +71,7 @@ struct twlreg_info {
#define VREG_TYPE 1
#define VREG_REMAP 2
#define VREG_DEDICATED 3 /* LDO control */
+#define VREG_VOLTAGE_SMPS_4030 9
/* TWL6030 register offsets */
#define VREG_TRANS 1
#define VREG_STATE 2
@@ -514,6 +515,32 @@ static struct regulator_ops twl4030ldo_ops = {
.get_status = twl4030reg_get_status,
};
+static int
+twl4030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+ unsigned *selector)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int vsel = DIV_ROUND_UP(min_uV - 600000, 12500);
+
+ twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS_4030,
+ vsel);
+ return 0;
+}
+
+static int twl4030smps_get_voltage(struct regulator_dev *rdev)
+{
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
+ int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
+ VREG_VOLTAGE_SMPS_4030);
+
+ return vsel * 12500 + 600000;
+}
+
+static struct regulator_ops twl4030smps_ops = {
+ .set_voltage = twl4030smps_set_voltage,
+ .get_voltage = twl4030smps_get_voltage,
+};
+
static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
@@ -856,6 +883,21 @@ static struct regulator_ops twlsmps_ops = {
}, \
}
+#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \
+ { \
+ .base = offset, \
+ .id = num, \
+ .delay = turnon_delay, \
+ .remap = remap_conf, \
+ .desc = { \
+ .name = #label, \
+ .id = TWL4030_REG_##label, \
+ .ops = &twl4030smps_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ }, \
+ }
+
#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
.base = offset, \
.min_mV = min_mVolts, \
@@ -947,8 +989,8 @@ static struct twlreg_info twl_regs[] = {
TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08),
TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08),
TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08),
- TWL4030_ADJUSTABLE_LDO(VDD1, 0x55, 15, 1000, 0x08),
- TWL4030_ADJUSTABLE_LDO(VDD2, 0x63, 16, 1000, 0x08),
+ TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08),
+ TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08),
TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08),
TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08),
TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08),
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index e8326f26fa2f..dc4c2748bbc3 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -63,7 +63,7 @@ static int rtc_suspend(struct device *dev, pm_message_t mesg)
*/
delta = timespec_sub(old_system, old_rtc);
delta_delta = timespec_sub(delta, old_delta);
- if (abs(delta_delta.tv_sec) >= 2) {
+ if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
/*
* if delta_delta is too large, assume time correction
* has occured and set old_delta to the current delta.
@@ -97,9 +97,8 @@ static int rtc_resume(struct device *dev)
rtc_tm_to_time(&tm, &new_rtc.tv_sec);
new_rtc.tv_nsec = 0;
- if (new_rtc.tv_sec <= old_rtc.tv_sec) {
- if (new_rtc.tv_sec < old_rtc.tv_sec)
- pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
+ if (new_rtc.tv_sec < old_rtc.tv_sec) {
+ pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
return 0;
}
@@ -116,7 +115,8 @@ static int rtc_resume(struct device *dev)
sleep_time = timespec_sub(sleep_time,
timespec_sub(new_system, old_system));
- timekeeping_inject_sleeptime(&sleep_time);
+ if (sleep_time.tv_sec >= 0)
+ timekeeping_inject_sleeptime(&sleep_time);
return 0;
}
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 8e286259a007..fa4d9f324189 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -319,6 +319,20 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);
+static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+{
+ int err;
+
+ if (!rtc->ops)
+ err = -ENODEV;
+ else if (!rtc->ops->set_alarm)
+ err = -EINVAL;
+ else
+ err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+
+ return err;
+}
+
static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
struct rtc_time tm;
@@ -342,14 +356,7 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
* over right here, before we set the alarm.
*/
- if (!rtc->ops)
- err = -ENODEV;
- else if (!rtc->ops->set_alarm)
- err = -EINVAL;
- else
- err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
-
- return err;
+ return ___rtc_set_alarm(rtc, alarm);
}
int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
@@ -763,6 +770,20 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
return 0;
}
+static void rtc_alarm_disable(struct rtc_device *rtc)
+{
+ struct rtc_wkalrm alarm;
+ struct rtc_time tm;
+
+ __rtc_read_time(rtc, &tm);
+
+ alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm),
+ ktime_set(300, 0)));
+ alarm.enabled = 0;
+
+ ___rtc_set_alarm(rtc, &alarm);
+}
+
/**
* rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
* @rtc rtc device
@@ -784,8 +805,10 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
struct rtc_wkalrm alarm;
int err;
next = timerqueue_getnext(&rtc->timerqueue);
- if (!next)
+ if (!next) {
+ rtc_alarm_disable(rtc);
return;
+ }
alarm.time = rtc_ktime_to_tm(next->expires);
alarm.enabled = 1;
err = __rtc_set_alarm(rtc, &alarm);
@@ -847,7 +870,8 @@ again:
err = __rtc_set_alarm(rtc, &alarm);
if (err == -ETIME)
goto again;
- }
+ } else
+ rtc_alarm_disable(rtc);
mutex_unlock(&rtc->ops_lock);
}
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 7639ab906f02..5b979d9cc332 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -202,7 +202,6 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
void __iomem *base = s3c_rtc_base;
int year = tm->tm_year - 100;
- clk_enable(rtc_clk);
pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -214,6 +213,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
return -EINVAL;
}
+ clk_enable(rtc_clk);
writeb(bin2bcd(tm->tm_sec), base + S3C2410_RTCSEC);
writeb(bin2bcd(tm->tm_min), base + S3C2410_RTCMIN);
writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 75c3f1f8fd43..a84631a7391d 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -529,10 +529,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
int chsc_chp_vary(struct chp_id chpid, int on)
{
struct channel_path *chp = chpid_to_chp(chpid);
- struct chp_link link;
- memset(&link, 0, sizeof(struct chp_link));
- link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
/*
@@ -542,10 +539,10 @@ int chsc_chp_vary(struct chp_id chpid, int on)
/* Try to update the channel path descritor. */
chsc_determine_base_channel_path_desc(chpid, &chp->desc);
for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
- __s390_vary_chpid_on, &link);
+ __s390_vary_chpid_on, &chpid);
} else
for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
- NULL, &link);
+ NULL, &chpid);
return 0;
}
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 155a82bcb9e5..4a1ff5c2eb88 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -68,8 +68,13 @@ struct schib {
__u8 mda[4]; /* model dependent area */
} __attribute__ ((packed,aligned(4)));
+/*
+ * When rescheduled, todo's with higher values will overwrite those
+ * with lower values.
+ */
enum sch_todo {
SCH_TODO_NOTHING,
+ SCH_TODO_EVAL,
SCH_TODO_UNREG,
};
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 92d7324acb1c..21908e67bf67 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -195,51 +195,6 @@ void css_sch_device_unregister(struct subchannel *sch)
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
-static void css_sch_todo(struct work_struct *work)
-{
- struct subchannel *sch;
- enum sch_todo todo;
-
- sch = container_of(work, struct subchannel, todo_work);
- /* Find out todo. */
- spin_lock_irq(sch->lock);
- todo = sch->todo;
- CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
- sch->schid.sch_no, todo);
- sch->todo = SCH_TODO_NOTHING;
- spin_unlock_irq(sch->lock);
- /* Perform todo. */
- if (todo == SCH_TODO_UNREG)
- css_sch_device_unregister(sch);
- /* Release workqueue ref. */
- put_device(&sch->dev);
-}
-
-/**
- * css_sched_sch_todo - schedule a subchannel operation
- * @sch: subchannel
- * @todo: todo
- *
- * Schedule the operation identified by @todo to be performed on the slow path
- * workqueue. Do nothing if another operation with higher priority is already
- * scheduled. Needs to be called with subchannel lock held.
- */
-void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
-{
- CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
- sch->schid.ssid, sch->schid.sch_no, todo);
- if (sch->todo >= todo)
- return;
- /* Get workqueue ref. */
- if (!get_device(&sch->dev))
- return;
- sch->todo = todo;
- if (!queue_work(cio_work_q, &sch->todo_work)) {
- /* Already queued, release workqueue ref. */
- put_device(&sch->dev);
- }
-}
-
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
int i;
@@ -466,6 +421,65 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
css_schedule_eval(schid);
}
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+ CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, todo);
+ if (sch->todo >= todo)
+ return;
+ /* Get workqueue ref. */
+ if (!get_device(&sch->dev))
+ return;
+ sch->todo = todo;
+ if (!queue_work(cio_work_q, &sch->todo_work)) {
+ /* Already queued, release workqueue ref. */
+ put_device(&sch->dev);
+ }
+}
+
+static void css_sch_todo(struct work_struct *work)
+{
+ struct subchannel *sch;
+ enum sch_todo todo;
+ int ret;
+
+ sch = container_of(work, struct subchannel, todo_work);
+ /* Find out todo. */
+ spin_lock_irq(sch->lock);
+ todo = sch->todo;
+ CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+ sch->schid.sch_no, todo);
+ sch->todo = SCH_TODO_NOTHING;
+ spin_unlock_irq(sch->lock);
+ /* Perform todo. */
+ switch (todo) {
+ case SCH_TODO_NOTHING:
+ break;
+ case SCH_TODO_EVAL:
+ ret = css_evaluate_known_subchannel(sch, 1);
+ if (ret == -EAGAIN) {
+ spin_lock_irq(sch->lock);
+ css_sched_sch_todo(sch, todo);
+ spin_unlock_irq(sch->lock);
+ }
+ break;
+ case SCH_TODO_UNREG:
+ css_sch_device_unregister(sch);
+ break;
+ }
+ /* Release workqueue ref. */
+ put_device(&sch->dev);
+}
+
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index d734f4a0ecac..47269858ecb6 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1868,9 +1868,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
*/
cdev->private->flags.resuming = 1;
cdev->private->path_new_mask = LPM_ANYPATH;
- css_schedule_eval(sch->schid);
+ css_sched_sch_todo(sch, SCH_TODO_EVAL);
spin_unlock_irq(sch->lock);
- css_complete_work();
+ css_wait_for_slow_path();
/* cdev may have been moved to a different subchannel. */
sch = to_subchannel(cdev->dev.parent);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 52c233fa2b12..1b853513c891 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -496,8 +496,26 @@ static void ccw_device_reset_path_events(struct ccw_device *cdev)
cdev->private->pgid_reset_mask = 0;
}
-void
-ccw_device_verify_done(struct ccw_device *cdev, int err)
+static void create_fake_irb(struct irb *irb, int type)
+{
+ memset(irb, 0, sizeof(*irb));
+ if (type == FAKE_CMD_IRB) {
+ struct cmd_scsw *scsw = &irb->scsw.cmd;
+ scsw->cc = 1;
+ scsw->fctl = SCSW_FCTL_START_FUNC;
+ scsw->actl = SCSW_ACTL_START_PEND;
+ scsw->stctl = SCSW_STCTL_STATUS_PEND;
+ } else if (type == FAKE_TM_IRB) {
+ struct tm_scsw *scsw = &irb->scsw.tm;
+ scsw->x = 1;
+ scsw->cc = 1;
+ scsw->fctl = SCSW_FCTL_START_FUNC;
+ scsw->actl = SCSW_ACTL_START_PEND;
+ scsw->stctl = SCSW_STCTL_STATUS_PEND;
+ }
+}
+
+void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
struct subchannel *sch;
@@ -520,12 +538,8 @@ callback:
ccw_device_done(cdev, DEV_STATE_ONLINE);
/* Deliver fake irb to device driver, if needed. */
if (cdev->private->flags.fake_irb) {
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- cdev->private->irb.scsw.cmd.cc = 1;
- cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
- cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
- cdev->private->irb.scsw.cmd.stctl =
- SCSW_STCTL_STATUS_PEND;
+ create_fake_irb(&cdev->private->irb,
+ cdev->private->flags.fake_irb);
cdev->private->flags.fake_irb = 0;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index f98698d5735e..ec7fb6d3b479 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -198,7 +198,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
if (cdev->private->state == DEV_STATE_VERIFY) {
/* Remember to fake irb when finished. */
if (!cdev->private->flags.fake_irb) {
- cdev->private->flags.fake_irb = 1;
+ cdev->private->flags.fake_irb = FAKE_CMD_IRB;
cdev->private->intparm = intparm;
return 0;
} else
@@ -213,9 +213,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
ret = cio_set_options (sch, flags);
if (ret)
return ret;
- /* Adjust requested path mask to excluded varied off paths. */
+ /* Adjust requested path mask to exclude unusable paths. */
if (lpm) {
- lpm &= sch->opm;
+ lpm &= sch->lpm;
if (lpm == 0)
return -EACCES;
}
@@ -605,11 +605,21 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
sch = to_subchannel(cdev->dev.parent);
if (!sch->schib.pmcw.ena)
return -EINVAL;
+ if (cdev->private->state == DEV_STATE_VERIFY) {
+ /* Remember to fake irb when finished. */
+ if (!cdev->private->flags.fake_irb) {
+ cdev->private->flags.fake_irb = FAKE_TM_IRB;
+ cdev->private->intparm = intparm;
+ return 0;
+ } else
+ /* There's already a fake I/O around. */
+ return -EBUSY;
+ }
if (cdev->private->state != DEV_STATE_ONLINE)
return -EIO;
- /* Adjust requested path mask to excluded varied off paths. */
+ /* Adjust requested path mask to exclude unusable paths. */
if (lpm) {
- lpm &= sch->opm;
+ lpm &= sch->lpm;
if (lpm == 0)
return -EACCES;
}
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 2ebb492a5c17..76253dfcc1be 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -111,6 +111,9 @@ enum cdev_todo {
CDEV_TODO_UNREG_EVAL,
};
+#define FAKE_CMD_IRB 1
+#define FAKE_TM_IRB 2
+
struct ccw_device_private {
struct ccw_device *cdev;
struct subchannel *sch;
@@ -138,7 +141,7 @@ struct ccw_device_private {
unsigned int doverify:1; /* delayed path verification */
unsigned int donotify:1; /* call notify function */
unsigned int recog_done:1; /* dev. recog. complete */
- unsigned int fake_irb:1; /* deliver faked irb */
+ unsigned int fake_irb:2; /* deliver faked irb */
unsigned int resuming:1; /* recognition while resume */
unsigned int pgroup:1; /* pathgroup is set up */
unsigned int mpath:1; /* multipathing is set up */
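Editorial note: widening fake_irb from one bit to two lets the field double as a presence flag and a type selector (0 = none, FAKE_CMD_IRB = 1, FAKE_TM_IRB = 2), which is what create_fake_irb() in the device_fsm.c hunk consumes. A schematic consumer, with a hypothetical structure name and reusing create_fake_irb() from the hunk above:

struct dev_priv {
	unsigned int fake_irb:2;	/* 0 = none; else selects cmd- or tm-mode IRB */
	struct irb irb;
};

static void deliver_pending_fake_irb(struct dev_priv *priv)
{
	if (!priv->fake_irb)
		return;				/* nothing was deferred */
	create_fake_irb(&priv->irb, priv->fake_irb);
	priv->fake_irb = 0;			/* consume exactly once */
}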
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ec94f049e995..96bbe9d12a79 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1552,6 +1552,8 @@ static void ap_reset(struct ap_device *ap_dev)
rc = ap_init_queue(ap_dev->qid);
if (rc == -ENODEV)
ap_dev->unregistered = 1;
+ else
+ __ap_schedule_poll_timer();
}
static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 11f07f888223..b79576b64f45 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -55,6 +55,10 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+ /* if previous slave_alloc returned early, there is nothing to do */
+ if (!zfcp_sdev->port)
+ return;
+
zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
put_device(&zfcp_sdev->port->dev);
}
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index 5f94d22c491e..542668292900 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -233,13 +233,9 @@ int bbc_i2c_write_buf(struct bbc_i2c_client *client,
int ret = 0;
while (len > 0) {
- int err = bbc_i2c_writeb(client, *buf, off);
-
- if (err < 0) {
- ret = err;
+ ret = bbc_i2c_writeb(client, *buf, off);
+ if (ret < 0)
break;
- }
-
len--;
buf++;
off++;
@@ -253,11 +249,9 @@ int bbc_i2c_read_buf(struct bbc_i2c_client *client,
int ret = 0;
while (len > 0) {
- int err = bbc_i2c_readb(client, buf, off);
- if (err < 0) {
- ret = err;
+ ret = bbc_i2c_readb(client, buf, off);
+ if (ret < 0)
break;
- }
len--;
buf++;
off++;
@@ -422,17 +416,6 @@ static struct platform_driver bbc_i2c_driver = {
.remove = __devexit_p(bbc_i2c_remove),
};
-static int __init bbc_i2c_init(void)
-{
- return platform_driver_register(&bbc_i2c_driver);
-}
-
-static void __exit bbc_i2c_exit(void)
-{
- platform_driver_unregister(&bbc_i2c_driver);
-}
-
-module_init(bbc_i2c_init);
-module_exit(bbc_i2c_exit);
+module_platform_driver(bbc_i2c_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 965a1fccd66a..4b9939726c34 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -275,15 +275,4 @@ static struct platform_driver d7s_driver = {
.remove = __devexit_p(d7s_remove),
};
-static int __init d7s_init(void)
-{
- return platform_driver_register(&d7s_driver);
-}
-
-static void __exit d7s_exit(void)
-{
- platform_driver_unregister(&d7s_driver);
-}
-
-module_init(d7s_init);
-module_exit(d7s_exit);
+module_platform_driver(d7s_driver);
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index be7b4e56154f..339fd6f65eda 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -1138,16 +1138,6 @@ static struct platform_driver envctrl_driver = {
.remove = __devexit_p(envctrl_remove),
};
-static int __init envctrl_init(void)
-{
- return platform_driver_register(&envctrl_driver);
-}
-
-static void __exit envctrl_exit(void)
-{
- platform_driver_unregister(&envctrl_driver);
-}
+module_platform_driver(envctrl_driver);
-module_init(envctrl_init);
-module_exit(envctrl_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 73dd4e7afaaa..826157f38694 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -216,16 +216,6 @@ static struct platform_driver flash_driver = {
.remove = __devexit_p(flash_remove),
};
-static int __init flash_init(void)
-{
- return platform_driver_register(&flash_driver);
-}
-
-static void __exit flash_cleanup(void)
-{
- platform_driver_unregister(&flash_driver);
-}
+module_platform_driver(flash_driver);
-module_init(flash_init);
-module_exit(flash_cleanup);
MODULE_LICENSE("GPL");
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index ebce9639a26a..0b31658ccde5 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -435,16 +435,6 @@ static struct platform_driver uctrl_driver = {
};
-static int __init uctrl_init(void)
-{
- return platform_driver_register(&uctrl_driver);
-}
-
-static void __exit uctrl_exit(void)
-{
- platform_driver_unregister(&uctrl_driver);
-}
+module_platform_driver(uctrl_driver);
-module_init(uctrl_init);
-module_exit(uctrl_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index dba72a4e6a1c..1ad0b8225560 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1906,18 +1906,19 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
spin_lock(&session->lock);
task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
- if (!task) {
+ if (!task || !task->sc) {
spin_unlock(&session->lock);
return -EINVAL;
}
sc = task->sc;
- spin_unlock(&session->lock);
if (!blk_rq_cpu_valid(sc->request))
cpu = smp_processor_id();
else
cpu = sc->request->cpu;
+ spin_unlock(&session->lock);
+
p = &per_cpu(bnx2i_percpu, cpu);
spin_lock(&p->p_work_lock);
if (unlikely(!p->iothread)) {
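Editorial note: the bnx2i change extends the session->lock critical section so task->sc and its request are only dereferenced while the lock still protects them, and validating !task->sc up front closes the window in which the command could be completed and freed underneath us. Schematically, with hypothetical helper names:

	spin_lock(&session->lock);
	task = lookup_task(session, itt);
	if (!task || !task->sc) {		/* validate everything the lock protects */
		spin_unlock(&session->lock);
		return -EINVAL;
	}
	sc  = task->sc;
	cpu = blk_rq_cpu_valid(sc->request) ? sc->request->cpu : smp_processor_id();
	spin_unlock(&session->lock);		/* drop the lock only after the last use */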
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cefbe44bb84a..8d67467dd9ce 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -31,6 +31,8 @@
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
+#include <net/dcbnl.h>
+#include <net/dcbevent.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
@@ -101,6 +103,8 @@ static int fcoe_ddp_done(struct fc_lport *, u16);
static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
unsigned int);
static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+ ulong event, void *ptr);
static bool fcoe_match(struct net_device *netdev);
static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
@@ -129,6 +133,11 @@ static struct notifier_block fcoe_cpu_notifier = {
.notifier_call = fcoe_cpu_callback,
};
+/* notification function for DCB events */
+static struct notifier_block dcb_notifier = {
+ .notifier_call = fcoe_dcb_app_notification,
+};
+
static struct scsi_transport_template *fcoe_nport_scsi_transport;
static struct scsi_transport_template *fcoe_vport_scsi_transport;
@@ -1522,6 +1531,8 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
skb_reset_network_header(skb);
skb->mac_len = elen;
skb->protocol = htons(ETH_P_FCOE);
+ skb->priority = port->priority;
+
if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
skb->vlan_tci = VLAN_TAG_PRESENT |
@@ -1624,6 +1635,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
stats->InvalidCRCCount++;
if (stats->InvalidCRCCount < 5)
printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+ put_cpu();
return -EINVAL;
}
@@ -1746,6 +1758,7 @@ int fcoe_percpu_receive_thread(void *arg)
*/
static void fcoe_dev_setup(void)
{
+ register_dcbevent_notifier(&dcb_notifier);
register_netdevice_notifier(&fcoe_notifier);
}
@@ -1754,9 +1767,69 @@ static void fcoe_dev_setup(void)
*/
static void fcoe_dev_cleanup(void)
{
+ unregister_dcbevent_notifier(&dcb_notifier);
unregister_netdevice_notifier(&fcoe_notifier);
}
+static struct fcoe_interface *
+fcoe_hostlist_lookup_realdev_port(struct net_device *netdev)
+{
+ struct fcoe_interface *fcoe;
+ struct net_device *real_dev;
+
+ list_for_each_entry(fcoe, &fcoe_hostlist, list) {
+ if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+ real_dev = vlan_dev_real_dev(fcoe->netdev);
+ else
+ real_dev = fcoe->netdev;
+
+ if (netdev == real_dev)
+ return fcoe;
+ }
+ return NULL;
+}
+
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+ ulong event, void *ptr)
+{
+ struct dcb_app_type *entry = ptr;
+ struct fcoe_interface *fcoe;
+ struct net_device *netdev;
+ struct fcoe_port *port;
+ int prio;
+
+ if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE)
+ return NOTIFY_OK;
+
+ netdev = dev_get_by_index(&init_net, entry->ifindex);
+ if (!netdev)
+ return NOTIFY_OK;
+
+ fcoe = fcoe_hostlist_lookup_realdev_port(netdev);
+ dev_put(netdev);
+ if (!fcoe)
+ return NOTIFY_OK;
+
+ if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
+ prio = ffs(entry->app.priority) - 1;
+ else
+ prio = entry->app.priority;
+
+ if (prio < 0)
+ return NOTIFY_OK;
+
+ if (entry->app.protocol == ETH_P_FIP ||
+ entry->app.protocol == ETH_P_FCOE)
+ fcoe->ctlr.priority = prio;
+
+ if (entry->app.protocol == ETH_P_FCOE) {
+ port = lport_priv(fcoe->ctlr.lp);
+ port->priority = prio;
+ }
+
+ return NOTIFY_OK;
+}
+
/**
* fcoe_device_notification() - Handler for net device events
* @notifier: The context of the notification
@@ -1965,6 +2038,46 @@ static bool fcoe_match(struct net_device *netdev)
}
/**
+ * fcoe_dcb_create() - Initialize DCB attributes and hooks
+ * @fcoe: The FCoE interface for which DCB APP priorities are set up
+ */
+static void fcoe_dcb_create(struct fcoe_interface *fcoe)
+{
+#ifdef CONFIG_DCB
+ int dcbx;
+ u8 fup, up;
+ struct net_device *netdev = fcoe->realdev;
+ struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
+ struct dcb_app app = {
+ .priority = 0,
+ .protocol = ETH_P_FCOE
+ };
+
+ /* setup DCB priority attributes. */
+ if (netdev && netdev->dcbnl_ops && netdev->dcbnl_ops->getdcbx) {
+ dcbx = netdev->dcbnl_ops->getdcbx(netdev);
+
+ if (dcbx & DCB_CAP_DCBX_VER_IEEE) {
+ app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
+ up = dcb_ieee_getapp_mask(netdev, &app);
+ app.protocol = ETH_P_FIP;
+ fup = dcb_ieee_getapp_mask(netdev, &app);
+ } else {
+ app.selector = DCB_APP_IDTYPE_ETHTYPE;
+ up = dcb_getapp(netdev, &app);
+ app.protocol = ETH_P_FIP;
+ fup = dcb_getapp(netdev, &app);
+ }
+
+ port->priority = ffs(up) ? ffs(up) - 1 : 0;
+ fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
+ }
+#endif
+}
+
+/**
* fcoe_create() - Create a fcoe interface
* @netdev : The net_device object the Ethernet interface to create on
* @fip_mode: The FIP mode for this creation
@@ -2007,6 +2120,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
/* Make this the "master" N_Port */
fcoe->ctlr.lp = lport;
+ /* setup DCB priority attributes. */
+ fcoe_dcb_create(fcoe);
+
/* add to lports list */
fcoe_hostlist_add(lport);
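Editorial note: fcoe_dcb_create() and the DCB notifier above both reduce an APP-priority bitmask to a single 802.1p priority with ffs(), falling back to 0 when no bit is set. A tiny helper capturing that conversion, offered as a sketch rather than the driver's API:

#include <linux/bitops.h>
#include <linux/types.h>

/* Bit n set in the mask means priority n is allowed; pick the lowest, default 0. */
static u8 prio_from_app_mask(u8 mask)
{
	return mask ? ffs(mask) - 1 : 0;
}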
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index c74c4b8e71ef..e7522dcc296e 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -320,6 +320,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
skb_put(skb, sizeof(*sol));
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
fip->send(fip, skb);
@@ -474,6 +475,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
}
skb_put(skb, len);
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
fip->send(fip, skb);
@@ -566,6 +568,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
cap->fip.fip_dl_len = htons(dlen / FIP_BPW);
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
return 0;
@@ -1911,6 +1914,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
skb_put(skb, len);
skb->protocol = htons(ETH_P_FIP);
+ skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 4e041f6d808c..d570573b7963 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -4335,7 +4335,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
/* insert into event log */
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
sizeof(Mpi2EventDataSasDeviceStatusChange_t);
- event_reply = kzalloc(sz, GFP_KERNEL);
+ event_reply = kzalloc(sz, GFP_ATOMIC);
if (!event_reply) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
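Editorial note: the mpt2sas one-liner swaps GFP_KERNEL for GFP_ATOMIC because this path can run in atomic context (interrupt, or with a spinlock held), where a sleeping allocation is illegal. A generic sketch of the constraint, with hypothetical names:

#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(event_lock);

static void *alloc_event_atomic(size_t sz)
{
	unsigned long flags;
	void *buf;

	spin_lock_irqsave(&event_lock, flags);
	buf = kzalloc(sz, GFP_ATOMIC);	/* never sleeps; may fail under memory pressure */
	spin_unlock_irqrestore(&event_lock, flags);
	return buf;
}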
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index ac326c41e931..6465dae5883a 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1762,12 +1762,31 @@ qla2x00_get_host_port_state(struct Scsi_Host *shost)
scsi_qla_host_t *vha = shost_priv(shost);
struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
- if (!base_vha->flags.online)
+ if (!base_vha->flags.online) {
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
- else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
- fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
- else
+ return;
+ }
+
+ switch (atomic_read(&base_vha->loop_state)) {
+ case LOOP_UPDATE:
+ fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+ break;
+ case LOOP_DOWN:
+ if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
+ fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+ else
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ break;
+ case LOOP_DEAD:
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ break;
+ case LOOP_READY:
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ break;
+ default:
+ fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+ break;
+ }
}
static int
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 9df4787715c0..f3cddd5800c3 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -12,17 +12,17 @@
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
* | Module Init and Probe | 0x0116 | |
- * | Mailbox commands | 0x1129 | |
+ * | Mailbox commands | 0x112b | |
* | Device Discovery | 0x2083 | |
* | Queue Command and IO tracing | 0x302e | 0x3008 |
* | DPC Thread | 0x401c | |
* | Async Events | 0x5059 | |
- * | Timer Routines | 0x600d | |
+ * | Timer Routines | 0x6010 | 0x600e,0x600f |
* | User Space Interactions | 0x709d | |
- * | Task Management | 0x8041 | |
+ * | Task Management | 0x8041 | 0x800b |
* | AER/EEH | 0x900f | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific | 0xb051 | |
+ * | ISP82XX Specific | 0xb052 | |
* | MultiQ | 0xc00b | |
* | Misc | 0xd00b | |
* ----------------------------------------------------------------------
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index ce32d8135c9e..c0c11afb685c 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -578,6 +578,7 @@ extern int qla82xx_check_md_needed(scsi_qla_host_t *);
extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
extern char *qdev_state(uint32_t);
+extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
/* BSG related functions */
extern int qla24xx_bsg_request(struct fc_bsg_job *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f03e915f1877..54ea68cec4c5 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1509,7 +1509,8 @@ enable_82xx_npiv:
&ha->fw_xcb_count, NULL, NULL,
&ha->max_npiv_vports, NULL);
- if (!fw_major_version && ql2xallocfwdump)
+ if (!fw_major_version && ql2xallocfwdump
+ && !IS_QLA82XX(ha))
qla2x00_alloc_fw_dump(vha);
}
} else {
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index dbec89622a0f..a4b267e60a35 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -120,11 +120,10 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
* Returns a pointer to the continuation type 1 IOCB packet.
*/
static inline cont_a64_entry_t *
-qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
+qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
cont_a64_entry_t *cont_pkt;
- struct req_que *req = vha->req;
/* Adjust ring index. */
req->ring_index++;
if (req->ring_index == req->length) {
@@ -292,7 +291,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -684,7 +683,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -2070,7 +2069,8 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
* Five DSDs are available in the Cont.
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+ vha->hw->req_q_map[0]);
cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
avail_dsds = 5;
cont_iocb_prsnt = 1;
@@ -2096,6 +2096,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
int index;
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
int loop_iterartion = 0;
int cont_iocb_prsnt = 0;
@@ -2141,7 +2142,8 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
* Five DSDs are available in the Cont.
* Type 1 IOCB.
*/
- cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+ cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+ ha->req_q_map[0]);
cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
avail_dsds = 5;
cont_iocb_prsnt = 1;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2516adf1aeea..7b91b290ffd6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1741,7 +1741,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
resid, scsi_bufflen(cp));
cp->result = DID_ERROR << 16 | lscsi_status;
- break;
+ goto check_scsi_status;
}
if (!lscsi_status &&
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 3b3cec9f6ac2..82a33533ed26 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -79,8 +79,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
ql_log(ql_log_warn, base_vha, 0x1004,
"FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
- rval = QLA_FUNCTION_FAILED;
- goto premature_exit;
+ return QLA_FUNCTION_TIMEOUT;
}
/*
@@ -163,6 +162,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
+ ha->flags.mbox_busy = 0;
ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
@@ -188,6 +188,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
+ ha->flags.mbox_busy = 0;
ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
@@ -302,7 +303,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+ if (IS_QLA82XX(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x112a,
+ "disabling pause transmit on port "
+ "0 & 1.\n");
+ qla82xx_wr_32(ha,
+ QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0|
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ }
ql_log(ql_log_info, base_vha, 0x101c,
"Mailbox cmd timeout occured. "
"Scheduling ISP abort eeh_busy=0x%x.\n",
@@ -318,7 +327,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+ if (IS_QLA82XX(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x112b,
+ "disabling pause transmit on port "
+ "0 & 1.\n");
+ qla82xx_wr_32(ha,
+ QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0|
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ }
ql_log(ql_log_info, base_vha, 0x101e,
"Mailbox cmd timeout occured. "
"Scheduling ISP abort.\n");
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 94bded5ddce4..03554934b0a5 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -3817,6 +3817,20 @@ exit:
return rval;
}
+void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ha->flags.mbox_busy) {
+ ha->flags.mbox_int = 1;
+ ha->flags.mbox_busy = 0;
+ ql_log(ql_log_warn, vha, 0x6010,
+ "Doing premature completion of mbx command.\n");
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
+ complete(&ha->mbx_intr_comp);
+ }
+}
+
void qla82xx_watchdog(scsi_qla_host_t *vha)
{
uint32_t dev_state, halt_status;
@@ -3839,9 +3853,13 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
qla2xxx_wake_dpc(vha);
} else {
if (qla82xx_check_fw_alive(vha)) {
+ ql_dbg(ql_dbg_timer, vha, 0x6011,
+ "disabling pause transmit on port 0 & 1.\n");
+ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1);
halt_status = qla82xx_rd_32(ha,
QLA82XX_PEG_HALT_STATUS1);
- ql_dbg(ql_dbg_timer, vha, 0x6005,
+ ql_log(ql_log_info, vha, 0x6005,
"dumping hw/fw registers:.\n "
" PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
" PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
@@ -3858,6 +3876,11 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
QLA82XX_CRB_PEG_NET_3 + 0x3c),
qla82xx_rd_32(ha,
QLA82XX_CRB_PEG_NET_4 + 0x3c));
+ if (LSW(MSB(halt_status)) == 0x67)
+ ql_log(ql_log_warn, vha, 0xb052,
+ "Firmware aborted with "
+ "error code 0x00006700. Device is "
+ "being reset.\n");
if (halt_status & HALT_STATUS_UNRECOVERABLE) {
set_bit(ISP_UNRECOVERABLE,
&vha->dpc_flags);
@@ -3869,16 +3892,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
}
qla2xxx_wake_dpc(vha);
ha->flags.isp82xx_fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- ql_log(ql_log_warn, vha, 0x6007,
- "Due to FW hung, doing "
- "premature completion of mbx "
- "command.\n");
- if (test_bit(MBX_INTR_WAIT,
- &ha->mbx_cmd_flags))
- complete(&ha->mbx_intr_comp);
- }
+ ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
+ qla82xx_clear_pending_mbx(vha);
}
}
}
@@ -4073,10 +4088,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
msleep(1000);
if (qla82xx_check_fw_alive(vha)) {
ha->flags.isp82xx_fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- complete(&ha->mbx_intr_comp);
- }
+ qla82xx_clear_pending_mbx(vha);
break;
}
}
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 57820c199bc2..57a226be339a 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1173,4 +1173,8 @@ struct qla82xx_md_entry_queue {
static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
0x410000B8, 0x410000BC };
+
+#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
+
#endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index fd14c7bfc626..f9e5b85e84d8 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -201,12 +201,12 @@ MODULE_PARM_DESC(ql2xmdcapmask,
"Set the Minidump driver capture mask level. "
"Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
-int ql2xmdenable;
+int ql2xmdenable = 1;
module_param(ql2xmdenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdenable,
"Enable/disable MiniDump. "
- "0 (Default) - MiniDump disabled. "
- "1 - MiniDump enabled.");
+ "0 - MiniDump disabled. "
+ "1 (Default) - MiniDump enabled.");
/*
* SCSI host template entry points
@@ -423,6 +423,7 @@ fail2:
qla25xx_delete_queues(vha);
destroy_workqueue(ha->wq);
ha->wq = NULL;
+ vha->req = ha->req_q_map[0];
fail:
ha->mqenable = 0;
kfree(ha->req_q_map);
@@ -814,49 +815,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
return return_status;
}
-/*
- * qla2x00_wait_for_loop_ready
- * Wait for MAX_LOOP_TIMEOUT(5 min) value for loop
- * to be in LOOP_READY state.
- * Input:
- * ha - pointer to host adapter structure
- *
- * Note:
- * Does context switching-Release SPIN_LOCK
- * (if any) before calling this routine.
- *
- *
- * Return:
- * Success (LOOP_READY) : 0
- * Failed (LOOP_NOT_READY) : 1
- */
-static inline int
-qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
-{
- int return_status = QLA_SUCCESS;
- unsigned long loop_timeout ;
- struct qla_hw_data *ha = vha->hw;
- scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
-
- /* wait for 5 min at the max for loop to be ready */
- loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
-
- while ((!atomic_read(&base_vha->loop_down_timer) &&
- atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
- atomic_read(&base_vha->loop_state) != LOOP_READY) {
- if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
- return_status = QLA_FUNCTION_FAILED;
- break;
- }
- msleep(1000);
- if (time_after_eq(jiffies, loop_timeout)) {
- return_status = QLA_FUNCTION_FAILED;
- break;
- }
- }
- return (return_status);
-}
-
static void
sp_get(struct srb *sp)
{
@@ -1035,12 +993,6 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
"Wait for hba online failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
}
- err = 1;
- if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x800b,
- "Wait for loop ready failed for cmd=%p.\n", cmd);
- goto eh_reset_failed;
- }
err = 2;
if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
!= QLA_SUCCESS) {
@@ -1137,10 +1089,9 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
goto eh_bus_reset_done;
}
- if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
- if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
- ret = SUCCESS;
- }
+ if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
+ ret = SUCCESS;
+
if (ret == FAILED)
goto eh_bus_reset_done;
@@ -1206,15 +1157,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
goto eh_host_reset_lock;
- /*
- * Fixme-may be dpc thread is active and processing
- * loop_resync,so wait a while for it to
- * be completed and then issue big hammer.Otherwise
- * it may cause I/O failure as big hammer marks the
- * devices as lost kicking of the port_down_timer
- * while dpc is stuck for the mailbox to complete.
- */
- qla2x00_wait_for_loop_ready(vha);
if (vha != base_vha) {
if (qla2x00_vp_abort_isp(vha))
goto eh_host_reset_lock;
@@ -1297,16 +1239,13 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
qla2x00_mark_all_devices_lost(vha, 0);
- qla2x00_wait_for_loop_ready(vha);
}
if (ha->flags.enable_lip_reset) {
ret = qla2x00_lip_reset(vha);
- if (ret != QLA_SUCCESS) {
+ if (ret != QLA_SUCCESS)
ql_dbg(ql_dbg_taskm, vha, 0x802e,
"lip_reset failed (%d).\n", ret);
- } else
- qla2x00_wait_for_loop_ready(vha);
}
/* Issue marker command only when we are going to start the I/O */
@@ -4070,13 +4009,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
/* For ISP82XX complete any pending mailbox cmd */
if (IS_QLA82XX(ha)) {
ha->flags.isp82xx_fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- ql_dbg(ql_dbg_aer, vha, 0x9001,
- "Due to pci channel io frozen, doing premature "
- "completion of mbx command.\n");
- complete(&ha->mbx_intr_comp);
- }
+ ql_dbg(ql_dbg_aer, vha, 0x9001, "Pci channel io frozen\n");
+ qla82xx_clear_pending_mbx(vha);
}
qla2x00_free_irqs(vha);
pci_disable_device(pdev);
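Editorial note: the removed qla2x00_wait_for_loop_ready() was a classic jiffies-deadline poll loop, and dropping it and its callers means error-handling resets no longer block for up to five minutes waiting on a loop that may never come ready. For reference, the general pattern it implemented can be sketched as:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Poll cond(arg) once a second until it holds or secs seconds elapse. */
static int wait_for_condition(bool (*cond)(void *arg), void *arg, unsigned int secs)
{
	unsigned long deadline = jiffies + secs * HZ;

	while (!cond(arg)) {
		if (time_after_eq(jiffies, deadline))
			return -ETIMEDOUT;
		msleep(1000);
	}
	return 0;
}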
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 13b6357c1fa2..23f33a6d52d7 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.07.07-k"
+#define QLA2XXX_VERSION "8.03.07.12-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index ace637bf254e..fd5edc6e166d 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -147,7 +147,7 @@
#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alias name size */
#define ISCSI_NAME_SIZE 0xE0 /* ISCSI Name size */
-#define QL4_SESS_RECOVERY_TMO 30 /* iSCSI session */
+#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */
/* recovery timeout */
#define LSDW(x) ((u32)((u64)(x)))
@@ -173,6 +173,8 @@
#define ISNS_DEREG_TOV 5
#define HBA_ONLINE_TOV 30
#define DISABLE_ACB_TOV 30
+#define IP_CONFIG_TOV 30
+#define LOGIN_TOV 12
#define MAX_RESET_HA_RETRIES 2
@@ -240,6 +242,45 @@ struct ddb_entry {
uint16_t fw_ddb_index; /* DDB firmware index */
uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */
+ uint16_t ddb_type;
+#define FLASH_DDB 0x01
+
+ struct dev_db_entry fw_ddb_entry;
+ int (*unblock_sess)(struct iscsi_cls_session *cls_session);
+ int (*ddb_change)(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state);
+
+ /* Driver Re-login */
+ unsigned long flags; /* DDB Flags */
+ uint16_t default_relogin_timeout; /* Max time to wait for
+ * relogin to complete */
+ atomic_t retry_relogin_timer; /* Min Time between relogins
+ * (4000 only) */
+ atomic_t relogin_timer; /* Max Time to wait for
+ * relogin to complete */
+ atomic_t relogin_retry_count; /* Num of times relogin has been
+ * retried */
+ uint32_t default_time2wait; /* Default Min time between
+ * relogins (+aens) */
+
+};
+
+struct qla_ddb_index {
+ struct list_head list;
+ uint16_t fw_ddb_idx;
+ struct dev_db_entry fw_ddb;
+};
+
+#define DDB_IPADDR_LEN 64
+
+struct ql4_tuple_ddb {
+ int port;
+ int tpgt;
+ char ip_addr[DDB_IPADDR_LEN];
+ char iscsi_name[ISCSI_NAME_SIZE];
+ uint16_t options;
+#define DDB_OPT_IPV6 0x0e0e
+#define DDB_OPT_IPV4 0x0f0f
};
/*
@@ -411,7 +452,7 @@ struct scsi_qla_host {
#define AF_FW_RECOVERY 19 /* 0x00080000 */
#define AF_EEH_BUSY 20 /* 0x00100000 */
#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
-
+#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */
unsigned long dpc_flags;
#define DPC_RESET_HA 1 /* 0x00000002 */
@@ -604,6 +645,7 @@ struct scsi_qla_host {
uint16_t bootload_minor;
uint16_t bootload_patch;
uint16_t bootload_build;
+ uint16_t def_timeout; /* Default login timeout */
uint32_t flash_state;
#define QLFLASH_WAITING 0
@@ -623,6 +665,11 @@ struct scsi_qla_host {
uint16_t iscsi_pci_func_cnt;
uint8_t model_name[16];
struct completion disable_acb_comp;
+ struct dma_pool *fw_ddb_dma_pool;
+#define DDB_DMA_BLOCK_SIZE 512
+ uint16_t pri_ddb_idx;
+ uint16_t sec_ddb_idx;
+ int is_reset;
};
struct ql4_task_data {
@@ -835,6 +882,10 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
/*---------------------------------------------------------------------------*/
/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
+
+#define INIT_ADAPTER 0
+#define RESET_ADAPTER 1
+
#define PRESERVE_DDB_LIST 0
#define REBUILD_DDB_LIST 1
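Editorial note: the new ddb_entry fields unblock_sess and ddb_change are per-entry function pointers, so the AEN handler no longer needs to know whether a target came from flash or from a user-space login; it simply dispatches. A minimal illustration of that shape (hypothetical struct, types borrowed from the driver, not its exact layout):

struct ddb_like {
	int (*ddb_change)(struct scsi_qla_host *ha, uint32_t idx,
			  struct ddb_like *entry, uint32_t state);
	int (*unblock_sess)(struct iscsi_cls_session *sess);
};

static void handle_state_aen(struct scsi_qla_host *ha, uint32_t idx,
			     struct ddb_like *entry, uint32_t state)
{
	entry->ddb_change(ha, idx, entry, state);	/* flash or non-flash variant */
}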
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index cbd5a20dbbd1..4ac07f882521 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -12,6 +12,7 @@
#define MAX_PRST_DEV_DB_ENTRIES 64
#define MIN_DISC_DEV_DB_ENTRY MAX_PRST_DEV_DB_ENTRIES
#define MAX_DEV_DB_ENTRIES 512
+#define MAX_DEV_DB_ENTRIES_40XX 256
/*************************************************************************
*
@@ -604,6 +605,13 @@ struct addr_ctrl_blk {
uint8_t res14[140]; /* 274-2FF */
};
+#define IP_ADDR_COUNT 4 /* Total 4 IP address supported in one interface
+ * One IPv4, one IPv6 link local and 2 IPv6
+ */
+
+#define IP_STATE_MASK 0x0F000000
+#define IP_STATE_SHIFT 24
+
struct init_fw_ctrl_blk {
struct addr_ctrl_blk pri;
/* struct addr_ctrl_blk sec;*/
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 160db9d5ea21..d0dd4b330206 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -13,7 +13,7 @@ struct iscsi_cls_conn;
int qla4xxx_hw_reset(struct scsi_qla_host *ha);
int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb);
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha);
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset);
int qla4xxx_soft_reset(struct scsi_qla_host *ha);
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
@@ -153,10 +153,13 @@ int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
uint32_t *mbx_sts);
int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index);
int qla4xxx_send_passthru0(struct iscsi_task *task);
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha);
int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
uint16_t stats_size, dma_addr_t stats_dma);
void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry);
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry);
int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
@@ -169,11 +172,22 @@ int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
uint32_t region, uint32_t field0,
uint32_t field1);
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index);
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state);
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state);
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);
/* BSG Functions */
int qla4xxx_bsg_request(struct bsg_job *bsg_job);
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
+
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
extern int ql4xenablemsix;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 3075fbaef553..1bdfa8120ac8 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -773,22 +773,24 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
* be freed so that when login happens from user space there are free DDB
* indices available.
**/
-static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
{
int max_ddbs;
int ret;
uint32_t idx = 0, next_idx = 0;
uint32_t state = 0, conn_err = 0;
- max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
MAX_DEV_DB_ENTRIES;
for (idx = 0; idx < max_ddbs; idx = next_idx) {
ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
&next_idx, &state, &conn_err,
NULL, NULL);
- if (ret == QLA_ERROR)
+ if (ret == QLA_ERROR) {
+ next_idx++;
continue;
+ }
if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
state == DDB_DS_SESSION_FAILED) {
DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -804,7 +806,6 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
}
}
-
/**
* qla4xxx_initialize_adapter - initiailizes hba
* @ha: Pointer to host adapter structure.
@@ -812,7 +813,7 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
* This routine parforms all of the steps necessary to initialize the adapter.
*
**/
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
{
int status = QLA_ERROR;
@@ -840,7 +841,8 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
if (status == QLA_ERROR)
goto exit_init_hba;
- qla4xxx_free_ddb_index(ha);
+ if (is_reset == RESET_ADAPTER)
+ qla4xxx_build_ddb_list(ha, is_reset);
set_bit(AF_ONLINE, &ha->flags);
exit_init_hba:
@@ -855,38 +857,12 @@ exit_init_hba:
return status;
}
-/**
- * qla4xxx_process_ddb_changed - process ddb state change
- * @ha - Pointer to host adapter structure.
- * @fw_ddb_index - Firmware's device database index
- * @state - Device state
- *
- * This routine processes a Decive Database Changed AEN Event.
- **/
-int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
- uint32_t state, uint32_t conn_err)
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state)
{
- struct ddb_entry * ddb_entry;
uint32_t old_fw_ddb_device_state;
int status = QLA_ERROR;
- /* check for out of range index */
- if (fw_ddb_index >= MAX_DDB_ENTRIES)
- goto exit_ddb_event;
-
- /* Get the corresponging ddb entry */
- ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
- /* Device does not currently exist in our database. */
- if (ddb_entry == NULL) {
- ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
- __func__, fw_ddb_index);
-
- if (state == DDB_DS_NO_CONNECTION_ACTIVE)
- clear_bit(fw_ddb_index, ha->ddb_idx_map);
-
- goto exit_ddb_event;
- }
-
old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: DDB - old state = 0x%x, new state = 0x%x for "
@@ -900,9 +876,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
switch (state) {
case DDB_DS_SESSION_ACTIVE:
case DDB_DS_DISCOVERY:
- iscsi_conn_start(ddb_entry->conn);
- iscsi_conn_login_event(ddb_entry->conn,
- ISCSI_CONN_STATE_LOGGED_IN);
+ ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_param(ha, ddb_entry);
status = QLA_SUCCESS;
break;
@@ -936,9 +910,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
switch (state) {
case DDB_DS_SESSION_ACTIVE:
case DDB_DS_DISCOVERY:
- iscsi_conn_start(ddb_entry->conn);
- iscsi_conn_login_event(ddb_entry->conn,
- ISCSI_CONN_STATE_LOGGED_IN);
+ ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_param(ha, ddb_entry);
status = QLA_SUCCESS;
break;
@@ -954,7 +926,198 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
__func__));
break;
}
+ return status;
+}
+
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry)
+{
+ /*
+ * This triggers a relogin. After the relogin_timer
+ * expires, the relogin gets scheduled. We must wait a
+ * minimum amount of time since receiving an 0x8014 AEN
+ * with failed device_state or a logout response before
+ * we can issue another relogin.
+ *
+ * Firmware pads this timeout: (time2wait +1).
+ * Driver retry to login should be longer than F/W.
+ * Otherwise F/W will fail
+ * set_ddb() mbx cmd with 0x4005 since it still
+ * counting down its time2wait.
+ */
+ atomic_set(&ddb_entry->relogin_timer, 0);
+ atomic_set(&ddb_entry->retry_relogin_timer,
+ ddb_entry->default_time2wait + 4);
+
+}
+
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+ struct ddb_entry *ddb_entry, uint32_t state)
+{
+ uint32_t old_fw_ddb_device_state;
+ int status = QLA_ERROR;
+
+ old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: DDB - old state = 0x%x, new state = 0x%x for "
+ "index [%d]\n", __func__,
+ ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
+
+ ddb_entry->fw_ddb_device_state = state;
+
+ switch (old_fw_ddb_device_state) {
+ case DDB_DS_LOGIN_IN_PROCESS:
+ case DDB_DS_NO_CONNECTION_ACTIVE:
+ switch (state) {
+ case DDB_DS_SESSION_ACTIVE:
+ ddb_entry->unblock_sess(ddb_entry->sess);
+ qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ case DDB_DS_SESSION_FAILED:
+ iscsi_block_session(ddb_entry->sess);
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ case DDB_DS_SESSION_ACTIVE:
+ switch (state) {
+ case DDB_DS_SESSION_FAILED:
+ iscsi_block_session(ddb_entry->sess);
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ case DDB_DS_SESSION_FAILED:
+ switch (state) {
+ case DDB_DS_SESSION_ACTIVE:
+ ddb_entry->unblock_sess(ddb_entry->sess);
+ qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ case DDB_DS_SESSION_FAILED:
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ status = QLA_SUCCESS;
+ break;
+ }
+ break;
+ default:
+ DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n",
+ __func__));
+ break;
+ }
+ return status;
+}
+
+/**
+ * qla4xxx_process_ddb_changed - process ddb state change
+ * @ha - Pointer to host adapter structure.
+ * @fw_ddb_index - Firmware's device database index
+ * @state - Device state
+ *
+ * This routine processes a Device Database Changed AEN Event.
+ **/
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
+ uint32_t fw_ddb_index,
+ uint32_t state, uint32_t conn_err)
+{
+ struct ddb_entry *ddb_entry;
+ int status = QLA_ERROR;
+
+ /* check for out of range index */
+ if (fw_ddb_index >= MAX_DDB_ENTRIES)
+ goto exit_ddb_event;
+
+ /* Get the corresponding ddb entry */
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
+ /* Device does not currently exist in our database. */
+ if (ddb_entry == NULL) {
+ ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
+ __func__, fw_ddb_index);
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE)
+ clear_bit(fw_ddb_index, ha->ddb_idx_map);
+
+ goto exit_ddb_event;
+ }
+
+ ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state);
exit_ddb_event:
return status;
}
+
+/**
+ * qla4xxx_login_flash_ddb - Login to target (DDB)
+ * @cls_session: Pointer to the session to login
+ *
+ * This routine logins to the target.
+ * Issues setddb and conn open mbx
+ **/
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_dma;
+ uint32_t mbx_sts = 0;
+ int ret;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (!test_bit(AF_LINK_UP, &ha->flags))
+ return;
+
+ if (ddb_entry->ddb_type != FLASH_DDB) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Skipping login to non FLASH DB"));
+ goto exit_login;
+ }
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+ goto exit_login;
+ }
+
+ if (ddb_entry->fw_ddb_index == INVALID_ENTRY) {
+ ret = qla4xxx_get_ddb_index(ha, &ddb_entry->fw_ddb_index);
+ if (ret == QLA_ERROR)
+ goto exit_login;
+
+ ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
+ ha->tot_ddbs++;
+ }
+
+ memcpy(fw_ddb_entry, &ddb_entry->fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+ ddb_entry->sess->target_id = ddb_entry->fw_ddb_index;
+
+ ret = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
+ fw_ddb_dma, &mbx_sts);
+ if (ret == QLA_ERROR) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Set DDB failed\n"));
+ goto exit_login;
+ }
+
+ ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
+ ret = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
+ if (ret == QLA_ERROR) {
+ ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
+ sess->targetname);
+ goto exit_login;
+ }
+
+exit_login:
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 4c2b84870392..c2593782fbbe 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -41,6 +41,16 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
return status;
}
+ if (is_qla40XX(ha)) {
+ if (test_bit(AF_HA_REMOVAL, &ha->flags)) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
+ "prematurely completing mbx cmd as "
+ "adapter removal detected\n",
+ ha->host_no, __func__));
+ return status;
+ }
+ }
+
if (is_qla8022(ha)) {
if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
@@ -413,6 +423,7 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
memcpy(ha->name_string, init_fw_cb->iscsi_name,
min(sizeof(ha->name_string),
sizeof(init_fw_cb->iscsi_name)));
+ ha->def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
/*memcpy(ha->alias, init_fw_cb->Alias,
min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
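Editorial note: in the ql4_os.c hunks below, DDB index allocation is factored into qla4xxx_get_ddb_index(), which walks the ddb_idx_map bitmap with find_first_zero_bit() and claims an index with test_and_set_bit(), retrying when it loses a race. The underlying pattern, reduced to a sketch with hypothetical parameters:

#include <linux/bitops.h>
#include <linux/errno.h>

/* Find a clear bit and claim it atomically; retry if another CPU raced us. */
static int alloc_index(unsigned long *map, unsigned int size)
{
	unsigned int idx;

	do {
		idx = find_first_zero_bit(map, size);
		if (idx >= size)
			return -ENOSPC;		/* bitmap is full */
	} while (test_and_set_bit(idx, map));

	return idx;
}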
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 30f31b127f33..4169c8baa112 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/iscsi_boot_sysfs.h>
+#include <linux/inet.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
@@ -31,6 +32,13 @@ static struct kmem_cache *srb_cachep;
/*
* Module parameter information and variables
*/
+int ql4xdisablesysfsboot = 1;
+module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xdisablesysfsboot,
+ "Set to disable exporting boot targets to sysfs\n"
+ " 0 - Export boot targets\n"
+ " 1 - Do not export boot targets (Default)");
+
int ql4xdontresethba = 0;
module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdontresethba,
@@ -63,7 +71,7 @@ static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
"Target Session Recovery Timeout.\n"
- " Default: 30 sec.");
+ " Default: 120 sec.");
static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
/*
@@ -415,7 +423,7 @@ static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
qla_ep = ep->dd_data;
ha = to_qla_host(qla_ep->host);
- if (adapter_up(ha))
+ if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
ret = 1;
return ret;
@@ -975,6 +983,150 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
}
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
+{
+ uint32_t mbx_sts = 0;
+ uint16_t tmp_ddb_index;
+ int ret;
+
+get_ddb_index:
+ tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
+
+ if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Free DDB index not available\n"));
+ ret = QLA_ERROR;
+ goto exit_get_ddb_index;
+ }
+
+ if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
+ goto get_ddb_index;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Found a free DDB index at %d\n", tmp_ddb_index));
+ ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
+ if (ret == QLA_ERROR) {
+ if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
+ ql4_printk(KERN_INFO, ha,
+ "DDB index = %d not available trying next\n",
+ tmp_ddb_index);
+ goto get_ddb_index;
+ }
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Free FW DDB not available\n"));
+ }
+
+ *ddb_index = tmp_ddb_index;
+
+exit_get_ddb_index:
+ return ret;
+}
+
+static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry,
+ char *existing_ipaddr,
+ char *user_ipaddr)
+{
+ uint8_t dst_ipaddr[IPv6_ADDR_LEN];
+ char formatted_ipaddr[DDB_IPADDR_LEN];
+ int status = QLA_SUCCESS, ret = 0;
+
+ if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
+ ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+ '\0', NULL);
+ if (ret == 0) {
+ status = QLA_ERROR;
+ goto out_match;
+ }
+ ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
+ } else {
+ ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+ '\0', NULL);
+ if (ret == 0) {
+ status = QLA_ERROR;
+ goto out_match;
+ }
+ ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
+ }
+
+ if (strcmp(existing_ipaddr, formatted_ipaddr))
+ status = QLA_ERROR;
+
+out_match:
+ return status;
+}
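Editorial note: qla4xxx_match_ipaddress() above normalises the user-supplied address with in4_pton()/in6_pton() and re-prints it with %pI4/%pI6, so a plain strcmp() against the existing connection string compares canonical forms rather than arbitrary spellings. The IPv4 half of that idea, as a standalone sketch with hypothetical names:

#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

static int canonical_ipv4(const char *user, char *out, size_t outlen)
{
	u8 addr[4];

	if (!in4_pton(user, strlen(user), addr, '\0', NULL))
		return -EINVAL;			/* not a valid dotted-quad string */
	snprintf(out, outlen, "%pI4", addr);	/* kernel canonical form */
	return 0;
}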
+
+static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
+ struct iscsi_cls_conn *cls_conn)
+{
+ int idx = 0, max_ddbs, rval;
+ struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
+ struct iscsi_session *sess, *existing_sess;
+ struct iscsi_conn *conn, *existing_conn;
+ struct ddb_entry *ddb_entry;
+
+ sess = cls_sess->dd_data;
+ conn = cls_conn->dd_data;
+
+ if (sess->targetname == NULL ||
+ conn->persistent_address == NULL ||
+ conn->persistent_port == 0)
+ return QLA_ERROR;
+
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ for (idx = 0; idx < max_ddbs; idx++) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if (ddb_entry == NULL)
+ continue;
+
+ if (ddb_entry->ddb_type != FLASH_DDB)
+ continue;
+
+ existing_sess = ddb_entry->sess->dd_data;
+ existing_conn = ddb_entry->conn->dd_data;
+
+ if (existing_sess->targetname == NULL ||
+ existing_conn->persistent_address == NULL ||
+ existing_conn->persistent_port == 0)
+ continue;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "IQN = %s User IQN = %s\n",
+ existing_sess->targetname,
+ sess->targetname));
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "IP = %s User IP = %s\n",
+ existing_conn->persistent_address,
+ conn->persistent_address));
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Port = %d User Port = %d\n",
+ existing_conn->persistent_port,
+ conn->persistent_port));
+
+ if (strcmp(existing_sess->targetname, sess->targetname))
+ continue;
+ rval = qla4xxx_match_ipaddress(ha, ddb_entry,
+ existing_conn->persistent_address,
+ conn->persistent_address);
+ if (rval == QLA_ERROR)
+ continue;
+ if (existing_conn->persistent_port != conn->persistent_port)
+ continue;
+ break;
+ }
+
+ if (idx == max_ddbs)
+ return QLA_ERROR;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Match found in fwdb sessions\n"));
+ return QLA_SUCCESS;
+}
+
static struct iscsi_cls_session *
qla4xxx_session_create(struct iscsi_endpoint *ep,
uint16_t cmds_max, uint16_t qdepth,
@@ -984,8 +1136,7 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
struct scsi_qla_host *ha;
struct qla_endpoint *qla_ep;
struct ddb_entry *ddb_entry;
- uint32_t ddb_index;
- uint32_t mbx_sts = 0;
+ uint16_t ddb_index;
struct iscsi_session *sess;
struct sockaddr *dst_addr;
int ret;
@@ -1000,32 +1151,9 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
ha = to_qla_host(qla_ep->host);
-get_ddb_index:
- ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
-
- if (ddb_index >= MAX_DDB_ENTRIES) {
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Free DDB index not available\n"));
- return NULL;
- }
-
- if (test_and_set_bit(ddb_index, ha->ddb_idx_map))
- goto get_ddb_index;
-
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Found a free DDB index at %d\n", ddb_index));
- ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts);
- if (ret == QLA_ERROR) {
- if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
- ql4_printk(KERN_INFO, ha,
- "DDB index = %d not available trying next\n",
- ddb_index);
- goto get_ddb_index;
- }
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Free FW DDB not available\n"));
+ ret = qla4xxx_get_ddb_index(ha, &ddb_index);
+ if (ret == QLA_ERROR)
return NULL;
- }
cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
cmds_max, sizeof(struct ddb_entry),
@@ -1040,6 +1168,8 @@ get_ddb_index:
ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
ddb_entry->ha = ha;
ddb_entry->sess = cls_sess;
+ ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
+ ddb_entry->ddb_change = qla4xxx_ddb_change;
cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
ha->tot_ddbs++;
@@ -1077,6 +1207,9 @@ qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
conn_idx);
+ if (!cls_conn)
+ return NULL;
+
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ddb_entry->conn = cls_conn;
@@ -1109,7 +1242,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
- struct dev_db_entry *fw_ddb_entry;
+ struct dev_db_entry *fw_ddb_entry = NULL;
dma_addr_t fw_ddb_entry_dma;
uint32_t mbx_sts = 0;
int ret = 0;
@@ -1120,12 +1253,25 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
+ /* Check if a matching FW DDB already exists; if so, do not
+ * log in to this target again, as that could cause the target
+ * to log out the previous connection.
+ */
+ ret = qla4xxx_match_fwdb_session(ha, cls_conn);
+ if (ret == QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha,
+ "Session already exist in FW.\n");
+ ret = -EEXIST;
+ goto exit_conn_start;
+ }
+
fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
&fw_ddb_entry_dma, GFP_KERNEL);
if (!fw_ddb_entry) {
ql4_printk(KERN_ERR, ha,
"%s: Unable to allocate dma buffer\n", __func__);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto exit_conn_start;
}
ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
@@ -1138,9 +1284,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
if (mbx_sts)
if (ddb_entry->fw_ddb_device_state ==
DDB_DS_SESSION_ACTIVE) {
- iscsi_conn_start(ddb_entry->conn);
- iscsi_conn_login_event(ddb_entry->conn,
- ISCSI_CONN_STATE_LOGGED_IN);
+ ddb_entry->unblock_sess(ddb_entry->sess);
goto exit_set_param;
}
@@ -1167,8 +1311,9 @@ exit_set_param:
ret = 0;
exit_conn_start:
- dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
- fw_ddb_entry, fw_ddb_entry_dma);
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
return ret;
}
@@ -1344,6 +1489,101 @@ static int qla4xxx_task_xmit(struct iscsi_task *task)
return -ENOSYS;
}
+static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ struct iscsi_cls_session *cls_sess,
+ struct iscsi_cls_conn *cls_conn)
+{
+ int buflen = 0;
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+ char ip_addr[DDB_IPADDR_LEN];
+ uint16_t options = 0;
+
+ sess = cls_sess->dd_data;
+ conn = cls_conn->dd_data;
+
+ conn->max_recv_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
+
+ conn->max_xmit_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
+
+ sess->initial_r2t_en =
+ (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+ sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
+
+ sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+ sess->first_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
+
+ sess->max_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
+
+ sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
+ sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+
+ conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
+
+ sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE)
+ sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+ else
+ sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+ iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
+ (char *)fw_ddb_entry->iscsi_name, buflen);
+ iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
+ (char *)ha->name_string, buflen);
+ iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
+ (char *)ip_addr, buflen);
+}
+
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_cls_conn *cls_conn;
+ uint32_t ddb_state;
+ dma_addr_t fw_ddb_entry_dma;
+ struct dev_db_entry *fw_ddb_entry;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ goto exit_session_conn_fwddb_param;
+ }
+
+ if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
+ fw_ddb_entry_dma, NULL, NULL, &ddb_state,
+ NULL, NULL, NULL) == QLA_ERROR) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+ "get_ddb_entry for fw_ddb_index %d\n",
+ ha->host_no, __func__,
+ ddb_entry->fw_ddb_index));
+ goto exit_session_conn_fwddb_param;
+ }
+
+ cls_sess = ddb_entry->sess;
+
+ cls_conn = ddb_entry->conn;
+
+ /* Update params */
+ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
+
+exit_session_conn_fwddb_param:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+}
+
void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry)
{
@@ -1360,7 +1600,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
if (!fw_ddb_entry) {
ql4_printk(KERN_ERR, ha,
"%s: Unable to allocate dma buffer\n", __func__);
- return;
+ goto exit_session_conn_param;
}
if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
@@ -1370,7 +1610,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
"get_ddb_entry for fw_ddb_index %d\n",
ha->host_no, __func__,
ddb_entry->fw_ddb_index));
- return;
+ goto exit_session_conn_param;
}
cls_sess = ddb_entry->sess;
@@ -1379,6 +1619,12 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
cls_conn = ddb_entry->conn;
conn = cls_conn->dd_data;
+ /* Update timers after login */
+ ddb_entry->default_relogin_timeout =
+ le16_to_cpu(fw_ddb_entry->def_timeout);
+ ddb_entry->default_time2wait =
+ le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
/* Update params */
conn->max_recv_dlength = BYTE_UNITS *
le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
@@ -1407,6 +1653,11 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
memcpy(sess->initiatorname, ha->name_string,
min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
+
+exit_session_conn_param:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
}
/*
@@ -1607,6 +1858,9 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
vfree(ha->chap_list);
ha->chap_list = NULL;
+ if (ha->fw_ddb_dma_pool)
+ dma_pool_destroy(ha->fw_ddb_dma_pool);
+
/* release io space registers */
if (is_qla8022(ha)) {
if (ha->nx_pcibase)
@@ -1689,6 +1943,16 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
goto mem_alloc_error_exit;
}
+ ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
+ DDB_DMA_BLOCK_SIZE, 8, 0);
+
+ if (ha->fw_ddb_dma_pool == NULL) {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: fw_ddb_dma_pool allocation failed..\n",
+ __func__);
+ goto mem_alloc_error_exit;
+ }
+
return QLA_SUCCESS;
mem_alloc_error_exit:
@@ -1800,6 +2064,60 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
}
}
+void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (ddb_entry->ddb_type != FLASH_DDB)
+ return;
+
+ if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
+ !iscsi_is_session_online(cls_sess)) {
+ if (atomic_read(&ddb_entry->retry_relogin_timer) !=
+ INVALID_ENTRY) {
+ if (atomic_read(&ddb_entry->retry_relogin_timer) ==
+ 0) {
+ atomic_set(&ddb_entry->retry_relogin_timer,
+ INVALID_ENTRY);
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ set_bit(DF_RELOGIN, &ddb_entry->flags);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: index [%d] login device\n",
+ __func__, ddb_entry->fw_ddb_index));
+ } else
+ atomic_dec(&ddb_entry->retry_relogin_timer);
+ }
+ }
+
+ /* Wait for relogin to time out */
+ if (atomic_read(&ddb_entry->relogin_timer) &&
+ (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
+ /*
+ * If the relogin times out and the device is
+ * still NOT ONLINE then try and relogin again.
+ */
+ if (!iscsi_is_session_online(cls_sess)) {
+ /* Reset retry relogin timer */
+ atomic_inc(&ddb_entry->relogin_retry_count);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: index[%d] relogin timed out-retrying"
+ " relogin (%d), retry (%d)\n", __func__,
+ ddb_entry->fw_ddb_index,
+ atomic_read(&ddb_entry->relogin_retry_count),
+ ddb_entry->default_time2wait + 4));
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ atomic_set(&ddb_entry->retry_relogin_timer,
+ ddb_entry->default_time2wait + 4);
+ }
+ }
+}
+
/**
* qla4xxx_timer - checks every second for work to do.
* @ha: Pointer to host adapter structure.
@@ -1809,6 +2127,8 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
int start_dpc = 0;
uint16_t w;
+ iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
+
/* If we are in the middle of AER/EEH processing
* skip any processing and reschedule the timer
*/
@@ -2078,7 +2398,12 @@ static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
sess = cls_session->dd_data;
ddb_entry = sess->dd_data;
ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
- iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+
+ if (ddb_entry->ddb_type == FLASH_DDB)
+ iscsi_block_session(ddb_entry->sess);
+ else
+ iscsi_session_failure(cls_session->dd_data,
+ ISCSI_ERR_CONN_FAILED);
}
/**
@@ -2163,7 +2488,7 @@ recover_ha_init_adapter:
/* NOTE: AF_ONLINE flag set upon successful completion of
* qla4xxx_initialize_adapter */
- status = qla4xxx_initialize_adapter(ha);
+ status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
}
/* Retry failed adapter initialization, if necessary
@@ -2245,17 +2570,108 @@ static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
iscsi_unblock_session(ddb_entry->sess);
} else {
/* Trigger relogin */
- iscsi_session_failure(cls_session->dd_data,
- ISCSI_ERR_CONN_FAILED);
+ if (ddb_entry->ddb_type == FLASH_DDB) {
+ if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ qla4xxx_arm_relogin_timer(ddb_entry);
+ } else
+ iscsi_session_failure(cls_session->dd_data,
+ ISCSI_ERR_CONN_FAILED);
}
}
}
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " unblock session\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+
+ iscsi_unblock_session(ddb_entry->sess);
+
+ /* Start scan target */
+ if (test_bit(AF_ONLINE, &ha->flags)) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " start scan\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+ scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
+ }
+ return QLA_SUCCESS;
+}
+
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_session->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+ " unblock user space session\n", ha->host_no, __func__,
+ ddb_entry->fw_ddb_index);
+ iscsi_conn_start(ddb_entry->conn);
+ iscsi_conn_login_event(ddb_entry->conn,
+ ISCSI_CONN_STATE_LOGGED_IN);
+
+ return QLA_SUCCESS;
+}
+
static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
{
iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
}
+static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+ uint16_t relogin_timer;
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ relogin_timer = max(ddb_entry->default_relogin_timeout,
+ (uint16_t)RELOGIN_TOV);
+ atomic_set(&ddb_entry->relogin_timer, relogin_timer);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
+ ddb_entry->fw_ddb_index, relogin_timer));
+
+ qla4xxx_login_flash_ddb(cls_sess);
+}
+
+static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
+{
+ struct iscsi_session *sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ if (ddb_entry->ddb_type != FLASH_DDB)
+ return;
+
+ if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
+ !iscsi_is_session_online(cls_sess)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "relogin issued\n"));
+ qla4xxx_relogin_flash_ddb(cls_sess);
+ }
+}
+
void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
{
if (ha->dpc_thread)
@@ -2356,6 +2772,12 @@ dpc_post_reset_ha:
if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
qla4xxx_get_dhcp_ip_address(ha);
+ /* ---- relogin device? --- */
+ if (adapter_up(ha) &&
+ test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
+ iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
+ }
+
/* ---- link change? --- */
if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
if (!test_bit(AF_LINK_UP, &ha->flags)) {
@@ -2368,8 +2790,12 @@ dpc_post_reset_ha:
* fatal error recovery. Therefore, the driver must
* manually relogin to devices when recovering from
* connection failures, logouts, expired KATO, etc. */
-
- qla4xxx_relogin_all_devices(ha);
+ if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
+ qla4xxx_build_ddb_list(ha, ha->is_reset);
+ iscsi_host_for_each_session(ha->host,
+ qla4xxx_login_flash_ddb);
+ } else
+ qla4xxx_relogin_all_devices(ha);
}
}
}
@@ -2867,6 +3293,9 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
" target ID %d\n", __func__, ddb_index[0],
ddb_index[1]));
+ ha->pri_ddb_idx = ddb_index[0];
+ ha->sec_ddb_idx = ddb_index[1];
+
exit_boot_info_free:
dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
exit_boot_info:
@@ -3034,6 +3463,9 @@ static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
return ret;
}
+ if (ql4xdisablesysfsboot)
+ return QLA_SUCCESS;
+
if (ddb_index[0] == 0xffff)
goto sec_target;
@@ -3066,7 +3498,15 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
struct iscsi_boot_kobj *boot_kobj;
if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
- return 0;
+ return QLA_ERROR;
+
+ if (ql4xdisablesysfsboot) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: syfsboot disabled - driver will trigger login"
+ "and publish session for discovery .\n", __func__);
+ return QLA_SUCCESS;
+ }
+
ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
if (!ha->boot_kset)
@@ -3108,7 +3548,7 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
if (!boot_kobj)
goto put_host;
- return 0;
+ return QLA_SUCCESS;
put_host:
scsi_host_put(ha->host);
@@ -3174,9 +3614,507 @@ static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
exit_chap_list:
dma_free_coherent(&ha->pdev->dev, chap_size,
chap_flash_data, chap_dma);
- return;
}
+static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
+ struct ql4_tuple_ddb *tddb)
+{
+ struct scsi_qla_host *ha;
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_session *sess;
+ struct iscsi_conn *conn;
+
+ DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+ ha = ddb_entry->ha;
+ cls_sess = ddb_entry->sess;
+ sess = cls_sess->dd_data;
+ cls_conn = ddb_entry->conn;
+ conn = cls_conn->dd_data;
+
+ tddb->tpgt = sess->tpgt;
+ tddb->port = conn->persistent_port;
+ strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
+ strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
+}
+
+static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
+ struct ql4_tuple_ddb *tddb)
+{
+ uint16_t options = 0;
+
+ tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+ memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
+ min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ if (options & DDB_OPT_IPV6_DEVICE)
+ sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+ else
+ sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+ tddb->port = le16_to_cpu(fw_ddb_entry->port);
+}
+
+static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
+ struct ql4_tuple_ddb *old_tddb,
+ struct ql4_tuple_ddb *new_tddb)
+{
+ if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
+ return QLA_ERROR;
+
+ if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
+ return QLA_ERROR;
+
+ if (old_tddb->port != new_tddb->port)
+ return QLA_ERROR;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
+ old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
+ old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
+ new_tddb->ip_addr, new_tddb->iscsi_name));
+
+ return QLA_SUCCESS;
+}
+
+static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct ddb_entry *ddb_entry;
+ struct ql4_tuple_ddb *fw_tddb = NULL;
+ struct ql4_tuple_ddb *tmp_tddb = NULL;
+ int idx;
+ int ret = QLA_ERROR;
+
+ fw_tddb = vzalloc(sizeof(*fw_tddb));
+ if (!fw_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+ if (!tmp_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+ for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if (ddb_entry == NULL)
+ continue;
+
+ qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
+ if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+ ret = QLA_SUCCESS; /* found */
+ goto exit_check;
+ }
+ }
+
+exit_check:
+ if (fw_tddb)
+ vfree(fw_tddb);
+ if (tmp_tddb)
+ vfree(tmp_tddb);
+ return ret;
+}
+
+static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
+ struct list_head *list_nt,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
+ struct ql4_tuple_ddb *fw_tddb = NULL;
+ struct ql4_tuple_ddb *tmp_tddb = NULL;
+ int ret = QLA_ERROR;
+
+ fw_tddb = vzalloc(sizeof(*fw_tddb));
+ if (!fw_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+ if (!tmp_tddb) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "Memory Allocation failed.\n"));
+ ret = QLA_SUCCESS;
+ goto exit_check;
+ }
+
+ qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+ list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+ qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb);
+ if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+ ret = QLA_SUCCESS; /* found */
+ goto exit_check;
+ }
+ }
+
+exit_check:
+ if (fw_tddb)
+ vfree(fw_tddb);
+ if (tmp_tddb)
+ vfree(tmp_tddb);
+ return ret;
+}
+
+static void qla4xxx_free_nt_list(struct list_head *list_nt)
+{
+ struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
+
+ /* Free up the normal-targets list */
+ list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+ list_del_init(&nt_ddb_idx->list);
+ vfree(nt_ddb_idx);
+ }
+
+}
+
+static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ struct iscsi_endpoint *ep;
+ struct sockaddr_in *addr;
+ struct sockaddr_in6 *addr6;
+ struct sockaddr *dst_addr;
+ char *ip;
+
+ /* TODO: the iscsi_endpoint needs to be destroyed on unload */
+ dst_addr = vmalloc(sizeof(*dst_addr));
+ if (!dst_addr)
+ return NULL;
+
+ if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
+ dst_addr->sa_family = AF_INET6;
+ addr6 = (struct sockaddr_in6 *)dst_addr;
+ ip = (char *)&addr6->sin6_addr;
+ memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
+ addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
+
+ } else {
+ dst_addr->sa_family = AF_INET;
+ addr = (struct sockaddr_in *)dst_addr;
+ ip = (char *)&addr->sin_addr;
+ memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
+ addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
+ }
+
+ ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
+ vfree(dst_addr);
+ return ep;
+}
+
+static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
+{
+ if (ql4xdisablesysfsboot)
+ return QLA_SUCCESS;
+ if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
+ return QLA_ERROR;
+ return QLA_SUCCESS;
+}
+
+static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
+ struct ddb_entry *ddb_entry)
+{
+ ddb_entry->ddb_type = FLASH_DDB;
+ ddb_entry->fw_ddb_index = INVALID_ENTRY;
+ ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
+ ddb_entry->ha = ha;
+ ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
+ ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
+
+ atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+ atomic_set(&ddb_entry->relogin_timer, 0);
+ atomic_set(&ddb_entry->relogin_retry_count, 0);
+
+ ddb_entry->default_relogin_timeout =
+ le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+ ddb_entry->default_time2wait =
+ le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
+}
+
+static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
+{
+ uint32_t idx = 0;
+ uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
+ uint32_t sts[MBOX_REG_COUNT];
+ uint32_t ip_state;
+ unsigned long wtime;
+ int ret;
+
+ wtime = jiffies + (HZ * IP_CONFIG_TOV);
+ do {
+ for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
+ if (ip_idx[idx] == -1)
+ continue;
+
+ ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
+
+ if (ret == QLA_ERROR) {
+ ip_idx[idx] = -1;
+ continue;
+ }
+
+ ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Waiting for IP state for idx = %d, state = 0x%x\n",
+ ip_idx[idx], ip_state));
+ if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
+ ip_state == IP_ADDRSTATE_INVALID ||
+ ip_state == IP_ADDRSTATE_PREFERRED ||
+ ip_state == IP_ADDRSTATE_DEPRICATED ||
+ ip_state == IP_ADDRSTATE_DISABLING)
+ ip_idx[idx] = -1;
+
+ }
+
+ /* Break if all IP states checked */
+ if ((ip_idx[0] == -1) &&
+ (ip_idx[1] == -1) &&
+ (ip_idx[2] == -1) &&
+ (ip_idx[3] == -1))
+ break;
+ schedule_timeout_uninterruptible(HZ);
+ } while (time_after(wtime, jiffies));
+}
+
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+{
+ int max_ddbs;
+ int ret;
+ uint32_t idx = 0, next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+ uint16_t conn_id;
+ struct dev_db_entry *fw_ddb_entry;
+ struct ddb_entry *ddb_entry = NULL;
+ dma_addr_t fw_ddb_dma;
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_session *sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_endpoint *ep;
+ uint16_t cmds_max = 32, tmo = 0;
+ uint32_t initial_cmdsn = 0;
+ struct list_head list_st, list_nt; /* List of sendtargets */
+ struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
+ int fw_idx_size;
+ unsigned long wtime;
+ struct qla_ddb_index *nt_ddb_idx;
+
+ if (!test_bit(AF_LINK_UP, &ha->flags)) {
+ set_bit(AF_BUILD_DDB_LIST, &ha->flags);
+ ha->is_reset = is_reset;
+ return;
+ }
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+ goto exit_ddb_list;
+ }
+
+ INIT_LIST_HEAD(&list_st);
+ INIT_LIST_HEAD(&list_nt);
+ fw_idx_size = sizeof(struct qla_ddb_index);
+
+ for (idx = 0; idx < max_ddbs; idx = next_idx) {
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
+ fw_ddb_dma, NULL,
+ &next_idx, &state, &conn_err,
+ NULL, &conn_id);
+ if (ret == QLA_ERROR)
+ break;
+
+ if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+ goto continue_next_st;
+
+ /* Check if ST, add to the list_st */
+ if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
+ goto continue_next_st;
+
+ st_ddb_idx = vzalloc(fw_idx_size);
+ if (!st_ddb_idx)
+ break;
+
+ st_ddb_idx->fw_ddb_idx = idx;
+
+ list_add_tail(&st_ddb_idx->list, &list_st);
+continue_next_st:
+ if (next_idx == 0)
+ break;
+ }
+
+ /* Before issuing the conn open mbox, ensure all IP address states are
+ * configured; note that conn open fails if the IPs are not configured.
+ */
+ qla4xxx_wait_for_ip_configuration(ha);
+
+ /* Go through the STs and fire the sendtargets by issuing conn open mbx */
+ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+ qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+ }
+
+ /* Wait for all sendtargets to complete (minimum 12 second wait) */
+ tmo = ((ha->def_timeout < LOGIN_TOV) ? LOGIN_TOV : ha->def_timeout);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Default time to wait for build ddb %d\n", tmo));
+
+ wtime = jiffies + (HZ * tmo);
+ do {
+ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st,
+ list) {
+ ret = qla4xxx_get_fwddb_entry(ha,
+ st_ddb_idx->fw_ddb_idx,
+ NULL, 0, NULL, &next_idx,
+ &state, &conn_err, NULL,
+ NULL);
+ if (ret == QLA_ERROR)
+ continue;
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED) {
+ list_del_init(&st_ddb_idx->list);
+ vfree(st_ddb_idx);
+ }
+ }
+ schedule_timeout_uninterruptible(HZ / 10);
+ } while (time_after(wtime, jiffies));
+
+ /* Free up the sendtargets list */
+ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+ list_del_init(&st_ddb_idx->list);
+ vfree(st_ddb_idx);
+ }
+
+ for (idx = 0; idx < max_ddbs; idx = next_idx) {
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
+ fw_ddb_dma, NULL,
+ &next_idx, &state, &conn_err,
+ NULL, &conn_id);
+ if (ret == QLA_ERROR)
+ break;
+
+ if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+ goto continue_next_nt;
+
+ /* Check if NT, then add it to the list */
+ if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
+ goto continue_next_nt;
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Adding DDB to session = 0x%x\n",
+ idx));
+ if (is_reset == INIT_ADAPTER) {
+ nt_ddb_idx = vmalloc(fw_idx_size);
+ if (!nt_ddb_idx)
+ break;
+
+ nt_ddb_idx->fw_ddb_idx = idx;
+
+ memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+
+ if (qla4xxx_is_flash_ddb_exists(ha, &list_nt,
+ fw_ddb_entry) == QLA_SUCCESS) {
+ vfree(nt_ddb_idx);
+ goto continue_next_nt;
+ }
+ list_add_tail(&nt_ddb_idx->list, &list_nt);
+ } else if (is_reset == RESET_ADAPTER) {
+ if (qla4xxx_is_session_exists(ha,
+ fw_ddb_entry) == QLA_SUCCESS)
+ goto continue_next_nt;
+ }
+
+ /* Create the session object with INVALID_ENTRY; the
+ * target_id gets set when we issue the login.
+ */
+ cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport,
+ ha->host, cmds_max,
+ sizeof(struct ddb_entry),
+ sizeof(struct ql4_task_data),
+ initial_cmdsn, INVALID_ENTRY);
+ if (!cls_sess)
+ goto exit_ddb_list;
+
+ /*
+ * iscsi_session_setup increments the driver reference
+ * count, which would prevent the driver from being
+ * unloaded, so call module_put() to decrement the
+ * reference count.
+ */
+ module_put(qla4xxx_iscsi_transport.owner);
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ddb_entry->sess = cls_sess;
+
+ cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+ memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+
+ qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
+
+ cls_conn = iscsi_conn_setup(cls_sess,
+ sizeof(struct qla_conn),
+ conn_id);
+ if (!cls_conn)
+ goto exit_ddb_list;
+
+ ddb_entry->conn = cls_conn;
+
+ /* Setup ep, for displaying attributes in sysfs */
+ ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
+ if (ep) {
+ ep->conn = cls_conn;
+ cls_conn->ep = ep;
+ } else {
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "Unable to get ep\n"));
+ }
+
+ /* Update sess/conn params */
+ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess,
+ cls_conn);
+
+ if (is_reset == RESET_ADAPTER) {
+ iscsi_block_session(cls_sess);
+ /* Use the relogin path to discover new devices
+ * by short-circuiting the relogin timer logic -
+ * instead, set the flags to initiate login
+ * right away.
+ */
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ set_bit(DF_RELOGIN, &ddb_entry->flags);
+ }
+ }
+continue_next_nt:
+ if (next_idx == 0)
+ break;
+ }
+exit_ddb_list:
+ qla4xxx_free_nt_list(&list_nt);
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+
+ qla4xxx_free_ddb_index(ha);
+}
+
+
/**
* qla4xxx_probe_adapter - callback function to probe HBA
* @pdev: pointer to pci_dev structure
@@ -3298,7 +4236,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
* firmware
* NOTE: interrupts enabled upon successful completion
*/
- status = qla4xxx_initialize_adapter(ha);
+ status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
while ((!test_bit(AF_ONLINE, &ha->flags)) &&
init_retry_count++ < MAX_INIT_RETRIES) {
@@ -3319,7 +4257,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
continue;
- status = qla4xxx_initialize_adapter(ha);
+ status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
}
if (!test_bit(AF_ONLINE, &ha->flags)) {
@@ -3386,12 +4324,16 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
ha->patch_number, ha->build_number);
- qla4xxx_create_chap_list(ha);
-
if (qla4xxx_setup_boot_info(ha))
ql4_printk(KERN_ERR, ha, "%s:ISCSI boot info setup failed\n",
__func__);
+ /* Perform the build ddb list and login to each */
+ qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
+ iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
+
+ qla4xxx_create_chap_list(ha);
+
qla4xxx_create_ifaces(ha);
return 0;
@@ -3449,6 +4391,38 @@ static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
}
}
+static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
+{
+ struct ddb_entry *ddb_entry;
+ int options;
+ int idx;
+
+ for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if ((ddb_entry != NULL) &&
+ (ddb_entry->ddb_type == FLASH_DDB)) {
+
+ options = LOGOUT_OPTION_CLOSE_SESSION;
+ if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
+ == QLA_ERROR)
+ ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
+ __func__);
+
+ qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+ /*
+ * We decremented the driver reference count when we set up
+ * the session so that the driver can be unloaded seamlessly
+ * without actually destroying the session.
+ */
+ try_module_get(qla4xxx_iscsi_transport.owner);
+ iscsi_destroy_endpoint(ddb_entry->conn->ep);
+ qla4xxx_free_ddb(ha, ddb_entry);
+ iscsi_session_teardown(ddb_entry->sess);
+ }
+ }
+}
/**
* qla4xxx_remove_adapter - calback function to remove adapter.
* @pci_dev: PCI device pointer
@@ -3465,9 +4439,11 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
/* destroy iface from sysfs */
qla4xxx_destroy_ifaces(ha);
- if (ha->boot_kset)
+ if ((!ql4xdisablesysfsboot) && ha->boot_kset)
iscsi_boot_destroy_kset(ha->boot_kset);
+ qla4xxx_destroy_fw_ddb_session(ha);
+
scsi_remove_host(ha->host);
qla4xxx_free_adapter(ha);
@@ -4115,7 +5091,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
qla4_8xxx_idc_unlock(ha);
clear_bit(AF_FW_RECOVERY, &ha->flags);
- rval = qla4xxx_initialize_adapter(ha);
+ rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
qla4_8xxx_idc_lock(ha);
if (rval != QLA_SUCCESS) {
@@ -4151,7 +5127,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
QLA82XX_DEV_READY)) {
clear_bit(AF_FW_RECOVERY, &ha->flags);
- rval = qla4xxx_initialize_adapter(ha);
+ rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
if (rval == QLA_SUCCESS) {
ret = qla4xxx_request_irqs(ha);
if (ret) {
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index c15347d3f532..5254e57968f5 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k8"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k9"
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index a1fd73df5416..8ba4510a9519 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -199,7 +199,7 @@ config SPI_FSL_LIB
depends on FSL_SOC
config SPI_FSL_SPI
- tristate "Freescale SPI controller"
+ bool "Freescale SPI controller"
depends on FSL_SOC
select SPI_FSL_LIB
help
@@ -208,7 +208,7 @@ config SPI_FSL_SPI
MPC8569 uses the controller in QE mode, MPC8610 in cpu mode.
config SPI_FSL_ESPI
- tristate "Freescale eSPI controller"
+ bool "Freescale eSPI controller"
depends on FSL_SOC
select SPI_FSL_LIB
help
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 024b48aed5ca..acc88b4d2869 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -13,6 +13,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index e093d3ec41ba..0094c645ff0d 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -256,7 +256,7 @@ static void spi_gpio_cleanup(struct spi_device *spi)
spi_bitbang_cleanup(spi);
}
-static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
+static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
{
int value;
@@ -270,7 +270,7 @@ static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
return value;
}
-static int __init
+static int __devinit
spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label,
u16 *res_flags)
{
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index e763254741c2..182e9c873822 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -8,6 +8,7 @@
*
*/
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -426,7 +427,7 @@ static int __devinit nuc900_spi_probe(struct platform_device *pdev)
goto err_clk;
}
- mfp_set_groupg(&pdev->dev);
+ mfp_set_groupg(&pdev->dev, NULL);
nuc900_init_spi(hw);
err = spi_bitbang_start(&hw->bitbang);
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 84c934c0a545..520e8286db28 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -517,10 +517,14 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
static void __devinit ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
{
- ssb_pcicore_fix_sprom_core_index(pc);
+ struct ssb_device *pdev = pc->dev;
+ struct ssb_bus *bus = pdev->bus;
+
+ if (bus->bustype == SSB_BUSTYPE_PCI)
+ ssb_pcicore_fix_sprom_core_index(pc);
/* Disable PCI interrupts. */
- ssb_write32(pc->dev, SSB_INTVEC, 0);
+ ssb_write32(pdev, SSB_INTVEC, 0);
/* Additional PCIe always once-executed workarounds */
if (pc->dev->id.coreid == SSB_DEV_PCIE) {
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 21d8c1c16cd8..5e78c77d5a08 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -671,7 +671,7 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
}
insns =
- kmalloc(sizeof(struct comedi_insn) * insnlist.n_insns, GFP_KERNEL);
+ kcalloc(insnlist.n_insns, sizeof(struct comedi_insn), GFP_KERNEL);
if (!insns) {
DPRINTK("kmalloc failed\n");
ret = -ENOMEM;
@@ -1432,7 +1432,21 @@ static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
return ret;
}
-static void comedi_unmap(struct vm_area_struct *area)
+
+static void comedi_vm_open(struct vm_area_struct *area)
+{
+ struct comedi_async *async;
+ struct comedi_device *dev;
+
+ async = area->vm_private_data;
+ dev = async->subdevice->device;
+
+ mutex_lock(&dev->mutex);
+ async->mmap_count++;
+ mutex_unlock(&dev->mutex);
+}
+
+static void comedi_vm_close(struct vm_area_struct *area)
{
struct comedi_async *async;
struct comedi_device *dev;
@@ -1446,15 +1460,13 @@ static void comedi_unmap(struct vm_area_struct *area)
}
static struct vm_operations_struct comedi_vm_ops = {
- .close = comedi_unmap,
+ .open = comedi_vm_open,
+ .close = comedi_vm_close,
};
static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
{
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
struct comedi_async *async = NULL;
unsigned long start = vma->vm_start;
unsigned long size;
@@ -1462,6 +1474,15 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
int i;
int retval;
struct comedi_subdevice *s;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+
+ dev_file_info = comedi_get_device_file_info(minor);
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
mutex_lock(&dev->mutex);
if (!dev->attached) {
@@ -1528,11 +1549,17 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait)
{
unsigned int mask = 0;
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
struct comedi_subdevice *read_subdev;
struct comedi_subdevice *write_subdev;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
mutex_lock(&dev->mutex);
if (!dev->attached) {
@@ -1578,9 +1605,15 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
if (!dev->attached) {
DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1640,11 +1673,11 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
retval = -EAGAIN;
break;
}
+ schedule();
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
- schedule();
if (!s->busy)
break;
if (s->busy != file) {
@@ -1683,9 +1716,15 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
if (!dev->attached) {
DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1741,11 +1780,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
retval = -EAGAIN;
break;
}
+ schedule();
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
- schedule();
if (!s->busy) {
retval = 0;
break;
@@ -1885,11 +1924,17 @@ ok:
static int comedi_close(struct inode *inode, struct file *file)
{
const unsigned minor = iminor(inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
struct comedi_subdevice *s = NULL;
int i;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
mutex_lock(&dev->mutex);
@@ -1923,10 +1968,15 @@ static int comedi_close(struct inode *inode, struct file *file)
static int comedi_fasync(int fd, struct file *file, int on)
{
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
return fasync_helper(fd, file, on, &dev->async_queue);
}
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index a8fea9a91733..6144afb8cbaa 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -1,4 +1,4 @@
-#define DRIVER_VERSION "v0.5"
+#define DRIVER_VERSION "v0.6"
#define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com"
#define DRIVER_DESC "Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com"
/*
@@ -25,7 +25,7 @@ Driver: usbduxsigma
Description: University of Stirling USB DAQ & INCITE Technology Limited
Devices: [ITL] USB-DUX (usbduxsigma.o)
Author: Bernd Porr <BerndPorr@f2s.com>
-Updated: 21 Jul 2011
+Updated: 8 Nov 2011
Status: testing
*/
/*
@@ -44,6 +44,7 @@ Status: testing
* 0.3: proper vendor ID and driver name
* 0.4: fixed D/A voltage range
* 0.5: various bug fixes, health check at startup
+ * 0.6: corrected wrong input range
*/
/* generates loads of debug info */
@@ -175,7 +176,7 @@ Status: testing
/* comedi constants */
static const struct comedi_lrange range_usbdux_ai_range = { 1, {
BIP_RANGE
- (2.65)
+ (2.65/2.0)
}
};
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index fb2e89c3056c..5385da2e9cdb 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -89,6 +89,7 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
{USB_DEVICE(0x0DF6, 0x0045)},
{USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
{USB_DEVICE(0x0DF6, 0x004B)},
+ {USB_DEVICE(0x0DF6, 0x005D)},
{USB_DEVICE(0x0DF6, 0x0063)},
/* Sweex */
{USB_DEVICE(0x177F, 0x0154)},
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
index 480b0ed2e4de..115635f95024 100644
--- a/drivers/staging/rts_pstor/rtsx.c
+++ b/drivers/staging/rts_pstor/rtsx.c
@@ -1021,6 +1021,7 @@ static int __devinit rtsx_probe(struct pci_dev *pci,
th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
if (IS_ERR(th)) {
printk(KERN_ERR "Unable to start the device-scanning thread\n");
+ complete(&dev->scanning_done);
quiesce_and_remove_host(dev);
err = PTR_ERR(th);
goto errout;
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
index 3d1279c424a8..7eb56178fb64 100644
--- a/drivers/staging/tidspbridge/core/dsp-clock.c
+++ b/drivers/staging/tidspbridge/core/dsp-clock.c
@@ -54,6 +54,7 @@
/* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */
#define DMT_ID(id) ((id) + 4)
+#define DM_TIMER_CLOCKS 4
/* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */
#define MCBSP_ID(id) ((id) - 6)
@@ -114,8 +115,13 @@ static s8 get_clk_type(u8 id)
*/
void dsp_clk_exit(void)
{
+ int i;
+
dsp_clock_disable_all(dsp_clocks);
+ for (i = 0; i < DM_TIMER_CLOCKS; i++)
+ omap_dm_timer_free(timer[i]);
+
clk_put(iva2_clk);
clk_put(ssi.sst_fck);
clk_put(ssi.ssr_fck);
@@ -130,9 +136,13 @@ void dsp_clk_exit(void)
void dsp_clk_init(void)
{
static struct platform_device dspbridge_device;
+ int i, id;
dspbridge_device.dev.bus = &platform_bus_type;
+ for (i = 0, id = 5; i < DM_TIMER_CLOCKS; i++, id++)
+ timer[i] = omap_dm_timer_request_specific(id);
+
iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
if (IS_ERR(iva2_clk))
dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
@@ -204,8 +214,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
clk_enable(iva2_clk);
break;
case GPT_CLK:
- timer[clk_id - 1] =
- omap_dm_timer_request_specific(DMT_ID(clk_id));
+ status = omap_dm_timer_start(timer[clk_id - 1]);
break;
#ifdef CONFIG_OMAP_MCBSP
case MCBSP_CLK:
@@ -281,7 +290,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id)
clk_disable(iva2_clk);
break;
case GPT_CLK:
- omap_dm_timer_free(timer[clk_id - 1]);
+ status = omap_dm_timer_stop(timer[clk_id - 1]);
break;
#ifdef CONFIG_OMAP_MCBSP
case MCBSP_CLK:
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index c43c7e3421c8..76cfc6edecd9 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -24,11 +24,7 @@
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
-
-#ifdef MODULE
#include <linux/module.h>
-#endif
-
#include <linux/device.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index 09c44abb89e8..3872b8cccdcf 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -68,6 +68,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
{
struct usbip_device *ud = &vdev->ud;
struct urb *urb;
+ unsigned long flags;
spin_lock(&vdev->priv_lock);
urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
@@ -101,9 +102,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
@@ -141,6 +142,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
{
struct vhci_unlink *unlink;
struct urb *urb;
+ unsigned long flags;
usbip_dump_header(pdu);
@@ -170,9 +172,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
urb->status = pdu->u.ret_unlink.status;
pr_info("urb->status %d\n", urb->status);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
urb->status);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 0fd96c10271d..8599545cdf9e 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -614,13 +614,12 @@ int iscsit_add_reject(
hdr = (struct iscsi_reject *) cmd->pdu;
hdr->reason = reason;
- cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
iscsit_release_cmd(cmd);
return -1;
}
- memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
@@ -661,13 +660,12 @@ int iscsit_add_reject_from_cmd(
hdr = (struct iscsi_reject *) cmd->pdu;
hdr->reason = reason;
- cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
iscsit_release_cmd(cmd);
return -1;
}
- memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
if (add_to_conn) {
spin_lock_bh(&conn->cmd_lock);
@@ -1017,11 +1015,6 @@ done:
" non-existent or non-exported iSCSI LUN:"
" 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
}
- if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 1, buf, cmd);
-
send_check_condition = 1;
goto attach_cmd;
}
@@ -1044,6 +1037,8 @@ done:
*/
send_check_condition = 1;
} else {
+ cmd->data_length = cmd->se_cmd.data_length;
+
if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
@@ -1123,7 +1118,7 @@ attach_cmd:
* the backend memory allocation.
*/
ret = transport_generic_new_cmd(&cmd->se_cmd);
- if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) {
+ if (ret < 0) {
immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
dump_immediate_data = 1;
goto after_immediate_data;
@@ -1341,7 +1336,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
spin_lock_irqsave(&se_cmd->t_state_lock, flags);
if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
- (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
+ (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
dump_unsolicited_data = 1;
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -2513,10 +2508,10 @@ static int iscsit_send_data_in(
if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
}
}
hton24(hdr->dlength, datain.length);
@@ -3018,10 +3013,10 @@ static int iscsit_send_status(
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
}
hdr->response = cmd->iscsi_response;
hdr->cmd_status = cmd->se_cmd.scsi_status;
@@ -3133,6 +3128,7 @@ static int iscsit_send_task_mgt_rsp(
hdr = (struct iscsi_tm_rsp *) cmd->pdu;
memset(hdr, 0, ISCSI_HDR_LEN);
hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
hdr->itt = cpu_to_be32(cmd->init_task_tag);
cmd->stat_sn = conn->stat_sn++;
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index beb39469e7f1..1cd6ce373b83 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -30,9 +30,11 @@
static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
{
- int j = DIV_ROUND_UP(len, 2);
+ int j = DIV_ROUND_UP(len, 2), rc;
- hex2bin(dst, src, j);
+ rc = hex2bin(dst, src, j);
+ if (rc < 0)
+ pr_debug("CHAP string contains non hex digit symbols\n");
dst[j] = '\0';
return j;
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 3723d90d5ae5..f1a02dad05a0 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -398,7 +398,6 @@ struct iscsi_cmd {
u32 pdu_send_order;
/* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
u32 pdu_start;
- u32 residual_count;
/* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
u32 seq_send_order;
/* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
@@ -535,7 +534,6 @@ struct iscsi_conn {
atomic_t connection_exit;
atomic_t connection_recovery;
atomic_t connection_reinstatement;
- atomic_t connection_wait;
atomic_t connection_wait_rcfr;
atomic_t sleep_on_conn_wait_comp;
atomic_t transport_failed;
@@ -643,7 +641,6 @@ struct iscsi_session {
atomic_t session_reinstatement;
atomic_t session_stop_active;
atomic_t sleep_on_sess_wait_comp;
- atomic_t transport_wait_cmds;
/* connection list */
struct list_head sess_conn_list;
struct list_head cr_active_list;
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index c4c68da3e500..101b1beb3bca 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -938,8 +938,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
* handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
*/
if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
- if (se_cmd->se_cmd_flags &
- SCF_SCSI_RESERVATION_CONFLICT) {
+ if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
cmd->i_state = ISTATE_SEND_STATUS;
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index daad362a93ce..d734bdec24f9 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -224,7 +224,7 @@ static int iscsi_login_zero_tsih_s1(
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Could not allocate memory for session\n");
- return -1;
+ return -ENOMEM;
}
iscsi_login_set_conn_values(sess, conn, pdu->cid);
@@ -250,7 +250,8 @@ static int iscsi_login_zero_tsih_s1(
pr_err("idr_pre_get() for sess_idr failed\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
+ kfree(sess);
+ return -ENOMEM;
}
spin_lock(&sess_idr_lock);
idr_get_new(&sess_idr, NULL, &sess->session_index);
@@ -270,14 +271,16 @@ static int iscsi_login_zero_tsih_s1(
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Unable to allocate memory for"
" struct iscsi_sess_ops.\n");
- return -1;
+ kfree(sess);
+ return -ENOMEM;
}
sess->se_sess = transport_init_session();
- if (!sess->se_sess) {
+ if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
+ kfree(sess);
+ return -ENOMEM;
}
return 0;
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 426cd4bf6a9a..98936cb7c294 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -981,14 +981,13 @@ struct iscsi_login *iscsi_target_init_negotiation(
return NULL;
}
- login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
if (!login->req) {
pr_err("Unable to allocate memory for Login Request.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
goto out;
}
- memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
if (!login->req_buf) {
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 3df1c9b8ae6b..81d5832fbbd5 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -113,11 +113,9 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
&tl_cmd->tl_sense_buf[0]);
- /*
- * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
- */
if (scsi_bidi_cmnd(sc))
- se_cmd->t_tasks_bidi = 1;
+ se_cmd->se_cmd_flags |= SCF_BIDI;
+
/*
* Locate the struct se_lun pointer and attach it to struct se_cmd
*/
@@ -148,27 +146,13 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
* Allocate the necessary tasks to complete the received CDB+data
*/
ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
- if (ret == -ENOMEM) {
- /* Out of Resources */
- return PYX_TRANSPORT_LU_COMM_FAILURE;
- } else if (ret == -EINVAL) {
- /*
- * Handle case for SAM_STAT_RESERVATION_CONFLICT
- */
- if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
- /*
- * Otherwise, return SAM_STAT_CHECK_CONDITION and return
- * sense data.
- */
- return PYX_TRANSPORT_USE_SENSE_REASON;
- }
-
+ if (ret != 0)
+ return ret;
/*
* For BIDI commands, pass in the extra READ buffer
* to transport_generic_map_mem_to_cmd() below..
*/
- if (se_cmd->t_tasks_bidi) {
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
struct scsi_data_buffer *sdb = scsi_in(sc);
sgl_bidi = sdb->table.sgl;
@@ -194,12 +178,8 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
}
/* Tell the core about our preallocated memory */
- ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+ return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
- if (ret < 0)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
-
- return 0;
}
/*
@@ -1360,17 +1340,16 @@ void tcm_loop_drop_scsi_hba(
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
- int host_no = tl_hba->sh->host_no;
+
+ pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
+ " SAS Address: %s at Linux/SCSI Host ID: %d\n",
+ tl_hba->tl_wwn_address, tl_hba->sh->host_no);
/*
* Call device_unregister() on the original tl_hba->dev.
* tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
* release *tl_hba;
*/
device_unregister(&tl_hba->dev);
-
- pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
- " SAS Address: %s at Linux/SCSI Host ID: %d\n",
- config_item_name(&wwn->wwn_group.cg_item), host_no);
}
/* Start items for tcm_loop_cit */
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 88f2ad43ec8b..1dcbef499d6a 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -191,9 +191,10 @@ int target_emulate_set_target_port_groups(struct se_task *task)
int alua_access_state, primary = 0, rc;
u16 tg_pt_id, rtpi;
- if (!l_port)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
-
+ if (!l_port) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
buf = transport_kmap_first_data_page(cmd);
/*
@@ -203,7 +204,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
if (!l_tg_pt_gp_mem) {
pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
- rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ rc = -EINVAL;
goto out;
}
spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -211,7 +213,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
if (!l_tg_pt_gp) {
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
- rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ rc = -EINVAL;
goto out;
}
rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
@@ -220,7 +223,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
if (!rc) {
pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICT_ALUA is disabled\n");
- rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ rc = -EINVAL;
goto out;
}
@@ -245,7 +249,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* REQUEST, and the additional sense code set to INVALID
* FIELD IN PARAMETER LIST.
*/
- rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ rc = -EINVAL;
goto out;
}
rc = -1;
@@ -298,7 +303,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* throw an exception with ASCQ: INVALID_PARAMETER_LIST
*/
if (rc != 0) {
- rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ rc = -EINVAL;
goto out;
}
} else {
@@ -335,7 +341,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
* INVALID_PARAMETER_LIST
*/
if (rc != 0) {
- rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ rc = -EINVAL;
goto out;
}
}
@@ -1184,7 +1191,6 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
* struct t10_alua_lu_gp.
*/
spin_lock(&lu_gps_lock);
- atomic_set(&lu_gp->lu_gp_shutdown, 1);
list_del(&lu_gp->lu_gp_node);
alua_lu_gps_count--;
spin_unlock(&lu_gps_lock);
@@ -1438,7 +1444,6 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
tg_pt_gp_mem->tg_pt = port;
port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
- atomic_set(&port->sep_tg_pt_gp_active, 1);
return tg_pt_gp_mem;
}
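[Editor's illustration, not part of the patch] The hunks above show the conversion applied throughout this series: instead of returning a PYX_TRANSPORT_* status, emulation code records the sense key in se_cmd->scsi_sense_reason and returns a plain negative errno, with 0 replacing PYX_TRANSPORT_SENT_TO_TRANSPORT on success. A minimal sketch of the convention follows; example_emulate_op is a hypothetical handler, the other identifiers are taken from the diff.

static int example_emulate_op(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;

	if (!cmd->se_sess) {
		/* was: return PYX_TRANSPORT_LU_COMM_FAILURE; */
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -EINVAL;
	}

	/* ... perform the emulation ... */

	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);
	/* was: return PYX_TRANSPORT_SENT_TO_TRANSPORT; */
	return 0;
}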
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 683ba02b8247..831468b3163d 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -478,7 +478,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
if (cmd->data_length < 60)
return 0;
- buf[2] = 0x3c;
+ buf[3] = 0x3c;
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
@@ -703,6 +703,7 @@ int target_emulate_inquiry(struct se_task *task)
if (cmd->data_length < 4) {
pr_err("SCSI Inquiry payload length: %u"
" too small for EVPD=1\n", cmd->data_length);
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
@@ -719,6 +720,7 @@ int target_emulate_inquiry(struct se_task *task)
}
pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
ret = -EINVAL;
out_unmap:
@@ -969,7 +971,8 @@ int target_emulate_modesense(struct se_task *task)
default:
pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
cdb[2] & 0x3f, cdb[3]);
- return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
+ cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
+ return -EINVAL;
}
offset += length;
@@ -1027,7 +1030,8 @@ int target_emulate_request_sense(struct se_task *task)
if (cdb[1] & 0x01) {
pr_err("REQUEST_SENSE description emulation not"
" supported\n");
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -ENOSYS;
}
buf = transport_kmap_first_data_page(cmd);
@@ -1100,7 +1104,8 @@ int target_emulate_unmap(struct se_task *task)
if (!dev->transport->do_discard) {
pr_err("UNMAP emulation not supported for: %s\n",
dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -ENOSYS;
}
/* First UNMAP block descriptor starts at 8 byte offset */
@@ -1157,7 +1162,8 @@ int target_emulate_write_same(struct se_task *task)
if (!dev->transport->do_discard) {
pr_err("WRITE_SAME emulation not supported"
" for: %s\n", dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -ENOSYS;
}
if (cmd->t_task_cdb[0] == WRITE_SAME)
@@ -1193,11 +1199,13 @@ int target_emulate_write_same(struct se_task *task)
int target_emulate_synchronize_cache(struct se_task *task)
{
struct se_device *dev = task->task_se_cmd->se_dev;
+ struct se_cmd *cmd = task->task_se_cmd;
if (!dev->transport->do_sync_cache) {
pr_err("SYNCHRONIZE_CACHE emulation not supported"
" for: %s\n", dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -ENOSYS;
}
dev->transport->do_sync_cache(task);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index e0c1e8a8dd4e..93d4f6a1b798 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -67,9 +67,6 @@ static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;
-static DEFINE_SPINLOCK(se_device_lock);
-static LIST_HEAD(se_dev_list);
-
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
@@ -2741,7 +2738,6 @@ static struct config_group *target_core_make_subdev(
" struct se_subsystem_dev\n");
goto unlock;
}
- INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
@@ -2777,9 +2773,6 @@ static struct config_group *target_core_make_subdev(
" from allocate_virtdevice()\n");
goto out;
}
- spin_lock(&se_device_lock);
- list_add_tail(&se_dev->se_dev_node, &se_dev_list);
- spin_unlock(&se_device_lock);
config_group_init_type_name(&se_dev->se_dev_group, name,
&target_core_dev_cit);
@@ -2874,10 +2867,6 @@ static void target_core_drop_subdev(
mutex_lock(&hba->hba_access_mutex);
t = hba->transport;
- spin_lock(&se_device_lock);
- list_del(&se_dev->se_dev_node);
- spin_unlock(&se_device_lock);
-
dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
for (i = 0; dev_stat_grp->default_groups[i]; i++) {
df_item = &dev_stat_grp->default_groups[i]->cg_item;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index ba5edec2c5f8..9b8639425472 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -104,7 +104,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_cmd->se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
- se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -137,7 +136,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->orig_fe_lun = 0;
- se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
/*
@@ -200,7 +198,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
- se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -708,7 +705,7 @@ done:
se_task->task_scsi_status = GOOD;
transport_complete_task(se_task, 1);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/* se_release_device_for_hba():
@@ -957,8 +954,12 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
return -EINVAL;
}
- pr_err("dpo_emulated not supported\n");
- return -EINVAL;
+ if (flag) {
+ pr_err("dpo_emulated not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
@@ -968,7 +969,7 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
return -EINVAL;
}
- if (dev->transport->fua_write_emulated == 0) {
+ if (flag && dev->transport->fua_write_emulated == 0) {
pr_err("fua_write_emulated not supported\n");
return -EINVAL;
}
@@ -985,8 +986,12 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
return -EINVAL;
}
- pr_err("ua read emulated not supported\n");
- return -EINVAL;
+ if (flag) {
+ pr_err("ua read emulated not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
@@ -995,7 +1000,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
- if (dev->transport->write_cache_emulated == 0) {
+ if (flag && dev->transport->write_cache_emulated == 0) {
pr_err("write_cache_emulated not supported\n");
return -EINVAL;
}
@@ -1056,7 +1061,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard supported is detected iblock_create_virtdevice().
*/
- if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
@@ -1077,7 +1082,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
* We expect this value to be non-zero when generic Block Layer
* Discard supported is detected iblock_create_virtdevice().
*/
- if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
@@ -1587,7 +1592,6 @@ int core_dev_setup_virtual_lun0(void)
ret = -ENOMEM;
goto out;
}
- INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
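[Editor's illustration, not part of the patch] The se_dev_set_emulate_*() hunks above change the attribute setters so that writing 0 to an unsupported feature now succeeds; only an attempt to enable it when the backend lacks support is rejected. A minimal sketch of that shape, with example_set_emulate_foo, foo_emulated and emulate_foo as hypothetical stand-ins:

static int example_set_emulate_foo(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/* reject only when enabling a feature the backend does not provide */
	if (flag && !dev->transport->foo_emulated) {
		pr_err("foo emulation not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_foo = flag;
	return 0;
}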
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 67cd6fe05bfa..b4864fba4ef0 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -289,9 +289,9 @@ static int fd_do_readv(struct se_task *task)
return -ENOMEM;
}
- for (i = 0; i < task->task_sg_nents; i++) {
- iov[i].iov_len = sg[i].length;
- iov[i].iov_base = sg_virt(&sg[i]);
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ iov[i].iov_len = sg->length;
+ iov[i].iov_base = sg_virt(sg);
}
old_fs = get_fs();
@@ -342,9 +342,9 @@ static int fd_do_writev(struct se_task *task)
return -ENOMEM;
}
- for (i = 0; i < task->task_sg_nents; i++) {
- iov[i].iov_len = sg[i].length;
- iov[i].iov_base = sg_virt(&sg[i]);
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ iov[i].iov_len = sg->length;
+ iov[i].iov_base = sg_virt(sg);
}
old_fs = get_fs();
@@ -438,7 +438,7 @@ static int fd_do_task(struct se_task *task)
if (ret > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
- cmd->t_tasks_fua) {
+ (cmd->se_cmd_flags & SCF_FUA)) {
/*
* We might need to be a bit smarter here
* and return some sense data to let the initiator
@@ -449,13 +449,15 @@ static int fd_do_task(struct se_task *task)
}
- if (ret < 0)
+ if (ret < 0) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return ret;
+ }
if (ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/* fd_free_task(): (Part of se_subsystem_api_t template)
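[Editor's illustration, not part of the patch] fd_do_readv()/fd_do_writev() above drop direct sg[i] indexing in favour of the for_each_sg() iterator, which also walks chained scatterlists correctly. A minimal sketch of the idiom; example_sgl_to_iovec is a hypothetical helper:

static struct iovec *example_sgl_to_iovec(struct scatterlist *sgl, u32 nents)
{
	struct scatterlist *sg;
	struct iovec *iov;
	u32 i;

	iov = kzalloc(sizeof(struct iovec) * nents, GFP_KERNEL);
	if (!iov)
		return NULL;

	/* sg may point into a chained list; for_each_sg() handles the hop */
	for_each_sg(sgl, sg, nents, i) {
		iov[i].iov_len = sg->length;
		iov[i].iov_base = sg_virt(sg);
	}
	return iov;
}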
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 7698efe29262..4aa992204438 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -531,7 +531,7 @@ static int iblock_do_task(struct se_task *task)
*/
if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
- task->task_se_cmd->t_tasks_fua))
+ (cmd->se_cmd_flags & SCF_FUA)))
rw = WRITE_FUA;
else
rw = WRITE;
@@ -554,12 +554,15 @@ static int iblock_do_task(struct se_task *task)
else {
pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOSYS;
}
bio = iblock_get_bio(task, block_lba, sg_num);
- if (!bio)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ if (!bio) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
+ }
bio_list_init(&list);
bio_list_add(&list, bio);
@@ -588,12 +591,13 @@ static int iblock_do_task(struct se_task *task)
submit_bio(rw, bio);
blk_finish_plug(&plug);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
fail:
while ((bio = bio_list_pop(&list)))
bio_put(bio);
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
static u32 iblock_get_device_rev(struct se_device *dev)
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 5a4ebfc3a54f..95dee7074aeb 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -191,7 +191,7 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
pr_err("Received legacy SPC-2 RESERVE/RELEASE"
" while active SPC-3 registrations exist,"
" returning RESERVATION_CONFLICT\n");
- *ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return true;
}
@@ -252,7 +252,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
(cmd->t_task_cdb[1] & 0x02)) {
pr_err("LongIO and Obselete Bits set, returning"
" ILLEGAL_REQUEST\n");
- ret = PYX_TRANSPORT_ILLEGAL_REQUEST;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ ret = -EINVAL;
goto out;
}
/*
@@ -277,7 +278,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
" from %s \n", cmd->se_lun->unpacked_lun,
cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out_unlock;
}
@@ -1510,7 +1512,8 @@ static int core_scsi3_decode_spec_i_port(
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
if (!tidh_new) {
pr_err("Unable to allocate tidh_new\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
tidh_new->dest_tpg = tpg;
@@ -1522,7 +1525,8 @@ static int core_scsi3_decode_spec_i_port(
sa_res_key, all_tg_pt, aptpl);
if (!local_pr_reg) {
kfree(tidh_new);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
tidh_new->dest_pr_reg = local_pr_reg;
/*
@@ -1548,7 +1552,8 @@ static int core_scsi3_decode_spec_i_port(
pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
" does not equal CDB data_length: %u\n", tpdl,
cmd->data_length);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -1598,7 +1603,9 @@ static int core_scsi3_decode_spec_i_port(
" for tmp_tpg\n");
atomic_dec(&tmp_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
/*
@@ -1628,7 +1635,9 @@ static int core_scsi3_decode_spec_i_port(
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
core_scsi3_tpg_undepend_item(tmp_tpg);
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
@@ -1646,7 +1655,8 @@ static int core_scsi3_decode_spec_i_port(
if (!dest_tpg) {
pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
" dest_tpg\n");
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -1660,7 +1670,8 @@ static int core_scsi3_decode_spec_i_port(
" %u for Transport ID: %s\n", tid_len, ptr);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -1678,7 +1689,8 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -1690,7 +1702,9 @@ static int core_scsi3_decode_spec_i_port(
smp_mb__after_atomic_dec();
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -1727,7 +1741,9 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -ENOMEM;
goto out;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
@@ -1759,7 +1775,8 @@ static int core_scsi3_decode_spec_i_port(
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
kfree(tidh_new);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
tidh_new->dest_pr_reg = dest_pr_reg;
@@ -2098,7 +2115,8 @@ static int core_scsi3_emulate_pro_register(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
se_tpg = se_sess->se_tpg;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2117,13 +2135,14 @@ static int core_scsi3_emulate_pro_register(
if (res_key) {
pr_warn("SPC-3 PR: Reservation Key non-zero"
" for SA REGISTER, returning CONFLICT\n");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* Do nothing but return GOOD status.
*/
if (!sa_res_key)
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
if (!spec_i_pt) {
/*
@@ -2138,7 +2157,8 @@ static int core_scsi3_emulate_pro_register(
if (ret != 0) {
pr_err("Unable to allocate"
" struct t10_pr_registration\n");
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
} else {
/*
@@ -2197,14 +2217,16 @@ static int core_scsi3_emulate_pro_register(
" 0x%016Lx\n", res_key,
pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
}
if (spec_i_pt) {
pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
" set while sa_res_key=0\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
* An existing ALL_TG_PT=1 registration being released
@@ -2215,7 +2237,8 @@ static int core_scsi3_emulate_pro_register(
" registration exists, but ALL_TG_PT=1 bit not"
" present in received PROUT\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
/*
* Allocate APTPL metadata buffer used for UNREGISTER ops
@@ -2227,7 +2250,9 @@ static int core_scsi3_emulate_pro_register(
pr_err("Unable to allocate"
" pr_aptpl_buf\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
}
/*
@@ -2241,7 +2266,8 @@ static int core_scsi3_emulate_pro_register(
if (pr_holder < 0) {
kfree(pr_aptpl_buf);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
spin_lock(&pr_tmpl->registration_lock);
@@ -2405,7 +2431,8 @@ static int core_scsi3_pro_reserve(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
se_tpg = se_sess->se_tpg;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2417,7 +2444,8 @@ static int core_scsi3_pro_reserve(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RESERVE\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2433,7 +2461,8 @@ static int core_scsi3_pro_reserve(
" does not match existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2448,7 +2477,8 @@ static int core_scsi3_pro_reserve(
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
* See if we have an existing PR reservation holder pointer at
@@ -2480,7 +2510,8 @@ static int core_scsi3_pro_reserve(
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2503,7 +2534,8 @@ static int core_scsi3_pro_reserve(
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
@@ -2517,7 +2549,7 @@ static int core_scsi3_pro_reserve(
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/*
* Otherwise, our *pr_reg becomes the PR reservation holder for said
@@ -2574,7 +2606,8 @@ static int core_scsi3_emulate_pro_reserve(
default:
pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
" 0x%02x\n", type);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
return ret;
@@ -2630,7 +2663,8 @@ static int core_scsi3_emulate_pro_release(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2639,7 +2673,8 @@ static int core_scsi3_emulate_pro_release(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RELEASE\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing:
@@ -2661,7 +2696,7 @@ static int core_scsi3_emulate_pro_release(
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
(pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
@@ -2675,7 +2710,7 @@ static int core_scsi3_emulate_pro_release(
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing:
@@ -2697,7 +2732,8 @@ static int core_scsi3_emulate_pro_release(
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing and above:
@@ -2719,7 +2755,8 @@ static int core_scsi3_emulate_pro_release(
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* In response to a persistent reservation release request from the
@@ -2802,7 +2839,8 @@ static int core_scsi3_emulate_pro_clear(
if (!pr_reg_n) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for CLEAR\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* From spc4r17 section 5.7.11.6, Clearing:
@@ -2821,7 +2859,8 @@ static int core_scsi3_emulate_pro_clear(
" existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* a) Release the persistent reservation, if any;
@@ -2979,8 +3018,10 @@ static int core_scsi3_pro_preempt(
int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
int prh_type = 0, prh_scope = 0, ret;
- if (!se_sess)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ if (!se_sess) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
@@ -2989,16 +3030,19 @@ static int core_scsi3_pro_preempt(
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for PREEMPT%s\n",
(abort) ? "_AND_ABORT" : "");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
if (pr_reg_n->pr_res_key != res_key) {
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
INIT_LIST_HEAD(&preempt_and_abort_list);
@@ -3012,7 +3056,8 @@ static int core_scsi3_pro_preempt(
if (!all_reg && !sa_res_key) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
* From spc4r17, section 5.7.11.4.4 Removing Registrations:
@@ -3106,7 +3151,8 @@ static int core_scsi3_pro_preempt(
if (!released_regs) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* For an existing all registrants type reservation
@@ -3297,7 +3343,8 @@ static int core_scsi3_emulate_pro_preempt(
default:
pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
" Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
return ret;
@@ -3331,7 +3378,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
memset(dest_iport, 0, 64);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
@@ -3349,7 +3397,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
" *pr_reg for REGISTER_AND_MOVE\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* The provided reservation key must match the existing reservation key
@@ -3360,7 +3409,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" res_key: 0x%016Lx does not match existing SA REGISTER"
" res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* The service active reservation key needs to be non zero
@@ -3369,7 +3419,8 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
" sa_res_key\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
@@ -3392,7 +3443,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" does not equal CDB data_length: %u\n", tid_len,
cmd->data_length);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
spin_lock(&dev->se_port_lock);
@@ -3417,7 +3469,8 @@ static int core_scsi3_emulate_pro_register_and_move(
atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
spin_lock(&dev->se_port_lock);
@@ -3430,7 +3483,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" fabric ops from Relative Target Port Identifier:"
" %hu\n", rtpi);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -3445,14 +3499,16 @@ static int core_scsi3_emulate_pro_register_and_move(
" from fabric: %s\n", proto_ident,
dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
dest_tf_ops->get_fabric_name());
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
" containg a valid tpg_parse_pr_out_transport_id"
" function pointer\n");
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
@@ -3460,7 +3516,8 @@ static int core_scsi3_emulate_pro_register_and_move(
if (!initiator_str) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" initiator_str from Transport ID\n");
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -3489,7 +3546,8 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
" matches: %s on received I_T Nexus\n", initiator_str,
pr_reg_nacl->initiatorname);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
@@ -3497,7 +3555,8 @@ static int core_scsi3_emulate_pro_register_and_move(
" matches: %s %s on received I_T Nexus\n",
initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
pr_reg->pr_reg_isid);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
after_iport_check:
@@ -3517,7 +3576,8 @@ after_iport_check:
pr_err("Unable to locate %s dest_node_acl for"
" TransportID%s\n", dest_tf_ops->get_fabric_name(),
initiator_str);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
@@ -3527,7 +3587,8 @@ after_iport_check:
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
dest_node_acl = NULL;
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -3543,7 +3604,8 @@ after_iport_check:
if (!dest_se_deve) {
pr_err("Unable to locate %s dest_se_deve from RTPI:"
" %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -3553,7 +3615,8 @@ after_iport_check:
atomic_dec(&dest_se_deve->pr_ref_count);
smp_mb__after_atomic_dec();
dest_se_deve = NULL;
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
#if 0
@@ -3572,7 +3635,8 @@ after_iport_check:
pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
" currently held\n");
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3585,7 +3649,8 @@ after_iport_check:
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
" Nexus is not reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3603,7 +3668,8 @@ after_iport_check:
" reservation for type: %s\n",
core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out;
}
pr_res_nacl = pr_res_holder->pr_reg_nacl;
@@ -3640,7 +3706,8 @@ after_iport_check:
sa_res_key, 0, aptpl, 2, 1);
if (ret != 0) {
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
@@ -3771,7 +3838,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out;
}
@@ -3779,13 +3847,16 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
* FIXME: A NULL struct se_session pointer means this is not coming from
* a $FABRIC_MOD's nexus, but from internal passthrough ops.
*/
- if (!cmd->se_sess)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ if (!cmd->se_sess) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
if (cmd->data_length < 24) {
pr_warn("SPC-PR: Received PR OUT parameter list"
" length too small: %u\n", cmd->data_length);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3820,7 +3891,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
* SPEC_I_PT=1 is only valid for Service action: REGISTER
*/
if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
@@ -3837,7 +3909,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
(cmd->data_length != 24)) {
pr_warn("SPC-PR: Received PR OUT illegal parameter"
" list length: %u\n", cmd->data_length);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
@@ -3878,7 +3951,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
default:
pr_err("Unknown PERSISTENT_RESERVE_OUT service"
" action: 0x%02x\n", cdb[1] & 0x1f);
- ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
break;
}
@@ -3906,7 +3980,8 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -3965,7 +4040,8 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -4047,7 +4123,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
if (cmd->data_length < 6) {
pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
" %u too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -4108,7 +4185,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
@@ -4255,7 +4333,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
switch (cmd->t_task_cdb[1] & 0x1f) {
@@ -4274,7 +4353,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
default:
pr_err("Unknown PERSISTENT_RESERVE_IN service"
" action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
- ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
break;
}
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index ed32e1efe429..8b15e56b0384 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -963,6 +963,7 @@ static inline struct bio *pscsi_get_bio(int sg_num)
static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
struct bio **hbio)
{
+ struct se_cmd *cmd = task->task_se_cmd;
struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
u32 task_sg_num = task->task_sg_nents;
struct bio *bio = NULL, *tbio = NULL;
@@ -971,7 +972,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
u32 data_len = task->task_size, i, len, bytes, off;
int nr_pages = (task->task_size + task_sg[0].offset +
PAGE_SIZE - 1) >> PAGE_SHIFT;
- int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ int nr_vecs = 0, rc;
int rw = (task->task_data_direction == DMA_TO_DEVICE);
*hbio = NULL;
@@ -1058,11 +1059,13 @@ fail:
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
- return ret;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
static int pscsi_do_task(struct se_task *task)
{
+ struct se_cmd *cmd = task->task_se_cmd;
struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
struct request *req;
@@ -1078,7 +1081,9 @@ static int pscsi_do_task(struct se_task *task)
if (!req || IS_ERR(req)) {
pr_err("PSCSI: blk_get_request() failed: %ld\n",
req ? IS_ERR(req) : -ENOMEM);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENODEV;
}
} else {
BUG_ON(!task->task_size);
@@ -1087,8 +1092,11 @@ static int pscsi_do_task(struct se_task *task)
* Setup the main struct request for the task->task_sg[] payload
*/
ret = pscsi_map_sg(task, task->task_sg, &hbio);
- if (ret < 0)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ if (ret < 0) {
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return ret;
+ }
req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
GFP_KERNEL);
@@ -1115,7 +1123,7 @@ static int pscsi_do_task(struct se_task *task)
(task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
pscsi_req_done);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
fail:
while (hbio) {
@@ -1124,7 +1132,8 @@ fail:
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
/* pscsi_get_sense_buffer():
@@ -1198,9 +1207,8 @@ static inline void pscsi_process_SAM_status(
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
pt->pscsi_result);
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
- task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- task->task_se_cmd->transport_error_status =
- PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ task->task_se_cmd->scsi_sense_reason =
+ TCM_UNSUPPORTED_SCSI_OPCODE;
transport_complete_task(task, 0);
break;
}
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 5158d3846f19..02e51faa2f4e 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -343,235 +343,74 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return NULL;
}
-/* rd_MEMCPY_read():
- *
- *
- */
-static int rd_MEMCPY_read(struct rd_request *req)
+static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
{
struct se_task *task = &req->rd_task;
struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
struct rd_dev_sg_table *table;
- struct scatterlist *sg_d, *sg_s;
- void *dst, *src;
- u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
- u32 length, page_end = 0, table_sg_end;
+ struct scatterlist *rd_sg;
+ struct sg_mapping_iter m;
u32 rd_offset = req->rd_offset;
+ u32 src_len;
table = rd_get_sg_table(dev, req->rd_page);
if (!table)
return -EINVAL;
- table_sg_end = (table->page_end_offset - req->rd_page);
- sg_d = task->task_sg;
- sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+ rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
- pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
- " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
- req->rd_page, req->rd_offset);
-
- src_offset = rd_offset;
+ pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
+ dev->rd_dev_id, read_rd ? "Read" : "Write",
+ task->task_lba, req->rd_size, req->rd_page,
+ rd_offset);
+ src_len = PAGE_SIZE - rd_offset;
+ sg_miter_start(&m, task->task_sg, task->task_sg_nents,
+ read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
while (req->rd_size) {
- if ((sg_d[i].length - dst_offset) <
- (sg_s[j].length - src_offset)) {
- length = (sg_d[i].length - dst_offset);
-
- pr_debug("Step 1 - sg_d[%d]: %p length: %d"
- " offset: %u sg_s[%d].length: %u\n", i,
- &sg_d[i], sg_d[i].length, sg_d[i].offset, j,
- sg_s[j].length);
- pr_debug("Step 1 - length: %u dst_offset: %u"
- " src_offset: %u\n", length, dst_offset,
- src_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- dst = sg_virt(&sg_d[i++]) + dst_offset;
- BUG_ON(!dst);
-
- src = sg_virt(&sg_s[j]) + src_offset;
- BUG_ON(!src);
-
- dst_offset = 0;
- src_offset = length;
- page_end = 0;
- } else {
- length = (sg_s[j].length - src_offset);
-
- pr_debug("Step 2 - sg_d[%d]: %p length: %d"
- " offset: %u sg_s[%d].length: %u\n", i,
- &sg_d[i], sg_d[i].length, sg_d[i].offset,
- j, sg_s[j].length);
- pr_debug("Step 2 - length: %u dst_offset: %u"
- " src_offset: %u\n", length, dst_offset,
- src_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- dst = sg_virt(&sg_d[i]) + dst_offset;
- BUG_ON(!dst);
-
- if (sg_d[i].length == length) {
- i++;
- dst_offset = 0;
- } else
- dst_offset = length;
-
- src = sg_virt(&sg_s[j++]) + src_offset;
- BUG_ON(!src);
-
- src_offset = 0;
- page_end = 1;
- }
+ u32 len;
+ void *rd_addr;
- memcpy(dst, src, length);
+ sg_miter_next(&m);
+ len = min((u32)m.length, src_len);
+ m.consumed = len;
- pr_debug("page: %u, remaining size: %u, length: %u,"
- " i: %u, j: %u\n", req->rd_page,
- (req->rd_size - length), length, i, j);
+ rd_addr = sg_virt(rd_sg) + rd_offset;
- req->rd_size -= length;
- if (!req->rd_size)
- return 0;
+ if (read_rd)
+ memcpy(m.addr, rd_addr, len);
+ else
+ memcpy(rd_addr, m.addr, len);
- if (!page_end)
+ req->rd_size -= len;
+ if (!req->rd_size)
continue;
- if (++req->rd_page <= table->page_end_offset) {
- pr_debug("page: %u in same page table\n",
- req->rd_page);
+ src_len -= len;
+ if (src_len) {
+ rd_offset += len;
continue;
}
- pr_debug("getting new page table for page: %u\n",
- req->rd_page);
-
- table = rd_get_sg_table(dev, req->rd_page);
- if (!table)
- return -EINVAL;
-
- sg_s = &table->sg_table[j = 0];
- }
-
- return 0;
-}
-
-/* rd_MEMCPY_write():
- *
- *
- */
-static int rd_MEMCPY_write(struct rd_request *req)
-{
- struct se_task *task = &req->rd_task;
- struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
- struct rd_dev_sg_table *table;
- struct scatterlist *sg_d, *sg_s;
- void *dst, *src;
- u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
- u32 length, page_end = 0, table_sg_end;
- u32 rd_offset = req->rd_offset;
-
- table = rd_get_sg_table(dev, req->rd_page);
- if (!table)
- return -EINVAL;
-
- table_sg_end = (table->page_end_offset - req->rd_page);
- sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
- sg_s = task->task_sg;
-
- pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
- " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
- req->rd_page, req->rd_offset);
-
- dst_offset = rd_offset;
-
- while (req->rd_size) {
- if ((sg_s[i].length - src_offset) <
- (sg_d[j].length - dst_offset)) {
- length = (sg_s[i].length - src_offset);
-
- pr_debug("Step 1 - sg_s[%d]: %p length: %d"
- " offset: %d sg_d[%d].length: %u\n", i,
- &sg_s[i], sg_s[i].length, sg_s[i].offset,
- j, sg_d[j].length);
- pr_debug("Step 1 - length: %u src_offset: %u"
- " dst_offset: %u\n", length, src_offset,
- dst_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- src = sg_virt(&sg_s[i++]) + src_offset;
- BUG_ON(!src);
-
- dst = sg_virt(&sg_d[j]) + dst_offset;
- BUG_ON(!dst);
-
- src_offset = 0;
- dst_offset = length;
- page_end = 0;
- } else {
- length = (sg_d[j].length - dst_offset);
-
- pr_debug("Step 2 - sg_s[%d]: %p length: %d"
- " offset: %d sg_d[%d].length: %u\n", i,
- &sg_s[i], sg_s[i].length, sg_s[i].offset,
- j, sg_d[j].length);
- pr_debug("Step 2 - length: %u src_offset: %u"
- " dst_offset: %u\n", length, src_offset,
- dst_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- src = sg_virt(&sg_s[i]) + src_offset;
- BUG_ON(!src);
-
- if (sg_s[i].length == length) {
- i++;
- src_offset = 0;
- } else
- src_offset = length;
-
- dst = sg_virt(&sg_d[j++]) + dst_offset;
- BUG_ON(!dst);
-
- dst_offset = 0;
- page_end = 1;
- }
-
- memcpy(dst, src, length);
-
- pr_debug("page: %u, remaining size: %u, length: %u,"
- " i: %u, j: %u\n", req->rd_page,
- (req->rd_size - length), length, i, j);
-
- req->rd_size -= length;
- if (!req->rd_size)
- return 0;
-
- if (!page_end)
- continue;
-
- if (++req->rd_page <= table->page_end_offset) {
- pr_debug("page: %u in same page table\n",
- req->rd_page);
+ /* rd page completed, next one please */
+ req->rd_page++;
+ rd_offset = 0;
+ src_len = PAGE_SIZE;
+ if (req->rd_page <= table->page_end_offset) {
+ rd_sg++;
continue;
}
- pr_debug("getting new page table for page: %u\n",
- req->rd_page);
-
table = rd_get_sg_table(dev, req->rd_page);
- if (!table)
+ if (!table) {
+ sg_miter_stop(&m);
return -EINVAL;
+ }
- sg_d = &table->sg_table[j = 0];
+ /* since we increment, the first sg entry is correct */
+ rd_sg = table->sg_table;
}
-
+ sg_miter_stop(&m);
return 0;
}
@@ -583,28 +422,21 @@ static int rd_MEMCPY_do_task(struct se_task *task)
{
struct se_device *dev = task->task_se_cmd->se_dev;
struct rd_request *req = RD_REQ(task);
- unsigned long long lba;
+ u64 tmp;
int ret;
- req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
- lba = task->task_lba;
- req->rd_offset = (do_div(lba,
- (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
- dev->se_sub_dev->se_dev_attrib.block_size;
+ tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+ req->rd_offset = do_div(tmp, PAGE_SIZE);
+ req->rd_page = tmp;
req->rd_size = task->task_size;
- if (task->task_data_direction == DMA_FROM_DEVICE)
- ret = rd_MEMCPY_read(req);
- else
- ret = rd_MEMCPY_write(req);
-
+ ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
if (ret != 0)
return ret;
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
-
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/* rd_free_task(): (Part of se_subsystem_api_t template)
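[Editor's illustration, not part of the patch] The rd_MEMCPY() rewrite above collapses the separate read and write copy loops into one routine built on the generic sg_mapping_iter helpers instead of hand-maintained source/destination offsets. A minimal sketch of that API; example_copy_from_sgl is a hypothetical helper:

static void example_copy_from_sgl(void *dst, struct scatterlist *sgl,
				  unsigned int nents, size_t len)
{
	struct sg_mapping_iter m;
	unsigned char *p = dst;

	/* the sg list is the data source, hence SG_MITER_FROM_SG */
	sg_miter_start(&m, sgl, nents, SG_MITER_FROM_SG);
	while (len && sg_miter_next(&m)) {
		size_t chunk = min(m.length, len);

		memcpy(p, m.addr, chunk);
		p += chunk;
		len -= chunk;
		m.consumed = chunk;	/* advance only by what was consumed */
	}
	sg_miter_stop(&m);
}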
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 217e29df6297..684522805a1f 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -345,10 +345,6 @@ static void core_tmr_drain_cmd_list(
" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
"Preempt" : "", cmd, cmd->t_state,
atomic_read(&cmd->t_fe_count));
- /*
- * Signal that the command has failed via cmd->se_cmd_flags,
- */
- transport_new_cmd_failure(cmd);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
atomic_read(&cmd->t_fe_count));
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 3400ae6e93f8..0257658e2e3e 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -61,7 +61,6 @@
static int sub_api_initialized;
static struct workqueue_struct *target_completion_wq;
-static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
struct kmem_cache *se_ua_cache;
@@ -82,24 +81,18 @@ static int transport_generic_get_mem(struct se_cmd *cmd);
static void transport_put_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
-static void transport_generic_request_failure(struct se_cmd *, int, int);
+static void transport_generic_request_failure(struct se_cmd *);
static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
{
- se_cmd_cache = kmem_cache_create("se_cmd_cache",
- sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
- if (!se_cmd_cache) {
- pr_err("kmem_cache_create for struct se_cmd failed\n");
- goto out;
- }
se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
0, NULL);
if (!se_tmr_req_cache) {
pr_err("kmem_cache_create() for struct se_tmr_req"
" failed\n");
- goto out_free_cmd_cache;
+ goto out;
}
se_sess_cache = kmem_cache_create("se_sess_cache",
sizeof(struct se_session), __alignof__(struct se_session),
@@ -182,8 +175,6 @@ out_free_sess_cache:
kmem_cache_destroy(se_sess_cache);
out_free_tmr_req_cache:
kmem_cache_destroy(se_tmr_req_cache);
-out_free_cmd_cache:
- kmem_cache_destroy(se_cmd_cache);
out:
return -ENOMEM;
}
@@ -191,7 +182,6 @@ out:
void release_se_kmem_caches(void)
{
destroy_workqueue(target_completion_wq);
- kmem_cache_destroy(se_cmd_cache);
kmem_cache_destroy(se_tmr_req_cache);
kmem_cache_destroy(se_sess_cache);
kmem_cache_destroy(se_ua_cache);
@@ -680,9 +670,9 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
task->task_scsi_status = GOOD;
} else {
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
- task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
- task->task_se_cmd->transport_error_status =
- PYX_TRANSPORT_ILLEGAL_REQUEST;
+ task->task_se_cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
}
transport_complete_task(task, good);
@@ -693,7 +683,7 @@ static void target_complete_failure_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
- transport_generic_request_failure(cmd, 1, 1);
+ transport_generic_request_failure(cmd);
}
/* transport_complete_task():
@@ -755,10 +745,11 @@ void transport_complete_task(struct se_task *task, int success)
if (cmd->t_tasks_failed) {
if (!task->task_error_status) {
task->task_error_status =
- PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- cmd->transport_error_status =
- PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
+
INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
atomic_set(&cmd->t_transport_complete, 1);
@@ -1335,23 +1326,17 @@ struct se_device *transport_add_device_to_core_hba(
dev->se_hba = hba;
dev->se_sub_dev = se_dev;
dev->transport = transport;
- atomic_set(&dev->active_cmds, 0);
INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_sep_list);
INIT_LIST_HEAD(&dev->dev_tmr_list);
INIT_LIST_HEAD(&dev->execute_task_list);
INIT_LIST_HEAD(&dev->delayed_cmd_list);
- INIT_LIST_HEAD(&dev->ordered_cmd_list);
INIT_LIST_HEAD(&dev->state_task_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
- spin_lock_init(&dev->ordered_cmd_lock);
- spin_lock_init(&dev->state_task_lock);
- spin_lock_init(&dev->dev_alua_lock);
spin_lock_init(&dev->dev_reservation_lock);
spin_lock_init(&dev->dev_status_lock);
- spin_lock_init(&dev->dev_status_thr_lock);
spin_lock_init(&dev->se_port_lock);
spin_lock_init(&dev->se_tmr_lock);
spin_lock_init(&dev->qf_cmd_lock);
@@ -1507,7 +1492,6 @@ void transport_init_se_cmd(
{
INIT_LIST_HEAD(&cmd->se_lun_node);
INIT_LIST_HEAD(&cmd->se_delayed_node);
- INIT_LIST_HEAD(&cmd->se_ordered_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
INIT_LIST_HEAD(&cmd->se_queue_node);
INIT_LIST_HEAD(&cmd->se_cmd_list);
@@ -1573,6 +1557,8 @@ int transport_generic_allocate_tasks(
pr_err("Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
/*
@@ -1588,6 +1574,9 @@ int transport_generic_allocate_tasks(
" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
scsi_command_size(cdb),
(unsigned long)sizeof(cmd->__t_task_cdb));
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOMEM;
}
} else
@@ -1658,11 +1647,9 @@ int transport_handle_cdb_direct(
* and call transport_generic_request_failure() if necessary..
*/
ret = transport_generic_new_cmd(cmd);
- if (ret < 0) {
- cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd, 0,
- (cmd->data_direction != DMA_TO_DEVICE));
- }
+ if (ret < 0)
+ transport_generic_request_failure(cmd);
+
return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
@@ -1798,20 +1785,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
/*
* Handle SAM-esque emulation for generic transport request failures.
*/
-static void transport_generic_request_failure(
- struct se_cmd *cmd,
- int complete,
- int sc)
+static void transport_generic_request_failure(struct se_cmd *cmd)
{
int ret = 0;
pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
cmd->t_task_cdb[0]);
- pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",
+ pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
cmd->se_tfo->get_cmd_state(cmd),
- cmd->t_state,
- cmd->transport_error_status);
+ cmd->t_state, cmd->scsi_sense_reason);
pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
" t_transport_active: %d t_transport_stop: %d"
@@ -1829,46 +1812,19 @@ static void transport_generic_request_failure(
if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
transport_complete_task_attr(cmd);
- if (complete) {
- cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
- }
-
- switch (cmd->transport_error_status) {
- case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- break;
- case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
- cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
- break;
- case PYX_TRANSPORT_INVALID_CDB_FIELD:
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- break;
- case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- break;
- case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
- if (!sc)
- transport_new_cmd_failure(cmd);
- /*
- * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
- * we force this session to fall back to session
- * recovery.
- */
- cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
- cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
-
- goto check_stop;
- case PYX_TRANSPORT_LU_COMM_FAILURE:
- case PYX_TRANSPORT_ILLEGAL_REQUEST:
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- break;
- case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
- cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
- break;
- case PYX_TRANSPORT_WRITE_PROTECTED:
- cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ switch (cmd->scsi_sense_reason) {
+ case TCM_NON_EXISTENT_LUN:
+ case TCM_UNSUPPORTED_SCSI_OPCODE:
+ case TCM_INVALID_CDB_FIELD:
+ case TCM_INVALID_PARAMETER_LIST:
+ case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+ case TCM_UNKNOWN_MODE_PAGE:
+ case TCM_WRITE_PROTECTED:
+ case TCM_CHECK_CONDITION_ABORT_CMD:
+ case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+ case TCM_CHECK_CONDITION_NOT_READY:
break;
- case PYX_TRANSPORT_RESERVATION_CONFLICT:
+ case TCM_RESERVATION_CONFLICT:
/*
* No SENSE Data payload for this case, set SCSI Status
* and queue the response to $FABRIC_MOD.
@@ -1893,15 +1849,9 @@ static void transport_generic_request_failure(
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
goto check_stop;
- case PYX_TRANSPORT_USE_SENSE_REASON:
- /*
- * struct se_cmd->scsi_sense_reason already set
- */
- break;
default:
pr_err("Unknown transport error for CDB 0x%02x: %d\n",
- cmd->t_task_cdb[0],
- cmd->transport_error_status);
+ cmd->t_task_cdb[0], cmd->scsi_sense_reason);
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
break;
}
@@ -1912,14 +1862,10 @@ static void transport_generic_request_failure(
* transport_send_check_condition_and_sense() after handling
 * possible unsolicited write data payloads.
*/
- if (!sc && !cmd->se_tfo->new_cmd_map)
- transport_new_cmd_failure(cmd);
- else {
- ret = transport_send_check_condition_and_sense(cmd,
- cmd->scsi_sense_reason, 0);
- if (ret == -EAGAIN || ret == -ENOMEM)
- goto queue_full;
- }
+ ret = transport_send_check_condition_and_sense(cmd,
+ cmd->scsi_sense_reason, 0);
+ if (ret == -EAGAIN || ret == -ENOMEM)
+ goto queue_full;
check_stop:
transport_lun_remove_cmd(cmd);
@@ -2002,19 +1948,12 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
* to allow the passed struct se_cmd list of tasks to the front of the list.
*/
if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- atomic_inc(&cmd->se_dev->dev_hoq_count);
- smp_mb__after_atomic_inc();
pr_debug("Added HEAD_OF_QUEUE for CDB:"
" 0x%02x, se_ordered_id: %u\n",
cmd->t_task_cdb[0],
cmd->se_ordered_id);
return 1;
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- spin_lock(&cmd->se_dev->ordered_cmd_lock);
- list_add_tail(&cmd->se_ordered_node,
- &cmd->se_dev->ordered_cmd_list);
- spin_unlock(&cmd->se_dev->ordered_cmd_lock);
-
atomic_inc(&cmd->se_dev->dev_ordered_sync);
smp_mb__after_atomic_inc();
@@ -2076,9 +2015,9 @@ static int transport_execute_tasks(struct se_cmd *cmd)
{
int add_tasks;
- if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
- cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
- transport_generic_request_failure(cmd, 0, 1);
+ if (se_dev_check_online(cmd->se_dev) != 0) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ transport_generic_request_failure(cmd);
return 0;
}
@@ -2163,14 +2102,13 @@ check_depth:
else
error = dev->transport->do_task(task);
if (error != 0) {
- cmd->transport_error_status = error;
spin_lock_irqsave(&cmd->t_state_lock, flags);
task->task_flags &= ~TF_ACTIVE;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
atomic_set(&cmd->t_transport_sent, 0);
transport_stop_tasks_for_cmd(cmd);
atomic_inc(&dev->depth_left);
- transport_generic_request_failure(cmd, 0, 1);
+ transport_generic_request_failure(cmd);
}
goto check_depth;
@@ -2178,19 +2116,6 @@ check_depth:
return 0;
}
-void transport_new_cmd_failure(struct se_cmd *se_cmd)
-{
- unsigned long flags;
- /*
- * Any unsolicited data will get dumped for failed command inside of
- * the fabric plugin
- */
- spin_lock_irqsave(&se_cmd->t_state_lock, flags);
- se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-}
-
static inline u32 transport_get_sectors_6(
unsigned char *cdb,
struct se_cmd *cmd,
@@ -2213,10 +2138,15 @@ static inline u32 transport_get_sectors_6(
/*
* Everything else assume TYPE_DISK Sector CDB location.
- * Use 8-bit sector value.
+ * Use 8-bit sector value. SBC-3 says:
+ *
+ * A TRANSFER LENGTH field set to zero specifies that 256
+ * logical blocks shall be written. Any other value
+ * specifies the number of logical blocks that shall be
+ * written.
*/
type_disk:
- return (u32)cdb[4];
+ return cdb[4] ? : 256;
}
static inline u32 transport_get_sectors_10(
@@ -2460,27 +2390,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
return -1;
}
-static int
-transport_handle_reservation_conflict(struct se_cmd *cmd)
-{
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
- cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
- /*
- * For UA Interlock Code 11b, a RESERVATION CONFLICT will
- * establish a UNIT ATTENTION with PREVIOUS RESERVATION
- * CONFLICT STATUS.
- *
- * See spc4r17, section 7.4.6 Control Mode Page, Table 349
- */
- if (cmd->se_sess &&
- cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
- core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
- cmd->orig_fe_lun, 0x2C,
- ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
- return -EINVAL;
-}
-
static inline long long transport_dev_end_lba(struct se_device *dev)
{
return dev->transport->get_blocks(dev) + 1;
@@ -2595,8 +2504,12 @@ static int transport_generic_cmd_sequencer(
*/
if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
- cmd, cdb, pr_reg_type) != 0)
- return transport_handle_reservation_conflict(cmd);
+ cmd, cdb, pr_reg_type) != 0) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EBUSY;
+ }
/*
* This means the CDB is allowed for the SCSI Initiator port
* when said port is *NOT* holding the legacy SPC-2 or
@@ -2658,7 +2571,8 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_12:
@@ -2667,7 +2581,8 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_16:
@@ -2676,12 +2591,13 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_64(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case XDWRITEREAD_10:
if ((cmd->data_direction != DMA_TO_DEVICE) ||
- !(cmd->t_tasks_bidi))
+ !(cmd->se_cmd_flags & SCF_BIDI))
goto out_invalid_cdb_field;
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
@@ -2700,7 +2616,8 @@ static int transport_generic_cmd_sequencer(
* Setup BIDI XOR callback to be run after I/O completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
break;
case VARIABLE_LENGTH_CMD:
service_action = get_unaligned_be16(&cdb[8]);
@@ -2728,7 +2645,8 @@ static int transport_generic_cmd_sequencer(
* completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
- cmd->t_tasks_fua = (cdb[10] & 0x8);
+ if (cdb[10] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
break;
case WRITE_SAME_32:
sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
@@ -3171,18 +3089,13 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- atomic_dec(&dev->dev_hoq_count);
- smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for"
" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- spin_lock(&dev->ordered_cmd_lock);
- list_del(&cmd->se_ordered_node);
atomic_dec(&dev->dev_ordered_sync);
smp_mb__after_atomic_dec();
- spin_unlock(&dev->ordered_cmd_lock);
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -3495,6 +3408,18 @@ int transport_generic_map_mem_to_cmd(
if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
+ /*
+ * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+ * scatterlists already have been set to follow what the fabric
+ * passes for the original expected data transfer length.
+ */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ pr_warn("Rejecting SCSI DATA overflow for fabric using"
+ " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
+ }
cmd->t_data_sg = sgl;
cmd->t_data_nents = sgl_count;
@@ -3813,7 +3738,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
cmd->data_length) {
ret = transport_generic_get_mem(cmd);
if (ret < 0)
- return ret;
+ goto out_fail;
}
/*
@@ -3842,8 +3767,15 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
task_cdbs = transport_allocate_control_task(cmd);
}
- if (task_cdbs <= 0)
+ if (task_cdbs < 0)
goto out_fail;
+ else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+ cmd->t_state = TRANSPORT_COMPLETE;
+ atomic_set(&cmd->t_transport_active, 1);
+ INIT_WORK(&cmd->work, target_complete_ok_work);
+ queue_work(target_completion_wq, &cmd->work);
+ return 0;
+ }
if (set_counts) {
atomic_inc(&cmd->t_fe_count);
@@ -3929,7 +3861,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
else if (ret < 0)
return ret;
- return PYX_TRANSPORT_WRITE_PENDING;
+ return 1;
queue_full:
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
@@ -4602,9 +4534,6 @@ void transport_send_task_abort(struct se_cmd *cmd)
if (cmd->se_tfo->write_pending_status(cmd) != 0) {
atomic_inc(&cmd->t_transport_aborted);
smp_mb__after_atomic_inc();
- cmd->scsi_status = SAM_STAT_TASK_ABORTED;
- transport_new_cmd_failure(cmd);
- return;
}
}
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
@@ -4670,8 +4599,6 @@ static int transport_processing_thread(void *param)
struct se_cmd *cmd;
struct se_device *dev = (struct se_device *) param;
- set_user_nice(current, -20);
-
while (!kthread_should_stop()) {
ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
atomic_read(&dev->dev_queue_obj.queue_cnt) ||
@@ -4698,18 +4625,13 @@ get_cmd:
}
ret = cmd->se_tfo->new_cmd_map(cmd);
if (ret < 0) {
- cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd,
- 0, (cmd->data_direction !=
- DMA_TO_DEVICE));
+ transport_generic_request_failure(cmd);
break;
}
ret = transport_generic_new_cmd(cmd);
if (ret < 0) {
- cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd,
- 0, (cmd->data_direction !=
- DMA_TO_DEVICE));
+ transport_generic_request_failure(cmd);
+ break;
}
break;
case TRANSPORT_PROCESS_WRITE:
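Editor's note: the target_core_transport.c hunks above all converge on one error-reporting convention, so a minimal sketch of the resulting caller pattern may help. example_submit() below is a hypothetical wrapper, not a function from the patch; it only assumes the TCM_* sense codes and the single-argument transport_generic_request_failure() shown in the hunks.

static void example_submit(struct se_cmd *cmd)
{
	int ret;

	/*
	 * Failure paths no longer stash a PYX_TRANSPORT_* value in
	 * cmd->transport_error_status; they record a TCM_* sense reason
	 * on the command and let the one-argument failure handler build
	 * the CHECK CONDITION response from cmd->scsi_sense_reason.
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret < 0) {
		/* most paths set a reason themselves; fall back if not */
		if (!cmd->scsi_sense_reason)
			cmd->scsi_sense_reason =
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		transport_generic_request_failure(cmd);
	}
}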
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 4fac37c4c615..71fc9cea5dc9 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -200,7 +200,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
lport = ep->lp;
fp = fc_frame_alloc(lport, sizeof(*txrdy));
if (!fp)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ return -ENOMEM; /* Signal QUEUE_FULL */
txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
memset(txrdy, 0, sizeof(*txrdy));
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 5f770412ca40..9402b7387cac 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -436,8 +436,7 @@ static void ft_del_lport(struct se_wwn *wwn)
struct ft_lport_acl *lacl = container_of(wwn,
struct ft_lport_acl, fc_lport_wwn);
- pr_debug("del lport %s\n",
- config_item_name(&wwn->wwn_group.cg_item));
+ pr_debug("del lport %s\n", lacl->name);
mutex_lock(&ft_lport_lock);
list_del(&lacl->list);
mutex_unlock(&ft_lport_lock);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index e8c564a53346..a8078d0638fa 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1458,6 +1458,16 @@ static const struct usb_device_id acm_ids[] = {
},
{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
},
+ /* Motorola H24 HSPA module: */
+ { USB_DEVICE(0x22b8, 0x2d91) }, /* modem */
+ { USB_DEVICE(0x22b8, 0x2d92) }, /* modem + diagnostics */
+ { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port */
+ { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics */
+ { USB_DEVICE(0x22b8, 0x2d96) }, /* modem + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d97) }, /* modem + diagnostics + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */
+
{ USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
.driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
data interface instead of
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index 4730016d7cd4..45f422ac103f 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -1959,7 +1959,7 @@ static int amd5536_start(struct usb_gadget_driver *driver,
u32 tmp;
if (!driver || !bind || !driver->setup
- || driver->speed != USB_SPEED_HIGH)
+ || driver->speed < USB_SPEED_HIGH)
return -EINVAL;
if (!dev)
return -ENODEV;
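Editor's note on the recurring "driver->speed != USB_SPEED_HIGH" to "driver->speed < USB_SPEED_HIGH" conversions in this and the following gadget drivers: they rely on enum usb_device_speed being ordered from slowest to fastest, so the comparison still rejects gadget drivers that only support a slower speed while no longer refusing ones that declare a higher (e.g. super) speed. Roughly the ordering assumed, per include/linux/usb/ch9.h of this era:

enum usb_device_speed {
	USB_SPEED_UNKNOWN = 0,		/* device still enumerating */
	USB_SPEED_LOW, USB_SPEED_FULL,	/* USB 1.1 */
	USB_SPEED_HIGH,			/* USB 2.0 */
	USB_SPEED_WIRELESS,		/* wireless USB 2.5 */
	USB_SPEED_SUPER,		/* USB 3.0 */
};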
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index c39d58860fa0..1a6f415c0d02 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -2975,6 +2975,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
fsg_common_put(common);
usb_free_descriptors(fsg->function.descriptors);
usb_free_descriptors(fsg->function.hs_descriptors);
+ usb_free_descriptors(fsg->function.ss_descriptors);
kfree(fsg);
}
diff --git a/drivers/usb/gadget/f_serial.c b/drivers/usb/gadget/f_serial.c
index 91fdf790ed20..cf33a8d0fd5d 100644
--- a/drivers/usb/gadget/f_serial.c
+++ b/drivers/usb/gadget/f_serial.c
@@ -131,8 +131,8 @@ static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
}
if (!gser->port.in->desc || !gser->port.out->desc) {
DBG(cdev, "activate generic ttyGS%d\n", gser->port_num);
- if (!config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
- !config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
+ if (config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
+ config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
gser->port.in->desc = NULL;
gser->port.out->desc = NULL;
return -EINVAL;
diff --git a/drivers/usb/gadget/fsl_mxc_udc.c b/drivers/usb/gadget/fsl_mxc_udc.c
index 43a49ecc1f36..dcbc0a2e48dd 100644
--- a/drivers/usb/gadget/fsl_mxc_udc.c
+++ b/drivers/usb/gadget/fsl_mxc_udc.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/fsl_devices.h>
#include <linux/platform_device.h>
+#include <linux/io.h>
#include <mach/hardware.h>
@@ -88,7 +89,6 @@ eenahb:
void fsl_udc_clk_finalize(struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
-#if defined(CONFIG_SOC_IMX35)
if (cpu_is_mx35()) {
unsigned int v;
@@ -101,7 +101,6 @@ void fsl_udc_clk_finalize(struct platform_device *pdev)
USBPHYCTRL_OTGBASE_OFFSET));
}
}
-#endif
/* ULPI transceivers don't need usbpll */
if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 2a03e4de11c1..e00cf92409ce 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2336,8 +2336,7 @@ static int fsl_qe_start(struct usb_gadget_driver *driver,
if (!udc_controller)
return -ENODEV;
- if (!driver || (driver->speed != USB_SPEED_FULL
- && driver->speed != USB_SPEED_HIGH)
+ if (!driver || driver->speed < USB_SPEED_FULL
|| !bind || !driver->disconnect || !driver->setup)
return -EINVAL;
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index b3b3d83b7c33..dd28ef3def71 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -696,12 +696,31 @@ static void fsl_free_request(struct usb_ep *_ep, struct usb_request *_req)
kfree(req);
}
-/*-------------------------------------------------------------------------*/
+/* Actually add a dTD chain to an empty dQH and let go */
+static void fsl_prime_ep(struct fsl_ep *ep, struct ep_td_struct *td)
+{
+ struct ep_queue_head *qh = get_qh_by_ep(ep);
+
+ /* Write dQH next pointer and terminate bit to 0 */
+ qh->next_dtd_ptr = cpu_to_hc32(td->td_dma
+ & EP_QUEUE_HEAD_NEXT_POINTER_MASK);
+
+ /* Clear active and halt bit */
+ qh->size_ioc_int_sts &= cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
+ | EP_QUEUE_HEAD_STATUS_HALT));
+
+ /* Ensure that updates to the QH will occur before priming. */
+ wmb();
+
+ /* Prime endpoint by writing correct bit to ENDPTPRIME */
+ fsl_writel(ep_is_in(ep) ? (1 << (ep_index(ep) + 16))
+ : (1 << (ep_index(ep))), &dr_regs->endpointprime);
+}
+
+/* Add dTD chain to the dQH of an EP */
static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
{
- int i = ep_index(ep) * 2 + ep_is_in(ep);
u32 temp, bitmask, tmp_stat;
- struct ep_queue_head *dQH = &ep->udc->ep_qh[i];
/* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */
@@ -719,7 +738,7 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK);
/* Read prime bit, if 1 goto done */
if (fsl_readl(&dr_regs->endpointprime) & bitmask)
- goto out;
+ return;
do {
/* Set ATDTW bit in USBCMD */
@@ -736,28 +755,10 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd);
if (tmp_stat)
- goto out;
+ return;
}
- /* Write dQH next pointer and terminate bit to 0 */
- temp = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
- dQH->next_dtd_ptr = cpu_to_hc32(temp);
-
- /* Clear active and halt bit */
- temp = cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
- | EP_QUEUE_HEAD_STATUS_HALT));
- dQH->size_ioc_int_sts &= temp;
-
- /* Ensure that updates to the QH will occur before priming. */
- wmb();
-
- /* Prime endpoint by writing 1 to ENDPTPRIME */
- temp = ep_is_in(ep)
- ? (1 << (ep_index(ep) + 16))
- : (1 << (ep_index(ep)));
- fsl_writel(temp, &dr_regs->endpointprime);
-out:
- return;
+ fsl_prime_ep(ep, req->head);
}
/* Fill in the dTD structure
@@ -877,7 +878,7 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
VDBG("%s, bad ep", __func__);
return -EINVAL;
}
- if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+ if (usb_endpoint_xfer_isoc(ep->desc)) {
if (req->req.length > ep->ep.maxpacket)
return -EMSGSIZE;
}
@@ -973,25 +974,20 @@ static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
/* The request isn't the last request in this ep queue */
if (req->queue.next != &ep->queue) {
- struct ep_queue_head *qh;
struct fsl_req *next_req;
- qh = ep->qh;
next_req = list_entry(req->queue.next, struct fsl_req,
queue);
- /* Point the QH to the first TD of next request */
- fsl_writel((u32) next_req->head, &qh->curr_dtd_ptr);
+ /* prime with dTD of next request */
+ fsl_prime_ep(ep, next_req->head);
}
-
- /* The request hasn't been processed, patch up the TD chain */
+ /* The request hasn't been processed, patch up the TD chain */
} else {
struct fsl_req *prev_req;
prev_req = list_entry(req->queue.prev, struct fsl_req, queue);
- fsl_writel(fsl_readl(&req->tail->next_td_ptr),
- &prev_req->tail->next_td_ptr);
-
+ prev_req->tail->next_td_ptr = req->tail->next_td_ptr;
}
done(ep, req, -ECONNRESET);
@@ -1032,7 +1028,7 @@ static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
goto out;
}
- if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+ if (usb_endpoint_xfer_isoc(ep->desc)) {
status = -EOPNOTSUPP;
goto out;
}
@@ -1068,7 +1064,7 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
struct fsl_udc *udc;
int size = 0;
u32 bitmask;
- struct ep_queue_head *d_qh;
+ struct ep_queue_head *qh;
ep = container_of(_ep, struct fsl_ep, ep);
if (!_ep || (!ep->desc && ep_index(ep) != 0))
@@ -1079,13 +1075,13 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
- d_qh = &ep->udc->ep_qh[ep_index(ep) * 2 + ep_is_in(ep)];
+ qh = get_qh_by_ep(ep);
bitmask = (ep_is_in(ep)) ? (1 << (ep_index(ep) + 16)) :
(1 << (ep_index(ep)));
if (fsl_readl(&dr_regs->endptstatus) & bitmask)
- size = (d_qh->size_ioc_int_sts & DTD_PACKET_SIZE)
+ size = (qh->size_ioc_int_sts & DTD_PACKET_SIZE)
>> DTD_LENGTH_BIT_POS;
pr_debug("%s %u\n", __func__, size);
@@ -1938,8 +1934,7 @@ static int fsl_start(struct usb_gadget_driver *driver,
if (!udc_controller)
return -ENODEV;
- if (!driver || (driver->speed != USB_SPEED_FULL
- && driver->speed != USB_SPEED_HIGH)
+ if (!driver || driver->speed < USB_SPEED_FULL
|| !bind || !driver->disconnect || !driver->setup)
return -EINVAL;
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index 1d51be83fda8..f781f5dec417 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -569,6 +569,16 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
* 2 + ((windex & USB_DIR_IN) ? 1 : 0))
#define get_pipe_by_ep(EP) (ep_index(EP) * 2 + ep_is_in(EP))
+static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep)
+{
+ /* we only have one ep0 structure but two queue heads */
+ if (ep_index(ep) != 0)
+ return ep->qh;
+ else
+ return &ep->udc->ep_qh[(ep->udc->ep0_dir ==
+ USB_DIR_IN) ? 1 : 0];
+}
+
struct platform_device;
#ifdef CONFIG_ARCH_MXC
int fsl_udc_clk_init(struct platform_device *pdev);
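Editor's sketch of how the two fsl_udc helpers introduced above fit together; example_restart_queue() is hypothetical and only assumes fsl_prime_ep() and get_qh_by_ep() as defined in the hunks, plus the driver's existing struct fsl_ep/fsl_req layout.

static void example_restart_queue(struct fsl_ep *ep)
{
	struct fsl_req *req;

	if (list_empty(&ep->queue))
		return;

	/*
	 * Both the "queue onto an idle endpoint" path in fsl_queue_td()
	 * and the "re-prime after dequeue" path in fsl_ep_dequeue() now
	 * funnel through fsl_prime_ep(), which writes the dQH returned
	 * by get_qh_by_ep() and then hits ENDPTPRIME, so the wmb() and
	 * the dQH setup live in exactly one place.
	 */
	req = list_first_entry(&ep->queue, struct fsl_req, queue);
	fsl_prime_ep(ep, req->head);
}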
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 91d0af2a24a8..9aa1cbbee45b 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1472,7 +1472,7 @@ static int m66592_start(struct usb_gadget_driver *driver,
int retval;
if (!driver
- || driver->speed != USB_SPEED_HIGH
+ || driver->speed < USB_SPEED_HIGH
|| !bind
|| !driver->setup)
return -EINVAL;
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 7f1bc9a73cda..da2b9d0be3ca 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -1881,7 +1881,7 @@ static int net2280_start(struct usb_gadget *_gadget,
* (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
* "must not be used in normal operation"
*/
- if (!driver || driver->speed != USB_SPEED_HIGH
+ if (!driver || driver->speed < USB_SPEED_HIGH
|| !driver->setup)
return -EINVAL;
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 24f84b210ce1..fc719a3f8557 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1746,7 +1746,7 @@ static int r8a66597_start(struct usb_gadget *gadget,
struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
if (!driver
- || driver->speed != USB_SPEED_HIGH
+ || driver->speed < USB_SPEED_HIGH
|| !driver->setup)
return -EINVAL;
if (!r8a66597)
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index a552453dc946..b31448229f0b 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -2586,10 +2586,8 @@ static int s3c_hsotg_start(struct usb_gadget_driver *driver,
return -EINVAL;
}
- if (driver->speed != USB_SPEED_HIGH &&
- driver->speed != USB_SPEED_FULL) {
+ if (driver->speed < USB_SPEED_FULL)
dev_err(hsotg->dev, "%s: bad speed\n", __func__);
- }
if (!bind || !driver->setup) {
dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index 8d54f893cefe..20a553b46aed 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -1142,8 +1142,7 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver,
int ret;
if (!driver
- || (driver->speed != USB_SPEED_FULL &&
- driver->speed != USB_SPEED_HIGH)
+ || driver->speed < USB_SPEED_FULL
|| !bind
|| !driver->unbind || !driver->disconnect || !driver->setup)
return -EINVAL;
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 56a32033adb3..a60679cbbf85 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1475,6 +1475,7 @@ iso_stream_schedule (
* jump until after the queue is primed.
*/
else {
+ int done = 0;
start = SCHEDULE_SLOP + (now & ~0x07);
/* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
@@ -1492,18 +1493,18 @@ iso_stream_schedule (
if (stream->highspeed) {
if (itd_slot_ok(ehci, mod, start,
stream->usecs, period))
- break;
+ done = 1;
} else {
if ((start % 8) >= 6)
continue;
if (sitd_slot_ok(ehci, mod, stream,
start, sched, period))
- break;
+ done = 1;
}
- } while (start > next);
+ } while (start > next && !done);
/* no room in the schedule */
- if (start == next) {
+ if (!done) {
ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n",
urb, now, now + mod);
status = -ENOSPC;
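Editor's note: the iso_stream_schedule() rework above replaces "did the loop exit early?" bookkeeping with an explicit flag, because a usable slot found on the very last candidate also leaves start == next and used to be misreported as a full schedule. A hypothetical, reduced rendering of the pattern (the example_* names are not from the driver):

static bool example_slot_ok(unsigned int uframe)
{
	/* stands in for itd_slot_ok()/sitd_slot_ok(); details elided */
	return (uframe % 8) < 6;
}

static int example_find_slot(unsigned int start, unsigned int next)
{
	int done = 0;

	do {
		start--;
		if (example_slot_ok(start))
			done = 1;	/* remember success instead of break-ing */
	} while (start > next && !done);

	return done ? (int)start : -ENOSPC;	/* "full" only if nothing fit */
}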
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index d6e175428618..a403b53e86b9 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -124,7 +124,7 @@ void qset_clear(struct whc *whc, struct whc_qset *qset)
{
qset->td_start = qset->td_end = qset->ntds = 0;
- qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
+ qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
qset->qh.err_count = 0;
qset->qh.scratch[0] = 0;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index aa94c0195791..a1afb7c39f7e 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -711,7 +711,10 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
ring = xhci->cmd_ring;
seg = ring->deq_seg;
do {
- memset(seg->trbs, 0, SEGMENT_SIZE);
+ memset(seg->trbs, 0,
+ sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+ seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
+ cpu_to_le32(~TRB_CYCLE);
seg = seg->next;
} while (seg != ring->deq_seg);
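Editor's note on the xhci_clear_command_ring() hunk: each ring segment ends in a link TRB that chains it to the next segment (or back around the ring), so a memset() over the full SEGMENT_SIZE also destroyed that link. The change keeps the link TRB and only clears its cycle bit so the controller sees an empty, still-chained ring. A hypothetical per-segment helper restating the two added lines:

static void example_clear_segment(union xhci_trb *trbs)
{
	/* wipe every TRB except the trailing link TRB ... */
	memset(trbs, 0, sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
	/* ... and mark the preserved link TRB as not owned by the HC */
	trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE);
}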
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c1fa12ec7a9a..b63ab1570103 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2301,18 +2301,12 @@ static int musb_suspend(struct device *dev)
*/
}
- musb_save_context(musb);
-
spin_unlock_irqrestore(&musb->lock, flags);
return 0;
}
static int musb_resume_noirq(struct device *dev)
{
- struct musb *musb = dev_to_musb(dev);
-
- musb_restore_context(musb);
-
/* for static cmos like DaVinci, register values were preserved
* unless for some reason the whole soc powered down or the USB
* module got reset through the PSC (vs just being disabled).
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index d51043acfe1a..922148ff8d29 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1903,7 +1903,7 @@ static int musb_gadget_start(struct usb_gadget *g,
unsigned long flags;
int retval = -EINVAL;
- if (driver->speed != USB_SPEED_HIGH)
+ if (driver->speed < USB_SPEED_HIGH)
goto err0;
pm_runtime_get_sync(musb->controller);
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index 053f86d70009..ad96a3896729 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -349,7 +349,7 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
if (mod->irq_attch)
intenb1 |= ATTCHE;
- if (mod->irq_attch)
+ if (mod->irq_dtch)
intenb1 |= DTCHE;
if (mod->irq_sign)
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index d9717e0bc1ff..7f4e80338570 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -751,53 +751,32 @@ static int usbhsg_gadget_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
- struct usbhs_priv *priv;
- struct device *dev;
- int ret;
+ struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
if (!driver ||
!driver->setup ||
- driver->speed != USB_SPEED_HIGH)
+ driver->speed < USB_SPEED_FULL)
return -EINVAL;
- dev = usbhsg_gpriv_to_dev(gpriv);
- priv = usbhsg_gpriv_to_priv(gpriv);
-
/* first hook up the driver ... */
gpriv->driver = driver;
gpriv->gadget.dev.driver = &driver->driver;
- ret = device_add(&gpriv->gadget.dev);
- if (ret) {
- dev_err(dev, "device_add error %d\n", ret);
- goto add_fail;
- }
-
return usbhsg_try_start(priv, USBHSG_STATUS_REGISTERD);
-
-add_fail:
- gpriv->driver = NULL;
- gpriv->gadget.dev.driver = NULL;
-
- return ret;
}
static int usbhsg_gadget_stop(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
- struct usbhs_priv *priv;
- struct device *dev;
+ struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
if (!driver ||
!driver->unbind)
return -EINVAL;
- dev = usbhsg_gpriv_to_dev(gpriv);
- priv = usbhsg_gpriv_to_priv(gpriv);
-
usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD);
- device_del(&gpriv->gadget.dev);
+ gpriv->gadget.dev.driver = NULL;
gpriv->driver = NULL;
return 0;
@@ -827,6 +806,13 @@ static int usbhsg_start(struct usbhs_priv *priv)
static int usbhsg_stop(struct usbhs_priv *priv)
{
+ struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
+
+ /* cable disconnect */
+ if (gpriv->driver &&
+ gpriv->driver->disconnect)
+ gpriv->driver->disconnect(&gpriv->gadget);
+
return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED);
}
@@ -876,12 +862,14 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
/*
* init gadget
*/
- device_initialize(&gpriv->gadget.dev);
dev_set_name(&gpriv->gadget.dev, "gadget");
gpriv->gadget.dev.parent = dev;
gpriv->gadget.name = "renesas_usbhs_udc";
gpriv->gadget.ops = &usbhsg_gadget_ops;
gpriv->gadget.is_dualspeed = 1;
+ ret = device_register(&gpriv->gadget.dev);
+ if (ret < 0)
+ goto err_add_udc;
INIT_LIST_HEAD(&gpriv->gadget.ep_list);
@@ -912,12 +900,15 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
ret = usb_add_gadget_udc(dev, &gpriv->gadget);
if (ret)
- goto err_add_udc;
+ goto err_register;
dev_info(dev, "gadget probed\n");
return 0;
+
+err_register:
+ device_unregister(&gpriv->gadget.dev);
err_add_udc:
kfree(gpriv->uep);
@@ -933,6 +924,8 @@ void usbhs_mod_gadget_remove(struct usbhs_priv *priv)
usb_del_gadget_udc(&gpriv->gadget);
+ device_unregister(&gpriv->gadget.dev);
+
usbhsg_controller_unregister(gpriv);
kfree(gpriv->uep);
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index bade761a1e52..7955de589951 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -1267,6 +1267,7 @@ int usbhs_mod_host_probe(struct usbhs_priv *priv)
dev_err(dev, "Failed to create hcd\n");
return -ENOMEM;
}
+ hcd->has_tt = 1; /* for low/full speed */
pipe_info = kzalloc(sizeof(*pipe_info) * pipe_size, GFP_KERNEL);
if (!pipe_info) {
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index bd4298bb6750..ff3db5d056a5 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -736,6 +736,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 571fa96b49c7..055b64ef0bba 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -112,6 +112,7 @@
/* Propox devices */
#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
+#define FTDI_PROPOX_ISPCABLEIII_PID 0xD739
/* Lenz LI-USB Computer Interface. */
#define FTDI_LENZ_LIUSB_PID 0xD780
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index d865878c9f97..6dd64534fad0 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -661,6 +661,14 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) }, /* E398 3G Modem */
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) }, /* E398 3G PC UI Interface */
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) }, /* E398 3G Application Interface */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
@@ -747,6 +755,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 3041a974faf3..24caba79d722 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1854,6 +1854,13 @@ UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
+/* Reported by Qinglin Ye <yestyle@gmail.com> */
+UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100,
+ "Kingston",
+ "DT 101 G2",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BULK_IGNORE_TAG ),
+
/* Reported by Francesco Foresti <frafore@tiscali.it> */
UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
"Super Top",
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 8e964b91c447..284798aaf8b1 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -166,7 +166,7 @@ retry:
/*
* Get IO TLB memory from any location.
*/
- xen_io_tlb_start = alloc_bootmem(bytes);
+ xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
if (!xen_io_tlb_start) {
m = "Cannot allocate Xen-SWIOTLB buffer!\n";
goto error;
@@ -179,7 +179,7 @@ retry:
bytes,
xen_io_tlb_nslabs);
if (rc) {
- free_bootmem(__pa(xen_io_tlb_start), bytes);
+ free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
m = "Failed to get contiguous memory for DMA from Xen!\n"\
"You either: don't have the permissions, do not have"\
" enough free memory under 4GB, or the hypervisor memory"\