Diffstat (limited to 'drivers/ata')
-rw-r--r--  drivers/ata/acard-ahci.c        |   4
-rw-r--r--  drivers/ata/ahci.c              |   7
-rw-r--r--  drivers/ata/ahci.h              |  10
-rw-r--r--  drivers/ata/ahci_mvebu.c        |  56
-rw-r--r--  drivers/ata/ahci_qoriq.c        |   2
-rw-r--r--  drivers/ata/ahci_xgene.c        |   4
-rw-r--r--  drivers/ata/libahci.c           |  28
-rw-r--r--  drivers/ata/libahci_platform.c  |  24
-rw-r--r--  drivers/ata/libata-core.c       |  71
-rw-r--r--  drivers/ata/libata-eh.c         | 111
-rw-r--r--  drivers/ata/libata-scsi.c       |  31
-rw-r--r--  drivers/ata/libata-zpodd.c      |   2
-rw-r--r--  drivers/ata/pata_hpt37x.c       |  13
-rw-r--r--  drivers/ata/sata_dwc_460ex.c    |  14
-rw-r--r--  drivers/ata/sata_fsl.c          |  10
-rw-r--r--  drivers/ata/sata_highbank.c     |   2
-rw-r--r--  drivers/ata/sata_mv.c           |  26
-rw-r--r--  drivers/ata/sata_nv.c           |  98
-rw-r--r--  drivers/ata/sata_sil24.c        |  10
19 files changed, 279 insertions, 244 deletions
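
Two changes recur throughout the hunks below: direct calls to ahci_stop_engine() are replaced by a per-host hpriv->stop_engine() hook (which ahci_mvebu uses to interpose its PxFBS save/restore errata workaround), and drivers switch from qc->tag to qc->hw_tag wherever a value is programmed into the controller. As a minimal sketch of how a platform AHCI driver installs a stop_engine override under the new scheme, the foo_* names below are hypothetical, while ahci_stop_engine() and ahci_platform_get_resources() are the existing interfaces visible in the diff:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/ahci_platform.h>
#include "ahci.h"

static int foo_ahci_stop_engine(struct ata_port *ap)
{
	/* vendor-specific work around stopping the port DMA engine ... */

	/* ... then fall back to the generic engine stop */
	return ahci_stop_engine(ap);
}

static int foo_ahci_probe(struct platform_device *pdev)
{
	struct ahci_host_priv *hpriv;

	hpriv = ahci_platform_get_resources(pdev);
	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);

	/*
	 * Must be set before the host is activated;
	 * ahci_save_initial_config() installs ahci_stop_engine as the
	 * default whenever the hook is left NULL.
	 */
	hpriv->stop_engine = foo_ahci_stop_engine;

	/* ... clocks, phys, host activation as usual ... */
	return 0;
}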
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c index 940ddbc59aa7..583e366be7e2 100644 --- a/drivers/ata/acard-ahci.c +++ b/drivers/ata/acard-ahci.c @@ -271,7 +271,7 @@ static void acard_ahci_qc_prep(struct ata_queued_cmd *qc) * Fill in command table information. First, the header, * a SATA Register - Host to Device command FIS. */ - cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ; + cmd_tbl = pp->cmd_tbl + qc->hw_tag * AHCI_CMD_TBL_SZ; ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl); if (is_atapi) { @@ -294,7 +294,7 @@ static void acard_ahci_qc_prep(struct ata_queued_cmd *qc) if (is_atapi) opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; - ahci_fill_cmd_slot(pp, qc->tag, opts); + ahci_fill_cmd_slot(pp, qc->hw_tag, opts); } static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc) diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 1ff17799769d..738fb22978dd 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -334,6 +334,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x9c07), board_ahci_mobile }, /* Lynx LP RAID */ { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci_mobile }, /* Lynx LP RAID */ { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci_mobile }, /* Lynx LP RAID */ + { PCI_VDEVICE(INTEL, 0x9dd3), board_ahci_mobile }, /* Cannon Lake PCH-LP AHCI */ { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */ { PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */ { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */ @@ -698,7 +699,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, DPRINTK("ENTER\n"); - ahci_stop_engine(ap); + hpriv->stop_engine(ap); rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context), deadline, &online, NULL); @@ -724,7 +725,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, bool online; int rc; - ahci_stop_engine(ap); + hpriv->stop_engine(ap); /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); @@ -788,7 +789,7 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, DPRINTK("ENTER\n"); - ahci_stop_engine(ap); + hpriv->stop_engine(ap); for (i = 0; i < 2; i++) { u16 val; diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 4356ef1d28a8..1609ebab4e23 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -350,7 +350,6 @@ struct ahci_host_priv { u32 em_msg_type; /* EM message type */ bool got_runtime_pm; /* Did we do pm_runtime_get? */ struct clk *clks[AHCI_MAX_CLKS]; /* Optional */ - struct reset_control *rsts; /* Optional */ struct regulator **target_pwrs; /* Optional */ /* * If platform uses PHYs. There is a 1:1 relation between the port number and @@ -366,6 +365,13 @@ struct ahci_host_priv { * be overridden anytime before the host is activated. */ void (*start_engine)(struct ata_port *ap); + /* + * Optional ahci_stop_engine override, if not set this gets set to the + * default ahci_stop_engine during ahci_save_initial_config, this can + * be overridden anytime before the host is activated. 
+ */ + int (*stop_engine)(struct ata_port *ap); + irqreturn_t (*irq_handler)(int irq, void *dev_instance); /* only required for per-port MSI(-X) support */ @@ -384,7 +390,7 @@ extern struct device_attribute *ahci_sdev_attrs[]; */ #define AHCI_SHT(drv_name) \ ATA_NCQ_SHT(drv_name), \ - .can_queue = AHCI_MAX_CMDS - 1, \ + .can_queue = AHCI_MAX_CMDS, \ .sg_tablesize = AHCI_MAX_SG, \ .dma_boundary = AHCI_DMA_BOUNDARY, \ .shost_attrs = ahci_shost_attrs, \ diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c index de7128d81e9c..0045dacd814b 100644 --- a/drivers/ata/ahci_mvebu.c +++ b/drivers/ata/ahci_mvebu.c @@ -62,6 +62,60 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv) writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA); } +/** + * ahci_mvebu_stop_engine + * + * @ap: Target ata port + * + * Errata Ref#226 - SATA Disk HOT swap issue when connected through + * Port Multiplier in FIS-based Switching mode. + * + * To avoid the issue, according to design, the bits[11:8, 0] of + * register PxFBS are cleared when Port Command and Status (0x18) bit[0] + * changes its value from 1 to 0, i.e. falling edge of Port + * Command and Status bit[0] sends PULSE that resets PxFBS + * bits[11:8; 0]. + * + * This function is used to override function of "ahci_stop_engine" + * from libahci.c by adding the mvebu work around(WA) to save PxFBS + * value before the PxCMD ST write of 0, then restore PxFBS value. + * + * Return: 0 on success; Error code otherwise. + */ +int ahci_mvebu_stop_engine(struct ata_port *ap) +{ + void __iomem *port_mmio = ahci_port_base(ap); + u32 tmp, port_fbs; + + tmp = readl(port_mmio + PORT_CMD); + + /* check if the HBA is idle */ + if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0) + return 0; + + /* save the port PxFBS register for later restore */ + port_fbs = readl(port_mmio + PORT_FBS); + + /* setting HBA to idle */ + tmp &= ~PORT_CMD_START; + writel(tmp, port_mmio + PORT_CMD); + + /* + * bit #15 PxCMD signal doesn't clear PxFBS, + * restore the PxFBS register right after clearing the PxCMD ST, + * no need to wait for the PxCMD bit #15. + */ + writel(port_fbs, port_mmio + PORT_FBS); + + /* wait for engine to stop. 
This could be as long as 500 msec */ + tmp = ata_wait_register(ap, port_mmio + PORT_CMD, + PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500); + if (tmp & PORT_CMD_LIST_ON) + return -EIO; + + return 0; +} + #ifdef CONFIG_PM_SLEEP static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state) { @@ -112,6 +166,8 @@ static int ahci_mvebu_probe(struct platform_device *pdev) if (rc) return rc; + hpriv->stop_engine = ahci_mvebu_stop_engine; + if (of_device_is_compatible(pdev->dev.of_node, "marvell,armada-380-ahci")) { dram = mv_mbus_dram_info(); diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c index 2685f28160f7..cfdef4d44ae9 100644 --- a/drivers/ata/ahci_qoriq.c +++ b/drivers/ata/ahci_qoriq.c @@ -96,7 +96,7 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class, DPRINTK("ENTER\n"); - ahci_stop_engine(ap); + hpriv->stop_engine(ap); /* * There is a errata on ls1021a Rev1.0 and Rev2.0 which is: diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index c2b5941d9184..ad58da7c9aff 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c @@ -165,7 +165,7 @@ static int xgene_ahci_restart_engine(struct ata_port *ap) PORT_CMD_ISSUE, 0x0, 1, 100)) return -EBUSY; - ahci_stop_engine(ap); + hpriv->stop_engine(ap); ahci_start_fis_rx(ap); /* @@ -421,7 +421,7 @@ static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class, portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR); portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI); - ahci_stop_engine(ap); + hpriv->stop_engine(ap); rc = xgene_ahci_do_hardreset(link, deadline, &online); diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 7adcf3caabd0..965842a08743 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -560,6 +560,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv) if (!hpriv->start_engine) hpriv->start_engine = ahci_start_engine; + if (!hpriv->stop_engine) + hpriv->stop_engine = ahci_stop_engine; + if (!hpriv->irq_handler) hpriv->irq_handler = ahci_single_level_irq_intr; } @@ -897,9 +900,10 @@ static void ahci_start_port(struct ata_port *ap) static int ahci_deinit_port(struct ata_port *ap, const char **emsg) { int rc; + struct ahci_host_priv *hpriv = ap->host->private_data; /* disable DMA */ - rc = ahci_stop_engine(ap); + rc = hpriv->stop_engine(ap); if (rc) { *emsg = "failed to stop engine"; return rc; @@ -1310,7 +1314,7 @@ int ahci_kick_engine(struct ata_port *ap) int busy, rc; /* stop engine */ - rc = ahci_stop_engine(ap); + rc = hpriv->stop_engine(ap); if (rc) goto out_restart; @@ -1549,7 +1553,7 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class, DPRINTK("ENTER\n"); - ahci_stop_engine(ap); + hpriv->stop_engine(ap); /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); @@ -1645,7 +1649,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc) * Fill in command table information. First, the header, * a SATA Register - Host to Device command FIS. 
*/ - cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ; + cmd_tbl = pp->cmd_tbl + qc->hw_tag * AHCI_CMD_TBL_SZ; ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl); if (is_atapi) { @@ -1666,7 +1670,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc) if (is_atapi) opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; - ahci_fill_cmd_slot(pp, qc->tag, opts); + ahci_fill_cmd_slot(pp, qc->hw_tag, opts); } static void ahci_fbs_dec_intr(struct ata_port *ap) @@ -2002,7 +2006,7 @@ unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) pp->active_link = qc->dev->link; if (ata_is_ncq(qc->tf.protocol)) - writel(1 << qc->tag, port_mmio + PORT_SCR_ACT); + writel(1 << qc->hw_tag, port_mmio + PORT_SCR_ACT); if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) { u32 fbs = readl(port_mmio + PORT_FBS); @@ -2012,7 +2016,7 @@ unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) pp->fbs_last_dev = qc->dev->link->pmp; } - writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE); + writel(1 << qc->hw_tag, port_mmio + PORT_CMD_ISSUE); ahci_sw_activity(qc->dev->link); @@ -2075,14 +2079,14 @@ void ahci_error_handler(struct ata_port *ap) if (!(ap->pflags & ATA_PFLAG_FROZEN)) { /* restart engine */ - ahci_stop_engine(ap); + hpriv->stop_engine(ap); hpriv->start_engine(ap); } sata_pmp_error_handler(ap); if (!ata_dev_enabled(ap->link.device)) - ahci_stop_engine(ap); + hpriv->stop_engine(ap); } EXPORT_SYMBOL_GPL(ahci_error_handler); @@ -2129,7 +2133,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep) return; /* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */ - rc = ahci_stop_engine(ap); + rc = hpriv->stop_engine(ap); if (rc) return; @@ -2189,7 +2193,7 @@ static void ahci_enable_fbs(struct ata_port *ap) return; } - rc = ahci_stop_engine(ap); + rc = hpriv->stop_engine(ap); if (rc) return; @@ -2222,7 +2226,7 @@ static void ahci_disable_fbs(struct ata_port *ap) return; } - rc = ahci_stop_engine(ap); + rc = hpriv->stop_engine(ap); if (rc) return; diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 46a762442dc5..30cc8f1a31e1 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -25,7 +25,6 @@ #include <linux/phy/phy.h> #include <linux/pm_runtime.h> #include <linux/of_platform.h> -#include <linux/reset.h> #include "ahci.h" static void ahci_host_stop(struct ata_host *host); @@ -196,8 +195,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_disable_regulators); * following order: * 1) Regulator * 2) Clocks (through ahci_platform_enable_clks) - * 3) Resets - * 4) Phys + * 3) Phys * * If resource enabling fails at any point the previous enabled resources * are disabled in reverse order. 
@@ -217,19 +215,12 @@ int ahci_platform_enable_resources(struct ahci_host_priv *hpriv) if (rc) goto disable_regulator; - rc = reset_control_deassert(hpriv->rsts); - if (rc) - goto disable_clks; - rc = ahci_platform_enable_phys(hpriv); if (rc) - goto disable_resets; + goto disable_clks; return 0; -disable_resets: - reset_control_assert(hpriv->rsts); - disable_clks: ahci_platform_disable_clks(hpriv); @@ -248,15 +239,12 @@ EXPORT_SYMBOL_GPL(ahci_platform_enable_resources); * following order: * 1) Phys * 2) Clocks (through ahci_platform_disable_clks) - * 3) Resets - * 4) Regulator + * 3) Regulator */ void ahci_platform_disable_resources(struct ahci_host_priv *hpriv) { ahci_platform_disable_phys(hpriv); - reset_control_assert(hpriv->rsts); - ahci_platform_disable_clks(hpriv); ahci_platform_disable_regulators(hpriv); @@ -405,12 +393,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev) hpriv->clks[i] = clk; } - hpriv->rsts = devm_reset_control_array_get_optional_shared(dev); - if (IS_ERR(hpriv->rsts)) { - rc = PTR_ERR(hpriv->rsts); - goto err_out; - } - hpriv->nports = child_nodes = of_get_child_count(dev->of_node); /* diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 8bc71ca61e7f..c41b9eeabe7c 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -759,7 +759,7 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; tf->flags |= tf_flags; - if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) { + if (ata_ncq_enabled(dev) && !ata_tag_internal(tag)) { /* yay, NCQ */ if (!lba_48_ok(block, n_block)) return -ERANGE; @@ -1570,8 +1570,9 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, u8 command = tf->command; int auto_timeout = 0; struct ata_queued_cmd *qc; - unsigned int tag, preempted_tag; - u32 preempted_sactive, preempted_qc_active; + unsigned int preempted_tag; + u32 preempted_sactive; + u64 preempted_qc_active; int preempted_nr_active_links; DECLARE_COMPLETION_ONSTACK(wait); unsigned long flags; @@ -1587,20 +1588,10 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, } /* initialize internal qc */ + qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL); - /* XXX: Tag 0 is used for drivers with legacy EH as some - * drivers choke if any other tag is given. This breaks - * ata_tag_internal() test for those drivers. Don't use new - * EH stuff without converting to it. 
- */ - if (ap->ops->error_handler) - tag = ATA_TAG_INTERNAL; - else - tag = 0; - - qc = __ata_qc_from_tag(ap, tag); - - qc->tag = tag; + qc->tag = ATA_TAG_INTERNAL; + qc->hw_tag = 0; qc->scsicmd = NULL; qc->ap = ap; qc->dev = dev; @@ -2295,7 +2286,7 @@ static int ata_dev_config_ncq(struct ata_device *dev, return 0; } if (ap->flags & ATA_FLAG_NCQ) { - hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); + hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE); dev->flags |= ATA_DFLAG_NCQ; } @@ -3573,9 +3564,11 @@ static int ata_dev_set_mode(struct ata_device *dev) DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", dev->xfer_shift, (int)dev->xfer_mode); - ata_dev_info(dev, "configured for %s%s\n", - ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), - dev_err_whine); + if (!(ehc->i.flags & ATA_EHI_QUIET) || + ehc->i.flags & ATA_EHI_DID_HARDRESET) + ata_dev_info(dev, "configured for %s%s\n", + ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), + dev_err_whine); return 0; @@ -4493,6 +4486,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, + /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on + SD7SN6S256G and SD8SN8U256G */ + { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, }, + /* devices which puke on READ_NATIVE_MAX */ { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, @@ -4549,7 +4546,13 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { ATA_HORKAGE_ZERO_AFTER_TRIM | ATA_HORKAGE_NOLPM, }, + /* These specific Samsung models/firmware-revs do not handle LPM well */ + { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, }, + { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, }, + /* devices that don't properly handle queued TRIM commands */ + { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | @@ -5123,7 +5126,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag) } qc = __ata_qc_from_tag(ap, tag); - qc->tag = tag; + qc->tag = qc->hw_tag = tag; qc->scsicmd = NULL; qc->ap = ap; qc->dev = dev; @@ -5153,7 +5156,7 @@ void ata_qc_free(struct ata_queued_cmd *qc) qc->flags = 0; tag = qc->tag; - if (likely(ata_tag_valid(tag))) { + if (ata_tag_valid(tag)) { qc->tag = ATA_TAG_POISON; if (ap->flags & ATA_FLAG_SAS_HOST) ata_sas_free_tag(tag, ap); @@ -5175,7 +5178,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) /* command should be marked inactive atomically with qc completion */ if (ata_is_ncq(qc->tf.protocol)) { - link->sactive &= ~(1 << qc->tag); + link->sactive &= ~(1 << qc->hw_tag); if (!link->sactive) ap->nr_active_links--; } else { @@ -5193,7 +5196,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc) * is called. (when rc != 0 and atapi request sense is needed) */ qc->flags &= ~ATA_QCFLAG_ACTIVE; - ap->qc_active &= ~(1 << qc->tag); + ap->qc_active &= ~(1ULL << qc->tag); /* call completion callback */ qc->complete_fn(qc); @@ -5350,29 +5353,29 @@ void ata_qc_complete(struct ata_queued_cmd *qc) * RETURNS: * Number of completed commands on success, -errno otherwise. 
*/ -int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active) +int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active) { int nr_done = 0; - u32 done_mask; + u64 done_mask; done_mask = ap->qc_active ^ qc_active; if (unlikely(done_mask & qc_active)) { - ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n", + ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n", ap->qc_active, qc_active); return -EINVAL; } while (done_mask) { struct ata_queued_cmd *qc; - unsigned int tag = __ffs(done_mask); + unsigned int tag = __ffs64(done_mask); qc = ata_qc_from_tag(ap, tag); if (qc) { ata_qc_complete(qc); nr_done++; } - done_mask &= ~(1 << tag); + done_mask &= ~(1ULL << tag); } return nr_done; @@ -5403,11 +5406,11 @@ void ata_qc_issue(struct ata_queued_cmd *qc) WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); if (ata_is_ncq(prot)) { - WARN_ON_ONCE(link->sactive & (1 << qc->tag)); + WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag)); if (!link->sactive) ap->nr_active_links++; - link->sactive |= 1 << qc->tag; + link->sactive |= 1 << qc->hw_tag; } else { WARN_ON_ONCE(link->sactive); @@ -5416,7 +5419,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc) } qc->flags |= ATA_QCFLAG_ACTIVE; - ap->qc_active |= 1 << qc->tag; + ap->qc_active |= 1ULL << qc->tag; /* * We guarantee to LLDs that they will have at least one @@ -6415,7 +6418,7 @@ void ata_host_init(struct ata_host *host, struct device *dev, { spin_lock_init(&host->lock); mutex_init(&host->eh_mutex); - host->n_tags = ATA_MAX_QUEUE - 1; + host->n_tags = ATA_MAX_QUEUE; host->dev = dev; host->ops = ops; } @@ -6497,7 +6500,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) { int i, rc; - host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1); + host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE); /* host must have been started */ if (!(host->flags & ATA_HOST_STARTED)) { diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index c016829a38fd..d5412145d76d 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -175,8 +175,8 @@ static void ata_eh_handle_port_resume(struct ata_port *ap) { } #endif /* CONFIG_PM */ -static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt, - va_list args) +static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, + const char *fmt, va_list args) { ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len, ATA_EH_DESC_LEN - ehi->desc_len, @@ -500,57 +500,6 @@ void ata_eh_release(struct ata_port *ap) mutex_unlock(&ap->host->eh_mutex); } -/** - * ata_scsi_timed_out - SCSI layer time out callback - * @cmd: timed out SCSI command - * - * Handles SCSI layer timeout. We race with normal completion of - * the qc for @cmd. If the qc is already gone, we lose and let - * the scsi command finish (EH_HANDLED). Otherwise, the qc has - * timed out and EH should be invoked. Prevent ata_qc_complete() - * from finishing it by setting EH_SCHEDULED and return - * EH_NOT_HANDLED. - * - * TODO: kill this function once old EH is gone. 
- * - * LOCKING: - * Called from timer context - * - * RETURNS: - * EH_HANDLED or EH_NOT_HANDLED - */ -enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd) -{ - struct Scsi_Host *host = cmd->device->host; - struct ata_port *ap = ata_shost_to_port(host); - unsigned long flags; - struct ata_queued_cmd *qc; - enum blk_eh_timer_return ret; - - DPRINTK("ENTER\n"); - - if (ap->ops->error_handler) { - ret = BLK_EH_NOT_HANDLED; - goto out; - } - - ret = BLK_EH_HANDLED; - spin_lock_irqsave(ap->lock, flags); - qc = ata_qc_from_tag(ap, ap->link.active_tag); - if (qc) { - WARN_ON(qc->scsicmd != cmd); - qc->flags |= ATA_QCFLAG_EH_SCHEDULED; - qc->err_mask |= AC_ERR_TIMEOUT; - ret = BLK_EH_NOT_HANDLED; - } - spin_unlock_irqrestore(ap->lock, flags); - - out: - DPRINTK("EXIT, ret=%d\n", ret); - return ret; -} -EXPORT_SYMBOL(ata_scsi_timed_out); - static void ata_eh_unload(struct ata_port *ap) { struct ata_link *link; @@ -873,9 +822,12 @@ static int ata_eh_nr_in_flight(struct ata_port *ap) int nr = 0; /* count only non-internal commands */ - for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) + for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { + if (ata_tag_internal(tag)) + continue; if (ata_qc_from_tag(ap, tag)) nr++; + } return nr; } @@ -900,7 +852,7 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t) /* No progress during the last interval, tag all * in-flight qcs as timed out and freeze the port. */ - for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) { + for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); if (qc) qc->err_mask |= AC_ERR_TIMEOUT; @@ -1054,7 +1006,8 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) /* we're gonna abort all commands, no need for fast drain */ ata_eh_set_pending(ap, 0); - for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { + /* include internal tag in iteration */ + for (tag = 0; tag <= ATA_MAX_QUEUE; tag++) { struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); if (qc && (!link || qc->dev->link == link)) { @@ -1483,6 +1436,10 @@ static const char *ata_err_string(unsigned int err_mask) return "invalid argument"; if (err_mask & AC_ERR_DEV) return "device error"; + if (err_mask & AC_ERR_NCQ) + return "NCQ error"; + if (err_mask & AC_ERR_NODEV_HINT) + return "Polling detection error"; return "unknown error"; } @@ -1866,10 +1823,10 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc, if (qc->flags & ATA_QCFLAG_SENSE_VALID) { int ret = scsi_check_sense(qc->scsicmd); /* - * SUCCESS here means that the sense code could + * SUCCESS here means that the sense code could be * evaluated and should be passed to the upper layers * for correct evaluation. - * FAILED means the sense code could not interpreted + * FAILED means the sense code could not be interpreted * and the device would need to be reset. * NEEDS_RETRY and ADD_TO_MLQUEUE means that the * command would need to be retried. @@ -2150,6 +2107,21 @@ static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc) } /** + * ata_eh_quiet - check if we need to be quiet about a command error + * @qc: qc to check + * + * Look at the qc flags anbd its scsi command request flags to determine + * if we need to be quiet about the command failure. 
+ */ +static inline bool ata_eh_quiet(struct ata_queued_cmd *qc) +{ + if (qc->scsicmd && + qc->scsicmd->request->rq_flags & RQF_QUIET) + qc->flags |= ATA_QCFLAG_QUIET; + return qc->flags & ATA_QCFLAG_QUIET; +} + +/** * ata_eh_link_autopsy - analyze error and determine recovery action * @link: host link to perform autopsy on * @@ -2166,7 +2138,7 @@ static void ata_eh_link_autopsy(struct ata_link *link) struct ata_eh_context *ehc = &link->eh_context; struct ata_device *dev; unsigned int all_err_mask = 0, eflags = 0; - int tag; + int tag, nr_failed = 0, nr_quiet = 0; u32 serror; int rc; @@ -2218,12 +2190,16 @@ static void ata_eh_link_autopsy(struct ata_link *link) if (qc->err_mask & ~AC_ERR_OTHER) qc->err_mask &= ~AC_ERR_OTHER; - /* SENSE_VALID trumps dev/unknown error and revalidation */ + /* + * SENSE_VALID trumps dev/unknown error and revalidation. Upper + * layers will determine whether the command is worth retrying + * based on the sense data and device class/type. Otherwise, + * determine directly if the command is worth retrying using its + * error mask and flags. + */ if (qc->flags & ATA_QCFLAG_SENSE_VALID) qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); - - /* determine whether the command is worth retrying */ - if (ata_eh_worth_retry(qc)) + else if (ata_eh_worth_retry(qc)) qc->flags |= ATA_QCFLAG_RETRY; /* accumulate error info */ @@ -2232,8 +2208,17 @@ static void ata_eh_link_autopsy(struct ata_link *link) if (qc->flags & ATA_QCFLAG_IO) eflags |= ATA_EFLAG_IS_IO; trace_ata_eh_link_autopsy_qc(qc); + + /* Count quiet errors */ + if (ata_eh_quiet(qc)) + nr_quiet++; + nr_failed++; } + /* If all failed commands requested silence, then be quiet */ + if (nr_quiet == nr_failed) + ehc->i.flags |= ATA_EHI_QUIET; + /* enforce default EH actions */ if (ap->pflags & ATA_PFLAG_FROZEN || all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT)) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 89a9d4a2efc8..6a91d04351d9 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -872,6 +872,9 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev, qc->sg = scsi_sglist(cmd); qc->n_elem = scsi_sg_count(cmd); + + if (cmd->request->rq_flags & RQF_QUIET) + qc->flags |= ATA_QCFLAG_QUIET; } else { cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1); cmd->scsi_done(cmd); @@ -1316,7 +1319,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev, int depth; depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id)); - depth = min(ATA_MAX_QUEUE - 1, depth); + depth = min(ATA_MAX_QUEUE, depth); scsi_change_queue_depth(sdev, depth); } @@ -1429,7 +1432,7 @@ int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, /* limit and apply queue depth */ queue_depth = min(queue_depth, sdev->host->can_queue); queue_depth = min(queue_depth, ata_id_queue_depth(dev->id)); - queue_depth = min(queue_depth, ATA_MAX_QUEUE - 1); + queue_depth = min(queue_depth, ATA_MAX_QUEUE); if (sdev->queue_depth == queue_depth) return -EINVAL; @@ -1895,7 +1898,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) qc->nbytes = n_block * scmd->device->sector_size; rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags, - qc->tag, class); + qc->hw_tag, class); if (likely(rc == 0)) return 0; @@ -3233,7 +3236,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) /* For NCQ commands copy the tag value */ if (ata_is_ncq(tf->protocol)) - tf->nsect = qc->tag << 3; + tf->nsect = qc->hw_tag << 3; /* enforce correct master/slave bit */ tf->device = 
dev->devno ? @@ -3513,7 +3516,7 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) tf->protocol = ATA_PROT_NCQ; tf->command = ATA_CMD_FPDMA_SEND; tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f; - tf->nsect = qc->tag << 3; + tf->nsect = qc->hw_tag << 3; tf->hob_feature = (size / 512) >> 8; tf->feature = size / 512; @@ -3733,7 +3736,7 @@ static unsigned int ata_scsi_zbc_in_xlat(struct ata_queued_cmd *qc) tf->protocol = ATA_PROT_NCQ; tf->command = ATA_CMD_FPDMA_RECV; tf->hob_nsect = ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN & 0x1f; - tf->nsect = qc->tag << 3; + tf->nsect = qc->hw_tag << 3; tf->feature = sect & 0xff; tf->hob_feature = (sect >> 8) & 0xff; tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES | (options << 8); @@ -3812,7 +3815,7 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc) tf->protocol = ATA_PROT_NCQ_NODATA; tf->command = ATA_CMD_NCQ_NON_DATA; tf->feature = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT; - tf->nsect = qc->tag << 3; + tf->nsect = qc->hw_tag << 3; tf->auxiliary = sa | ((u16)all << 8); } else { tf->protocol = ATA_PROT_NODATA; @@ -5051,6 +5054,18 @@ int ata_sas_port_init(struct ata_port *ap) } EXPORT_SYMBOL_GPL(ata_sas_port_init); +int ata_sas_tport_add(struct device *parent, struct ata_port *ap) +{ + return ata_tport_add(parent, ap); +} +EXPORT_SYMBOL_GPL(ata_sas_tport_add); + +void ata_sas_tport_delete(struct ata_port *ap) +{ + ata_tport_delete(ap); +} +EXPORT_SYMBOL_GPL(ata_sas_tport_delete); + /** * ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc * @ap: SATA port to destroy @@ -5117,7 +5132,7 @@ int ata_sas_allocate_tag(struct ata_port *ap) tag = tag < max_queue ? tag : 0; /* the last tag is reserved for internal command. */ - if (tag == ATA_TAG_INTERNAL) + if (ata_tag_internal(tag)) continue; if (!test_and_set_bit(tag, &ap->sas_tag_allocated)) { diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c index de4ddd0e8550..b3ed8f9953a8 100644 --- a/drivers/ata/libata-zpodd.c +++ b/drivers/ata/libata-zpodd.c @@ -35,7 +35,7 @@ struct zpodd { static int eject_tray(struct ata_device *dev) { struct ata_taskfile tf; - static const char cdb[] = { GPCMD_START_STOP_UNIT, + static const char cdb[ATAPI_CDB_LEN] = { GPCMD_START_STOP_UNIT, 0, 0, 0, 0x02, /* LoEj */ 0, 0, 0, 0, 0, 0, 0, diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index 3ba843f5cdc0..ef8aaeb0c575 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c @@ -224,17 +224,14 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char * const list[]) { unsigned char model_num[ATA_ID_PROD_LEN + 1]; - int i = 0; + int i; ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); - while (list[i] != NULL) { - if (!strcmp(list[i], model_num)) { - pr_warn("%s is not supported for %s\n", - modestr, list[i]); - return 1; - } - i++; + i = match_string(list, -1, model_num); + if (i >= 0) { + pr_warn("%s is not supported for %s\n", modestr, list[i]); + return 1; } return 0; } diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c index ce3d6674ef80..6f142aa54f5f 100644 --- a/drivers/ata/sata_dwc_460ex.c +++ b/drivers/ata/sata_dwc_460ex.c @@ -761,7 +761,7 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status) if (tag > 0) { dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s dmacr=0x%08x\n", - __func__, qc->tag, qc->tf.command, + __func__, qc->hw_tag, qc->tf.command, get_dma_dir_descript(qc->dma_dir), 
get_prot_descript(qc->tf.protocol), sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr)); @@ -789,7 +789,7 @@ static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc, { u8 status = 0; u32 mask = 0x0; - u8 tag = qc->tag; + u8 tag = qc->hw_tag; struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap); struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); hsdev->sactive_queued = 0; @@ -997,7 +997,7 @@ static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag) static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc) { - u8 tag = qc->tag; + u8 tag = qc->hw_tag; if (ata_is_ncq(qc->tf.protocol)) { dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n", @@ -1059,7 +1059,7 @@ static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag) static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc) { - u8 tag = qc->tag; + u8 tag = qc->hw_tag; if (ata_is_ncq(qc->tf.protocol)) { dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n", @@ -1074,17 +1074,17 @@ static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc) static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc) { u32 sactive; - u8 tag = qc->tag; + u8 tag = qc->hw_tag; struct ata_port *ap = qc->ap; struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap); #ifdef DEBUG_NCQ - if (qc->tag > 0 || ap->link.sactive > 1) + if (qc->hw_tag > 0 || ap->link.sactive > 1) dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n", __func__, ap->print_id, qc->tf.command, ata_get_cmd_descript(qc->tf.command), - qc->tag, get_prot_descript(qc->tf.protocol), + qc->hw_tag, get_prot_descript(qc->tf.protocol), ap->link.active_tag, ap->link.sactive); #endif diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 95bf3abda6f6..b8d9cfc60374 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -519,7 +519,7 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc) struct sata_fsl_port_priv *pp = ap->private_data; struct sata_fsl_host_priv *host_priv = ap->host->private_data; void __iomem *hcr_base = host_priv->hcr_base; - unsigned int tag = sata_fsl_tag(qc->tag, hcr_base); + unsigned int tag = sata_fsl_tag(qc->hw_tag, hcr_base); struct command_desc *cd; u32 desc_info = CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE; u32 num_prde = 0; @@ -566,7 +566,7 @@ static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc) struct ata_port *ap = qc->ap; struct sata_fsl_host_priv *host_priv = ap->host->private_data; void __iomem *hcr_base = host_priv->hcr_base; - unsigned int tag = sata_fsl_tag(qc->tag, hcr_base); + unsigned int tag = sata_fsl_tag(qc->hw_tag, hcr_base); VPRINTK("xx_qc_issue called,CQ=0x%x,CA=0x%x,CE=0x%x,CC=0x%x\n", ioread32(CQ + hcr_base), @@ -595,7 +595,7 @@ static bool sata_fsl_qc_fill_rtf(struct ata_queued_cmd *qc) struct sata_fsl_port_priv *pp = qc->ap->private_data; struct sata_fsl_host_priv *host_priv = qc->ap->host->private_data; void __iomem *hcr_base = host_priv->hcr_base; - unsigned int tag = sata_fsl_tag(qc->tag, hcr_base); + unsigned int tag = sata_fsl_tag(qc->hw_tag, hcr_base); struct command_desc *cd; cd = pp->cmdentry + tag; @@ -1266,7 +1266,7 @@ static void sata_fsl_host_intr(struct ata_port *ap) } VPRINTK("Status of all queues :\n"); - VPRINTK("done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%x\n", + VPRINTK("done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%llx\n", done_mask, ioread32(hcr_base + CA), ioread32(hcr_base + CE), @@ -1293,7 +1293,7 @@ static void sata_fsl_host_intr(struct ata_port *ap) 
ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); return; - } else if ((ap->qc_active & (1 << ATA_TAG_INTERNAL))) { + } else if ((ap->qc_active & (1ULL << ATA_TAG_INTERNAL))) { iowrite32(1, hcr_base + CC); qc = ata_qc_from_tag(ap, ATA_TAG_INTERNAL); diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c index aafb8cc03523..e67815b896fc 100644 --- a/drivers/ata/sata_highbank.c +++ b/drivers/ata/sata_highbank.c @@ -410,7 +410,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class, int rc; int retry = 100; - ahci_stop_engine(ap); + hpriv->stop_engine(ap); /* clear D2H reception area to properly wait for D2H FIS */ ata_tf_init(link->device, &tf); diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 42d4589b43d4..cddf96f6e431 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -1802,7 +1802,7 @@ static void mv_fill_sg(struct ata_queued_cmd *qc) struct mv_sg *mv_sg, *last_sg = NULL; unsigned int si; - mv_sg = pp->sg_tbl[qc->tag]; + mv_sg = pp->sg_tbl[qc->hw_tag]; for_each_sg(qc->sg, sg, qc->n_elem, si) { dma_addr_t addr = sg_dma_address(sg); u32 sg_len = sg_dma_len(sg); @@ -1903,9 +1903,9 @@ static void mv_bmdma_setup(struct ata_queued_cmd *qc) writel(0, port_mmio + BMDMA_CMD); /* load PRD table addr. */ - writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16, + writel((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16, port_mmio + BMDMA_PRD_HIGH); - writelfl(pp->sg_tbl_dma[qc->tag], + writelfl(pp->sg_tbl_dma[qc->hw_tag], port_mmio + BMDMA_PRD_LOW); /* issue r/w command */ @@ -2071,17 +2071,17 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) */ if (!(tf->flags & ATA_TFLAG_WRITE)) flags |= CRQB_FLAG_READ; - WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); - flags |= qc->tag << CRQB_TAG_SHIFT; + WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag); + flags |= qc->hw_tag << CRQB_TAG_SHIFT; flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; /* get current queue index from software */ in_index = pp->req_idx; pp->crqb[in_index].sg_addr = - cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); + cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff); pp->crqb[in_index].sg_addr_hi = - cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); + cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16); pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); cw = &pp->crqb[in_index].ata_cmd[0]; @@ -2164,17 +2164,17 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) if (!(tf->flags & ATA_TFLAG_WRITE)) flags |= CRQB_FLAG_READ; - WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); - flags |= qc->tag << CRQB_TAG_SHIFT; - flags |= qc->tag << CRQB_HOSTQ_SHIFT; + WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag); + flags |= qc->hw_tag << CRQB_TAG_SHIFT; + flags |= qc->hw_tag << CRQB_HOSTQ_SHIFT; flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; /* get current queue index from software */ in_index = pp->req_idx; crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; - crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); - crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); + crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff); + crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16); crqb->flags = cpu_to_le32(flags); crqb->ata_cmd[0] = cpu_to_le32( @@ -2539,7 +2539,7 @@ static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap) failed_links = hweight16(new_map); ata_port_info(ap, - "%s: pmp_map=%04x qc_map=%04x failed_links=%d nr_active_links=%d\n", + "%s: pmp_map=%04x qc_map=%04llx failed_links=%d nr_active_links=%d\n", __func__, pp->delayed_eh_pmp_map, 
ap->qc_active, failed_links, ap->nr_active_links); diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 8c683ddd0f58..10ae11aa1926 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -400,7 +400,7 @@ static struct scsi_host_template nv_adma_sht = { static struct scsi_host_template nv_swncq_sht = { ATA_NCQ_SHT(DRV_NAME), - .can_queue = ATA_MAX_QUEUE, + .can_queue = ATA_MAX_QUEUE - 1, .sg_tablesize = LIBATA_MAX_PRD, .dma_boundary = ATA_DMA_BOUNDARY, .slave_configure = nv_swncq_slave_config, @@ -740,32 +740,16 @@ static int nv_adma_slave_config(struct scsi_device *sdev) sdev1 = ap->host->ports[1]->link.device[0].sdev; if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) || (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) { - /** We have to set the DMA mask to 32-bit if either port is in - ATAPI mode, since they are on the same PCI device which is - used for DMA mapping. If we set the mask we also need to set - the bounce limit on both ports to ensure that the block - layer doesn't feed addresses that cause DMA mapping to - choke. If either SCSI device is not allocated yet, it's OK - since that port will discover its correct setting when it - does get allocated. - Note: Setting 32-bit mask should not fail. */ - if (sdev0) - blk_queue_bounce_limit(sdev0->request_queue, - ATA_DMA_MASK); - if (sdev1) - blk_queue_bounce_limit(sdev1->request_queue, - ATA_DMA_MASK); - - dma_set_mask(&pdev->dev, ATA_DMA_MASK); + /* + * We have to set the DMA mask to 32-bit if either port is in + * ATAPI mode, since they are on the same PCI device which is + * used for DMA mapping. If either SCSI device is not allocated + * yet, it's OK since that port will discover its correct + * setting when it does get allocated. + */ + rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); } else { - /** This shouldn't fail as it was set to this value before */ - dma_set_mask(&pdev->dev, pp->adma_dma_mask); - if (sdev0) - blk_queue_bounce_limit(sdev0->request_queue, - pp->adma_dma_mask); - if (sdev1) - blk_queue_bounce_limit(sdev1->request_queue, - pp->adma_dma_mask); + rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask); } blk_queue_segment_boundary(sdev->request_queue, segment_boundary); @@ -1131,12 +1115,11 @@ static int nv_adma_port_start(struct ata_port *ap) VPRINTK("ENTER\n"); - /* Ensure DMA mask is set to 32-bit before allocating legacy PRD and - pad buffers */ - rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (rc) - return rc; - rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + /* + * Ensure DMA mask is set to 32-bit before allocating legacy PRD and + * pad buffers. + */ + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (rc) return rc; @@ -1156,13 +1139,16 @@ static int nv_adma_port_start(struct ata_port *ap) pp->notifier_clear_block = pp->gen_block + NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no); - /* Now that the legacy PRD and padding buffer are allocated we can - safely raise the DMA mask to allocate the CPB/APRD table. - These are allowed to fail since we store the value that ends up - being used to set as the bounce limit in slave_config later if - needed. */ - dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + /* + * Now that the legacy PRD and padding buffer are allocated we can + * try to raise the DMA mask to allocate the CPB/APRD table. 
+ */ + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rc) { + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (rc) + return rc; + } pp->adma_dma_mask = *dev->dma_mask; mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ, @@ -1356,11 +1342,11 @@ static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb) for_each_sg(qc->sg, sg, qc->n_elem, si) { aprd = (si < 5) ? &cpb->aprd[si] : - &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)]; + &pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)]; nv_adma_fill_aprd(qc, sg, si, aprd); } if (si > 5) - cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag))); + cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag))); else cpb->next_aprd = cpu_to_le64(0); } @@ -1385,7 +1371,7 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc) static void nv_adma_qc_prep(struct ata_queued_cmd *qc) { struct nv_adma_port_priv *pp = qc->ap->private_data; - struct nv_adma_cpb *cpb = &pp->cpb[qc->tag]; + struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag]; u8 ctl_flags = NV_CPB_CTL_CPB_VALID | NV_CPB_CTL_IEN; @@ -1403,7 +1389,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc) wmb(); cpb->len = 3; - cpb->tag = qc->tag; + cpb->tag = qc->hw_tag; cpb->next_cpb_idx = 0; /* turn on NCQ flags for NCQ commands */ @@ -1466,9 +1452,9 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) pp->last_issue_ncq = curr_ncq; } - writew(qc->tag, mmio + NV_ADMA_APPEND); + writew(qc->hw_tag, mmio + NV_ADMA_APPEND); - DPRINTK("Issued tag %u\n", qc->tag); + DPRINTK("Issued tag %u\n", qc->hw_tag); return 0; } @@ -1730,8 +1716,8 @@ static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc) /* queue is full */ WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE); - dq->defer_bits |= (1 << qc->tag); - dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag; + dq->defer_bits |= (1 << qc->hw_tag); + dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag; } static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap) @@ -1796,7 +1782,7 @@ static void nv_swncq_ncq_stop(struct ata_port *ap) u32 sactive; u32 done_mask; - ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n", + ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%llX sactive 0x%X\n", ap->qc_active, ap->link.sactive); ata_port_err(ap, "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n " @@ -2010,7 +1996,7 @@ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc) struct ata_bmdma_prd *prd; unsigned int si, idx; - prd = pp->prd + ATA_MAX_PRD * qc->tag; + prd = pp->prd + ATA_MAX_PRD * qc->hw_tag; idx = 0; for_each_sg(qc->sg, sg, qc->n_elem, si) { @@ -2048,16 +2034,16 @@ static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap, DPRINTK("Enter\n"); - writel((1 << qc->tag), pp->sactive_block); - pp->last_issue_tag = qc->tag; - pp->dhfis_bits &= ~(1 << qc->tag); - pp->dmafis_bits &= ~(1 << qc->tag); - pp->qc_active |= (0x1 << qc->tag); + writel((1 << qc->hw_tag), pp->sactive_block); + pp->last_issue_tag = qc->hw_tag; + pp->dhfis_bits &= ~(1 << qc->hw_tag); + pp->dmafis_bits &= ~(1 << qc->hw_tag); + pp->qc_active |= (0x1 << qc->hw_tag); ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ ap->ops->sff_exec_command(ap, &qc->tf); - DPRINTK("Issued tag %u\n", qc->tag); + DPRINTK("Issued tag %u\n", qc->hw_tag); return 0; } @@ -2207,7 +2193,7 @@ static void nv_swncq_dmafis(struct ata_port *ap) rw = qc->tf.flags & ATA_TFLAG_WRITE; /* load PRD table addr. 
*/ - iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag, + iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); /* specify data direction, triple-check start bit is clear */ diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index 4b1995e2d044..319f517137cd 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c @@ -285,13 +285,13 @@ static const struct sil24_cerr_info { [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_RESET, "protocol mismatch" }, [PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_RESET, - "data directon mismatch" }, + "data direction mismatch" }, [PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_RESET, "ran out of SGEs while writing" }, [PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_RESET, "ran out of SGEs while reading" }, [PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_RESET, - "invalid data directon for ATAPI CDB" }, + "invalid data direction for ATAPI CDB" }, [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET, "SGT not on qword boundary" }, [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET, @@ -849,7 +849,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc) struct sil24_sge *sge; u16 ctrl = 0; - cb = &pp->cmd_block[sil24_tag(qc->tag)]; + cb = &pp->cmd_block[sil24_tag(qc->hw_tag)]; if (!ata_is_atapi(qc->tf.protocol)) { prb = &cb->ata.prb; @@ -891,7 +891,7 @@ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc) struct ata_port *ap = qc->ap; struct sil24_port_priv *pp = ap->private_data; void __iomem *port = sil24_port_base(ap); - unsigned int tag = sil24_tag(qc->tag); + unsigned int tag = sil24_tag(qc->hw_tag); dma_addr_t paddr; void __iomem *activate; @@ -911,7 +911,7 @@ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc) static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc) { - sil24_read_tf(qc->ap, qc->tag, &qc->result_tf); + sil24_read_tf(qc->ap, qc->hw_tag, &qc->result_tf); return true; } |
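
The tag rework visible above splits the former qc->tag into two fields: qc->hw_tag is what gets written to the controller (SActive bits, command-slot index, the tag field of an NCQ FIS), while qc->tag remains libata's bookkeeping index and ap->qc_active widens to u64 so the internal command can occupy a bit above the 32 hardware tags. A sketch of an NCQ completion path under the new types, modelled on the sata_fsl hunk; foo_read_done_mask() is a hypothetical register accessor, ata_qc_complete_multiple() is the real helper whose parameter changed to u64 here:

#include <linux/libata.h>

static void foo_host_intr(struct ata_port *ap)
{
	/* bits 0-31: hardware tags the controller has completed */
	u32 done = foo_read_done_mask(ap);

	/*
	 * ap->qc_active is now a u64; clearing only the completed
	 * hardware-tag bits leaves a possible internal-command bit
	 * (above bit 31) untouched, and the helper walks the
	 * remaining difference with __ffs64().
	 */
	ata_qc_complete_multiple(ap, ap->qc_active ^ done);
}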