Diffstat (limited to 'drivers/mtd')
-rw-r--r-- drivers/mtd/devices/mchp48l640.c | 37
-rw-r--r-- drivers/mtd/devices/phram.c | 13
-rw-r--r-- drivers/mtd/devices/st_spi_fsm.c | 6
-rw-r--r-- drivers/mtd/hyperbus/hbmc-am654.c | 2
-rw-r--r-- drivers/mtd/inftlcore.c | 9
-rw-r--r-- drivers/mtd/mtd_blkdevs.c | 7
-rw-r--r-- drivers/mtd/mtdcore.c | 14
-rw-r--r-- drivers/mtd/mtdpart.c | 3
-rw-r--r-- drivers/mtd/mtdpstore.c | 12
-rw-r--r-- drivers/mtd/nand/Makefile | 3
-rw-r--r-- drivers/mtd/nand/ecc-mxic.c | 2
-rw-r--r-- drivers/mtd/nand/qpic_common.c | 759
-rw-r--r-- drivers/mtd/nand/raw/Kconfig | 12
-rw-r--r-- drivers/mtd/nand/raw/Makefile | 1
-rw-r--r-- drivers/mtd/nand/raw/brcmnand/brcmnand.c | 2
-rw-r--r-- drivers/mtd/nand/raw/cadence-nand-controller.c | 6
-rw-r--r-- drivers/mtd/nand/raw/davinci_nand.c | 137
-rw-r--r-- drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c | 5
-rw-r--r-- drivers/mtd/nand/raw/nand_base.c | 4
-rw-r--r-- drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c | 1029
-rw-r--r-- drivers/mtd/nand/raw/qcom_nandc.c | 1801
-rw-r--r-- drivers/mtd/nand/raw/r852.c | 3
-rw-r--r-- drivers/mtd/nand/raw/sunxi_nand.c | 2
-rw-r--r-- drivers/mtd/nand/spi/Makefile | 5
-rw-r--r-- drivers/mtd/nand/spi/alliancememory.c | 12
-rw-r--r-- drivers/mtd/nand/spi/ato.c | 6
-rw-r--r-- drivers/mtd/nand/spi/core.c | 126
-rw-r--r-- drivers/mtd/nand/spi/esmt.c | 98
-rw-r--r-- drivers/mtd/nand/spi/foresee.c | 18
-rw-r--r-- drivers/mtd/nand/spi/gigadevice.c | 48
-rw-r--r-- drivers/mtd/nand/spi/macronix.c | 87
-rw-r--r-- drivers/mtd/nand/spi/micron.c | 155
-rw-r--r-- drivers/mtd/nand/spi/otp.c | 362
-rw-r--r-- drivers/mtd/nand/spi/paragon.c | 12
-rw-r--r-- drivers/mtd/nand/spi/skyhigh.c | 147
-rw-r--r-- drivers/mtd/nand/spi/toshiba.c | 8
-rw-r--r-- drivers/mtd/nand/spi/winbond.c | 35
-rw-r--r-- drivers/mtd/nand/spi/xtx.c | 12
-rw-r--r-- drivers/mtd/sm_ftl.c | 4
-rw-r--r-- drivers/mtd/spi-nor/atmel.c | 4
-rw-r--r-- drivers/mtd/spi-nor/core.c | 96
-rw-r--r-- drivers/mtd/spi-nor/core.h | 6
-rw-r--r-- drivers/mtd/spi-nor/macronix.c | 40
-rw-r--r-- drivers/mtd/spi-nor/otp.c | 1
-rw-r--r-- drivers/mtd/spi-nor/spansion.c | 10
-rw-r--r-- drivers/mtd/spi-nor/swp.c | 1
-rw-r--r-- drivers/mtd/spi-nor/sysfs.c | 8
-rw-r--r-- drivers/mtd/spi-nor/winbond.c | 88
-rw-r--r-- drivers/mtd/ubi/block.c | 4
-rw-r--r-- drivers/mtd/ubi/cdev.c | 70
50 files changed, 3638 insertions(+), 1694 deletions(-)
diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c
index f576e6a890e8..4af9208f9690 100644
--- a/drivers/mtd/devices/mchp48l640.c
+++ b/drivers/mtd/devices/mchp48l640.c
@@ -23,10 +23,12 @@
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
+#include <linux/string_choices.h>
struct mchp48_caps {
unsigned int size;
unsigned int page_size;
+ bool auto_disable_wel;
};
struct mchp48l640_flash {
@@ -127,11 +129,11 @@ static int mchp48l640_write_prepare(struct mchp48l640_flash *flash, bool enable)
mutex_unlock(&flash->lock);
if (ret)
- dev_err(&flash->spi->dev, "write %sable failed ret: %d",
- (enable ? "en" : "dis"), ret);
+ dev_err(&flash->spi->dev, "write %s failed ret: %d",
+ str_enable_disable(enable), ret);
- dev_dbg(&flash->spi->dev, "write %sable success ret: %d",
- (enable ? "en" : "dis"), ret);
+ dev_dbg(&flash->spi->dev, "write %s success ret: %d",
+ str_enable_disable(enable), ret);
if (enable)
return mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, true);
@@ -194,9 +196,15 @@ static int mchp48l640_write_page(struct mtd_info *mtd, loff_t to, size_t len,
else
goto fail;
- ret = mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, false);
- if (ret)
- goto fail;
+ if (flash->caps->auto_disable_wel) {
+ ret = mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, false);
+ if (ret)
+ goto fail;
+ } else {
+ ret = mchp48l640_write_prepare(flash, false);
+ if (ret)
+ goto fail;
+ }
kfree(cmd);
return 0;
@@ -293,6 +301,13 @@ static int mchp48l640_read(struct mtd_info *mtd, loff_t from, size_t len,
static const struct mchp48_caps mchp48l640_caps = {
.size = SZ_8K,
.page_size = 32,
+ .auto_disable_wel = true,
+};
+
+static const struct mchp48_caps mb85rs128ty_caps = {
+ .size = SZ_16K,
+ .page_size = 256,
+ .auto_disable_wel = false,
};
static int mchp48l640_probe(struct spi_device *spi)
@@ -353,6 +368,10 @@ static const struct of_device_id mchp48l640_of_table[] = {
.compatible = "microchip,48l640",
.data = &mchp48l640_caps,
},
+ {
+ .compatible = "fujitsu,mb85rs128ty",
+ .data = &mb85rs128ty_caps,
+ },
{}
};
MODULE_DEVICE_TABLE(of, mchp48l640_of_table);
@@ -362,6 +381,10 @@ static const struct spi_device_id mchp48l640_spi_ids[] = {
.name = "48l640",
.driver_data = (kernel_ulong_t)&mchp48l640_caps,
},
+ {
+ .name = "mb85rs128ty",
+ .driver_data = (kernel_ulong_t)&mb85rs128ty_caps,
+ },
{}
};
MODULE_DEVICE_TABLE(spi, mchp48l640_spi_ids);
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index f756c60a4931..fd9ec165e61a 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -30,6 +30,7 @@
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of.h>
+#include <linux/security.h>
struct phram_mtd_list {
struct mtd_info mtd;
@@ -410,19 +411,23 @@ static int __init init_phram(void)
{
int ret;
+ ret = security_locked_down(LOCKDOWN_DEV_MEM);
+ if (ret)
+ return ret;
+
ret = platform_driver_register(&phram_driver);
if (ret)
return ret;
#ifndef MODULE
- if (phram_paramline[0])
+ if (phram_paramline[0]) {
ret = phram_setup(phram_paramline);
+ if (ret)
+ platform_driver_unregister(&phram_driver);
+ }
phram_init_called = 1;
#endif
- if (ret)
- platform_driver_unregister(&phram_driver);
-
return ret;
}
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index dba584fa2a53..f2266145b821 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -2104,7 +2104,6 @@ static void stfsm_remove(struct platform_device *pdev)
WARN_ON(mtd_device_unregister(&fsm->mtd));
}
-#ifdef CONFIG_PM_SLEEP
static int stfsmfsm_suspend(struct device *dev)
{
struct stfsm *fsm = dev_get_drvdata(dev);
@@ -2120,9 +2119,8 @@ static int stfsmfsm_resume(struct device *dev)
return clk_prepare_enable(fsm->clk);
}
-#endif
-static SIMPLE_DEV_PM_OPS(stfsm_pm_ops, stfsmfsm_suspend, stfsmfsm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(stfsm_pm_ops, stfsmfsm_suspend, stfsmfsm_resume);
static const struct of_device_id stfsm_match[] = {
{ .compatible = "st,spi-fsm", },
@@ -2136,7 +2134,7 @@ static struct platform_driver stfsm_driver = {
.driver = {
.name = "st-spi-fsm",
.of_match_table = stfsm_match,
- .pm = &stfsm_pm_ops,
+ .pm = pm_sleep_ptr(&stfsm_pm_ops),
},
};
module_platform_driver(stfsm_driver);
diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
index edad2ab0f2fa..82a1e7b7e4d8 100644
--- a/drivers/mtd/hyperbus/hbmc-am654.c
+++ b/drivers/mtd/hyperbus/hbmc-am654.c
@@ -176,7 +176,7 @@ static int am654_hbmc_probe(struct platform_device *pdev)
if (ret)
goto put_node;
- if (of_property_read_bool(dev->of_node, "mux-controls")) {
+ if (of_property_present(dev->of_node, "mux-controls")) {
struct mux_control *control = devm_mux_control_get(dev, NULL);
if (IS_ERR(control)) {
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 9739387cff8c..58c6e1743f5c 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -482,10 +482,11 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
silly = MAX_LOOPS;
while (thisEUN <= inftl->lastEUN) {
- inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
- blockofs, 8, &retlen, (char *)&bci);
-
- status = bci.Status | bci.Status1;
+ if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
+ blockofs, 8, &retlen, (char *)&bci) < 0)
+ status = SECTOR_IGNORE;
+ else
+ status = bci.Status | bci.Status1;
pr_debug("INFTL: status of block %d in EUN %d is %x\n",
block , writeEUN, status);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 47ead84407cd..847c11542f02 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -329,7 +329,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
goto out_list_del;
ret = blk_mq_alloc_sq_tag_set(new->tag_set, &mtd_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+ BLK_MQ_F_BLOCKING);
if (ret)
goto out_kfree_tag_set;
@@ -404,6 +404,7 @@ out_list_del:
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
unsigned long flags;
+ unsigned int memflags;
lockdep_assert_held(&mtd_table_mutex);
@@ -420,10 +421,10 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
spin_unlock_irqrestore(&old->queue_lock, flags);
/* freeze+quiesce queue to ensure all requests are flushed */
- blk_mq_freeze_queue(old->rq);
+ memflags = blk_mq_freeze_queue(old->rq);
blk_mq_quiesce_queue(old->rq);
blk_mq_unquiesce_queue(old->rq);
- blk_mq_unfreeze_queue(old->rq);
+ blk_mq_unfreeze_queue(old->rq, memflags);
/* If the device is currently open, tell trans driver to close it,
then put mtd device, and don't touch it again */
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 724f917f91ba..5ba9a741f5ac 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -741,7 +741,9 @@ int add_mtd_device(struct mtd_info *mtd)
mtd->dev.type = &mtd_devtype;
mtd->dev.class = &mtd_class;
mtd->dev.devt = MTD_DEVT(i);
- dev_set_name(&mtd->dev, "mtd%d", i);
+ error = dev_set_name(&mtd->dev, "mtd%d", i);
+ if (error)
+ goto fail_devname;
dev_set_drvdata(&mtd->dev, mtd);
mtd_check_of_node(mtd);
of_node_get(mtd_get_of_node(mtd));
@@ -790,6 +792,7 @@ fail_nvmem_add:
device_unregister(&mtd->dev);
fail_added:
of_node_put(mtd_get_of_node(mtd));
+fail_devname:
idr_remove(&mtd_idr, i);
fail_locked:
mutex_unlock(&mtd_table_mutex);
@@ -1053,7 +1056,7 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
const struct mtd_partition *parts,
int nr_parts)
{
- int ret;
+ int ret, err;
mtd_set_dev_defaults(mtd);
@@ -1105,8 +1108,11 @@ out:
nvmem_unregister(mtd->otp_factory_nvmem);
}
- if (ret && device_is_registered(&mtd->dev))
- del_mtd_device(mtd);
+ if (ret && device_is_registered(&mtd->dev)) {
+ err = del_mtd_device(mtd);
+ if (err)
+ pr_err("Error when deleting MTD device (%d)\n", err);
+ }
return ret;
}
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 6811a714349d..994e8c51e674 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -690,10 +690,9 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
parser = mtd_part_parser_get(*types);
if (!parser && !request_module("%s", *types))
parser = mtd_part_parser_get(*types);
- pr_debug("%s: got parser %s\n", master->name,
- parser ? parser->name : NULL);
if (!parser)
continue;
+ pr_debug("%s: got parser %s\n", master->name, parser->name);
ret = mtd_part_do_parse(parser, master, &pparts, data);
if (ret <= 0)
mtd_part_parser_put(parser);
diff --git a/drivers/mtd/mtdpstore.c b/drivers/mtd/mtdpstore.c
index 7ac8ac901306..9cf3872e37ae 100644
--- a/drivers/mtd/mtdpstore.c
+++ b/drivers/mtd/mtdpstore.c
@@ -417,11 +417,14 @@ static void mtdpstore_notify_add(struct mtd_info *mtd)
}
longcnt = BITS_TO_LONGS(div_u64(mtd->size, info->kmsg_size));
- cxt->rmmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
- cxt->usedmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
+ cxt->rmmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL);
+ cxt->usedmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL);
longcnt = BITS_TO_LONGS(div_u64(mtd->size, mtd->erasesize));
- cxt->badmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
+ cxt->badmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL);
+
+ if (!cxt->rmmap || !cxt->usedmap || !cxt->badmap)
+ return;
/* just support dmesg right now */
cxt->dev.flags = PSTORE_FLAGS_DMESG;
@@ -527,9 +530,6 @@ static void mtdpstore_notify_remove(struct mtd_info *mtd)
mtdpstore_flush_removed(cxt);
unregister_pstore_device(&cxt->dev);
- kfree(cxt->badmap);
- kfree(cxt->usedmap);
- kfree(cxt->rmmap);
cxt->mtd = NULL;
cxt->index = -1;
}
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 19e1291ac4d5..44913ff1bf12 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -3,7 +3,8 @@
nandcore-objs := core.o bbt.o
obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
-
+obj-$(CONFIG_SPI_QPIC_SNAND) += qpic_common.o
+obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o
obj-y += onenand/
obj-y += raw/
obj-y += spi/
diff --git a/drivers/mtd/nand/ecc-mxic.c b/drivers/mtd/nand/ecc-mxic.c
index 56b56f726b99..1bf9a5a64b87 100644
--- a/drivers/mtd/nand/ecc-mxic.c
+++ b/drivers/mtd/nand/ecc-mxic.c
@@ -614,7 +614,7 @@ static int mxic_ecc_finish_io_req_external(struct nand_device *nand,
{
struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
- int nents, step, ret;
+ int nents, step, ret = 0;
if (req->mode == MTD_OPS_RAW)
return 0;
diff --git a/drivers/mtd/nand/qpic_common.c b/drivers/mtd/nand/qpic_common.c
new file mode 100644
index 000000000000..4dc4d65e7d32
--- /dev/null
+++ b/drivers/mtd/nand/qpic_common.c
@@ -0,0 +1,759 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/qcom_adm.h>
+#include <linux/dma/qcom_bam_dma.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/nand-qpic-common.h>
+
+/**
+ * qcom_free_bam_transaction() - Frees the BAM transaction memory
+ * @nandc: qpic nand controller
+ *
+ * This function frees the bam transaction memory
+ */
+void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
+{
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ kfree(bam_txn);
+}
+EXPORT_SYMBOL(qcom_free_bam_transaction);
+
+/**
+ * qcom_alloc_bam_transaction() - allocate BAM transaction
+ * @nandc: qpic nand controller
+ *
+ * This function will allocate and initialize the BAM transaction structure
+ */
+struct bam_transaction *
+qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
+{
+ struct bam_transaction *bam_txn;
+ size_t bam_txn_size;
+ unsigned int num_cw = nandc->max_cwperpage;
+ void *bam_txn_buf;
+
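+	/*
+	 * The whole transaction lives in one allocation: the struct header is
+	 * followed by the per-codeword command elements, command scatterlists
+	 * and data scatterlists, in that order.
+	 */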
+ bam_txn_size =
+ sizeof(*bam_txn) + num_cw *
+ ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
+ (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
+ (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
+
+ bam_txn_buf = kzalloc(bam_txn_size, GFP_KERNEL);
+ if (!bam_txn_buf)
+ return NULL;
+
+ bam_txn = bam_txn_buf;
+ bam_txn_buf += sizeof(*bam_txn);
+
+ bam_txn->bam_ce = bam_txn_buf;
+ bam_txn_buf +=
+ sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
+
+ bam_txn->cmd_sgl = bam_txn_buf;
+ bam_txn_buf +=
+ sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
+
+ bam_txn->data_sgl = bam_txn_buf;
+
+ init_completion(&bam_txn->txn_done);
+
+ return bam_txn;
+}
+EXPORT_SYMBOL(qcom_alloc_bam_transaction);
+
+/**
+ * qcom_clear_bam_transaction() - Clears the BAM transaction
+ * @nandc: qpic nand controller
+ *
+ * This function will clear the BAM transaction indexes.
+ */
+void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
+{
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ if (!nandc->props->supports_bam)
+ return;
+
+ memset(&bam_txn->bam_positions, 0, sizeof(bam_txn->bam_positions));
+ bam_txn->last_data_desc = NULL;
+
+ sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
+ QPIC_PER_CW_CMD_SGL);
+ sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
+ QPIC_PER_CW_DATA_SGL);
+
+ reinit_completion(&bam_txn->txn_done);
+}
+EXPORT_SYMBOL(qcom_clear_bam_transaction);
+
+/**
+ * qcom_qpic_bam_dma_done() - Callback for DMA descriptor completion
+ * @data: data pointer
+ *
+ * This function is a callback for DMA descriptor completion
+ */
+void qcom_qpic_bam_dma_done(void *data)
+{
+ struct bam_transaction *bam_txn = data;
+
+ complete(&bam_txn->txn_done);
+}
+EXPORT_SYMBOL(qcom_qpic_bam_dma_done);
+
+/**
+ * qcom_nandc_dev_to_mem() - DMA sync for cpu or device
+ * @nandc: qpic nand controller
+ * @is_cpu: sync for the cpu (true) or the device (false)
+ *
+ * This function performs a DMA sync of the register read buffer for the cpu or the device
+ */
+inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
+{
+ if (!nandc->props->supports_bam)
+ return;
+
+ if (is_cpu)
+ dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+ else
+ dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+}
+EXPORT_SYMBOL(qcom_nandc_dev_to_mem);
+
+/**
+ * qcom_prepare_bam_async_desc() - Prepare DMA descriptor
+ * @nandc: qpic nand controller
+ * @chan: dma channel
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function maps the scatter-gather list for DMA transfer and forms the
+ * DMA descriptor for BAM. This descriptor will be added to the NAND DMA
+ * descriptor queue, which will be submitted to the DMA engine.
+ */
+int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+ struct dma_chan *chan, unsigned long flags)
+{
+ struct desc_info *desc;
+ struct scatterlist *sgl;
+ unsigned int sgl_cnt;
+ int ret;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+ enum dma_transfer_direction dir_eng;
+ struct dma_async_tx_descriptor *dma_desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ if (chan == nandc->cmd_chan) {
+ sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
+ sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
+ bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
+ dir_eng = DMA_MEM_TO_DEV;
+ desc->dir = DMA_TO_DEVICE;
+ } else if (chan == nandc->tx_chan) {
+ sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
+ sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
+ bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
+ dir_eng = DMA_MEM_TO_DEV;
+ desc->dir = DMA_TO_DEVICE;
+ } else {
+ sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
+ sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
+ bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
+ dir_eng = DMA_DEV_TO_MEM;
+ desc->dir = DMA_FROM_DEVICE;
+ }
+
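+	/* terminate the carved-out slice of the shared sgl before DMA-mapping it */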
+ sg_mark_end(sgl + sgl_cnt - 1);
+ ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
+ if (ret == 0) {
+ dev_err(nandc->dev, "failure in mapping desc\n");
+ kfree(desc);
+ return -ENOMEM;
+ }
+
+ desc->sgl_cnt = sgl_cnt;
+ desc->bam_sgl = sgl;
+
+ dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
+ flags);
+
+ if (!dma_desc) {
+ dev_err(nandc->dev, "failure in prep desc\n");
+ dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
+ kfree(desc);
+ return -EINVAL;
+ }
+
+ desc->dma_desc = dma_desc;
+
+ /* update last data/command descriptor */
+ if (chan == nandc->cmd_chan)
+ bam_txn->last_cmd_desc = dma_desc;
+ else
+ bam_txn->last_data_desc = dma_desc;
+
+ list_add_tail(&desc->node, &nandc->desc_list);
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_prepare_bam_async_desc);
+
+/**
+ * qcom_prep_bam_dma_desc_cmd() - Prepares the command descriptor for BAM DMA
+ * @nandc: qpic nand controller
+ * @read: read or write type
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to write to
+ * @size: DMA transaction size in bytes
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function will prepare the command descriptor for BAM DMA
+ * which will be used for NAND register reads and writes.
+ */
+int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr,
+ int size, unsigned int flags)
+{
+ int bam_ce_size;
+ int i, ret;
+ struct bam_cmd_element *bam_ce_buffer;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+ u32 offset;
+
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
+
+ /* fill the command desc */
+ for (i = 0; i < size; i++) {
+ offset = nandc->props->bam_offset + reg_off + 4 * i;
+ if (read)
+ bam_prep_ce(&bam_ce_buffer[i],
+ offset, BAM_READ_COMMAND,
+ reg_buf_dma_addr(nandc,
+ (__le32 *)vaddr + i));
+ else
+ bam_prep_ce_le32(&bam_ce_buffer[i],
+ offset, BAM_WRITE_COMMAND,
+ *((__le32 *)vaddr + i));
+ }
+
+ bam_txn->bam_ce_pos += size;
+
+ /* use the separate sgl after this command */
+ if (flags & NAND_BAM_NEXT_SGL) {
+ bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
+ bam_ce_size = (bam_txn->bam_ce_pos -
+ bam_txn->bam_ce_start) *
+ sizeof(struct bam_cmd_element);
+ sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
+ bam_ce_buffer, bam_ce_size);
+ bam_txn->cmd_sgl_pos++;
+ bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
+
+ if (flags & NAND_BAM_NWD) {
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_FENCE | DMA_PREP_CMD);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_prep_bam_dma_desc_cmd);
+
+/**
+ * qcom_prep_bam_dma_desc_data() - Prepares the data descriptor for BAM DMA
+ * @nandc: qpic nand controller
+ * @read: read or write type
+ * @vaddr: virtual address of the buffer we want to write to
+ * @size: DMA transaction size in bytes
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function will prepare the data descriptor for BAM DMA which
+ * will be used for NAND data reads and writes.
+ */
+int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+ const void *vaddr, int size, unsigned int flags)
+{
+ int ret;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+
+ if (read) {
+ sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
+ vaddr, size);
+ bam_txn->rx_sgl_pos++;
+ } else {
+ sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
+ vaddr, size);
+ bam_txn->tx_sgl_pos++;
+
+ /*
+ * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
+ * is not set, form the DMA descriptor
+ */
+ if (!(flags & NAND_BAM_NO_EOT)) {
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+ DMA_PREP_INTERRUPT);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_prep_bam_dma_desc_data);
+
+/**
+ * qcom_prep_adm_dma_desc() - Prepare a descriptor for the ADM DMA engine
+ * @nandc: qpic nand controller
+ * @read: read or write type
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to write to
+ * @size: adm dma transaction size in bytes
+ * @flow_control: whether to enable peripheral flow control
+ *
+ * This function will prepare a descriptor for the ADM DMA engine
+ */
+int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr, int size,
+ bool flow_control)
+{
+ struct qcom_adm_peripheral_config periph_conf = {};
+ struct dma_async_tx_descriptor *dma_desc;
+ struct dma_slave_config slave_conf = {0};
+ enum dma_transfer_direction dir_eng;
+ struct desc_info *desc;
+ struct scatterlist *sgl;
+ int ret;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ sgl = &desc->adm_sgl;
+
+ sg_init_one(sgl, vaddr, size);
+
+ if (read) {
+ dir_eng = DMA_DEV_TO_MEM;
+ desc->dir = DMA_FROM_DEVICE;
+ } else {
+ dir_eng = DMA_MEM_TO_DEV;
+ desc->dir = DMA_TO_DEVICE;
+ }
+
+ ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
+ if (!ret) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
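+	/* program the FIFO address and burst size; attach CRCI flow control when the controller provides one */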
+ slave_conf.device_fc = flow_control;
+ if (read) {
+ slave_conf.src_maxburst = 16;
+ slave_conf.src_addr = nandc->base_dma + reg_off;
+ if (nandc->data_crci) {
+ periph_conf.crci = nandc->data_crci;
+ slave_conf.peripheral_config = &periph_conf;
+ slave_conf.peripheral_size = sizeof(periph_conf);
+ }
+ } else {
+ slave_conf.dst_maxburst = 16;
+ slave_conf.dst_addr = nandc->base_dma + reg_off;
+ if (nandc->cmd_crci) {
+ periph_conf.crci = nandc->cmd_crci;
+ slave_conf.peripheral_config = &periph_conf;
+ slave_conf.peripheral_size = sizeof(periph_conf);
+ }
+ }
+
+ ret = dmaengine_slave_config(nandc->chan, &slave_conf);
+ if (ret) {
+ dev_err(nandc->dev, "failed to configure dma channel\n");
+ goto err;
+ }
+
+ dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
+ if (!dma_desc) {
+ dev_err(nandc->dev, "failed to prepare desc\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ desc->dma_desc = dma_desc;
+
+ list_add_tail(&desc->node, &nandc->desc_list);
+
+ return 0;
+err:
+ kfree(desc);
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_prep_adm_dma_desc);
+
+/**
+ * qcom_read_reg_dma() - read a given number of registers to the reg_read_buf pointer
+ * @nandc: qpic nand controller
+ * @first: offset of the first register in the contiguous block
+ * @num_regs: number of registers to read
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function will prepare a descriptor to read a given number of
+ * contiguous registers to the reg_read_buf pointer.
+ */
+int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
+ int num_regs, unsigned int flags)
+{
+ bool flow_control = false;
+ void *vaddr;
+
+ vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+ nandc->reg_read_pos += num_regs;
+
+ if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
+ first = dev_cmd_reg_addr(nandc, first);
+
+ if (nandc->props->supports_bam)
+ return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+ num_regs, flags);
+
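+	/* only the ADM path below needs CRCI flow control for these status registers */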
+ if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+ flow_control = true;
+
+ return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
+}
+EXPORT_SYMBOL(qcom_read_reg_dma);
+
+/**
+ * qcom_write_reg_dma() - write a given number of registers
+ * @nandc: qpic nand controller
+ * @vaddr: contiguous memory from where register value will
+ * be written
+ * @first: offset of the first register in the contiguous block
+ * @num_regs: number of registers to write
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function will prepare a descriptor to write a given number of
+ * contiguous registers
+ */
+int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
+ int first, int num_regs, unsigned int flags)
+{
+ bool flow_control = false;
+
+ if (first == NAND_EXEC_CMD)
+ flags |= NAND_BAM_NWD;
+
+ if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
+ first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
+
+ if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
+ first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
+
+ if (nandc->props->supports_bam)
+ return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+ num_regs, flags);
+
+ if (first == NAND_FLASH_CMD)
+ flow_control = true;
+
+ return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
+ num_regs * sizeof(u32), flow_control);
+}
+EXPORT_SYMBOL(qcom_write_reg_dma);
+
+/**
+ * qcom_read_data_dma() - transfer data
+ * @nandc: qpic nand controller
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to write to
+ * @size: DMA transaction size in bytes
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function will prepare a DMA descriptor to transfer data from the
+ * controller's internal buffer to the buffer 'vaddr'
+ */
+int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size, unsigned int flags)
+{
+ if (nandc->props->supports_bam)
+ return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
+
+ return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
+}
+EXPORT_SYMBOL(qcom_read_data_dma);
+
+/**
+ * qcom_write_data_dma() - transfer data
+ * @nandc: qpic nand controller
+ * @reg_off: offset within the controller's data buffer
+ * @vaddr: virtual address of the buffer we want to read from
+ * @size: DMA transaction size in bytes
+ * @flags: flags to control DMA descriptor preparation
+ *
+ * This function will prepare a DMA descriptor to transfer data from
+ * 'vaddr' to the controller's internal buffer
+ */
+int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size, unsigned int flags)
+{
+ if (nandc->props->supports_bam)
+ return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
+
+ return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
+}
+EXPORT_SYMBOL(qcom_write_data_dma);
+
+/**
+ * qcom_submit_descs() - submit DMA descriptors
+ * @nandc: qpic nand controller
+ *
+ * This function will submit all the prepared DMA descriptors,
+ * both command and data.
+ */
+int qcom_submit_descs(struct qcom_nand_controller *nandc)
+{
+ struct desc_info *desc, *n;
+ dma_cookie_t cookie = 0;
+ struct bam_transaction *bam_txn = nandc->bam_txn;
+ int ret = 0;
+
+ if (nandc->props->supports_bam) {
+ if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+ if (ret)
+ goto err_unmap_free_desc;
+ }
+
+ if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+ DMA_PREP_INTERRUPT);
+ if (ret)
+ goto err_unmap_free_desc;
+ }
+
+ if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_CMD);
+ if (ret)
+ goto err_unmap_free_desc;
+ }
+ }
+
+ list_for_each_entry(desc, &nandc->desc_list, node)
+ cookie = dmaengine_submit(desc->dma_desc);
+
+ if (nandc->props->supports_bam) {
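+		/* the completion callback is attached to the last command descriptor only */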
+ bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
+ bam_txn->last_cmd_desc->callback_param = bam_txn;
+
+ dma_async_issue_pending(nandc->tx_chan);
+ dma_async_issue_pending(nandc->rx_chan);
+ dma_async_issue_pending(nandc->cmd_chan);
+
+ if (!wait_for_completion_timeout(&bam_txn->txn_done,
+ QPIC_NAND_COMPLETION_TIMEOUT))
+ ret = -ETIMEDOUT;
+ } else {
+ if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
+ ret = -ETIMEDOUT;
+ }
+
+err_unmap_free_desc:
+ /*
+ * Unmap the dma sg_list and free the desc allocated by both
+ * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
+ */
+ list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+ list_del(&desc->node);
+
+ if (nandc->props->supports_bam)
+ dma_unmap_sg(nandc->dev, desc->bam_sgl,
+ desc->sgl_cnt, desc->dir);
+ else
+ dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
+ desc->dir);
+
+ kfree(desc);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_submit_descs);
+
+/**
+ * qcom_clear_read_regs() - reset the read register buffer
+ * @nandc: qpic nand controller
+ *
+ * This function resets the register read buffer for the next NAND operation
+ */
+void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
+{
+ nandc->reg_read_pos = 0;
+ qcom_nandc_dev_to_mem(nandc, false);
+}
+EXPORT_SYMBOL(qcom_clear_read_regs);
+
+/**
+ * qcom_nandc_unalloc() - free qpic nand controller resources
+ * @nandc: qpic nand controller
+ *
+ * This function will free the memory allocated for the qpic nand controller
+ */
+void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+{
+ if (nandc->props->supports_bam) {
+ if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
+ dma_unmap_single(nandc->dev, nandc->reg_read_dma,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+
+ if (nandc->tx_chan)
+ dma_release_channel(nandc->tx_chan);
+
+ if (nandc->rx_chan)
+ dma_release_channel(nandc->rx_chan);
+
+ if (nandc->cmd_chan)
+ dma_release_channel(nandc->cmd_chan);
+ } else {
+ if (nandc->chan)
+ dma_release_channel(nandc->chan);
+ }
+}
+EXPORT_SYMBOL(qcom_nandc_unalloc);
+
+/**
+ * qcom_nandc_alloc() - Allocate qpic nand controller
+ * @nandc: qpic nand controller
+ *
+ * This function will allocate memory for qpic nand controller
+ */
+int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
+{
+ int ret;
+
+ ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(nandc->dev, "failed to set DMA mask\n");
+ return ret;
+ }
+
+ /*
+ * we use the internal buffer for reading ONFI params, reading small
+ * data like ID and status, and performing read-copy-write operations
+ * when writing to a codeword partially. 532 is the maximum possible
+ * size of a codeword for our nand controller
+ */
+ nandc->buf_size = 532;
+
+ nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
+ if (!nandc->data_buffer)
+ return -ENOMEM;
+
+ nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
+ if (!nandc->regs)
+ return -ENOMEM;
+
+ nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
+ sizeof(*nandc->reg_read_buf),
+ GFP_KERNEL);
+ if (!nandc->reg_read_buf)
+ return -ENOMEM;
+
+ if (nandc->props->supports_bam) {
+ nandc->reg_read_dma =
+ dma_map_single(nandc->dev, nandc->reg_read_buf,
+ MAX_REG_RD *
+ sizeof(*nandc->reg_read_buf),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
+ dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
+ return -EIO;
+ }
+
+ nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
+ if (IS_ERR(nandc->tx_chan)) {
+ ret = PTR_ERR(nandc->tx_chan);
+ nandc->tx_chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "tx DMA channel request failed\n");
+ goto unalloc;
+ }
+
+ nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
+ if (IS_ERR(nandc->rx_chan)) {
+ ret = PTR_ERR(nandc->rx_chan);
+ nandc->rx_chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "rx DMA channel request failed\n");
+ goto unalloc;
+ }
+
+ nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
+ if (IS_ERR(nandc->cmd_chan)) {
+ ret = PTR_ERR(nandc->cmd_chan);
+ nandc->cmd_chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "cmd DMA channel request failed\n");
+ goto unalloc;
+ }
+
+ /*
+ * Initially allocate BAM transaction to read ONFI param page.
+ * After detecting all the devices, this BAM transaction will
+ * be freed and the next BAM transaction will be allocated with
+ * maximum codeword size
+ */
+ nandc->max_cwperpage = 1;
+ nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
+ if (!nandc->bam_txn) {
+ dev_err(nandc->dev,
+ "failed to allocate bam transaction\n");
+ ret = -ENOMEM;
+ goto unalloc;
+ }
+ } else {
+ nandc->chan = dma_request_chan(nandc->dev, "rxtx");
+ if (IS_ERR(nandc->chan)) {
+ ret = PTR_ERR(nandc->chan);
+ nandc->chan = NULL;
+ dev_err_probe(nandc->dev, ret,
+ "rxtx DMA channel request failed\n");
+ return ret;
+ }
+ }
+
+ INIT_LIST_HEAD(&nandc->desc_list);
+ INIT_LIST_HEAD(&nandc->host_list);
+
+ return 0;
+unalloc:
+ qcom_nandc_unalloc(nandc);
+ return ret;
+}
+EXPORT_SYMBOL(qcom_nandc_alloc);
+
+MODULE_DESCRIPTION("QPIC controller common api");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index d0aaccf72d78..b8035df8f732 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -279,8 +279,8 @@ config MTD_NAND_SH_FLCTL
config MTD_NAND_DAVINCI
tristate "DaVinci/Keystone NAND controller"
- depends on ARCH_DAVINCI || (ARCH_KEYSTONE && TI_AEMIF) || COMPILE_TEST
- depends on HAS_IOMEM
+ depends on COMPILE_TEST || ARCH_DAVINCI || ARCH_KEYSTONE
+ depends on HAS_IOMEM && TI_AEMIF
help
Enable the driver for NAND flash chips on Texas Instruments
DaVinci/Keystone processors.
@@ -454,6 +454,14 @@ config MTD_NAND_TS72XX
help
Enables support for NAND controller on ts72xx SBCs.
+config MTD_NAND_NUVOTON_MA35
+ tristate "Nuvoton MA35 SoC NAND controller"
+ depends on ARCH_MA35 || COMPILE_TEST
+ depends on OF
+ help
+ Enables support for the NAND controller found on
+ the Nuvoton MA35 series SoCs.
+
comment "Misc"
config MTD_SM_COMMON
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index d0b0e6b83568..99e79c448847 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_MTD_NAND_INTEL_LGM) += intel-nand-controller.o
obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rockchip-nand-controller.o
obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o
obj-$(CONFIG_MTD_NAND_RENESAS) += renesas-nand-controller.o
+obj-$(CONFIG_MTD_NAND_NUVOTON_MA35) += nuvoton-ma35d1-nand-controller.o
nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_onfi.o
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index fea5b6119956..17f6d9723df9 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -3008,7 +3008,7 @@ static int brcmnand_resume(struct device *dev)
brcmnand_save_restore_cs_config(host, 1);
/* Reset the chip, required by some chips after power-up */
- nand_reset_op(chip);
+ nand_reset(chip, 0);
}
return 0;
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index 0b2db4173e72..6667eea95597 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -2972,8 +2972,10 @@ free_buf_desc:
static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
{
cadence_nand_chips_cleanup(cdns_ctrl);
- dma_unmap_resource(cdns_ctrl->dmac->device->dev, cdns_ctrl->io.iova_dma,
- cdns_ctrl->io.size, DMA_BIDIRECTIONAL, 0);
+ if (cdns_ctrl->dmac)
+ dma_unmap_resource(cdns_ctrl->dmac->device->dev,
+ cdns_ctrl->io.iova_dma, cdns_ctrl->io.size,
+ DMA_BIDIRECTIONAL, 0);
cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
kfree(cdns_ctrl->buf);
dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index 1f8354acfb50..3986553881d0 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -10,9 +10,11 @@
* Dirk Behme <Dirk.Behme@gmail.com>
*/
+#include <linux/clk.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/memory/ti-aemif.h>
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/rawnand.h>
@@ -43,6 +45,9 @@
#define MASK_ALE 0x08
#define MASK_CLE 0x10
+#define MAX_TSU_PS 3000 /* Input setup time in ps */
+#define MAX_TH_PS 1600 /* Input hold time in ps */
+
struct davinci_nand_pdata {
uint32_t mask_ale;
uint32_t mask_cle;
@@ -66,6 +71,7 @@ struct davinci_nand_pdata {
/* none == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!)
* soft == NAND_ECC_ENGINE_TYPE_SOFT
+ * on-die == NAND_ECC_ENGINE_TYPE_ON_DIE
* else == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits
*
* All DaVinci-family chips support 1-bit hardware ECC.
@@ -117,6 +123,9 @@ struct davinci_nand_info {
uint32_t mask_cle;
uint32_t core_chipsel;
+
+ struct clk *clk;
+ struct aemif_device *aemif;
};
static DEFINE_SPINLOCK(davinci_nand_lock);
@@ -479,6 +488,44 @@ static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
.free = hwecc4_ooblayout_small_free,
};
+static int hwecc4_ooblayout_large_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+ int nregions = total_ecc_bytes / 10; /* 10 bytes per chunk */
+
+ if (section >= nregions)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 6;
+ oobregion->length = 10;
+
+ return 0;
+}
+
+static int hwecc4_ooblayout_large_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ unsigned int total_ecc_bytes = nand->ecc.ctx.total;
+ int nregions = total_ecc_bytes / 10; /* 10 bytes per chunk */
+
+ /* First region is used for BBT */
+ if (section >= (nregions - 1))
+ return -ERANGE;
+
+ oobregion->offset = ((section + 1) * 16);
+ oobregion->length = 6;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops hwecc4_large_ooblayout_ops = {
+ .ecc = hwecc4_ooblayout_large_ecc,
+ .free = hwecc4_ooblayout_large_free,
+};
+
#if defined(CONFIG_OF)
static const struct of_device_id davinci_nand_of_match[] = {
{.compatible = "ti,davinci-nand", },
@@ -525,6 +572,8 @@ nand_davinci_get_pdata(struct platform_device *pdev)
pdata->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
if (!strncmp("hw", mode, 2))
pdata->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
+ if (!strncmp("on-die", mode, 6))
+ pdata->engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
}
if (!device_property_read_u32(&pdev->dev,
"ti,davinci-ecc-bits", &prop))
@@ -580,6 +629,7 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
switch (chip->ecc.engine_type) {
case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
pdata->ecc_bits = 0;
break;
case NAND_ECC_ENGINE_TYPE_SOFT:
@@ -638,9 +688,12 @@ static int davinci_nand_attach_chip(struct nand_chip *chip)
mtd_set_ooblayout(mtd,
&hwecc4_small_ooblayout_ops);
} else if (chunks == 4 || chunks == 8) {
- mtd_set_ooblayout(mtd,
- nand_get_large_page_ooblayout());
chip->ecc.read_page = nand_read_page_hwecc_oob_first;
+
+ if (chip->options & NAND_IS_BOOT_MEDIUM)
+ mtd_set_ooblayout(mtd, &hwecc4_large_ooblayout_ops);
+ else
+ mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
} else {
return -EIO;
}
@@ -724,7 +777,7 @@ static int davinci_nand_exec_instr(struct davinci_nand_info *info,
case NAND_OP_WAITRDY_INSTR:
timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
ret = readl_relaxed_poll_timeout(info->base + NANDFSR_OFFSET,
- status, status & BIT(0), 100,
+ status, status & BIT(0), 5,
timeout_us);
if (ret)
return ret;
@@ -764,9 +817,82 @@ static int davinci_nand_exec_op(struct nand_chip *chip,
return 0;
}
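+/* convert a timing in picoseconds to AEMIF clock cycles, rounding up */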
+#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP((ps) / 1000, (period_ns)))
+
+static int davinci_nand_setup_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf)
+{
+ struct davinci_nand_info *info = to_davinci_nand(nand_to_mtd(chip));
+ const struct nand_sdr_timings *sdr;
+ struct aemif_cs_timings timings;
+ s32 cfg, min, cyc_ns;
+ int ret;
+
+ cyc_ns = 1000000000 / clk_get_rate(info->clk);
+
+ sdr = nand_get_sdr_timings(conf);
+ if (IS_ERR(sdr))
+ return PTR_ERR(sdr);
+
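+	/*
+	 * Derive each AEMIF field from the SDR timings, then widen the strobe
+	 * and hold counts until the combined windows also satisfy tCEA, tRC,
+	 * tDS and tWC.
+	 */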
+ cfg = TO_CYCLES(sdr->tCLR_min, cyc_ns) - 1;
+ timings.rsetup = cfg > 0 ? cfg : 0;
+
+ cfg = max_t(s32, TO_CYCLES(sdr->tREA_max + MAX_TSU_PS, cyc_ns),
+ TO_CYCLES(sdr->tRP_min, cyc_ns)) - 1;
+ timings.rstrobe = cfg > 0 ? cfg : 0;
+
+ min = TO_CYCLES(sdr->tCEA_max + MAX_TSU_PS, cyc_ns) - 2;
+ while ((s32)(timings.rsetup + timings.rstrobe) < min)
+ timings.rstrobe++;
+
+ cfg = TO_CYCLES((s32)(MAX_TH_PS - sdr->tCHZ_max), cyc_ns) - 1;
+ timings.rhold = cfg > 0 ? cfg : 0;
+
+ min = TO_CYCLES(sdr->tRC_min, cyc_ns) - 3;
+ while ((s32)(timings.rsetup + timings.rstrobe + timings.rhold) < min)
+ timings.rhold++;
+
+ cfg = TO_CYCLES((s32)(sdr->tRHZ_max - (timings.rhold + 1) * cyc_ns * 1000), cyc_ns);
+ cfg = max_t(s32, cfg, TO_CYCLES(sdr->tCHZ_max, cyc_ns)) - 1;
+ timings.ta = cfg > 0 ? cfg : 0;
+
+ cfg = TO_CYCLES(sdr->tWP_min, cyc_ns) - 1;
+ timings.wstrobe = cfg > 0 ? cfg : 0;
+
+ cfg = max_t(s32, TO_CYCLES(sdr->tCLS_min, cyc_ns), TO_CYCLES(sdr->tALS_min, cyc_ns));
+ cfg = max_t(s32, cfg, TO_CYCLES(sdr->tCS_min, cyc_ns)) - 1;
+ timings.wsetup = cfg > 0 ? cfg : 0;
+
+ min = TO_CYCLES(sdr->tDS_min, cyc_ns) - 2;
+ while ((s32)(timings.wsetup + timings.wstrobe) < min)
+ timings.wstrobe++;
+
+ cfg = max_t(s32, TO_CYCLES(sdr->tCLH_min, cyc_ns), TO_CYCLES(sdr->tALH_min, cyc_ns));
+ cfg = max_t(s32, cfg, TO_CYCLES(sdr->tCH_min, cyc_ns));
+ cfg = max_t(s32, cfg, TO_CYCLES(sdr->tDH_min, cyc_ns)) - 1;
+ timings.whold = cfg > 0 ? cfg : 0;
+
+ min = TO_CYCLES(sdr->tWC_min, cyc_ns) - 2;
+ while ((s32)(timings.wsetup + timings.wstrobe + timings.whold) < min)
+ timings.whold++;
+
+ dev_dbg(&info->pdev->dev, "RSETUP %x RSTROBE %x RHOLD %x\n",
+ timings.rsetup, timings.rstrobe, timings.rhold);
+ dev_dbg(&info->pdev->dev, "TA %x\n", timings.ta);
+ dev_dbg(&info->pdev->dev, "WSETUP %x WSTROBE %x WHOLD %x\n",
+ timings.wsetup, timings.wstrobe, timings.whold);
+
+ ret = aemif_check_cs_timings(&timings);
+ if (ret || chipnr == NAND_DATA_IFACE_CHECK_ONLY)
+ return ret;
+
+ return aemif_set_cs_timings(info->aemif, info->core_chipsel, &timings);
+}
+
static const struct nand_controller_ops davinci_nand_controller_ops = {
.attach_chip = davinci_nand_attach_chip,
.exec_op = davinci_nand_exec_op,
+ .setup_interface = davinci_nand_setup_interface,
};
static int nand_davinci_probe(struct platform_device *pdev)
@@ -822,9 +948,14 @@ static int nand_davinci_probe(struct platform_device *pdev)
return -EADDRNOTAVAIL;
}
+ info->clk = devm_clk_get_enabled(&pdev->dev, "aemif");
+ if (IS_ERR(info->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(info->clk), "failed to get clock");
+
info->pdev = pdev;
info->base = base;
info->vaddr = vaddr;
+ info->aemif = dev_get_drvdata(pdev->dev.parent);
mtd = nand_to_mtd(&info->chip);
mtd->dev.parent = &pdev->dev;
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index d76802944453..f4e68008ea03 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -17,6 +17,7 @@
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>
#include <linux/dma/mxs-dma.h>
+#include <linux/string_choices.h>
#include "gpmi-nand.h"
#include "gpmi-regs.h"
#include "bch-regs.h"
@@ -2319,8 +2320,8 @@ static int gpmi_nand_attach_chip(struct nand_chip *chip)
"fsl,no-blockmark-swap"))
this->swap_block_mark = false;
}
- dev_dbg(this->dev, "Blockmark swapping %sabled\n",
- this->swap_block_mark ? "en" : "dis");
+ dev_dbg(this->dev, "Blockmark swapping %s\n",
+ str_enabled_disabled(this->swap_block_mark));
ret = gpmi_init_last(this);
if (ret)
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 53e16d39af4b..13e4060bd1b6 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -1833,7 +1833,7 @@ int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
/* READ_ID data bytes are received twice in NV-DDR mode */
if (len && nand_interface_is_nvddr(conf)) {
- ddrbuf = kzalloc(len * 2, GFP_KERNEL);
+ ddrbuf = kcalloc(2, len, GFP_KERNEL);
if (!ddrbuf)
return -ENOMEM;
@@ -2203,7 +2203,7 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
* twice.
*/
if (force_8bit && nand_interface_is_nvddr(conf)) {
- ddrbuf = kzalloc(len * 2, GFP_KERNEL);
+ ddrbuf = kcalloc(2, len, GFP_KERNEL);
if (!ddrbuf)
return -ENOMEM;
diff --git a/drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c b/drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c
new file mode 100644
index 000000000000..c23b537948d5
--- /dev/null
+++ b/drivers/mtd/nand/raw/nuvoton-ma35d1-nand-controller.c
@@ -0,0 +1,1029 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Nuvoton Technology Corp.
+ */
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* NFI Registers */
+#define MA35_NFI_REG_DMACTL 0x400
+#define DMA_EN BIT(0)
+#define DMA_RST BIT(1)
+#define DMA_BUSY BIT(9)
+
+#define MA35_NFI_REG_DMASA 0x408
+#define MA35_NFI_REG_GCTL 0x800
+#define GRST BIT(0)
+#define NAND_EN BIT(3)
+
+#define MA35_NFI_REG_NANDCTL 0x8A0
+#define SWRST BIT(0)
+#define DMA_R_EN BIT(1)
+#define DMA_W_EN BIT(2)
+#define ECC_CHK BIT(7)
+#define PROT3BEN BIT(8)
+#define PSIZE_2K BIT(16)
+#define PSIZE_4K BIT(17)
+#define PSIZE_8K GENMASK(17, 16)
+#define PSIZE_MASK GENMASK(17, 16)
+#define BCH_T24 BIT(18)
+#define BCH_T8 BIT(20)
+#define BCH_T12 BIT(21)
+#define BCH_NONE (0x0)
+#define BCH_MASK GENMASK(22, 18)
+#define ECC_EN BIT(23)
+#define DISABLE_CS0 BIT(25)
+
+#define MA35_NFI_REG_NANDINTEN 0x8A8
+#define MA35_NFI_REG_NANDINTSTS 0x8AC
+#define INT_DMA BIT(0)
+#define INT_ECC BIT(2)
+#define INT_RB0 BIT(10)
+
+#define MA35_NFI_REG_NANDCMD 0x8B0
+#define MA35_NFI_REG_NANDADDR 0x8B4
+#define ENDADDR BIT(31)
+
+#define MA35_NFI_REG_NANDDATA 0x8B8
+#define MA35_NFI_REG_NANDRACTL 0x8BC
+#define MA35_NFI_REG_NANDECTL 0x8C0
+#define ENABLE_WP 0x0
+#define DISABLE_WP BIT(0)
+
+#define MA35_NFI_REG_NANDECCES0 0x8D0
+#define ECC_STATUS_MASK GENMASK(1, 0)
+#define ECC_ERR_CNT_MASK GENMASK(4, 0)
+
+#define MA35_NFI_REG_NANDECCEA0 0x900
+#define MA35_NFI_REG_NANDECCED0 0x960
+#define MA35_NFI_REG_NANDRA0 0xA00
+
+/* Define for the BCH hardware ECC engine */
+/* define the total padding bytes for 512/1024 data segment */
+#define MA35_BCH_PADDING_512 32
+#define MA35_BCH_PADDING_1024 64
+/* define the BCH parity code length for 512 bytes data pattern */
+#define MA35_PARITY_BCH8 15
+#define MA35_PARITY_BCH12 23
+/* define the BCH parity code length for 1024 bytes data pattern */
+#define MA35_PARITY_BCH24 45
+
+#define MA35_MAX_NSELS (2)
+#define PREFIX_RA_IS_EMPTY(reg) FIELD_GET(GENMASK(31, 16), (reg))
+
+struct ma35_nand_chip {
+ struct list_head node;
+ struct nand_chip chip;
+
+ u32 eccstatus;
+ u8 nsels;
+ u8 sels[] __counted_by(nsels);
+};
+
+struct ma35_nand_info {
+ struct nand_controller controller;
+ struct device *dev;
+ void __iomem *regs;
+ int irq;
+ struct clk *clk;
+ struct completion complete;
+ struct list_head chips;
+
+ u8 *buffer;
+ unsigned long assigned_cs;
+};
+
+static inline struct ma35_nand_chip *to_ma35_nand(struct nand_chip *chip)
+{
+ return container_of(chip, struct ma35_nand_chip, chip);
+}
+
+static int ma35_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oob_region->length = chip->ecc.total;
+ oob_region->offset = mtd->oobsize - oob_region->length;
+
+ return 0;
+}
+
+static int ma35_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oob_region)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oob_region->length = mtd->oobsize - chip->ecc.total - 2;
+ oob_region->offset = 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops ma35_ooblayout_ops = {
+ .free = ma35_ooblayout_free,
+ .ecc = ma35_ooblayout_ecc,
+};
+
+static inline void ma35_clear_spare(struct nand_chip *chip, int size)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ int i;
+
+ for (i = 0; i < size / 4; i++)
+ writel(0xff, nand->regs + MA35_NFI_REG_NANDRA0);
+}
+
+static inline void read_remaining_bytes(struct ma35_nand_info *nand, u32 *buf,
+ u32 offset, int size, int swap)
+{
+ u32 value = readl(nand->regs + MA35_NFI_REG_NANDRA0 + offset);
+ u8 *ptr = (u8 *)buf;
+ int i, shift;
+
+ for (i = 0; i < size; i++) {
+ shift = (swap ? 3 - i : i) * 8;
+ ptr[i] = (value >> shift) & 0xff;
+ }
+}
+
+static inline void ma35_read_spare(struct nand_chip *chip, int size, u32 *buf, u32 offset)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ u32 off = round_down(offset, 4);
+ int len = offset % 4;
+ int i;
+
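+	/* read the unaligned head bytes first, then whole words, then the tail */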
+ if (len) {
+ read_remaining_bytes(nand, buf, off, 4 - len, 1);
+ off += 4;
+ size -= (4 - len);
+ }
+
+ for (i = 0; i < size / 4; i++)
+ *buf++ = readl(nand->regs + MA35_NFI_REG_NANDRA0 + off + (i * 4));
+
+ read_remaining_bytes(nand, buf, off + (size & ~3), size % 4, 0);
+}
+
+static inline void ma35_write_spare(struct nand_chip *chip, int size, u32 *buf)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ u32 value;
+ int i, j;
+ u8 *ptr;
+
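+	/* copy whole words first, then pack the 1-3 trailing bytes into one write */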
+ for (i = 0, j = 0; i < size / 4; i++, j += 4)
+ writel(*buf++, nand->regs + MA35_NFI_REG_NANDRA0 + j);
+
+ ptr = (u8 *)buf;
+ switch (size % 4) {
+ case 1:
+ writel(*ptr, nand->regs + MA35_NFI_REG_NANDRA0 + j);
+ break;
+ case 2:
+ value = *ptr | (*(ptr + 1) << 8);
+ writel(value, nand->regs + MA35_NFI_REG_NANDRA0 + j);
+ break;
+ case 3:
+ value = *ptr | (*(ptr + 1) << 8) | (*(ptr + 2) << 16);
+ writel(value, nand->regs + MA35_NFI_REG_NANDRA0 + j);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ma35_nand_target_enable(struct nand_chip *chip, unsigned int cs)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ u32 reg;
+
+ switch (cs) {
+ case 0:
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg & ~DISABLE_CS0, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDINTSTS);
+ reg |= INT_RB0;
+ writel(reg, nand->regs + MA35_NFI_REG_NANDINTSTS);
+ break;
+ default:
+ break;
+ }
+}
+
+static int ma35_nand_hwecc_init(struct nand_chip *chip, struct ma35_nand_info *nand)
+{
+ struct ma35_nand_chip *nvtnand = to_ma35_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct device *dev = mtd->dev.parent;
+ u32 reg;
+
+ nand->buffer = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
+ if (!nand->buffer)
+ return -ENOMEM;
+
+ /* Redundant area size */
+ writel(mtd->oobsize, nand->regs + MA35_NFI_REG_NANDRACTL);
+
+ /* Protect redundant 3 bytes and disable ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ reg |= (PROT3BEN | ECC_CHK);
+ reg &= ~ECC_EN;
+
+ if (chip->ecc.strength != 0) {
+ chip->ecc.steps = mtd->writesize / chip->ecc.size;
+ nvtnand->eccstatus = (chip->ecc.steps < 4) ? 1 : chip->ecc.steps / 4;
+ /* Set BCH algorithm */
+ reg &= ~BCH_MASK;
+ switch (chip->ecc.strength) {
+ case 8:
+ chip->ecc.total = chip->ecc.steps * MA35_PARITY_BCH8;
+ reg |= BCH_T8;
+ break;
+ case 12:
+ chip->ecc.total = chip->ecc.steps * MA35_PARITY_BCH12;
+ reg |= BCH_T12;
+ break;
+ case 24:
+ chip->ecc.total = chip->ecc.steps * MA35_PARITY_BCH24;
+ reg |= BCH_T24;
+ break;
+ default:
+ dev_err(nand->dev, "ECC strength unsupported\n");
+ return -EINVAL;
+ }
+
+ chip->ecc.bytes = chip->ecc.total / chip->ecc.steps;
+ }
+ writel(reg, nand->regs + MA35_NFI_REG_NANDCTL);
+ return 0;
+}
+
+/* Correct data by BCH algorithm */
+static void ma35_nfi_correct(struct nand_chip *chip, u8 index,
+ u8 err_cnt, u8 *addr)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ u32 temp_data[24], temp_addr[24];
+ u32 padding_len, parity_len;
+ u32 value, offset, remain;
+ u32 err_data[6];
+ u8 i, j;
+
+ /* Configurations */
+ if (chip->ecc.strength <= 8) {
+ parity_len = MA35_PARITY_BCH8;
+ padding_len = MA35_BCH_PADDING_512;
+ } else if (chip->ecc.strength <= 12) {
+ parity_len = MA35_PARITY_BCH12;
+ padding_len = MA35_BCH_PADDING_512;
+ } else if (chip->ecc.strength <= 24) {
+ parity_len = MA35_PARITY_BCH24;
+ padding_len = MA35_BCH_PADDING_1024;
+ } else {
+ dev_err(nand->dev, "Invalid BCH_TSEL = 0x%lx\n",
+ readl(nand->regs + MA35_NFI_REG_NANDCTL) & BCH_MASK);
+ return;
+ }
+
+ /*
+	 * Get the valid BCH_ECC_DATAx registers and parse them into
+	 * temp_data[]; each register holds 4 error bytes, so derive the
+	 * number of valid registers from the error count.
+ */
+ j = (err_cnt + 3) / 4;
+ j = (j > 6) ? 6 : j;
+ for (i = 0; i < j; i++)
+ err_data[i] = readl(nand->regs + MA35_NFI_REG_NANDECCED0 + i * 4);
+
+ for (i = 0; i < j; i++) {
+ temp_data[i * 4 + 0] = err_data[i] & 0xff;
+ temp_data[i * 4 + 1] = (err_data[i] >> 8) & 0xff;
+ temp_data[i * 4 + 2] = (err_data[i] >> 16) & 0xff;
+ temp_data[i * 4 + 3] = (err_data[i] >> 24) & 0xff;
+ }
+
+ /*
+	 * Get the valid REG_BCH_ECC_ADDRx registers and parse them into
+	 * temp_addr[]; each register holds 2 error addresses, so derive the
+	 * number of valid registers from the error count.
+ */
+ j = (err_cnt + 1) / 2;
+ j = (j > 12) ? 12 : j;
+ for (i = 0; i < j; i++) {
+ temp_addr[i * 2 + 0] = readl(nand->regs + MA35_NFI_REG_NANDECCEA0 + i * 4)
+ & 0x07ff;
+ temp_addr[i * 2 + 1] = (readl(nand->regs + MA35_NFI_REG_NANDECCEA0 + i * 4)
+ >> 16) & 0x07ff;
+ }
+
+ /* pointer to begin address of field that with data error */
+ addr += index * chip->ecc.size;
+
+ /* correct each error bytes */
+ for (i = 0; i < err_cnt; i++) {
+ u32 corrected_index = temp_addr[i];
+
+ if (corrected_index < chip->ecc.size) {
+ /* for wrong data in field */
+ *(addr + corrected_index) ^= temp_data[i];
+ } else if (corrected_index < (chip->ecc.size + 3)) {
+ /* for wrong first-3-bytes in redundancy area */
+ corrected_index -= chip->ecc.size;
+ temp_addr[i] += (parity_len * index); /* field offset */
+
+ value = readl(nand->regs + MA35_NFI_REG_NANDRA0);
+ value ^= temp_data[i] << (8 * corrected_index);
+ writel(value, nand->regs + MA35_NFI_REG_NANDRA0);
+ } else {
+ /*
+			 * for a wrong parity code in the redundancy area
+			 * ERR_ADDRx = [data in field] + [3 bytes] + [xx] + [parity code]
+			 *             |<--  padding bytes   -->|
+			 * The ERR_ADDRx for the last parity code always equals field size + padding size.
+			 * The first parity code = field size + padding size - parity code length.
+			 * For example, for BCH T12, the first parity code = 512 + 32 - 23 = 521.
+			 * That is, the error byte offset within the parity code is
+			 * ERR_ADDRx - (field size + padding size - parity code length).
+ */
+ corrected_index -= (chip->ecc.size + padding_len - parity_len);
+
+ /*
+ * final address = first parity code of first field +
+ * offset of fields +
+ * offset within field
+ */
+ offset = (readl(nand->regs + MA35_NFI_REG_NANDRACTL) & 0x1ff) -
+ (parity_len * chip->ecc.steps) +
+ (parity_len * index) + corrected_index;
+
+ remain = offset % 4;
+ value = readl(nand->regs + MA35_NFI_REG_NANDRA0 + offset - remain);
+ value ^= temp_data[i] << (8 * remain);
+ writel(value, nand->regs + MA35_NFI_REG_NANDRA0 + offset - remain);
+ }
+ }
+}
+
+static int ma35_nfi_ecc_check(struct nand_chip *chip, u8 *addr)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct ma35_nand_chip *nvtnand = to_ma35_nand(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int maxbitflips = 0;
+ int cnt = 0;
+ u32 status;
+ int i, j;
+
+ for (j = 0; j < nvtnand->eccstatus; j++) {
+ status = readl(nand->regs + MA35_NFI_REG_NANDECCES0 + j * 4);
+ if (!status)
+ continue;
+
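+		/*
+		 * Each NANDECCESx register packs the status of four codewords,
+		 * one byte each: the low two bits flag the error type and the
+		 * bits above them carry the correctable-error count, per the
+		 * masks applied below.
+		 */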
+ for (i = 0; i < 4; i++) {
+ if ((status & ECC_STATUS_MASK) == 0x01) {
+ /* Correctable error */
+ cnt = (status >> 2) & ECC_ERR_CNT_MASK;
+ ma35_nfi_correct(chip, j * 4 + i, cnt, addr);
+ maxbitflips = max_t(u32, maxbitflips, cnt);
+ mtd->ecc_stats.corrected += cnt;
+ } else {
+ /* Uncorrectable error */
+ mtd->ecc_stats.failed++;
+				dev_err(nand->dev, "uncorrectable error! 0x%04x\n", status);
+ return -EBADMSG;
+ }
+ status >>= 8;
+ }
+ }
+ return maxbitflips;
+}
+
+static void ma35_nand_dmac_init(struct ma35_nand_info *nand)
+{
+ /* DMAC reset and enable */
+ writel(DMA_RST | DMA_EN, nand->regs + MA35_NFI_REG_DMACTL);
+ writel(DMA_EN, nand->regs + MA35_NFI_REG_DMACTL);
+
+ /* Clear DMA finished flag and enable */
+ writel(INT_DMA | INT_ECC, nand->regs + MA35_NFI_REG_NANDINTSTS);
+ writel(INT_DMA, nand->regs + MA35_NFI_REG_NANDINTEN);
+}
+
+static int ma35_nand_do_write(struct nand_chip *chip, const u8 *addr, u32 len)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ dma_addr_t dma_addr;
+ int ret = 0, i;
+ u32 reg;
+
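+	/* Anything other than a full page is written by PIO through the data port */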
+ if (len != mtd->writesize) {
+ for (i = 0; i < len; i++)
+ writel(addr[i], nand->regs + MA35_NFI_REG_NANDDATA);
+ return 0;
+ }
+
+ ma35_nand_dmac_init(nand);
+
+	/* Clear the upper 16 bits of RA0 to mark this page as dirty. */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRA0);
+ if (reg & 0xffff0000)
+ writel(reg & 0xffff, nand->regs + MA35_NFI_REG_NANDRA0);
+
+ dma_addr = dma_map_single(nand->dev, (void *)addr, len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(nand->dev, dma_addr);
+ if (ret) {
+ dev_err(nand->dev, "dma mapping error\n");
+ return -EINVAL;
+ }
+ dma_sync_single_for_device(nand->dev, dma_addr, len, DMA_TO_DEVICE);
+
+ reinit_completion(&nand->complete);
+ writel(dma_addr, nand->regs + MA35_NFI_REG_DMASA);
+ writel(readl(nand->regs + MA35_NFI_REG_NANDCTL) | DMA_W_EN,
+ nand->regs + MA35_NFI_REG_NANDCTL);
+ ret = wait_for_completion_timeout(&nand->complete, msecs_to_jiffies(1000));
+ if (!ret) {
+ dev_err(nand->dev, "write timeout\n");
+ ret = -ETIMEDOUT;
+ }
+
+ dma_unmap_single(nand->dev, dma_addr, len, DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int ma35_nand_do_read(struct nand_chip *chip, u8 *addr, u32 len)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret = 0, cnt = 0, i;
+ dma_addr_t dma_addr;
+ u32 reg;
+
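+	/* Anything other than a full page is read by PIO through the data port */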
+ if (len != mtd->writesize) {
+ for (i = 0; i < len; i++)
+ addr[i] = readb(nand->regs + MA35_NFI_REG_NANDDATA);
+ return 0;
+ }
+
+ ma35_nand_dmac_init(nand);
+
+ /* Setup and start DMA using dma_addr */
+ dma_addr = dma_map_single(nand->dev, (void *)addr, len, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(nand->dev, dma_addr);
+ if (ret) {
+ dev_err(nand->dev, "dma mapping error\n");
+ return -EINVAL;
+ }
+
+ reinit_completion(&nand->complete);
+ writel(dma_addr, nand->regs + MA35_NFI_REG_DMASA);
+ writel(readl(nand->regs + MA35_NFI_REG_NANDCTL) | DMA_R_EN,
+ nand->regs + MA35_NFI_REG_NANDCTL);
+ ret = wait_for_completion_timeout(&nand->complete, msecs_to_jiffies(1000));
+ if (!ret) {
+ dev_err(nand->dev, "read timeout\n");
+ ret = -ETIMEDOUT;
+ }
+
+ dma_unmap_single(nand->dev, dma_addr, len, DMA_FROM_DEVICE);
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDINTSTS);
+ if (reg & INT_ECC) {
+ cnt = ma35_nfi_ecc_check(chip, addr);
+ if (cnt < 0) {
+ writel(DMA_RST | DMA_EN, nand->regs + MA35_NFI_REG_DMACTL);
+ writel(readl(nand->regs + MA35_NFI_REG_NANDCTL) | SWRST,
+ nand->regs + MA35_NFI_REG_NANDCTL);
+ }
+ writel(INT_ECC, nand->regs + MA35_NFI_REG_NANDINTSTS);
+ }
+
+ ret = ret < 0 ? ret : cnt;
+ return ret;
+}
+
+static int ma35_nand_format_subpage(struct nand_chip *chip, u32 offset,
+ u32 len, const u8 *buf)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 page_off = round_down(offset, chip->ecc.size);
+ u32 end = DIV_ROUND_UP(page_off + len, chip->ecc.size);
+ u32 start = page_off / chip->ecc.size;
+ u32 reg;
+ int i;
+
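+	/*
+	 * The upper 16 bits of NANDRACTL appear to act as a per-codeword
+	 * mask: set them all, then clear the bit of every codeword that
+	 * carries new data (inferred from how ma35_nand_write_subpage_hwecc()
+	 * consumes these bits).
+	 */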
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRACTL) | 0xffff0000;
+ memset(nand->buffer, 0xff, mtd->writesize);
+ for (i = start; i < end; i++) {
+ memcpy(nand->buffer + i * chip->ecc.size,
+ buf + i * chip->ecc.size, chip->ecc.size);
+ reg &= ~(1 << (i + 16));
+ }
+ writel(reg, nand->regs + MA35_NFI_REG_NANDRACTL);
+
+ return 0;
+}
+
+static int ma35_nand_write_subpage_hwecc(struct nand_chip *chip, u32 offset,
+ u32 data_len, const u8 *buf,
+ int oob_required, int page)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 reg, oobpoi, index;
+ int i;
+
+ /* Enable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg | ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ ma35_nand_target_enable(chip, chip->cur_cs);
+
+ ma35_clear_spare(chip, mtd->oobsize);
+ ma35_write_spare(chip, mtd->oobsize - chip->ecc.total,
+ (u32 *)chip->oob_poi);
+
+ ma35_nand_format_subpage(chip, offset, data_len, buf);
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ ma35_nand_do_write(chip, nand->buffer, mtd->writesize);
+ nand_prog_page_end_op(chip);
+
+ oobpoi = mtd->oobsize - chip->ecc.total;
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRACTL);
+ for (i = 0; i < chip->ecc.steps; i++) {
+ index = i * chip->ecc.bytes;
+ if (!(reg & (1 << (i + 16)))) {
+ ma35_read_spare(chip, chip->ecc.bytes,
+ (u32 *)(chip->oob_poi + oobpoi + index),
+ oobpoi + index);
+ }
+ }
+
+ writel(mtd->oobsize, nand->regs + MA35_NFI_REG_NANDRACTL);
+ /* Disable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg & ~ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ return 0;
+}
+
+static int ma35_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 reg;
+
+ /* Enable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg | ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ ma35_nand_target_enable(chip, chip->cur_cs);
+
+ ma35_clear_spare(chip, mtd->oobsize);
+ ma35_write_spare(chip, mtd->oobsize - chip->ecc.total,
+ (u32 *)chip->oob_poi);
+
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ ma35_nand_do_write(chip, buf, mtd->writesize);
+ nand_prog_page_end_op(chip);
+
+ ma35_read_spare(chip, chip->ecc.total,
+ (u32 *)(chip->oob_poi + (mtd->oobsize - chip->ecc.total)),
+ mtd->oobsize - chip->ecc.total);
+
+ /* Disable HW ECC engine */
+ writel(reg & ~ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ return 0;
+}
+
+static int ma35_nand_read_subpage_hwecc(struct nand_chip *chip, u32 offset,
+ u32 data_len, u8 *buf, int page)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int bitflips = 0;
+ u32 reg;
+
+ /* Enable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg | ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ ma35_nand_target_enable(chip, chip->cur_cs);
+ nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+ ma35_write_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi);
+
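+	/* A redundancy area reading as erased means the page was never programmed */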
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRA0);
+ if (PREFIX_RA_IS_EMPTY(reg)) {
+ memset((void *)buf, 0xff, mtd->writesize);
+ } else {
+ nand_read_page_op(chip, page, offset, NULL, 0);
+ bitflips = ma35_nand_do_read(chip, buf + offset, data_len);
+ ma35_read_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi, 0);
+ }
+
+ /* Disable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg & ~ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ return bitflips;
+}
+
+static int ma35_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int bitflips = 0;
+ u32 reg;
+
+ /* Enable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg | ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ ma35_nand_target_enable(chip, chip->cur_cs);
+ nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+ ma35_write_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi);
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRA0);
+ if (PREFIX_RA_IS_EMPTY(reg)) {
+ memset((void *)buf, 0xff, mtd->writesize);
+ } else {
+ nand_read_page_op(chip, page, 0, NULL, 0);
+ bitflips = ma35_nand_do_read(chip, buf, mtd->writesize);
+ ma35_read_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi, 0);
+ }
+
+ /* Disable HW ECC engine */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ writel(reg & ~ECC_EN, nand->regs + MA35_NFI_REG_NANDCTL);
+
+ return bitflips;
+}
+
+static int ma35_nand_read_oob_hwecc(struct nand_chip *chip, int page)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u32 reg;
+
+ ma35_nand_target_enable(chip, chip->cur_cs);
+ nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+
+ /* copy OOB data to controller redundant area for page read */
+ ma35_write_spare(chip, mtd->oobsize, (u32 *)chip->oob_poi);
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDRA0);
+ if (PREFIX_RA_IS_EMPTY(reg))
+ memset((void *)chip->oob_poi, 0xff, mtd->oobsize);
+
+ return 0;
+}
+
+static inline void ma35_hw_init(struct ma35_nand_info *nand)
+{
+ u32 reg;
+
+	/* Disable flash write protection. */
+ writel(DISABLE_WP, nand->regs + MA35_NFI_REG_NANDECTL);
+
+ /* resets the internal state machine and counters */
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL);
+ reg |= SWRST;
+ writel(reg, nand->regs + MA35_NFI_REG_NANDCTL);
+}
+
+static irqreturn_t ma35_nand_irq(int irq, void *id)
+{
+ struct ma35_nand_info *nand = (struct ma35_nand_info *)id;
+ u32 isr;
+
+ isr = readl(nand->regs + MA35_NFI_REG_NANDINTSTS);
+ if (isr & INT_DMA) {
+ writel(INT_DMA, nand->regs + MA35_NFI_REG_NANDINTSTS);
+ complete(&nand->complete);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int ma35_nand_attach_chip(struct nand_chip *chip)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct device *dev = mtd->dev.parent;
+ u32 reg;
+
+ if (chip->options & NAND_BUSWIDTH_16) {
+		dev_err(dev, "16-bit bus width not supported\n");
+ return -EINVAL;
+ }
+
+ reg = readl(nand->regs + MA35_NFI_REG_NANDCTL) & (~PSIZE_MASK);
+ switch (mtd->writesize) {
+ case SZ_2K:
+ writel(reg | PSIZE_2K, nand->regs + MA35_NFI_REG_NANDCTL);
+ break;
+ case SZ_4K:
+ writel(reg | PSIZE_4K, nand->regs + MA35_NFI_REG_NANDCTL);
+ break;
+ case SZ_8K:
+ writel(reg | PSIZE_8K, nand->regs + MA35_NFI_REG_NANDCTL);
+ break;
+ default:
+		dev_err(dev, "Unsupported page size\n");
+ return -EINVAL;
+ }
+
+ switch (chip->ecc.engine_type) {
+ case NAND_ECC_ENGINE_TYPE_ON_HOST:
+ /* Do not store BBT bits in the OOB section as it is not protected */
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+ chip->options |= NAND_USES_DMA | NAND_SUBPAGE_READ;
+ chip->ecc.write_subpage = ma35_nand_write_subpage_hwecc;
+ chip->ecc.write_page = ma35_nand_write_page_hwecc;
+ chip->ecc.read_subpage = ma35_nand_read_subpage_hwecc;
+ chip->ecc.read_page = ma35_nand_read_page_hwecc;
+ chip->ecc.read_oob = ma35_nand_read_oob_hwecc;
+ return ma35_nand_hwecc_init(chip, nand);
+ case NAND_ECC_ENGINE_TYPE_NONE:
+ case NAND_ECC_ENGINE_TYPE_SOFT:
+ case NAND_ECC_ENGINE_TYPE_ON_DIE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ma35_nfc_exec_instr(struct nand_chip *chip,
+ const struct nand_op_instr *instr)
+{
+ struct ma35_nand_info *nand = nand_get_controller_data(chip);
+ unsigned int i;
+ int ret = 0;
+ u32 status;
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ writel(instr->ctx.cmd.opcode, nand->regs + MA35_NFI_REG_NANDCMD);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ for (i = 0; i < instr->ctx.addr.naddrs; i++) {
+ if (i == (instr->ctx.addr.naddrs - 1))
+ writel(instr->ctx.addr.addrs[i] | ENDADDR,
+ nand->regs + MA35_NFI_REG_NANDADDR);
+ else
+ writel(instr->ctx.addr.addrs[i],
+ nand->regs + MA35_NFI_REG_NANDADDR);
+ }
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ ret = ma35_nand_do_read(chip, instr->ctx.data.buf.in, instr->ctx.data.len);
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ ret = ma35_nand_do_write(chip, instr->ctx.data.buf.out, instr->ctx.data.len);
+ break;
+ case NAND_OP_WAITRDY_INSTR:
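+		/* readl_poll_timeout() expects its timeout in microseconds */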
+ return readl_poll_timeout(nand->regs + MA35_NFI_REG_NANDINTSTS, status,
+ status & INT_RB0, 20,
+ instr->ctx.waitrdy.timeout_ms * MSEC_PER_SEC);
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ma35_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ int ret = 0;
+ u32 i;
+
+ if (check_only)
+ return 0;
+
+ ma35_nand_target_enable(chip, op->cs);
+
+ for (i = 0; i < op->ninstrs; i++) {
+ ret = ma35_nfc_exec_instr(chip, &op->instrs[i]);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static const struct nand_controller_ops ma35_nfc_ops = {
+ .attach_chip = ma35_nand_attach_chip,
+ .exec_op = ma35_nfc_exec_op,
+};
+
+static int ma35_nand_chip_init(struct device *dev, struct ma35_nand_info *nand,
+ struct device_node *np)
+{
+ struct ma35_nand_chip *nvtnand;
+ struct nand_chip *chip;
+ struct mtd_info *mtd;
+ int nsels;
+ int ret;
+ u32 cs;
+ int i;
+
+ nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
+	if (nsels <= 0 || nsels > MA35_MAX_NSELS) {
+ dev_err(dev, "invalid reg property size %d\n", nsels);
+ return -EINVAL;
+ }
+
+ nvtnand = devm_kzalloc(dev, struct_size(nvtnand, sels, nsels),
+ GFP_KERNEL);
+ if (!nvtnand)
+ return -ENOMEM;
+
+ nvtnand->nsels = nsels;
+ for (i = 0; i < nsels; i++) {
+ ret = of_property_read_u32_index(np, "reg", i, &cs);
+ if (ret) {
+			dev_err(dev, "reg property failure: %d\n", ret);
+ return ret;
+ }
+
+ if (cs >= MA35_MAX_NSELS) {
+ dev_err(dev, "invalid CS: %u\n", cs);
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(cs, &nand->assigned_cs)) {
+ dev_err(dev, "CS %u already assigned\n", cs);
+ return -EINVAL;
+ }
+
+ nvtnand->sels[i] = cs;
+ }
+
+ chip = &nvtnand->chip;
+ chip->controller = &nand->controller;
+
+ nand_set_flash_node(chip, np);
+ nand_set_controller_data(chip, nand);
+
+ mtd = nand_to_mtd(chip);
+ mtd->owner = THIS_MODULE;
+ mtd->dev.parent = dev;
+
+ mtd_set_ooblayout(mtd, &ma35_ooblayout_ops);
+ ret = nand_scan(chip, nsels);
+ if (ret)
+ return ret;
+
+ ret = mtd_device_register(mtd, NULL, 0);
+ if (ret) {
+ nand_cleanup(chip);
+ return ret;
+ }
+
+ list_add_tail(&nvtnand->node, &nand->chips);
+
+ return 0;
+}
+
+static void ma35_chips_cleanup(struct ma35_nand_info *nand)
+{
+ struct ma35_nand_chip *nvtnand, *tmp;
+ struct nand_chip *chip;
+ int ret;
+
+ list_for_each_entry_safe(nvtnand, tmp, &nand->chips, node) {
+ chip = &nvtnand->chip;
+ ret = mtd_device_unregister(nand_to_mtd(chip));
+ WARN_ON(ret);
+ nand_cleanup(chip);
+ list_del(&nvtnand->node);
+ }
+}
+
+static int ma35_nand_chips_init(struct device *dev, struct ma35_nand_info *nand)
+{
+ struct device_node *np = dev->of_node, *nand_np;
+ int ret;
+
+ for_each_child_of_node(np, nand_np) {
+ ret = ma35_nand_chip_init(dev, nand, nand_np);
+ if (ret) {
+ ma35_chips_cleanup(nand);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int ma35_nand_probe(struct platform_device *pdev)
+{
+ struct ma35_nand_info *nand;
+ int ret = 0;
+
+ nand = devm_kzalloc(&pdev->dev, sizeof(*nand), GFP_KERNEL);
+ if (!nand)
+ return -ENOMEM;
+
+ nand_controller_init(&nand->controller);
+ INIT_LIST_HEAD(&nand->chips);
+ nand->controller.ops = &ma35_nfc_ops;
+
+ init_completion(&nand->complete);
+
+ nand->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(nand->regs))
+ return PTR_ERR(nand->regs);
+
+ nand->dev = &pdev->dev;
+
+ nand->clk = devm_clk_get_enabled(&pdev->dev, "nand_gate");
+ if (IS_ERR(nand->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(nand->clk),
+ "failed to find NAND clock\n");
+
+ nand->irq = platform_get_irq(pdev, 0);
+ if (nand->irq < 0)
+ return dev_err_probe(&pdev->dev, nand->irq,
+ "failed to get platform irq\n");
+
+ ret = devm_request_irq(&pdev->dev, nand->irq, ma35_nand_irq,
+ IRQF_TRIGGER_HIGH, "ma35d1-nand-controller", nand);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request NAND irq\n");
+ return -ENXIO;
+ }
+
+ platform_set_drvdata(pdev, nand);
+
+ writel(GRST | NAND_EN, nand->regs + MA35_NFI_REG_GCTL);
+ ma35_hw_init(nand);
+ ret = ma35_nand_chips_init(&pdev->dev, nand);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to init NAND chips\n");
+		return ret;
+	}
+
+ return ret;
+}
+
+static void ma35_nand_remove(struct platform_device *pdev)
+{
+ struct ma35_nand_info *nand = platform_get_drvdata(pdev);
+
+ ma35_chips_cleanup(nand);
+}
+
+static const struct of_device_id ma35_nand_of_match[] = {
+ { .compatible = "nuvoton,ma35d1-nand-controller" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ma35_nand_of_match);
+
+static struct platform_driver ma35_nand_driver = {
+ .driver = {
+ .name = "ma35d1-nand-controller",
+ .of_match_table = ma35_nand_of_match,
+ },
+ .probe = ma35_nand_probe,
+ .remove = ma35_nand_remove,
+};
+
+module_platform_driver(ma35_nand_driver);
+
+MODULE_DESCRIPTION("Nuvoton MA35 NAND controller driver");
+MODULE_AUTHOR("Hui-Ping Chen <hpchen0nvt@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 636bba2528bf..1003cf118c01 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -15,431 +15,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-
-/* NANDc reg offsets */
-#define NAND_FLASH_CMD 0x00
-#define NAND_ADDR0 0x04
-#define NAND_ADDR1 0x08
-#define NAND_FLASH_CHIP_SELECT 0x0c
-#define NAND_EXEC_CMD 0x10
-#define NAND_FLASH_STATUS 0x14
-#define NAND_BUFFER_STATUS 0x18
-#define NAND_DEV0_CFG0 0x20
-#define NAND_DEV0_CFG1 0x24
-#define NAND_DEV0_ECC_CFG 0x28
-#define NAND_AUTO_STATUS_EN 0x2c
-#define NAND_DEV1_CFG0 0x30
-#define NAND_DEV1_CFG1 0x34
-#define NAND_READ_ID 0x40
-#define NAND_READ_STATUS 0x44
-#define NAND_DEV_CMD0 0xa0
-#define NAND_DEV_CMD1 0xa4
-#define NAND_DEV_CMD2 0xa8
-#define NAND_DEV_CMD_VLD 0xac
-#define SFLASHC_BURST_CFG 0xe0
-#define NAND_ERASED_CW_DETECT_CFG 0xe8
-#define NAND_ERASED_CW_DETECT_STATUS 0xec
-#define NAND_EBI2_ECC_BUF_CFG 0xf0
-#define FLASH_BUF_ACC 0x100
-
-#define NAND_CTRL 0xf00
-#define NAND_VERSION 0xf08
-#define NAND_READ_LOCATION_0 0xf20
-#define NAND_READ_LOCATION_1 0xf24
-#define NAND_READ_LOCATION_2 0xf28
-#define NAND_READ_LOCATION_3 0xf2c
-#define NAND_READ_LOCATION_LAST_CW_0 0xf40
-#define NAND_READ_LOCATION_LAST_CW_1 0xf44
-#define NAND_READ_LOCATION_LAST_CW_2 0xf48
-#define NAND_READ_LOCATION_LAST_CW_3 0xf4c
-
-/* dummy register offsets, used by write_reg_dma */
-#define NAND_DEV_CMD1_RESTORE 0xdead
-#define NAND_DEV_CMD_VLD_RESTORE 0xbeef
-
-/* NAND_FLASH_CMD bits */
-#define PAGE_ACC BIT(4)
-#define LAST_PAGE BIT(5)
-
-/* NAND_FLASH_CHIP_SELECT bits */
-#define NAND_DEV_SEL 0
-#define DM_EN BIT(2)
-
-/* NAND_FLASH_STATUS bits */
-#define FS_OP_ERR BIT(4)
-#define FS_READY_BSY_N BIT(5)
-#define FS_MPU_ERR BIT(8)
-#define FS_DEVICE_STS_ERR BIT(16)
-#define FS_DEVICE_WP BIT(23)
-
-/* NAND_BUFFER_STATUS bits */
-#define BS_UNCORRECTABLE_BIT BIT(8)
-#define BS_CORRECTABLE_ERR_MSK 0x1f
-
-/* NAND_DEVn_CFG0 bits */
-#define DISABLE_STATUS_AFTER_WRITE 4
-#define CW_PER_PAGE 6
-#define UD_SIZE_BYTES 9
-#define UD_SIZE_BYTES_MASK GENMASK(18, 9)
-#define ECC_PARITY_SIZE_BYTES_RS 19
-#define SPARE_SIZE_BYTES 23
-#define SPARE_SIZE_BYTES_MASK GENMASK(26, 23)
-#define NUM_ADDR_CYCLES 27
-#define STATUS_BFR_READ 30
-#define SET_RD_MODE_AFTER_STATUS 31
-
-/* NAND_DEVn_CFG0 bits */
-#define DEV0_CFG1_ECC_DISABLE 0
-#define WIDE_FLASH 1
-#define NAND_RECOVERY_CYCLES 2
-#define CS_ACTIVE_BSY 5
-#define BAD_BLOCK_BYTE_NUM 6
-#define BAD_BLOCK_IN_SPARE_AREA 16
-#define WR_RD_BSY_GAP 17
-#define ENABLE_BCH_ECC 27
-
-/* NAND_DEV0_ECC_CFG bits */
-#define ECC_CFG_ECC_DISABLE 0
-#define ECC_SW_RESET 1
-#define ECC_MODE 4
-#define ECC_PARITY_SIZE_BYTES_BCH 8
-#define ECC_NUM_DATA_BYTES 16
-#define ECC_NUM_DATA_BYTES_MASK GENMASK(25, 16)
-#define ECC_FORCE_CLK_OPEN 30
-
-/* NAND_DEV_CMD1 bits */
-#define READ_ADDR 0
-
-/* NAND_DEV_CMD_VLD bits */
-#define READ_START_VLD BIT(0)
-#define READ_STOP_VLD BIT(1)
-#define WRITE_START_VLD BIT(2)
-#define ERASE_START_VLD BIT(3)
-#define SEQ_READ_START_VLD BIT(4)
-
-/* NAND_EBI2_ECC_BUF_CFG bits */
-#define NUM_STEPS 0
-
-/* NAND_ERASED_CW_DETECT_CFG bits */
-#define ERASED_CW_ECC_MASK 1
-#define AUTO_DETECT_RES 0
-#define MASK_ECC BIT(ERASED_CW_ECC_MASK)
-#define RESET_ERASED_DET BIT(AUTO_DETECT_RES)
-#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
-#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
-#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
-
-/* NAND_ERASED_CW_DETECT_STATUS bits */
-#define PAGE_ALL_ERASED BIT(7)
-#define CODEWORD_ALL_ERASED BIT(6)
-#define PAGE_ERASED BIT(5)
-#define CODEWORD_ERASED BIT(4)
-#define ERASED_PAGE (PAGE_ALL_ERASED | PAGE_ERASED)
-#define ERASED_CW (CODEWORD_ALL_ERASED | CODEWORD_ERASED)
-
-/* NAND_READ_LOCATION_n bits */
-#define READ_LOCATION_OFFSET 0
-#define READ_LOCATION_SIZE 16
-#define READ_LOCATION_LAST 31
-
-/* Version Mask */
-#define NAND_VERSION_MAJOR_MASK 0xf0000000
-#define NAND_VERSION_MAJOR_SHIFT 28
-#define NAND_VERSION_MINOR_MASK 0x0fff0000
-#define NAND_VERSION_MINOR_SHIFT 16
-
-/* NAND OP_CMDs */
-#define OP_PAGE_READ 0x2
-#define OP_PAGE_READ_WITH_ECC 0x3
-#define OP_PAGE_READ_WITH_ECC_SPARE 0x4
-#define OP_PAGE_READ_ONFI_READ 0x5
-#define OP_PROGRAM_PAGE 0x6
-#define OP_PAGE_PROGRAM_WITH_ECC 0x7
-#define OP_PROGRAM_PAGE_SPARE 0x9
-#define OP_BLOCK_ERASE 0xa
-#define OP_CHECK_STATUS 0xc
-#define OP_FETCH_ID 0xb
-#define OP_RESET_DEVICE 0xd
-
-/* Default Value for NAND_DEV_CMD_VLD */
-#define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \
- ERASE_START_VLD | SEQ_READ_START_VLD)
-
-/* NAND_CTRL bits */
-#define BAM_MODE_EN BIT(0)
-
-/*
- * the NAND controller performs reads/writes with ECC in 516 byte chunks.
- * the driver calls the chunks 'step' or 'codeword' interchangeably
- */
-#define NANDC_STEP_SIZE 512
-
-/*
- * the largest page size we support is 8K, this will have 16 steps/codewords
- * of 512 bytes each
- */
-#define MAX_NUM_STEPS (SZ_8K / NANDC_STEP_SIZE)
-
-/* we read at most 3 registers per codeword scan */
-#define MAX_REG_RD (3 * MAX_NUM_STEPS)
-
-/* ECC modes supported by the controller */
-#define ECC_NONE BIT(0)
-#define ECC_RS_4BIT BIT(1)
-#define ECC_BCH_4BIT BIT(2)
-#define ECC_BCH_8BIT BIT(3)
-
-#define nandc_set_read_loc_first(chip, reg, cw_offset, read_size, is_last_read_loc) \
-nandc_set_reg(chip, reg, \
- ((cw_offset) << READ_LOCATION_OFFSET) | \
- ((read_size) << READ_LOCATION_SIZE) | \
- ((is_last_read_loc) << READ_LOCATION_LAST))
-
-#define nandc_set_read_loc_last(chip, reg, cw_offset, read_size, is_last_read_loc) \
-nandc_set_reg(chip, reg, \
- ((cw_offset) << READ_LOCATION_OFFSET) | \
- ((read_size) << READ_LOCATION_SIZE) | \
- ((is_last_read_loc) << READ_LOCATION_LAST))
-/*
- * Returns the actual register address for all NAND_DEV_ registers
- * (i.e. NAND_DEV_CMD0, NAND_DEV_CMD1, NAND_DEV_CMD2 and NAND_DEV_CMD_VLD)
- */
-#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
-
-/* Returns the NAND register physical address */
-#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
-
-/* Returns the dma address for reg read buffer */
-#define reg_buf_dma_addr(chip, vaddr) \
- ((chip)->reg_read_dma + \
- ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
-
-#define QPIC_PER_CW_CMD_ELEMENTS 32
-#define QPIC_PER_CW_CMD_SGL 32
-#define QPIC_PER_CW_DATA_SGL 8
-
-#define QPIC_NAND_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
-
-/*
- * Flags used in DMA descriptor preparation helper functions
- * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
- */
-/* Don't set the EOT in current tx BAM sgl */
-#define NAND_BAM_NO_EOT BIT(0)
-/* Set the NWD flag in current BAM sgl */
-#define NAND_BAM_NWD BIT(1)
-/* Finish writing in the current BAM sgl and start writing in another BAM sgl */
-#define NAND_BAM_NEXT_SGL BIT(2)
-/*
- * Erased codeword status is being used two times in single transfer so this
- * flag will determine the current value of erased codeword status register
- */
-#define NAND_ERASED_CW_SET BIT(4)
-
-#define MAX_ADDRESS_CYCLE 5
-
-/*
- * This data type corresponds to the BAM transaction which will be used for all
- * NAND transfers.
- * @bam_ce - the array of BAM command elements
- * @cmd_sgl - sgl for NAND BAM command pipe
- * @data_sgl - sgl for NAND BAM consumer/producer pipe
- * @last_data_desc - last DMA desc in data channel (tx/rx).
- * @last_cmd_desc - last DMA desc in command channel.
- * @txn_done - completion for NAND transfer.
- * @bam_ce_pos - the index in bam_ce which is available for next sgl
- * @bam_ce_start - the index in bam_ce which marks the start position ce
- * for current sgl. It will be used for size calculation
- * for current sgl
- * @cmd_sgl_pos - current index in command sgl.
- * @cmd_sgl_start - start index in command sgl.
- * @tx_sgl_pos - current index in data sgl for tx.
- * @tx_sgl_start - start index in data sgl for tx.
- * @rx_sgl_pos - current index in data sgl for rx.
- * @rx_sgl_start - start index in data sgl for rx.
- * @wait_second_completion - wait for second DMA desc completion before making
- * the NAND transfer completion.
- */
-struct bam_transaction {
- struct bam_cmd_element *bam_ce;
- struct scatterlist *cmd_sgl;
- struct scatterlist *data_sgl;
- struct dma_async_tx_descriptor *last_data_desc;
- struct dma_async_tx_descriptor *last_cmd_desc;
- struct completion txn_done;
- u32 bam_ce_pos;
- u32 bam_ce_start;
- u32 cmd_sgl_pos;
- u32 cmd_sgl_start;
- u32 tx_sgl_pos;
- u32 tx_sgl_start;
- u32 rx_sgl_pos;
- u32 rx_sgl_start;
- bool wait_second_completion;
-};
-
-/*
- * This data type corresponds to the nand dma descriptor
- * @dma_desc - low level DMA engine descriptor
- * @list - list for desc_info
- *
- * @adm_sgl - sgl which will be used for single sgl dma descriptor. Only used by
- * ADM
- * @bam_sgl - sgl which will be used for dma descriptor. Only used by BAM
- * @sgl_cnt - number of SGL in bam_sgl. Only used by BAM
- * @dir - DMA transfer direction
- */
-struct desc_info {
- struct dma_async_tx_descriptor *dma_desc;
- struct list_head node;
-
- union {
- struct scatterlist adm_sgl;
- struct {
- struct scatterlist *bam_sgl;
- int sgl_cnt;
- };
- };
- enum dma_data_direction dir;
-};
-
-/*
- * holds the current register values that we want to write. acts as a contiguous
- * chunk of memory which we use to write the controller registers through DMA.
- */
-struct nandc_regs {
- __le32 cmd;
- __le32 addr0;
- __le32 addr1;
- __le32 chip_sel;
- __le32 exec;
-
- __le32 cfg0;
- __le32 cfg1;
- __le32 ecc_bch_cfg;
-
- __le32 clrflashstatus;
- __le32 clrreadstatus;
-
- __le32 cmd1;
- __le32 vld;
-
- __le32 orig_cmd1;
- __le32 orig_vld;
-
- __le32 ecc_buf_cfg;
- __le32 read_location0;
- __le32 read_location1;
- __le32 read_location2;
- __le32 read_location3;
- __le32 read_location_last0;
- __le32 read_location_last1;
- __le32 read_location_last2;
- __le32 read_location_last3;
-
- __le32 erased_cw_detect_cfg_clr;
- __le32 erased_cw_detect_cfg_set;
-};
-
-/*
- * NAND controller data struct
- *
- * @dev: parent device
- *
- * @base: MMIO base
- *
- * @core_clk: controller clock
- * @aon_clk: another controller clock
- *
- * @regs: a contiguous chunk of memory for DMA register
- * writes. contains the register values to be
- * written to controller
- *
- * @props: properties of current NAND controller,
- * initialized via DT match data
- *
- * @controller: base controller structure
- * @host_list: list containing all the chips attached to the
- * controller
- *
- * @chan: dma channel
- * @cmd_crci: ADM DMA CRCI for command flow control
- * @data_crci: ADM DMA CRCI for data flow control
- *
- * @desc_list: DMA descriptor list (list of desc_infos)
- *
- * @data_buffer: our local DMA buffer for page read/writes,
- * used when we can't use the buffer provided
- * by upper layers directly
- * @reg_read_buf: local buffer for reading back registers via DMA
- *
- * @base_phys: physical base address of controller registers
- * @base_dma: dma base address of controller registers
- * @reg_read_dma: contains dma address for register read buffer
- *
- * @buf_size/count/start: markers for chip->legacy.read_buf/write_buf
- * functions
- * @max_cwperpage: maximum QPIC codewords required. calculated
- * from all connected NAND devices pagesize
- *
- * @reg_read_pos: marker for data read in reg_read_buf
- *
- * @cmd1/vld: some fixed controller register values
- *
- * @exec_opwrite: flag to select correct number of code word
- * while reading status
- */
-struct qcom_nand_controller {
- struct device *dev;
-
- void __iomem *base;
-
- struct clk *core_clk;
- struct clk *aon_clk;
-
- struct nandc_regs *regs;
- struct bam_transaction *bam_txn;
-
- const struct qcom_nandc_props *props;
-
- struct nand_controller controller;
- struct list_head host_list;
-
- union {
- /* will be used only by QPIC for BAM DMA */
- struct {
- struct dma_chan *tx_chan;
- struct dma_chan *rx_chan;
- struct dma_chan *cmd_chan;
- };
-
- /* will be used only by EBI2 for ADM DMA */
- struct {
- struct dma_chan *chan;
- unsigned int cmd_crci;
- unsigned int data_crci;
- };
- };
-
- struct list_head desc_list;
-
- u8 *data_buffer;
- __le32 *reg_read_buf;
-
- phys_addr_t base_phys;
- dma_addr_t base_dma;
- dma_addr_t reg_read_dma;
-
- int buf_size;
- int buf_count;
- int buf_start;
- unsigned int max_cwperpage;
-
- int reg_read_pos;
-
- u32 cmd1, vld;
- bool exec_opwrite;
-};
+#include <linux/mtd/nand-qpic-common.h>
/*
* NAND special boot partitions
@@ -471,9 +47,9 @@ struct qcom_op {
unsigned int data_instr_idx;
unsigned int rdy_timeout_ms;
unsigned int rdy_delay_ns;
- u32 addr1_reg;
- u32 addr2_reg;
- u32 cmd_reg;
+ __le32 addr1_reg;
+ __le32 addr2_reg;
+ __le32 cmd_reg;
u8 flag;
};
@@ -544,243 +120,113 @@ struct qcom_nand_host {
bool bch_enabled;
};
-/*
- * This data type corresponds to the NAND controller properties which varies
- * among different NAND controllers.
- * @ecc_modes - ecc mode for NAND
- * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset
- * @is_bam - whether NAND controller is using BAM
- * @is_qpic - whether NAND CTRL is part of qpic IP
- * @qpic_v2 - flag to indicate QPIC IP version 2
- * @use_codeword_fixup - whether NAND has different layout for boot partitions
- */
-struct qcom_nandc_props {
- u32 ecc_modes;
- u32 dev_cmd_reg_start;
- bool is_bam;
- bool is_qpic;
- bool qpic_v2;
- bool use_codeword_fixup;
-};
-
-/* Frees the BAM transaction memory */
-static void free_bam_transaction(struct qcom_nand_controller *nandc)
-{
- struct bam_transaction *bam_txn = nandc->bam_txn;
-
- devm_kfree(nandc->dev, bam_txn);
-}
-
-/* Allocates and Initializes the BAM transaction */
-static struct bam_transaction *
-alloc_bam_transaction(struct qcom_nand_controller *nandc)
-{
- struct bam_transaction *bam_txn;
- size_t bam_txn_size;
- unsigned int num_cw = nandc->max_cwperpage;
- void *bam_txn_buf;
-
- bam_txn_size =
- sizeof(*bam_txn) + num_cw *
- ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
- (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
- (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
-
- bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
- if (!bam_txn_buf)
- return NULL;
-
- bam_txn = bam_txn_buf;
- bam_txn_buf += sizeof(*bam_txn);
-
- bam_txn->bam_ce = bam_txn_buf;
- bam_txn_buf +=
- sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
-
- bam_txn->cmd_sgl = bam_txn_buf;
- bam_txn_buf +=
- sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
-
- bam_txn->data_sgl = bam_txn_buf;
-
- init_completion(&bam_txn->txn_done);
-
- return bam_txn;
-}
-
-/* Clears the BAM transaction indexes */
-static void clear_bam_transaction(struct qcom_nand_controller *nandc)
-{
- struct bam_transaction *bam_txn = nandc->bam_txn;
-
- if (!nandc->props->is_bam)
- return;
-
- bam_txn->bam_ce_pos = 0;
- bam_txn->bam_ce_start = 0;
- bam_txn->cmd_sgl_pos = 0;
- bam_txn->cmd_sgl_start = 0;
- bam_txn->tx_sgl_pos = 0;
- bam_txn->tx_sgl_start = 0;
- bam_txn->rx_sgl_pos = 0;
- bam_txn->rx_sgl_start = 0;
- bam_txn->last_data_desc = NULL;
- bam_txn->wait_second_completion = false;
-
- sg_init_table(bam_txn->cmd_sgl, nandc->max_cwperpage *
- QPIC_PER_CW_CMD_SGL);
- sg_init_table(bam_txn->data_sgl, nandc->max_cwperpage *
- QPIC_PER_CW_DATA_SGL);
-
- reinit_completion(&bam_txn->txn_done);
-}
-
-/* Callback for DMA descriptor completion */
-static void qpic_bam_dma_done(void *data)
-{
- struct bam_transaction *bam_txn = data;
-
- /*
- * In case of data transfer with NAND, 2 callbacks will be generated.
- * One for command channel and another one for data channel.
- * If current transaction has data descriptors
- * (i.e. wait_second_completion is true), then set this to false
- * and wait for second DMA descriptor completion.
- */
- if (bam_txn->wait_second_completion)
- bam_txn->wait_second_completion = false;
- else
- complete(&bam_txn->txn_done);
-}
-
-static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
+static struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
{
return container_of(chip, struct qcom_nand_host, chip);
}
-static inline struct qcom_nand_controller *
+static struct qcom_nand_controller *
get_qcom_nand_controller(struct nand_chip *chip)
{
- return container_of(chip->controller, struct qcom_nand_controller,
- controller);
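+	/*
+	 * Assumes the common QPIC layer allocates struct qcom_nand_controller
+	 * directly in front of the embedded nand_controller, so stepping back
+	 * by its size recovers the container.
+	 */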
+ return (struct qcom_nand_controller *)
+ ((u8 *)chip->controller - sizeof(struct qcom_nand_controller));
}
-static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
+static u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
{
return ioread32(nandc->base + offset);
}
-static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
- u32 val)
+static void nandc_write(struct qcom_nand_controller *nandc, int offset,
+ u32 val)
{
iowrite32(val, nandc->base + offset);
}
-static inline void nandc_read_buffer_sync(struct qcom_nand_controller *nandc,
- bool is_cpu)
+/* Helper to check whether this is the last CW or not */
+static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
{
- if (!nandc->props->is_bam)
- return;
-
- if (is_cpu)
- dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
- MAX_REG_RD *
- sizeof(*nandc->reg_read_buf),
- DMA_FROM_DEVICE);
- else
- dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
- MAX_REG_RD *
- sizeof(*nandc->reg_read_buf),
- DMA_FROM_DEVICE);
+ return cw == (ecc->steps - 1);
}
-static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
+/**
+ * nandc_set_read_loc_first() - set the first read location register
+ * @chip: NAND Private Flash Chip Data
+ * @reg_base: location register base
+ * @cw_offset: code word offset
+ * @read_size: code word read length
+ * @is_last_read_loc: is this the last read location
+ *
+ * This function sets the read location register value.
+ */
+static void nandc_set_read_loc_first(struct nand_chip *chip,
+ int reg_base, u32 cw_offset,
+ u32 read_size, u32 is_last_read_loc)
{
- switch (offset) {
- case NAND_FLASH_CMD:
- return &regs->cmd;
- case NAND_ADDR0:
- return &regs->addr0;
- case NAND_ADDR1:
- return &regs->addr1;
- case NAND_FLASH_CHIP_SELECT:
- return &regs->chip_sel;
- case NAND_EXEC_CMD:
- return &regs->exec;
- case NAND_FLASH_STATUS:
- return &regs->clrflashstatus;
- case NAND_DEV0_CFG0:
- return &regs->cfg0;
- case NAND_DEV0_CFG1:
- return &regs->cfg1;
- case NAND_DEV0_ECC_CFG:
- return &regs->ecc_bch_cfg;
- case NAND_READ_STATUS:
- return &regs->clrreadstatus;
- case NAND_DEV_CMD1:
- return &regs->cmd1;
- case NAND_DEV_CMD1_RESTORE:
- return &regs->orig_cmd1;
- case NAND_DEV_CMD_VLD:
- return &regs->vld;
- case NAND_DEV_CMD_VLD_RESTORE:
- return &regs->orig_vld;
- case NAND_EBI2_ECC_BUF_CFG:
- return &regs->ecc_buf_cfg;
- case NAND_READ_LOCATION_0:
- return &regs->read_location0;
- case NAND_READ_LOCATION_1:
- return &regs->read_location1;
- case NAND_READ_LOCATION_2:
- return &regs->read_location2;
- case NAND_READ_LOCATION_3:
- return &regs->read_location3;
- case NAND_READ_LOCATION_LAST_CW_0:
- return &regs->read_location_last0;
- case NAND_READ_LOCATION_LAST_CW_1:
- return &regs->read_location_last1;
- case NAND_READ_LOCATION_LAST_CW_2:
- return &regs->read_location_last2;
- case NAND_READ_LOCATION_LAST_CW_3:
- return &regs->read_location_last3;
- default:
- return NULL;
- }
-}
-
-static void nandc_set_reg(struct nand_chip *chip, int offset,
- u32 val)
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ __le32 locreg_val;
+ u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) |
+ FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) |
+ FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc);
+
+ locreg_val = cpu_to_le32(val);
+
+ if (reg_base == NAND_READ_LOCATION_0)
+ nandc->regs->read_location0 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_1)
+ nandc->regs->read_location1 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_2)
+ nandc->regs->read_location2 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_3)
+ nandc->regs->read_location3 = locreg_val;
+}
+
+/**
+ * nandc_set_read_loc_last() - set the last read location register
+ * @chip: NAND Private Flash Chip Data
+ * @reg_base: location register base
+ * @cw_offset: code word offset
+ * @read_size: code word read length
+ * @is_last_read_loc: is this the last read location
+ *
+ * This function sets the last read location register value.
+ */
+static void nandc_set_read_loc_last(struct nand_chip *chip,
+ int reg_base, u32 cw_offset,
+ u32 read_size, u32 is_last_read_loc)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- struct nandc_regs *regs = nandc->regs;
- __le32 *reg;
+ __le32 locreg_val;
+ u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) |
+ FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) |
+ FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc);
- reg = offset_to_nandc_reg(regs, offset);
+ locreg_val = cpu_to_le32(val);
- if (reg)
- *reg = cpu_to_le32(val);
-}
-
-/* Helper to check the code word, whether it is last cw or not */
-static bool qcom_nandc_is_last_cw(struct nand_ecc_ctrl *ecc, int cw)
-{
- return cw == (ecc->steps - 1);
+ if (reg_base == NAND_READ_LOCATION_LAST_CW_0)
+ nandc->regs->read_location_last0 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_LAST_CW_1)
+ nandc->regs->read_location_last1 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_LAST_CW_2)
+ nandc->regs->read_location_last2 = locreg_val;
+ else if (reg_base == NAND_READ_LOCATION_LAST_CW_3)
+ nandc->regs->read_location_last3 = locreg_val;
}
/* helper to configure location register values */
static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
- int cw_offset, int read_size, int is_last_read_loc)
+ u32 cw_offset, u32 read_size, u32 is_last_read_loc)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int reg_base = NAND_READ_LOCATION_0;
- if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
+ if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
reg_base = NAND_READ_LOCATION_LAST_CW_0;
reg_base += reg * 4;
- if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
+ if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
return nandc_set_read_loc_last(chip, reg_base, cw_offset,
read_size, is_last_read_loc);
else
@@ -792,12 +238,13 @@ static void nandc_set_read_loc(struct nand_chip *chip, int cw, int reg,
static void set_address(struct qcom_nand_host *host, u16 column, int page)
{
struct nand_chip *chip = &host->chip;
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
if (chip->options & NAND_BUSWIDTH_16)
column >>= 1;
- nandc_set_reg(chip, NAND_ADDR0, page << 16 | column);
- nandc_set_reg(chip, NAND_ADDR1, page >> 16 & 0xff);
+ nandc->regs->addr0 = cpu_to_le32(page << 16 | column);
+ nandc->regs->addr1 = cpu_to_le32(page >> 16 & 0xff);
}
/*
@@ -811,41 +258,43 @@ static void set_address(struct qcom_nand_host *host, u16 column, int page)
static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, int cw)
{
struct nand_chip *chip = &host->chip;
- u32 cmd, cfg0, cfg1, ecc_bch_cfg;
+ __le32 cmd, cfg0, cfg1, ecc_bch_cfg;
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
if (read) {
if (host->use_ecc)
- cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
+ cmd = cpu_to_le32(OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE);
else
- cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
+ cmd = cpu_to_le32(OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
} else {
- cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
+ cmd = cpu_to_le32(OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE);
}
if (host->use_ecc) {
- cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
- (num_cw - 1) << CW_PER_PAGE;
+ cfg0 = cpu_to_le32((host->cfg0 & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, (num_cw - 1)));
- cfg1 = host->cfg1;
- ecc_bch_cfg = host->ecc_bch_cfg;
+ cfg1 = cpu_to_le32(host->cfg1);
+ ecc_bch_cfg = cpu_to_le32(host->ecc_bch_cfg);
} else {
- cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
- (num_cw - 1) << CW_PER_PAGE;
+ cfg0 = cpu_to_le32((host->cfg0_raw & ~CW_PER_PAGE_MASK) |
+ FIELD_PREP(CW_PER_PAGE_MASK, (num_cw - 1)));
- cfg1 = host->cfg1_raw;
- ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+ cfg1 = cpu_to_le32(host->cfg1_raw);
+ ecc_bch_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
}
- nandc_set_reg(chip, NAND_FLASH_CMD, cmd);
- nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0);
- nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1);
- nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
- if (!nandc->props->qpic_v2)
- nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
- nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
- nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+ nandc->regs->cmd = cmd;
+ nandc->regs->cfg0 = cfg0;
+ nandc->regs->cfg1 = cfg1;
+ nandc->regs->ecc_bch_cfg = ecc_bch_cfg;
+
+ if (!nandc->props->qpic_version2)
+ nandc->regs->ecc_buf_cfg = cpu_to_le32(host->ecc_buf_cfg);
+
+ nandc->regs->clrflashstatus = cpu_to_le32(host->clrflashstatus);
+ nandc->regs->clrreadstatus = cpu_to_le32(host->clrreadstatus);
+ nandc->regs->exec = cpu_to_le32(1);
if (read)
nandc_set_read_loc(chip, cw, 0, 0, host->use_ecc ?
@@ -853,366 +302,6 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i
}
/*
- * Maps the scatter gather list for DMA transfer and forms the DMA descriptor
- * for BAM. This descriptor will be added in the NAND DMA descriptor queue
- * which will be submitted to DMA engine.
- */
-static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
- struct dma_chan *chan,
- unsigned long flags)
-{
- struct desc_info *desc;
- struct scatterlist *sgl;
- unsigned int sgl_cnt;
- int ret;
- struct bam_transaction *bam_txn = nandc->bam_txn;
- enum dma_transfer_direction dir_eng;
- struct dma_async_tx_descriptor *dma_desc;
-
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- if (chan == nandc->cmd_chan) {
- sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
- sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
- bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
- dir_eng = DMA_MEM_TO_DEV;
- desc->dir = DMA_TO_DEVICE;
- } else if (chan == nandc->tx_chan) {
- sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
- sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
- bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
- dir_eng = DMA_MEM_TO_DEV;
- desc->dir = DMA_TO_DEVICE;
- } else {
- sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
- sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
- bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
- dir_eng = DMA_DEV_TO_MEM;
- desc->dir = DMA_FROM_DEVICE;
- }
-
- sg_mark_end(sgl + sgl_cnt - 1);
- ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
- if (ret == 0) {
- dev_err(nandc->dev, "failure in mapping desc\n");
- kfree(desc);
- return -ENOMEM;
- }
-
- desc->sgl_cnt = sgl_cnt;
- desc->bam_sgl = sgl;
-
- dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
- flags);
-
- if (!dma_desc) {
- dev_err(nandc->dev, "failure in prep desc\n");
- dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
- kfree(desc);
- return -EINVAL;
- }
-
- desc->dma_desc = dma_desc;
-
- /* update last data/command descriptor */
- if (chan == nandc->cmd_chan)
- bam_txn->last_cmd_desc = dma_desc;
- else
- bam_txn->last_data_desc = dma_desc;
-
- list_add_tail(&desc->node, &nandc->desc_list);
-
- return 0;
-}
-
-/*
- * Prepares the command descriptor for BAM DMA which will be used for NAND
- * register reads and writes. The command descriptor requires the command
- * to be formed in command element type so this function uses the command
- * element from bam transaction ce array and fills the same with required
- * data. A single SGL can contain multiple command elements so
- * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
- * after the current command element.
- */
-static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
- int reg_off, const void *vaddr,
- int size, unsigned int flags)
-{
- int bam_ce_size;
- int i, ret;
- struct bam_cmd_element *bam_ce_buffer;
- struct bam_transaction *bam_txn = nandc->bam_txn;
-
- bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
-
- /* fill the command desc */
- for (i = 0; i < size; i++) {
- if (read)
- bam_prep_ce(&bam_ce_buffer[i],
- nandc_reg_phys(nandc, reg_off + 4 * i),
- BAM_READ_COMMAND,
- reg_buf_dma_addr(nandc,
- (__le32 *)vaddr + i));
- else
- bam_prep_ce_le32(&bam_ce_buffer[i],
- nandc_reg_phys(nandc, reg_off + 4 * i),
- BAM_WRITE_COMMAND,
- *((__le32 *)vaddr + i));
- }
-
- bam_txn->bam_ce_pos += size;
-
- /* use the separate sgl after this command */
- if (flags & NAND_BAM_NEXT_SGL) {
- bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
- bam_ce_size = (bam_txn->bam_ce_pos -
- bam_txn->bam_ce_start) *
- sizeof(struct bam_cmd_element);
- sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
- bam_ce_buffer, bam_ce_size);
- bam_txn->cmd_sgl_pos++;
- bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
-
- if (flags & NAND_BAM_NWD) {
- ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
- DMA_PREP_FENCE |
- DMA_PREP_CMD);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-/*
- * Prepares the data descriptor for BAM DMA which will be used for NAND
- * data reads and writes.
- */
-static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
- const void *vaddr,
- int size, unsigned int flags)
-{
- int ret;
- struct bam_transaction *bam_txn = nandc->bam_txn;
-
- if (read) {
- sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
- vaddr, size);
- bam_txn->rx_sgl_pos++;
- } else {
- sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
- vaddr, size);
- bam_txn->tx_sgl_pos++;
-
- /*
- * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
- * is not set, form the DMA descriptor
- */
- if (!(flags & NAND_BAM_NO_EOT)) {
- ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
- DMA_PREP_INTERRUPT);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
- int reg_off, const void *vaddr, int size,
- bool flow_control)
-{
- struct desc_info *desc;
- struct dma_async_tx_descriptor *dma_desc;
- struct scatterlist *sgl;
- struct dma_slave_config slave_conf;
- struct qcom_adm_peripheral_config periph_conf = {};
- enum dma_transfer_direction dir_eng;
- int ret;
-
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return -ENOMEM;
-
- sgl = &desc->adm_sgl;
-
- sg_init_one(sgl, vaddr, size);
-
- if (read) {
- dir_eng = DMA_DEV_TO_MEM;
- desc->dir = DMA_FROM_DEVICE;
- } else {
- dir_eng = DMA_MEM_TO_DEV;
- desc->dir = DMA_TO_DEVICE;
- }
-
- ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
- if (ret == 0) {
- ret = -ENOMEM;
- goto err;
- }
-
- memset(&slave_conf, 0x00, sizeof(slave_conf));
-
- slave_conf.device_fc = flow_control;
- if (read) {
- slave_conf.src_maxburst = 16;
- slave_conf.src_addr = nandc->base_dma + reg_off;
- if (nandc->data_crci) {
- periph_conf.crci = nandc->data_crci;
- slave_conf.peripheral_config = &periph_conf;
- slave_conf.peripheral_size = sizeof(periph_conf);
- }
- } else {
- slave_conf.dst_maxburst = 16;
- slave_conf.dst_addr = nandc->base_dma + reg_off;
- if (nandc->cmd_crci) {
- periph_conf.crci = nandc->cmd_crci;
- slave_conf.peripheral_config = &periph_conf;
- slave_conf.peripheral_size = sizeof(periph_conf);
- }
- }
-
- ret = dmaengine_slave_config(nandc->chan, &slave_conf);
- if (ret) {
- dev_err(nandc->dev, "failed to configure dma channel\n");
- goto err;
- }
-
- dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
- if (!dma_desc) {
- dev_err(nandc->dev, "failed to prepare desc\n");
- ret = -EINVAL;
- goto err;
- }
-
- desc->dma_desc = dma_desc;
-
- list_add_tail(&desc->node, &nandc->desc_list);
-
- return 0;
-err:
- kfree(desc);
-
- return ret;
-}
-
-/*
- * read_reg_dma: prepares a descriptor to read a given number of
- * contiguous registers to the reg_read_buf pointer
- *
- * @first: offset of the first register in the contiguous block
- * @num_regs: number of registers to read
- * @flags: flags to control DMA descriptor preparation
- */
-static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
- int num_regs, unsigned int flags)
-{
- bool flow_control = false;
- void *vaddr;
-
- vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
- nandc->reg_read_pos += num_regs;
-
- if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
- first = dev_cmd_reg_addr(nandc, first);
-
- if (nandc->props->is_bam)
- return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
- num_regs, flags);
-
- if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
- flow_control = true;
-
- return prep_adm_dma_desc(nandc, true, first, vaddr,
- num_regs * sizeof(u32), flow_control);
-}
-
-/*
- * write_reg_dma: prepares a descriptor to write a given number of
- * contiguous registers
- *
- * @first: offset of the first register in the contiguous block
- * @num_regs: number of registers to write
- * @flags: flags to control DMA descriptor preparation
- */
-static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
- int num_regs, unsigned int flags)
-{
- bool flow_control = false;
- struct nandc_regs *regs = nandc->regs;
- void *vaddr;
-
- vaddr = offset_to_nandc_reg(regs, first);
-
- if (first == NAND_ERASED_CW_DETECT_CFG) {
- if (flags & NAND_ERASED_CW_SET)
- vaddr = &regs->erased_cw_detect_cfg_set;
- else
- vaddr = &regs->erased_cw_detect_cfg_clr;
- }
-
- if (first == NAND_EXEC_CMD)
- flags |= NAND_BAM_NWD;
-
- if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
- first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);
-
- if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
- first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
-
- if (nandc->props->is_bam)
- return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
- num_regs, flags);
-
- if (first == NAND_FLASH_CMD)
- flow_control = true;
-
- return prep_adm_dma_desc(nandc, false, first, vaddr,
- num_regs * sizeof(u32), flow_control);
-}
-
-/*
- * read_data_dma: prepares a DMA descriptor to transfer data from the
- * controller's internal buffer to the buffer 'vaddr'
- *
- * @reg_off: offset within the controller's data buffer
- * @vaddr: virtual address of the buffer we want to write to
- * @size: DMA transaction size in bytes
- * @flags: flags to control DMA descriptor preparation
- */
-static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
- const u8 *vaddr, int size, unsigned int flags)
-{
- if (nandc->props->is_bam)
- return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
-
- return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
-}
-
-/*
- * write_data_dma: prepares a DMA descriptor to transfer data from
- * 'vaddr' to the controller's internal buffer
- *
- * @reg_off: offset within the controller's data buffer
- * @vaddr: virtual address of the buffer we want to read from
- * @size: DMA transaction size in bytes
- * @flags: flags to control DMA descriptor preparation
- */
-static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
- const u8 *vaddr, int size, unsigned int flags)
-{
- if (nandc->props->is_bam)
- return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
-
- return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
-}
-
-/*
* Helper to prepare DMA descriptors for configuring registers
* before reading a NAND page.
*/
@@ -1220,13 +309,14 @@ static void config_nand_page_read(struct nand_chip *chip)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- write_reg_dma(nandc, NAND_ADDR0, 2, 0);
- write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
- if (!nandc->props->qpic_v2)
- write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0);
- write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, 0);
- write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1,
- NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ if (!nandc->props->qpic_version2)
+ qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
}
/*
@@ -1239,23 +329,23 @@ config_nand_cw_read(struct nand_chip *chip, bool use_ecc, int cw)
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- int reg = NAND_READ_LOCATION_0;
+ __le32 *reg = &nandc->regs->read_location0;
- if (nandc->props->qpic_v2 && qcom_nandc_is_last_cw(ecc, cw))
- reg = NAND_READ_LOCATION_LAST_CW_0;
+ if (nandc->props->qpic_version2 && qcom_nandc_is_last_cw(ecc, cw))
+ reg = &nandc->regs->read_location_last0;
- if (nandc->props->is_bam)
- write_reg_dma(nandc, reg, 4, NAND_BAM_NEXT_SGL);
+ if (nandc->props->supports_bam)
+ qcom_write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
if (use_ecc) {
- read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
- read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
- NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
+ qcom_read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
} else {
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
}
}
@@ -1279,11 +369,11 @@ static void config_nand_page_write(struct nand_chip *chip)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- write_reg_dma(nandc, NAND_ADDR0, 2, 0);
- write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0);
- if (!nandc->props->qpic_v2)
- write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1,
- NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ if (!nandc->props->qpic_version2)
+ qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
+ NAND_BAM_NEXT_SGL);
}
/*
@@ -1294,95 +384,14 @@ static void config_nand_cw_write(struct nand_chip *chip)
{
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-
- write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
- write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
-}
-
-/* helpers to submit/free our list of dma descriptors */
-static int submit_descs(struct qcom_nand_controller *nandc)
-{
- struct desc_info *desc, *n;
- dma_cookie_t cookie = 0;
- struct bam_transaction *bam_txn = nandc->bam_txn;
- int ret = 0;
-
- if (nandc->props->is_bam) {
- if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
- ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
- if (ret)
- goto err_unmap_free_desc;
- }
-
- if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
- ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
- DMA_PREP_INTERRUPT);
- if (ret)
- goto err_unmap_free_desc;
- }
-
- if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
- ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
- DMA_PREP_CMD);
- if (ret)
- goto err_unmap_free_desc;
- }
- }
-
- list_for_each_entry(desc, &nandc->desc_list, node)
- cookie = dmaengine_submit(desc->dma_desc);
-
- if (nandc->props->is_bam) {
- bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
- bam_txn->last_cmd_desc->callback_param = bam_txn;
- if (bam_txn->last_data_desc) {
- bam_txn->last_data_desc->callback = qpic_bam_dma_done;
- bam_txn->last_data_desc->callback_param = bam_txn;
- bam_txn->wait_second_completion = true;
- }
-
- dma_async_issue_pending(nandc->tx_chan);
- dma_async_issue_pending(nandc->rx_chan);
- dma_async_issue_pending(nandc->cmd_chan);
-
- if (!wait_for_completion_timeout(&bam_txn->txn_done,
- QPIC_NAND_COMPLETION_TIMEOUT))
- ret = -ETIMEDOUT;
- } else {
- if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
- ret = -ETIMEDOUT;
- }
-
-err_unmap_free_desc:
- /*
- * Unmap the dma sg_list and free the desc allocated by both
- * prepare_bam_async_desc() and prep_adm_dma_desc() functions.
- */
- list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
- list_del(&desc->node);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- if (nandc->props->is_bam)
- dma_unmap_sg(nandc->dev, desc->bam_sgl,
- desc->sgl_cnt, desc->dir);
- else
- dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
- desc->dir);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- kfree(desc);
- }
-
- return ret;
-}
-
-/* reset the register read buffer for next NAND operation */
-static void clear_read_regs(struct qcom_nand_controller *nandc)
-{
- nandc->reg_read_pos = 0;
- nandc_read_buffer_sync(nandc, false);
+ qcom_write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
}
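Taken together, the rewritten helpers give every QPIC operation the same shape. An illustrative composite (not authoritative code), mirroring the converted status and misc-command paths further down:

static int example_qpic_op(struct qcom_nand_controller *nandc)
{
	qcom_clear_read_regs(nandc);
	qcom_clear_bam_transaction(nandc);

	/* stage register values in the DMA-able shadow block */
	nandc->regs->cmd = cpu_to_le32(OP_FETCH_ID);
	nandc->regs->exec = cpu_to_le32(1);

	/* queue register writes, the command kick, and a status read-back */
	qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1,
			   NAND_BAM_NEXT_SGL);
	qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1,
			   NAND_BAM_NEXT_SGL);
	qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

	/* shared submit/wait/unmap path replacing the local submit_descs() */
	return qcom_submit_descs(nandc);
}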
/*
@@ -1446,7 +455,7 @@ static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
int i;
- nandc_read_buffer_sync(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
for (i = 0; i < cw_cnt; i++) {
u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
@@ -1473,13 +482,13 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
nand_read_page_op(chip, page, 0, NULL, 0);
nandc->buf_count = 0;
nandc->buf_start = 0;
- clear_read_regs(nandc);
+ qcom_clear_read_regs(nandc);
host->use_ecc = false;
- if (nandc->props->qpic_v2)
+ if (nandc->props->qpic_version2)
raw_cw = ecc->steps - 1;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
set_address(host, host->cw_size * cw, page);
update_rw_regs(host, 1, true, raw_cw);
config_nand_page_read(chip);
@@ -1497,7 +506,7 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
}
- if (nandc->props->is_bam) {
+ if (nandc->props->supports_bam) {
nandc_set_read_loc(chip, cw, 0, read_loc, data_size1, 0);
read_loc += data_size1;
@@ -1512,18 +521,18 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
config_nand_cw_read(chip, false, raw_cw);
- read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
+ qcom_read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
reg_off += data_size1;
- read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
+ qcom_read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
reg_off += oob_size1;
- read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
+ qcom_read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
reg_off += data_size2;
- read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
+ qcom_read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
return ret;
@@ -1621,7 +630,7 @@ static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;
buf = (struct read_stats *)nandc->reg_read_buf;
- nandc_read_buffer_sync(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
for (i = 0; i < ecc->steps; i++, buf++) {
u32 flash, buffer, erased_cw;
@@ -1734,7 +743,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
oob_size = host->ecc_bytes_hw + host->spare_bytes;
}
- if (nandc->props->is_bam) {
+ if (nandc->props->supports_bam) {
if (data_buf && oob_buf) {
nandc_set_read_loc(chip, i, 0, 0, data_size, 0);
nandc_set_read_loc(chip, i, 1, data_size,
@@ -1750,8 +759,8 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
config_nand_cw_read(chip, true, i);
if (data_buf)
- read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
- data_size, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
+ data_size, 0);
/*
* when ecc is enabled, the controller doesn't read the real
@@ -1766,8 +775,8 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
for (j = 0; j < host->bbm_size; j++)
*oob_buf++ = 0xff;
- read_data_dma(nandc, FLASH_BUF_ACC + data_size,
- oob_buf, oob_size, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
}
if (data_buf)
@@ -1776,7 +785,7 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
oob_buf += oob_size;
}
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to read page/oob\n");
return ret;
@@ -1797,7 +806,7 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
int size;
int ret;
- clear_read_regs(nandc);
+ qcom_clear_read_regs(nandc);
size = host->use_ecc ? host->cw_data : host->cw_size;
@@ -1809,9 +818,9 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);
- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret)
dev_err(nandc->dev, "failed to copy last codeword\n");
@@ -1873,12 +882,12 @@ static void qcom_nandc_codeword_fixup(struct qcom_nand_host *host, int page)
host->bbm_size - host->cw_data;
host->cfg0 &= ~(SPARE_SIZE_BYTES_MASK | UD_SIZE_BYTES_MASK);
- host->cfg0 |= host->spare_bytes << SPARE_SIZE_BYTES |
- host->cw_data << UD_SIZE_BYTES;
+ host->cfg0 |= FIELD_PREP(SPARE_SIZE_BYTES_MASK, host->spare_bytes) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data);
host->ecc_bch_cfg &= ~ECC_NUM_DATA_BYTES_MASK;
- host->ecc_bch_cfg |= host->cw_data << ECC_NUM_DATA_BYTES;
- host->ecc_buf_cfg = (host->cw_data - 1) << NUM_STEPS;
+ host->ecc_bch_cfg |= FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, host->cw_data);
+ host->ecc_buf_cfg = FIELD_PREP(NUM_STEPS_MASK, host->cw_data - 1);
}
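The FIELD_PREP() conversions are value-preserving provided each *_MASK is defined with its low bit at the old shift position. Illustration, assuming UD_SIZE_BYTES == 9 and UD_SIZE_BYTES_MASK == GENMASK(18, 9) (assumed values; the driver header holds the real ones):

	/*
	 * For any val that fits the field:
	 *     (val << UD_SIZE_BYTES) == FIELD_PREP(UD_SIZE_BYTES_MASK, val)
	 */

FIELD_PREP() additionally fails the build when a constant value overflows its field, where the open-coded shift would silently spill into neighbouring bits.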
/* implements ecc->read_page() */
@@ -1897,14 +906,14 @@ static int qcom_nandc_read_page(struct nand_chip *chip, u8 *buf,
nandc->buf_count = 0;
nandc->buf_start = 0;
host->use_ecc = true;
- clear_read_regs(nandc);
+ qcom_clear_read_regs(nandc);
set_address(host, 0, page);
update_rw_regs(host, ecc->steps, true, 0);
data_buf = buf;
oob_buf = oob_required ? chip->oob_poi : NULL;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
return read_page_ecc(host, data_buf, oob_buf, page);
}
@@ -1945,8 +954,8 @@ static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
if (host->nr_boot_partitions)
qcom_nandc_codeword_fixup(host, page);
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
host->use_ecc = true;
set_address(host, 0, page);
@@ -1973,8 +982,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
set_address(host, 0, page);
nandc->buf_count = 0;
nandc->buf_start = 0;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
data_buf = (u8 *)buf;
oob_buf = chip->oob_poi;
@@ -1995,8 +1004,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
oob_size = ecc->bytes;
}
- write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
- i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
+ i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
/*
* when ECC is enabled, we don't really need to write anything
@@ -2008,8 +1017,8 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
if (qcom_nandc_is_last_cw(ecc, i)) {
oob_buf += host->bbm_size;
- write_data_dma(nandc, FLASH_BUF_ACC + data_size,
- oob_buf, oob_size, 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
}
config_nand_cw_write(chip);
@@ -2018,7 +1027,7 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
oob_buf += oob_size;
}
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to write page\n");
return ret;
@@ -2043,8 +1052,8 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
qcom_nandc_codeword_fixup(host, page);
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
data_buf = (u8 *)buf;
oob_buf = chip->oob_poi;
@@ -2070,28 +1079,28 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
}
- write_data_dma(nandc, reg_off, data_buf, data_size1,
- NAND_BAM_NO_EOT);
+ qcom_write_data_dma(nandc, reg_off, data_buf, data_size1,
+ NAND_BAM_NO_EOT);
reg_off += data_size1;
data_buf += data_size1;
- write_data_dma(nandc, reg_off, oob_buf, oob_size1,
- NAND_BAM_NO_EOT);
+ qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size1,
+ NAND_BAM_NO_EOT);
reg_off += oob_size1;
oob_buf += oob_size1;
- write_data_dma(nandc, reg_off, data_buf, data_size2,
- NAND_BAM_NO_EOT);
+ qcom_write_data_dma(nandc, reg_off, data_buf, data_size2,
+ NAND_BAM_NO_EOT);
reg_off += data_size2;
data_buf += data_size2;
- write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
+ qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
oob_buf += oob_size2;
config_nand_cw_write(chip);
}
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to write raw page\n");
return ret;
@@ -2121,7 +1130,7 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
qcom_nandc_codeword_fixup(host, page);
host->use_ecc = true;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
/* calculate the data and oob size for the last codeword/step */
data_size = ecc->size - ((ecc->steps - 1) << 2);
@@ -2136,11 +1145,11 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
update_rw_regs(host, 1, false, 0);
config_nand_page_write(chip);
- write_data_dma(nandc, FLASH_BUF_ACC,
- nandc->data_buffer, data_size + oob_size, 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC,
+ nandc->data_buffer, data_size + oob_size, 0);
config_nand_cw_write(chip);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to write oob\n");
return ret;
@@ -2167,7 +1176,7 @@ static int qcom_nandc_block_bad(struct nand_chip *chip, loff_t ofs)
*/
host->use_ecc = false;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
ret = copy_last_cw(host, page);
if (ret)
goto err;
@@ -2194,8 +1203,8 @@ static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
struct nand_ecc_ctrl *ecc = &chip->ecc;
int page, ret;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
/*
* to mark the BBM as bad, we flash the entire last codeword with 0s.
@@ -2212,11 +1221,11 @@ static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
update_rw_regs(host, 1, false, ecc->steps - 1);
config_nand_page_write(chip);
- write_data_dma(nandc, FLASH_BUF_ACC,
- nandc->data_buffer, host->cw_size, 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC,
+ nandc->data_buffer, host->cw_size, 0);
config_nand_cw_write(chip);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to update BBM\n");
return ret;
@@ -2455,15 +1464,15 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
/* Free the initially allocated BAM transaction for reading the ONFI params */
- if (nandc->props->is_bam)
- free_bam_transaction(nandc);
+ if (nandc->props->supports_bam)
+ qcom_free_bam_transaction(nandc);
nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
cwperpage);
/* Now allocate the BAM transaction based on updated max_cwperpage */
- if (nandc->props->is_bam) {
- nandc->bam_txn = alloc_bam_transaction(nandc);
+ if (nandc->props->supports_bam) {
+ nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
if (!nandc->bam_txn) {
dev_err(nandc->dev,
"failed to allocate bam transaction\n");
@@ -2485,45 +1494,44 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
host->cw_size = host->cw_data + ecc->bytes;
bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
- host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
- | host->cw_data << UD_SIZE_BYTES
- | 0 << DISABLE_STATUS_AFTER_WRITE
- | 5 << NUM_ADDR_CYCLES
- | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
- | 0 << STATUS_BFR_READ
- | 1 << SET_RD_MODE_AFTER_STATUS
- | host->spare_bytes << SPARE_SIZE_BYTES;
-
- host->cfg1 = 7 << NAND_RECOVERY_CYCLES
- | 0 << CS_ACTIVE_BSY
- | bad_block_byte << BAD_BLOCK_BYTE_NUM
- | 0 << BAD_BLOCK_IN_SPARE_AREA
- | 2 << WR_RD_BSY_GAP
- | wide_bus << WIDE_FLASH
- | host->bch_enabled << ENABLE_BCH_ECC;
-
- host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
- | host->cw_size << UD_SIZE_BYTES
- | 5 << NUM_ADDR_CYCLES
- | 0 << SPARE_SIZE_BYTES;
-
- host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
- | 0 << CS_ACTIVE_BSY
- | 17 << BAD_BLOCK_BYTE_NUM
- | 1 << BAD_BLOCK_IN_SPARE_AREA
- | 2 << WR_RD_BSY_GAP
- | wide_bus << WIDE_FLASH
- | 1 << DEV0_CFG1_ECC_DISABLE;
-
- host->ecc_bch_cfg = !host->bch_enabled << ECC_CFG_ECC_DISABLE
- | 0 << ECC_SW_RESET
- | host->cw_data << ECC_NUM_DATA_BYTES
- | 1 << ECC_FORCE_CLK_OPEN
- | ecc_mode << ECC_MODE
- | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
-
- if (!nandc->props->qpic_v2)
- host->ecc_buf_cfg = 0x203 << NUM_STEPS;
+ host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data) |
+ FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 0) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
+ FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, host->ecc_bytes_hw) |
+ FIELD_PREP(STATUS_BFR_READ, 0) |
+ FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, host->spare_bytes);
+
+ host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
+ FIELD_PREP(WIDE_FLASH, wide_bus) |
+ FIELD_PREP(ENABLE_BCH_ECC, host->bch_enabled);
+
+ host->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_size) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
+
+ host->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
+ FIELD_PREP(WIDE_FLASH, wide_bus) |
+ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
+
+ host->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !host->bch_enabled) |
+ FIELD_PREP(ECC_SW_RESET, 0) |
+ FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, host->cw_data) |
+ FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
+ FIELD_PREP(ECC_MODE_MASK, ecc_mode) |
+ FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, host->ecc_bytes_hw);
+
+ if (!nandc->props->qpic_version2)
+ host->ecc_buf_cfg = FIELD_PREP(NUM_STEPS_MASK, 0x203);
host->clrflashstatus = FS_READY_BSY_N;
host->clrreadstatus = 0xc0;
@@ -2556,7 +1564,7 @@ static int qcom_op_cmd_mapping(struct nand_chip *chip, u8 opcode,
cmd = OP_FETCH_ID;
break;
case NAND_CMD_PARAM:
- if (nandc->props->qpic_v2)
+ if (nandc->props->qpic_version2)
cmd = OP_PAGE_READ_ONFI_READ;
else
cmd = OP_PAGE_READ;
@@ -2609,7 +1617,7 @@ static int qcom_parse_instructions(struct nand_chip *chip,
if (ret < 0)
return ret;
- q_op->cmd_reg = ret;
+ q_op->cmd_reg = cpu_to_le32(ret);
q_op->rdy_delay_ns = instr->delay_ns;
break;
@@ -2619,10 +1627,10 @@ static int qcom_parse_instructions(struct nand_chip *chip,
addrs = &instr->ctx.addr.addrs[offset];
for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
- q_op->addr1_reg |= addrs[i] << (i * 8);
+ q_op->addr1_reg |= cpu_to_le32(addrs[i] << (i * 8));
if (naddrs > 4)
- q_op->addr2_reg |= addrs[4];
+ q_op->addr2_reg |= cpu_to_le32(addrs[4]);
q_op->rdy_delay_ns = instr->delay_ns;
break;
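The cpu_to_le32() churn follows from q_op and nandc->regs now holding __le32: the staged values are DMA'd to the controller as-is, so they live in little-endian form from assignment onward. The rule in miniature (mirroring the conversions in the hunks below):

	__le32 cmd;
	u32 flash;

	cmd = cpu_to_le32(OP_BLOCK_ERASE);		/* store: CPU -> LE */
	if (cmd == cpu_to_le32(OP_BLOCK_ERASE))		/* compare in LE space */
		cmd |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
	flash = le32_to_cpu(nandc->reg_read_buf[0]);	/* read back: LE -> CPU */

On little-endian hosts every conversion is a no-op; the annotations exist so sparse can flag a missed one.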
@@ -2663,7 +1671,7 @@ static int qcom_wait_rdy_poll(struct nand_chip *chip, unsigned int time_ms)
unsigned long start = jiffies + msecs_to_jiffies(time_ms);
u32 flash;
- nandc_read_buffer_sync(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
do {
flash = le32_to_cpu(nandc->reg_read_buf[0]);
@@ -2703,23 +1711,23 @@ static int qcom_read_status_exec(struct nand_chip *chip,
nandc->buf_start = 0;
host->use_ecc = false;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+ nandc->regs->cmd = q_op.cmd_reg;
+ nandc->regs->exec = cpu_to_le32(1);
- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure in submitting status descriptor\n");
goto err_out;
}
- nandc_read_buffer_sync(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
for (i = 0; i < num_cw; i++) {
flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
@@ -2760,23 +1768,21 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo
nandc->buf_start = 0;
host->use_ecc = false;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
-
- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
- nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
- nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
- nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
- nandc->props->is_bam ? 0 : DM_EN);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+ nandc->regs->cmd = q_op.cmd_reg;
+ nandc->regs->addr0 = q_op.addr1_reg;
+ nandc->regs->addr1 = q_op.addr2_reg;
+ nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
+ nandc->regs->exec = cpu_to_le32(1);
- write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure in submitting read id descriptor\n");
goto err_out;
@@ -2786,7 +1792,7 @@ static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subo
op_id = q_op.data_instr_idx;
len = nand_subop_get_data_len(subop, op_id);
- nandc_read_buffer_sync(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
err_out:
@@ -2807,15 +1813,14 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
if (q_op.flag == OP_PROGRAM_PAGE) {
goto wait_rdy;
- } else if (q_op.cmd_reg == OP_BLOCK_ERASE) {
- q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
- nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
- nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
- nandc_set_reg(chip, NAND_DEV0_CFG0,
- host->cfg0_raw & ~(7 << CW_PER_PAGE));
- nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
+ } else if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE)) {
+ q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
+ nandc->regs->addr0 = q_op.addr1_reg;
+ nandc->regs->addr1 = q_op.addr2_reg;
+ nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~CW_PER_PAGE_MASK);
+ nandc->regs->cfg1 = cpu_to_le32(host->cfg1_raw);
instrs = 3;
- } else if (q_op.cmd_reg != OP_RESET_DEVICE) {
+ } else if (q_op.cmd_reg != cpu_to_le32(OP_RESET_DEVICE)) {
return 0;
}
@@ -2823,20 +1828,20 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub
nandc->buf_start = 0;
host->use_ecc = false;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+ nandc->regs->cmd = q_op.cmd_reg;
+ nandc->regs->exec = cpu_to_le32(1);
- write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
- if (q_op.cmd_reg == OP_BLOCK_ERASE)
- write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
+ if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure in submitting misc descriptor\n");
goto err_out;
@@ -2858,80 +1863,89 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
const struct nand_op_instr *instr = NULL;
unsigned int op_id = 0;
unsigned int len = 0;
- int ret;
+ int ret, reg_base;
+
+ reg_base = NAND_READ_LOCATION_0;
+
+ if (nandc->props->qpic_version2)
+ reg_base = NAND_READ_LOCATION_LAST_CW_0;
ret = qcom_parse_instructions(chip, subop, &q_op);
if (ret)
return ret;
- q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
+ q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE);
nandc->buf_count = 0;
nandc->buf_start = 0;
host->use_ecc = false;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
-
- nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
-
- nandc_set_reg(chip, NAND_ADDR0, 0);
- nandc_set_reg(chip, NAND_ADDR1, 0);
- nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
- | 512 << UD_SIZE_BYTES
- | 5 << NUM_ADDR_CYCLES
- | 0 << SPARE_SIZE_BYTES);
- nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
- | 0 << CS_ACTIVE_BSY
- | 17 << BAD_BLOCK_BYTE_NUM
- | 1 << BAD_BLOCK_IN_SPARE_AREA
- | 2 << WR_RD_BSY_GAP
- | 0 << WIDE_FLASH
- | 1 << DEV0_CFG1_ECC_DISABLE);
- if (!nandc->props->qpic_v2)
- nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);
+
+ nandc->regs->cmd = q_op.cmd_reg;
+ nandc->regs->addr0 = 0;
+ nandc->regs->addr1 = 0;
+
+ nandc->regs->cfg0 = cpu_to_le32(FIELD_PREP(CW_PER_PAGE_MASK, 0) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0));
+
+ nandc->regs->cfg1 = cpu_to_le32(FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
+ FIELD_PREP(WIDE_FLASH, 0) |
+ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1));
+
+ if (!nandc->props->qpic_version2)
+ nandc->regs->ecc_buf_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
/* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
- if (!nandc->props->qpic_v2) {
- nandc_set_reg(chip, NAND_DEV_CMD_VLD,
- (nandc->vld & ~READ_START_VLD));
- nandc_set_reg(chip, NAND_DEV_CMD1,
- (nandc->cmd1 & ~(0xFF << READ_ADDR))
- | NAND_CMD_PARAM << READ_ADDR);
+ if (!nandc->props->qpic_version2) {
+ nandc->regs->vld = cpu_to_le32((nandc->vld & ~READ_START_VLD));
+ nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~READ_ADDR_MASK) |
+ FIELD_PREP(READ_ADDR_MASK, NAND_CMD_PARAM));
}
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+ nandc->regs->exec = cpu_to_le32(1);
- if (!nandc->props->qpic_v2) {
- nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
- nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
+ if (!nandc->props->qpic_version2) {
+ nandc->regs->orig_cmd1 = cpu_to_le32(nandc->cmd1);
+ nandc->regs->orig_vld = cpu_to_le32(nandc->vld);
}
instr = q_op.data_instr;
op_id = q_op.data_instr_idx;
len = nand_subop_get_data_len(subop, op_id);
- nandc_set_read_loc(chip, 0, 0, 0, len, 1);
+ if (nandc->props->qpic_version2)
+ nandc_set_read_loc_last(chip, reg_base, 0, len, 1);
+ else
+ nandc_set_read_loc_first(chip, reg_base, 0, len, 1);
- if (!nandc->props->qpic_v2) {
- write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
- write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+ if (!nandc->props->qpic_version2) {
+ qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
}
- nandc->buf_count = len;
+ nandc->buf_count = 512;
memset(nandc->data_buffer, 0xff, nandc->buf_count);
config_nand_single_cw_page_read(chip, false, 0);
- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
- nandc->buf_count, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+ nandc->buf_count, 0);
/* restore CMD1 and VLD regs */
- if (!nandc->props->qpic_v2) {
- write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
- write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
+ if (!nandc->props->qpic_version2) {
+ qcom_write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
+ NAND_BAM_NEXT_SGL);
}
- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure in submitting param page descriptor\n");
goto err_out;
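Note the buf_count change from len to a fixed 512 in this hunk: the DMA into data_buffer now always covers one full 512-byte parameter-page codeword, presumably so a short ONFI read still clocks the whole codeword out of the controller, with the caller's len only bounding what is consumed afterwards.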
@@ -3015,151 +2029,24 @@ static const struct nand_controller_ops qcom_nandc_ops = {
.exec_op = qcom_nand_exec_op,
};
-static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
-{
- if (nandc->props->is_bam) {
- if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
- dma_unmap_single(nandc->dev, nandc->reg_read_dma,
- MAX_REG_RD *
- sizeof(*nandc->reg_read_buf),
- DMA_FROM_DEVICE);
-
- if (nandc->tx_chan)
- dma_release_channel(nandc->tx_chan);
-
- if (nandc->rx_chan)
- dma_release_channel(nandc->rx_chan);
-
- if (nandc->cmd_chan)
- dma_release_channel(nandc->cmd_chan);
- } else {
- if (nandc->chan)
- dma_release_channel(nandc->chan);
- }
-}
-
-static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
-{
- int ret;
-
- ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
- if (ret) {
- dev_err(nandc->dev, "failed to set DMA mask\n");
- return ret;
- }
-
- /*
- * we use the internal buffer for reading ONFI params, reading small
- * data like ID and status, and preforming read-copy-write operations
- * when writing to a codeword partially. 532 is the maximum possible
- * size of a codeword for our nand controller
- */
- nandc->buf_size = 532;
-
- nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
- if (!nandc->data_buffer)
- return -ENOMEM;
-
- nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
- if (!nandc->regs)
- return -ENOMEM;
-
- nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
- sizeof(*nandc->reg_read_buf),
- GFP_KERNEL);
- if (!nandc->reg_read_buf)
- return -ENOMEM;
-
- if (nandc->props->is_bam) {
- nandc->reg_read_dma =
- dma_map_single(nandc->dev, nandc->reg_read_buf,
- MAX_REG_RD *
- sizeof(*nandc->reg_read_buf),
- DMA_FROM_DEVICE);
- if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
- dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
- return -EIO;
- }
-
- nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
- if (IS_ERR(nandc->tx_chan)) {
- ret = PTR_ERR(nandc->tx_chan);
- nandc->tx_chan = NULL;
- dev_err_probe(nandc->dev, ret,
- "tx DMA channel request failed\n");
- goto unalloc;
- }
-
- nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
- if (IS_ERR(nandc->rx_chan)) {
- ret = PTR_ERR(nandc->rx_chan);
- nandc->rx_chan = NULL;
- dev_err_probe(nandc->dev, ret,
- "rx DMA channel request failed\n");
- goto unalloc;
- }
-
- nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
- if (IS_ERR(nandc->cmd_chan)) {
- ret = PTR_ERR(nandc->cmd_chan);
- nandc->cmd_chan = NULL;
- dev_err_probe(nandc->dev, ret,
- "cmd DMA channel request failed\n");
- goto unalloc;
- }
-
- /*
- * Initially allocate BAM transaction to read ONFI param page.
- * After detecting all the devices, this BAM transaction will
- * be freed and the next BAM transaction will be allocated with
- * maximum codeword size
- */
- nandc->max_cwperpage = 1;
- nandc->bam_txn = alloc_bam_transaction(nandc);
- if (!nandc->bam_txn) {
- dev_err(nandc->dev,
- "failed to allocate bam transaction\n");
- ret = -ENOMEM;
- goto unalloc;
- }
- } else {
- nandc->chan = dma_request_chan(nandc->dev, "rxtx");
- if (IS_ERR(nandc->chan)) {
- ret = PTR_ERR(nandc->chan);
- nandc->chan = NULL;
- dev_err_probe(nandc->dev, ret,
- "rxtx DMA channel request failed\n");
- return ret;
- }
- }
-
- INIT_LIST_HEAD(&nandc->desc_list);
- INIT_LIST_HEAD(&nandc->host_list);
-
- nand_controller_init(&nandc->controller);
- nandc->controller.ops = &qcom_nandc_ops;
-
- return 0;
-unalloc:
- qcom_nandc_unalloc(nandc);
- return ret;
-}
-
/* one time setup of a few nand controller registers */
static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
{
u32 nand_ctrl;
+ nand_controller_init(nandc->controller);
+ nandc->controller->ops = &qcom_nandc_ops;
+
/* kill onenand */
- if (!nandc->props->is_qpic)
+ if (!nandc->props->nandc_part_of_qpic)
nandc_write(nandc, SFLASHC_BURST_CFG, 0);
- if (!nandc->props->qpic_v2)
+ if (!nandc->props->qpic_version2)
nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD),
NAND_DEV_CMD_VLD_VAL);
/* enable ADM or BAM DMA */
- if (nandc->props->is_bam) {
+ if (nandc->props->supports_bam) {
nand_ctrl = nandc_read(nandc, NAND_CTRL);
/*
@@ -3176,7 +2063,7 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
}
/* save the original values of these registers */
- if (!nandc->props->qpic_v2) {
+ if (!nandc->props->qpic_version2) {
nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1));
nandc->vld = NAND_DEV_CMD_VLD_VAL;
}
@@ -3288,7 +2175,7 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
chip->legacy.block_bad = qcom_nandc_block_bad;
chip->legacy.block_markbad = qcom_nandc_block_markbad;
- chip->controller = &nandc->controller;
+ chip->controller = nandc->controller;
chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA |
NAND_SKIP_BBTSCAN;
@@ -3349,7 +2236,7 @@ static int qcom_nandc_parse_dt(struct platform_device *pdev)
struct device_node *np = nandc->dev->of_node;
int ret;
- if (!nandc->props->is_bam) {
+ if (!nandc->props->supports_bam) {
ret = of_property_read_u32(np, "qcom,cmd-crci",
&nandc->cmd_crci);
if (ret) {
@@ -3371,17 +2258,21 @@ static int qcom_nandc_parse_dt(struct platform_device *pdev)
static int qcom_nandc_probe(struct platform_device *pdev)
{
struct qcom_nand_controller *nandc;
+ struct nand_controller *controller;
const void *dev_data;
struct device *dev = &pdev->dev;
struct resource *res;
int ret;
- nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
+ nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc) + sizeof(*controller),
+ GFP_KERNEL);
if (!nandc)
return -ENOMEM;
+ controller = (struct nand_controller *)&nandc[1];
platform_set_drvdata(pdev, nandc);
nandc->dev = dev;
+ nandc->controller = controller;
dev_data = of_device_get_match_data(dev);
if (!dev_data) {
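The probe now carves the nand_controller out of the same devm allocation as the driver data, with &nandc[1] pointing at the first byte past the qcom_nand_controller. The generic idiom, sketched with placeholder names:

	head = devm_kzalloc(dev, sizeof(*head) + sizeof(*tail), GFP_KERNEL);
	if (!head)
		return -ENOMEM;
	tail = (struct tail_type *)&head[1];	/* tail storage, freed with head */

The cast is sound as long as the tail type's alignment does not exceed the head type's; both structures here are pointer-aligned, so one allocation serves both lifetimes.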
@@ -3474,31 +2365,35 @@ static void qcom_nandc_remove(struct platform_device *pdev)
static const struct qcom_nandc_props ipq806x_nandc_props = {
.ecc_modes = (ECC_RS_4BIT | ECC_BCH_8BIT),
- .is_bam = false,
+ .supports_bam = false,
.use_codeword_fixup = true,
.dev_cmd_reg_start = 0x0,
+ .bam_offset = 0x30000,
};
static const struct qcom_nandc_props ipq4019_nandc_props = {
.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
- .is_bam = true,
- .is_qpic = true,
+ .supports_bam = true,
+ .nandc_part_of_qpic = true,
.dev_cmd_reg_start = 0x0,
+ .bam_offset = 0x30000,
};
static const struct qcom_nandc_props ipq8074_nandc_props = {
.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
- .is_bam = true,
- .is_qpic = true,
+ .supports_bam = true,
+ .nandc_part_of_qpic = true,
.dev_cmd_reg_start = 0x7000,
+ .bam_offset = 0x30000,
};
static const struct qcom_nandc_props sdx55_nandc_props = {
.ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT),
- .is_bam = true,
- .is_qpic = true,
- .qpic_v2 = true,
+ .supports_bam = true,
+ .nandc_part_of_qpic = true,
+ .qpic_version2 = true,
.dev_cmd_reg_start = 0x7000,
+ .bam_offset = 0x30000,
};
/*
diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c
index b07c2f8b4035..918974d088cf 100644
--- a/drivers/mtd/nand/raw/r852.c
+++ b/drivers/mtd/nand/raw/r852.c
@@ -387,6 +387,9 @@ static int r852_wait(struct nand_chip *chip)
static int r852_ready(struct nand_chip *chip)
{
struct r852_device *dev = r852_get_dev(nand_to_mtd(chip));
+ if (dev->card_unstable)
+ return 0;
+
return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
}
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index fab371e3e9b7..162cd5f4f234 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -817,6 +817,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand,
if (ret)
return ret;
+ sunxi_nfc_randomizer_config(nand, page, false);
sunxi_nfc_randomizer_enable(nand);
writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
nfc->regs + NFC_REG_CMD);
@@ -1049,6 +1050,7 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct nand_chip *nand,
if (ret)
return ret;
+ sunxi_nfc_randomizer_config(nand, page, false);
sunxi_nfc_randomizer_enable(nand);
sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, 0, bbm, page);
diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile
index 19cc77288ebb..258da42451a4 100644
--- a/drivers/mtd/nand/spi/Makefile
+++ b/drivers/mtd/nand/spi/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-spinand-objs := core.o alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o
-spinand-objs += micron.o paragon.o toshiba.o winbond.o xtx.o
+spinand-objs := core.o otp.o
+spinand-objs += alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o
+spinand-objs += micron.o paragon.o skyhigh.o toshiba.o winbond.o xtx.o
obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
diff --git a/drivers/mtd/nand/spi/alliancememory.c b/drivers/mtd/nand/spi/alliancememory.c
index 7936ea546b03..0f9522009843 100644
--- a/drivers/mtd/nand/spi/alliancememory.c
+++ b/drivers/mtd/nand/spi/alliancememory.c
@@ -17,12 +17,12 @@
#define AM_STATUS_ECC_MAX_CORRECTED (3 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/ato.c b/drivers/mtd/nand/spi/ato.c
index 82b377c06812..88dc51308e1b 100644
--- a/drivers/mtd/nand/spi/ato.c
+++ b/drivers/mtd/nand/spi/ato.c
@@ -14,9 +14,9 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index b1df7f627161..303aede6e5aa 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -294,6 +294,9 @@ static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
struct spinand_device *spinand = nand_to_spinand(nand);
bool enable = (req->mode != MTD_OPS_RAW);
+ if (!enable && spinand->flags & SPINAND_NO_RAW_ACCESS)
+ return -EOPNOTSUPP;
+
memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand));
/* Only enable or disable the engine */
@@ -531,10 +534,20 @@ static int spinand_erase_op(struct spinand_device *spinand,
return spi_mem_exec_op(spinand->spimem, &op);
}
-static int spinand_wait(struct spinand_device *spinand,
- unsigned long initial_delay_us,
- unsigned long poll_delay_us,
- u8 *s)
+/**
+ * spinand_wait() - Poll memory device status
+ * @spinand: the spinand device
+ * @initial_delay_us: delay in us before starting to poll
+ * @poll_delay_us: time to sleep between reads in us
+ * @s: pointer to the variable in which to store the value of REG_STATUS
+ *
+ * This function polls a status register (REG_STATUS) and returns when
+ * the STATUS_READY bit is 0 or when the timeout has expired.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
+ unsigned long poll_delay_us, u8 *s)
{
struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS,
spinand->scratchbuf);
@@ -601,8 +614,16 @@ static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}
-static int spinand_read_page(struct spinand_device *spinand,
- const struct nand_page_io_req *req)
+/**
+ * spinand_read_page() - Read a page
+ * @spinand: the spinand device
+ * @req: the I/O request
+ *
+ * Return: 0 or a positive number of bitflips corrected on success.
+ * A negative error code otherwise.
+ */
+int spinand_read_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
@@ -632,8 +653,16 @@ static int spinand_read_page(struct spinand_device *spinand,
return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
-static int spinand_write_page(struct spinand_device *spinand,
- const struct nand_page_io_req *req)
+/**
+ * spinand_write_page() - Write a page
+ * @spinand: the spinand device
+ * @req: the I/O request
+ *
+ * Return: 0 or a positive number of bitflips corrected on success.
+ * A negative error code otherwise.
+ */
+int spinand_write_page(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
{
struct nand_device *nand = spinand_to_nand(spinand);
u8 status;
@@ -671,11 +700,15 @@ static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
{
struct spinand_device *spinand = mtd_to_spinand(mtd);
struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct mtd_ecc_stats old_stats;
struct nand_io_iter iter;
bool disable_ecc = false;
bool ecc_failed = false;
+ unsigned int retry_mode = 0;
int ret;
+ old_stats = mtd->ecc_stats;
+
if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
disable_ecc = true;
@@ -687,18 +720,43 @@ static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
if (ret)
break;
+read_retry:
ret = spinand_read_page(spinand, &iter.req);
if (ret < 0 && ret != -EBADMSG)
break;
- if (ret == -EBADMSG)
+ if (ret == -EBADMSG && spinand->set_read_retry) {
+ if (spinand->read_retries && (++retry_mode <= spinand->read_retries)) {
+ ret = spinand->set_read_retry(spinand, retry_mode);
+ if (ret < 0) {
+ spinand->set_read_retry(spinand, 0);
+ return ret;
+ }
+
+ /* Reset ecc_stats; retry */
+ mtd->ecc_stats = old_stats;
+ goto read_retry;
+ } else {
+ /* No more retry modes; real failure */
+ ecc_failed = true;
+ }
+ } else if (ret == -EBADMSG) {
ecc_failed = true;
- else
+ } else {
*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
+ }
ret = 0;
ops->retlen += iter.req.datalen;
ops->oobretlen += iter.req.ooblen;
+
+ /* Reset to retry mode 0 */
+ if (retry_mode) {
+ retry_mode = 0;
+ ret = spinand->set_read_retry(spinand, retry_mode);
+ if (ret < 0)
+ return ret;
+ }
}
if (ecc_failed && !ret)
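In contract form, the added retry path behaves as follows (pseudocode, not the literal code above; mode 0 is the chip's default read mode):

	/*
	 * on -EBADMSG, if the chip declared retry support:
	 *   for (mode = 1; mode <= spinand->read_retries; mode++) {
	 *           spinand->set_read_retry(spinand, mode);
	 *           mtd->ecc_stats = old_stats;      // drop stats of failed try
	 *           if (spinand_read_page() != -EBADMSG)
	 *                   break;                   // corrected or clean
	 *   }
	 *   spinand->set_read_retry(spinand, 0);     // always restore mode 0
	 */

Only when every mode still returns -EBADMSG does the read surface as an uncorrectable ECC failure.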
@@ -901,9 +959,17 @@ static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
.oobbuf.in = marker,
.mode = MTD_OPS_RAW,
};
+ int ret;
spinand_select_target(spinand, pos->target);
- spinand_read_page(spinand, &req);
+
+ ret = spinand_read_page(spinand, &req);
+ if (ret == -EOPNOTSUPP) {
+ /* Retry with ECC in case raw access is not supported */
+ req.mode = MTD_OPS_PLACE_OOB;
+ spinand_read_page(spinand, &req);
+ }
+
if (marker[0] != 0xff || marker[1] != 0xff)
return true;
@@ -942,11 +1008,14 @@ static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
if (ret)
return ret;
- ret = spinand_write_enable_op(spinand);
- if (ret)
- return ret;
+ ret = spinand_write_page(spinand, &req);
+ if (ret == -EOPNOTSUPP) {
+ /* Retry with ECC in case raw access is not supported */
+ req.mode = MTD_OPS_PLACE_OOB;
+ ret = spinand_write_page(spinand, &req);
+ }
- return spinand_write_page(spinand, &req);
+ return ret;
}
static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
@@ -1117,6 +1186,7 @@ static const struct spinand_manufacturer *spinand_manufacturers[] = {
&macronix_spinand_manufacturer,
&micron_spinand_manufacturer,
&paragon_spinand_manufacturer,
+ &skyhigh_spinand_manufacturer,
&toshiba_spinand_manufacturer,
&winbond_spinand_manufacturer,
&xtx_spinand_manufacturer,
@@ -1198,10 +1268,13 @@ spinand_select_op_variant(struct spinand_device *spinand,
const struct spinand_op_variants *variants)
{
struct nand_device *nand = spinand_to_nand(spinand);
+ const struct spi_mem_op *best_variant = NULL;
+ u64 best_op_duration_ns = ULLONG_MAX;
unsigned int i;
for (i = 0; i < variants->nops; i++) {
struct spi_mem_op op = variants->ops[i];
+ u64 op_duration_ns = 0;
unsigned int nbytes;
int ret;
@@ -1214,17 +1287,23 @@ spinand_select_op_variant(struct spinand_device *spinand,
if (ret)
break;
+ spi_mem_adjust_op_freq(spinand->spimem, &op);
+
if (!spi_mem_supports_op(spinand->spimem, &op))
break;
nbytes -= op.data.nbytes;
+
+ op_duration_ns += spi_mem_calc_op_duration(&op);
}
- if (!nbytes)
- return &variants->ops[i];
+ if (!nbytes && op_duration_ns < best_op_duration_ns) {
+ best_op_duration_ns = op_duration_ns;
+ best_variant = &variants->ops[i];
+ }
}
- return NULL;
+ return best_variant;
}
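Variant selection is no longer first-match: each supported variant is scored by summing spi_mem_calc_op_duration() over the chunked transfer, after spi_mem_adjust_op_freq() folds in any controller frequency cap. A hypothetical consequence: a 1S-4S-4S read capped at a low bus frequency can now lose to a 1S-1S-4S read running at full speed, whereas the old code would have taken the quad I/O variant purely on list position.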
/**
@@ -1268,6 +1347,10 @@ int spinand_match_and_init(struct spinand_device *spinand,
spinand->id.len = 1 + table[i].devid.len;
spinand->select_target = table[i].select_target;
spinand->set_cont_read = table[i].set_cont_read;
+ spinand->fact_otp = &table[i].fact_otp;
+ spinand->user_otp = &table[i].user_otp;
+ spinand->read_retries = table[i].read_retries;
+ spinand->set_read_retry = table[i].set_read_retry;
op = spinand_select_op_variant(spinand,
info->op_variants.read_cache);
@@ -1454,6 +1537,12 @@ static int spinand_init(struct spinand_device *spinand)
mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
mtd->_resume = spinand_mtd_resume;
+ if (spinand_user_otp_size(spinand) || spinand_fact_otp_size(spinand)) {
+ ret = spinand_set_mtd_otp_ops(spinand);
+ if (ret)
+ goto err_cleanup_ecc_engine;
+ }
+
if (nand->ecc.engine) {
ret = mtd_ooblayout_count_freebytes(mtd);
if (ret < 0)
@@ -1496,6 +1585,7 @@ static void spinand_cleanup(struct spinand_device *spinand)
{
struct nand_device *nand = spinand_to_nand(spinand);
+ nanddev_ecc_engine_cleanup(nand);
nanddev_cleanup(nand);
spinand_manufacturer_cleanup(spinand);
kfree(spinand->databuf);
diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c
index 4597a82de23a..cda718e385a2 100644
--- a/drivers/mtd/nand/spi/esmt.c
+++ b/drivers/mtd/nand/spi/esmt.c
@@ -8,15 +8,20 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
+#include <linux/spi/spi-mem.h>
/* ESMT uses GigaDevice 0xc8 JEDEC ID on some SPI NANDs */
#define SPINAND_MFR_ESMT_C8 0xc8
+#define ESMT_F50L1G41LB_CFG_OTP_PROTECT BIT(7)
+#define ESMT_F50L1G41LB_CFG_OTP_LOCK \
+ (CFG_OTP_ENABLE | ESMT_F50L1G41LB_CFG_OTP_PROTECT)
+
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
@@ -102,6 +107,83 @@ static const struct mtd_ooblayout_ops f50l1g41lb_ooblayout = {
.free = f50l1g41lb_ooblayout_free,
};
+static int f50l1g41lb_otp_info(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen, bool user)
+{
+ if (len < sizeof(*buf))
+ return -EINVAL;
+
+ buf->locked = 0;
+ buf->start = 0;
+ buf->length = user ? spinand_user_otp_size(spinand) :
+ spinand_fact_otp_size(spinand);
+
+ *retlen = sizeof(*buf);
+ return 0;
+}
+
+static int f50l1g41lb_fact_otp_info(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen)
+{
+ return f50l1g41lb_otp_info(spinand, len, buf, retlen, false);
+}
+
+static int f50l1g41lb_user_otp_info(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen)
+{
+ return f50l1g41lb_otp_info(spinand, len, buf, retlen, true);
+}
+
+static int f50l1g41lb_otp_lock(struct spinand_device *spinand, loff_t from,
+ size_t len)
+{
+ struct spi_mem_op write_op = SPINAND_WR_EN_DIS_OP(true);
+ struct spi_mem_op exec_op = SPINAND_PROG_EXEC_OP(0);
+ u8 status;
+ int ret;
+
+ ret = spinand_upd_cfg(spinand, ESMT_F50L1G41LB_CFG_OTP_LOCK,
+ ESMT_F50L1G41LB_CFG_OTP_LOCK);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &write_op);
+ if (ret)
+ goto out;
+
+ ret = spi_mem_exec_op(spinand->spimem, &exec_op);
+ if (ret)
+ goto out;
+
+ ret = spinand_wait(spinand,
+ SPINAND_WRITE_INITIAL_DELAY_US,
+ SPINAND_WRITE_POLL_DELAY_US,
+ &status);
+ if (!ret && (status & STATUS_PROG_FAILED))
+ ret = -EIO;
+
+out:
+ if (spinand_upd_cfg(spinand, ESMT_F50L1G41LB_CFG_OTP_LOCK, 0)) {
+ dev_warn(&spinand_to_mtd(spinand)->dev,
+ "Can not disable OTP mode\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static const struct spinand_user_otp_ops f50l1g41lb_user_otp_ops = {
+ .info = f50l1g41lb_user_otp_info,
+ .lock = f50l1g41lb_otp_lock,
+ .read = spinand_user_otp_read,
+ .write = spinand_user_otp_write,
+};
+
+static const struct spinand_fact_otp_ops f50l1g41lb_fact_otp_ops = {
+ .info = f50l1g41lb_fact_otp_info,
+ .read = spinand_fact_otp_read,
+};
+
static const struct spinand_info esmt_c8_spinand_table[] = {
SPINAND_INFO("F50L1G41LB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x01, 0x7f,
@@ -112,7 +194,9 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
+ SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL),
+ SPINAND_USER_OTP_INFO(28, 2, &f50l1g41lb_user_otp_ops),
+ SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)),
SPINAND_INFO("F50D1G41LB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11, 0x7f,
0x7f, 0x7f),
@@ -122,7 +206,9 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
0,
- SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
+ SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL),
+ SPINAND_USER_OTP_INFO(28, 2, &f50l1g41lb_user_otp_ops),
+ SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)),
SPINAND_INFO("F50D2G41KA",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51, 0x7f,
0x7f, 0x7f),
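Assuming the macro arguments are (number of OTP pages, first page, ops), which is what the new SPINAND_USER_OTP_INFO()/SPINAND_FACT_OTP_INFO() names suggest, the entries above advertise 28 user OTP pages starting at page 2 and 2 factory pages starting at page 0, all reachable only through the CFG_OTP_ENABLE window that ESMT_F50L1G41LB_CFG_OTP_LOCK builds on.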
diff --git a/drivers/mtd/nand/spi/foresee.c b/drivers/mtd/nand/spi/foresee.c
index e0d2d9257045..21ad44032286 100644
--- a/drivers/mtd/nand/spi/foresee.c
+++ b/drivers/mtd/nand/spi/foresee.c
@@ -12,10 +12,10 @@
#define SPINAND_MFR_FORESEE 0xCD
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
@@ -81,6 +81,16 @@ static const struct spinand_info foresee_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&f35sqa002g_ooblayout,
f35sqa002g_ecc_get_status)),
+ SPINAND_INFO("F35SQA001G",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x71, 0x71),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&f35sqa002g_ooblayout,
+ f35sqa002g_ecc_get_status)),
};
static const struct spinand_manufacturer_ops foresee_spinand_manuf_ops = {
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index 6023cba748bb..3ce79ae1bac4 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -24,36 +24,36 @@
#define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_f,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP_3A(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP_3A(false, 0, 0, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(0, 0, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_1gq5,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(read_cache_variants_2gq5,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c
index d277c3220fdc..208db1a21510 100644
--- a/drivers/mtd/nand/spi/macronix.c
+++ b/drivers/mtd/nand/spi/macronix.c
@@ -14,6 +14,8 @@
#define MACRONIX_ECCSR_BF_LAST_PAGE(eccsr) FIELD_GET(GENMASK(3, 0), eccsr)
#define MACRONIX_ECCSR_BF_ACCUMULATED_PAGES(eccsr) FIELD_GET(GENMASK(7, 4), eccsr)
#define MACRONIX_CFG_CONT_READ BIT(2)
+#define MACRONIX_FEATURE_ADDR_READ_RETRY 0x70
+#define MACRONIX_NUM_READ_RETRY_MODES 5
#define STATUS_ECC_HAS_BITFLIPS_THRESHOLD (3 << 4)
@@ -26,10 +28,10 @@ struct macronix_priv {
};
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
@@ -136,6 +138,23 @@ static int macronix_set_cont_read(struct spinand_device *spinand, bool enable)
return 0;
}
+/**
+ * macronix_set_read_retry() - Set the retry mode
+ * @spinand: SPI NAND device
+ * @retry_mode: Specify which retry mode to set
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int macronix_set_read_retry(struct spinand_device *spinand,
+ unsigned int retry_mode)
+{
+ struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MACRONIX_FEATURE_ADDR_READ_RETRY,
+ spinand->scratchbuf);
+
+ *spinand->scratchbuf = retry_mode;
+ return spi_mem_exec_op(spinand->spimem, &op);
+}
+
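With MACRONIX_NUM_READ_RETRY_MODES set to 5, the core's retry loop (see the spinand_mtd_regular_page_read() hunks earlier in this diff) walks modes 1 through 5 on an uncorrectable read and then writes mode 0 back, so the 0x70 feature register is always restored to normal operation before the next read.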
static const struct spinand_info macronix_spinand_table[] = {
SPINAND_INFO("MX35LF1GE4AB",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12),
@@ -168,7 +187,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status),
- SPINAND_CONT_READ(macronix_set_cont_read)),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF4GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x37, 0x03),
NAND_MEMORG(1, 4096, 128, 64, 2048, 40, 1, 1, 1),
@@ -179,7 +200,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status),
- SPINAND_CONT_READ(macronix_set_cont_read)),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF1G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
@@ -188,7 +211,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
- SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF2G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
@@ -198,7 +223,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT,
- SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF2G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x64, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
@@ -207,7 +234,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
- SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF4G24AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1),
@@ -217,7 +246,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT,
- SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35LF4G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x75, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
@@ -226,7 +257,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&write_cache_variants,
&update_cache_variants),
SPINAND_HAS_QE_BIT,
- SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)),
+ SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX31LF1GE4BC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x1e),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
@@ -270,7 +303,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- macronix_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF4G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xf5, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
@@ -280,7 +315,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- macronix_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF4GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb7, 0x03),
NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
@@ -291,7 +328,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status),
- SPINAND_CONT_READ(macronix_set_cont_read)),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF2G14AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa0),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1),
@@ -314,7 +353,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT |
SPINAND_HAS_PROG_PLANE_SELECT_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- macronix_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF2G24AD-Z4I8",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe4, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
@@ -324,7 +365,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- macronix_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF2GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa6, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
@@ -335,7 +378,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status),
- SPINAND_CONT_READ(macronix_set_cont_read)),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF2GE4AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa2, 0x01),
NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
@@ -366,7 +411,9 @@ static const struct spinand_info macronix_spinand_table[] = {
&update_cache_variants),
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
- macronix_ecc_get_status)),
+ macronix_ecc_get_status),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF1GE4AD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x96, 0x03),
NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
@@ -377,7 +424,9 @@ static const struct spinand_info macronix_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout,
macronix_ecc_get_status),
- SPINAND_CONT_READ(macronix_set_cont_read)),
+ SPINAND_CONT_READ(macronix_set_cont_read),
+ SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES,
+ macronix_set_read_retry)),
SPINAND_INFO("MX35UF1GE4AC",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92, 0x01),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
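[Editor's note] SPINAND_READ_RETRY() advertises the number of retry modes together with the setter so the core can step through the modes when on-die ECC reports an uncorrectable page. A minimal sketch of such a retry loop, assuming the read_retries/set_read_retry fields that SPINAND_READ_RETRY() populates (the real loop lives in the spinand core's MTD read path):

    /* Sketch: walk the vendor retry modes until the page reads clean.
     * Mode 0 is the chip's normal read mode and must be restored. */
    static int example_read_with_retries(struct spinand_device *spinand,
                                         struct nand_page_io_req *req)
    {
            unsigned int mode;
            int ret = spinand_read_page(spinand, req);

            for (mode = 1; ret == -EBADMSG && mode <= spinand->read_retries; mode++) {
                    if (spinand->set_read_retry(spinand, mode))
                            break;
                    ret = spinand_read_page(spinand, req);
            }

            if (mode > 1)
                    spinand->set_read_retry(spinand, 0);

            return ret;
    }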
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index 12601bc4227a..f92c28b8d557 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -9,6 +9,8 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/string.h>
#define SPINAND_MFR_MICRON 0x2c
@@ -28,13 +30,17 @@
#define MICRON_SELECT_DIE(x) ((x) << 6)
+#define MICRON_MT29F2G01ABAGD_CFG_OTP_STATE BIT(7)
+#define MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK \
+ (CFG_OTP_ENABLE | MICRON_MT29F2G01ABAGD_CFG_OTP_STATE)
+
static SPINAND_OP_VARIANTS(quadio_read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(x4_write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
@@ -46,10 +52,10 @@ static SPINAND_OP_VARIANTS(x4_update_cache_variants,
/* Micron MT29F2G01AAAED Device */
static SPINAND_OP_VARIANTS(x4_read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(x1_write_cache_variants,
SPINAND_PROG_LOAD(true, 0, NULL, 0));
@@ -168,6 +174,131 @@ static int micron_8_ecc_get_status(struct spinand_device *spinand,
return -EINVAL;
}
+static int mt29f2g01abagd_otp_is_locked(struct spinand_device *spinand)
+{
+ size_t bufsize = spinand_otp_page_size(spinand);
+ size_t retlen;
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(bufsize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = spinand_upd_cfg(spinand,
+ MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK,
+ MICRON_MT29F2G01ABAGD_CFG_OTP_STATE);
+ if (ret)
+ goto free_buf;
+
+ ret = spinand_user_otp_read(spinand, 0, bufsize, &retlen, buf);
+
+ if (spinand_upd_cfg(spinand, MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK,
+ 0)) {
+ dev_warn(&spinand_to_mtd(spinand)->dev,
+ "Can not disable OTP mode\n");
+ ret = -EIO;
+ }
+
+ if (ret)
+ goto free_buf;
+
+ /* If all zeros, then the OTP area is locked. */
+ if (mem_is_zero(buf, bufsize))
+ ret = 1;
+
+free_buf:
+ kfree(buf);
+ return ret;
+}
+
+static int mt29f2g01abagd_otp_info(struct spinand_device *spinand, size_t len,
+ struct otp_info *buf, size_t *retlen,
+ bool user)
+{
+ int locked;
+
+ if (len < sizeof(*buf))
+ return -EINVAL;
+
+ locked = mt29f2g01abagd_otp_is_locked(spinand);
+ if (locked < 0)
+ return locked;
+
+ buf->locked = locked;
+ buf->start = 0;
+ buf->length = user ? spinand_user_otp_size(spinand) :
+ spinand_fact_otp_size(spinand);
+
+ *retlen = sizeof(*buf);
+ return 0;
+}
+
+static int mt29f2g01abagd_fact_otp_info(struct spinand_device *spinand,
+ size_t len, struct otp_info *buf,
+ size_t *retlen)
+{
+ return mt29f2g01abagd_otp_info(spinand, len, buf, retlen, false);
+}
+
+static int mt29f2g01abagd_user_otp_info(struct spinand_device *spinand,
+ size_t len, struct otp_info *buf,
+ size_t *retlen)
+{
+ return mt29f2g01abagd_otp_info(spinand, len, buf, retlen, true);
+}
+
+static int mt29f2g01abagd_otp_lock(struct spinand_device *spinand, loff_t from,
+ size_t len)
+{
+ struct spi_mem_op write_op = SPINAND_WR_EN_DIS_OP(true);
+ struct spi_mem_op exec_op = SPINAND_PROG_EXEC_OP(0);
+ u8 status;
+ int ret;
+
+ ret = spinand_upd_cfg(spinand,
+ MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK,
+ MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(spinand->spimem, &write_op);
+ if (ret)
+ goto out;
+
+ ret = spi_mem_exec_op(spinand->spimem, &exec_op);
+ if (ret)
+ goto out;
+
+ ret = spinand_wait(spinand,
+ SPINAND_WRITE_INITIAL_DELAY_US,
+ SPINAND_WRITE_POLL_DELAY_US,
+ &status);
+ if (!ret && (status & STATUS_PROG_FAILED))
+ ret = -EIO;
+
+out:
+ if (spinand_upd_cfg(spinand, MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK, 0)) {
+ dev_warn(&spinand_to_mtd(spinand)->dev,
+ "Can not disable OTP mode\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static const struct spinand_user_otp_ops mt29f2g01abagd_user_otp_ops = {
+ .info = mt29f2g01abagd_user_otp_info,
+ .lock = mt29f2g01abagd_otp_lock,
+ .read = spinand_user_otp_read,
+ .write = spinand_user_otp_write,
+};
+
+static const struct spinand_fact_otp_ops mt29f2g01abagd_fact_otp_ops = {
+ .info = mt29f2g01abagd_fact_otp_info,
+ .read = spinand_fact_otp_read,
+};
+
static const struct spinand_info micron_spinand_table[] = {
/* M79A 2Gb 3.3V */
SPINAND_INFO("MT29F2G01ABAGD",
@@ -179,7 +310,9 @@ static const struct spinand_info micron_spinand_table[] = {
&x4_update_cache_variants),
0,
SPINAND_ECCINFO(&micron_8_ooblayout,
- micron_8_ecc_get_status)),
+ micron_8_ecc_get_status),
+ SPINAND_USER_OTP_INFO(12, 2, &mt29f2g01abagd_user_otp_ops),
+ SPINAND_FACT_OTP_INFO(2, 0, &mt29f2g01abagd_fact_otp_ops)),
/* M79A 2Gb 1.8V */
SPINAND_INFO("MT29F2G01ABBGD",
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25),
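[Editor's note] With the user/factory OTP ops registered, the generic MTD OTP ioctls work on the corresponding /dev/mtdX character device. A hedged user-space sketch (ioctl and struct names from include/uapi/mtd/mtd-abi.h; the device path is a placeholder, and OTPLOCK is irreversible):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <mtd/mtd-abi.h>

    int main(void)
    {
            struct otp_info info = { .start = 0, .length = 16 };
            int mode = MTD_OTP_USER;  /* or MTD_OTP_FACTORY for the read-only area */
            char buf[16];
            int fd = open("/dev/mtd0", O_RDWR);

            if (fd < 0)
                    return 1;

            /* Route subsequent read()/write() calls to the user OTP area. */
            if (ioctl(fd, OTPSELECT, &mode) == 0 &&
                read(fd, buf, sizeof(buf)) == (ssize_t)sizeof(buf))
                    ioctl(fd, OTPLOCK, &info);  /* permanently lock [0, 16) */

            close(fd);
            return 0;
    }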
diff --git a/drivers/mtd/nand/spi/otp.c b/drivers/mtd/nand/spi/otp.c
new file mode 100644
index 000000000000..ce41bca86ea9
--- /dev/null
+++ b/drivers/mtd/nand/spi/otp.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025, SaluteDevices. All Rights Reserved.
+ *
+ * Author: Martin Kurbanov <mmkurbanov@salutedevices.com>
+ */
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/spinand.h>
+
+/**
+ * spinand_otp_page_size() - Get SPI-NAND OTP page size
+ * @spinand: the spinand device
+ *
+ * Return: the OTP page size.
+ */
+size_t spinand_otp_page_size(struct spinand_device *spinand)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+
+ return nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
+}
+
+static size_t spinand_otp_size(struct spinand_device *spinand,
+ const struct spinand_otp_layout *layout)
+{
+ return layout->npages * spinand_otp_page_size(spinand);
+}
+
+/**
+ * spinand_fact_otp_size() - Get SPI-NAND factory OTP area size
+ * @spinand: the spinand device
+ *
+ * Return: the OTP size.
+ */
+size_t spinand_fact_otp_size(struct spinand_device *spinand)
+{
+ return spinand_otp_size(spinand, &spinand->fact_otp->layout);
+}
+
+/**
+ * spinand_user_otp_size() - Get SPI-NAND user OTP area size
+ * @spinand: the spinand device
+ *
+ * Return: the OTP size.
+ */
+size_t spinand_user_otp_size(struct spinand_device *spinand)
+{
+ return spinand_otp_size(spinand, &spinand->user_otp->layout);
+}
+
+static int spinand_otp_check_bounds(struct spinand_device *spinand, loff_t ofs,
+ size_t len,
+ const struct spinand_otp_layout *layout)
+{
+ if (ofs < 0 || ofs + len > spinand_otp_size(spinand, layout))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int spinand_user_otp_check_bounds(struct spinand_device *spinand,
+ loff_t ofs, size_t len)
+{
+ return spinand_otp_check_bounds(spinand, ofs, len,
+ &spinand->user_otp->layout);
+}
+
+static int spinand_otp_rw(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf, bool is_write,
+ const struct spinand_otp_layout *layout)
+{
+ struct nand_page_io_req req = {};
+ unsigned long long page;
+ size_t copied = 0;
+ size_t otp_pagesize = spinand_otp_page_size(spinand);
+ int ret;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_otp_check_bounds(spinand, ofs, len, layout);
+ if (ret)
+ return ret;
+
+ ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, CFG_OTP_ENABLE);
+ if (ret)
+ return ret;
+
+ page = ofs;
+ req.dataoffs = do_div(page, otp_pagesize);
+ req.pos.page = page + layout->start_page;
+ req.type = is_write ? NAND_PAGE_WRITE : NAND_PAGE_READ;
+ req.mode = MTD_OPS_RAW;
+ req.databuf.in = buf;
+
+ while (copied < len) {
+ req.datalen = min_t(unsigned int,
+ otp_pagesize - req.dataoffs,
+ len - copied);
+
+ if (is_write)
+ ret = spinand_write_page(spinand, &req);
+ else
+ ret = spinand_read_page(spinand, &req);
+
+ if (ret < 0)
+ break;
+
+ req.databuf.in += req.datalen;
+ req.pos.page++;
+ req.dataoffs = 0;
+ copied += req.datalen;
+ }
+
+ *retlen = copied;
+
+ if (spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0)) {
+ dev_warn(&spinand_to_mtd(spinand)->dev,
+ "Can not disable OTP mode\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/**
+ * spinand_fact_otp_read() - Read from the factory OTP area
+ * @spinand: the spinand device
+ * @ofs: the offset to read
+ * @len: the number of data bytes to read
+ * @retlen: the pointer to variable to store the number of read bytes
+ * @buf: the buffer to store the read data
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+int spinand_fact_otp_read(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf)
+{
+ return spinand_otp_rw(spinand, ofs, len, retlen, buf, false,
+ &spinand->fact_otp->layout);
+}
+
+/**
+ * spinand_user_otp_read() - Read from the user OTP area
+ * @spinand: the spinand device
+ * @ofs: the offset to read
+ * @len: the number of data bytes to read
+ * @retlen: the pointer to variable to store the number of read bytes
+ * @buf: the buffer to store the read data
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+int spinand_user_otp_read(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf)
+{
+ return spinand_otp_rw(spinand, ofs, len, retlen, buf, false,
+ &spinand->user_otp->layout);
+}
+
+/**
+ * spinand_user_otp_write() - Write to the user OTP area
+ * @spinand: the spinand device
+ * @ofs: the offset to write to
+ * @len: the number of bytes to write
+ * @retlen: the pointer to variable to store the number of written bytes
+ * @buf: the buffer with data to write
+ *
+ * Return: 0 on success, an error code otherwise.
+ */
+int spinand_user_otp_write(struct spinand_device *spinand, loff_t ofs,
+ size_t len, size_t *retlen, const u8 *buf)
+{
+ return spinand_otp_rw(spinand, ofs, len, retlen, (u8 *)buf, true,
+ &spinand->user_otp->layout);
+}
+
+static int spinand_mtd_otp_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf,
+ bool is_fact)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ int ret;
+
+ *retlen = 0;
+
+ mutex_lock(&spinand->lock);
+
+ if (is_fact)
+ ret = spinand->fact_otp->ops->info(spinand, len, buf, retlen);
+ else
+ ret = spinand->user_otp->ops->info(spinand, len, buf, retlen);
+
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_mtd_fact_otp_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return spinand_mtd_otp_info(mtd, len, retlen, buf, true);
+}
+
+static int spinand_mtd_user_otp_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return spinand_mtd_otp_info(mtd, len, retlen, buf, false);
+}
+
+static int spinand_mtd_otp_read(struct mtd_info *mtd, loff_t ofs, size_t len,
+ size_t *retlen, u8 *buf, bool is_fact)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ int ret;
+
+ *retlen = 0;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_otp_check_bounds(spinand, ofs, len,
+ is_fact ? &spinand->fact_otp->layout :
+ &spinand->user_otp->layout);
+ if (ret)
+ return ret;
+
+ mutex_lock(&spinand->lock);
+
+ if (is_fact)
+ ret = spinand->fact_otp->ops->read(spinand, ofs, len, retlen,
+ buf);
+ else
+ ret = spinand->user_otp->ops->read(spinand, ofs, len, retlen,
+ buf);
+
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_mtd_fact_otp_read(struct mtd_info *mtd, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf)
+{
+ return spinand_mtd_otp_read(mtd, ofs, len, retlen, buf, true);
+}
+
+static int spinand_mtd_user_otp_read(struct mtd_info *mtd, loff_t ofs,
+ size_t len, size_t *retlen, u8 *buf)
+{
+ return spinand_mtd_otp_read(mtd, ofs, len, retlen, buf, false);
+}
+
+static int spinand_mtd_user_otp_write(struct mtd_info *mtd, loff_t ofs,
+ size_t len, size_t *retlen, const u8 *buf)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ const struct spinand_user_otp_ops *ops = spinand->user_otp->ops;
+ int ret;
+
+ *retlen = 0;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_user_otp_check_bounds(spinand, ofs, len);
+ if (ret)
+ return ret;
+
+ mutex_lock(&spinand->lock);
+ ret = ops->write(spinand, ofs, len, retlen, buf);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_mtd_user_otp_erase(struct mtd_info *mtd, loff_t ofs,
+ size_t len)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ const struct spinand_user_otp_ops *ops = spinand->user_otp->ops;
+ int ret;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_user_otp_check_bounds(spinand, ofs, len);
+ if (ret)
+ return ret;
+
+ mutex_lock(&spinand->lock);
+ ret = ops->erase(spinand, ofs, len);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+static int spinand_mtd_user_otp_lock(struct mtd_info *mtd, loff_t ofs,
+ size_t len)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ const struct spinand_user_otp_ops *ops = spinand->user_otp->ops;
+ int ret;
+
+ if (!len)
+ return 0;
+
+ ret = spinand_user_otp_check_bounds(spinand, ofs, len);
+ if (ret)
+ return ret;
+
+ mutex_lock(&spinand->lock);
+ ret = ops->lock(spinand, ofs, len);
+ mutex_unlock(&spinand->lock);
+
+ return ret;
+}
+
+/**
+ * spinand_set_mtd_otp_ops() - Set up the MTD OTP methods
+ * @spinand: the spinand device
+ *
+ * Wire the device's user and factory OTP ops into the MTD OTP callbacks.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int spinand_set_mtd_otp_ops(struct spinand_device *spinand)
+{
+ struct mtd_info *mtd = spinand_to_mtd(spinand);
+ const struct spinand_fact_otp_ops *fact_ops = spinand->fact_otp->ops;
+ const struct spinand_user_otp_ops *user_ops = spinand->user_otp->ops;
+
+ if (!user_ops && !fact_ops)
+ return -EINVAL;
+
+ if (user_ops) {
+ if (user_ops->info)
+ mtd->_get_user_prot_info = spinand_mtd_user_otp_info;
+
+ if (user_ops->read)
+ mtd->_read_user_prot_reg = spinand_mtd_user_otp_read;
+
+ if (user_ops->write)
+ mtd->_write_user_prot_reg = spinand_mtd_user_otp_write;
+
+ if (user_ops->lock)
+ mtd->_lock_user_prot_reg = spinand_mtd_user_otp_lock;
+
+ if (user_ops->erase)
+ mtd->_erase_user_prot_reg = spinand_mtd_user_otp_erase;
+ }
+
+ if (fact_ops) {
+ if (fact_ops->info)
+ mtd->_get_fact_prot_info = spinand_mtd_fact_otp_info;
+
+ if (fact_ops->read)
+ mtd->_read_fact_prot_reg = spinand_mtd_fact_otp_read;
+ }
+
+ return 0;
+}
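[Editor's note] spinand_otp_rw() maps a byte offset in the OTP area onto a (page, offset-in-page) pair with do_div(), which reduces its first argument to the quotient and returns the remainder. A worked example with hypothetical geometry (2048-byte pages plus 128 bytes OOB, i.e. 2176 bytes per OTP page):

    #include <asm/div64.h>
    #include <linux/types.h>

    static void example_otp_split(void)
    {
            unsigned long long page = 3000; /* byte offset into the OTP area */
            u32 dataoffs = do_div(page, 2176);

            /* Now page == 1 and dataoffs == 824: the access starts 824
             * bytes into the second OTP page. */
    }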
diff --git a/drivers/mtd/nand/spi/paragon.c b/drivers/mtd/nand/spi/paragon.c
index 519ade513c1f..b5ea24861803 100644
--- a/drivers/mtd/nand/spi/paragon.c
+++ b/drivers/mtd/nand/spi/paragon.c
@@ -22,12 +22,12 @@
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/skyhigh.c b/drivers/mtd/nand/spi/skyhigh.c
new file mode 100644
index 000000000000..ac73f43e9365
--- /dev/null
+++ b/drivers/mtd/nand/spi/skyhigh.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 SkyHigh Memory Limited
+ *
+ * Author: Takahiro Kuwano <takahiro.kuwano@infineon.com>
+ * Co-Author: KR Kim <kr.kim@skyhighmemory.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_SKYHIGH 0x01
+#define SKYHIGH_STATUS_ECC_1TO2_BITFLIPS (1 << 4)
+#define SKYHIGH_STATUS_ECC_3TO6_BITFLIPS (2 << 4)
+#define SKYHIGH_STATUS_ECC_UNCOR_ERROR (3 << 4)
+#define SKYHIGH_CONFIG_PROTECT_EN BIT(1)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int skyhigh_spinand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ /* ECC bytes are stored in a hidden area. */
+ return -ERANGE;
+}
+
+static int skyhigh_spinand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ /* ECC bytes are stored in a hidden area. Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+ region->length = mtd->oobsize - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops skyhigh_spinand_ooblayout = {
+ .ecc = skyhigh_spinand_ooblayout_ecc,
+ .free = skyhigh_spinand_ooblayout_free,
+};
+
+static int skyhigh_spinand_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case SKYHIGH_STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ case SKYHIGH_STATUS_ECC_1TO2_BITFLIPS:
+ return 2;
+
+ case SKYHIGH_STATUS_ECC_3TO6_BITFLIPS:
+ return 6;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct spinand_info skyhigh_spinand_table[] = {
+ SPINAND_INFO("S35ML01G301",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x15),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(6, 32),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_NO_RAW_ACCESS,
+ SPINAND_ECCINFO(&skyhigh_spinand_ooblayout,
+ skyhigh_spinand_ecc_get_status)),
+ SPINAND_INFO("S35ML01G300",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(6, 32),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_NO_RAW_ACCESS,
+ SPINAND_ECCINFO(&skyhigh_spinand_ooblayout,
+ skyhigh_spinand_ecc_get_status)),
+ SPINAND_INFO("S35ML02G300",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(6, 32),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_NO_RAW_ACCESS,
+ SPINAND_ECCINFO(&skyhigh_spinand_ooblayout,
+ skyhigh_spinand_ecc_get_status)),
+ SPINAND_INFO("S35ML04G300",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35),
+ NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 2, 1, 1),
+ NAND_ECCREQ(6, 32),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_NO_RAW_ACCESS,
+ SPINAND_ECCINFO(&skyhigh_spinand_ooblayout,
+ skyhigh_spinand_ecc_get_status)),
+};
+
+static int skyhigh_spinand_init(struct spinand_device *spinand)
+{
+ /*
+ * Config_Protect_En (bit 1 in Block Lock register) must be set to 1
+ * before writing other bits. Do it here, before the core unlocks all
+ * blocks by clearing the block protection bits.
+ */
+ return spinand_write_reg_op(spinand, REG_BLOCK_LOCK,
+ SKYHIGH_CONFIG_PROTECT_EN);
+}
+
+static const struct spinand_manufacturer_ops skyhigh_spinand_manuf_ops = {
+ .init = skyhigh_spinand_init,
+};
+
+const struct spinand_manufacturer skyhigh_spinand_manufacturer = {
+ .id = SPINAND_MFR_SKYHIGH,
+ .name = "SkyHigh",
+ .chips = skyhigh_spinand_table,
+ .nchips = ARRAY_SIZE(skyhigh_spinand_table),
+ .ops = &skyhigh_spinand_manuf_ops,
+};
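[Editor's note] Because the status register only exposes bitflip ranges, skyhigh_spinand_ecc_get_status() reports the upper bound of each range (2 for "1 to 2" flips, 6 for "3 to 6"). The MTD core then weighs that figure against the bitflip threshold, which defaults to the ECC strength. A sketch of that consumer-side check, hedged from the generic mtd_read() behaviour:

    /* Sketch: how a max-bitflips return value is turned into -EUCLEAN. */
    static int example_check_bitflips(struct mtd_info *mtd, int max_bitflips)
    {
            if (max_bitflips < 0)
                    return max_bitflips;    /* e.g. -EBADMSG: uncorrectable */

            /* Ask upper layers to scrub the block before data is lost. */
            return max_bitflips >= mtd->bitflip_threshold ? -EUCLEAN : 0;
    }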
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index bbbcaa87c0bc..f3f2c0ed1d0c 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -15,10 +15,10 @@
#define TOSH_STATUS_ECC_HAS_BITFLIPS_T (3 << 4)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_x4_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index 7180e615ac97..397c90b745e3 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>
+#include <linux/units.h>
#define SPINAND_MFR_WINBOND 0xEF
@@ -17,13 +18,31 @@
#define W25N04KV_STATUS_ECC_5_8_BITFLIPS (3 << 4)
+/*
+ * "X2" in the core is equivalent to "dual output" in the datasheets,
+ * "X4" in the core is equivalent to "quad output" in the datasheets.
+ */
+
+static SPINAND_OP_VARIANTS(read_cache_dtr_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4D_4D_OP(0, 8, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_4D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 104 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2D_2D_OP(0, 4, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_2D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 104 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1D_1D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 54 * HZ_PER_MHZ));
+
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
@@ -194,7 +213,7 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbc, 0x21),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ SPINAND_INFO_OP_VARIANTS(&read_cache_dtr_variants,
&write_cache_variants,
&update_cache_variants),
0,
@@ -223,7 +242,7 @@ static const struct spinand_info winbond_spinand_table[] = {
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbf, 0x22),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 2, 1),
NAND_ECCREQ(1, 512),
- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ SPINAND_INFO_OP_VARIANTS(&read_cache_dtr_variants,
&write_cache_variants,
&update_cache_variants),
0,
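[Editor's note] The variant lists are ordered fastest-first, so placing the DTR entries at the top lets capable controllers pick them up while everyone else falls through to the SDR ops; the trailing frequency argument caps variants that are only valid below a given clock. A simplified sketch of the selection the core performs at probe time (the real helper also adjusts dummy-byte counts and the op's maximum frequency):

    /* Sketch: keep the first variant the controller claims to support. */
    static const struct spi_mem_op *
    example_select_variant(struct spi_mem *spimem,
                           const struct spinand_op_variants *variants)
    {
            unsigned int i;

            for (i = 0; i < variants->nops; i++) {
                    struct spi_mem_op op = variants->ops[i];

                    if (spi_mem_supports_op(spimem, &op))
                            return &variants->ops[i];
            }

            return NULL;    /* no usable op, probe should fail */
    }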
diff --git a/drivers/mtd/nand/spi/xtx.c b/drivers/mtd/nand/spi/xtx.c
index 66a4255bdf06..abbbcd594c2c 100644
--- a/drivers/mtd/nand/spi/xtx.c
+++ b/drivers/mtd/nand/spi/xtx.c
@@ -23,12 +23,12 @@
#define XT26XXXD_STATUS_ECC_UNCOR_ERROR (2)
static SPINAND_OP_VARIANTS(read_cache_variants,
- SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
- SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+ SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
static SPINAND_OP_VARIANTS(write_cache_variants,
SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index b5b3c4c44a94..d28d4f1790f5 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1067,7 +1067,7 @@ static int sm_write(struct mtd_blktrans_dev *dev,
sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);
/* No need in flush thread running now */
- del_timer(&ftl->timer);
+ timer_delete(&ftl->timer);
mutex_lock(&ftl->mutex);
zone = sm_get_zone(ftl, zone_num);
@@ -1111,7 +1111,7 @@ static void sm_release(struct mtd_blktrans_dev *dev)
{
struct sm_ftl *ftl = dev->priv;
- del_timer_sync(&ftl->timer);
+ timer_delete_sync(&ftl->timer);
cancel_work_sync(&ftl->flush_work);
mutex_lock(&ftl->mutex);
sm_cache_flush(ftl);
diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c
index 45d1153a04a0..82c592f0a1e1 100644
--- a/drivers/mtd/spi-nor/atmel.c
+++ b/drivers/mtd/spi-nor/atmel.c
@@ -238,6 +238,10 @@ static const struct flash_info atmel_nor_parts[] = {
.flags = SPI_NOR_HAS_LOCK,
.no_sfdp_flags = SECT_4K,
.fixups = &at25fs_nor_fixups
+ }, {
+ .id = SNOR_ID(0x1f, 0x87, 0x01),
+ .size = SZ_4M,
+ .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
},
};
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index b6f374ded390..ac4b960101cc 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -7,16 +7,18 @@
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
-#include <linux/err.h>
-#include <linux/errno.h>
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/spi-nor.h>
#include <linux/mutex.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -638,32 +640,26 @@ static bool spi_nor_use_parallel_locking(struct spi_nor *nor)
static int spi_nor_rww_start_rdst(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
- int ret = -EAGAIN;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
if (rww->ongoing_io || rww->ongoing_rd)
- goto busy;
+ return -EAGAIN;
rww->ongoing_io = true;
rww->ongoing_rd = true;
- ret = 0;
-busy:
- mutex_unlock(&nor->lock);
- return ret;
+ return 0;
}
static void spi_nor_rww_end_rdst(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
rww->ongoing_io = false;
rww->ongoing_rd = false;
-
- mutex_unlock(&nor->lock);
}
static int spi_nor_lock_rdst(struct spi_nor *nor)
@@ -1211,26 +1207,21 @@ static void spi_nor_offset_to_banks(u64 bank_size, loff_t start, size_t len,
static bool spi_nor_rww_start_io(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
- bool start = false;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
if (rww->ongoing_io)
- goto busy;
+ return false;
rww->ongoing_io = true;
- start = true;
-busy:
- mutex_unlock(&nor->lock);
- return start;
+ return true;
}
static void spi_nor_rww_end_io(struct spi_nor *nor)
{
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
nor->rww.ongoing_io = false;
- mutex_unlock(&nor->lock);
}
static int spi_nor_lock_device(struct spi_nor *nor)
@@ -1253,32 +1244,27 @@ static void spi_nor_unlock_device(struct spi_nor *nor)
static bool spi_nor_rww_start_exclusive(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
- bool start = false;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
- goto busy;
+ return false;
rww->ongoing_io = true;
rww->ongoing_rd = true;
rww->ongoing_pe = true;
- start = true;
-busy:
- mutex_unlock(&nor->lock);
- return start;
+ return true;
}
static void spi_nor_rww_end_exclusive(struct spi_nor *nor)
{
struct spi_nor_rww *rww = &nor->rww;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
rww->ongoing_io = false;
rww->ongoing_rd = false;
rww->ongoing_pe = false;
- mutex_unlock(&nor->lock);
}
int spi_nor_prep_and_lock(struct spi_nor *nor)
@@ -1315,30 +1301,26 @@ static bool spi_nor_rww_start_pe(struct spi_nor *nor, loff_t start, size_t len)
{
struct spi_nor_rww *rww = &nor->rww;
unsigned int used_banks = 0;
- bool started = false;
u8 first, last;
int bank;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
- goto busy;
+ return false;
spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
for (bank = first; bank <= last; bank++) {
if (rww->used_banks & BIT(bank))
- goto busy;
+ return false;
used_banks |= BIT(bank);
}
rww->used_banks |= used_banks;
rww->ongoing_pe = true;
- started = true;
-busy:
- mutex_unlock(&nor->lock);
- return started;
+ return true;
}
static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len)
@@ -1347,15 +1329,13 @@ static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len)
u8 first, last;
int bank;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
for (bank = first; bank <= last; bank++)
rww->used_banks &= ~BIT(bank);
rww->ongoing_pe = false;
-
- mutex_unlock(&nor->lock);
}
static int spi_nor_prep_and_lock_pe(struct spi_nor *nor, loff_t start, size_t len)
@@ -1392,19 +1372,18 @@ static bool spi_nor_rww_start_rd(struct spi_nor *nor, loff_t start, size_t len)
{
struct spi_nor_rww *rww = &nor->rww;
unsigned int used_banks = 0;
- bool started = false;
u8 first, last;
int bank;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
if (rww->ongoing_io || rww->ongoing_rd)
- goto busy;
+ return false;
spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
for (bank = first; bank <= last; bank++) {
if (rww->used_banks & BIT(bank))
- goto busy;
+ return false;
used_banks |= BIT(bank);
}
@@ -1412,11 +1391,8 @@ static bool spi_nor_rww_start_rd(struct spi_nor *nor, loff_t start, size_t len)
rww->used_banks |= used_banks;
rww->ongoing_io = true;
rww->ongoing_rd = true;
- started = true;
-busy:
- mutex_unlock(&nor->lock);
- return started;
+ return true;
}
static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
@@ -1425,7 +1401,7 @@ static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
u8 first, last;
int bank;
- mutex_lock(&nor->lock);
+ guard(mutex)(&nor->lock);
spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
for (bank = first; bank <= last; bank++)
@@ -1433,8 +1409,6 @@ static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
rww->ongoing_io = false;
rww->ongoing_rd = false;
-
- mutex_unlock(&nor->lock);
}
static int spi_nor_prep_and_lock_rd(struct spi_nor *nor, loff_t start, size_t len)
@@ -3576,7 +3550,8 @@ static int spi_nor_create_write_dirmap(struct spi_nor *nor)
static int spi_nor_probe(struct spi_mem *spimem)
{
struct spi_device *spi = spimem->spi;
- struct flash_platform_data *data = dev_get_platdata(&spi->dev);
+ struct device *dev = &spi->dev;
+ struct flash_platform_data *data = dev_get_platdata(dev);
struct spi_nor *nor;
/*
* Enable all caps by default. The core will mask them after
@@ -3586,13 +3561,17 @@ static int spi_nor_probe(struct spi_mem *spimem)
char *flash_name;
int ret;
- nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
+ ret = devm_regulator_get_enable(dev, "vcc");
+ if (ret)
+ return ret;
+
+ nor = devm_kzalloc(dev, sizeof(*nor), GFP_KERNEL);
if (!nor)
return -ENOMEM;
nor->spimem = spimem;
- nor->dev = &spi->dev;
- spi_nor_set_flash_node(nor, spi->dev.of_node);
+ nor->dev = dev;
+ spi_nor_set_flash_node(nor, dev->of_node);
spi_mem_set_drvdata(spimem, nor);
@@ -3628,9 +3607,8 @@ static int spi_nor_probe(struct spi_mem *spimem)
*/
if (nor->params->page_size > PAGE_SIZE) {
nor->bouncebuf_size = nor->params->page_size;
- devm_kfree(nor->dev, nor->bouncebuf);
- nor->bouncebuf = devm_kmalloc(nor->dev,
- nor->bouncebuf_size,
+ devm_kfree(dev, nor->bouncebuf);
+ nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
GFP_KERNEL);
if (!nor->bouncebuf)
return -ENOMEM;
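[Editor's note] The conversions above swap open-coded lock/unlock pairs for guard(mutex) from <linux/cleanup.h>: the mutex is released automatically when the guard goes out of scope, so the early returns no longer need a busy:/unlock epilogue. A minimal sketch of the pattern:

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    static int example_try_claim(struct mutex *lock, bool *ongoing)
    {
            guard(mutex)(lock);     /* dropped on every return path */

            if (*ongoing)
                    return -EAGAIN; /* no goto/unlock needed */

            *ongoing = true;
            return 0;
    }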
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 5c33740ed7f5..ceff412f7d65 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -448,7 +448,11 @@ struct spi_nor_id {
* @id: pointer to struct spi_nor_id or NULL, which means "no ID" (mostly
* older chips).
* @name: (obsolete) the name of the flash. Do not set it for new additions.
- * @size: the size of the flash in bytes.
+ * @size: the size of the flash in bytes. The flash size is one
+ * property parsed from SFDP. We use it as an indicator of
+ * whether we need SFDP parsing for a particular flash.
+ * I.e. non-legacy flash entries in flash_info will have
+ * a size of zero iff SFDP should be used.
* @sector_size: (optional) the size listed here is what works with
* SPINOR_OP_SE, which isn't necessarily called a "sector" by
* the vendor. Defaults to 64k.
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index 830da21eea08..55644a3cd88c 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -45,8 +45,26 @@ mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
return 0;
}
+static int
+macronix_qpp4b_post_sfdp_fixups(struct spi_nor *nor)
+{
+ /* PP_1_1_4_4B is supported but missing in 4BAIT. */
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
+ SPINOR_OP_PP_1_1_4_4B, SNOR_PROTO_1_1_4);
+
+ return 0;
+}
+
static const struct spi_nor_fixups mx25l25635_fixups = {
.post_bfpt = mx25l25635_post_bfpt_fixups,
+ .post_sfdp = macronix_qpp4b_post_sfdp_fixups,
+};
+
+static const struct spi_nor_fixups macronix_qpp4b_fixups = {
+ .post_sfdp = macronix_qpp4b_post_sfdp_fixups,
};
static const struct flash_info macronix_nor_parts[] = {
@@ -102,11 +120,17 @@ static const struct flash_info macronix_nor_parts[] = {
.size = SZ_64M,
.no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixup_flags = SPI_NOR_4B_OPCODES,
+ .fixups = &macronix_qpp4b_fixups,
}, {
.id = SNOR_ID(0xc2, 0x20, 0x1b),
.name = "mx66l1g45g",
.size = SZ_128M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+ .fixups = &macronix_qpp4b_fixups,
+ }, {
+ /* MX66L2G45G */
+ .id = SNOR_ID(0xc2, 0x20, 0x1c),
+ .fixups = &macronix_qpp4b_fixups,
}, {
.id = SNOR_ID(0xc2, 0x23, 0x14),
.name = "mx25v8035f",
@@ -143,29 +167,30 @@ static const struct flash_info macronix_nor_parts[] = {
.size = SZ_16M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
}, {
- .id = SNOR_ID(0xc2, 0x25, 0x39),
- .name = "mx25u25635f",
- .size = SZ_32M,
- .no_sfdp_flags = SECT_4K,
- .fixup_flags = SPI_NOR_4B_OPCODES,
- }, {
.id = SNOR_ID(0xc2, 0x25, 0x3a),
.name = "mx25u51245g",
.size = SZ_64M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixup_flags = SPI_NOR_4B_OPCODES,
+ .fixups = &macronix_qpp4b_fixups,
}, {
.id = SNOR_ID(0xc2, 0x25, 0x3a),
.name = "mx66u51235f",
.size = SZ_64M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixup_flags = SPI_NOR_4B_OPCODES,
+ .fixups = &macronix_qpp4b_fixups,
+ }, {
+ /* MX66U1G45G */
+ .id = SNOR_ID(0xc2, 0x25, 0x3b),
+ .fixups = &macronix_qpp4b_fixups,
}, {
.id = SNOR_ID(0xc2, 0x25, 0x3c),
.name = "mx66u2g45g",
.size = SZ_256M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
.fixup_flags = SPI_NOR_4B_OPCODES,
+ .fixups = &macronix_qpp4b_fixups,
}, {
.id = SNOR_ID(0xc2, 0x26, 0x18),
.name = "mx25l12855e",
@@ -230,7 +255,8 @@ static int macronix_nor_octal_dtr_en(struct spi_nor *nor)
return ret;
/* Read flash ID to make sure the switch was successful. */
- ret = spi_nor_read_id(nor, 4, 4, buf, SNOR_PROTO_8_8_8_DTR);
+ ret = spi_nor_read_id(nor, nor->addr_nbytes, 4, buf,
+ SNOR_PROTO_8_8_8_DTR);
if (ret) {
dev_dbg(nor->dev, "error %d reading JEDEC ID after enabling 8D-8D-8D mode\n", ret);
return ret;
diff --git a/drivers/mtd/spi-nor/otp.c b/drivers/mtd/spi-nor/otp.c
index 9a729aa3452d..7d0b145d78d8 100644
--- a/drivers/mtd/spi-nor/otp.c
+++ b/drivers/mtd/spi-nor/otp.c
@@ -6,6 +6,7 @@
*/
#include <linux/log2.h>
+#include <linux/math64.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/spi-nor.h>
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index 5a88a6096ca8..bf08dbf5e742 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -958,6 +958,11 @@ static const struct flash_info spansion_nor_parts[] = {
.mfr_flags = USE_CLPEF,
.fixups = &s25hx_t_fixups
}, {
+ /* S28HL256T */
+ .id = SNOR_ID(0x34, 0x5a, 0x19),
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s28hx_t_fixups,
+ }, {
.id = SNOR_ID(0x34, 0x5a, 0x1a),
.name = "s28hl512t",
.mfr_flags = USE_CLPEF,
@@ -968,6 +973,11 @@ static const struct flash_info spansion_nor_parts[] = {
.mfr_flags = USE_CLPEF,
.fixups = &s28hx_t_fixups,
}, {
+ /* S28HL02GT */
+ .id = SNOR_ID(0x34, 0x5a, 0x1c),
+ .mfr_flags = USE_CLPEF,
+ .fixups = &s28hx_t_fixups,
+ }, {
.id = SNOR_ID(0x34, 0x5b, 0x19),
.mfr_flags = USE_CLPEF,
.fixups = &s28hx_t_fixups,
diff --git a/drivers/mtd/spi-nor/swp.c b/drivers/mtd/spi-nor/swp.c
index e48c3cff247a..9c9328478d8a 100644
--- a/drivers/mtd/spi-nor/swp.c
+++ b/drivers/mtd/spi-nor/swp.c
@@ -5,6 +5,7 @@
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
+#include <linux/math64.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/spi-nor.h>
diff --git a/drivers/mtd/spi-nor/sysfs.c b/drivers/mtd/spi-nor/sysfs.c
index 5e9eb268073d..4f12ff755df0 100644
--- a/drivers/mtd/spi-nor/sysfs.c
+++ b/drivers/mtd/spi-nor/sysfs.c
@@ -50,7 +50,7 @@ static struct attribute *spi_nor_sysfs_entries[] = {
};
static ssize_t sfdp_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr, char *buf,
+ const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct spi_device *spi = to_spi_device(kobj_to_dev(kobj));
@@ -62,9 +62,9 @@ static ssize_t sfdp_read(struct file *filp, struct kobject *kobj,
return memory_read_from_buffer(buf, count, &off, nor->sfdp->dwords,
sfdp_size);
}
-static BIN_ATTR_RO(sfdp, 0);
+static const BIN_ATTR_RO(sfdp, 0);
-static struct bin_attribute *spi_nor_sysfs_bin_entries[] = {
+static const struct bin_attribute *const spi_nor_sysfs_bin_entries[] = {
&bin_attr_sfdp,
NULL
};
@@ -104,7 +104,7 @@ static const struct attribute_group spi_nor_sysfs_group = {
.is_visible = spi_nor_sysfs_is_visible,
.is_bin_visible = spi_nor_sysfs_is_bin_visible,
.attrs = spi_nor_sysfs_entries,
- .bin_attrs = spi_nor_sysfs_bin_entries,
+ .bin_attrs_new = spi_nor_sysfs_bin_entries,
};
const struct attribute_group *spi_nor_sysfs_groups[] = {
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
index 8d0a00d69e12..63a93c9eb917 100644
--- a/drivers/mtd/spi-nor/winbond.c
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -10,6 +10,7 @@
#define WINBOND_NOR_OP_RDEAR 0xc8 /* Read Extended Address Register */
#define WINBOND_NOR_OP_WREAR 0xc5 /* Write Extended Address Register */
+#define WINBOND_NOR_OP_SELDIE 0xc2 /* Select active die */
#define WINBOND_NOR_WREAR_OP(buf) \
SPI_MEM_OP(SPI_MEM_OP_CMD(WINBOND_NOR_OP_WREAR, 0), \
@@ -17,6 +18,12 @@
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_DATA_OUT(1, buf, 0))
+#define WINBOND_NOR_SELDIE_OP(buf) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(WINBOND_NOR_OP_SELDIE, 0), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(1, buf, 0))
+
static int
w25q128_post_bfpt_fixups(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
@@ -66,6 +73,79 @@ static const struct spi_nor_fixups w25q256_fixups = {
.post_bfpt = w25q256_post_bfpt_fixups,
};
+/**
+ * winbond_nor_select_die() - Set active die.
+ * @nor: pointer to 'struct spi_nor'.
+ * @die: die to set active.
+ *
+ * Certain Winbond chips feature more than a single die. This is mostly hidden
+ * from the user, except that some chips may experience timing deviations when
+ * modifying the status bits between dies, which in some corner cases may
+ * produce problematic races. Being able to explicitly select a die to check its
+ * state in this case may be useful.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int winbond_nor_select_die(struct spi_nor *nor, u8 die)
+{
+ int ret;
+
+ nor->bouncebuf[0] = die;
+
+ if (nor->spimem) {
+ struct spi_mem_op op = WINBOND_NOR_SELDIE_OP(nor->bouncebuf);
+
+ spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+ ret = spi_mem_exec_op(nor->spimem, &op);
+ } else {
+ ret = spi_nor_controller_ops_write_reg(nor,
+ WINBOND_NOR_OP_SELDIE,
+ nor->bouncebuf, 1);
+ }
+
+ if (ret)
+ dev_dbg(nor->dev, "error %d selecting die %d\n", ret, die);
+
+ return ret;
+}
+
+static int winbond_nor_multi_die_ready(struct spi_nor *nor)
+{
+ int ret, i;
+
+ for (i = 0; i < nor->params->n_dice; i++) {
+ ret = winbond_nor_select_die(nor, i);
+ if (ret)
+ return ret;
+
+ ret = spi_nor_sr_ready(nor);
+ if (ret <= 0)
+ return ret;
+ }
+
+ return 1;
+}
+
+static int
+winbond_nor_multi_die_post_sfdp_fixups(struct spi_nor *nor)
+{
+ /*
+ * SFDP can describe the number of dice, but only in optional tables
+ * which these chips do not provide. The die count matters, though,
+ * because these devices need extra care when polling the busy bit.
+ */
+ nor->params->n_dice = nor->params->size / SZ_64M;
+ nor->params->ready = winbond_nor_multi_die_ready;
+
+ return 0;
+}
+
+static const struct spi_nor_fixups winbond_nor_multi_die_fixups = {
+ .post_sfdp = winbond_nor_multi_die_post_sfdp_fixups,
+};
+
static const struct flash_info winbond_nor_parts[] = {
{
.id = SNOR_ID(0xef, 0x30, 0x10),
@@ -147,6 +227,10 @@ static const struct flash_info winbond_nor_parts[] = {
.size = SZ_64M,
.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
}, {
+ /* W25Q01JV */
+ .id = SNOR_ID(0xef, 0x40, 0x21),
+ .fixups = &winbond_nor_multi_die_fixups,
+ }, {
.id = SNOR_ID(0xef, 0x50, 0x12),
.name = "w25q20bw",
.size = SZ_256K,
@@ -222,6 +306,10 @@ static const struct flash_info winbond_nor_parts[] = {
.id = SNOR_ID(0xef, 0x70, 0x19),
.name = "w25q256jvm",
}, {
+ /* W25Q02JV */
+ .id = SNOR_ID(0xef, 0x70, 0x22),
+ .fixups = &winbond_nor_multi_die_fixups,
+ }, {
.id = SNOR_ID(0xef, 0x71, 0x19),
.name = "w25m512jv",
.size = SZ_64M,
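[Editor's note] The die count falls out of the flash size because these Winbond parts stack 512 Mbit (64 MiB) dice, and each die carries its own status register, hence the per-die busy poll above. A worked example under that assumption:

    #include <linux/sizes.h>
    #include <linux/types.h>

    /* W25Q01JV (1 Gbit): 128 MiB / 64 MiB = 2 dice.
     * W25Q02JV (2 Gbit): 256 MiB / 64 MiB = 4 dice. */
    static unsigned int example_winbond_n_dice(u64 size)
    {
            return size / SZ_64M;
    }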
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 60d0155be869..39cc0a6a4d37 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -199,7 +199,7 @@ static blk_status_t ubiblock_read(struct request *req)
* and ubi_read_sg() will check that limit.
*/
ubi_sgl_init(&pdu->usgl);
- blk_rq_map_sg(req->q, req, pdu->usgl.sg);
+ blk_rq_map_sg(req, pdu->usgl.sg);
while (bytes_left) {
/*
@@ -383,7 +383,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
dev->tag_set.ops = &ubiblock_mq_ops;
dev->tag_set.queue_depth = 64;
dev->tag_set.numa_node = NUMA_NO_NODE;
- dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+ dev->tag_set.flags = BLK_MQ_F_BLOCKING;
dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
dev->tag_set.driver_data = dev;
dev->tag_set.nr_hw_queues = 1;
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 6bb80d7714bc..b700a0efaa93 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -828,6 +828,70 @@ out_free:
return err;
}
+static int ubi_get_ec_info(struct ubi_device *ubi, struct ubi_ecinfo_req __user *ureq)
+{
+ struct ubi_ecinfo_req req;
+ struct ubi_wl_entry *wl;
+ int read_cnt;
+ int peb;
+ int end_peb;
+
+ /* Copy the input arguments */
+ if (copy_from_user(&req, ureq, sizeof(struct ubi_ecinfo_req)))
+ return -EFAULT;
+
+ /* Check input arguments */
+ if (req.length <= 0 || req.start < 0 || req.start >= ubi->peb_count)
+ return -EINVAL;
+
+ if (check_add_overflow(req.start, req.length, &end_peb))
+ return -EINVAL;
+
+ if (end_peb > ubi->peb_count)
+ end_peb = ubi->peb_count;
+
+ /* Check access rights before filling erase_counters array */
+ if (!access_ok((void __user *)ureq->erase_counters,
+ (end_peb - req.start) * sizeof(int32_t)))
+ return -EFAULT;
+
+ /* Fill erase counter array */
+ read_cnt = 0;
+ for (peb = req.start; peb < end_peb; read_cnt++, peb++) {
+ int ec;
+
+ if (ubi_io_is_bad(ubi, peb)) {
+ if (__put_user(UBI_UNKNOWN, ureq->erase_counters + read_cnt))
+ return -EFAULT;
+
+ continue;
+ }
+
+ spin_lock(&ubi->wl_lock);
+
+ wl = ubi->lookuptbl[peb];
+ if (wl)
+ ec = wl->ec;
+ else
+ ec = UBI_UNKNOWN;
+
+ spin_unlock(&ubi->wl_lock);
+
+ if (__put_user(ec, ureq->erase_counters + read_cnt))
+ return -EFAULT;
+ }
+
+ /* Return actual read length */
+ req.read_length = read_cnt;
+
+ /* Copy everything except erase counter array */
+ if (copy_to_user(ureq, &req, sizeof(struct ubi_ecinfo_req)))
+ return -EFAULT;
+
+ return 0;
+}
+
static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -991,6 +1055,12 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
+ case UBI_IOCECNFO:
+ {
+ err = ubi_get_ec_info(ubi, argp);
+ break;
+ }
+
default:
err = -ENOTTY;
break;
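[Editor's note] A hedged user-space sketch of the new UBI_IOCECNFO ioctl; the struct layout (start, length, read_length plus a trailing erase_counters array) is inferred from the handler above, and include/uapi/mtd/ubi-user.h remains the authoritative definition. Bad PEBs and unmapped entries come back as UBI_UNKNOWN:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <mtd/ubi-user.h>

    int main(void)
    {
            int n = 64;     /* how many erase counters we have room for */
            int fd = open("/dev/ubi0", O_RDONLY);
            struct ubi_ecinfo_req *req;

            if (fd < 0)
                    return 1;

            req = malloc(sizeof(*req) + n * sizeof(int32_t));
            if (!req)
                    return 1;

            req->start = 0;         /* first PEB to query */
            req->length = n;

            if (ioctl(fd, UBI_IOCECNFO, req) == 0)
                    for (int i = 0; i < req->read_length; i++)
                            printf("PEB %d: EC %d\n", req->start + i,
                                   req->erase_counters[i]);

            free(req);
            close(fd);
            return 0;
    }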