Diffstat (limited to 'drivers/mtd')
44 files changed, 744 insertions, 889 deletions
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 25bad4318305..3bbaa590c768 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c @@ -96,13 +96,6 @@ struct dataflash { struct mtd_info mtd; }; -static const struct spi_device_id dataflash_dev_ids[] = { - { "at45" }, - { "dataflash" }, - { }, -}; -MODULE_DEVICE_TABLE(spi, dataflash_dev_ids); - #ifdef CONFIG_OF static const struct of_device_id dataflash_dt_ids[] = { { .compatible = "atmel,at45", }, @@ -939,8 +932,6 @@ static struct spi_driver dataflash_driver = { .name = "mtd_dataflash", .of_match_table = of_match_ptr(dataflash_dt_ids), }, - .id_table = dataflash_dev_ids, - .probe = dataflash_probe, .remove = dataflash_remove, .id_table = dataflash_spi_ids, diff --git a/drivers/mtd/hyperbus/rpc-if.c b/drivers/mtd/hyperbus/rpc-if.c index d00d30243403..ef32fca5f785 100644 --- a/drivers/mtd/hyperbus/rpc-if.c +++ b/drivers/mtd/hyperbus/rpc-if.c @@ -56,7 +56,7 @@ static void rpcif_hb_prepare_read(struct rpcif *rpc, void *to, op.data.nbytes = len; op.data.buf.in = to; - rpcif_prepare(rpc, &op, NULL, NULL); + rpcif_prepare(rpc->dev, &op, NULL, NULL); } static void rpcif_hb_prepare_write(struct rpcif *rpc, unsigned long to, @@ -70,7 +70,7 @@ static void rpcif_hb_prepare_write(struct rpcif *rpc, unsigned long to, op.data.nbytes = len; op.data.buf.out = from; - rpcif_prepare(rpc, &op, NULL, NULL); + rpcif_prepare(rpc->dev, &op, NULL, NULL); } static u16 rpcif_hb_read16(struct hyperbus_device *hbdev, unsigned long addr) @@ -81,7 +81,7 @@ static u16 rpcif_hb_read16(struct hyperbus_device *hbdev, unsigned long addr) rpcif_hb_prepare_read(&hyperbus->rpc, &data, addr, 2); - rpcif_manual_xfer(&hyperbus->rpc); + rpcif_manual_xfer(hyperbus->rpc.dev); return data.x[0]; } @@ -94,7 +94,7 @@ static void rpcif_hb_write16(struct hyperbus_device *hbdev, unsigned long addr, rpcif_hb_prepare_write(&hyperbus->rpc, addr, &data, 2); - rpcif_manual_xfer(&hyperbus->rpc); + rpcif_manual_xfer(hyperbus->rpc.dev); } static void rpcif_hb_copy_from(struct hyperbus_device *hbdev, void *to, @@ -105,7 +105,7 @@ static void rpcif_hb_copy_from(struct hyperbus_device *hbdev, void *to, rpcif_hb_prepare_read(&hyperbus->rpc, to, from, len); - rpcif_dirmap_read(&hyperbus->rpc, from, len, to); + rpcif_dirmap_read(hyperbus->rpc.dev, from, len, to); } static const struct hyperbus_ops rpcif_hb_ops = { @@ -130,9 +130,9 @@ static int rpcif_hb_probe(struct platform_device *pdev) platform_set_drvdata(pdev, hyperbus); - rpcif_enable_rpm(&hyperbus->rpc); + pm_runtime_enable(hyperbus->rpc.dev); - error = rpcif_hw_init(&hyperbus->rpc, true); + error = rpcif_hw_init(hyperbus->rpc.dev, true); if (error) goto out_disable_rpm; @@ -150,7 +150,7 @@ static int rpcif_hb_probe(struct platform_device *pdev) return 0; out_disable_rpm: - rpcif_disable_rpm(&hyperbus->rpc); + pm_runtime_disable(hyperbus->rpc.dev); return error; } @@ -160,7 +160,7 @@ static int rpcif_hb_remove(struct platform_device *pdev) hyperbus_unregister_device(&hyperbus->hbdev); - rpcif_disable_rpm(&hyperbus->rpc); + pm_runtime_disable(hyperbus->rpc.dev); return 0; } diff --git a/drivers/mtd/maps/pismo.c b/drivers/mtd/maps/pismo.c index 5fcefcd0baca..3e0fff3f129e 100644 --- a/drivers/mtd/maps/pismo.c +++ b/drivers/mtd/maps/pismo.c @@ -206,8 +206,7 @@ static void pismo_remove(struct i2c_client *client) kfree(pismo); } -static int pismo_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int pismo_probe(struct i2c_client *client) { struct 
pismo_pdata *pdata = client->dev.platform_data; struct pismo_eeprom eeprom; @@ -260,7 +259,7 @@ static struct i2c_driver pismo_driver = { .driver = { .name = "pismo", }, - .probe = pismo_probe, + .probe_new = pismo_probe, .remove = pismo_remove, .id_table = pismo_id, }; diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c index 1e94e7d10b8b..a0a1194dc1d9 100644 --- a/drivers/mtd/mtdblock.c +++ b/drivers/mtd/mtdblock.c @@ -153,7 +153,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos, mtdblk->cache_state = STATE_EMPTY; ret = mtd_read(mtd, sect_start, sect_size, &retlen, mtdblk->cache_data); - if (ret) + if (ret && !mtd_is_bitflip(ret)) return ret; if (retlen != sect_size) return -EIO; @@ -188,8 +188,12 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos, pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n", mtd->name, pos, len); - if (!sect_size) - return mtd_read(mtd, pos, len, &retlen, buf); + if (!sect_size) { + ret = mtd_read(mtd, pos, len, &retlen, buf); + if (ret && !mtd_is_bitflip(ret)) + return ret; + return 0; + } while (len > 0) { unsigned long sect_start = (pos/sect_size)*sect_size; @@ -209,7 +213,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos, memcpy (buf, mtdblk->cache_data + offset, size); } else { ret = mtd_read(mtd, pos, size, &retlen, buf); - if (ret) + if (ret && !mtd_is_bitflip(ret)) return ret; if (retlen != size) return -EIO; diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index d442fa94c872..85f5ee6f06fc 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -577,6 +577,7 @@ static int mtd_part_of_parse(struct mtd_info *master, { struct mtd_part_parser *parser; struct device_node *np; + struct device_node *child; struct property *prop; struct device *dev; const char *compat; @@ -594,6 +595,15 @@ static int mtd_part_of_parse(struct mtd_info *master, else np = of_get_child_by_name(np, "partitions"); + /* + * Don't create devices that are added to a bus but will never get + * probed. That'll cause fw_devlink to block probing of consumers of + * this partition until the partition device is probed. + */ + for_each_child_of_node(np, child) + if (of_device_is_compatible(child, "nvmem-cells")) + of_node_set_flag(child, OF_POPULATED); + of_property_for_each_string(np, "compatible", prop, compat) { parser = mtd_part_get_compatible_parser(compat); if (!parser) diff --git a/drivers/mtd/nand/ecc-mtk.c b/drivers/mtd/nand/ecc-mtk.c index 9f9b201fe706..c75bb8b80cc1 100644 --- a/drivers/mtd/nand/ecc-mtk.c +++ b/drivers/mtd/nand/ecc-mtk.c @@ -40,6 +40,10 @@ #define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE) #define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? 
ECC_ENCCON : ECC_DECCON) +#define ECC_ERRMASK_MT7622 GENMASK(4, 0) +#define ECC_ERRMASK_MT2701 GENMASK(5, 0) +#define ECC_ERRMASK_MT2712 GENMASK(6, 0) + struct mtk_ecc_caps { u32 err_mask; u32 err_shift; @@ -79,6 +83,10 @@ static const u8 ecc_strength_mt7622[] = { 4, 6, 8, 10, 12 }; +static const u8 ecc_strength_mt7986[] = { + 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24 +}; + enum mtk_ecc_regs { ECC_ENCPAR00, ECC_ENCIRQ_EN, @@ -451,7 +459,7 @@ unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc) EXPORT_SYMBOL(mtk_ecc_get_parity_bits); static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = { - .err_mask = 0x3f, + .err_mask = ECC_ERRMASK_MT2701, .err_shift = 8, .ecc_strength = ecc_strength_mt2701, .ecc_regs = mt2701_ecc_regs, @@ -462,7 +470,7 @@ static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = { }; static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = { - .err_mask = 0x7f, + .err_mask = ECC_ERRMASK_MT2712, .err_shift = 8, .ecc_strength = ecc_strength_mt2712, .ecc_regs = mt2712_ecc_regs, @@ -473,7 +481,7 @@ static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = { }; static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = { - .err_mask = 0x1f, + .err_mask = ECC_ERRMASK_MT7622, .err_shift = 5, .ecc_strength = ecc_strength_mt7622, .ecc_regs = mt7622_ecc_regs, @@ -483,6 +491,17 @@ static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = { .pg_irq_sel = 0, }; +static const struct mtk_ecc_caps mtk_ecc_caps_mt7986 = { + .err_mask = ECC_ERRMASK_MT7622, + .err_shift = 8, + .ecc_strength = ecc_strength_mt7986, + .ecc_regs = mt2712_ecc_regs, + .num_ecc_strength = 11, + .ecc_mode_shift = 5, + .parity_bits = 14, + .pg_irq_sel = 1, +}; + static const struct of_device_id mtk_ecc_dt_match[] = { { .compatible = "mediatek,mt2701-ecc", @@ -493,6 +512,9 @@ static const struct of_device_id mtk_ecc_dt_match[] = { }, { .compatible = "mediatek,mt7622-ecc", .data = &mtk_ecc_caps_mt7622, + }, { + .compatible = "mediatek,mt7986-ecc", + .data = &mtk_ecc_caps_mt7986, }, {}, }; diff --git a/drivers/mtd/nand/ecc-mxic.c b/drivers/mtd/nand/ecc-mxic.c index 8afdca731b87..6b487ffe2f2d 100644 --- a/drivers/mtd/nand/ecc-mxic.c +++ b/drivers/mtd/nand/ecc-mxic.c @@ -429,6 +429,7 @@ static int mxic_ecc_data_xfer_wait_for_completion(struct mxic_ecc_engine *mxic) mxic_ecc_enable_int(mxic); ret = wait_for_completion_timeout(&mxic->complete, msecs_to_jiffies(1000)); + ret = ret ? 0 : -ETIMEDOUT; mxic_ecc_disable_int(mxic); } else { ret = readl_poll_timeout(mxic->regs + INTRPT_STS, val, diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index 048b1c8f08ee..170f1185ddc4 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -193,13 +193,6 @@ config MTD_NAND_PASEMI Enables support for NAND Flash interface on PA Semi PWRficient based boards -config MTD_NAND_TMIO - tristate "Toshiba Mobile IO NAND controller" - depends on MFD_TMIO - help - Support for NAND flash connected to a Toshiba Mobile IO - Controller in some PDAs, including the Sharp SL6000x. 
- source "drivers/mtd/nand/raw/brcmnand/Kconfig" config MTD_NAND_BCM47XXNFLASH diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile index fa1d00120310..917cdfb815b9 100644 --- a/drivers/mtd/nand/raw/Makefile +++ b/drivers/mtd/nand/raw/Makefile @@ -23,7 +23,6 @@ omap2_nand-objs := omap2.o obj-$(CONFIG_MTD_NAND_OMAP2) += omap2_nand.o obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD) += omap_elm.o obj-$(CONFIG_MTD_NAND_MARVELL) += marvell_nand.o -obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c index a18d121396aa..e25119e58b69 100644 --- a/drivers/mtd/nand/raw/fsl_elbc_nand.c +++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c @@ -725,6 +725,7 @@ static int fsl_elbc_attach_chip(struct nand_chip *chip) struct fsl_lbc_ctrl *ctrl = priv->ctrl; struct fsl_lbc_regs __iomem *lbc = ctrl->regs; unsigned int al; + u32 br; /* * if ECC was not chosen in DT, decide whether to use HW or SW ECC from @@ -764,6 +765,13 @@ static int fsl_elbc_attach_chip(struct nand_chip *chip) return -EINVAL; } + /* enable/disable HW ECC checking and generating based on if HW ECC was chosen */ + br = in_be32(&lbc->bank[priv->bank].br) & ~BR_DECC; + if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) + out_be32(&lbc->bank[priv->bank].br, br | BR_DECC_CHK_GEN); + else + out_be32(&lbc->bank[priv->bank].br, br | BR_DECC_OFF); + /* calculate FMR Address Length field */ al = 0; if (chip->pagemask & 0xffff0000) diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 42c64dcea767..3034916d2e25 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -288,10 +288,17 @@ static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = { MARVELL_LAYOUT( 2048, 512, 1, 1, 1, 2048, 40, 24, 0, 0, 0), MARVELL_LAYOUT( 2048, 512, 4, 1, 1, 2048, 32, 30, 0, 0, 0), MARVELL_LAYOUT( 2048, 512, 8, 2, 1, 1024, 0, 30,1024,32, 30), + MARVELL_LAYOUT( 2048, 512, 8, 2, 1, 1024, 0, 30,1024,64, 30), + MARVELL_LAYOUT( 2048, 512, 12, 3, 2, 704, 0, 30,640, 0, 30), + MARVELL_LAYOUT( 2048, 512, 16, 5, 4, 512, 0, 30, 0, 32, 30), MARVELL_LAYOUT( 4096, 512, 4, 2, 2, 2048, 32, 30, 0, 0, 0), MARVELL_LAYOUT( 4096, 512, 8, 5, 4, 1024, 0, 30, 0, 64, 30), + MARVELL_LAYOUT( 4096, 512, 12, 6, 5, 704, 0, 30,576, 32, 30), + MARVELL_LAYOUT( 4096, 512, 16, 9, 8, 512, 0, 30, 0, 32, 30), MARVELL_LAYOUT( 8192, 512, 4, 4, 4, 2048, 0, 30, 0, 0, 0), MARVELL_LAYOUT( 8192, 512, 8, 9, 8, 1024, 0, 30, 0, 160, 30), + MARVELL_LAYOUT( 8192, 512, 12, 12, 11, 704, 0, 30,448, 64, 30), + MARVELL_LAYOUT( 8192, 512, 16, 17, 16, 512, 0, 30, 0, 32, 30), }; /** diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index 5ee01231ac4c..074e14225c06 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -176,6 +176,7 @@ struct meson_nfc { dma_addr_t daddr; dma_addr_t iaddr; + u32 info_bytes; unsigned long assigned_cs; }; @@ -279,7 +280,7 @@ static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir, if (raw) { len = mtd->writesize + mtd->oobsize; - cmd = (len & GENMASK(5, 0)) | scrambler | DMA_DIR(dir); + cmd = (len & GENMASK(13, 0)) | scrambler | DMA_DIR(dir); writel(cmd, nfc->reg_base + NFC_REG_CMD); return; } @@ -503,6 +504,7 @@ static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, void *databuf, nfc->daddr, 
datalen, dir); return ret; } + nfc->info_bytes = infolen; cmd = GENCMDIADDRL(NFC_CMD_AIL, nfc->iaddr); writel(cmd, nfc->reg_base + NFC_REG_CMD); @@ -520,8 +522,10 @@ static void meson_nfc_dma_buffer_release(struct nand_chip *nand, struct meson_nfc *nfc = nand_get_controller_data(nand); dma_unmap_single(nfc->dev, nfc->daddr, datalen, dir); - if (infolen) + if (infolen) { dma_unmap_single(nfc->dev, nfc->iaddr, infolen, dir); + nfc->info_bytes = 0; + } } static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len) @@ -540,7 +544,7 @@ static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len) if (ret) goto out; - cmd = NFC_CMD_N2M | (len & GENMASK(5, 0)); + cmd = NFC_CMD_N2M | (len & GENMASK(13, 0)); writel(cmd, nfc->reg_base + NFC_REG_CMD); meson_nfc_drain_cmd(nfc); @@ -564,7 +568,7 @@ static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len) if (ret) return ret; - cmd = NFC_CMD_M2N | (len & GENMASK(5, 0)); + cmd = NFC_CMD_M2N | (len & GENMASK(13, 0)); writel(cmd, nfc->reg_base + NFC_REG_CMD); meson_nfc_drain_cmd(nfc); @@ -710,6 +714,8 @@ static void meson_nfc_check_ecc_pages_valid(struct meson_nfc *nfc, usleep_range(10, 15); /* info is updated by nfc dma engine*/ smp_rmb(); + dma_sync_single_for_cpu(nfc->dev, nfc->iaddr, nfc->info_bytes, + DMA_FROM_DEVICE); ret = *info & ECC_COMPLETE; } while (!ret); } @@ -991,7 +997,7 @@ static const struct mtd_ooblayout_ops meson_ooblayout_ops = { static int meson_nfc_clk_init(struct meson_nfc *nfc) { - struct clk_parent_data nfc_divider_parent_data[1]; + struct clk_parent_data nfc_divider_parent_data[1] = {0}; struct clk_init_data init = {0}; int ret; diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index c3cc66039925..a6af521832aa 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -1208,6 +1208,73 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page, return nand_exec_op(chip, &op); } +static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int page, + unsigned int offset_in_page, void *buf, + unsigned int len, bool check_only) +{ + const struct nand_interface_config *conf = + nand_get_interface_config(chip); + u8 addrs[5]; + struct nand_op_instr start_instrs[] = { + NAND_OP_CMD(NAND_CMD_READ0, 0), + NAND_OP_ADDR(4, addrs, 0), + NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)), + NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), 0), + NAND_OP_CMD(NAND_CMD_READCACHESEQ, NAND_COMMON_TIMING_NS(conf, tWB_max)), + NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), + NAND_COMMON_TIMING_NS(conf, tRR_min)), + NAND_OP_DATA_IN(len, buf, 0), + }; + struct nand_op_instr cont_instrs[] = { + NAND_OP_CMD(page == chip->cont_read.last_page ? 
+ NAND_CMD_READCACHEEND : NAND_CMD_READCACHESEQ, + NAND_COMMON_TIMING_NS(conf, tWB_max)), + NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), + NAND_COMMON_TIMING_NS(conf, tRR_min)), + NAND_OP_DATA_IN(len, buf, 0), + }; + struct nand_operation start_op = NAND_OPERATION(chip->cur_cs, start_instrs); + struct nand_operation cont_op = NAND_OPERATION(chip->cur_cs, cont_instrs); + int ret; + + if (!len) { + start_op.ninstrs--; + cont_op.ninstrs--; + } + + ret = nand_fill_column_cycles(chip, addrs, offset_in_page); + if (ret < 0) + return ret; + + addrs[2] = page; + addrs[3] = page >> 8; + + if (chip->options & NAND_ROW_ADDR_3) { + addrs[4] = page >> 16; + start_instrs[1].ctx.addr.naddrs++; + } + + /* Check if cache reads are supported */ + if (check_only) { + if (nand_check_op(chip, &start_op) || nand_check_op(chip, &cont_op)) + return -EOPNOTSUPP; + + return 0; + } + + if (page == chip->cont_read.first_page) + return nand_exec_op(chip, &start_op); + else + return nand_exec_op(chip, &cont_op); +} + +static bool rawnand_cont_read_ongoing(struct nand_chip *chip, unsigned int page) +{ + return chip->cont_read.ongoing && + page >= chip->cont_read.first_page && + page <= chip->cont_read.last_page; +} + /** * nand_read_page_op - Do a READ PAGE operation * @chip: The NAND chip @@ -1233,10 +1300,16 @@ int nand_read_page_op(struct nand_chip *chip, unsigned int page, return -EINVAL; if (nand_has_exec_op(chip)) { - if (mtd->writesize > 512) - return nand_lp_exec_read_page_op(chip, page, - offset_in_page, buf, - len); + if (mtd->writesize > 512) { + if (rawnand_cont_read_ongoing(chip, page)) + return nand_lp_exec_cont_read_page_op(chip, page, + offset_in_page, + buf, len, false); + else + return nand_lp_exec_read_page_op(chip, page, + offset_in_page, buf, + len); + } return nand_sp_exec_read_page_op(chip, page, offset_in_page, buf, len); @@ -3353,6 +3426,27 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob, return NULL; } +static void rawnand_enable_cont_reads(struct nand_chip *chip, unsigned int page, + u32 readlen, int col) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + + if (!chip->controller->supported_op.cont_read) + return; + + if ((col && col + readlen < (3 * mtd->writesize)) || + (!col && readlen < (2 * mtd->writesize))) { + chip->cont_read.ongoing = false; + return; + } + + chip->cont_read.ongoing = true; + chip->cont_read.first_page = page; + if (col) + chip->cont_read.first_page++; + chip->cont_read.last_page = page + ((readlen >> chip->page_shift) & chip->pagemask); +} + /** * nand_setup_read_retry - [INTERN] Set the READ RETRY mode * @chip: NAND chip object @@ -3426,6 +3520,8 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from, oob = ops->oobbuf; oob_required = oob ? 1 : 0; + rawnand_enable_cont_reads(chip, page, readlen, col); + while (1) { struct mtd_ecc_stats ecc_stats = mtd->ecc_stats; @@ -4991,6 +5087,47 @@ nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc) return manufacturer_desc ? 
manufacturer_desc->name : "Unknown"; } +static void rawnand_check_data_only_read_support(struct nand_chip *chip) +{ + /* Use an arbitrary size for the check */ + if (!nand_read_data_op(chip, NULL, SZ_512, true, true)) + chip->controller->supported_op.data_only_read = 1; +} + +static void rawnand_early_check_supported_ops(struct nand_chip *chip) +{ + /* The supported_op fields should not be set by individual drivers */ + WARN_ON_ONCE(chip->controller->supported_op.data_only_read); + + if (!nand_has_exec_op(chip)) + return; + + rawnand_check_data_only_read_support(chip); +} + +static void rawnand_check_cont_read_support(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + + if (chip->read_retries) + return; + + if (!nand_lp_exec_cont_read_page_op(chip, 0, 0, NULL, + mtd->writesize, true)) + chip->controller->supported_op.cont_read = 1; +} + +static void rawnand_late_check_supported_ops(struct nand_chip *chip) +{ + /* The supported_op fields should not be set by individual drivers */ + WARN_ON_ONCE(chip->controller->supported_op.cont_read); + + if (!nand_has_exec_op(chip)) + return; + + rawnand_check_cont_read_support(chip); +} + /* * Get the flash and manufacturer id and lookup if the type is supported. */ @@ -5023,6 +5160,8 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) /* Select the device */ nand_select_target(chip, 0); + rawnand_early_check_supported_ops(chip); + /* Send the command for reading device ID */ ret = nand_readid_op(chip, 0, id_data, 2); if (ret) @@ -6325,6 +6464,8 @@ static int nand_scan_tail(struct nand_chip *chip) goto err_free_interface_config; } + rawnand_late_check_supported_ops(chip); + /* * Look for secure regions in the NAND chip. These regions are supposed * to be protected by a secure element like Trustzone. So the read/write diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c index 85b6d9372d80..836757717660 100644 --- a/drivers/mtd/nand/raw/nand_jedec.c +++ b/drivers/mtd/nand/raw/nand_jedec.c @@ -46,8 +46,7 @@ int nand_jedec_detect(struct nand_chip *chip) if (!p) return -ENOMEM; - if (!nand_has_exec_op(chip) || - !nand_read_data_op(chip, p, sizeof(*p), true, true)) + if (!nand_has_exec_op(chip) || chip->controller->supported_op.data_only_read) use_datain = true; for (i = 0; i < JEDEC_PARAM_PAGES; i++) { diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c index 7586befce7f9..f15ef90aec8c 100644 --- a/drivers/mtd/nand/raw/nand_onfi.c +++ b/drivers/mtd/nand/raw/nand_onfi.c @@ -166,8 +166,7 @@ int nand_onfi_detect(struct nand_chip *chip) if (!pbuf) return -ENOMEM; - if (!nand_has_exec_op(chip) || - !nand_read_data_op(chip, &pbuf[0], sizeof(*pbuf), true, true)) + if (!nand_has_exec_op(chip) || chip->controller->supported_op.data_only_read) use_datain = true; for (i = 0; i < ONFI_PARAM_PAGES; i++) { diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index c21abf748948..179b28459b4b 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -2160,8 +2160,23 @@ static int ns_exec_op(struct nand_chip *chip, const struct nand_operation *op, const struct nand_op_instr *instr = NULL; struct nandsim *ns = nand_get_controller_data(chip); - if (check_only) + if (check_only) { + /* The current implementation of nandsim needs to know the + * ongoing operation when performing the address cycles. This + * means it cannot make the difference between a regular read + * and a continuous read. 
Hence, this hack to manually refuse + * supporting sequential cached operations. + */ + for (op_id = 0; op_id < op->ninstrs; op_id++) { + instr = &op->instrs[op_id]; + if (instr->type == NAND_OP_CMD_INSTR && + (instr->ctx.cmd.opcode == NAND_CMD_READCACHEEND || + instr->ctx.cmd.opcode == NAND_CMD_READCACHESEQ)) + return -EOPNOTSUPP; + } + return 0; + } ns->lines.ce = 1; diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c index c176036453ed..f7ef6ca06ca9 100644 --- a/drivers/mtd/nand/raw/pasemi_nand.c +++ b/drivers/mtd/nand/raw/pasemi_nand.c @@ -26,9 +26,12 @@ #define CLE_PIN_CTL 15 #define ALE_PIN_CTL 14 -static unsigned int lpcctl; -static struct mtd_info *pasemi_nand_mtd; -static struct nand_controller controller; +struct pasemi_ddata { + struct nand_chip chip; + unsigned int lpcctl; + struct nand_controller controller; +}; + static const char driver_name[] = "pasemi-nand"; static void pasemi_read_buf(struct nand_chip *chip, u_char *buf, int len) @@ -55,6 +58,8 @@ static void pasemi_write_buf(struct nand_chip *chip, const u_char *buf, static void pasemi_hwcontrol(struct nand_chip *chip, int cmd, unsigned int ctrl) { + struct pasemi_ddata *ddata = container_of(chip, struct pasemi_ddata, chip); + if (cmd == NAND_CMD_NONE) return; @@ -65,12 +70,14 @@ static void pasemi_hwcontrol(struct nand_chip *chip, int cmd, /* Push out posted writes */ eieio(); - inl(lpcctl); + inl(ddata->lpcctl); } static int pasemi_device_ready(struct nand_chip *chip) { - return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR); + struct pasemi_ddata *ddata = container_of(chip, struct pasemi_ddata, chip); + + return !!(inl(ddata->lpcctl) & LBICTRL_LPCCTL_NR); } static int pasemi_attach_chip(struct nand_chip *chip) @@ -93,29 +100,31 @@ static int pasemi_nand_probe(struct platform_device *ofdev) struct device_node *np = dev->of_node; struct resource res; struct nand_chip *chip; + struct nand_controller *controller; int err = 0; + struct pasemi_ddata *ddata; + struct mtd_info *pasemi_nand_mtd; err = of_address_to_resource(np, 0, &res); if (err) return -EINVAL; - /* We only support one device at the moment */ - if (pasemi_nand_mtd) - return -ENODEV; - dev_dbg(dev, "pasemi_nand at %pR\n", &res); /* Allocate memory for MTD device structure and private data */ - chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL); - if (!chip) { + ddata = kzalloc(sizeof(*ddata), GFP_KERNEL); + if (!ddata) { err = -ENOMEM; goto out; } + platform_set_drvdata(ofdev, ddata); + chip = &ddata->chip; + controller = &ddata->controller; - controller.ops = &pasemi_ops; - nand_controller_init(&controller); - chip->controller = &controller; + controller->ops = &pasemi_ops; + nand_controller_init(controller); + chip->controller = controller; pasemi_nand_mtd = nand_to_mtd(chip); @@ -136,10 +145,10 @@ static int pasemi_nand_probe(struct platform_device *ofdev) goto out_ior; } - lpcctl = pci_resource_start(pdev, 0); + ddata->lpcctl = pci_resource_start(pdev, 0); pci_dev_put(pdev); - if (!request_region(lpcctl, 4, driver_name)) { + if (!request_region(ddata->lpcctl, 4, driver_name)) { err = -EBUSY; goto out_ior; } @@ -172,45 +181,43 @@ static int pasemi_nand_probe(struct platform_device *ofdev) } dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res, - lpcctl); + ddata->lpcctl); return 0; out_cleanup_nand: nand_cleanup(chip); out_lpc: - release_region(lpcctl, 4); + release_region(ddata->lpcctl, 4); out_ior: iounmap(chip->legacy.IO_ADDR_R); out_mtd: - kfree(chip); + kfree(ddata); out: return err; } static int 
pasemi_nand_remove(struct platform_device *ofdev) { - struct nand_chip *chip; + struct pasemi_ddata *ddata = platform_get_drvdata(ofdev); + struct mtd_info *pasemi_nand_mtd; int ret; + struct nand_chip *chip; - if (!pasemi_nand_mtd) - return 0; - - chip = mtd_to_nand(pasemi_nand_mtd); + chip = &ddata->chip; + pasemi_nand_mtd = nand_to_mtd(chip); /* Release resources, unregister device */ ret = mtd_device_unregister(pasemi_nand_mtd); WARN_ON(ret); nand_cleanup(chip); - release_region(lpcctl, 4); + release_region(ddata->lpcctl, 4); iounmap(chip->legacy.IO_ADDR_R); /* Free the MTD device structure */ - kfree(chip); - - pasemi_nand_mtd = NULL; + kfree(ddata); return 0; } diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 5d627048c420..9e74bcd90aaa 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -1531,6 +1531,9 @@ static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr, if (IS_ERR(sdrt)) return PTR_ERR(sdrt); + if (conf->timings.mode > 3) + return -EOPNOTSUPP; + if (chipnr == NAND_DATA_IFACE_CHECK_ONLY) return 0; diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index ea953e31933e..13e3e0198d15 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -172,10 +172,10 @@ struct sunxi_nand_chip_sel { /** * struct sunxi_nand_hw_ecc - stores information related to HW ECC support * - * @mode: the sunxi ECC mode field deduced from ECC requirements + * @ecc_ctl: ECC_CTL register value for this NAND chip */ struct sunxi_nand_hw_ecc { - int mode; + u32 ecc_ctl; }; /** @@ -193,7 +193,7 @@ struct sunxi_nand_hw_ecc { struct sunxi_nand_chip { struct list_head node; struct nand_chip nand; - struct sunxi_nand_hw_ecc *ecc; + struct sunxi_nand_hw_ecc ecc; unsigned long clk_rate; u32 timing_cfg; u32 timing_ctl; @@ -421,7 +421,7 @@ static void sunxi_nfc_select_chip(struct nand_chip *nand, unsigned int cs) struct sunxi_nand_chip_sel *sel; u32 ctl; - if (cs > 0 && cs >= sunxi_nand->nsels) + if (cs >= sunxi_nand->nsels) return; ctl = readl(nfc->regs + NFC_REG_CTL) & @@ -689,26 +689,15 @@ static void sunxi_nfc_hw_ecc_enable(struct nand_chip *nand) { struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); - u32 ecc_ctl; - - ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL); - ecc_ctl &= ~(NFC_ECC_MODE_MSK | NFC_ECC_PIPELINE | - NFC_ECC_BLOCK_SIZE_MSK); - ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(sunxi_nand->ecc->mode) | - NFC_ECC_EXCEPTION | NFC_ECC_PIPELINE; - - if (nand->ecc.size == 512) - ecc_ctl |= NFC_ECC_BLOCK_512; - writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL); + writel(sunxi_nand->ecc.ecc_ctl, nfc->regs + NFC_REG_ECC_CTL); } static void sunxi_nfc_hw_ecc_disable(struct nand_chip *nand) { struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); - writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_ECC_EN, - nfc->regs + NFC_REG_ECC_CTL); + writel(0, nfc->regs + NFC_REG_ECC_CTL); } static inline void sunxi_nfc_user_data_to_buf(u32 user_data, u8 *buf) @@ -1604,12 +1593,19 @@ static int sunxi_nand_ooblayout_free(struct mtd_info *mtd, int section, return 0; } + /* + * The controller does not provide access to OOB bytes + * past the end of the ECC data. 
+ */ + if (section == ecc->steps && ecc->engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) + return -ERANGE; + oobregion->offset = section * (ecc->bytes + 4); if (section < ecc->steps) oobregion->length = 4; else - oobregion->offset = mtd->oobsize - oobregion->offset; + oobregion->length = mtd->oobsize - oobregion->offset; return 0; } @@ -1619,11 +1615,6 @@ static const struct mtd_ooblayout_ops sunxi_nand_ooblayout_ops = { .free = sunxi_nand_ooblayout_free, }; -static void sunxi_nand_hw_ecc_ctrl_cleanup(struct sunxi_nand_chip *sunxi_nand) -{ - kfree(sunxi_nand->ecc); -} - static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, struct nand_ecc_ctrl *ecc, struct device_node *np) @@ -1634,7 +1625,6 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, struct mtd_info *mtd = nand_to_mtd(nand); struct nand_device *nanddev = mtd_to_nanddev(mtd); int nsectors; - int ret; int i; if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) { @@ -1669,10 +1659,6 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, if (ecc->size != 512 && ecc->size != 1024) return -EINVAL; - sunxi_nand->ecc = kzalloc(sizeof(*sunxi_nand->ecc), GFP_KERNEL); - if (!sunxi_nand->ecc) - return -ENOMEM; - /* Prefer 1k ECC chunk over 512 ones */ if (ecc->size == 512 && mtd->writesize > 512) { ecc->size = 1024; @@ -1693,12 +1679,9 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, if (i >= ARRAY_SIZE(strengths)) { dev_err(nfc->dev, "unsupported strength\n"); - ret = -ENOTSUPP; - goto err; + return -ENOTSUPP; } - sunxi_nand->ecc->mode = i; - /* HW ECC always request ECC bytes for 1024 bytes blocks */ ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * 1024), 8); @@ -1707,10 +1690,8 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, nsectors = mtd->writesize / ecc->size; - if (mtd->oobsize < ((ecc->bytes + 4) * nsectors)) { - ret = -EINVAL; - goto err; - } + if (mtd->oobsize < ((ecc->bytes + 4) * nsectors)) + return -EINVAL; ecc->read_oob = sunxi_nfc_hw_ecc_read_oob; ecc->write_oob = sunxi_nfc_hw_ecc_write_oob; @@ -1732,26 +1713,13 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, ecc->read_oob_raw = nand_read_oob_std; ecc->write_oob_raw = nand_write_oob_std; - return 0; - -err: - kfree(sunxi_nand->ecc); + sunxi_nand->ecc.ecc_ctl = NFC_ECC_MODE(i) | NFC_ECC_EXCEPTION | + NFC_ECC_PIPELINE | NFC_ECC_EN; - return ret; -} + if (ecc->size == 512) + sunxi_nand->ecc.ecc_ctl |= NFC_ECC_BLOCK_512; -static void sunxi_nand_ecc_cleanup(struct sunxi_nand_chip *sunxi_nand) -{ - struct nand_ecc_ctrl *ecc = &sunxi_nand->nand.ecc; - - switch (ecc->engine_type) { - case NAND_ECC_ENGINE_TYPE_ON_HOST: - sunxi_nand_hw_ecc_ctrl_cleanup(sunxi_nand); - break; - case NAND_ECC_ENGINE_TYPE_NONE: - default: - break; - } + return 0; } static int sunxi_nand_attach_chip(struct nand_chip *nand) @@ -1950,6 +1918,24 @@ static const struct nand_controller_ops sunxi_nand_controller_ops = { .exec_op = sunxi_nfc_exec_op, }; +static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc) +{ + struct sunxi_nand_chip *sunxi_nand; + struct nand_chip *chip; + int ret; + + while (!list_empty(&nfc->chips)) { + sunxi_nand = list_first_entry(&nfc->chips, + struct sunxi_nand_chip, + node); + chip = &sunxi_nand->nand; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + list_del(&sunxi_nand->node); + } +} + static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, struct device_node *np) { @@ -2041,18 +2027,13 @@ static int sunxi_nand_chips_init(struct 
device *dev, struct sunxi_nfc *nfc) { struct device_node *np = dev->of_node; struct device_node *nand_np; - int nchips = of_get_child_count(np); int ret; - if (nchips > 8) { - dev_err(dev, "too many NAND chips: %d (max = 8)\n", nchips); - return -EINVAL; - } - for_each_child_of_node(np, nand_np) { ret = sunxi_nand_chip_init(dev, nfc, nand_np); if (ret) { of_node_put(nand_np); + sunxi_nand_chips_cleanup(nfc); return ret; } } @@ -2060,25 +2041,6 @@ static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc) return 0; } -static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc) -{ - struct sunxi_nand_chip *sunxi_nand; - struct nand_chip *chip; - int ret; - - while (!list_empty(&nfc->chips)) { - sunxi_nand = list_first_entry(&nfc->chips, - struct sunxi_nand_chip, - node); - chip = &sunxi_nand->nand; - ret = mtd_device_unregister(nand_to_mtd(chip)); - WARN_ON(ret); - nand_cleanup(chip); - sunxi_nand_ecc_cleanup(sunxi_nand); - list_del(&sunxi_nand->node); - } -} - static int sunxi_nfc_dma_init(struct sunxi_nfc *nfc, struct resource *r) { int ret; diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c deleted file mode 100644 index 8f1a42bf199c..000000000000 --- a/drivers/mtd/nand/raw/tmio_nand.c +++ /dev/null @@ -1,533 +0,0 @@ -/* - * Toshiba TMIO NAND flash controller driver - * - * Slightly murky pre-git history of the driver: - * - * Copyright (c) Ian Molton 2004, 2005, 2008 - * Original work, independent of sharps code. Included hardware ECC support. - * Hard ECC did not work for writes in the early revisions. - * Copyright (c) Dirk Opfer 2005. - * Modifications developed from sharps code but - * NOT containing any, ported onto Ians base. - * Copyright (c) Chris Humbert 2005 - * Copyright (c) Dmitry Baryshkov 2008 - * Minor fixes - * - * Parts copyright Sebastian Carlier - * - * This file is licensed under - * the terms of the GNU General Public License version 2. This program - * is licensed "as is" without any warranty of any kind, whether express - * or implied. 
- * - */ - - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/mfd/core.h> -#include <linux/mfd/tmio.h> -#include <linux/delay.h> -#include <linux/io.h> -#include <linux/irq.h> -#include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/mtd/mtd.h> -#include <linux/mtd/rawnand.h> -#include <linux/mtd/partitions.h> -#include <linux/slab.h> - -/*--------------------------------------------------------------------------*/ - -/* - * NAND Flash Host Controller Configuration Register - */ -#define CCR_COMMAND 0x04 /* w Command */ -#define CCR_BASE 0x10 /* l NAND Flash Control Reg Base Addr */ -#define CCR_INTP 0x3d /* b Interrupt Pin */ -#define CCR_INTE 0x48 /* b Interrupt Enable */ -#define CCR_EC 0x4a /* b Event Control */ -#define CCR_ICC 0x4c /* b Internal Clock Control */ -#define CCR_ECCC 0x5b /* b ECC Control */ -#define CCR_NFTC 0x60 /* b NAND Flash Transaction Control */ -#define CCR_NFM 0x61 /* b NAND Flash Monitor */ -#define CCR_NFPSC 0x62 /* b NAND Flash Power Supply Control */ -#define CCR_NFDC 0x63 /* b NAND Flash Detect Control */ - -/* - * NAND Flash Control Register - */ -#define FCR_DATA 0x00 /* bwl Data Register */ -#define FCR_MODE 0x04 /* b Mode Register */ -#define FCR_STATUS 0x05 /* b Status Register */ -#define FCR_ISR 0x06 /* b Interrupt Status Register */ -#define FCR_IMR 0x07 /* b Interrupt Mask Register */ - -/* FCR_MODE Register Command List */ -#define FCR_MODE_DATA 0x94 /* Data Data_Mode */ -#define FCR_MODE_COMMAND 0x95 /* Data Command_Mode */ -#define FCR_MODE_ADDRESS 0x96 /* Data Address_Mode */ - -#define FCR_MODE_HWECC_CALC 0xB4 /* HW-ECC Data */ -#define FCR_MODE_HWECC_RESULT 0xD4 /* HW-ECC Calc result Read_Mode */ -#define FCR_MODE_HWECC_RESET 0xF4 /* HW-ECC Reset */ - -#define FCR_MODE_POWER_ON 0x0C /* Power Supply ON to SSFDC card */ -#define FCR_MODE_POWER_OFF 0x08 /* Power Supply OFF to SSFDC card */ - -#define FCR_MODE_LED_OFF 0x00 /* LED OFF */ -#define FCR_MODE_LED_ON 0x04 /* LED ON */ - -#define FCR_MODE_EJECT_ON 0x68 /* Ejection events active */ -#define FCR_MODE_EJECT_OFF 0x08 /* Ejection events ignored */ - -#define FCR_MODE_LOCK 0x6C /* Lock_Mode. Eject Switch Invalid */ -#define FCR_MODE_UNLOCK 0x0C /* UnLock_Mode. 
Eject Switch is valid */ - -#define FCR_MODE_CONTROLLER_ID 0x40 /* Controller ID Read */ -#define FCR_MODE_STANDBY 0x00 /* SSFDC card Changes Standby State */ - -#define FCR_MODE_WE 0x80 -#define FCR_MODE_ECC1 0x40 -#define FCR_MODE_ECC0 0x20 -#define FCR_MODE_CE 0x10 -#define FCR_MODE_PCNT1 0x08 -#define FCR_MODE_PCNT0 0x04 -#define FCR_MODE_ALE 0x02 -#define FCR_MODE_CLE 0x01 - -#define FCR_STATUS_BUSY 0x80 - -/*--------------------------------------------------------------------------*/ - -struct tmio_nand { - struct nand_controller controller; - struct nand_chip chip; - struct completion comp; - - struct platform_device *dev; - - void __iomem *ccr; - void __iomem *fcr; - unsigned long fcr_base; - - unsigned int irq; - - /* for tmio_nand_read_byte */ - u8 read; - unsigned read_good:1; -}; - -static inline struct tmio_nand *mtd_to_tmio(struct mtd_info *mtd) -{ - return container_of(mtd_to_nand(mtd), struct tmio_nand, chip); -} - - -/*--------------------------------------------------------------------------*/ - -static void tmio_nand_hwcontrol(struct nand_chip *chip, int cmd, - unsigned int ctrl) -{ - struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip)); - - if (ctrl & NAND_CTRL_CHANGE) { - u8 mode; - - if (ctrl & NAND_NCE) { - mode = FCR_MODE_DATA; - - if (ctrl & NAND_CLE) - mode |= FCR_MODE_CLE; - else - mode &= ~FCR_MODE_CLE; - - if (ctrl & NAND_ALE) - mode |= FCR_MODE_ALE; - else - mode &= ~FCR_MODE_ALE; - } else { - mode = FCR_MODE_STANDBY; - } - - tmio_iowrite8(mode, tmio->fcr + FCR_MODE); - tmio->read_good = 0; - } - - if (cmd != NAND_CMD_NONE) - tmio_iowrite8(cmd, chip->legacy.IO_ADDR_W); -} - -static int tmio_nand_dev_ready(struct nand_chip *chip) -{ - struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip)); - - return !(tmio_ioread8(tmio->fcr + FCR_STATUS) & FCR_STATUS_BUSY); -} - -static irqreturn_t tmio_irq(int irq, void *__tmio) -{ - struct tmio_nand *tmio = __tmio; - - /* disable RDYREQ interrupt */ - tmio_iowrite8(0x00, tmio->fcr + FCR_IMR); - complete(&tmio->comp); - - return IRQ_HANDLED; -} - -/* - *The TMIO core has a RDYREQ interrupt on the posedge of #SMRB. - *This interrupt is normally disabled, but for long operations like - *erase and write, we enable it to wake us up. The irq handler - *disables the interrupt. - */ -static int tmio_nand_wait(struct nand_chip *nand_chip) -{ - struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(nand_chip)); - long timeout; - u8 status; - - /* enable RDYREQ interrupt */ - - tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR); - reinit_completion(&tmio->comp); - tmio_iowrite8(0x81, tmio->fcr + FCR_IMR); - - timeout = 400; - timeout = wait_for_completion_timeout(&tmio->comp, - msecs_to_jiffies(timeout)); - - if (unlikely(!tmio_nand_dev_ready(nand_chip))) { - tmio_iowrite8(0x00, tmio->fcr + FCR_IMR); - dev_warn(&tmio->dev->dev, "still busy after 400 ms\n"); - - } else if (unlikely(!timeout)) { - tmio_iowrite8(0x00, tmio->fcr + FCR_IMR); - dev_warn(&tmio->dev->dev, "timeout waiting for interrupt\n"); - } - - nand_status_op(nand_chip, &status); - return status; -} - -/* - *The TMIO controller combines two 8-bit data bytes into one 16-bit - *word. This function separates them so nand_base.c works as expected, - *especially its NAND_CMD_READID routines. - * - *To prevent stale data from being read, tmio_nand_hwcontrol() clears - *tmio->read_good. 
- */ -static u_char tmio_nand_read_byte(struct nand_chip *chip) -{ - struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip)); - unsigned int data; - - if (tmio->read_good--) - return tmio->read; - - data = tmio_ioread16(tmio->fcr + FCR_DATA); - tmio->read = data >> 8; - return data; -} - -/* - *The TMIO controller converts an 8-bit NAND interface to a 16-bit - *bus interface, so all data reads and writes must be 16-bit wide. - *Thus, we implement 16-bit versions of the read, write, and verify - *buffer functions. - */ -static void -tmio_nand_write_buf(struct nand_chip *chip, const u_char *buf, int len) -{ - struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip)); - - tmio_iowrite16_rep(tmio->fcr + FCR_DATA, buf, len >> 1); -} - -static void tmio_nand_read_buf(struct nand_chip *chip, u_char *buf, int len) -{ - struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip)); - - tmio_ioread16_rep(tmio->fcr + FCR_DATA, buf, len >> 1); -} - -static void tmio_nand_enable_hwecc(struct nand_chip *chip, int mode) -{ - struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip)); - - tmio_iowrite8(FCR_MODE_HWECC_RESET, tmio->fcr + FCR_MODE); - tmio_ioread8(tmio->fcr + FCR_DATA); /* dummy read */ - tmio_iowrite8(FCR_MODE_HWECC_CALC, tmio->fcr + FCR_MODE); -} - -static int tmio_nand_calculate_ecc(struct nand_chip *chip, const u_char *dat, - u_char *ecc_code) -{ - struct tmio_nand *tmio = mtd_to_tmio(nand_to_mtd(chip)); - unsigned int ecc; - - tmio_iowrite8(FCR_MODE_HWECC_RESULT, tmio->fcr + FCR_MODE); - - ecc = tmio_ioread16(tmio->fcr + FCR_DATA); - ecc_code[1] = ecc; /* 000-255 LP7-0 */ - ecc_code[0] = ecc >> 8; /* 000-255 LP15-8 */ - ecc = tmio_ioread16(tmio->fcr + FCR_DATA); - ecc_code[2] = ecc; /* 000-255 CP5-0,11b */ - ecc_code[4] = ecc >> 8; /* 256-511 LP7-0 */ - ecc = tmio_ioread16(tmio->fcr + FCR_DATA); - ecc_code[3] = ecc; /* 256-511 LP15-8 */ - ecc_code[5] = ecc >> 8; /* 256-511 CP5-0,11b */ - - tmio_iowrite8(FCR_MODE_DATA, tmio->fcr + FCR_MODE); - return 0; -} - -static int tmio_nand_correct_data(struct nand_chip *chip, unsigned char *buf, - unsigned char *read_ecc, - unsigned char *calc_ecc) -{ - int r0, r1; - - /* assume ecc.size = 512 and ecc.bytes = 6 */ - r0 = rawnand_sw_hamming_correct(chip, buf, read_ecc, calc_ecc); - if (r0 < 0) - return r0; - r1 = rawnand_sw_hamming_correct(chip, buf + 256, read_ecc + 3, - calc_ecc + 3); - if (r1 < 0) - return r1; - return r0 + r1; -} - -static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio) -{ - const struct mfd_cell *cell = mfd_get_cell(dev); - int ret; - - if (cell->enable) { - ret = cell->enable(dev); - if (ret) - return ret; - } - - /* (4Ch) CLKRUN Enable 1st spcrunc */ - tmio_iowrite8(0x81, tmio->ccr + CCR_ICC); - - /* (10h)BaseAddress 0x1000 spba.spba2 */ - tmio_iowrite16(tmio->fcr_base, tmio->ccr + CCR_BASE); - tmio_iowrite16(tmio->fcr_base >> 16, tmio->ccr + CCR_BASE + 2); - - /* (04h)Command Register I/O spcmd */ - tmio_iowrite8(0x02, tmio->ccr + CCR_COMMAND); - - /* (62h) Power Supply Control ssmpwc */ - /* HardPowerOFF - SuspendOFF - PowerSupplyWait_4MS */ - tmio_iowrite8(0x02, tmio->ccr + CCR_NFPSC); - - /* (63h) Detect Control ssmdtc */ - tmio_iowrite8(0x02, tmio->ccr + CCR_NFDC); - - /* Interrupt status register clear sintst */ - tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR); - - /* After power supply, Media are reset smode */ - tmio_iowrite8(FCR_MODE_POWER_ON, tmio->fcr + FCR_MODE); - tmio_iowrite8(FCR_MODE_COMMAND, tmio->fcr + FCR_MODE); - tmio_iowrite8(NAND_CMD_RESET, tmio->fcr + FCR_DATA); - - /* Standby Mode smode */ - 
tmio_iowrite8(FCR_MODE_STANDBY, tmio->fcr + FCR_MODE); - - mdelay(5); - - return 0; -} - -static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio) -{ - const struct mfd_cell *cell = mfd_get_cell(dev); - - tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE); - if (cell->disable) - cell->disable(dev); -} - -static int tmio_attach_chip(struct nand_chip *chip) -{ - if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) - return 0; - - chip->ecc.size = 512; - chip->ecc.bytes = 6; - chip->ecc.strength = 2; - chip->ecc.hwctl = tmio_nand_enable_hwecc; - chip->ecc.calculate = tmio_nand_calculate_ecc; - chip->ecc.correct = tmio_nand_correct_data; - - return 0; -} - -static const struct nand_controller_ops tmio_ops = { - .attach_chip = tmio_attach_chip, -}; - -static int tmio_probe(struct platform_device *dev) -{ - struct tmio_nand_data *data = dev_get_platdata(&dev->dev); - struct resource *fcr = platform_get_resource(dev, - IORESOURCE_MEM, 0); - struct resource *ccr = platform_get_resource(dev, - IORESOURCE_MEM, 1); - int irq = platform_get_irq(dev, 0); - struct tmio_nand *tmio; - struct mtd_info *mtd; - struct nand_chip *nand_chip; - int retval; - - if (data == NULL) - dev_warn(&dev->dev, "NULL platform data!\n"); - - if (!ccr || !fcr) - return -EINVAL; - - tmio = devm_kzalloc(&dev->dev, sizeof(*tmio), GFP_KERNEL); - if (!tmio) - return -ENOMEM; - - init_completion(&tmio->comp); - - tmio->dev = dev; - - platform_set_drvdata(dev, tmio); - nand_chip = &tmio->chip; - mtd = nand_to_mtd(nand_chip); - mtd->name = "tmio-nand"; - mtd->dev.parent = &dev->dev; - - nand_controller_init(&tmio->controller); - tmio->controller.ops = &tmio_ops; - nand_chip->controller = &tmio->controller; - - tmio->ccr = devm_ioremap(&dev->dev, ccr->start, resource_size(ccr)); - if (!tmio->ccr) - return -EIO; - - tmio->fcr_base = fcr->start & 0xfffff; - tmio->fcr = devm_ioremap(&dev->dev, fcr->start, resource_size(fcr)); - if (!tmio->fcr) - return -EIO; - - retval = tmio_hw_init(dev, tmio); - if (retval) - return retval; - - /* Set address of NAND IO lines */ - nand_chip->legacy.IO_ADDR_R = tmio->fcr; - nand_chip->legacy.IO_ADDR_W = tmio->fcr; - - /* Set address of hardware control function */ - nand_chip->legacy.cmd_ctrl = tmio_nand_hwcontrol; - nand_chip->legacy.dev_ready = tmio_nand_dev_ready; - nand_chip->legacy.read_byte = tmio_nand_read_byte; - nand_chip->legacy.write_buf = tmio_nand_write_buf; - nand_chip->legacy.read_buf = tmio_nand_read_buf; - - if (data) - nand_chip->badblock_pattern = data->badblock_pattern; - - /* 15 us command delay time */ - nand_chip->legacy.chip_delay = 15; - - retval = devm_request_irq(&dev->dev, irq, &tmio_irq, 0, - dev_name(&dev->dev), tmio); - if (retval) { - dev_err(&dev->dev, "request_irq error %d\n", retval); - goto err_irq; - } - - tmio->irq = irq; - nand_chip->legacy.waitfunc = tmio_nand_wait; - - /* Scan to find existence of the device */ - retval = nand_scan(nand_chip, 1); - if (retval) - goto err_irq; - - /* Register the partitions */ - retval = mtd_device_parse_register(mtd, - data ? data->part_parsers : NULL, - NULL, - data ? data->partition : NULL, - data ? 
data->num_partitions : 0); - if (!retval) - return retval; - - nand_cleanup(nand_chip); - -err_irq: - tmio_hw_stop(dev, tmio); - return retval; -} - -static int tmio_remove(struct platform_device *dev) -{ - struct tmio_nand *tmio = platform_get_drvdata(dev); - struct nand_chip *chip = &tmio->chip; - int ret; - - ret = mtd_device_unregister(nand_to_mtd(chip)); - WARN_ON(ret); - nand_cleanup(chip); - tmio_hw_stop(dev, tmio); - return 0; -} - -#ifdef CONFIG_PM -static int tmio_suspend(struct platform_device *dev, pm_message_t state) -{ - const struct mfd_cell *cell = mfd_get_cell(dev); - - if (cell->suspend) - cell->suspend(dev); - - tmio_hw_stop(dev, platform_get_drvdata(dev)); - return 0; -} - -static int tmio_resume(struct platform_device *dev) -{ - const struct mfd_cell *cell = mfd_get_cell(dev); - - /* FIXME - is this required or merely another attack of the broken - * SHARP platform? Looks suspicious. - */ - tmio_hw_init(dev, platform_get_drvdata(dev)); - - if (cell->resume) - cell->resume(dev); - - return 0; -} -#else -#define tmio_suspend NULL -#define tmio_resume NULL -#endif - -static struct platform_driver tmio_driver = { - .driver.name = "tmio-nand", - .driver.owner = THIS_MODULE, - .probe = tmio_probe, - .remove = tmio_remove, - .suspend = tmio_suspend, - .resume = tmio_resume, -}; - -module_platform_driver(tmio_driver); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Ian Molton, Dirk Opfer, Chris Humbert, Dmitry Baryshkov"); -MODULE_DESCRIPTION("NAND flash driver on Toshiba Mobile IO controller"); -MODULE_ALIAS("platform:tmio-nand"); diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c index a2b89b75073f..b643332ea1ff 100644 --- a/drivers/mtd/nand/raw/vf610_nfc.c +++ b/drivers/mtd/nand/raw/vf610_nfc.c @@ -206,7 +206,7 @@ static inline bool vf610_nfc_kernel_is_little_endian(void) #endif } -/** +/* * Read accessor for internal SRAM buffer * @dst: destination address in regular memory * @src: source address in SRAM buffer @@ -241,7 +241,7 @@ static inline void vf610_nfc_rd_from_sram(void *dst, const void __iomem *src, } } -/** +/* * Write accessor for internal SRAM buffer * @dst: destination address in SRAM buffer * @src: source address in regular memory diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile index b520fe634041..4ec973b8b6bf 100644 --- a/drivers/mtd/nand/spi/Makefile +++ b/drivers/mtd/nand/spi/Makefile @@ -1,3 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 -spinand-objs := core.o ato.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o xtx.o +spinand-objs := core.o alliancememory.o ato.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o xtx.o obj-$(CONFIG_MTD_SPI_NAND) += spinand.o diff --git a/drivers/mtd/nand/spi/alliancememory.c b/drivers/mtd/nand/spi/alliancememory.c new file mode 100644 index 000000000000..7936ea546b03 --- /dev/null +++ b/drivers/mtd/nand/spi/alliancememory.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Author: Mario Kicherer <dev@kicherer.org> + */ + +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/mtd/spinand.h> + +#define SPINAND_MFR_ALLIANCEMEMORY 0x52 + +#define AM_STATUS_ECC_BITMASK (3 << 4) + +#define AM_STATUS_ECC_NONE_DETECTED (0 << 4) +#define AM_STATUS_ECC_CORRECTED (1 << 4) +#define AM_STATUS_ECC_ERRORED (2 << 4) +#define AM_STATUS_ECC_MAX_CORRECTED (3 << 4) + +static SPINAND_OP_VARIANTS(read_cache_variants, + SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), + 
SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); + +static SPINAND_OP_VARIANTS(write_cache_variants, + SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), + SPINAND_PROG_LOAD(true, 0, NULL, 0)); + +static SPINAND_OP_VARIANTS(update_cache_variants, + SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), + SPINAND_PROG_LOAD(false, 0, NULL, 0)); + +static int am_get_eccsize(struct mtd_info *mtd) +{ + if (mtd->oobsize == 64) + return 0x20; + else if (mtd->oobsize == 128) + return 0x38; + else if (mtd->oobsize == 256) + return 0x70; + else + return -EINVAL; +} + +static int am_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + int ecc_bytes; + + ecc_bytes = am_get_eccsize(mtd); + if (ecc_bytes < 0) + return ecc_bytes; + + region->offset = mtd->oobsize - ecc_bytes; + region->length = ecc_bytes; + + return 0; +} + +static int am_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + int ecc_bytes; + + if (section) + return -ERANGE; + + ecc_bytes = am_get_eccsize(mtd); + if (ecc_bytes < 0) + return ecc_bytes; + + /* + * It is unclear how many bytes are used for the bad block marker. We + * reserve the common two bytes here. + * + * The free area in this kind of flash is divided into chunks where the + * first 4 bytes of each chunk are unprotected. The number of chunks + * depends on the specific model. The models with 4096+256 bytes pages + * have 8 chunks, the others 4 chunks. + */ + + region->offset = 2; + region->length = mtd->oobsize - 2 - ecc_bytes; + + return 0; +} + +static const struct mtd_ooblayout_ops am_ooblayout = { + .ecc = am_ooblayout_ecc, + .free = am_ooblayout_free, +}; + +static int am_ecc_get_status(struct spinand_device *spinand, u8 status) +{ + switch (status & AM_STATUS_ECC_BITMASK) { + case AM_STATUS_ECC_NONE_DETECTED: + return 0; + + case AM_STATUS_ECC_CORRECTED: + /* + * use oobsize to determine the flash model and the maximum of + * correctable errors and return maximum - 1 by convention + */ + if (spinand->base.mtd.oobsize == 64) + return 3; + else + return 7; + + case AM_STATUS_ECC_ERRORED: + return -EBADMSG; + + case AM_STATUS_ECC_MAX_CORRECTED: + /* + * use oobsize to determine the flash model and the maximum of + * correctable errors + */ + if (spinand->base.mtd.oobsize == 64) + return 4; + else + return 8; + + default: + break; + } + + return -EINVAL; +} + +static const struct spinand_info alliancememory_spinand_table[] = { + SPINAND_INFO("AS5F34G04SND", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x2f), + NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 1, 1), + NAND_ECCREQ(4, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&am_ooblayout, + am_ecc_get_status)), +}; + +static const struct spinand_manufacturer_ops alliancememory_spinand_manuf_ops = { +}; + +const struct spinand_manufacturer alliancememory_spinand_manufacturer = { + .id = SPINAND_MFR_ALLIANCEMEMORY, + .name = "AllianceMemory", + .chips = alliancememory_spinand_table, + .nchips = ARRAY_SIZE(alliancememory_spinand_table), + .ops = &alliancememory_spinand_manuf_ops, +}; diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index dacd9c0e8b20..638391f77d8c 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -937,6 +937,7 @@ static const struct nand_ops 
spinand_ops = { }; static const struct spinand_manufacturer *spinand_manufacturers[] = { + &alliancememory_spinand_manufacturer, &ato_spinand_manufacturer, &gigadevice_spinand_manufacturer, ¯onix_spinand_manufacturer, diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c index dce835132a1e..722a9738ba37 100644 --- a/drivers/mtd/nand/spi/macronix.c +++ b/drivers/mtd/nand/spi/macronix.c @@ -83,9 +83,10 @@ static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand, * in order to avoid forcing the wear-leveling layer to move * data around if it's not necessary. */ - if (mx35lf1ge4ab_get_eccsr(spinand, &eccsr)) + if (mx35lf1ge4ab_get_eccsr(spinand, spinand->scratchbuf)) return nanddev_get_ecc_conf(nand)->strength; + eccsr = *spinand->scratchbuf; if (WARN_ON(eccsr > nanddev_get_ecc_conf(nand)->strength || !eccsr)) return nanddev_get_ecc_conf(nand)->strength; diff --git a/drivers/mtd/parsers/ofpart_core.c b/drivers/mtd/parsers/ofpart_core.c index 192190c42fc8..e7b8e9d0a910 100644 --- a/drivers/mtd/parsers/ofpart_core.c +++ b/drivers/mtd/parsers/ofpart_core.c @@ -122,6 +122,25 @@ static int parse_fixed_partitions(struct mtd_info *master, a_cells = of_n_addr_cells(pp); s_cells = of_n_size_cells(pp); + if (!dedicated && s_cells == 0) { + /* + * This is a ugly workaround to not create + * regression on devices that are still creating + * partitions as direct children of the nand controller. + * This can happen in case the nand controller node has + * #size-cells equal to 0 and the firmware (e.g. + * U-Boot) just add the partitions there assuming + * 32-bit addressing. + * + * If you get this warning your firmware and/or DTS + * should be really fixed. + * + * This is working only for devices smaller than 4GiB. + */ + pr_warn("%s: ofpart partition %pOF (%pOF) #size-cells is wrongly set to <0>, assuming <1> for parsing partitions.\n", + master->name, pp, mtd_node); + s_cells = 1; + } if (len / 4 != a_cells + s_cells) { pr_debug("%s: ofpart partition %pOF (%pOF) error parsing reg property.\n", master->name, pp, diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index d67c926bca8b..522d375aeccf 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -9,19 +9,18 @@ #include <linux/err.h> #include <linux/errno.h> -#include <linux/module.h> #include <linux/delay.h> #include <linux/device.h> -#include <linux/mutex.h> #include <linux/math64.h> -#include <linux/sizes.h> -#include <linux/slab.h> - +#include <linux/module.h> #include <linux/mtd/mtd.h> +#include <linux/mtd/spi-nor.h> +#include <linux/mutex.h> #include <linux/of_platform.h> #include <linux/sched/task_stack.h> +#include <linux/sizes.h> +#include <linux/slab.h> #include <linux/spi/flash.h> -#include <linux/mtd/spi-nor.h> #include "core.h" @@ -2027,6 +2026,15 @@ void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size, } /** + * spi_nor_mask_erase_type() - mask out a SPI NOR erase type + * @erase: pointer to a structure that describes a SPI NOR erase type + */ +void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase) +{ + erase->size = 0; +} + +/** * spi_nor_init_uniform_erase_map() - Initialize uniform erase map * @map: the erase map of the SPI NOR * @erase_mask: bitmask encoding erase types that can erase the entire @@ -3335,7 +3343,19 @@ static struct spi_mem_driver spi_nor_driver = { .remove = spi_nor_remove, .shutdown = spi_nor_shutdown, }; -module_spi_mem_driver(spi_nor_driver); + +static int __init spi_nor_module_init(void) +{ + return 
spi_mem_driver_register(&spi_nor_driver); +} +module_init(spi_nor_module_init); + +static void __exit spi_nor_module_exit(void) +{ + spi_mem_driver_unregister(&spi_nor_driver); + spi_nor_debugfs_shutdown(); +} +module_exit(spi_nor_module_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>"); diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h index f03b55cf7e6f..e0cc42a4a0c8 100644 --- a/drivers/mtd/spi-nor/core.h +++ b/drivers/mtd/spi-nor/core.h @@ -529,33 +529,30 @@ struct flash_info { const struct spi_nor_fixups *fixups; }; +#define SPI_NOR_ID_2ITEMS(_id) ((_id) >> 8) & 0xff, (_id) & 0xff +#define SPI_NOR_ID_3ITEMS(_id) ((_id) >> 16) & 0xff, SPI_NOR_ID_2ITEMS(_id) + +#define SPI_NOR_ID(_jedec_id, _ext_id) \ + .id = { SPI_NOR_ID_3ITEMS(_jedec_id), SPI_NOR_ID_2ITEMS(_ext_id) }, \ + .id_len = !(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0)) + +#define SPI_NOR_ID6(_jedec_id, _ext_id) \ + .id = { SPI_NOR_ID_3ITEMS(_jedec_id), SPI_NOR_ID_3ITEMS(_ext_id) }, \ + .id_len = 6 + +#define SPI_NOR_GEOMETRY(_sector_size, _n_sectors) \ + .sector_size = (_sector_size), \ + .n_sectors = (_n_sectors), \ + .page_size = 256 + /* Used when the "_ext_id" is two bytes at most */ #define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors) \ - .id = { \ - ((_jedec_id) >> 16) & 0xff, \ - ((_jedec_id) >> 8) & 0xff, \ - (_jedec_id) & 0xff, \ - ((_ext_id) >> 8) & 0xff, \ - (_ext_id) & 0xff, \ - }, \ - .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))), \ - .sector_size = (_sector_size), \ - .n_sectors = (_n_sectors), \ - .page_size = 256, \ + SPI_NOR_ID((_jedec_id), (_ext_id)), \ + SPI_NOR_GEOMETRY((_sector_size), (_n_sectors)), #define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors) \ - .id = { \ - ((_jedec_id) >> 16) & 0xff, \ - ((_jedec_id) >> 8) & 0xff, \ - (_jedec_id) & 0xff, \ - ((_ext_id) >> 16) & 0xff, \ - ((_ext_id) >> 8) & 0xff, \ - (_ext_id) & 0xff, \ - }, \ - .id_len = 6, \ - .sector_size = (_sector_size), \ - .n_sectors = (_n_sectors), \ - .page_size = 256, \ + SPI_NOR_ID6((_jedec_id), (_ext_id)), \ + SPI_NOR_GEOMETRY((_sector_size), (_n_sectors)), #define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_nbytes) \ .sector_size = (_sector_size), \ @@ -684,6 +681,7 @@ void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode, void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size, u8 opcode); +void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase); struct spi_nor_erase_region * spi_nor_region_next(struct spi_nor_erase_region *region); void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map, @@ -713,8 +711,10 @@ static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd) #ifdef CONFIG_DEBUG_FS void spi_nor_debugfs_register(struct spi_nor *nor); +void spi_nor_debugfs_shutdown(void); #else static inline void spi_nor_debugfs_register(struct spi_nor *nor) {} +static inline void spi_nor_debugfs_shutdown(void) {} #endif #endif /* __LINUX_MTD_SPI_NOR_INTERNAL_H */ diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c index ff895f6758ea..fc7ad203df12 100644 --- a/drivers/mtd/spi-nor/debugfs.c +++ b/drivers/mtd/spi-nor/debugfs.c @@ -1,9 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/debugfs.h> #include <linux/mtd/spi-nor.h> #include <linux/spi/spi.h> #include <linux/spi/spi-mem.h> -#include <linux/debugfs.h> #include "core.h" @@ -226,13 +226,13 @@ static void spi_nor_debugfs_unregister(void *data) nor->debugfs_root = NULL; } +static struct dentry *rootdir; + void 
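/*
 * Quick check of what the new SPI_NOR_ID()/SPI_NOR_ID_*ITEMS() macros
 * above expand to, using a tiny stand-in for struct flash_info. With a
 * 3-byte JEDEC ID of 0xc22019 and no extended ID, the expansion should
 * give id = { 0xc2, 0x20, 0x19 } and id_len = 3. The struct and the
 * example ID value are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_info {
	uint8_t id[6];
	int id_len;
};

#define SPI_NOR_ID_2ITEMS(_id) ((_id) >> 8) & 0xff, (_id) & 0xff
#define SPI_NOR_ID_3ITEMS(_id) ((_id) >> 16) & 0xff, SPI_NOR_ID_2ITEMS(_id)

#define SPI_NOR_ID(_jedec_id, _ext_id)					\
	.id = { SPI_NOR_ID_3ITEMS(_jedec_id), SPI_NOR_ID_2ITEMS(_ext_id) }, \
	.id_len = !(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))

int main(void)
{
	struct fake_info info = { SPI_NOR_ID(0xc22019, 0) };

	printf("id_len=%d id=%02x %02x %02x\n",
	       info.id_len, info.id[0], info.id[1], info.id[2]);
	return 0;
}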
spi_nor_debugfs_register(struct spi_nor *nor) { - struct dentry *rootdir, *d; + struct dentry *d; int ret; - /* Create rootdir once. Will never be deleted again. */ - rootdir = debugfs_lookup(SPI_NOR_DEBUGFS_ROOT, NULL); if (!rootdir) rootdir = debugfs_create_dir(SPI_NOR_DEBUGFS_ROOT, NULL); @@ -247,3 +247,8 @@ void spi_nor_debugfs_register(struct spi_nor *nor) debugfs_create_file("capabilities", 0444, d, nor, &spi_nor_capabilities_fops); } + +void spi_nor_debugfs_shutdown(void) +{ + debugfs_remove(rootdir); +} diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c index a0ddad2afffc..400e2b42f45a 100644 --- a/drivers/mtd/spi-nor/issi.c +++ b/drivers/mtd/spi-nor/issi.c @@ -18,7 +18,7 @@ is25lp256_post_bfpt_fixups(struct spi_nor *nor, * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY. * Overwrite the number of address bytes advertised by the BFPT. */ - if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) == + if ((bfpt->dwords[SFDP_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) == BFPT_DWORD1_ADDRESS_BYTES_3_ONLY) nor->params->addr_nbytes = 4; diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c index d81a4cb2812b..6853ec9ae65d 100644 --- a/drivers/mtd/spi-nor/macronix.c +++ b/drivers/mtd/spi-nor/macronix.c @@ -22,7 +22,7 @@ mx25l25635_post_bfpt_fixups(struct spi_nor *nor, * seems that the F version advertises support for Fast Read 4-4-4 in * its BFPT table. */ - if (bfpt->dwords[BFPT_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4) + if (bfpt->dwords[SFDP_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4) nor->flags |= SNOR_F_4B_OPCODES; return 0; diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c index 8434f654eca1..298ab5e53a8c 100644 --- a/drivers/mtd/spi-nor/sfdp.c +++ b/drivers/mtd/spi-nor/sfdp.c @@ -5,9 +5,9 @@ */ #include <linux/bitfield.h> +#include <linux/mtd/spi-nor.h> #include <linux/slab.h> #include <linux/sort.h> -#include <linux/mtd/spi-nor.h> #include "core.h" @@ -242,64 +242,64 @@ static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = { /* Fast Read 1-1-2 */ { SNOR_HWCAPS_READ_1_1_2, - BFPT_DWORD(1), BIT(16), /* Supported bit */ - BFPT_DWORD(4), 0, /* Settings */ + SFDP_DWORD(1), BIT(16), /* Supported bit */ + SFDP_DWORD(4), 0, /* Settings */ SNOR_PROTO_1_1_2, }, /* Fast Read 1-2-2 */ { SNOR_HWCAPS_READ_1_2_2, - BFPT_DWORD(1), BIT(20), /* Supported bit */ - BFPT_DWORD(4), 16, /* Settings */ + SFDP_DWORD(1), BIT(20), /* Supported bit */ + SFDP_DWORD(4), 16, /* Settings */ SNOR_PROTO_1_2_2, }, /* Fast Read 2-2-2 */ { SNOR_HWCAPS_READ_2_2_2, - BFPT_DWORD(5), BIT(0), /* Supported bit */ - BFPT_DWORD(6), 16, /* Settings */ + SFDP_DWORD(5), BIT(0), /* Supported bit */ + SFDP_DWORD(6), 16, /* Settings */ SNOR_PROTO_2_2_2, }, /* Fast Read 1-1-4 */ { SNOR_HWCAPS_READ_1_1_4, - BFPT_DWORD(1), BIT(22), /* Supported bit */ - BFPT_DWORD(3), 16, /* Settings */ + SFDP_DWORD(1), BIT(22), /* Supported bit */ + SFDP_DWORD(3), 16, /* Settings */ SNOR_PROTO_1_1_4, }, /* Fast Read 1-4-4 */ { SNOR_HWCAPS_READ_1_4_4, - BFPT_DWORD(1), BIT(21), /* Supported bit */ - BFPT_DWORD(3), 0, /* Settings */ + SFDP_DWORD(1), BIT(21), /* Supported bit */ + SFDP_DWORD(3), 0, /* Settings */ SNOR_PROTO_1_4_4, }, /* Fast Read 4-4-4 */ { SNOR_HWCAPS_READ_4_4_4, - BFPT_DWORD(5), BIT(4), /* Supported bit */ - BFPT_DWORD(7), 16, /* Settings */ + SFDP_DWORD(5), BIT(4), /* Supported bit */ + SFDP_DWORD(7), 16, /* Settings */ SNOR_PROTO_4_4_4, }, }; static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = { /* Erase Type 1 in DWORD8 bits[15:0] */ - {BFPT_DWORD(8), 0}, + {SFDP_DWORD(8), 
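/*
 * Userspace analogue of the rootdir handling above: the shared debugfs
 * directory is now kept in a file-scope pointer, created lazily on the
 * first registration and torn down once by the new
 * spi_nor_debugfs_shutdown() at module exit, instead of being looked up
 * on every call. Names and the strdup()/free() pair are stand-ins for
 * debugfs_create_dir()/debugfs_remove().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *rootdir;	/* stands in for the static struct dentry * */

static void register_one(const char *name)
{
	if (!rootdir)
		rootdir = strdup("spi-nor");	/* created once */
	printf("registered %s under %s/\n", name, rootdir);
}

static void shutdown(void)
{
	free(rootdir);		/* debugfs_remove(rootdir) equivalent */
	rootdir = NULL;
}

int main(void)
{
	register_one("spi0.0");
	register_one("spi0.1");
	shutdown();
	return 0;
}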
0}, /* Erase Type 2 in DWORD8 bits[31:16] */ - {BFPT_DWORD(8), 16}, + {SFDP_DWORD(8), 16}, /* Erase Type 3 in DWORD9 bits[15:0] */ - {BFPT_DWORD(9), 0}, + {SFDP_DWORD(9), 0}, /* Erase Type 4 in DWORD9 bits[31:16] */ - {BFPT_DWORD(9), 16}, + {SFDP_DWORD(9), 16}, }; /** @@ -458,7 +458,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, le32_to_cpu_array(bfpt.dwords, BFPT_DWORD_MAX); /* Number of address bytes. */ - switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) { + switch (bfpt.dwords[SFDP_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) { case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY: case BFPT_DWORD1_ADDRESS_BYTES_3_OR_4: params->addr_nbytes = 3; @@ -475,7 +475,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, } /* Flash Memory Density (in bits). */ - val = bfpt.dwords[BFPT_DWORD(2)]; + val = bfpt.dwords[SFDP_DWORD(2)]; if (val & BIT(31)) { val &= ~BIT(31); @@ -555,13 +555,13 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt); /* Page size: this field specifies 'N' so the page size = 2^N bytes. */ - val = bfpt.dwords[BFPT_DWORD(11)]; + val = bfpt.dwords[SFDP_DWORD(11)]; val &= BFPT_DWORD11_PAGE_SIZE_MASK; val >>= BFPT_DWORD11_PAGE_SIZE_SHIFT; params->page_size = 1U << val; /* Quad Enable Requirements. */ - switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) { + switch (bfpt.dwords[SFDP_DWORD(15)] & BFPT_DWORD15_QER_MASK) { case BFPT_DWORD15_QER_NONE: params->quad_enable = NULL; break; @@ -608,7 +608,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, } /* Soft Reset support. */ - if (bfpt.dwords[BFPT_DWORD(16)] & BFPT_DWORD16_SWRST_EN_RST) + if (bfpt.dwords[SFDP_DWORD(16)] & BFPT_DWORD16_SWRST_EN_RST) nor->flags |= SNOR_F_SOFT_RESET; /* Stop here if not JESD216 rev C or later. */ @@ -616,7 +616,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt); /* 8D-8D-8D command extension. */ - switch (bfpt.dwords[BFPT_DWORD(18)] & BFPT_DWORD18_CMD_EXT_MASK) { + switch (bfpt.dwords[SFDP_DWORD(18)] & BFPT_DWORD18_CMD_EXT_MASK) { case BFPT_DWORD18_CMD_EXT_REP: nor->cmd_ext_type = SPI_NOR_EXT_REPEAT; break; @@ -875,7 +875,7 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor, */ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) if (!(regions_erase_type & BIT(erase[i].idx))) - spi_nor_set_erase_type(&erase[i], 0, 0xFF); + spi_nor_mask_erase_type(&erase[i]); return 0; } @@ -1004,7 +1004,7 @@ static int spi_nor_parse_4bait(struct spi_nor *nor, discard_hwcaps |= read->hwcaps; if ((params->hwcaps.mask & read->hwcaps) && - (dwords[0] & read->supported_bit)) + (dwords[SFDP_DWORD(1)] & read->supported_bit)) read_hwcaps |= read->hwcaps; } @@ -1023,7 +1023,7 @@ static int spi_nor_parse_4bait(struct spi_nor *nor, * authority for specifying Page Program support. 
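/*
 * Sketch of the density decode shown a few hunks above, written with the
 * new SFDP_DWORD() indexing. Per JESD216, BFPT DWORD 2 holds either the
 * density in bits minus one (bit 31 clear) or a power-of-two exponent
 * (bit 31 set); both example encodings below describe a 32 MiB part.
 * Standalone code: kernel helpers are replaced with plain C, and
 * bfpt_density_bytes() is an illustrative name, not the kernel function.
 */
#include <stdint.h>
#include <stdio.h>

#define SFDP_DWORD(i)	((i) - 1)
#define BIT31		(1u << 31)

static uint64_t bfpt_density_bytes(uint32_t dword2)
{
	uint64_t bits;

	if (dword2 & BIT31)
		bits = 1ULL << (dword2 & ~BIT31);	/* 2^N bits */
	else
		bits = (uint64_t)dword2 + 1;		/* N + 1 bits */

	return bits >> 3;				/* bits -> bytes */
}

int main(void)
{
	uint32_t dwords[20] = { 0 };

	dwords[SFDP_DWORD(2)] = 0x0fffffff;		/* linear form */
	printf("%llu bytes\n",
	       (unsigned long long)bfpt_density_bytes(dwords[SFDP_DWORD(2)]));

	dwords[SFDP_DWORD(2)] = BIT31 | 28;		/* exponent form */
	printf("%llu bytes\n",
	       (unsigned long long)bfpt_density_bytes(dwords[SFDP_DWORD(2)]));
	return 0;
}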
*/ discard_hwcaps |= program->hwcaps; - if (dwords[0] & program->supported_bit) + if (dwords[SFDP_DWORD(1)] & program->supported_bit) pp_hwcaps |= program->hwcaps; } @@ -1035,7 +1035,7 @@ static int spi_nor_parse_4bait(struct spi_nor *nor, for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) { const struct sfdp_4bait *erase = &erases[i]; - if (dwords[0] & erase->supported_bit) + if (dwords[SFDP_DWORD(1)] & erase->supported_bit) erase_mask |= BIT(i); } @@ -1086,10 +1086,10 @@ static int spi_nor_parse_4bait(struct spi_nor *nor, for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) { if (erase_mask & BIT(i)) - erase_type[i].opcode = (dwords[1] >> + erase_type[i].opcode = (dwords[SFDP_DWORD(2)] >> erase_type[i].idx * 8) & 0xFF; else - spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF); + spi_nor_mask_erase_type(&erase_type[i]); } /* @@ -1145,15 +1145,15 @@ static int spi_nor_parse_profile1(struct spi_nor *nor, le32_to_cpu_array(dwords, profile1_header->length); /* Get 8D-8D-8D fast read opcode and dummy cycles. */ - opcode = FIELD_GET(PROFILE1_DWORD1_RD_FAST_CMD, dwords[0]); + opcode = FIELD_GET(PROFILE1_DWORD1_RD_FAST_CMD, dwords[SFDP_DWORD(1)]); /* Set the Read Status Register dummy cycles and dummy address bytes. */ - if (dwords[0] & PROFILE1_DWORD1_RDSR_DUMMY) + if (dwords[SFDP_DWORD(1)] & PROFILE1_DWORD1_RDSR_DUMMY) nor->params->rdsr_dummy = 8; else nor->params->rdsr_dummy = 4; - if (dwords[0] & PROFILE1_DWORD1_RDSR_ADDR_BYTES) + if (dwords[SFDP_DWORD(1)] & PROFILE1_DWORD1_RDSR_ADDR_BYTES) nor->params->rdsr_addr_nbytes = 4; else nor->params->rdsr_addr_nbytes = 0; @@ -1167,13 +1167,16 @@ static int spi_nor_parse_profile1(struct spi_nor *nor, * Default to PROFILE1_DUMMY_DEFAULT if we don't find anything, and let * flashes set the correct value if needed in their fixup hooks. */ - dummy = FIELD_GET(PROFILE1_DWORD4_DUMMY_200MHZ, dwords[3]); + dummy = FIELD_GET(PROFILE1_DWORD4_DUMMY_200MHZ, dwords[SFDP_DWORD(4)]); if (!dummy) - dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_166MHZ, dwords[4]); + dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_166MHZ, + dwords[SFDP_DWORD(5)]); if (!dummy) - dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_133MHZ, dwords[4]); + dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_133MHZ, + dwords[SFDP_DWORD(5)]); if (!dummy) - dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_100MHZ, dwords[4]); + dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_100MHZ, + dwords[SFDP_DWORD(5)]); if (!dummy) dev_dbg(nor->dev, "Can't find dummy cycles from Profile 1.0 table\n"); @@ -1228,7 +1231,8 @@ static int spi_nor_parse_sccr(struct spi_nor *nor, le32_to_cpu_array(dwords, sccr_header->length); - if (FIELD_GET(SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE, dwords[22])) + if (FIELD_GET(SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE, + dwords[SFDP_DWORD(22)])) nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE; out: diff --git a/drivers/mtd/spi-nor/sfdp.h b/drivers/mtd/spi-nor/sfdp.h index c1969f0a2f46..500659b35655 100644 --- a/drivers/mtd/spi-nor/sfdp.h +++ b/drivers/mtd/spi-nor/sfdp.h @@ -13,13 +13,12 @@ #define SFDP_JESD216A_MINOR 5 #define SFDP_JESD216B_MINOR 6 +/* SFDP DWORDS are indexed from 1 but C arrays are indexed from 0. */ +#define SFDP_DWORD(i) ((i) - 1) + /* Basic Flash Parameter Table */ -/* - * JESD216 rev D defines a Basic Flash Parameter Table of 20 DWORDs. - * They are indexed from 1 but C arrays are indexed from 0. - */ -#define BFPT_DWORD(i) ((i) - 1) +/* JESD216 rev D defines a Basic Flash Parameter Table of 20 DWORDs. 
*/ #define BFPT_DWORD_MAX 20 struct sfdp_bfpt { diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c index b621cdfd506f..12a256c0ef4c 100644 --- a/drivers/mtd/spi-nor/spansion.c +++ b/drivers/mtd/spi-nor/spansion.c @@ -15,14 +15,19 @@ #define SPINOR_OP_RD_ANY_REG 0x65 /* Read any register */ #define SPINOR_OP_WR_ANY_REG 0x71 /* Write any register */ #define SPINOR_REG_CYPRESS_CFR1V 0x00800002 -#define SPINOR_REG_CYPRESS_CFR1V_QUAD_EN BIT(1) /* Quad Enable */ +#define SPINOR_REG_CYPRESS_CFR1_QUAD_EN BIT(1) /* Quad Enable */ #define SPINOR_REG_CYPRESS_CFR2V 0x00800003 -#define SPINOR_REG_CYPRESS_CFR2V_MEMLAT_11_24 0xb +#define SPINOR_REG_CYPRESS_CFR2_MEMLAT_11_24 0xb #define SPINOR_REG_CYPRESS_CFR3V 0x00800004 -#define SPINOR_REG_CYPRESS_CFR3V_PGSZ BIT(4) /* Page size. */ +#define SPINOR_REG_CYPRESS_CFR3_PGSZ BIT(4) /* Page size. */ #define SPINOR_REG_CYPRESS_CFR5V 0x00800006 -#define SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_EN 0x3 -#define SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_DS 0 +#define SPINOR_REG_CYPRESS_CFR5_BIT6 BIT(6) +#define SPINOR_REG_CYPRESS_CFR5_DDR BIT(1) +#define SPINOR_REG_CYPRESS_CFR5_OPI BIT(0) +#define SPINOR_REG_CYPRESS_CFR5_OCT_DTR_EN \ + (SPINOR_REG_CYPRESS_CFR5_BIT6 | SPINOR_REG_CYPRESS_CFR5_DDR | \ + SPINOR_REG_CYPRESS_CFR5_OPI) +#define SPINOR_REG_CYPRESS_CFR5_OCT_DTR_DS SPINOR_REG_CYPRESS_CFR5_BIT6 #define SPINOR_OP_CYPRESS_RD_FAST 0xee /* Cypress SPI NOR flash operations. */ @@ -52,7 +57,7 @@ static int cypress_nor_octal_dtr_en(struct spi_nor *nor) u8 addr_mode_nbytes = nor->params->addr_mode_nbytes; /* Use 24 dummy cycles for memory array reads. */ - *buf = SPINOR_REG_CYPRESS_CFR2V_MEMLAT_11_24; + *buf = SPINOR_REG_CYPRESS_CFR2_MEMLAT_11_24; op = (struct spi_mem_op) CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes, SPINOR_REG_CYPRESS_CFR2V, 1, buf); @@ -64,7 +69,7 @@ static int cypress_nor_octal_dtr_en(struct spi_nor *nor) nor->read_dummy = 24; /* Set the octal and DTR enable bits. */ - buf[0] = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_EN; + buf[0] = SPINOR_REG_CYPRESS_CFR5_OCT_DTR_EN; op = (struct spi_mem_op) CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes, SPINOR_REG_CYPRESS_CFR5V, 1, buf); @@ -98,7 +103,7 @@ static int cypress_nor_octal_dtr_dis(struct spi_nor *nor) * in 8D-8D-8D mode. Since there is no register at the next location, * just initialize the value to 0 and let the transaction go on. */ - buf[0] = SPINOR_REG_CYPRESS_CFR5V_OCT_DTR_DS; + buf[0] = SPINOR_REG_CYPRESS_CFR5_OCT_DTR_DS; buf[1] = 0; op = (struct spi_mem_op) CYPRESS_NOR_WR_ANY_REG_OP(nor->addr_nbytes, @@ -150,11 +155,11 @@ static int cypress_nor_quad_enable_volatile(struct spi_nor *nor) if (ret) return ret; - if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR1V_QUAD_EN) + if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR1_QUAD_EN) return 0; /* Update the Quad Enable bit. 
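/*
 * Small check of the reworked CFR5 values above: the octal DTR enable
 * value is now built from individual bits and keeps bit 6 set, and the
 * disable value is no longer 0 but bit 6 alone. Standalone; the macro
 * names simply mirror the new #defines in spansion.c.
 */
#include <stdio.h>

#define BIT(n)				(1u << (n))
#define CYPRESS_CFR5_BIT6		BIT(6)
#define CYPRESS_CFR5_DDR		BIT(1)
#define CYPRESS_CFR5_OPI		BIT(0)
#define CYPRESS_CFR5_OCT_DTR_EN \
	(CYPRESS_CFR5_BIT6 | CYPRESS_CFR5_DDR | CYPRESS_CFR5_OPI)
#define CYPRESS_CFR5_OCT_DTR_DS		CYPRESS_CFR5_BIT6

int main(void)
{
	printf("OCT_DTR_EN = 0x%02x (was 0x03)\n", CYPRESS_CFR5_OCT_DTR_EN);
	printf("OCT_DTR_DS = 0x%02x (was 0x00)\n", CYPRESS_CFR5_OCT_DTR_DS);
	return 0;
}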
*/ - nor->bouncebuf[0] |= SPINOR_REG_CYPRESS_CFR1V_QUAD_EN; + nor->bouncebuf[0] |= SPINOR_REG_CYPRESS_CFR1_QUAD_EN; op = (struct spi_mem_op) CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes, SPINOR_REG_CYPRESS_CFR1V, 1, @@ -205,7 +210,7 @@ static int cypress_nor_set_page_size(struct spi_nor *nor) if (ret) return ret; - if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR3V_PGSZ) + if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR3_PGSZ) nor->params->page_size = 512; else nor->params->page_size = 256; diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index 75eaecc8639f..3711d7f74600 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c @@ -35,7 +35,6 @@ #include <linux/mutex.h> #include <linux/slab.h> #include <linux/mtd/ubi.h> -#include <linux/workqueue.h> #include <linux/blkdev.h> #include <linux/blk-mq.h> #include <linux/hdreg.h> @@ -62,7 +61,6 @@ struct ubiblock_param { }; struct ubiblock_pdu { - struct work_struct work; struct ubi_sgl usgl; }; @@ -82,8 +80,6 @@ struct ubiblock { struct gendisk *gd; struct request_queue *rq; - struct workqueue_struct *wq; - struct mutex dev_mutex; struct list_head list; struct blk_mq_tag_set tag_set; @@ -181,20 +177,29 @@ static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id) return NULL; } -static int ubiblock_read(struct ubiblock_pdu *pdu) +static blk_status_t ubiblock_read(struct request *req) { - int ret, leb, offset, bytes_left, to_read; - u64 pos; - struct request *req = blk_mq_rq_from_pdu(pdu); + struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req); struct ubiblock *dev = req->q->queuedata; + u64 pos = blk_rq_pos(req) << 9; + int to_read = blk_rq_bytes(req); + int bytes_left = to_read; + /* Get LEB:offset address to read from */ + int offset = do_div(pos, dev->leb_size); + int leb = pos; + struct req_iterator iter; + struct bio_vec bvec; + int ret; - to_read = blk_rq_bytes(req); - pos = blk_rq_pos(req) << 9; + blk_mq_start_request(req); - /* Get LEB:offset address to read from */ - offset = do_div(pos, dev->leb_size); - leb = pos; - bytes_left = to_read; + /* + * It is safe to ignore the return value of blk_rq_map_sg() because + * the number of sg entries is limited to UBI_MAX_SG_COUNT + * and ubi_read_sg() will check that limit. + */ + ubi_sgl_init(&pdu->usgl); + blk_rq_map_sg(req->q, req, pdu->usgl.sg); while (bytes_left) { /* @@ -206,14 +211,20 @@ static int ubiblock_read(struct ubiblock_pdu *pdu) ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read); if (ret < 0) - return ret; + break; bytes_left -= to_read; to_read = bytes_left; leb += 1; offset = 0; } - return 0; + + rq_for_each_segment(bvec, req, iter) + flush_dcache_page(bvec.bv_page); + + blk_mq_end_request(req, errno_to_blk_status(ret)); + + return BLK_STS_OK; } static int ubiblock_open(struct block_device *bdev, fmode_t mode) @@ -289,47 +300,15 @@ static const struct block_device_operations ubiblock_ops = { .getgeo = ubiblock_getgeo, }; -static void ubiblock_do_work(struct work_struct *work) -{ - int ret; - struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work); - struct request *req = blk_mq_rq_from_pdu(pdu); - struct req_iterator iter; - struct bio_vec bvec; - - blk_mq_start_request(req); - - /* - * It is safe to ignore the return value of blk_rq_map_sg() because - * the number of sg entries is limited to UBI_MAX_SG_COUNT - * and ubi_read_sg() will check that limit. 
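/*
 * Sketch of the LEB:offset arithmetic that ubiblock_read() now performs
 * inline instead of deferring to a per-volume workqueue. Userspace
 * stand-in: do_div() becomes plain division, and the per-iteration clamp
 * to the LEB boundary is assumed from the usual UBI pattern (the hunk
 * above elides that part of the loop). split_read() is an illustrative
 * name only.
 */
#include <stdint.h>
#include <stdio.h>

static void split_read(uint64_t sector, int bytes, int leb_size)
{
	uint64_t pos = sector << 9;		/* 512-byte sectors -> bytes */
	int offset = pos % leb_size;		/* do_div(pos, leb_size) */
	int leb = pos / leb_size;
	int bytes_left = bytes;
	int to_read = bytes;

	while (bytes_left) {
		if (offset + to_read > leb_size)
			to_read = leb_size - offset;	/* stop at LEB end */

		printf("read LEB %d, offset %d, %d bytes\n", leb, offset, to_read);
		bytes_left -= to_read;
		to_read = bytes_left;
		leb += 1;
		offset = 0;
	}
}

int main(void)
{
	/* a 256 KiB request starting 4 KiB before a 128 KiB LEB boundary */
	split_read(248, 256 * 1024, 128 * 1024);
	return 0;
}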
- */ - blk_rq_map_sg(req->q, req, pdu->usgl.sg); - - ret = ubiblock_read(pdu); - - rq_for_each_segment(bvec, req, iter) - flush_dcache_page(bvec.bv_page); - - blk_mq_end_request(req, errno_to_blk_status(ret)); -} - static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { - struct request *req = bd->rq; - struct ubiblock *dev = hctx->queue->queuedata; - struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req); - - switch (req_op(req)) { + switch (req_op(bd->rq)) { case REQ_OP_READ: - ubi_sgl_init(&pdu->usgl); - queue_work(dev->wq, &pdu->work); - return BLK_STS_OK; + return ubiblock_read(bd->rq); default: return BLK_STS_IOERR; } - } static int ubiblock_init_request(struct blk_mq_tag_set *set, @@ -339,8 +318,6 @@ static int ubiblock_init_request(struct blk_mq_tag_set *set, struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req); sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT); - INIT_WORK(&pdu->work, ubiblock_do_work); - return 0; } @@ -354,9 +331,12 @@ static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity) u64 size = vi->used_bytes >> 9; if (vi->used_bytes % 512) { - pr_warn("UBI: block: volume size is not a multiple of 512, " - "last %llu bytes are ignored!\n", - vi->used_bytes - (size << 9)); + if (vi->vol_type == UBI_DYNAMIC_VOLUME) + pr_warn("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n", + vi->used_bytes - (size << 9)); + else + pr_info("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n", + vi->used_bytes - (size << 9)); } if ((sector_t)size != size) @@ -401,7 +381,7 @@ int ubiblock_create(struct ubi_volume_info *vi) dev->tag_set.ops = &ubiblock_mq_ops; dev->tag_set.queue_depth = 64; dev->tag_set.numa_node = NUMA_NO_NODE; - dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; + dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu); dev->tag_set.driver_data = dev; dev->tag_set.nr_hw_queues = 1; @@ -439,32 +419,20 @@ int ubiblock_create(struct ubi_volume_info *vi) dev->rq = gd->queue; blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT); - /* - * Create one workqueue per volume (per registered block device). - * Remember workqueues are cheap, they're not threads. 
- */ - dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name); - if (!dev->wq) { - ret = -ENOMEM; - goto out_remove_minor; - } - list_add_tail(&dev->list, &ubiblock_devices); /* Must be the last step: anyone can call file ops from now on */ - ret = add_disk(dev->gd); + ret = device_add_disk(vi->dev, dev->gd, NULL); if (ret) - goto out_destroy_wq; + goto out_remove_minor; dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)", dev->ubi_num, dev->vol_id, vi->name); mutex_unlock(&devices_mutex); return 0; -out_destroy_wq: - list_del(&dev->list); - destroy_workqueue(dev->wq); out_remove_minor: + list_del(&dev->list); idr_remove(&ubiblock_minor_idr, gd->first_minor); out_cleanup_disk: put_disk(dev->gd); @@ -482,8 +450,6 @@ static void ubiblock_cleanup(struct ubiblock *dev) { /* Stop new requests to arrive */ del_gendisk(dev->gd); - /* Flush pending work */ - destroy_workqueue(dev->wq); /* Finally destroy the blk queue */ dev_info(disk_to_dev(dev->gd), "released"); put_disk(dev->gd); diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index a901f8edfa41..0904eb40c95f 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -35,7 +35,7 @@ #define MTD_PARAM_LEN_MAX 64 /* Maximum number of comma-separated items in the 'mtd=' parameter */ -#define MTD_PARAM_MAX_COUNT 4 +#define MTD_PARAM_MAX_COUNT 5 /* Maximum value for the number of bad PEBs per 1024 PEBs */ #define MAX_MTD_UBI_BEB_LIMIT 768 @@ -53,12 +53,14 @@ * @ubi_num: UBI number * @vid_hdr_offs: VID header offset * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs + * @enable_fm: enable fastmap when value is non-zero */ struct mtd_dev_param { char name[MTD_PARAM_LEN_MAX]; int ubi_num; int vid_hdr_offs; int max_beb_per1024; + int enable_fm; }; /* Numbers of elements set in the @mtd_dev_param array */ @@ -468,6 +470,7 @@ static int uif_init(struct ubi_device *ubi) err = ubi_add_volume(ubi, ubi->volumes[i]); if (err) { ubi_err(ubi, "cannot add volume %d", i); + ubi->volumes[i] = NULL; goto out_volumes; } } @@ -663,6 +666,12 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024) ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size); ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size); + if (ubi->vid_hdr_offset && ((ubi->vid_hdr_offset + UBI_VID_HDR_SIZE) > + ubi->vid_hdr_alsize)) { + ubi_err(ubi, "VID header offset %d too large.", ubi->vid_hdr_offset); + return -EINVAL; + } + dbg_gen("min_io_size %d", ubi->min_io_size); dbg_gen("max_write_size %d", ubi->max_write_size); dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size); @@ -906,6 +915,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, ubi->dev.release = dev_release; ubi->dev.class = &ubi_class; ubi->dev.groups = ubi_dev_groups; + ubi->dev.parent = &mtd->dev; ubi->mtd = mtd; ubi->ubi_num = ubi_num; @@ -1248,7 +1258,7 @@ static int __init ubi_init(void) mutex_lock(&ubi_devices_mutex); err = ubi_attach_mtd_dev(mtd, p->ubi_num, p->vid_hdr_offs, p->max_beb_per1024, - false); + p->enable_fm == 0 ? 
true : false); mutex_unlock(&ubi_devices_mutex); if (err < 0) { pr_err("UBI error: cannot attach mtd%d\n", @@ -1427,7 +1437,7 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp) int err = kstrtoint(token, 10, &p->max_beb_per1024); if (err) { - pr_err("UBI error: bad value for max_beb_per1024 parameter: %s", + pr_err("UBI error: bad value for max_beb_per1024 parameter: %s\n", token); return -EINVAL; } @@ -1438,13 +1448,25 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp) int err = kstrtoint(token, 10, &p->ubi_num); if (err) { - pr_err("UBI error: bad value for ubi_num parameter: %s", + pr_err("UBI error: bad value for ubi_num parameter: %s\n", token); return -EINVAL; } } else p->ubi_num = UBI_DEV_NUM_AUTO; + token = tokens[4]; + if (token) { + int err = kstrtoint(token, 10, &p->enable_fm); + + if (err) { + pr_err("UBI error: bad value for enable_fm parameter: %s\n", + token); + return -EINVAL; + } + } else + p->enable_fm = 0; + mtd_devs += 1; return 0; } @@ -1457,11 +1479,13 @@ MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|pa "Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value (" __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n" "Optional \"ubi_num\" parameter specifies UBI device number which have to be assigned to the newly created UBI device (assigned automatically by default)\n" + "Optional \"enable_fm\" parameter determines whether to enable fastmap during attach. If the value is non-zero, fastmap is enabled. Default value is 0.\n" "\n" "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n" "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n" "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n" "Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5 and using default values for the other fields.\n" + "example 5: mtd=1,0,0,5 mtd=2,0,0,6,1 - attach MTD device /dev/mtd1 to UBI 5 and disable fastmap; attach MTD device /dev/mtd2 to UBI 6 and enable fastmap.(only works when fastmap is enabled and fm_autoconvert=Y).\n" "\t(e.g. 
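/*
 * Sketch of how the extended "mtd=" parameter is tokenized now that a
 * fifth field (enable_fm) is accepted. strtok() stands in for the
 * kernel's token handling and empty fields are not covered here; the
 * example string matches the new MODULE_PARM_DESC example above
 * (attach mtd2 to UBI 6 with fastmap enabled).
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "2,0,0,6,1";
	static const char * const names[] = {
		"name", "vid_hdr_offs", "max_beb_per1024", "ubi_num", "enable_fm"
	};
	char *tok = strtok(buf, ",");

	for (int i = 0; tok && i < 5; i++, tok = strtok(NULL, ","))
		printf("%-16s = %s\n", names[i], tok);
	return 0;
}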
if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device)."); #ifdef CONFIG_MTD_UBI_FASTMAP module_param(fm_autoconvert, bool, 0644); diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c index fcca6942dbdd..27168f511d6d 100644 --- a/drivers/mtd/ubi/debug.c +++ b/drivers/mtd/ubi/debug.c @@ -504,6 +504,7 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi) { unsigned long ubi_num = ubi->ubi_num; struct ubi_debug_info *d = &ubi->dbg; + umode_t mode = S_IRUSR | S_IWUSR; int n; if (!IS_ENABLED(CONFIG_DEBUG_FS)) @@ -518,41 +519,41 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi) d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir); - d->dfs_chk_gen = debugfs_create_file("chk_gen", S_IWUSR, d->dfs_dir, + d->dfs_chk_gen = debugfs_create_file("chk_gen", mode, d->dfs_dir, (void *)ubi_num, &dfs_fops); - d->dfs_chk_io = debugfs_create_file("chk_io", S_IWUSR, d->dfs_dir, + d->dfs_chk_io = debugfs_create_file("chk_io", mode, d->dfs_dir, (void *)ubi_num, &dfs_fops); - d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", S_IWUSR, + d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", mode, d->dfs_dir, (void *)ubi_num, &dfs_fops); - d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", S_IWUSR, + d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", mode, d->dfs_dir, (void *)ubi_num, &dfs_fops); d->dfs_emulate_bitflips = debugfs_create_file("tst_emulate_bitflips", - S_IWUSR, d->dfs_dir, + mode, d->dfs_dir, (void *)ubi_num, &dfs_fops); d->dfs_emulate_io_failures = debugfs_create_file("tst_emulate_io_failures", - S_IWUSR, d->dfs_dir, + mode, d->dfs_dir, (void *)ubi_num, &dfs_fops); d->dfs_emulate_power_cut = debugfs_create_file("tst_emulate_power_cut", - S_IWUSR, d->dfs_dir, + mode, d->dfs_dir, (void *)ubi_num, &dfs_fops); d->dfs_power_cut_min = debugfs_create_file("tst_emulate_power_cut_min", - S_IWUSR, d->dfs_dir, + mode, d->dfs_dir, (void *)ubi_num, &dfs_fops); d->dfs_power_cut_max = debugfs_create_file("tst_emulate_power_cut_max", - S_IWUSR, d->dfs_dir, + mode, d->dfs_dir, (void *)ubi_num, &dfs_fops); debugfs_create_file("detailed_erase_block_info", S_IRUSR, d->dfs_dir, diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 09c408c45a62..403b79d6efd5 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -61,7 +61,7 @@ struct ubi_eba_table { }; /** - * next_sqnum - get next sequence number. + * ubi_next_sqnum - get next sequence number. * @ubi: UBI device description object * * This function returns next sequence number to use, which is just the current diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c index 0ee452275578..863f571f1adb 100644 --- a/drivers/mtd/ubi/fastmap-wl.c +++ b/drivers/mtd/ubi/fastmap-wl.c @@ -146,13 +146,15 @@ void ubi_refill_pools(struct ubi_device *ubi) if (ubi->fm_anchor) { wl_tree_add(ubi->fm_anchor, &ubi->free); ubi->free_count++; + ubi->fm_anchor = NULL; } - /* - * All available PEBs are in ubi->free, now is the time to get - * the best anchor PEBs. - */ - ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1); + if (!ubi->fm_disabled) + /* + * All available PEBs are in ubi->free, now is the time to get + * the best anchor PEBs. 
+ */ + ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1); for (;;) { enough = 0; diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index ca2d9efe62c3..28c8151a0725 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c @@ -93,7 +93,7 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi) /** - * new_fm_vhdr - allocate a new volume header for fastmap usage. + * new_fm_vbuf() - allocate a new volume header for fastmap usage. * @ubi: UBI device description object * @vol_id: the VID of the new header * diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index 0fce99ff29b5..5db653eacbd4 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c @@ -79,6 +79,7 @@ void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol, vi->name_len = vol->name_len; vi->name = vol->name; vi->cdev = vol->cdev.dev; + vi->dev = &vol->dev; } /** diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c index 7b30c8ee3e82..1794d66b6eb7 100644 --- a/drivers/mtd/ubi/misc.c +++ b/drivers/mtd/ubi/misc.c @@ -10,7 +10,7 @@ #include "ubi.h" /** - * calc_data_len - calculate how much real data is stored in a buffer. + * ubi_calc_data_len - calculate how much real data is stored in a buffer. * @ubi: UBI device description object * @buf: a buffer with the contents of the physical eraseblock * @length: the buffer length diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index 8fcc0bdf0635..2c867d16f89f 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c @@ -464,7 +464,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) for (i = 0; i < -pebs; i++) { err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i); if (err) - goto out_acc; + goto out_free; } spin_lock(&ubi->volumes_lock); ubi->rsvd_pebs += pebs; @@ -512,8 +512,10 @@ out_acc: ubi->avail_pebs += pebs; spin_unlock(&ubi->volumes_lock); } + return err; + out_free: - kfree(new_eba_tbl); + ubi_eba_destroy_table(new_eba_tbl); return err; } @@ -580,6 +582,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol) if (err) { ubi_err(ubi, "cannot add character device for volume %d, error %d", vol_id, err); + vol_release(&vol->dev); return err; } @@ -590,15 +593,14 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol) vol->dev.groups = volume_dev_groups; dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id); err = device_register(&vol->dev); - if (err) - goto out_cdev; + if (err) { + cdev_del(&vol->cdev); + put_device(&vol->dev); + return err; + } self_check_volumes(ubi); return err; - -out_cdev: - cdev_del(&vol->cdev); - return err; } /** diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 68eb0f21b3fe..40f39e5d6dfc 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -165,7 +165,7 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root) } /** - * wl_tree_destroy - destroy a wear-leveling entry. + * wl_entry_destroy - destroy a wear-leveling entry. 
* @ubi: UBI device description object * @e: the wear-leveling entry to add * @@ -890,8 +890,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, err = do_sync_erase(ubi, e1, vol_id, lnum, 0); if (err) { - if (e2) + if (e2) { + spin_lock(&ubi->wl_lock); wl_entry_destroy(ubi, e2); + spin_unlock(&ubi->wl_lock); + } goto out_ro; } @@ -973,11 +976,11 @@ out_error: spin_lock(&ubi->wl_lock); ubi->move_from = ubi->move_to = NULL; ubi->move_to_put = ubi->wl_scheduled = 0; + wl_entry_destroy(ubi, e1); + wl_entry_destroy(ubi, e2); spin_unlock(&ubi->wl_lock); ubi_free_vid_buf(vidb); - wl_entry_destroy(ubi, e1); - wl_entry_destroy(ubi, e2); out_ro: ubi_ro_mode(ubi); @@ -1130,14 +1133,18 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) /* Re-schedule the LEB for erasure */ err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false); if (err1) { + spin_lock(&ubi->wl_lock); wl_entry_destroy(ubi, e); + spin_unlock(&ubi->wl_lock); err = err1; goto out_ro; } return err; } + spin_lock(&ubi->wl_lock); wl_entry_destroy(ubi, e); + spin_unlock(&ubi->wl_lock); if (err != -EIO) /* * If this is not %-EIO, we have no idea what to do. Scheduling @@ -1253,6 +1260,18 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum, retry: spin_lock(&ubi->wl_lock); e = ubi->lookuptbl[pnum]; + if (!e) { + /* + * This wl entry has been removed for some errors by other + * process (eg. wear leveling worker), corresponding process + * (except __erase_worker, which cannot concurrent with + * ubi_wl_put_peb) will set ubi ro_mode at the same time, + * just ignore this wl entry. + */ + spin_unlock(&ubi->wl_lock); + up_read(&ubi->fm_protect); + return 0; + } if (e == ubi->move_from) { /* * User is putting the physical eraseblock which was selected to |