Diffstat (limited to 'drivers/mtd')
45 files changed, 2377 insertions(+), 369 deletions(-)
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index ff2f9e55ef28..aed653ce8fa2 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -98,7 +98,7 @@ config MTD_MCHP48L640 config MTD_SPEAR_SMI tristate "SPEAR MTD NOR Support through SMI controller" depends on PLAT_SPEAR || COMPILE_TEST - default y + default PLAT_SPEAR help This enable SNOR support on SPEAR platforms using SMI controller diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c index 7584d0ba9396..4af9208f9690 100644 --- a/drivers/mtd/devices/mchp48l640.c +++ b/drivers/mtd/devices/mchp48l640.c @@ -23,6 +23,7 @@ #include <linux/spi/flash.h> #include <linux/spi/spi.h> #include <linux/of.h> +#include <linux/string_choices.h> struct mchp48_caps { unsigned int size; @@ -128,11 +129,11 @@ static int mchp48l640_write_prepare(struct mchp48l640_flash *flash, bool enable) mutex_unlock(&flash->lock); if (ret) - dev_err(&flash->spi->dev, "write %sable failed ret: %d", - (enable ? "en" : "dis"), ret); + dev_err(&flash->spi->dev, "write %s failed ret: %d", + str_enable_disable(enable), ret); - dev_dbg(&flash->spi->dev, "write %sable success ret: %d", - (enable ? "en" : "dis"), ret); + dev_dbg(&flash->spi->dev, "write %s success ret: %d", + str_enable_disable(enable), ret); if (enable) return mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, true); diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c index 8c22064ead38..f2bd1984609c 100644 --- a/drivers/mtd/ftl.c +++ b/drivers/mtd/ftl.c @@ -344,7 +344,7 @@ static int erase_xfer(partition_t *part, return -ENOMEM; erase->addr = xfer->Offset; - erase->len = 1 << part->header.EraseUnitSize; + erase->len = 1ULL << part->header.EraseUnitSize; ret = mtd_erase(part->mbd.mtd, erase); if (!ret) { diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 724f917f91ba..5ba9a741f5ac 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -741,7 +741,9 @@ int add_mtd_device(struct mtd_info *mtd) mtd->dev.type = &mtd_devtype; mtd->dev.class = &mtd_class; mtd->dev.devt = MTD_DEVT(i); - dev_set_name(&mtd->dev, "mtd%d", i); + error = dev_set_name(&mtd->dev, "mtd%d", i); + if (error) + goto fail_devname; dev_set_drvdata(&mtd->dev, mtd); mtd_check_of_node(mtd); of_node_get(mtd_get_of_node(mtd)); @@ -790,6 +792,7 @@ fail_nvmem_add: device_unregister(&mtd->dev); fail_added: of_node_put(mtd_get_of_node(mtd)); +fail_devname: idr_remove(&mtd_idr, i); fail_locked: mutex_unlock(&mtd_table_mutex); @@ -1053,7 +1056,7 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types, const struct mtd_partition *parts, int nr_parts) { - int ret; + int ret, err; mtd_set_dev_defaults(mtd); @@ -1105,8 +1108,11 @@ out: nvmem_unregister(mtd->otp_factory_nvmem); } - if (ret && device_is_registered(&mtd->dev)) - del_mtd_device(mtd); + if (ret && device_is_registered(&mtd->dev)) { + err = del_mtd_device(mtd); + if (err) + pr_err("Error when deleting MTD device (%d)\n", err); + } return ret; } diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 6811a714349d..994e8c51e674 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -690,10 +690,9 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types, parser = mtd_part_parser_get(*types); if (!parser && !request_module("%s", *types)) parser = mtd_part_parser_get(*types); - pr_debug("%s: got parser %s\n", master->name, - parser ? 
parser->name : NULL); if (!parser) continue; + pr_debug("%s: got parser %s\n", master->name, parser->name); ret = mtd_part_do_parse(parser, master, &pparts, data); if (ret <= 0) mtd_part_parser_put(parser); diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index da1586a36574..44913ff1bf12 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -3,6 +3,7 @@ nandcore-objs := core.o bbt.o obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o +obj-$(CONFIG_SPI_QPIC_SNAND) += qpic_common.o obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o obj-y += onenand/ obj-y += raw/ diff --git a/drivers/mtd/nand/ecc-mxic.c b/drivers/mtd/nand/ecc-mxic.c index 56b56f726b99..1bf9a5a64b87 100644 --- a/drivers/mtd/nand/ecc-mxic.c +++ b/drivers/mtd/nand/ecc-mxic.c @@ -614,7 +614,7 @@ static int mxic_ecc_finish_io_req_external(struct nand_device *nand, { struct mxic_ecc_engine *mxic = nand_to_mxic(nand); struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand); - int nents, step, ret; + int nents, step, ret = 0; if (req->mode == MTD_OPS_RAW) return 0; diff --git a/drivers/mtd/nand/qpic_common.c b/drivers/mtd/nand/qpic_common.c index e0ed25b5afea..8e604cc22ca3 100644 --- a/drivers/mtd/nand/qpic_common.c +++ b/drivers/mtd/nand/qpic_common.c @@ -57,14 +57,15 @@ qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc) bam_txn_buf += sizeof(*bam_txn); bam_txn->bam_ce = bam_txn_buf; - bam_txn_buf += - sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw; + bam_txn->bam_ce_nitems = QPIC_PER_CW_CMD_ELEMENTS * num_cw; + bam_txn_buf += sizeof(*bam_txn->bam_ce) * bam_txn->bam_ce_nitems; bam_txn->cmd_sgl = bam_txn_buf; - bam_txn_buf += - sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw; + bam_txn->cmd_sgl_nitems = QPIC_PER_CW_CMD_SGL * num_cw; + bam_txn_buf += sizeof(*bam_txn->cmd_sgl) * bam_txn->cmd_sgl_nitems; bam_txn->data_sgl = bam_txn_buf; + bam_txn->data_sgl_nitems = QPIC_PER_CW_DATA_SGL * num_cw; init_completion(&bam_txn->txn_done); @@ -236,21 +237,26 @@ int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read, int i, ret; struct bam_cmd_element *bam_ce_buffer; struct bam_transaction *bam_txn = nandc->bam_txn; + u32 offset; + + if (bam_txn->bam_ce_pos + size > bam_txn->bam_ce_nitems) { + dev_err(nandc->dev, "BAM %s array is full\n", "CE"); + return -EINVAL; + } bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos]; /* fill the command desc */ for (i = 0; i < size; i++) { + offset = nandc->props->bam_offset + reg_off + 4 * i; if (read) bam_prep_ce(&bam_ce_buffer[i], - nandc_reg_phys(nandc, reg_off + 4 * i), - BAM_READ_COMMAND, + offset, BAM_READ_COMMAND, reg_buf_dma_addr(nandc, (__le32 *)vaddr + i)); else bam_prep_ce_le32(&bam_ce_buffer[i], - nandc_reg_phys(nandc, reg_off + 4 * i), - BAM_WRITE_COMMAND, + offset, BAM_WRITE_COMMAND, *((__le32 *)vaddr + i)); } @@ -258,6 +264,12 @@ int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read, /* use the separate sgl after this command */ if (flags & NAND_BAM_NEXT_SGL) { + if (bam_txn->cmd_sgl_pos >= bam_txn->cmd_sgl_nitems) { + dev_err(nandc->dev, "BAM %s array is full\n", + "CMD sgl"); + return -EINVAL; + } + bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start]; bam_ce_size = (bam_txn->bam_ce_pos - bam_txn->bam_ce_start) * @@ -297,10 +309,20 @@ int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read, struct bam_transaction *bam_txn = nandc->bam_txn; if (read) { + if (bam_txn->rx_sgl_pos >= bam_txn->data_sgl_nitems) { + 
dev_err(nandc->dev, "BAM %s array is full\n", "RX sgl"); + return -EINVAL; + } + sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos], vaddr, size); bam_txn->rx_sgl_pos++; } else { + if (bam_txn->tx_sgl_pos >= bam_txn->data_sgl_nitems) { + dev_err(nandc->dev, "BAM %s array is full\n", "TX sgl"); + return -EINVAL; + } + sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos], vaddr, size); bam_txn->tx_sgl_pos++; diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index b8035df8f732..4b99d9c422c3 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -34,7 +34,7 @@ config MTD_NAND_DENALI_DT config MTD_NAND_AMS_DELTA tristate "Amstrad E3 NAND controller" depends on MACH_AMS_DELTA || COMPILE_TEST - default y + default MACH_AMS_DELTA help Support for NAND flash on Amstrad E3 (Delta). @@ -462,6 +462,13 @@ config MTD_NAND_NUVOTON_MA35 Enables support for the NAND controller found on the Nuvoton MA35 series SoCs. +config MTD_NAND_LOONGSON1 + tristate "Loongson1 NAND controller" + depends on LOONGSON1_APB_DMA || COMPILE_TEST + select REGMAP_MMIO + help + Enables support for NAND controller on Loongson1 SoCs. + comment "Misc" config MTD_SM_COMMON diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile index 99e79c448847..711d043ad4f8 100644 --- a/drivers/mtd/nand/raw/Makefile +++ b/drivers/mtd/nand/raw/Makefile @@ -59,6 +59,7 @@ obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rockchip-nand-controller.o obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o obj-$(CONFIG_MTD_NAND_RENESAS) += renesas-nand-controller.o obj-$(CONFIG_MTD_NAND_NUVOTON_MA35) += nuvoton-ma35d1-nand-controller.o +obj-$(CONFIG_MTD_NAND_LOONGSON1) += loongson1-nand-controller.o nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o nand-objs += nand_onfi.o diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index dedcca87defc..84ab4a83cbd6 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -373,7 +373,7 @@ static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc, dma_cookie_t cookie; buf_dma = dma_map_single(nc->dev, buf, len, dir); - if (dma_mapping_error(nc->dev, dev_dma)) { + if (dma_mapping_error(nc->dev, buf_dma)) { dev_err(nc->dev, "Failed to prepare a buffer for DMA access\n"); goto err; diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c index 3c7dee1be21d..0b402823b619 100644 --- a/drivers/mtd/nand/raw/atmel/pmecc.c +++ b/drivers/mtd/nand/raw/atmel/pmecc.c @@ -143,6 +143,7 @@ struct atmel_pmecc_caps { int nstrengths; int el_offset; bool correct_erased_chunks; + bool clk_ctrl; }; struct atmel_pmecc { @@ -843,6 +844,10 @@ static struct atmel_pmecc *atmel_pmecc_create(struct platform_device *pdev, if (IS_ERR(pmecc->regs.errloc)) return ERR_CAST(pmecc->regs.errloc); + /* pmecc data setup time */ + if (caps->clk_ctrl) + writel(PMECC_CLK_133MHZ, pmecc->regs.base + ATMEL_PMECC_CLK); + /* Disable all interrupts before registering the PMECC handler. 
*/ writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR); atmel_pmecc_reset(pmecc); @@ -896,6 +901,7 @@ static struct atmel_pmecc_caps at91sam9g45_caps = { .strengths = atmel_pmecc_strengths, .nstrengths = 5, .el_offset = 0x8c, + .clk_ctrl = true, }; static struct atmel_pmecc_caps sama5d4_caps = { diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c index 6487dfc64258..e532c3535b16 100644 --- a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c +++ b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c @@ -171,6 +171,7 @@ static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct nand_chip *nand_chip, { struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip); u32 code = 0; + int rc; if (cmd == NAND_CMD_NONE) return; @@ -182,7 +183,9 @@ static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct nand_chip *nand_chip, if (cmd != NAND_CMD_RESET) code |= NCTL_CSA; - bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, code); + rc = bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, code); + if (rc) + pr_err("ctl_cmd didn't work with error %d\n", rc); } /* Default nand_select_chip calls cmd_ctrl, which is not used in BCM4706 */ diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 17f6d9723df9..62bdda3be92f 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -65,6 +65,7 @@ module_param(wp_on, int, 0444); #define CMD_PARAMETER_READ 0x0e #define CMD_PARAMETER_CHANGE_COL 0x0f #define CMD_LOW_LEVEL_OP 0x10 +#define CMD_NOT_SUPPORTED 0xff struct brcm_nand_dma_desc { u32 next_desc; @@ -101,7 +102,7 @@ struct brcm_nand_dma_desc { #define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024) #define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY) -#define NAND_POLL_STATUS_TIMEOUT_MS 100 +#define NAND_POLL_STATUS_TIMEOUT_MS 500 #define EDU_CMD_WRITE 0x00 #define EDU_CMD_READ 0x01 @@ -199,6 +200,30 @@ static const u16 flash_dma_regs_v4[] = { [FLASH_DMA_CURRENT_DESC_EXT] = 0x34, }; +/* Native command conversion for legacy controllers (< v5.0) */ +static const u8 native_cmd_conv[] = { + [NAND_CMD_READ0] = CMD_NOT_SUPPORTED, + [NAND_CMD_READ1] = CMD_NOT_SUPPORTED, + [NAND_CMD_RNDOUT] = CMD_PARAMETER_CHANGE_COL, + [NAND_CMD_PAGEPROG] = CMD_NOT_SUPPORTED, + [NAND_CMD_READOOB] = CMD_NOT_SUPPORTED, + [NAND_CMD_ERASE1] = CMD_BLOCK_ERASE, + [NAND_CMD_STATUS] = CMD_NOT_SUPPORTED, + [NAND_CMD_SEQIN] = CMD_NOT_SUPPORTED, + [NAND_CMD_RNDIN] = CMD_NOT_SUPPORTED, + [NAND_CMD_READID] = CMD_DEVICE_ID_READ, + [NAND_CMD_ERASE2] = CMD_NULL, + [NAND_CMD_PARAM] = CMD_PARAMETER_READ, + [NAND_CMD_GET_FEATURES] = CMD_NOT_SUPPORTED, + [NAND_CMD_SET_FEATURES] = CMD_NOT_SUPPORTED, + [NAND_CMD_RESET] = CMD_NOT_SUPPORTED, + [NAND_CMD_READSTART] = CMD_NOT_SUPPORTED, + [NAND_CMD_READCACHESEQ] = CMD_NOT_SUPPORTED, + [NAND_CMD_READCACHEEND] = CMD_NOT_SUPPORTED, + [NAND_CMD_RNDOUTSTART] = CMD_NULL, + [NAND_CMD_CACHEDPROG] = CMD_NOT_SUPPORTED, +}; + /* Controller feature flags */ enum { BRCMNAND_HAS_1K_SECTORS = BIT(0), @@ -237,6 +262,12 @@ struct brcmnand_controller { /* List of NAND hosts (one for each chip-select) */ struct list_head host_list; + /* Functions to be called from exec_op */ + int (*check_instr)(struct nand_chip *chip, + const struct nand_operation *op); + int (*exec_instr)(struct nand_chip *chip, + const struct nand_operation *op); + /* EDU info, per-transaction */ const u16 *edu_offsets; void __iomem *edu_base; @@ -310,9 +341,6 @@ struct brcmnand_host { struct platform_device *pdev; int cs; - 
unsigned int last_cmd; - unsigned int last_byte; - u64 last_addr; struct brcmnand_cfg hwcfg; struct brcmnand_controller *ctrl; }; @@ -2233,14 +2261,11 @@ static int brcmnand_read_page(struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { struct mtd_info *mtd = nand_to_mtd(chip); - struct brcmnand_host *host = nand_get_controller_data(chip); u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL; u64 addr = (u64)page << chip->page_shift; - host->last_addr = addr; - - return brcmnand_read(mtd, chip, host->last_addr, - mtd->writesize >> FC_SHIFT, (u32 *)buf, oob); + return brcmnand_read(mtd, chip, addr, mtd->writesize >> FC_SHIFT, + (u32 *)buf, oob); } static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf, @@ -2252,11 +2277,9 @@ static int brcmnand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int ret; u64 addr = (u64)page << chip->page_shift; - host->last_addr = addr; - brcmnand_set_ecc_enabled(host, 0); - ret = brcmnand_read(mtd, chip, host->last_addr, - mtd->writesize >> FC_SHIFT, (u32 *)buf, oob); + ret = brcmnand_read(mtd, chip, addr, mtd->writesize >> FC_SHIFT, + (u32 *)buf, oob); brcmnand_set_ecc_enabled(host, 1); return ret; } @@ -2363,13 +2386,10 @@ static int brcmnand_write_page(struct nand_chip *chip, const uint8_t *buf, int oob_required, int page) { struct mtd_info *mtd = nand_to_mtd(chip); - struct brcmnand_host *host = nand_get_controller_data(chip); void *oob = oob_required ? chip->oob_poi : NULL; u64 addr = (u64)page << chip->page_shift; - host->last_addr = addr; - - return brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); + return brcmnand_write(mtd, chip, addr, (const u32 *)buf, oob); } static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf, @@ -2381,9 +2401,8 @@ static int brcmnand_write_page_raw(struct nand_chip *chip, const uint8_t *buf, u64 addr = (u64)page << chip->page_shift; int ret = 0; - host->last_addr = addr; brcmnand_set_ecc_enabled(host, 0); - ret = brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); + ret = brcmnand_write(mtd, chip, addr, (const u32 *)buf, oob); brcmnand_set_ecc_enabled(host, 1); return ret; @@ -2490,18 +2509,190 @@ static int brcmnand_op_is_reset(const struct nand_operation *op) return 0; } +static int brcmnand_check_instructions(struct nand_chip *chip, + const struct nand_operation *op) +{ + return 0; +} + +static int brcmnand_exec_instructions(struct nand_chip *chip, + const struct nand_operation *op) +{ + struct brcmnand_host *host = nand_get_controller_data(chip); + unsigned int i; + int ret = 0; + + for (i = 0; i < op->ninstrs; i++) { + ret = brcmnand_exec_instr(host, i, op); + if (ret) + break; + } + + return ret; +} + +static int brcmnand_check_instructions_legacy(struct nand_chip *chip, + const struct nand_operation *op) +{ + const struct nand_op_instr *instr; + unsigned int i; + u8 cmd; + + for (i = 0; i < op->ninstrs; i++) { + instr = &op->instrs[i]; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + cmd = native_cmd_conv[instr->ctx.cmd.opcode]; + if (cmd == CMD_NOT_SUPPORTED) + return -EOPNOTSUPP; + break; + case NAND_OP_ADDR_INSTR: + case NAND_OP_DATA_IN_INSTR: + case NAND_OP_WAITRDY_INSTR: + break; + default: + return -EOPNOTSUPP; + } + } + + return 0; +} + +static int brcmnand_exec_instructions_legacy(struct nand_chip *chip, + const struct nand_operation *op) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct brcmnand_host *host = nand_get_controller_data(chip); + struct brcmnand_controller *ctrl = host->ctrl; + const struct 
nand_op_instr *instr; + unsigned int i, j; + u8 cmd = CMD_NULL, last_cmd = CMD_NULL; + int ret = 0; + u64 last_addr; + + for (i = 0; i < op->ninstrs; i++) { + instr = &op->instrs[i]; + + if (instr->type == NAND_OP_CMD_INSTR) { + cmd = native_cmd_conv[instr->ctx.cmd.opcode]; + if (cmd == CMD_NOT_SUPPORTED) { + dev_err(ctrl->dev, "unsupported cmd=%d\n", + instr->ctx.cmd.opcode); + ret = -EOPNOTSUPP; + break; + } + } else if (instr->type == NAND_OP_ADDR_INSTR) { + u64 addr = 0; + + if (cmd == CMD_NULL) + continue; + + if (instr->ctx.addr.naddrs > 8) { + dev_err(ctrl->dev, "unsupported naddrs=%u\n", + instr->ctx.addr.naddrs); + ret = -EOPNOTSUPP; + break; + } + + for (j = 0; j < instr->ctx.addr.naddrs; j++) + addr |= (instr->ctx.addr.addrs[j]) << (j << 3); + + if (cmd == CMD_BLOCK_ERASE) + addr <<= chip->page_shift; + else if (cmd == CMD_PARAMETER_CHANGE_COL) + addr &= ~((u64)(FC_BYTES - 1)); + + brcmnand_set_cmd_addr(mtd, addr); + brcmnand_send_cmd(host, cmd); + last_addr = addr; + last_cmd = cmd; + cmd = CMD_NULL; + brcmnand_waitfunc(chip); + + if (last_cmd == CMD_PARAMETER_READ || + last_cmd == CMD_PARAMETER_CHANGE_COL) { + /* Copy flash cache word-wise */ + u32 *flash_cache = (u32 *)ctrl->flash_cache; + + brcmnand_soc_data_bus_prepare(ctrl->soc, true); + + /* + * Must cache the FLASH_CACHE now, since changes in + * SECTOR_SIZE_1K may invalidate it + */ + for (j = 0; j < FC_WORDS; j++) + /* + * Flash cache is big endian for parameter pages, at + * least on STB SoCs + */ + flash_cache[j] = be32_to_cpu(brcmnand_read_fc(ctrl, j)); + + brcmnand_soc_data_bus_unprepare(ctrl->soc, true); + } + } else if (instr->type == NAND_OP_DATA_IN_INSTR) { + u8 *in = instr->ctx.data.buf.in; + + if (last_cmd == CMD_DEVICE_ID_READ) { + u32 val; + + if (instr->ctx.data.len > 8) { + dev_err(ctrl->dev, "unsupported len=%u\n", + instr->ctx.data.len); + ret = -EOPNOTSUPP; + break; + } + + for (j = 0; j < instr->ctx.data.len; j++) { + if (j == 0) + val = brcmnand_read_reg(ctrl, BRCMNAND_ID); + else if (j == 4) + val = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT); + + in[j] = (val >> (24 - ((j % 4) << 3))) & 0xff; + } + } else if (last_cmd == CMD_PARAMETER_READ || + last_cmd == CMD_PARAMETER_CHANGE_COL) { + u64 addr; + u32 offs; + + for (j = 0; j < instr->ctx.data.len; j++) { + addr = last_addr + j; + offs = addr & (FC_BYTES - 1); + + if (j > 0 && offs == 0) + nand_change_read_column_op(chip, addr, NULL, 0, + false); + + in[j] = ctrl->flash_cache[offs]; + } + } + } else if (instr->type == NAND_OP_WAITRDY_INSTR) { + ret = bcmnand_ctrl_poll_status(host, NAND_CTRL_RDY, NAND_CTRL_RDY, 0); + if (ret) + break; + } else { + dev_err(ctrl->dev, "unsupported instruction type: %d\n", instr->type); + ret = -EOPNOTSUPP; + break; + } + } + + return ret; +} + static int brcmnand_exec_op(struct nand_chip *chip, const struct nand_operation *op, bool check_only) { struct brcmnand_host *host = nand_get_controller_data(chip); + struct brcmnand_controller *ctrl = host->ctrl; struct mtd_info *mtd = nand_to_mtd(chip); u8 *status; - unsigned int i; int ret = 0; if (check_only) - return 0; + return ctrl->check_instr(chip, op); if (brcmnand_op_is_status(op)) { status = op->instrs[1].ctx.data.buf.in; @@ -2525,11 +2716,7 @@ static int brcmnand_exec_op(struct nand_chip *chip, if (op->deassert_wp) brcmnand_wp(mtd, 0); - for (i = 0; i < op->ninstrs; i++) { - ret = brcmnand_exec_instr(host, i, op); - if (ret) - break; - } + ret = ctrl->exec_instr(chip, op); if (op->deassert_wp) brcmnand_wp(mtd, 1); @@ -3142,6 +3329,15 @@ int brcmnand_probe(struct 
platform_device *pdev, struct brcmnand_soc *soc) if (ret) goto err; + /* Only v5.0+ controllers have low level ops support */ + if (ctrl->nand_version >= 0x0500) { + ctrl->check_instr = brcmnand_check_instructions; + ctrl->exec_instr = brcmnand_exec_instructions; + } else { + ctrl->check_instr = brcmnand_check_instructions_legacy; + ctrl->exec_instr = brcmnand_exec_instructions_legacy; + } + /* * Most chips have this cache at a fixed offset within 'nand' block. * Some must specify this region separately. diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c index 341318024a19..ec95d787001b 100644 --- a/drivers/mtd/nand/raw/cs553x_nand.c +++ b/drivers/mtd/nand/raw/cs553x_nand.c @@ -351,20 +351,20 @@ static int __init cs553x_init(void) return -ENXIO; /* If it doesn't have the CS553[56], abort */ - rdmsrl(MSR_DIVIL_GLD_CAP, val); + rdmsrq(MSR_DIVIL_GLD_CAP, val); val &= ~0xFFULL; if (val != CAP_CS5535 && val != CAP_CS5536) return -ENXIO; /* If it doesn't have the NAND controller enabled, abort */ - rdmsrl(MSR_DIVIL_BALL_OPTS, val); + rdmsrq(MSR_DIVIL_BALL_OPTS, val); if (val & PIN_OPT_IDE) { pr_info("CS553x NAND controller: Flash I/O not enabled in MSR_DIVIL_BALL_OPTS.\n"); return -ENXIO; } for (i = 0; i < NR_CS553X_CONTROLLERS; i++) { - rdmsrl(MSR_DIVIL_LBAR_FLSH0 + i, val); + rdmsrq(MSR_DIVIL_LBAR_FLSH0 + i, val); if ((val & (FLSH_LBAR_EN|FLSH_NOR_NAND)) == (FLSH_LBAR_EN|FLSH_NOR_NAND)) err = cs553x_init_one(i, !!(val & FLSH_MEM_IO), val & 0xFFFFFFFF); diff --git a/drivers/mtd/nand/raw/denali_pci.c b/drivers/mtd/nand/raw/denali_pci.c index e22094e39546..97fa32d73441 100644 --- a/drivers/mtd/nand/raw/denali_pci.c +++ b/drivers/mtd/nand/raw/denali_pci.c @@ -68,7 +68,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) denali->clk_rate = 50000000; /* 50 MHz */ denali->clk_x_rate = 200000000; /* 200 MHz */ - ret = pci_request_regions(dev, DENALI_NAND_NAME); + ret = pcim_request_all_regions(dev, DENALI_NAND_NAME); if (ret) { dev_err(&dev->dev, "Spectra: Unable to request memory regions\n"); return ret; @@ -77,20 +77,18 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) denali->reg = devm_ioremap(denali->dev, csr_base, csr_len); if (!denali->reg) { dev_err(&dev->dev, "Spectra: Unable to remap memory region\n"); - ret = -ENOMEM; - goto regions_release; + return -ENOMEM; } denali->host = devm_ioremap(denali->dev, mem_base, mem_len); if (!denali->host) { dev_err(&dev->dev, "Spectra: ioremap failed!"); - ret = -ENOMEM; - goto regions_release; + return -ENOMEM; } ret = denali_init(denali); if (ret) - goto regions_release; + return ret; nsels = denali->nbanks; @@ -118,8 +116,6 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) out_remove_denali: denali_remove(denali); -regions_release: - pci_release_regions(dev); return ret; } @@ -127,7 +123,6 @@ static void denali_pci_remove(struct pci_dev *dev) { struct denali_controller *denali = pci_get_drvdata(dev); - pci_release_regions(dev); denali_remove(denali); } diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index d76802944453..f4e68008ea03 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -17,6 +17,7 @@ #include <linux/pm_runtime.h> #include <linux/pinctrl/consumer.h> #include <linux/dma/mxs-dma.h> +#include <linux/string_choices.h> #include "gpmi-nand.h" #include "gpmi-regs.h" #include "bch-regs.h" @@ -2319,8 +2320,8 
@@ static int gpmi_nand_attach_chip(struct nand_chip *chip) "fsl,no-blockmark-swap")) this->swap_block_mark = false; } - dev_dbg(this->dev, "Blockmark swapping %sabled\n", - this->swap_block_mark ? "en" : "dis"); + dev_dbg(this->dev, "Blockmark swapping %s\n", + str_enabled_disabled(this->swap_block_mark)); ret = gpmi_init_last(this); if (ret) diff --git a/drivers/mtd/nand/raw/loongson1-nand-controller.c b/drivers/mtd/nand/raw/loongson1-nand-controller.c new file mode 100644 index 000000000000..ef8e4f9ce287 --- /dev/null +++ b/drivers/mtd/nand/raw/loongson1-nand-controller.c @@ -0,0 +1,836 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * NAND Controller Driver for Loongson-1 SoC + * + * Copyright (C) 2015-2025 Keguang Zhang <keguang.zhang@gmail.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/iopoll.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/rawnand.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/sizes.h> + +/* Loongson-1 NAND Controller Registers */ +#define LS1X_NAND_CMD 0x0 +#define LS1X_NAND_ADDR1 0x4 +#define LS1X_NAND_ADDR2 0x8 +#define LS1X_NAND_TIMING 0xc +#define LS1X_NAND_IDL 0x10 +#define LS1X_NAND_IDH_STATUS 0x14 +#define LS1X_NAND_PARAM 0x18 +#define LS1X_NAND_OP_NUM 0x1c + +/* NAND Command Register Bits */ +#define LS1X_NAND_CMD_OP_DONE BIT(10) +#define LS1X_NAND_CMD_OP_SPARE BIT(9) +#define LS1X_NAND_CMD_OP_MAIN BIT(8) +#define LS1X_NAND_CMD_STATUS BIT(7) +#define LS1X_NAND_CMD_RESET BIT(6) +#define LS1X_NAND_CMD_READID BIT(5) +#define LS1X_NAND_CMD_BLOCKS_ERASE BIT(4) +#define LS1X_NAND_CMD_ERASE BIT(3) +#define LS1X_NAND_CMD_WRITE BIT(2) +#define LS1X_NAND_CMD_READ BIT(1) +#define LS1X_NAND_CMD_VALID BIT(0) + +#define LS1X_NAND_WAIT_CYCLE_MASK GENMASK(7, 0) +#define LS1X_NAND_HOLD_CYCLE_MASK GENMASK(15, 8) +#define LS1X_NAND_CELL_SIZE_MASK GENMASK(11, 8) + +#define LS1X_NAND_COL_ADDR_CYC 2U +#define LS1X_NAND_MAX_ADDR_CYC 5U + +#define BITS_PER_WORD (4 * BITS_PER_BYTE) + +struct ls1x_nand_host; + +struct ls1x_nand_op { + char addrs[LS1X_NAND_MAX_ADDR_CYC]; + unsigned int naddrs; + unsigned int addrs_offset; + unsigned int aligned_offset; + unsigned int cmd_reg; + unsigned int row_start; + unsigned int rdy_timeout_ms; + unsigned int orig_len; + bool is_readid; + bool is_erase; + bool is_write; + bool is_read; + bool is_change_column; + size_t len; + char *buf; +}; + +struct ls1x_nand_data { + unsigned int status_field; + unsigned int op_scope_field; + unsigned int hold_cycle; + unsigned int wait_cycle; + void (*set_addr)(struct ls1x_nand_host *host, struct ls1x_nand_op *op); +}; + +struct ls1x_nand_host { + struct device *dev; + struct nand_chip chip; + struct nand_controller controller; + const struct ls1x_nand_data *data; + void __iomem *reg_base; + struct regmap *regmap; + /* DMA Engine stuff */ + dma_addr_t dma_base; + struct dma_chan *dma_chan; + dma_cookie_t dma_cookie; + struct completion dma_complete; +}; + +static const struct regmap_config ls1x_nand_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + +static int ls1x_nand_op_cmd_mapping(struct nand_chip *chip, struct ls1x_nand_op *op, u8 opcode) +{ + struct ls1x_nand_host *host = nand_get_controller_data(chip); + + op->row_start = chip->page_shift + 1; + + /* The controller abstracts the following NAND operations. 
*/ + switch (opcode) { + case NAND_CMD_STATUS: + op->cmd_reg = LS1X_NAND_CMD_STATUS; + break; + case NAND_CMD_RESET: + op->cmd_reg = LS1X_NAND_CMD_RESET; + break; + case NAND_CMD_READID: + op->is_readid = true; + op->cmd_reg = LS1X_NAND_CMD_READID; + break; + case NAND_CMD_ERASE1: + op->is_erase = true; + op->addrs_offset = LS1X_NAND_COL_ADDR_CYC; + break; + case NAND_CMD_ERASE2: + if (!op->is_erase) + return -EOPNOTSUPP; + /* During erasing, row_start differs from the default value. */ + op->row_start = chip->page_shift; + op->cmd_reg = LS1X_NAND_CMD_ERASE; + break; + case NAND_CMD_SEQIN: + op->is_write = true; + break; + case NAND_CMD_PAGEPROG: + if (!op->is_write) + return -EOPNOTSUPP; + op->cmd_reg = LS1X_NAND_CMD_WRITE; + break; + case NAND_CMD_READ0: + op->is_read = true; + break; + case NAND_CMD_READSTART: + if (!op->is_read) + return -EOPNOTSUPP; + op->cmd_reg = LS1X_NAND_CMD_READ; + break; + case NAND_CMD_RNDOUT: + op->is_change_column = true; + break; + case NAND_CMD_RNDOUTSTART: + if (!op->is_change_column) + return -EOPNOTSUPP; + op->cmd_reg = LS1X_NAND_CMD_READ; + break; + default: + dev_dbg(host->dev, "unsupported opcode: %u\n", opcode); + return -EOPNOTSUPP; + } + + return 0; +} + +static int ls1x_nand_parse_instructions(struct nand_chip *chip, + const struct nand_subop *subop, struct ls1x_nand_op *op) +{ + unsigned int op_id; + int ret; + + for (op_id = 0; op_id < subop->ninstrs; op_id++) { + const struct nand_op_instr *instr = &subop->instrs[op_id]; + unsigned int offset, naddrs; + const u8 *addrs; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + ret = ls1x_nand_op_cmd_mapping(chip, op, instr->ctx.cmd.opcode); + if (ret < 0) + return ret; + + break; + case NAND_OP_ADDR_INSTR: + naddrs = nand_subop_get_num_addr_cyc(subop, op_id); + if (naddrs > LS1X_NAND_MAX_ADDR_CYC) + return -EOPNOTSUPP; + op->naddrs = naddrs; + offset = nand_subop_get_addr_start_off(subop, op_id); + addrs = &instr->ctx.addr.addrs[offset]; + memcpy(op->addrs + op->addrs_offset, addrs, naddrs); + break; + case NAND_OP_DATA_IN_INSTR: + case NAND_OP_DATA_OUT_INSTR: + offset = nand_subop_get_data_start_off(subop, op_id); + op->orig_len = nand_subop_get_data_len(subop, op_id); + if (instr->type == NAND_OP_DATA_IN_INSTR) + op->buf = instr->ctx.data.buf.in + offset; + else if (instr->type == NAND_OP_DATA_OUT_INSTR) + op->buf = (void *)instr->ctx.data.buf.out + offset; + + break; + case NAND_OP_WAITRDY_INSTR: + op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms; + break; + default: + break; + } + } + + return 0; +} + +static void ls1b_nand_set_addr(struct ls1x_nand_host *host, struct ls1x_nand_op *op) +{ + struct nand_chip *chip = &host->chip; + int i; + + for (i = 0; i < LS1X_NAND_MAX_ADDR_CYC; i++) { + int shift, mask, val; + + if (i < LS1X_NAND_COL_ADDR_CYC) { + shift = i * BITS_PER_BYTE; + mask = (u32)0xff << shift; + mask &= GENMASK(chip->page_shift, 0); + val = (u32)op->addrs[i] << shift; + regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val); + } else if (!op->is_change_column) { + shift = op->row_start + (i - LS1X_NAND_COL_ADDR_CYC) * BITS_PER_BYTE; + mask = (u32)0xff << shift; + val = (u32)op->addrs[i] << shift; + regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val); + + if (i == 4) { + mask = (u32)0xff >> (BITS_PER_WORD - shift); + val = (u32)op->addrs[i] >> (BITS_PER_WORD - shift); + regmap_update_bits(host->regmap, LS1X_NAND_ADDR2, mask, val); + } + } + } +} + +static void ls1c_nand_set_addr(struct ls1x_nand_host *host, struct ls1x_nand_op *op) +{ + int i; + + for (i = 0; i < 
LS1X_NAND_MAX_ADDR_CYC; i++) { + int shift, mask, val; + + if (i < LS1X_NAND_COL_ADDR_CYC) { + shift = i * BITS_PER_BYTE; + mask = (u32)0xff << shift; + val = (u32)op->addrs[i] << shift; + regmap_update_bits(host->regmap, LS1X_NAND_ADDR1, mask, val); + } else if (!op->is_change_column) { + shift = (i - LS1X_NAND_COL_ADDR_CYC) * BITS_PER_BYTE; + mask = (u32)0xff << shift; + val = (u32)op->addrs[i] << shift; + regmap_update_bits(host->regmap, LS1X_NAND_ADDR2, mask, val); + } + } +} + +static void ls1x_nand_trigger_op(struct ls1x_nand_host *host, struct ls1x_nand_op *op) +{ + struct nand_chip *chip = &host->chip; + struct mtd_info *mtd = nand_to_mtd(chip); + int col0 = op->addrs[0]; + short col; + + if (!IS_ALIGNED(col0, chip->buf_align)) { + col0 = ALIGN_DOWN(op->addrs[0], chip->buf_align); + op->aligned_offset = op->addrs[0] - col0; + op->addrs[0] = col0; + } + + if (host->data->set_addr) + host->data->set_addr(host, op); + + /* set operation length */ + if (op->is_write || op->is_read || op->is_change_column) + op->len = ALIGN(op->orig_len + op->aligned_offset, chip->buf_align); + else if (op->is_erase) + op->len = 1; + else + op->len = op->orig_len; + + writel(op->len, host->reg_base + LS1X_NAND_OP_NUM); + + /* set operation area and scope */ + col = op->addrs[1] << BITS_PER_BYTE | op->addrs[0]; + if (op->orig_len && !op->is_readid) { + unsigned int op_scope = 0; + + if (col < mtd->writesize) { + op->cmd_reg |= LS1X_NAND_CMD_OP_MAIN; + op_scope = mtd->writesize; + } + + op->cmd_reg |= LS1X_NAND_CMD_OP_SPARE; + op_scope += mtd->oobsize; + + op_scope <<= __ffs(host->data->op_scope_field); + regmap_update_bits(host->regmap, LS1X_NAND_PARAM, + host->data->op_scope_field, op_scope); + } + + /* set command */ + writel(op->cmd_reg, host->reg_base + LS1X_NAND_CMD); + + /* trigger operation */ + regmap_write_bits(host->regmap, LS1X_NAND_CMD, LS1X_NAND_CMD_VALID, LS1X_NAND_CMD_VALID); +} + +static int ls1x_nand_wait_for_op_done(struct ls1x_nand_host *host, struct ls1x_nand_op *op) +{ + unsigned int val; + int ret = 0; + + if (op->rdy_timeout_ms) { + ret = regmap_read_poll_timeout(host->regmap, LS1X_NAND_CMD, + val, val & LS1X_NAND_CMD_OP_DONE, + 0, op->rdy_timeout_ms * MSEC_PER_SEC); + if (ret) + dev_err(host->dev, "operation failed\n"); + } + + return ret; +} + +static void ls1x_nand_dma_callback(void *data) +{ + struct ls1x_nand_host *host = (struct ls1x_nand_host *)data; + struct dma_chan *chan = host->dma_chan; + struct device *dev = chan->device->dev; + enum dma_status status; + + status = dmaengine_tx_status(chan, host->dma_cookie, NULL); + if (likely(status == DMA_COMPLETE)) { + dev_dbg(dev, "DMA complete with cookie=%d\n", host->dma_cookie); + complete(&host->dma_complete); + } else { + dev_err(dev, "DMA error with cookie=%d\n", host->dma_cookie); + } +} + +static int ls1x_nand_dma_transfer(struct ls1x_nand_host *host, struct ls1x_nand_op *op) +{ + struct nand_chip *chip = &host->chip; + struct dma_chan *chan = host->dma_chan; + struct device *dev = chan->device->dev; + struct dma_async_tx_descriptor *desc; + enum dma_data_direction data_dir = op->is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; + enum dma_transfer_direction xfer_dir = op->is_write ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; + void *buf = op->buf; + char *dma_buf = NULL; + dma_addr_t dma_addr; + int ret; + + if (IS_ALIGNED((uintptr_t)buf, chip->buf_align) && + IS_ALIGNED(op->orig_len, chip->buf_align)) { + dma_addr = dma_map_single(dev, buf, op->orig_len, data_dir); + if (dma_mapping_error(dev, dma_addr)) { + dev_err(dev, "failed to map DMA buffer\n"); + return -ENXIO; + } + } else if (!op->is_write) { + dma_buf = dma_alloc_coherent(dev, op->len, &dma_addr, GFP_KERNEL); + if (!dma_buf) + return -ENOMEM; + } else { + dev_err(dev, "subpage writing not supported\n"); + return -EOPNOTSUPP; + } + + desc = dmaengine_prep_slave_single(chan, dma_addr, op->len, xfer_dir, DMA_PREP_INTERRUPT); + if (!desc) { + dev_err(dev, "failed to prepare DMA descriptor\n"); + ret = -ENOMEM; + goto err; + } + desc->callback = ls1x_nand_dma_callback; + desc->callback_param = host; + + host->dma_cookie = dmaengine_submit(desc); + ret = dma_submit_error(host->dma_cookie); + if (ret) { + dev_err(dev, "failed to submit DMA descriptor\n"); + goto err; + } + + dev_dbg(dev, "issue DMA with cookie=%d\n", host->dma_cookie); + dma_async_issue_pending(chan); + + if (!wait_for_completion_timeout(&host->dma_complete, msecs_to_jiffies(1000))) { + dmaengine_terminate_sync(chan); + reinit_completion(&host->dma_complete); + ret = -ETIMEDOUT; + goto err; + } + + if (dma_buf) + memcpy(buf, dma_buf + op->aligned_offset, op->orig_len); +err: + if (dma_buf) + dma_free_coherent(dev, op->len, dma_buf, dma_addr); + else + dma_unmap_single(dev, dma_addr, op->orig_len, data_dir); + + return ret; +} + +static int ls1x_nand_data_type_exec(struct nand_chip *chip, const struct nand_subop *subop) +{ + struct ls1x_nand_host *host = nand_get_controller_data(chip); + struct ls1x_nand_op op = {}; + int ret; + + ret = ls1x_nand_parse_instructions(chip, subop, &op); + if (ret) + return ret; + + ls1x_nand_trigger_op(host, &op); + + ret = ls1x_nand_dma_transfer(host, &op); + if (ret) + return ret; + + return ls1x_nand_wait_for_op_done(host, &op); +} + +static int ls1x_nand_misc_type_exec(struct nand_chip *chip, + const struct nand_subop *subop, struct ls1x_nand_op *op) +{ + struct ls1x_nand_host *host = nand_get_controller_data(chip); + int ret; + + ret = ls1x_nand_parse_instructions(chip, subop, op); + if (ret) + return ret; + + ls1x_nand_trigger_op(host, op); + + return ls1x_nand_wait_for_op_done(host, op); +} + +static int ls1x_nand_zerolen_type_exec(struct nand_chip *chip, const struct nand_subop *subop) +{ + struct ls1x_nand_op op = {}; + + return ls1x_nand_misc_type_exec(chip, subop, &op); +} + +static int ls1x_nand_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop) +{ + struct ls1x_nand_host *host = nand_get_controller_data(chip); + struct ls1x_nand_op op = {}; + int i, ret; + union { + char ids[5]; + struct { + int idl; + char idh; + }; + } nand_id; + + ret = ls1x_nand_misc_type_exec(chip, subop, &op); + if (ret) + return ret; + + nand_id.idl = readl(host->reg_base + LS1X_NAND_IDL); + nand_id.idh = readb(host->reg_base + LS1X_NAND_IDH_STATUS); + + for (i = 0; i < min(sizeof(nand_id.ids), op.orig_len); i++) + op.buf[i] = nand_id.ids[sizeof(nand_id.ids) - 1 - i]; + + return ret; +} + +static int ls1x_nand_read_status_type_exec(struct nand_chip *chip, const struct nand_subop *subop) +{ + struct ls1x_nand_host *host = nand_get_controller_data(chip); + struct ls1x_nand_op op = {}; + int val, ret; + + ret = ls1x_nand_misc_type_exec(chip, subop, &op); + if (ret) + return ret; + + val = readl(host->reg_base + 
LS1X_NAND_IDH_STATUS); + val &= ~host->data->status_field; + op.buf[0] = val << ffs(host->data->status_field); + + return ret; +} + +static const struct nand_op_parser ls1x_nand_op_parser = NAND_OP_PARSER( + NAND_OP_PARSER_PATTERN( + ls1x_nand_read_id_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)), + NAND_OP_PARSER_PATTERN( + ls1x_nand_read_status_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)), + NAND_OP_PARSER_PATTERN( + ls1x_nand_zerolen_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + NAND_OP_PARSER_PATTERN( + ls1x_nand_zerolen_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), + NAND_OP_PARSER_PATTERN( + ls1x_nand_data_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)), + NAND_OP_PARSER_PATTERN( + ls1x_nand_data_type_exec, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, LS1X_NAND_MAX_ADDR_CYC), + NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0), + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)), + ); + +static int ls1x_nand_is_valid_cmd(u8 opcode) +{ + if (opcode == NAND_CMD_STATUS || opcode == NAND_CMD_RESET || opcode == NAND_CMD_READID) + return 0; + + return -EOPNOTSUPP; +} + +static int ls1x_nand_is_valid_cmd_seq(u8 opcode1, u8 opcode2) +{ + if (opcode1 == NAND_CMD_RNDOUT && opcode2 == NAND_CMD_RNDOUTSTART) + return 0; + + if (opcode1 == NAND_CMD_READ0 && opcode2 == NAND_CMD_READSTART) + return 0; + + if (opcode1 == NAND_CMD_ERASE1 && opcode2 == NAND_CMD_ERASE2) + return 0; + + if (opcode1 == NAND_CMD_SEQIN && opcode2 == NAND_CMD_PAGEPROG) + return 0; + + return -EOPNOTSUPP; +} + +static int ls1x_nand_check_op(struct nand_chip *chip, const struct nand_operation *op) +{ + const struct nand_op_instr *instr1 = NULL, *instr2 = NULL; + int op_id; + + for (op_id = 0; op_id < op->ninstrs; op_id++) { + const struct nand_op_instr *instr = &op->instrs[op_id]; + + if (instr->type == NAND_OP_CMD_INSTR) { + if (!instr1) + instr1 = instr; + else if (!instr2) + instr2 = instr; + else + break; + } + } + + if (!instr1) + return -EOPNOTSUPP; + + if (!instr2) + return ls1x_nand_is_valid_cmd(instr1->ctx.cmd.opcode); + + return ls1x_nand_is_valid_cmd_seq(instr1->ctx.cmd.opcode, instr2->ctx.cmd.opcode); +} + +static int ls1x_nand_exec_op(struct nand_chip *chip, + const struct nand_operation *op, bool check_only) +{ + if (check_only) + return ls1x_nand_check_op(chip, op); + + return nand_op_parser_exec_op(chip, &ls1x_nand_op_parser, op, check_only); +} + +static int ls1x_nand_attach_chip(struct nand_chip *chip) +{ + struct ls1x_nand_host *host = nand_get_controller_data(chip); + u64 chipsize = nanddev_target_size(&chip->base); + int cell_size = 0; + + switch (chipsize) { + case SZ_128M: + cell_size = 0x0; + break; + case SZ_256M: + cell_size = 0x1; + break; + case SZ_512M: + cell_size = 0x2; + break; + case SZ_1G: + cell_size = 0x3; + break; + case SZ_2G: + cell_size = 0x4; + break; + case SZ_4G: + cell_size = 0x5; + break; + case SZ_8G: + cell_size = 0x6; + break; + case SZ_16G: + cell_size = 0x7; + break; + default: + 
dev_err(host->dev, "unsupported chip size: %llu MB\n", chipsize); + return -EINVAL; + } + + switch (chip->ecc.engine_type) { + case NAND_ECC_ENGINE_TYPE_NONE: + break; + case NAND_ECC_ENGINE_TYPE_SOFT: + break; + default: + return -EINVAL; + } + + /* set cell size */ + regmap_update_bits(host->regmap, LS1X_NAND_PARAM, LS1X_NAND_CELL_SIZE_MASK, + FIELD_PREP(LS1X_NAND_CELL_SIZE_MASK, cell_size)); + + regmap_update_bits(host->regmap, LS1X_NAND_TIMING, LS1X_NAND_HOLD_CYCLE_MASK, + FIELD_PREP(LS1X_NAND_HOLD_CYCLE_MASK, host->data->hold_cycle)); + + regmap_update_bits(host->regmap, LS1X_NAND_TIMING, LS1X_NAND_WAIT_CYCLE_MASK, + FIELD_PREP(LS1X_NAND_WAIT_CYCLE_MASK, host->data->wait_cycle)); + + chip->ecc.read_page_raw = nand_monolithic_read_page_raw; + chip->ecc.write_page_raw = nand_monolithic_write_page_raw; + + return 0; +} + +static const struct nand_controller_ops ls1x_nand_controller_ops = { + .exec_op = ls1x_nand_exec_op, + .attach_chip = ls1x_nand_attach_chip, +}; + +static void ls1x_nand_controller_cleanup(struct ls1x_nand_host *host) +{ + if (host->dma_chan) + dma_release_channel(host->dma_chan); +} + +static int ls1x_nand_controller_init(struct ls1x_nand_host *host) +{ + struct device *dev = host->dev; + struct dma_chan *chan; + struct dma_slave_config cfg = {}; + int ret; + + host->regmap = devm_regmap_init_mmio(dev, host->reg_base, &ls1x_nand_regmap_config); + if (IS_ERR(host->regmap)) + return dev_err_probe(dev, PTR_ERR(host->regmap), "failed to init regmap\n"); + + chan = dma_request_chan(dev, "rxtx"); + if (IS_ERR(chan)) + return dev_err_probe(dev, PTR_ERR(chan), "failed to request DMA channel\n"); + host->dma_chan = chan; + + cfg.src_addr = host->dma_base; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr = host->dma_base; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + ret = dmaengine_slave_config(host->dma_chan, &cfg); + if (ret) + return dev_err_probe(dev, ret, "failed to config DMA channel\n"); + + init_completion(&host->dma_complete); + + return 0; +} + +static int ls1x_nand_chip_init(struct ls1x_nand_host *host) +{ + struct device *dev = host->dev; + int nchips = of_get_child_count(dev->of_node); + struct device_node *chip_np; + struct nand_chip *chip = &host->chip; + struct mtd_info *mtd = nand_to_mtd(chip); + int ret; + + if (nchips != 1) + return dev_err_probe(dev, -EINVAL, "Currently one NAND chip supported\n"); + + chip_np = of_get_next_child(dev->of_node, NULL); + if (!chip_np) + return dev_err_probe(dev, -ENODEV, "failed to get child node for NAND chip\n"); + + nand_set_flash_node(chip, chip_np); + of_node_put(chip_np); + if (!mtd->name) + return dev_err_probe(dev, -EINVAL, "Missing MTD label\n"); + + nand_set_controller_data(chip, host); + chip->controller = &host->controller; + chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA | NAND_BROKEN_XD; + chip->buf_align = 16; + mtd->dev.parent = dev; + mtd->owner = THIS_MODULE; + + ret = nand_scan(chip, 1); + if (ret) + return dev_err_probe(dev, ret, "failed to scan NAND chip\n"); + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { + nand_cleanup(chip); + return dev_err_probe(dev, ret, "failed to register MTD device\n"); + } + + return 0; +} + +static int ls1x_nand_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct ls1x_nand_data *data; + struct ls1x_nand_host *host; + struct resource *res; + int ret; + + data = of_device_get_match_data(dev); + if (!data) + return -ENODEV; + + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + if (!host) + return 
-ENOMEM; + + host->reg_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(host->reg_base)) + return PTR_ERR(host->reg_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-dma"); + if (!res) + return dev_err_probe(dev, -EINVAL, "Missing 'nand-dma' in reg-names property\n"); + + host->dma_base = dma_map_resource(dev, res->start, resource_size(res), + DMA_BIDIRECTIONAL, 0); + if (dma_mapping_error(dev, host->dma_base)) + return -ENXIO; + + host->dev = dev; + host->data = data; + host->controller.ops = &ls1x_nand_controller_ops; + + nand_controller_init(&host->controller); + + ret = ls1x_nand_controller_init(host); + if (ret) + goto err; + + ret = ls1x_nand_chip_init(host); + if (ret) + goto err; + + platform_set_drvdata(pdev, host); + + return 0; +err: + ls1x_nand_controller_cleanup(host); + + return ret; +} + +static void ls1x_nand_remove(struct platform_device *pdev) +{ + struct ls1x_nand_host *host = platform_get_drvdata(pdev); + struct nand_chip *chip = &host->chip; + int ret; + + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + ls1x_nand_controller_cleanup(host); +} + +static const struct ls1x_nand_data ls1b_nand_data = { + .status_field = GENMASK(15, 8), + .hold_cycle = 0x2, + .wait_cycle = 0xc, + .set_addr = ls1b_nand_set_addr, +}; + +static const struct ls1x_nand_data ls1c_nand_data = { + .status_field = GENMASK(23, 16), + .op_scope_field = GENMASK(29, 16), + .hold_cycle = 0x2, + .wait_cycle = 0xc, + .set_addr = ls1c_nand_set_addr, +}; + +static const struct of_device_id ls1x_nand_match[] = { + { + .compatible = "loongson,ls1b-nand-controller", + .data = &ls1b_nand_data, + }, + { + .compatible = "loongson,ls1c-nand-controller", + .data = &ls1c_nand_data, + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ls1x_nand_match); + +static struct platform_driver ls1x_nand_driver = { + .probe = ls1x_nand_probe, + .remove = ls1x_nand_remove, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = ls1x_nand_match, + }, +}; + +module_platform_driver(ls1x_nand_driver); + +MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>"); +MODULE_DESCRIPTION("Loongson-1 NAND Controller Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 53e16d39af4b..13e4060bd1b6 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -1833,7 +1833,7 @@ int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf, /* READ_ID data bytes are received twice in NV-DDR mode */ if (len && nand_interface_is_nvddr(conf)) { - ddrbuf = kzalloc(len * 2, GFP_KERNEL); + ddrbuf = kcalloc(2, len, GFP_KERNEL); if (!ddrbuf) return -ENOMEM; @@ -2203,7 +2203,7 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len, * twice. 
*/ if (force_8bit && nand_interface_is_nvddr(conf)) { - ddrbuf = kzalloc(len * 2, GFP_KERNEL); + ddrbuf = kcalloc(2, len, GFP_KERNEL); if (!ddrbuf) return -ENOMEM; diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index 6720b547892b..1003cf118c01 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -165,9 +165,9 @@ static void nandc_set_read_loc_first(struct nand_chip *chip, { struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); __le32 locreg_val; - u32 val = (((cw_offset) << READ_LOCATION_OFFSET) | - ((read_size) << READ_LOCATION_SIZE) | - ((is_last_read_loc) << READ_LOCATION_LAST)); + u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) | + FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) | + FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc); locreg_val = cpu_to_le32(val); @@ -197,9 +197,9 @@ static void nandc_set_read_loc_last(struct nand_chip *chip, { struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); __le32 locreg_val; - u32 val = (((cw_offset) << READ_LOCATION_OFFSET) | - ((read_size) << READ_LOCATION_SIZE) | - ((is_last_read_loc) << READ_LOCATION_LAST)); + u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) | + FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) | + FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc); locreg_val = cpu_to_le32(val); @@ -271,14 +271,14 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i } if (host->use_ecc) { - cfg0 = cpu_to_le32((host->cfg0 & ~(7U << CW_PER_PAGE)) | - (num_cw - 1) << CW_PER_PAGE); + cfg0 = cpu_to_le32((host->cfg0 & ~CW_PER_PAGE_MASK) | + FIELD_PREP(CW_PER_PAGE_MASK, (num_cw - 1))); cfg1 = cpu_to_le32(host->cfg1); ecc_bch_cfg = cpu_to_le32(host->ecc_bch_cfg); } else { - cfg0 = cpu_to_le32((host->cfg0_raw & ~(7U << CW_PER_PAGE)) | - (num_cw - 1) << CW_PER_PAGE); + cfg0 = cpu_to_le32((host->cfg0_raw & ~CW_PER_PAGE_MASK) | + FIELD_PREP(CW_PER_PAGE_MASK, (num_cw - 1))); cfg1 = cpu_to_le32(host->cfg1_raw); ecc_bch_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE); @@ -882,12 +882,12 @@ static void qcom_nandc_codeword_fixup(struct qcom_nand_host *host, int page) host->bbm_size - host->cw_data; host->cfg0 &= ~(SPARE_SIZE_BYTES_MASK | UD_SIZE_BYTES_MASK); - host->cfg0 |= host->spare_bytes << SPARE_SIZE_BYTES | - host->cw_data << UD_SIZE_BYTES; + host->cfg0 |= FIELD_PREP(SPARE_SIZE_BYTES_MASK, host->spare_bytes) | + FIELD_PREP(UD_SIZE_BYTES_MASK, host->cw_data); host->ecc_bch_cfg &= ~ECC_NUM_DATA_BYTES_MASK; - host->ecc_bch_cfg |= host->cw_data << ECC_NUM_DATA_BYTES; - host->ecc_buf_cfg = (host->cw_data - 1) << NUM_STEPS; + host->ecc_bch_cfg |= FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, host->cw_data); + host->ecc_buf_cfg = FIELD_PREP(NUM_STEPS_MASK, host->cw_data - 1); } /* implements ecc->read_page() */ @@ -1531,7 +1531,7 @@ static int qcom_nand_attach_chip(struct nand_chip *chip) FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, host->ecc_bytes_hw); if (!nandc->props->qpic_version2) - host->ecc_buf_cfg = 0x203 << NUM_STEPS; + host->ecc_buf_cfg = FIELD_PREP(NUM_STEPS_MASK, 0x203); host->clrflashstatus = FS_READY_BSY_N; host->clrreadstatus = 0xc0; @@ -1817,7 +1817,7 @@ static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_sub q_op.cmd_reg |= cpu_to_le32(PAGE_ACC | LAST_PAGE); nandc->regs->addr0 = q_op.addr1_reg; nandc->regs->addr1 = q_op.addr2_reg; - nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~(7 << CW_PER_PAGE)); + nandc->regs->cfg0 = cpu_to_le32(host->cfg0_raw & ~CW_PER_PAGE_MASK); 
nandc->regs->cfg1 = cpu_to_le32(host->cfg1_raw); instrs = 3; } else if (q_op.cmd_reg != cpu_to_le32(OP_RESET_DEVICE)) { @@ -1863,7 +1863,12 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_ const struct nand_op_instr *instr = NULL; unsigned int op_id = 0; unsigned int len = 0; - int ret; + int ret, reg_base; + + reg_base = NAND_READ_LOCATION_0; + + if (nandc->props->qpic_version2) + reg_base = NAND_READ_LOCATION_LAST_CW_0; ret = qcom_parse_instructions(chip, subop, &q_op); if (ret) @@ -1900,8 +1905,8 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_ /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */ if (!nandc->props->qpic_version2) { nandc->regs->vld = cpu_to_le32((nandc->vld & ~READ_START_VLD)); - nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~(0xFF << READ_ADDR)) - | NAND_CMD_PARAM << READ_ADDR); + nandc->regs->cmd1 = cpu_to_le32((nandc->cmd1 & ~READ_ADDR_MASK) | + FIELD_PREP(READ_ADDR_MASK, NAND_CMD_PARAM)); } nandc->regs->exec = cpu_to_le32(1); @@ -1915,14 +1920,17 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_ op_id = q_op.data_instr_idx; len = nand_subop_get_data_len(subop, op_id); - nandc_set_read_loc(chip, 0, 0, 0, len, 1); + if (nandc->props->qpic_version2) + nandc_set_read_loc_last(chip, reg_base, 0, len, 1); + else + nandc_set_read_loc_first(chip, reg_base, 0, len, 1); if (!nandc->props->qpic_version2) { qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0); qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL); } - nandc->buf_count = len; + nandc->buf_count = 512; memset(nandc->data_buffer, 0xff, nandc->buf_count); config_nand_single_cw_page_read(chip, false, 0); @@ -2360,6 +2368,7 @@ static const struct qcom_nandc_props ipq806x_nandc_props = { .supports_bam = false, .use_codeword_fixup = true, .dev_cmd_reg_start = 0x0, + .bam_offset = 0x30000, }; static const struct qcom_nandc_props ipq4019_nandc_props = { @@ -2367,6 +2376,7 @@ static const struct qcom_nandc_props ipq4019_nandc_props = { .supports_bam = true, .nandc_part_of_qpic = true, .dev_cmd_reg_start = 0x0, + .bam_offset = 0x30000, }; static const struct qcom_nandc_props ipq8074_nandc_props = { @@ -2374,6 +2384,7 @@ static const struct qcom_nandc_props ipq8074_nandc_props = { .supports_bam = true, .nandc_part_of_qpic = true, .dev_cmd_reg_start = 0x7000, + .bam_offset = 0x30000, }; static const struct qcom_nandc_props sdx55_nandc_props = { @@ -2382,6 +2393,7 @@ static const struct qcom_nandc_props sdx55_nandc_props = { .nandc_part_of_qpic = true, .qpic_version2 = true, .dev_cmd_reg_start = 0x7000, + .bam_offset = 0x30000, }; /* diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c index 63e7b9e39a5a..c5d7cd8a6cab 100644 --- a/drivers/mtd/nand/raw/rockchip-nand-controller.c +++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c @@ -656,9 +656,16 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf, dma_data = dma_map_single(nfc->dev, (void *)nfc->page_buf, mtd->writesize, DMA_TO_DEVICE); + if (dma_mapping_error(nfc->dev, dma_data)) + return -ENOMEM; + dma_oob = dma_map_single(nfc->dev, nfc->oob_buf, ecc->steps * oob_step, DMA_TO_DEVICE); + if (dma_mapping_error(nfc->dev, dma_oob)) { + dma_unmap_single(nfc->dev, dma_data, mtd->writesize, DMA_TO_DEVICE); + return -ENOMEM; + } reinit_completion(&nfc->done); writel(INT_DMA, nfc->regs + nfc->cfg->int_en_off); @@ -772,9 +779,17 @@ static int 
rk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *buf, int oob_on, dma_data = dma_map_single(nfc->dev, nfc->page_buf, mtd->writesize, DMA_FROM_DEVICE); + if (dma_mapping_error(nfc->dev, dma_data)) + return -ENOMEM; + dma_oob = dma_map_single(nfc->dev, nfc->oob_buf, ecc->steps * oob_step, DMA_FROM_DEVICE); + if (dma_mapping_error(nfc->dev, dma_oob)) { + dma_unmap_single(nfc->dev, dma_data, mtd->writesize, + DMA_FROM_DEVICE); + return -ENOMEM; + } /* * The first blocks (4, 8 or 16 depending on the device) diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index fab371e3e9b7..162cd5f4f234 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -817,6 +817,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand, if (ret) return ret; + sunxi_nfc_randomizer_config(nand, page, false); sunxi_nfc_randomizer_enable(nand); writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP, nfc->regs + NFC_REG_CMD); @@ -1049,6 +1050,7 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct nand_chip *nand, if (ret) return ret; + sunxi_nfc_randomizer_config(nand, page, false); sunxi_nfc_randomizer_enable(nand); sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, 0, bbm, page); diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile index 1e61ab21893a..258da42451a4 100644 --- a/drivers/mtd/nand/spi/Makefile +++ b/drivers/mtd/nand/spi/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -spinand-objs := core.o alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o +spinand-objs := core.o otp.o +spinand-objs += alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o spinand-objs += micron.o paragon.o skyhigh.o toshiba.o winbond.o xtx.o obj-$(CONFIG_MTD_SPI_NAND) += spinand.o diff --git a/drivers/mtd/nand/spi/alliancememory.c b/drivers/mtd/nand/spi/alliancememory.c index 6046c73f8424..2ee498230ec1 100644 --- a/drivers/mtd/nand/spi/alliancememory.c +++ b/drivers/mtd/nand/spi/alliancememory.c @@ -17,20 +17,20 @@ #define AM_STATUS_ECC_MAX_CORRECTED (3 << 4) static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static int am_get_eccsize(struct mtd_info *mtd) { diff --git a/drivers/mtd/nand/spi/ato.c b/drivers/mtd/nand/spi/ato.c index bb5298911137..2b4df1d917ac 100644 --- a/drivers/mtd/nand/spi/ato.c +++ b/drivers/mtd/nand/spi/ato.c @@ -14,17 +14,17 @@ static 
SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static int ato25d1ga_ooblayout_ecc(struct mtd_info *mtd, int section, diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index da4713692674..c411fe9be3ef 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -22,7 +22,7 @@ static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val) { - struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg, + struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(reg, spinand->scratchbuf); int ret; @@ -36,7 +36,7 @@ static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val) int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val) { - struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg, + struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(reg, spinand->scratchbuf); *spinand->scratchbuf = val; @@ -362,7 +362,7 @@ static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status) static int spinand_write_enable_op(struct spinand_device *spinand) { - struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true); + struct spi_mem_op op = SPINAND_WR_EN_DIS_1S_0_0_OP(true); return spi_mem_exec_op(spinand->spimem, &op); } @@ -372,7 +372,7 @@ static int spinand_load_page_op(struct spinand_device *spinand, { struct nand_device *nand = spinand_to_nand(spinand); unsigned int row = nanddev_pos_to_row(nand, &req->pos); - struct spi_mem_op op = SPINAND_PAGE_READ_OP(row); + struct spi_mem_op op = SPINAND_PAGE_READ_1S_1S_0_OP(row); return spi_mem_exec_op(spinand->spimem, &op); } @@ -519,7 +519,7 @@ static int spinand_program_op(struct spinand_device *spinand, { struct nand_device *nand = spinand_to_nand(spinand); unsigned int row = nanddev_pos_to_row(nand, &req->pos); - struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row); + struct spi_mem_op op = SPINAND_PROG_EXEC_1S_1S_0_OP(row); return spi_mem_exec_op(spinand->spimem, &op); } @@ -529,18 +529,28 @@ static int spinand_erase_op(struct spinand_device *spinand, { struct nand_device *nand = spinand_to_nand(spinand); unsigned int row = nanddev_pos_to_row(nand, pos); - struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row); + struct spi_mem_op op = SPINAND_BLK_ERASE_1S_1S_0_OP(row); return spi_mem_exec_op(spinand->spimem, &op); } -static int spinand_wait(struct spinand_device *spinand, - unsigned long initial_delay_us, - unsigned long poll_delay_us, - u8 *s) +/** + * spinand_wait() - Poll memory device status + * @spinand: the spinand device + * @initial_delay_us: delay in us before starting to poll + * @poll_delay_us: time to sleep between reads in us + * @s: the pointer to variable to store the value of REG_STATUS + * + * This function polls a status register (REG_STATUS) and returns when + * the 
STATUS_READY bit is 0 or when the timeout has expired. + * + * Return: 0 on success, a negative error code otherwise. + */ +int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us, + unsigned long poll_delay_us, u8 *s) { - struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS, - spinand->scratchbuf); + struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(REG_STATUS, + spinand->scratchbuf); u8 status; int ret; @@ -573,7 +583,7 @@ out: static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr, u8 ndummy, u8 *buf) { - struct spi_mem_op op = SPINAND_READID_OP( + struct spi_mem_op op = SPINAND_READID_1S_1S_1S_OP( naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN); int ret; @@ -586,7 +596,7 @@ static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr, static int spinand_reset_op(struct spinand_device *spinand) { - struct spi_mem_op op = SPINAND_RESET_OP; + struct spi_mem_op op = SPINAND_RESET_1S_0_0_OP; int ret; ret = spi_mem_exec_op(spinand->spimem, &op); @@ -604,8 +614,16 @@ static int spinand_lock_block(struct spinand_device *spinand, u8 lock) return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock); } -static int spinand_read_page(struct spinand_device *spinand, - const struct nand_page_io_req *req) +/** + * spinand_read_page() - Read a page + * @spinand: the spinand device + * @req: the I/O request + * + * Return: 0 or a positive number of bitflips corrected on success. + * A negative error code otherwise. + */ +int spinand_read_page(struct spinand_device *spinand, + const struct nand_page_io_req *req) { struct nand_device *nand = spinand_to_nand(spinand); u8 status; @@ -635,8 +653,16 @@ static int spinand_read_page(struct spinand_device *spinand, return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req); } -static int spinand_write_page(struct spinand_device *spinand, - const struct nand_page_io_req *req) +/** + * spinand_write_page() - Write a page + * @spinand: the spinand device + * @req: the I/O request + * + * Return: 0 or a positive number of bitflips corrected on success. + * A negative error code otherwise. 
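The wait helper documented above follows a standard bounded-poll pattern: sleep for an initial delay sized to the expected operation time, then re-read the status register at a fixed interval until the busy bit clears or a timeout expires. A minimal userspace sketch of that pattern follows; read_status() is a hypothetical stand-in for the GET FEATURE transfer, and the busy mask and timeout values are invented for the example.

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    #define STATUS_BUSY 0x01 /* assumed OIP-style busy bit */
    #define TIMEOUT_MS  400  /* invented bound for the example */

    /* Hypothetical stand-in for the SPI register read: pretends the
     * device stays busy for the first two polls. */
    static uint8_t read_status(void)
    {
        static int calls;

        return ++calls < 3 ? STATUS_BUSY : 0;
    }

    static int wait_ready(unsigned long initial_delay_us,
                          unsigned long poll_delay_us, uint8_t *s)
    {
        struct timespec start, now;

        usleep(initial_delay_us);
        clock_gettime(CLOCK_MONOTONIC, &start);

        for (;;) {
            uint8_t status = read_status();

            if (!(status & STATUS_BUSY)) {
                if (s)
                    *s = status;
                return 0;
            }

            clock_gettime(CLOCK_MONOTONIC, &now);
            if ((now.tv_sec - start.tv_sec) * 1000 +
                (now.tv_nsec - start.tv_nsec) / 1000000 > TIMEOUT_MS)
                return -1; /* timed out */

            usleep(poll_delay_us);
        }
    }

    int main(void)
    {
        uint8_t status = 0;
        int ret = wait_ready(500, 100, &status);

        printf("wait_ready: %d (status 0x%02x)\n", ret, status);
        return 0;
    }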
+ */ +int spinand_write_page(struct spinand_device *spinand, + const struct nand_page_io_req *req) { struct nand_device *nand = spinand_to_nand(spinand); u8 status; @@ -674,11 +700,15 @@ static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from, { struct spinand_device *spinand = mtd_to_spinand(mtd); struct nand_device *nand = mtd_to_nanddev(mtd); + struct mtd_ecc_stats old_stats; struct nand_io_iter iter; bool disable_ecc = false; bool ecc_failed = false; + unsigned int retry_mode = 0; int ret; + old_stats = mtd->ecc_stats; + if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout) disable_ecc = true; @@ -690,18 +720,43 @@ static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from, if (ret) break; +read_retry: ret = spinand_read_page(spinand, &iter.req); if (ret < 0 && ret != -EBADMSG) break; - if (ret == -EBADMSG) + if (ret == -EBADMSG && spinand->set_read_retry) { + if (spinand->read_retries && (++retry_mode <= spinand->read_retries)) { + ret = spinand->set_read_retry(spinand, retry_mode); + if (ret < 0) { + spinand->set_read_retry(spinand, 0); + return ret; + } + + /* Reset ecc_stats; retry */ + mtd->ecc_stats = old_stats; + goto read_retry; + } else { + /* No more retry modes; real failure */ + ecc_failed = true; + } + } else if (ret == -EBADMSG) { ecc_failed = true; - else + } else { *max_bitflips = max_t(unsigned int, *max_bitflips, ret); + } ret = 0; ops->retlen += iter.req.datalen; ops->oobretlen += iter.req.ooblen; + + /* Reset to retry mode 0 */ + if (retry_mode) { + retry_mode = 0; + ret = spinand->set_read_retry(spinand, retry_mode); + if (ret < 0) + return ret; + } } if (ecc_failed && !ret) @@ -1292,6 +1347,10 @@ int spinand_match_and_init(struct spinand_device *spinand, spinand->id.len = 1 + table[i].devid.len; spinand->select_target = table[i].select_target; spinand->set_cont_read = table[i].set_cont_read; + spinand->fact_otp = &table[i].fact_otp; + spinand->user_otp = &table[i].user_otp; + spinand->read_retries = table[i].read_retries; + spinand->set_read_retry = table[i].set_read_retry; op = spinand_select_op_variant(spinand, info->op_variants.read_cache); @@ -1478,6 +1537,12 @@ static int spinand_init(struct spinand_device *spinand) mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks; mtd->_resume = spinand_mtd_resume; + if (spinand_user_otp_size(spinand) || spinand_fact_otp_size(spinand)) { + ret = spinand_set_mtd_otp_ops(spinand); + if (ret) + goto err_cleanup_ecc_engine; + } + if (nand->ecc.engine) { ret = mtd_ooblayout_count_freebytes(mtd); if (ret < 0) @@ -1520,6 +1585,7 @@ static void spinand_cleanup(struct spinand_device *spinand) { struct nand_device *nand = spinand_to_nand(spinand); + nanddev_ecc_engine_cleanup(nand); nanddev_cleanup(nand); spinand_manufacturer_cleanup(spinand); kfree(spinand->databuf); diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c index 323a20901fc9..9e286612a296 100644 --- a/drivers/mtd/nand/spi/esmt.c +++ b/drivers/mtd/nand/spi/esmt.c @@ -8,23 +8,28 @@ #include <linux/device.h> #include <linux/kernel.h> #include <linux/mtd/spinand.h> +#include <linux/spi/spi-mem.h> /* ESMT uses GigaDevice 0xc8 JECDEC ID on some SPI NANDs */ #define SPINAND_MFR_ESMT_C8 0xc8 +#define ESMT_F50L1G41LB_CFG_OTP_PROTECT BIT(7) +#define ESMT_F50L1G41LB_CFG_OTP_LOCK \ + (CFG_OTP_ENABLE | ESMT_F50L1G41LB_CFG_OTP_PROTECT) + static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, 
NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); /* * OOB spare area map (64 bytes) @@ -102,6 +107,83 @@ static const struct mtd_ooblayout_ops f50l1g41lb_ooblayout = { .free = f50l1g41lb_ooblayout_free, }; +static int f50l1g41lb_otp_info(struct spinand_device *spinand, size_t len, + struct otp_info *buf, size_t *retlen, bool user) +{ + if (len < sizeof(*buf)) + return -EINVAL; + + buf->locked = 0; + buf->start = 0; + buf->length = user ? spinand_user_otp_size(spinand) : + spinand_fact_otp_size(spinand); + + *retlen = sizeof(*buf); + return 0; +} + +static int f50l1g41lb_fact_otp_info(struct spinand_device *spinand, size_t len, + struct otp_info *buf, size_t *retlen) +{ + return f50l1g41lb_otp_info(spinand, len, buf, retlen, false); +} + +static int f50l1g41lb_user_otp_info(struct spinand_device *spinand, size_t len, + struct otp_info *buf, size_t *retlen) +{ + return f50l1g41lb_otp_info(spinand, len, buf, retlen, true); +} + +static int f50l1g41lb_otp_lock(struct spinand_device *spinand, loff_t from, + size_t len) +{ + struct spi_mem_op write_op = SPINAND_WR_EN_DIS_1S_0_0_OP(true); + struct spi_mem_op exec_op = SPINAND_PROG_EXEC_1S_1S_0_OP(0); + u8 status; + int ret; + + ret = spinand_upd_cfg(spinand, ESMT_F50L1G41LB_CFG_OTP_LOCK, + ESMT_F50L1G41LB_CFG_OTP_LOCK); + if (ret) + return ret; + + ret = spi_mem_exec_op(spinand->spimem, &write_op); + if (ret) + goto out; + + ret = spi_mem_exec_op(spinand->spimem, &exec_op); + if (ret) + goto out; + + ret = spinand_wait(spinand, + SPINAND_WRITE_INITIAL_DELAY_US, + SPINAND_WRITE_POLL_DELAY_US, + &status); + if (!ret && (status & STATUS_PROG_FAILED)) + ret = -EIO; + +out: + if (spinand_upd_cfg(spinand, ESMT_F50L1G41LB_CFG_OTP_LOCK, 0)) { + dev_warn(&spinand_to_mtd(spinand)->dev, + "Can not disable OTP mode\n"); + ret = -EIO; + } + + return ret; +} + +static const struct spinand_user_otp_ops f50l1g41lb_user_otp_ops = { + .info = f50l1g41lb_user_otp_info, + .lock = f50l1g41lb_otp_lock, + .read = spinand_user_otp_read, + .write = spinand_user_otp_write, +}; + +static const struct spinand_fact_otp_ops f50l1g41lb_fact_otp_ops = { + .info = f50l1g41lb_fact_otp_info, + .read = spinand_fact_otp_read, +}; + static const struct spinand_info esmt_c8_spinand_table[] = { SPINAND_INFO("F50L1G41LB", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x01, 0x7f, @@ -112,17 +194,21 @@ static const struct spinand_info esmt_c8_spinand_table[] = { &write_cache_variants, &update_cache_variants), 0, - SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)), + SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL), + SPINAND_USER_OTP_INFO(28, 2, &f50l1g41lb_user_otp_ops), + SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)), SPINAND_INFO("F50D1G41LB", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11, 0x7f, - 0x7f, 0x7f), + 0x7f), NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 
1), NAND_ECCREQ(1, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, &update_cache_variants), 0, - SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)), + SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL), + SPINAND_USER_OTP_INFO(28, 2, &f50l1g41lb_user_otp_ops), + SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)), SPINAND_INFO("F50D2G41KA", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51, 0x7f, 0x7f, 0x7f), diff --git a/drivers/mtd/nand/spi/foresee.c b/drivers/mtd/nand/spi/foresee.c index ecd5f6bffa33..7c61644bfb10 100644 --- a/drivers/mtd/nand/spi/foresee.c +++ b/drivers/mtd/nand/spi/foresee.c @@ -12,18 +12,18 @@ #define SPINAND_MFR_FORESEE 0xCD static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static int f35sqa002g_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c index d620bb02a20a..cb1d316fc4d8 100644 --- a/drivers/mtd/nand/spi/gigadevice.c +++ b/drivers/mtd/nand/spi/gigadevice.c @@ -24,44 +24,44 @@ #define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4) static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(read_cache_variants_f, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP_3A(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP_3A(0, 0, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(0, 0, NULL, 0)); static 
SPINAND_OP_VARIANTS(read_cache_variants_1gq5, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(read_cache_variants_2gq5, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static int gd5fxgq4xa_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) @@ -185,7 +185,7 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand, u8 status) { u8 status2; - struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2, + struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(GD5FXGQXXEXXG_REG_STATUS2, spinand->scratchbuf); int ret; @@ -228,7 +228,7 @@ static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand, u8 status) { u8 status2; - struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2, + struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(GD5FXGQXXEXXG_REG_STATUS2, spinand->scratchbuf); int ret; diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c index 3dc4d63d6832..eeaf5bf9f082 100644 --- a/drivers/mtd/nand/spi/macronix.c +++ b/drivers/mtd/nand/spi/macronix.c @@ -14,6 +14,8 @@ #define MACRONIX_ECCSR_BF_LAST_PAGE(eccsr) FIELD_GET(GENMASK(3, 0), eccsr) #define MACRONIX_ECCSR_BF_ACCUMULATED_PAGES(eccsr) FIELD_GET(GENMASK(7, 4), eccsr) #define MACRONIX_CFG_CONT_READ BIT(2) +#define MACRONIX_FEATURE_ADDR_READ_RETRY 0x70 +#define MACRONIX_NUM_READ_RETRY_MODES 5 #define STATUS_ECC_HAS_BITFLIPS_THRESHOLD (3 << 4) @@ -26,18 +28,18 @@ struct macronix_priv { }; static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + 
SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static int mx35lfxge4ab_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) @@ -136,6 +138,23 @@ static int macronix_set_cont_read(struct spinand_device *spinand, bool enable) return 0; } +/** + * macronix_set_read_retry - Set the retry mode + * @spinand: SPI NAND device + * @retry_mode: Specify which retry mode to set + * + * Return: 0 on success, a negative error code otherwise. + */ +static int macronix_set_read_retry(struct spinand_device *spinand, + unsigned int retry_mode) +{ + struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(MACRONIX_FEATURE_ADDR_READ_RETRY, + spinand->scratchbuf); + + *spinand->scratchbuf = retry_mode; + return spi_mem_exec_op(spinand->spimem, &op); +} + static const struct spinand_info macronix_spinand_table[] = { SPINAND_INFO("MX35LF1GE4AB", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x12), @@ -168,7 +187,9 @@ static const struct spinand_info macronix_spinand_table[] = { SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, macronix_ecc_get_status), - SPINAND_CONT_READ(macronix_set_cont_read)), + SPINAND_CONT_READ(macronix_set_cont_read), + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, + macronix_set_read_retry)), SPINAND_INFO("MX35LF4GE4AD", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x37, 0x03), NAND_MEMORG(1, 4096, 128, 64, 2048, 40, 1, 1, 1), @@ -179,7 +200,9 @@ static const struct spinand_info macronix_spinand_table[] = { SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, macronix_ecc_get_status), - SPINAND_CONT_READ(macronix_set_cont_read)), + SPINAND_CONT_READ(macronix_set_cont_read), + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, + macronix_set_read_retry)), SPINAND_INFO("MX35LF1G24AD", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14, 0x03), NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), @@ -188,7 +211,9 @@ static const struct spinand_info macronix_spinand_table[] = { &write_cache_variants, &update_cache_variants), SPINAND_HAS_QE_BIT, - SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL), + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, + macronix_set_read_retry)), SPINAND_INFO("MX35LF2G24AD", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24, 0x03), NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1), @@ -198,7 +223,9 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT | SPINAND_HAS_PROG_PLANE_SELECT_BIT, - SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL), + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, + macronix_set_read_retry)), SPINAND_INFO("MX35LF2G24AD-Z4I8", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x64, 0x03), NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), @@ -207,7 +234,9 @@ static const struct spinand_info 
macronix_spinand_table[] = { &write_cache_variants, &update_cache_variants), SPINAND_HAS_QE_BIT, - SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL), + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, + macronix_set_read_retry)), SPINAND_INFO("MX35LF4G24AD", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35, 0x03), NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1), @@ -217,7 +246,9 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT | SPINAND_HAS_PROG_PLANE_SELECT_BIT, - SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL), + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, + macronix_set_read_retry)), SPINAND_INFO("MX35LF4G24AD-Z4I8", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x75, 0x03), NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), @@ -226,7 +257,9 @@ static const struct spinand_info macronix_spinand_table[] = { &write_cache_variants, &update_cache_variants), SPINAND_HAS_QE_BIT, - SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL), + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, + macronix_set_read_retry)), SPINAND_INFO("MX31LF1GE4BC", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x1e), NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), @@ -270,7 +303,9 @@ static const struct spinand_info macronix_spinand_table[] = { SPINAND_HAS_QE_BIT | SPINAND_HAS_PROG_PLANE_SELECT_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - macronix_ecc_get_status)), + macronix_ecc_get_status), + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, + macronix_set_read_retry)), SPINAND_INFO("MX35UF4G24AD-Z4I8", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xf5, 0x03), NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), @@ -280,7 +315,9 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - macronix_ecc_get_status)), + macronix_ecc_get_status), + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, + macronix_set_read_retry)), SPINAND_INFO("MX35UF4GE4AD", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb7, 0x03), NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), @@ -291,7 +328,9 @@ static const struct spinand_info macronix_spinand_table[] = { SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, macronix_ecc_get_status), - SPINAND_CONT_READ(macronix_set_cont_read)), + SPINAND_CONT_READ(macronix_set_cont_read), + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, + macronix_set_read_retry)), SPINAND_INFO("MX35UF2G14AC", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa0), NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1), @@ -314,7 +353,9 @@ static const struct spinand_info macronix_spinand_table[] = { SPINAND_HAS_QE_BIT | SPINAND_HAS_PROG_PLANE_SELECT_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - macronix_ecc_get_status)), + macronix_ecc_get_status), + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, + macronix_set_read_retry)), SPINAND_INFO("MX35UF2G24AD-Z4I8", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xe4, 0x03), NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), @@ -324,7 +365,9 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - macronix_ecc_get_status)), + macronix_ecc_get_status), + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, + macronix_set_read_retry)), SPINAND_INFO("MX35UF2GE4AD", 
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa6, 0x03), NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), @@ -335,7 +378,9 @@ static const struct spinand_info macronix_spinand_table[] = { SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, macronix_ecc_get_status), - SPINAND_CONT_READ(macronix_set_cont_read)), + SPINAND_CONT_READ(macronix_set_cont_read), + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, + macronix_set_read_retry)), SPINAND_INFO("MX35UF2GE4AC", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa2, 0x01), NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), @@ -366,7 +411,9 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, - macronix_ecc_get_status)), + macronix_ecc_get_status), + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, + macronix_set_read_retry)), SPINAND_INFO("MX35UF1GE4AD", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x96, 0x03), NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), @@ -377,7 +424,9 @@ static const struct spinand_info macronix_spinand_table[] = { SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, macronix_ecc_get_status), - SPINAND_CONT_READ(macronix_set_cont_read)), + SPINAND_CONT_READ(macronix_set_cont_read), + SPINAND_READ_RETRY(MACRONIX_NUM_READ_RETRY_MODES, + macronix_set_read_retry)), SPINAND_INFO("MX35UF1GE4AC", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92, 0x01), NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c index ad0bb9755a09..8281c9d3f4f7 100644 --- a/drivers/mtd/nand/spi/micron.c +++ b/drivers/mtd/nand/spi/micron.c @@ -9,6 +9,8 @@ #include <linux/device.h> #include <linux/kernel.h> #include <linux/mtd/spinand.h> +#include <linux/spi/spi-mem.h> +#include <linux/string.h> #define SPINAND_MFR_MICRON 0x2c @@ -28,34 +30,38 @@ #define MICRON_SELECT_DIE(x) ((x) << 6) +#define MICRON_MT29F2G01ABAGD_CFG_OTP_STATE BIT(7) +#define MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK \ + (CFG_OTP_ENABLE | MICRON_MT29F2G01ABAGD_CFG_OTP_STATE) + static SPINAND_OP_VARIANTS(quadio_read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(x4_write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(x4_update_cache_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); /* Micron MT29F2G01AAAED Device */ static SPINAND_OP_VARIANTS(x4_read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - 
SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(x1_write_cache_variants, - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(x1_update_cache_variants, - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static int micron_8_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) @@ -131,7 +137,7 @@ static const struct mtd_ooblayout_ops micron_4_ooblayout = { static int micron_select_target(struct spinand_device *spinand, unsigned int target) { - struct spi_mem_op op = SPINAND_SET_FEATURE_OP(MICRON_DIE_SELECT_REG, + struct spi_mem_op op = SPINAND_SET_FEATURE_1S_1S_1S_OP(MICRON_DIE_SELECT_REG, spinand->scratchbuf); if (target > 1) @@ -168,6 +174,131 @@ static int micron_8_ecc_get_status(struct spinand_device *spinand, return -EINVAL; } +static int mt29f2g01abagd_otp_is_locked(struct spinand_device *spinand) +{ + size_t bufsize = spinand_otp_page_size(spinand); + size_t retlen; + u8 *buf; + int ret; + + buf = kmalloc(bufsize, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = spinand_upd_cfg(spinand, + MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK, + MICRON_MT29F2G01ABAGD_CFG_OTP_STATE); + if (ret) + goto free_buf; + + ret = spinand_user_otp_read(spinand, 0, bufsize, &retlen, buf); + + if (spinand_upd_cfg(spinand, MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK, + 0)) { + dev_warn(&spinand_to_mtd(spinand)->dev, + "Can not disable OTP mode\n"); + ret = -EIO; + } + + if (ret) + goto free_buf; + + /* If all zeros, then the OTP area is locked. */ + if (mem_is_zero(buf, bufsize)) + ret = 1; + +free_buf: + kfree(buf); + return ret; +} + +static int mt29f2g01abagd_otp_info(struct spinand_device *spinand, size_t len, + struct otp_info *buf, size_t *retlen, + bool user) +{ + int locked; + + if (len < sizeof(*buf)) + return -EINVAL; + + locked = mt29f2g01abagd_otp_is_locked(spinand); + if (locked < 0) + return locked; + + buf->locked = locked; + buf->start = 0; + buf->length = user ? 
spinand_user_otp_size(spinand) : + spinand_fact_otp_size(spinand); + + *retlen = sizeof(*buf); + return 0; +} + +static int mt29f2g01abagd_fact_otp_info(struct spinand_device *spinand, + size_t len, struct otp_info *buf, + size_t *retlen) +{ + return mt29f2g01abagd_otp_info(spinand, len, buf, retlen, false); +} + +static int mt29f2g01abagd_user_otp_info(struct spinand_device *spinand, + size_t len, struct otp_info *buf, + size_t *retlen) +{ + return mt29f2g01abagd_otp_info(spinand, len, buf, retlen, true); +} + +static int mt29f2g01abagd_otp_lock(struct spinand_device *spinand, loff_t from, + size_t len) +{ + struct spi_mem_op write_op = SPINAND_WR_EN_DIS_1S_0_0_OP(true); + struct spi_mem_op exec_op = SPINAND_PROG_EXEC_1S_1S_0_OP(0); + u8 status; + int ret; + + ret = spinand_upd_cfg(spinand, + MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK, + MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK); + if (ret) + return ret; + + ret = spi_mem_exec_op(spinand->spimem, &write_op); + if (ret) + goto out; + + ret = spi_mem_exec_op(spinand->spimem, &exec_op); + if (ret) + goto out; + + ret = spinand_wait(spinand, + SPINAND_WRITE_INITIAL_DELAY_US, + SPINAND_WRITE_POLL_DELAY_US, + &status); + if (!ret && (status & STATUS_PROG_FAILED)) + ret = -EIO; + +out: + if (spinand_upd_cfg(spinand, MICRON_MT29F2G01ABAGD_CFG_OTP_LOCK, 0)) { + dev_warn(&spinand_to_mtd(spinand)->dev, + "Can not disable OTP mode\n"); + ret = -EIO; + } + + return ret; +} + +static const struct spinand_user_otp_ops mt29f2g01abagd_user_otp_ops = { + .info = mt29f2g01abagd_user_otp_info, + .lock = mt29f2g01abagd_otp_lock, + .read = spinand_user_otp_read, + .write = spinand_user_otp_write, +}; + +static const struct spinand_fact_otp_ops mt29f2g01abagd_fact_otp_ops = { + .info = mt29f2g01abagd_fact_otp_info, + .read = spinand_fact_otp_read, +}; + static const struct spinand_info micron_spinand_table[] = { /* M79A 2Gb 3.3V */ SPINAND_INFO("MT29F2G01ABAGD", @@ -179,7 +310,9 @@ static const struct spinand_info micron_spinand_table[] = { &x4_update_cache_variants), 0, SPINAND_ECCINFO(&micron_8_ooblayout, - micron_8_ecc_get_status)), + micron_8_ecc_get_status), + SPINAND_USER_OTP_INFO(12, 2, &mt29f2g01abagd_user_otp_ops), + SPINAND_FACT_OTP_INFO(2, 0, &mt29f2g01abagd_fact_otp_ops)), /* M79A 2Gb 1.8V */ SPINAND_INFO("MT29F2G01ABBGD", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25), diff --git a/drivers/mtd/nand/spi/otp.c b/drivers/mtd/nand/spi/otp.c new file mode 100644 index 000000000000..ce41bca86ea9 --- /dev/null +++ b/drivers/mtd/nand/spi/otp.c @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2025, SaluteDevices. All Rights Reserved. + * + * Author: Martin Kurbanov <mmkurbanov@salutedevices.com> + */ + +#include <linux/mtd/mtd.h> +#include <linux/mtd/spinand.h> + +/** + * spinand_otp_page_size() - Get SPI-NAND OTP page size + * @spinand: the spinand device + * + * Return: the OTP page size. + */ +size_t spinand_otp_page_size(struct spinand_device *spinand) +{ + struct nand_device *nand = spinand_to_nand(spinand); + + return nanddev_page_size(nand) + nanddev_per_page_oobsize(nand); +} + +static size_t spinand_otp_size(struct spinand_device *spinand, + const struct spinand_otp_layout *layout) +{ + return layout->npages * spinand_otp_page_size(spinand); +} + +/** + * spinand_fact_otp_size() - Get SPI-NAND factory OTP area size + * @spinand: the spinand device + * + * Return: the OTP size. 
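The sizing helpers at the head of the new otp.c reduce to simple arithmetic: OTP pages are accessed raw, so one OTP page spans the data area plus the OOB area, and an OTP region is npages such pages starting at start_page. A standalone sketch of that arithmetic, with invented geometry for a 2 KiB + 128 B device exposing 12 user OTP pages:

    #include <stddef.h>
    #include <stdio.h>

    /* Mirrors the shape of the layout the driver tables declare;
     * the numbers below are illustrative, not from a datasheet. */
    struct otp_layout {
        unsigned int start_page;
        unsigned int npages;
    };

    static size_t otp_page_size(size_t page_size, size_t oob_size)
    {
        /* OTP pages are accessed raw: data area plus OOB. */
        return page_size + oob_size;
    }

    static size_t otp_size(const struct otp_layout *l,
                           size_t page_size, size_t oob_size)
    {
        return l->npages * otp_page_size(page_size, oob_size);
    }

    int main(void)
    {
        struct otp_layout user = { .start_page = 2, .npages = 12 };

        printf("OTP page: %zu bytes, area: %zu bytes\n",
               otp_page_size(2048, 128),
               otp_size(&user, 2048, 128));
        return 0;
    }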
+ */ +size_t spinand_fact_otp_size(struct spinand_device *spinand) +{ + return spinand_otp_size(spinand, &spinand->fact_otp->layout); +} + +/** + * spinand_user_otp_size() - Get SPI-NAND user OTP area size + * @spinand: the spinand device + * + * Return: the OTP size. + */ +size_t spinand_user_otp_size(struct spinand_device *spinand) +{ + return spinand_otp_size(spinand, &spinand->user_otp->layout); +} + +static int spinand_otp_check_bounds(struct spinand_device *spinand, loff_t ofs, + size_t len, + const struct spinand_otp_layout *layout) +{ + if (ofs < 0 || ofs + len > spinand_otp_size(spinand, layout)) + return -EINVAL; + + return 0; +} + +static int spinand_user_otp_check_bounds(struct spinand_device *spinand, + loff_t ofs, size_t len) +{ + return spinand_otp_check_bounds(spinand, ofs, len, + &spinand->user_otp->layout); +} + +static int spinand_otp_rw(struct spinand_device *spinand, loff_t ofs, + size_t len, size_t *retlen, u8 *buf, bool is_write, + const struct spinand_otp_layout *layout) +{ + struct nand_page_io_req req = {}; + unsigned long long page; + size_t copied = 0; + size_t otp_pagesize = spinand_otp_page_size(spinand); + int ret; + + if (!len) + return 0; + + ret = spinand_otp_check_bounds(spinand, ofs, len, layout); + if (ret) + return ret; + + ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, CFG_OTP_ENABLE); + if (ret) + return ret; + + page = ofs; + req.dataoffs = do_div(page, otp_pagesize); + req.pos.page = page + layout->start_page; + req.type = is_write ? NAND_PAGE_WRITE : NAND_PAGE_READ; + req.mode = MTD_OPS_RAW; + req.databuf.in = buf; + + while (copied < len) { + req.datalen = min_t(unsigned int, + otp_pagesize - req.dataoffs, + len - copied); + + if (is_write) + ret = spinand_write_page(spinand, &req); + else + ret = spinand_read_page(spinand, &req); + + if (ret < 0) + break; + + req.databuf.in += req.datalen; + req.pos.page++; + req.dataoffs = 0; + copied += req.datalen; + } + + *retlen = copied; + + if (spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0)) { + dev_warn(&spinand_to_mtd(spinand)->dev, + "Can not disable OTP mode\n"); + ret = -EIO; + } + + return ret; +} + +/** + * spinand_fact_otp_read() - Read from OTP area + * @spinand: the spinand device + * @ofs: the offset to read + * @len: the number of data bytes to read + * @retlen: the pointer to variable to store the number of read bytes + * @buf: the buffer to store the read data + * + * Return: 0 on success, an error code otherwise. + */ +int spinand_fact_otp_read(struct spinand_device *spinand, loff_t ofs, + size_t len, size_t *retlen, u8 *buf) +{ + return spinand_otp_rw(spinand, ofs, len, retlen, buf, false, + &spinand->fact_otp->layout); +} + +/** + * spinand_user_otp_read() - Read from OTP area + * @spinand: the spinand device + * @ofs: the offset to read + * @len: the number of data bytes to read + * @retlen: the pointer to variable to store the number of read bytes + * @buf: the buffer to store the read data + * + * Return: 0 on success, an error code otherwise. + */ +int spinand_user_otp_read(struct spinand_device *spinand, loff_t ofs, + size_t len, size_t *retlen, u8 *buf) +{ + return spinand_otp_rw(spinand, ofs, len, retlen, buf, false, + &spinand->user_otp->layout); +} + +/** + * spinand_user_otp_write() - Write to OTP area + * @spinand: the spinand device + * @ofs: the offset to write to + * @len: the number of bytes to write + * @retlen: the pointer to variable to store the number of written bytes + * @buf: the buffer with data to write + * + * Return: 0 on success, an error code otherwise. 
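spinand_otp_rw() above walks the requested range one OTP page at a time: do_div() splits the linear offset into a page index plus a starting column, and each chunk is clamped to what remains in the current page, with later pages starting at column 0. The same walk as a self-contained sketch (plain division in place of do_div(), and the per-page transfer only printed, not performed):

    #include <stdint.h>
    #include <stdio.h>

    #define OTP_PAGE_SIZE 2176u /* 2048 data + 128 OOB, illustrative */

    static void rw_range(uint64_t ofs, size_t len)
    {
        uint64_t page = ofs / OTP_PAGE_SIZE;
        size_t column = ofs % OTP_PAGE_SIZE;
        size_t copied = 0;

        while (copied < len) {
            /* Clamp to what is left in the current page. */
            size_t chunk = OTP_PAGE_SIZE - column;

            if (chunk > len - copied)
                chunk = len - copied;

            printf("page %llu, column %zu, %zu bytes\n",
                   (unsigned long long)page, column, chunk);

            copied += chunk;
            column = 0; /* later pages start at column 0 */
            page++;
        }
    }

    int main(void)
    {
        /* A read that starts mid-page and crosses two page boundaries. */
        rw_range(2000, 3000);
        return 0;
    }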
+ */ +int spinand_user_otp_write(struct spinand_device *spinand, loff_t ofs, + size_t len, size_t *retlen, const u8 *buf) +{ + return spinand_otp_rw(spinand, ofs, len, retlen, (u8 *)buf, true, + &spinand->user_otp->layout); +} + +static int spinand_mtd_otp_info(struct mtd_info *mtd, size_t len, + size_t *retlen, struct otp_info *buf, + bool is_fact) +{ + struct spinand_device *spinand = mtd_to_spinand(mtd); + int ret; + + *retlen = 0; + + mutex_lock(&spinand->lock); + + if (is_fact) + ret = spinand->fact_otp->ops->info(spinand, len, buf, retlen); + else + ret = spinand->user_otp->ops->info(spinand, len, buf, retlen); + + mutex_unlock(&spinand->lock); + + return ret; +} + +static int spinand_mtd_fact_otp_info(struct mtd_info *mtd, size_t len, + size_t *retlen, struct otp_info *buf) +{ + return spinand_mtd_otp_info(mtd, len, retlen, buf, true); +} + +static int spinand_mtd_user_otp_info(struct mtd_info *mtd, size_t len, + size_t *retlen, struct otp_info *buf) +{ + return spinand_mtd_otp_info(mtd, len, retlen, buf, false); +} + +static int spinand_mtd_otp_read(struct mtd_info *mtd, loff_t ofs, size_t len, + size_t *retlen, u8 *buf, bool is_fact) +{ + struct spinand_device *spinand = mtd_to_spinand(mtd); + int ret; + + *retlen = 0; + + if (!len) + return 0; + + ret = spinand_otp_check_bounds(spinand, ofs, len, + is_fact ? &spinand->fact_otp->layout : + &spinand->user_otp->layout); + if (ret) + return ret; + + mutex_lock(&spinand->lock); + + if (is_fact) + ret = spinand->fact_otp->ops->read(spinand, ofs, len, retlen, + buf); + else + ret = spinand->user_otp->ops->read(spinand, ofs, len, retlen, + buf); + + mutex_unlock(&spinand->lock); + + return ret; +} + +static int spinand_mtd_fact_otp_read(struct mtd_info *mtd, loff_t ofs, + size_t len, size_t *retlen, u8 *buf) +{ + return spinand_mtd_otp_read(mtd, ofs, len, retlen, buf, true); +} + +static int spinand_mtd_user_otp_read(struct mtd_info *mtd, loff_t ofs, + size_t len, size_t *retlen, u8 *buf) +{ + return spinand_mtd_otp_read(mtd, ofs, len, retlen, buf, false); +} + +static int spinand_mtd_user_otp_write(struct mtd_info *mtd, loff_t ofs, + size_t len, size_t *retlen, const u8 *buf) +{ + struct spinand_device *spinand = mtd_to_spinand(mtd); + const struct spinand_user_otp_ops *ops = spinand->user_otp->ops; + int ret; + + *retlen = 0; + + if (!len) + return 0; + + ret = spinand_user_otp_check_bounds(spinand, ofs, len); + if (ret) + return ret; + + mutex_lock(&spinand->lock); + ret = ops->write(spinand, ofs, len, retlen, buf); + mutex_unlock(&spinand->lock); + + return ret; +} + +static int spinand_mtd_user_otp_erase(struct mtd_info *mtd, loff_t ofs, + size_t len) +{ + struct spinand_device *spinand = mtd_to_spinand(mtd); + const struct spinand_user_otp_ops *ops = spinand->user_otp->ops; + int ret; + + if (!len) + return 0; + + ret = spinand_user_otp_check_bounds(spinand, ofs, len); + if (ret) + return ret; + + mutex_lock(&spinand->lock); + ret = ops->erase(spinand, ofs, len); + mutex_unlock(&spinand->lock); + + return ret; +} + +static int spinand_mtd_user_otp_lock(struct mtd_info *mtd, loff_t ofs, + size_t len) +{ + struct spinand_device *spinand = mtd_to_spinand(mtd); + const struct spinand_user_otp_ops *ops = spinand->user_otp->ops; + int ret; + + if (!len) + return 0; + + ret = spinand_user_otp_check_bounds(spinand, ofs, len); + if (ret) + return ret; + + mutex_lock(&spinand->lock); + ret = ops->lock(spinand, ofs, len); + mutex_unlock(&spinand->lock); + + return ret; +} + +/** + * spinand_set_mtd_otp_ops() - Setup OTP methods + * @spinand: 
the spinand device + * + * Setup OTP methods. + * + * Return: 0 on success, a negative error code otherwise. + */ +int spinand_set_mtd_otp_ops(struct spinand_device *spinand) +{ + struct mtd_info *mtd = spinand_to_mtd(spinand); + const struct spinand_fact_otp_ops *fact_ops = spinand->fact_otp->ops; + const struct spinand_user_otp_ops *user_ops = spinand->user_otp->ops; + + if (!user_ops && !fact_ops) + return -EINVAL; + + if (user_ops) { + if (user_ops->info) + mtd->_get_user_prot_info = spinand_mtd_user_otp_info; + + if (user_ops->read) + mtd->_read_user_prot_reg = spinand_mtd_user_otp_read; + + if (user_ops->write) + mtd->_write_user_prot_reg = spinand_mtd_user_otp_write; + + if (user_ops->lock) + mtd->_lock_user_prot_reg = spinand_mtd_user_otp_lock; + + if (user_ops->erase) + mtd->_erase_user_prot_reg = spinand_mtd_user_otp_erase; + } + + if (fact_ops) { + if (fact_ops->info) + mtd->_get_fact_prot_info = spinand_mtd_fact_otp_info; + + if (fact_ops->read) + mtd->_read_fact_prot_reg = spinand_mtd_fact_otp_read; + } + + return 0; +} diff --git a/drivers/mtd/nand/spi/paragon.c b/drivers/mtd/nand/spi/paragon.c index 6e7cc6995380..4670bac41245 100644 --- a/drivers/mtd/nand/spi/paragon.c +++ b/drivers/mtd/nand/spi/paragon.c @@ -22,20 +22,20 @@ static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static int pn26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section, diff --git a/drivers/mtd/nand/spi/skyhigh.c b/drivers/mtd/nand/spi/skyhigh.c index 961df0d74984..51d61785df61 100644 --- a/drivers/mtd/nand/spi/skyhigh.c +++ b/drivers/mtd/nand/spi/skyhigh.c @@ -17,20 +17,20 @@ #define SKYHIGH_CONFIG_PROTECT_EN BIT(1) static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 4, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 2, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 4, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 2, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static 
SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static int skyhigh_spinand_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c index 2e2106b2705f..4c6923047aeb 100644 --- a/drivers/mtd/nand/spi/toshiba.c +++ b/drivers/mtd/nand/spi/toshiba.c @@ -15,28 +15,28 @@ #define TOSH_STATUS_ECC_HAS_BITFLIPS_T (3 << 4) static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(write_cache_x4_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_x4_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); /* * Backward compatibility for 1st generation Serial NAND devices * which don't support Quad Program Load operation. */ static SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_variants, - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static int tx58cxgxsxraix_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) @@ -73,7 +73,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand, { struct nand_device *nand = spinand_to_nand(spinand); u8 mbf = 0; - struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf); + struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(0x30, spinand->scratchbuf); switch (status & STATUS_ECC_MASK) { case STATUS_ECC_NO_BITFLIPS: diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c index 8394a1b1fb0c..b7a28f001a38 100644 --- a/drivers/mtd/nand/spi/winbond.c +++ b/drivers/mtd/nand/spi/winbond.c @@ -23,34 +23,50 @@ * "X4" in the core is equivalent to "quad output" in the datasheets. 
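The renames sweeping through these drivers replace ad-hoc suffixes such as QUADIO, DUALIO and X4 with a systematic scheme: three groups describing the command, address and data phases, each a bus width plus S for single transfer rate or D for DTR. Thus 1S_1S_4S is quad output, 1S_4S_4S is quad I/O, and 1S_4D_4D is quad I/O with DTR address and data phases. A small sketch decoding a few suffixes; the table is an illustrative subset, not the full op list:

    #include <stdio.h>

    struct phase { int buswidth; int dtr; };

    struct op_name {
        const char *suffix;
        struct phase cmd, addr, data;
    };

    static const struct op_name ops[] = {
        { "1S_1S_1S", {1, 0}, {1, 0}, {1, 0} }, /* plain read      */
        { "1S_1S_4S", {1, 0}, {1, 0}, {4, 0} }, /* quad output     */
        { "1S_4S_4S", {1, 0}, {4, 0}, {4, 0} }, /* quad I/O        */
        { "1S_4D_4D", {1, 0}, {4, 1}, {4, 1} }, /* quad I/O, DTR   */
    };

    int main(void)
    {
        for (size_t i = 0; i < sizeof(ops) / sizeof(ops[0]); i++)
            printf("%-9s cmd x%d%s addr x%d%s data x%d%s\n",
                   ops[i].suffix,
                   ops[i].cmd.buswidth, ops[i].cmd.dtr ? "D" : "S",
                   ops[i].addr.buswidth, ops[i].addr.dtr ? "D" : "S",
                   ops[i].data.buswidth, ops[i].data.dtr ? "D" : "S");
        return 0;
    }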
*/ -static SPINAND_OP_VARIANTS(read_cache_dtr_variants, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_DTR_OP(0, 8, NULL, 0, 80 * HZ_PER_MHZ), - SPINAND_PAGE_READ_FROM_CACHE_X4_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ), - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_DTR_OP(0, 4, NULL, 0, 80 * HZ_PER_MHZ), - SPINAND_PAGE_READ_FROM_CACHE_X2_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DTR_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0, 54 * HZ_PER_MHZ)); +static SPINAND_OP_VARIANTS(read_cache_octal_variants, + SPINAND_PAGE_READ_FROM_CACHE_1S_1D_8D_OP(0, 2, NULL, 0, 105 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_1S_8S_8S_OP(0, 16, NULL, 0, 162 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_8S_OP(0, 1, NULL, 0, 133 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); + +static SPINAND_OP_VARIANTS(write_cache_octal_variants, + SPINAND_PROG_LOAD_1S_8S_8S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_8S_OP(0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); + +static SPINAND_OP_VARIANTS(update_cache_octal_variants, + SPINAND_PROG_LOAD_1S_8S_8S_OP(false, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); + +static SPINAND_OP_VARIANTS(read_cache_dual_quad_dtr_variants, + SPINAND_PAGE_READ_FROM_CACHE_1S_4D_4D_OP(0, 8, NULL, 0, 80 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_1S_1D_4D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 104 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2D_2D_OP(0, 4, NULL, 0, 80 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_1S_1D_2D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0, 104 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1D_1D_OP(0, 2, NULL, 0, 80 * HZ_PER_MHZ), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 54 * HZ_PER_MHZ)); static SPINAND_OP_VARIANTS(read_cache_variants, - SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0), - SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0)); + SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0)); static SPINAND_OP_VARIANTS(write_cache_variants, - SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), - SPINAND_PROG_LOAD(true, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0)); static SPINAND_OP_VARIANTS(update_cache_variants, - SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), - SPINAND_PROG_LOAD(false, 0, NULL, 0)); + SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 
0, NULL, 0), + SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0)); static int w25m02gv_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) @@ -141,12 +157,47 @@ static const struct mtd_ooblayout_ops w25n02kv_ooblayout = { .free = w25n02kv_ooblayout_free, }; +static int w35n01jw_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + if (section > 7) + return -ERANGE; + + region->offset = (16 * section) + 12; + region->length = 4; + + return 0; +} + +static int w35n01jw_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + if (section > 7) + return -ERANGE; + + region->offset = 16 * section; + region->length = 12; + + /* Extract BBM */ + if (!section) { + region->offset += 2; + region->length -= 2; + } + + return 0; +} + +static const struct mtd_ooblayout_ops w35n01jw_ooblayout = { + .ecc = w35n01jw_ooblayout_ecc, + .free = w35n01jw_ooblayout_free, +}; + static int w25n02kv_ecc_get_status(struct spinand_device *spinand, u8 status) { struct nand_device *nand = spinand_to_nand(spinand); u8 mbf = 0; - struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf); + struct spi_mem_op op = SPINAND_GET_FEATURE_1S_1S_1S_OP(0x30, spinand->scratchbuf); switch (status & STATUS_ECC_MASK) { case STATUS_ECC_NO_BITFLIPS: @@ -213,7 +264,7 @@ static const struct spinand_info winbond_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbc, 0x21), NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), NAND_ECCREQ(1, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_dtr_variants, + SPINAND_INFO_OP_VARIANTS(&read_cache_dual_quad_dtr_variants, &write_cache_variants, &update_cache_variants), 0, @@ -227,6 +278,33 @@ static const struct spinand_info winbond_spinand_table[] = { &update_cache_variants), 0, SPINAND_ECCINFO(&w25n01kv_ooblayout, w25n02kv_ecc_get_status)), + SPINAND_INFO("W35N01JW", /* 1.8V */ + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdc, 0x21), + NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 1, 1), + NAND_ECCREQ(1, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants, + &write_cache_octal_variants, + &update_cache_octal_variants), + 0, + SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)), + SPINAND_INFO("W35N02JW", /* 1.8V */ + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x22), + NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 2, 1), + NAND_ECCREQ(1, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants, + &write_cache_octal_variants, + &update_cache_octal_variants), + 0, + SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)), + SPINAND_INFO("W35N04JW", /* 1.8V */ + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xdf, 0x23), + NAND_MEMORG(1, 4096, 128, 64, 512, 10, 1, 4, 1), + NAND_ECCREQ(1, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_octal_variants, + &write_cache_octal_variants, + &update_cache_octal_variants), + 0, + SPINAND_ECCINFO(&w35n01jw_ooblayout, NULL)), /* 2G-bit densities */ SPINAND_INFO("W25M02GV", /* 2x1G-bit 3.3V */ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xab, 0x21), @@ -242,7 +320,7 @@ static const struct spinand_info winbond_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbf, 0x22), NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 2, 1), NAND_ECCREQ(1, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_dtr_variants, + SPINAND_INFO_OP_VARIANTS(&read_cache_dual_quad_dtr_variants, &write_cache_variants, &update_cache_variants), 0, diff --git a/drivers/mtd/nand/spi/xtx.c b/drivers/mtd/nand/spi/xtx.c index 3f539ca0de86..37336d5958a9 100644 --- 
diff --git a/drivers/mtd/nand/spi/xtx.c b/drivers/mtd/nand/spi/xtx.c
index 3f539ca0de86..37336d5958a9 100644
--- a/drivers/mtd/nand/spi/xtx.c
+++ b/drivers/mtd/nand/spi/xtx.c
@@ -23,20 +23,20 @@
 #define XT26XXXD_STATUS_ECC_UNCOR_ERROR	(2)
 
 static SPINAND_OP_VARIANTS(read_cache_variants,
-		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
-		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
-		SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
-		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
-		SPINAND_PAGE_READ_FROM_CACHE_FAST_OP(0, 1, NULL, 0),
-		SPINAND_PAGE_READ_FROM_CACHE_OP(0, 1, NULL, 0));
+		SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 1, NULL, 0),
+		SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0),
+		SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(0, 1, NULL, 0),
+		SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0),
+		SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0),
+		SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0));
 
 static SPINAND_OP_VARIANTS(write_cache_variants,
-		SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
-		SPINAND_PROG_LOAD(true, 0, NULL, 0));
+		SPINAND_PROG_LOAD_1S_1S_4S_OP(true, 0, NULL, 0),
+		SPINAND_PROG_LOAD_1S_1S_1S_OP(true, 0, NULL, 0));
 
 static SPINAND_OP_VARIANTS(update_cache_variants,
-		SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
-		SPINAND_PROG_LOAD(false, 0, NULL, 0));
+		SPINAND_PROG_LOAD_1S_1S_4S_OP(false, 0, NULL, 0),
+		SPINAND_PROG_LOAD_1S_1S_1S_OP(false, 0, NULL, 0));
 
 static int xt26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section,
 				  struct mtd_oob_region *region)
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index b5b3c4c44a94..abc7b186353f 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -993,7 +993,7 @@ restart:
 /* flush timer, runs a second after last write */
 static void sm_cache_flush_timer(struct timer_list *t)
 {
-	struct sm_ftl *ftl = from_timer(ftl, t, timer);
+	struct sm_ftl *ftl = timer_container_of(ftl, t, timer);
 
 	queue_work(cache_flush_workqueue, &ftl->flush_work);
 }
@@ -1067,7 +1067,7 @@ static int sm_write(struct mtd_blktrans_dev *dev,
 	sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);
 
 	/* No need in flush thread running now */
-	del_timer(&ftl->timer);
+	timer_delete(&ftl->timer);
 	mutex_lock(&ftl->mutex);
 
 	zone = sm_get_zone(ftl, zone_num);
@@ -1111,7 +1111,7 @@ static void sm_release(struct mtd_blktrans_dev *dev)
 {
 	struct sm_ftl *ftl = dev->priv;
 
-	del_timer_sync(&ftl->timer);
+	timer_delete_sync(&ftl->timer);
 	cancel_work_sync(&ftl->flush_work);
 	mutex_lock(&ftl->mutex);
 	sm_cache_flush(ftl);
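The sm_ftl.c hunks track a kernel-wide rename: from_timer() becomes timer_container_of(), and del_timer()/del_timer_sync() become timer_delete()/timer_delete_sync(). Under both the old and new names, recovering the outer object from the timer pointer is plain container_of() pointer arithmetic on the embedded timer_list member. A hedged user-space sketch of that mechanism; struct ftl_like is illustrative, not the kernel's struct sm_ftl:

    #include <stddef.h>
    #include <stdio.h>

    /* Generic container_of(), which timer_container_of()/from_timer() wrap. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Illustrative stand-ins only. */
    struct timer_list { long expires; };

    struct ftl_like {
            int zone_count;
            struct timer_list timer;        /* embedded, as in struct sm_ftl */
    };

    int main(void)
    {
            struct ftl_like ftl = { .zone_count = 4 };
            /* A timer callback only receives this inner pointer... */
            struct timer_list *t = &ftl.timer;
            /* ...and recovers the enclosing object by subtracting the
             * member offset, exactly what the kernel macro expands to. */
            struct ftl_like *back = container_of(t, struct ftl_like, timer);

            printf("recovered zone_count = %d\n", back->zone_count);
            return 0;
    }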
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 19eb98bd6821..ac4b960101cc 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -7,16 +7,17 @@
  * Copyright (C) 2014, Freescale Semiconductor, Inc.
  */
 
-#include <linux/err.h>
-#include <linux/errno.h>
+#include <linux/cleanup.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
 #include <linux/math64.h>
 #include <linux/module.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/spi-nor.h>
 #include <linux/mutex.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
 #include <linux/regulator/consumer.h>
 #include <linux/sched/task_stack.h>
 #include <linux/sizes.h>
@@ -639,32 +640,26 @@ static bool spi_nor_use_parallel_locking(struct spi_nor *nor)
 static int spi_nor_rww_start_rdst(struct spi_nor *nor)
 {
 	struct spi_nor_rww *rww = &nor->rww;
-	int ret = -EAGAIN;
 
-	mutex_lock(&nor->lock);
+	guard(mutex)(&nor->lock);
 
 	if (rww->ongoing_io || rww->ongoing_rd)
-		goto busy;
+		return -EAGAIN;
 
 	rww->ongoing_io = true;
 	rww->ongoing_rd = true;
-	ret = 0;
 
-busy:
-	mutex_unlock(&nor->lock);
-	return ret;
+	return 0;
 }
 
 static void spi_nor_rww_end_rdst(struct spi_nor *nor)
 {
 	struct spi_nor_rww *rww = &nor->rww;
 
-	mutex_lock(&nor->lock);
+	guard(mutex)(&nor->lock);
 
 	rww->ongoing_io = false;
 	rww->ongoing_rd = false;
-
-	mutex_unlock(&nor->lock);
 }
 
 static int spi_nor_lock_rdst(struct spi_nor *nor)
@@ -1212,26 +1207,21 @@ static void spi_nor_offset_to_banks(u64 bank_size, loff_t start, size_t len,
 static bool spi_nor_rww_start_io(struct spi_nor *nor)
 {
 	struct spi_nor_rww *rww = &nor->rww;
-	bool start = false;
 
-	mutex_lock(&nor->lock);
+	guard(mutex)(&nor->lock);
 
 	if (rww->ongoing_io)
-		goto busy;
+		return false;
 
 	rww->ongoing_io = true;
-	start = true;
 
-busy:
-	mutex_unlock(&nor->lock);
-	return start;
+	return true;
 }
 
 static void spi_nor_rww_end_io(struct spi_nor *nor)
 {
-	mutex_lock(&nor->lock);
+	guard(mutex)(&nor->lock);
 	nor->rww.ongoing_io = false;
-	mutex_unlock(&nor->lock);
 }
 
 static int spi_nor_lock_device(struct spi_nor *nor)
@@ -1254,32 +1244,27 @@ static void spi_nor_unlock_device(struct spi_nor *nor)
 static bool spi_nor_rww_start_exclusive(struct spi_nor *nor)
 {
 	struct spi_nor_rww *rww = &nor->rww;
-	bool start = false;
 
-	mutex_lock(&nor->lock);
+	guard(mutex)(&nor->lock);
 
 	if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
-		goto busy;
+		return false;
 
 	rww->ongoing_io = true;
 	rww->ongoing_rd = true;
 	rww->ongoing_pe = true;
-	start = true;
 
-busy:
-	mutex_unlock(&nor->lock);
-	return start;
+	return true;
 }
 
 static void spi_nor_rww_end_exclusive(struct spi_nor *nor)
 {
 	struct spi_nor_rww *rww = &nor->rww;
 
-	mutex_lock(&nor->lock);
+	guard(mutex)(&nor->lock);
 
 	rww->ongoing_io = false;
 	rww->ongoing_rd = false;
 	rww->ongoing_pe = false;
-
-	mutex_unlock(&nor->lock);
 }
 
 int spi_nor_prep_and_lock(struct spi_nor *nor)
@@ -1316,30 +1301,26 @@ static bool spi_nor_rww_start_pe(struct spi_nor *nor, loff_t start, size_t len)
 {
 	struct spi_nor_rww *rww = &nor->rww;
 	unsigned int used_banks = 0;
-	bool started = false;
 	u8 first, last;
 	int bank;
 
-	mutex_lock(&nor->lock);
+	guard(mutex)(&nor->lock);
 
 	if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
-		goto busy;
+		return false;
 
 	spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
 	for (bank = first; bank <= last; bank++) {
 		if (rww->used_banks & BIT(bank))
-			goto busy;
+			return false;
 
 		used_banks |= BIT(bank);
 	}
 
 	rww->used_banks |= used_banks;
 	rww->ongoing_pe = true;
-	started = true;
 
-busy:
-	mutex_unlock(&nor->lock);
-	return started;
+	return true;
 }
 
 static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len)
@@ -1348,15 +1329,13 @@ static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len)
 {
 	struct spi_nor_rww *rww = &nor->rww;
 	u8 first, last;
 	int bank;
 
-	mutex_lock(&nor->lock);
+	guard(mutex)(&nor->lock);
 
 	spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
 	for (bank = first; bank <= last; bank++)
 		rww->used_banks &= ~BIT(bank);
 
 	rww->ongoing_pe = false;
-
-	mutex_unlock(&nor->lock);
 }
 
 static int spi_nor_prep_and_lock_pe(struct spi_nor *nor, loff_t start, size_t len)
@@ -1393,19 +1372,18 @@ static bool spi_nor_rww_start_rd(struct spi_nor *nor, loff_t start, size_t len)
 {
 	struct spi_nor_rww *rww = &nor->rww;
 	unsigned int used_banks = 0;
-	bool started = false;
 	u8 first, last;
 	int bank;
 
-	mutex_lock(&nor->lock);
+	guard(mutex)(&nor->lock);
 
 	if (rww->ongoing_io || rww->ongoing_rd)
-		goto busy;
+		return false;
 
 	spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
 	for (bank = first; bank <= last; bank++) {
 		if (rww->used_banks & BIT(bank))
-			goto busy;
+			return false;
 
 		used_banks |= BIT(bank);
 	}
@@ -1413,11 +1391,8 @@ static bool spi_nor_rww_start_rd(struct spi_nor *nor, loff_t start, size_t len)
 	rww->used_banks |= used_banks;
 	rww->ongoing_io = true;
 	rww->ongoing_rd = true;
-	started = true;
 
-busy:
-	mutex_unlock(&nor->lock);
-	return started;
+	return true;
 }
 
 static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
@@ -1426,7 +1401,7 @@ static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
 	u8 first, last;
 	int bank;
 
-	mutex_lock(&nor->lock);
+	guard(mutex)(&nor->lock);
 
 	spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
 	for (bank = first; bank <= last; bank++)
@@ -1434,8 +1409,6 @@ static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
 
 	rww->ongoing_io = false;
 	rww->ongoing_rd = false;
-
-	mutex_unlock(&nor->lock);
 }
 
 static int spi_nor_prep_and_lock_rd(struct spi_nor *nor, loff_t start, size_t len)
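All of the core.c hunks above perform one transformation: explicit mutex_lock()/mutex_unlock() pairs and their goto busy unlock paths become guard(mutex)(&nor->lock), the scope-based lock guard from linux/cleanup.h that releases the lock on every exit from the enclosing scope, so each early return unlocks automatically. Outside the kernel the same shape can be approximated with the compiler's cleanup attribute; in the sketch below, GUARD_MUTEX and unlock_cleanup are invented for illustration and pthread_mutex_t stands in for struct mutex:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Run automatically when the guarded scope is left. */
    static void unlock_cleanup(pthread_mutex_t **m)
    {
            pthread_mutex_unlock(*m);
    }

    /* Crude model of guard(mutex)(): lock now, unlock at end of scope. */
    #define GUARD_MUTEX(m)						\
            __attribute__((cleanup(unlock_cleanup)))			\
            pthread_mutex_t *_guard = (m);				\
            pthread_mutex_lock(_guard)

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool ongoing_io;

    /* Same shape as the reworked spi_nor_rww_start_io(): no goto needed. */
    static bool start_io(void)
    {
            GUARD_MUTEX(&lock);

            if (ongoing_io)
                    return false;   /* unlock happens here automatically */

            ongoing_io = true;
            return true;            /* ...and here */
    }

    int main(void)
    {
            printf("first start_io: %d\n", start_io());  /* 1 */
            printf("second start_io: %d\n", start_io()); /* 0 */
            return 0;
    }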
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index 99936fd25d43..e97f5cbd9aad 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -45,8 +45,55 @@ mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
 	return 0;
 }
 
+static int
+macronix_qpp4b_post_sfdp_fixups(struct spi_nor *nor)
+{
+	/* PP_1_1_4_4B is supported but missing in 4BAIT. */
+	struct spi_nor_flash_parameter *params = nor->params;
+
+	params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
+	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
+				SPINOR_OP_PP_1_1_4_4B, SNOR_PROTO_1_1_4);
+
+	return 0;
+}
+
+static int
+mx25l3255e_late_init_fixups(struct spi_nor *nor)
+{
+	struct spi_nor_flash_parameter *params = nor->params;
+
+	/*
+	 * SFDP of MX25L3255E is JESD216, which does not include the Quad
+	 * Enable bit Requirement in BFPT. As a result, during BFPT parsing,
+	 * the quad_enable method is not set to spi_nor_sr1_bit6_quad_enable.
+	 * Therefore, it is necessary to correct this setting by late_init.
+	 */
+	params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+
+	/*
+	 * In addition, MX25L3255E also supports 1-4-4 page program in 3-byte
+	 * address mode. However, since the 3-byte address 1-4-4 page program
+	 * is not defined in SFDP, it needs to be configured in late_init.
+	 */
+	params->hwcaps.mask |= SNOR_HWCAPS_PP_1_4_4;
+	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_4_4],
+				SPINOR_OP_PP_1_4_4, SNOR_PROTO_1_4_4);
+
+	return 0;
+}
+
 static const struct spi_nor_fixups mx25l25635_fixups = {
 	.post_bfpt = mx25l25635_post_bfpt_fixups,
+	.post_sfdp = macronix_qpp4b_post_sfdp_fixups,
+};
+
+static const struct spi_nor_fixups macronix_qpp4b_fixups = {
+	.post_sfdp = macronix_qpp4b_post_sfdp_fixups,
+};
+
+static const struct spi_nor_fixups mx25l3255e_fixups = {
+	.late_init = mx25l3255e_late_init_fixups,
 };
 
 static const struct flash_info macronix_nor_parts[] = {
@@ -70,10 +117,8 @@ static const struct flash_info macronix_nor_parts[] = {
 		.name = "mx25l8005",
 		.size = SZ_1M,
 	}, {
+		/* MX25L1606E */
 		.id = SNOR_ID(0xc2, 0x20, 0x15),
-		.name = "mx25l1606e",
-		.size = SZ_2M,
-		.no_sfdp_flags = SECT_4K,
 	}, {
 		.id = SNOR_ID(0xc2, 0x20, 0x16),
 		.name = "mx25l3205d",
@@ -85,28 +130,26 @@ static const struct flash_info macronix_nor_parts[] = {
 		.size = SZ_8M,
 		.no_sfdp_flags = SECT_4K,
 	}, {
+		/* MX25L12805D */
 		.id = SNOR_ID(0xc2, 0x20, 0x18),
-		.name = "mx25l12805d",
-		.size = SZ_16M,
 		.flags = SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP,
-		.no_sfdp_flags = SECT_4K,
 	}, {
+		/* MX25L25635E, MX25L25645G */
 		.id = SNOR_ID(0xc2, 0x20, 0x19),
-		.name = "mx25l25635e",
-		.size = SZ_32M,
-		.no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
 		.fixups = &mx25l25635_fixups
 	}, {
+		/* MX66L51235F */
 		.id = SNOR_ID(0xc2, 0x20, 0x1a),
-		.name = "mx66l51235f",
-		.size = SZ_64M,
-		.no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
 		.fixup_flags = SPI_NOR_4B_OPCODES,
+		.fixups = &macronix_qpp4b_fixups,
 	}, {
+		/* MX66L1G45G */
 		.id = SNOR_ID(0xc2, 0x20, 0x1b),
-		.name = "mx66l1g45g",
-		.size = SZ_128M,
-		.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+		.fixups = &macronix_qpp4b_fixups,
+	}, {
+		/* MX66L2G45G */
+		.id = SNOR_ID(0xc2, 0x20, 0x1c),
+		.fixups = &macronix_qpp4b_fixups,
 	}, {
 		.id = SNOR_ID(0xc2, 0x23, 0x14),
 		.name = "mx25v8035f",
@@ -143,23 +186,17 @@ static const struct flash_info macronix_nor_parts[] = {
 		.size = SZ_16M,
 		.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
 	}, {
+		/* MX25U51245G */
 		.id = SNOR_ID(0xc2, 0x25, 0x3a),
-		.name = "mx25u51245g",
-		.size = SZ_64M,
-		.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
-		.fixup_flags = SPI_NOR_4B_OPCODES,
+		.fixups = &macronix_qpp4b_fixups,
 	}, {
-		.id = SNOR_ID(0xc2, 0x25, 0x3a),
-		.name = "mx66u51235f",
-		.size = SZ_64M,
-		.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
-		.fixup_flags = SPI_NOR_4B_OPCODES,
+		/* MX66U1G45G */
+		.id = SNOR_ID(0xc2, 0x25, 0x3b),
+		.fixups = &macronix_qpp4b_fixups,
 	}, {
+		/* MX66U2G45G */
 		.id = SNOR_ID(0xc2, 0x25, 0x3c),
-		.name = "mx66u2g45g",
-		.size = SZ_256M,
-		.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
-		.fixup_flags = SPI_NOR_4B_OPCODES,
+		.fixups = &macronix_qpp4b_fixups,
 	}, {
 		.id = SNOR_ID(0xc2, 0x26, 0x18),
 		.name = "mx25l12855e",
@@ -184,15 +221,14 @@ static const struct flash_info macronix_nor_parts[] = {
 		.size = SZ_4M,
 		.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
 	}, {
+		/* MX25UW51245G */
 		.id = SNOR_ID(0xc2, 0x81, 0x3a),
-		.name = "mx25uw51245g",
 		.n_banks = 4,
 		.flags = SPI_NOR_RWW,
 	}, {
+		/* MX25L3255E */
 		.id = SNOR_ID(0xc2, 0x9e, 0x16),
-		.name = "mx25l3255e",
-		.size = SZ_4M,
-		.no_sfdp_flags = SECT_4K,
+		.fixups = &mx25l3255e_fixups,
 	},
 	/*
	 * This spares us of adding new flash entries for flashes that can be
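The table edits above drop .name, .size and .no_sfdp_flags from parts whose parameters are discoverable via SFDP, leaving only the ID plus a fixup that patches what the tables omit, here the 4-byte quad page program missing from the 4BAIT table. A minimal model of that layering follows; the HWCAPS_* bits and struct are made up for illustration, and 0x34 merely echoes what the kernel defines for SPINOR_OP_PP_1_1_4_4B:

    #include <stdio.h>

    /* Modeled capability bits, loosely mirroring SNOR_HWCAPS_*. */
    #define HWCAPS_PP       (1u << 0)   /* 1-1-1 page program */
    #define HWCAPS_PP_1_1_4 (1u << 1)   /* quad-output page program */

    struct flash_params {
            unsigned int hwcaps;
            unsigned char pp_opcode;
    };

    /* Stand-in for SFDP parsing: 4BAIT omits the 1-1-4 page program. */
    static void parse_sfdp(struct flash_params *p)
    {
            p->hwcaps = HWCAPS_PP;
            p->pp_opcode = 0x12;        /* plain PP with 4-byte address */
    }

    /* Mirrors macronix_qpp4b_post_sfdp_fixups(): patch in what SFDP missed. */
    static void qpp4b_post_sfdp_fixup(struct flash_params *p)
    {
            p->hwcaps |= HWCAPS_PP_1_1_4;
            p->pp_opcode = 0x34;        /* modeled quad page program, 4B */
    }

    int main(void)
    {
            struct flash_params p;

            parse_sfdp(&p);
            qpp4b_post_sfdp_fixup(&p);  /* runs after SFDP, like .post_sfdp */
            printf("hwcaps=0x%x pp opcode=0x%02x\n", p.hwcaps, p.pp_opcode);
            return 0;
    }

The design point is that SFDP remains the source of truth for everything it does describe; the fixup only layers the one missing capability on top instead of re-declaring the whole part.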
diff --git a/drivers/mtd/spi-nor/otp.c b/drivers/mtd/spi-nor/otp.c
index 9a729aa3452d..7d0b145d78d8 100644
--- a/drivers/mtd/spi-nor/otp.c
+++ b/drivers/mtd/spi-nor/otp.c
@@ -6,6 +6,7 @@
  */
 
 #include <linux/log2.h>
+#include <linux/math64.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/spi-nor.h>
 
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index bf08dbf5e742..b9f156c0f8bc 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -17,6 +17,7 @@
 
 #define SPINOR_OP_CLSR		0x30	/* Clear status register 1 */
 #define SPINOR_OP_CLPEF		0x82	/* Clear program/erase failure flags */
+#define SPINOR_OP_CYPRESS_EX4B	0xB8	/* Exit 4-byte address mode */
 #define SPINOR_OP_CYPRESS_DIE_ERASE	0x61	/* Chip (die) erase */
 #define SPINOR_OP_RD_ANY_REG	0x65	/* Read any register */
 #define SPINOR_OP_WR_ANY_REG	0x71	/* Write any register */
@@ -58,6 +59,13 @@
 		   SPI_MEM_OP_DUMMY(ndummy, 0),				\
 		   SPI_MEM_OP_DATA_IN(1, buf, 0))
 
+#define CYPRESS_NOR_EN4B_EX4B_OP(enable)				\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(enable ? SPINOR_OP_EN4B :		\
+				  SPINOR_OP_CYPRESS_EX4B, 0),		\
+		   SPI_MEM_OP_NO_ADDR,					\
+		   SPI_MEM_OP_NO_DUMMY,					\
+		   SPI_MEM_OP_NO_DATA)
+
 #define SPANSION_OP(opcode)						\
 	SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0),				\
 		   SPI_MEM_OP_NO_ADDR,					\
@@ -356,6 +364,20 @@ static int cypress_nor_quad_enable_volatile(struct spi_nor *nor)
 	return 0;
 }
 
+static int cypress_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
+{
+	int ret;
+	struct spi_mem_op op = CYPRESS_NOR_EN4B_EX4B_OP(enable);
+
+	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+	ret = spi_mem_exec_op(nor->spimem, &op);
+	if (ret)
+		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
+
+	return ret;
+}
+
 /**
  * cypress_nor_determine_addr_mode_by_sr1() - Determine current address mode
  *                                            (3 or 4-byte) by querying status
@@ -526,6 +548,9 @@ s25fs256t_post_bfpt_fixup(struct spi_nor *nor,
 	struct spi_mem_op op;
 	int ret;
 
+	/* Assign 4-byte address mode method that is not determined in BFPT */
+	nor->params->set_4byte_addr_mode = cypress_nor_set_4byte_addr_mode;
+
 	ret = cypress_nor_set_addr_mode_nbytes(nor);
 	if (ret)
 		return ret;
@@ -591,6 +616,9 @@ s25hx_t_post_bfpt_fixup(struct spi_nor *nor,
 {
 	int ret;
 
+	/* Assign 4-byte address mode method that is not determined in BFPT */
+	nor->params->set_4byte_addr_mode = cypress_nor_set_4byte_addr_mode;
+
 	ret = cypress_nor_set_addr_mode_nbytes(nor);
 	if (ret)
 		return ret;
@@ -718,6 +746,9 @@ static int s28hx_t_post_bfpt_fixup(struct spi_nor *nor,
 				   const struct sfdp_parameter_header *bfpt_header,
 				   const struct sfdp_bfpt *bfpt)
 {
+	/* Assign 4-byte address mode method that is not determined in BFPT */
+	nor->params->set_4byte_addr_mode = cypress_nor_set_4byte_addr_mode;
+
 	return cypress_nor_set_addr_mode_nbytes(nor);
 }
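The new CYPRESS_NOR_EN4B_EX4B_OP is a command-only operation: a single opcode byte with no address, dummy or data cycles, where 0xB8 is the Cypress exit opcode standing in for the more common 0xE9. The sketch below serializes such a frame and contrasts it with a read frame, which also shows why parts larger than 16 MiB need 4-byte mode at all; the frame layout is a simplified wire-level model, not a controller driver:

    #include <stdint.h>
    #include <stdio.h>

    /* Opcodes as given in the patch; 0xB7 (EN4B) is the common JEDEC one. */
    #define OP_EN4B         0xB7
    #define OP_CYPRESS_EX4B 0xB8

    /* Build the one-byte, command-only frame that EN4B/EX4B puts on the wire. */
    static int build_en4b_ex4b(uint8_t *buf, int enable)
    {
            buf[0] = enable ? OP_EN4B : OP_CYPRESS_EX4B;
            return 1; /* no address, dummy or data cycles follow */
    }

    /* Build a read frame; addr_width is 3 or 4 bytes. */
    static int build_read(uint8_t *buf, uint32_t addr, int addr_width)
    {
            int n = 0;

            buf[n++] = 0x03; /* classic READ opcode */
            for (int i = addr_width - 1; i >= 0; i--)
                    buf[n++] = addr >> (8 * i); /* big-endian address bytes */
            return n;
    }

    int main(void)
    {
            uint8_t frame[8];
            int n;

            n = build_en4b_ex4b(frame, 0);
            printf("EX4B frame: %d byte(s), opcode 0x%02X\n", n, frame[0]);

            /* 0x1234567 is above 16 MiB, so it only fits a 4-byte address. */
            n = build_read(frame, 0x1234567, 4);
            printf("READ frame length with 4-byte address: %d\n", n);
            return 0;
    }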
diff --git a/drivers/mtd/spi-nor/swp.c b/drivers/mtd/spi-nor/swp.c
index e48c3cff247a..9c9328478d8a 100644
--- a/drivers/mtd/spi-nor/swp.c
+++ b/drivers/mtd/spi-nor/swp.c
@@ -5,6 +5,7 @@
  * Copyright (C) 2005, Intec Automation Inc.
  * Copyright (C) 2014, Freescale Semiconductor, Inc.
  */
+#include <linux/math64.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/spi-nor.h>
 
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
index 8d0a00d69e12..63a93c9eb917 100644
--- a/drivers/mtd/spi-nor/winbond.c
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -10,6 +10,7 @@
 
 #define WINBOND_NOR_OP_RDEAR	0xc8	/* Read Extended Address Register */
 #define WINBOND_NOR_OP_WREAR	0xc5	/* Write Extended Address Register */
+#define WINBOND_NOR_OP_SELDIE	0xc2	/* Select active die */
 
 #define WINBOND_NOR_WREAR_OP(buf)					\
 	SPI_MEM_OP(SPI_MEM_OP_CMD(WINBOND_NOR_OP_WREAR, 0),		\
 		   SPI_MEM_OP_NO_ADDR,					\
 		   SPI_MEM_OP_NO_DUMMY,					\
 		   SPI_MEM_OP_DATA_OUT(1, buf, 0))
 
+#define WINBOND_NOR_SELDIE_OP(buf)					\
+	SPI_MEM_OP(SPI_MEM_OP_CMD(WINBOND_NOR_OP_SELDIE, 0),		\
+		   SPI_MEM_OP_NO_ADDR,					\
+		   SPI_MEM_OP_NO_DUMMY,					\
+		   SPI_MEM_OP_DATA_OUT(1, buf, 0))
+
 static int
 w25q128_post_bfpt_fixups(struct spi_nor *nor,
 			 const struct sfdp_parameter_header *bfpt_header,
@@ -66,6 +73,79 @@ static const struct spi_nor_fixups w25q256_fixups = {
 	.post_bfpt = w25q256_post_bfpt_fixups,
 };
 
+/**
+ * winbond_nor_select_die() - Set active die.
+ * @nor:	pointer to 'struct spi_nor'.
+ * @die:	die to set active.
+ *
+ * Certain Winbond chips feature more than a single die. This is mostly hidden
+ * from the user, except that some chips may experience time deviation when
+ * modifying the status bits between dies, which in some corner cases may
+ * produce problematic races. Being able to explicitly select a die to check
+ * its state in this case may be useful.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int winbond_nor_select_die(struct spi_nor *nor, u8 die)
+{
+	int ret;
+
+	nor->bouncebuf[0] = die;
+
+	if (nor->spimem) {
+		struct spi_mem_op op = WINBOND_NOR_SELDIE_OP(nor->bouncebuf);
+
+		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
+
+		ret = spi_mem_exec_op(nor->spimem, &op);
+	} else {
+		ret = spi_nor_controller_ops_write_reg(nor,
+						       WINBOND_NOR_OP_SELDIE,
+						       nor->bouncebuf, 1);
+	}
+
+	if (ret)
+		dev_dbg(nor->dev, "error %d selecting die %d\n", ret, die);
+
+	return ret;
+}
+
+static int winbond_nor_multi_die_ready(struct spi_nor *nor)
+{
+	int ret, i;
+
+	for (i = 0; i < nor->params->n_dice; i++) {
+		ret = winbond_nor_select_die(nor, i);
+		if (ret)
+			return ret;
+
+		ret = spi_nor_sr_ready(nor);
+		if (ret <= 0)
+			return ret;
+	}
+
+	return 1;
+}
+
+static int
+winbond_nor_multi_die_post_sfdp_fixups(struct spi_nor *nor)
+{
+	/*
+	 * SFDP supports dice numbers, but this information is only available in
+	 * optional additional tables which are not provided by these chips.
+	 * Dice number has an impact though, because these devices need extra
+	 * care when reading the busy bit.
+	 */
+	nor->params->n_dice = nor->params->size / SZ_64M;
+	nor->params->ready = winbond_nor_multi_die_ready;
+
+	return 0;
+}
+
+static const struct spi_nor_fixups winbond_nor_multi_die_fixups = {
+	.post_sfdp = winbond_nor_multi_die_post_sfdp_fixups,
+};
+
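Since each die keeps its own status register, the ready hook has to poll every die and report busy if any one of them still is. A compact user-space model of winbond_nor_multi_die_ready()'s loop; die_busy is a stub for the select-die-then-read-status-register sequence, and the die count is only an example of the size / 64 MiB derivation:

    #include <stdbool.h>
    #include <stdio.h>

    #define N_DICE 2 /* e.g. a 128 MiB part at 64 MiB per die */

    /* Stub state for "select die, then read its status register busy bit". */
    static bool die_busy[N_DICE] = { false, true };

    static bool die_ready(int die)
    {
            /* Kernel flow: winbond_nor_select_die() then spi_nor_sr_ready() */
            return !die_busy[die];
    }

    /* Mirrors winbond_nor_multi_die_ready(): all dies must be idle. */
    static bool multi_die_ready(void)
    {
            for (int die = 0; die < N_DICE; die++)
                    if (!die_ready(die))
                            return false;
            return true;
    }

    int main(void)
    {
            printf("ready: %d\n", multi_die_ready()); /* 0: die 1 still busy */
            die_busy[1] = false;
            printf("ready: %d\n", multi_die_ready()); /* 1 */
            return 0;
    }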
 static const struct flash_info winbond_nor_parts[] = {
 	{
 		.id = SNOR_ID(0xef, 0x30, 0x10),
@@ -147,6 +227,10 @@ static const struct flash_info winbond_nor_parts[] = {
 		.size = SZ_64M,
 		.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
 	}, {
+		/* W25Q01JV */
+		.id = SNOR_ID(0xef, 0x40, 0x21),
+		.fixups = &winbond_nor_multi_die_fixups,
+	}, {
 		.id = SNOR_ID(0xef, 0x50, 0x12),
 		.name = "w25q20bw",
 		.size = SZ_256K,
@@ -222,6 +306,10 @@ static const struct flash_info winbond_nor_parts[] = {
 		.id = SNOR_ID(0xef, 0x70, 0x19),
 		.name = "w25q256jvm",
 	}, {
+		/* W25Q02JV */
+		.id = SNOR_ID(0xef, 0x70, 0x22),
+		.fixups = &winbond_nor_multi_die_fixups,
+	}, {
 		.id = SNOR_ID(0xef, 0x71, 0x19),
 		.name = "w25m512jv",
 		.size = SZ_64M,
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 2836905f0152..39cc0a6a4d37 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -199,7 +199,7 @@ static blk_status_t ubiblock_read(struct request *req)
 	 * and ubi_read_sg() will check that limit.
 	 */
 	ubi_sgl_init(&pdu->usgl);
-	blk_rq_map_sg(req->q, req, pdu->usgl.sg);
+	blk_rq_map_sg(req, pdu->usgl.sg);
 
 	while (bytes_left) {
 		/*