Diffstat (limited to 'drivers/mtd/nand')
61 files changed, 4205 insertions, 1062 deletions
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 4a9aed4f0104..b40455234cbd 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -13,7 +13,38 @@ menu "ECC engine support" config MTD_NAND_ECC bool - depends on MTD_NAND_CORE + select MTD_NAND_CORE + +config MTD_NAND_ECC_SW_HAMMING + bool "Software Hamming ECC engine" + default y if MTD_RAW_NAND + select MTD_NAND_ECC + help + This enables support for software Hamming error + correction. This correction can correct up to 1 bit error + per chunk and detect up to 2 bit errors. While it used to be + widely used with old parts, newer NAND chips usually require + more strength correction and in this case BCH or RS will be + preferred. + +config MTD_NAND_ECC_SW_HAMMING_SMC + bool "NAND ECC Smart Media byte order" + depends on MTD_NAND_ECC_SW_HAMMING + default n + help + Software ECC according to the Smart Media Specification. + The original Linux implementation had byte 0 and 1 swapped. + +config MTD_NAND_ECC_SW_BCH + bool "Software BCH ECC engine" + select BCH + select MTD_NAND_ECC + default n + help + This enables support for software BCH error correction. Binary BCH + codes are more powerful and cpu intensive than traditional Hamming + ECC codes. They are used with NAND devices requiring more than 1 bit + of error correction. endmenu diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index 981372953b56..1c0b46960eb1 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -8,3 +8,5 @@ obj-y += raw/ obj-y += spi/ nandcore-$(CONFIG_MTD_NAND_ECC) += ecc.o +nandcore-$(CONFIG_MTD_NAND_ECC_SW_HAMMING) += ecc-sw-hamming.o +nandcore-$(CONFIG_MTD_NAND_ECC_SW_BCH) += ecc-sw-bch.o diff --git a/drivers/mtd/nand/core.c b/drivers/mtd/nand/core.c index b6de955ac8bf..5e13a03d2b32 100644 --- a/drivers/mtd/nand/core.c +++ b/drivers/mtd/nand/core.c @@ -208,6 +208,130 @@ int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len) EXPORT_SYMBOL_GPL(nanddev_mtd_max_bad_blocks); /** + * nanddev_get_ecc_engine() - Find and get a suitable ECC engine + * @nand: NAND device + */ +static int nanddev_get_ecc_engine(struct nand_device *nand) +{ + int engine_type; + + /* Read the user desires in terms of ECC engine/configuration */ + of_get_nand_ecc_user_config(nand); + + engine_type = nand->ecc.user_conf.engine_type; + if (engine_type == NAND_ECC_ENGINE_TYPE_INVALID) + engine_type = nand->ecc.defaults.engine_type; + + switch (engine_type) { + case NAND_ECC_ENGINE_TYPE_NONE: + return 0; + case NAND_ECC_ENGINE_TYPE_SOFT: + nand->ecc.engine = nand_ecc_get_sw_engine(nand); + break; + case NAND_ECC_ENGINE_TYPE_ON_DIE: + nand->ecc.engine = nand_ecc_get_on_die_hw_engine(nand); + break; + case NAND_ECC_ENGINE_TYPE_ON_HOST: + pr_err("On-host hardware ECC engines not supported yet\n"); + break; + default: + pr_err("Missing ECC engine type\n"); + } + + if (!nand->ecc.engine) + return -EINVAL; + + return 0; +} + +/** + * nanddev_put_ecc_engine() - Dettach and put the in-use ECC engine + * @nand: NAND device + */ +static int nanddev_put_ecc_engine(struct nand_device *nand) +{ + switch (nand->ecc.ctx.conf.engine_type) { + case NAND_ECC_ENGINE_TYPE_ON_HOST: + pr_err("On-host hardware ECC engines not supported yet\n"); + break; + case NAND_ECC_ENGINE_TYPE_NONE: + case NAND_ECC_ENGINE_TYPE_SOFT: + case NAND_ECC_ENGINE_TYPE_ON_DIE: + default: + break; + } + + return 0; +} + +/** + * nanddev_find_ecc_configuration() - Find a suitable ECC configuration + * @nand: NAND device + */ +static int 
nanddev_find_ecc_configuration(struct nand_device *nand) +{ + int ret; + + if (!nand->ecc.engine) + return -ENOTSUPP; + + ret = nand_ecc_init_ctx(nand); + if (ret) + return ret; + + if (!nand_ecc_is_strong_enough(nand)) + pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n", + nand->mtd.name); + + return 0; +} + +/** + * nanddev_ecc_engine_init() - Initialize an ECC engine for the chip + * @nand: NAND device + */ +int nanddev_ecc_engine_init(struct nand_device *nand) +{ + int ret; + + /* Look for the ECC engine to use */ + ret = nanddev_get_ecc_engine(nand); + if (ret) { + pr_err("No ECC engine found\n"); + return ret; + } + + /* No ECC engine requested */ + if (!nand->ecc.engine) + return 0; + + /* Configure the engine: balance user input and chip requirements */ + ret = nanddev_find_ecc_configuration(nand); + if (ret) { + pr_err("No suitable ECC configuration\n"); + nanddev_put_ecc_engine(nand); + + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(nanddev_ecc_engine_init); + +/** + * nanddev_ecc_engine_cleanup() - Cleanup ECC engine initializations + * @nand: NAND device + */ +void nanddev_ecc_engine_cleanup(struct nand_device *nand) +{ + if (nand->ecc.engine) + nand_ecc_cleanup_ctx(nand); + + nanddev_put_ecc_engine(nand); +} +EXPORT_SYMBOL_GPL(nanddev_ecc_engine_cleanup); + +/** * nanddev_init() - Initialize a NAND device * @nand: NAND device * @ops: NAND device operations diff --git a/drivers/mtd/nand/ecc-sw-bch.c b/drivers/mtd/nand/ecc-sw-bch.c new file mode 100644 index 000000000000..0a0ac11d5725 --- /dev/null +++ b/drivers/mtd/nand/ecc-sw-bch.c @@ -0,0 +1,406 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file provides ECC correction for more than 1 bit per block of data, + * using binary BCH codes. It relies on the generic BCH library lib/bch.c. + * + * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com> + */ + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/bitops.h> +#include <linux/mtd/nand.h> +#include <linux/mtd/nand-ecc-sw-bch.h> + +/** + * nand_ecc_sw_bch_calculate - Calculate the ECC corresponding to a data block + * @nand: NAND device + * @buf: Input buffer with raw data + * @code: Output buffer with ECC + */ +int nand_ecc_sw_bch_calculate(struct nand_device *nand, + const unsigned char *buf, unsigned char *code) +{ + struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv; + unsigned int i; + + memset(code, 0, engine_conf->code_size); + bch_encode(engine_conf->bch, buf, nand->ecc.ctx.conf.step_size, code); + + /* apply mask so that an erased page is a valid codeword */ + for (i = 0; i < engine_conf->code_size; i++) + code[i] ^= engine_conf->eccmask[i]; + + return 0; +} +EXPORT_SYMBOL(nand_ecc_sw_bch_calculate); + +/** + * nand_ecc_sw_bch_correct - Detect, correct and report bit error(s) + * @nand: NAND device + * @buf: Raw data read from the chip + * @read_ecc: ECC bytes from the chip + * @calc_ecc: ECC calculated from the raw data + * + * Detect and correct bit errors for a data block. 
+ */ +int nand_ecc_sw_bch_correct(struct nand_device *nand, unsigned char *buf, + unsigned char *read_ecc, unsigned char *calc_ecc) +{ + struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv; + unsigned int step_size = nand->ecc.ctx.conf.step_size; + unsigned int *errloc = engine_conf->errloc; + int i, count; + + count = bch_decode(engine_conf->bch, NULL, step_size, read_ecc, + calc_ecc, NULL, errloc); + if (count > 0) { + for (i = 0; i < count; i++) { + if (errloc[i] < (step_size * 8)) + /* The error is in the data area: correct it */ + buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7)); + + /* Otherwise the error is in the ECC area: nothing to do */ + pr_debug("%s: corrected bitflip %u\n", __func__, + errloc[i]); + } + } else if (count < 0) { + pr_err("ECC unrecoverable error\n"); + count = -EBADMSG; + } + + return count; +} +EXPORT_SYMBOL(nand_ecc_sw_bch_correct); + +/** + * nand_ecc_sw_bch_cleanup - Cleanup software BCH ECC resources + * @nand: NAND device + */ +static void nand_ecc_sw_bch_cleanup(struct nand_device *nand) +{ + struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv; + + bch_free(engine_conf->bch); + kfree(engine_conf->errloc); + kfree(engine_conf->eccmask); +} + +/** + * nand_ecc_sw_bch_init - Initialize software BCH ECC engine + * @nand: NAND device + * + * Returns: a pointer to a new NAND BCH control structure, or NULL upon failure + * + * Initialize NAND BCH error correction. @nand.ecc parameters 'step_size' and + * 'bytes' are used to compute the following BCH parameters: + * m, the Galois field order + * t, the error correction capability + * 'bytes' should be equal to the number of bytes required to store m * t + * bits, where m is such that 2^m - 1 > step_size * 8. + * + * Example: to configure 4 bit correction per 512 bytes, you should pass + * step_size = 512 (thus, m = 13 is the smallest integer such that 2^m - 1 > 512 * 8) + * bytes = 7 (7 bytes are required to store m * t = 13 * 4 = 52 bits) + */ +static int nand_ecc_sw_bch_init(struct nand_device *nand) +{ + struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv; + unsigned int eccsize = nand->ecc.ctx.conf.step_size; + unsigned int eccbytes = engine_conf->code_size; + unsigned int m, t, i; + unsigned char *erased_page; + int ret; + + m = fls(1 + (8 * eccsize)); + t = (eccbytes * 8) / m; + + engine_conf->bch = bch_init(m, t, 0, false); + if (!engine_conf->bch) + return -EINVAL; + + engine_conf->eccmask = kzalloc(eccbytes, GFP_KERNEL); + engine_conf->errloc = kmalloc_array(t, sizeof(*engine_conf->errloc), + GFP_KERNEL); + if (!engine_conf->eccmask || !engine_conf->errloc) { + ret = -ENOMEM; + goto cleanup; + } + + /* Compute and store the inverted ECC of an erased step */ + erased_page = kmalloc(eccsize, GFP_KERNEL); + if (!erased_page) { + ret = -ENOMEM; + goto cleanup; + } + + memset(erased_page, 0xff, eccsize); + bch_encode(engine_conf->bch, erased_page, eccsize, + engine_conf->eccmask); + kfree(erased_page); + + for (i = 0; i < eccbytes; i++) + engine_conf->eccmask[i] ^= 0xff; + + /* Verify that the number of code bytes has the expected value */ + if (engine_conf->bch->ecc_bytes != eccbytes) { + pr_err("Invalid number of ECC bytes: %u, expected: %u\n", + eccbytes, engine_conf->bch->ecc_bytes); + ret = -EINVAL; + goto cleanup; + } + + /* Sanity checks */ + if (8 * (eccsize + eccbytes) >= (1 << m)) { + pr_err("ECC step size is too large (%u)\n", eccsize); + ret = -EINVAL; + goto cleanup; + } + + return 0; + +cleanup: + nand_ecc_sw_bch_cleanup(nand); + + return ret; +} + +int 
nand_ecc_sw_bch_init_ctx(struct nand_device *nand) +{ + struct nand_ecc_props *conf = &nand->ecc.ctx.conf; + struct mtd_info *mtd = nanddev_to_mtd(nand); + struct nand_ecc_sw_bch_conf *engine_conf; + unsigned int code_size = 0, nsteps; + int ret; + + /* Only large page NAND chips may use BCH */ + if (mtd->oobsize < 64) { + pr_err("BCH cannot be used with small page NAND chips\n"); + return -EINVAL; + } + + if (!mtd->ooblayout) + mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout()); + + conf->engine_type = NAND_ECC_ENGINE_TYPE_SOFT; + conf->algo = NAND_ECC_ALGO_BCH; + conf->step_size = nand->ecc.user_conf.step_size; + conf->strength = nand->ecc.user_conf.strength; + + /* + * Board driver should supply ECC size and ECC strength + * values to select how many bits are correctable. + * Otherwise, default to 512 bytes for large page devices and 256 for + * small page devices. + */ + if (!conf->step_size) { + if (mtd->oobsize >= 64) + conf->step_size = 512; + else + conf->step_size = 256; + + conf->strength = 4; + } + + nsteps = mtd->writesize / conf->step_size; + + /* Maximize */ + if (nand->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) { + conf->step_size = 1024; + nsteps = mtd->writesize / conf->step_size; + /* Reserve 2 bytes for the BBM */ + code_size = (mtd->oobsize - 2) / nsteps; + conf->strength = code_size * 8 / fls(8 * conf->step_size); + } + + if (!code_size) + code_size = DIV_ROUND_UP(conf->strength * + fls(8 * conf->step_size), 8); + + if (!conf->strength) + conf->strength = (code_size * 8) / fls(8 * conf->step_size); + + if (!code_size && !conf->strength) { + pr_err("Missing ECC parameters\n"); + return -EINVAL; + } + + engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL); + if (!engine_conf) + return -ENOMEM; + + ret = nand_ecc_init_req_tweaking(&engine_conf->req_ctx, nand); + if (ret) + goto free_engine_conf; + + engine_conf->code_size = code_size; + engine_conf->nsteps = nsteps; + engine_conf->calc_buf = kzalloc(mtd->oobsize, GFP_KERNEL); + engine_conf->code_buf = kzalloc(mtd->oobsize, GFP_KERNEL); + if (!engine_conf->calc_buf || !engine_conf->code_buf) { + ret = -ENOMEM; + goto free_bufs; + } + + nand->ecc.ctx.priv = engine_conf; + nand->ecc.ctx.total = nsteps * code_size; + + ret = nand_ecc_sw_bch_init(nand); + if (ret) + goto free_bufs; + + /* Verify the layout validity */ + if (mtd_ooblayout_count_eccbytes(mtd) != + engine_conf->nsteps * engine_conf->code_size) { + pr_err("Invalid ECC layout\n"); + ret = -EINVAL; + goto cleanup_bch_ctx; + } + + return 0; + +cleanup_bch_ctx: + nand_ecc_sw_bch_cleanup(nand); +free_bufs: + nand_ecc_cleanup_req_tweaking(&engine_conf->req_ctx); + kfree(engine_conf->calc_buf); + kfree(engine_conf->code_buf); +free_engine_conf: + kfree(engine_conf); + + return ret; +} +EXPORT_SYMBOL(nand_ecc_sw_bch_init_ctx); + +void nand_ecc_sw_bch_cleanup_ctx(struct nand_device *nand) +{ + struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv; + + if (engine_conf) { + nand_ecc_sw_bch_cleanup(nand); + nand_ecc_cleanup_req_tweaking(&engine_conf->req_ctx); + kfree(engine_conf->calc_buf); + kfree(engine_conf->code_buf); + kfree(engine_conf); + } +} +EXPORT_SYMBOL(nand_ecc_sw_bch_cleanup_ctx); + +static int nand_ecc_sw_bch_prepare_io_req(struct nand_device *nand, + struct nand_page_io_req *req) +{ + struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv; + struct mtd_info *mtd = nanddev_to_mtd(nand); + int eccsize = nand->ecc.ctx.conf.step_size; + int eccbytes = engine_conf->code_size; + int eccsteps = engine_conf->nsteps; + int total = 
nand->ecc.ctx.total; + u8 *ecccalc = engine_conf->calc_buf; + const u8 *data; + int i; + + /* Nothing to do for a raw operation */ + if (req->mode == MTD_OPS_RAW) + return 0; + + /* This engine does not provide BBM/free OOB bytes protection */ + if (!req->datalen) + return 0; + + nand_ecc_tweak_req(&engine_conf->req_ctx, req); + + /* No more preparation for page read */ + if (req->type == NAND_PAGE_READ) + return 0; + + /* Preparation for page write: derive the ECC bytes and place them */ + for (i = 0, data = req->databuf.out; + eccsteps; + eccsteps--, i += eccbytes, data += eccsize) + nand_ecc_sw_bch_calculate(nand, data, &ecccalc[i]); + + return mtd_ooblayout_set_eccbytes(mtd, ecccalc, (void *)req->oobbuf.out, + 0, total); +} + +static int nand_ecc_sw_bch_finish_io_req(struct nand_device *nand, + struct nand_page_io_req *req) +{ + struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv; + struct mtd_info *mtd = nanddev_to_mtd(nand); + int eccsize = nand->ecc.ctx.conf.step_size; + int total = nand->ecc.ctx.total; + int eccbytes = engine_conf->code_size; + int eccsteps = engine_conf->nsteps; + u8 *ecccalc = engine_conf->calc_buf; + u8 *ecccode = engine_conf->code_buf; + unsigned int max_bitflips = 0; + u8 *data = req->databuf.in; + int i, ret; + + /* Nothing to do for a raw operation */ + if (req->mode == MTD_OPS_RAW) + return 0; + + /* This engine does not provide BBM/free OOB bytes protection */ + if (!req->datalen) + return 0; + + /* No more preparation for page write */ + if (req->type == NAND_PAGE_WRITE) { + nand_ecc_restore_req(&engine_conf->req_ctx, req); + return 0; + } + + /* Finish a page read: retrieve the (raw) ECC bytes*/ + ret = mtd_ooblayout_get_eccbytes(mtd, ecccode, req->oobbuf.in, 0, + total); + if (ret) + return ret; + + /* Calculate the ECC bytes */ + for (i = 0; eccsteps; eccsteps--, i += eccbytes, data += eccsize) + nand_ecc_sw_bch_calculate(nand, data, &ecccalc[i]); + + /* Finish a page read: compare and correct */ + for (eccsteps = engine_conf->nsteps, i = 0, data = req->databuf.in; + eccsteps; + eccsteps--, i += eccbytes, data += eccsize) { + int stat = nand_ecc_sw_bch_correct(nand, data, + &ecccode[i], + &ecccalc[i]); + if (stat < 0) { + mtd->ecc_stats.failed++; + } else { + mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } + } + + nand_ecc_restore_req(&engine_conf->req_ctx, req); + + return max_bitflips; +} + +static struct nand_ecc_engine_ops nand_ecc_sw_bch_engine_ops = { + .init_ctx = nand_ecc_sw_bch_init_ctx, + .cleanup_ctx = nand_ecc_sw_bch_cleanup_ctx, + .prepare_io_req = nand_ecc_sw_bch_prepare_io_req, + .finish_io_req = nand_ecc_sw_bch_finish_io_req, +}; + +static struct nand_ecc_engine nand_ecc_sw_bch_engine = { + .ops = &nand_ecc_sw_bch_engine_ops, +}; + +struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void) +{ + return &nand_ecc_sw_bch_engine; +} +EXPORT_SYMBOL(nand_ecc_sw_bch_get_engine); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>"); +MODULE_DESCRIPTION("NAND software BCH ECC support"); diff --git a/drivers/mtd/nand/raw/nand_ecc.c b/drivers/mtd/nand/ecc-sw-hamming.c index b6a46b1b7781..6334d1d7735d 100644 --- a/drivers/mtd/nand/raw/nand_ecc.c +++ b/drivers/mtd/nand/ecc-sw-hamming.c @@ -17,9 +17,9 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/mtd/mtd.h> -#include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> +#include <linux/mtd/nand.h> +#include <linux/mtd/nand-ecc-sw-hamming.h> +#include <linux/slab.h> 
#include <asm/byteorder.h> /* @@ -75,7 +75,7 @@ static const char bitsperbyte[256] = { * addressbits is a lookup table to filter out the bits from the xor-ed * ECC data that identify the faulty location. * this is only used for repairing parity - * see the comments in nand_correct_data for more details + * see the comments in nand_ecc_sw_hamming_correct for more details */ static const char addressbits[256] = { 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01, @@ -112,30 +112,21 @@ static const char addressbits[256] = { 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f }; -/** - * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte - * block - * @buf: input buffer with raw data - * @eccsize: data bytes per ECC step (256 or 512) - * @code: output buffer with ECC - * @sm_order: Smart Media byte ordering - */ -void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize, - unsigned char *code, bool sm_order) +int ecc_sw_hamming_calculate(const unsigned char *buf, unsigned int step_size, + unsigned char *code, bool sm_order) { + const u32 *bp = (uint32_t *)buf; + const u32 eccsize_mult = (step_size == 256) ? 1 : 2; + /* current value in buffer */ + u32 cur; + /* rp0..rp17 are the various accumulated parities (per byte) */ + u32 rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7, rp8, rp9, rp10, rp11, rp12, + rp13, rp14, rp15, rp16, rp17; + /* Cumulative parity for all data */ + u32 par; + /* Cumulative parity at the end of the loop (rp12, rp14, rp16) */ + u32 tmppar; int i; - const uint32_t *bp = (uint32_t *)buf; - /* 256 or 512 bytes/ecc */ - const uint32_t eccsize_mult = eccsize >> 8; - uint32_t cur; /* current value in buffer */ - /* rp0..rp15..rp17 are the various accumulated parities (per byte) */ - uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7; - uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16; - uint32_t rp17; - uint32_t par; /* the cumulative parity for all data */ - uint32_t tmppar; /* the cumulative parity for this iteration; - for rp12, rp14 and rp16 at the end of the - loop */ par = 0; rp4 = 0; @@ -145,6 +136,7 @@ void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize, rp12 = 0; rp14 = 0; rp16 = 0; + rp17 = 0; /* * The loop is unrolled a number of times; @@ -356,45 +348,35 @@ void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize, (invparity[par & 0x55] << 2) | (invparity[rp17] << 1) | (invparity[rp16] << 0); + + return 0; } -EXPORT_SYMBOL(__nand_calculate_ecc); +EXPORT_SYMBOL(ecc_sw_hamming_calculate); /** - * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte - * block - * @chip: NAND chip object - * @buf: input buffer with raw data - * @code: output buffer with ECC + * nand_ecc_sw_hamming_calculate - Calculate 3-byte ECC for 256/512-byte block + * @nand: NAND device + * @buf: Input buffer with raw data + * @code: Output buffer with ECC */ -int nand_calculate_ecc(struct nand_chip *chip, const unsigned char *buf, - unsigned char *code) +int nand_ecc_sw_hamming_calculate(struct nand_device *nand, + const unsigned char *buf, unsigned char *code) { - bool sm_order = chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER; + struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv; + unsigned int step_size = nand->ecc.ctx.conf.step_size; - __nand_calculate_ecc(buf, chip->ecc.size, code, sm_order); - - return 0; + return ecc_sw_hamming_calculate(buf, step_size, code, + engine_conf->sm_order); } -EXPORT_SYMBOL(nand_calculate_ecc); +EXPORT_SYMBOL(nand_ecc_sw_hamming_calculate); -/** - * 
__nand_correct_data - [NAND Interface] Detect and correct bit error(s) - * @buf: raw data read from the chip - * @read_ecc: ECC from the chip - * @calc_ecc: the ECC calculated from raw data - * @eccsize: data bytes per ECC step (256 or 512) - * @sm_order: Smart Media byte order - * - * Detect and correct a 1 bit error for eccsize byte block - */ -int __nand_correct_data(unsigned char *buf, - unsigned char *read_ecc, unsigned char *calc_ecc, - unsigned int eccsize, bool sm_order) +int ecc_sw_hamming_correct(unsigned char *buf, unsigned char *read_ecc, + unsigned char *calc_ecc, unsigned int step_size, + bool sm_order) { + const u32 eccsize_mult = step_size >> 8; unsigned char b0, b1, b2, bit_addr; unsigned int byte_addr; - /* 256 or 512 bytes/ecc */ - const uint32_t eccsize_mult = eccsize >> 8; /* * b0 to b2 indicate which bit is faulty (if any) @@ -458,27 +440,220 @@ int __nand_correct_data(unsigned char *buf, pr_err("%s: uncorrectable ECC error\n", __func__); return -EBADMSG; } -EXPORT_SYMBOL(__nand_correct_data); +EXPORT_SYMBOL(ecc_sw_hamming_correct); /** - * nand_correct_data - [NAND Interface] Detect and correct bit error(s) - * @chip: NAND chip object - * @buf: raw data read from the chip - * @read_ecc: ECC from the chip - * @calc_ecc: the ECC calculated from raw data + * nand_ecc_sw_hamming_correct - Detect and correct bit error(s) + * @nand: NAND device + * @buf: Raw data read from the chip + * @read_ecc: ECC bytes read from the chip + * @calc_ecc: ECC calculated from the raw data * - * Detect and correct a 1 bit error for 256/512 byte block + * Detect and correct up to 1 bit error per 256/512-byte block. */ -int nand_correct_data(struct nand_chip *chip, unsigned char *buf, - unsigned char *read_ecc, unsigned char *calc_ecc) +int nand_ecc_sw_hamming_correct(struct nand_device *nand, unsigned char *buf, + unsigned char *read_ecc, + unsigned char *calc_ecc) +{ + struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv; + unsigned int step_size = nand->ecc.ctx.conf.step_size; + + return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc, step_size, + engine_conf->sm_order); +} +EXPORT_SYMBOL(nand_ecc_sw_hamming_correct); + +int nand_ecc_sw_hamming_init_ctx(struct nand_device *nand) +{ + struct nand_ecc_props *conf = &nand->ecc.ctx.conf; + struct nand_ecc_sw_hamming_conf *engine_conf; + struct mtd_info *mtd = nanddev_to_mtd(nand); + int ret; + + if (!mtd->ooblayout) { + switch (mtd->oobsize) { + case 8: + case 16: + mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout()); + break; + case 64: + case 128: + mtd_set_ooblayout(mtd, + nand_get_large_page_hamming_ooblayout()); + break; + default: + return -ENOTSUPP; + } + } + + conf->engine_type = NAND_ECC_ENGINE_TYPE_SOFT; + conf->algo = NAND_ECC_ALGO_HAMMING; + conf->step_size = nand->ecc.user_conf.step_size; + conf->strength = 1; + + /* Use the strongest configuration by default */ + if (conf->step_size != 256 && conf->step_size != 512) + conf->step_size = 256; + + engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL); + if (!engine_conf) + return -ENOMEM; + + ret = nand_ecc_init_req_tweaking(&engine_conf->req_ctx, nand); + if (ret) + goto free_engine_conf; + + engine_conf->code_size = 3; + engine_conf->nsteps = mtd->writesize / conf->step_size; + engine_conf->calc_buf = kzalloc(mtd->oobsize, GFP_KERNEL); + engine_conf->code_buf = kzalloc(mtd->oobsize, GFP_KERNEL); + if (!engine_conf->calc_buf || !engine_conf->code_buf) { + ret = -ENOMEM; + goto free_bufs; + } + + nand->ecc.ctx.priv = engine_conf; + nand->ecc.ctx.total = 
engine_conf->nsteps * engine_conf->code_size; + + return 0; + +free_bufs: + nand_ecc_cleanup_req_tweaking(&engine_conf->req_ctx); + kfree(engine_conf->calc_buf); + kfree(engine_conf->code_buf); +free_engine_conf: + kfree(engine_conf); + + return ret; +} +EXPORT_SYMBOL(nand_ecc_sw_hamming_init_ctx); + +void nand_ecc_sw_hamming_cleanup_ctx(struct nand_device *nand) +{ + struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv; + + if (engine_conf) { + nand_ecc_cleanup_req_tweaking(&engine_conf->req_ctx); + kfree(engine_conf->calc_buf); + kfree(engine_conf->code_buf); + kfree(engine_conf); + } +} +EXPORT_SYMBOL(nand_ecc_sw_hamming_cleanup_ctx); + +static int nand_ecc_sw_hamming_prepare_io_req(struct nand_device *nand, + struct nand_page_io_req *req) { - bool sm_order = chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER; + struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv; + struct mtd_info *mtd = nanddev_to_mtd(nand); + int eccsize = nand->ecc.ctx.conf.step_size; + int eccbytes = engine_conf->code_size; + int eccsteps = engine_conf->nsteps; + int total = nand->ecc.ctx.total; + u8 *ecccalc = engine_conf->calc_buf; + const u8 *data; + int i; + + /* Nothing to do for a raw operation */ + if (req->mode == MTD_OPS_RAW) + return 0; + + /* This engine does not provide BBM/free OOB bytes protection */ + if (!req->datalen) + return 0; - return __nand_correct_data(buf, read_ecc, calc_ecc, chip->ecc.size, - sm_order); + nand_ecc_tweak_req(&engine_conf->req_ctx, req); + + /* No more preparation for page read */ + if (req->type == NAND_PAGE_READ) + return 0; + + /* Preparation for page write: derive the ECC bytes and place them */ + for (i = 0, data = req->databuf.out; + eccsteps; + eccsteps--, i += eccbytes, data += eccsize) + nand_ecc_sw_hamming_calculate(nand, data, &ecccalc[i]); + + return mtd_ooblayout_set_eccbytes(mtd, ecccalc, (void *)req->oobbuf.out, + 0, total); +} + +static int nand_ecc_sw_hamming_finish_io_req(struct nand_device *nand, + struct nand_page_io_req *req) +{ + struct nand_ecc_sw_hamming_conf *engine_conf = nand->ecc.ctx.priv; + struct mtd_info *mtd = nanddev_to_mtd(nand); + int eccsize = nand->ecc.ctx.conf.step_size; + int total = nand->ecc.ctx.total; + int eccbytes = engine_conf->code_size; + int eccsteps = engine_conf->nsteps; + u8 *ecccalc = engine_conf->calc_buf; + u8 *ecccode = engine_conf->code_buf; + unsigned int max_bitflips = 0; + u8 *data = req->databuf.in; + int i, ret; + + /* Nothing to do for a raw operation */ + if (req->mode == MTD_OPS_RAW) + return 0; + + /* This engine does not provide BBM/free OOB bytes protection */ + if (!req->datalen) + return 0; + + /* No more preparation for page write */ + if (req->type == NAND_PAGE_WRITE) { + nand_ecc_restore_req(&engine_conf->req_ctx, req); + return 0; + } + + /* Finish a page read: retrieve the (raw) ECC bytes*/ + ret = mtd_ooblayout_get_eccbytes(mtd, ecccode, req->oobbuf.in, 0, + total); + if (ret) + return ret; + + /* Calculate the ECC bytes */ + for (i = 0; eccsteps; eccsteps--, i += eccbytes, data += eccsize) + nand_ecc_sw_hamming_calculate(nand, data, &ecccalc[i]); + + /* Finish a page read: compare and correct */ + for (eccsteps = engine_conf->nsteps, i = 0, data = req->databuf.in; + eccsteps; + eccsteps--, i += eccbytes, data += eccsize) { + int stat = nand_ecc_sw_hamming_correct(nand, data, + &ecccode[i], + &ecccalc[i]); + if (stat < 0) { + mtd->ecc_stats.failed++; + } else { + mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } + } + + 
nand_ecc_restore_req(&engine_conf->req_ctx, req); + + return max_bitflips; +} + +static struct nand_ecc_engine_ops nand_ecc_sw_hamming_engine_ops = { + .init_ctx = nand_ecc_sw_hamming_init_ctx, + .cleanup_ctx = nand_ecc_sw_hamming_cleanup_ctx, + .prepare_io_req = nand_ecc_sw_hamming_prepare_io_req, + .finish_io_req = nand_ecc_sw_hamming_finish_io_req, +}; + +static struct nand_ecc_engine nand_ecc_sw_hamming_engine = { + .ops = &nand_ecc_sw_hamming_engine_ops, +}; + +struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void) +{ + return &nand_ecc_sw_hamming_engine; } -EXPORT_SYMBOL(nand_correct_data); +EXPORT_SYMBOL(nand_ecc_sw_hamming_get_engine); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Frans Meulenbroeks <fransmeulenbroeks@gmail.com>"); -MODULE_DESCRIPTION("Generic NAND ECC support"); +MODULE_DESCRIPTION("NAND software Hamming ECC support"); diff --git a/drivers/mtd/nand/ecc.c b/drivers/mtd/nand/ecc.c index 4a56e6c0da67..6c43dfda01d4 100644 --- a/drivers/mtd/nand/ecc.c +++ b/drivers/mtd/nand/ecc.c @@ -95,6 +95,7 @@ #include <linux/module.h> #include <linux/mtd/nand.h> +#include <linux/slab.h> /** * nand_ecc_init_ctx - Init the ECC engine context @@ -104,7 +105,7 @@ */ int nand_ecc_init_ctx(struct nand_device *nand) { - if (!nand->ecc.engine->ops->init_ctx) + if (!nand->ecc.engine || !nand->ecc.engine->ops->init_ctx) return 0; return nand->ecc.engine->ops->init_ctx(nand); @@ -117,7 +118,7 @@ EXPORT_SYMBOL(nand_ecc_init_ctx); */ void nand_ecc_cleanup_ctx(struct nand_device *nand) { - if (nand->ecc.engine->ops->cleanup_ctx) + if (nand->ecc.engine && nand->ecc.engine->ops->cleanup_ctx) nand->ecc.engine->ops->cleanup_ctx(nand); } EXPORT_SYMBOL(nand_ecc_cleanup_ctx); @@ -130,7 +131,7 @@ EXPORT_SYMBOL(nand_ecc_cleanup_ctx); int nand_ecc_prepare_io_req(struct nand_device *nand, struct nand_page_io_req *req) { - if (!nand->ecc.engine->ops->prepare_io_req) + if (!nand->ecc.engine || !nand->ecc.engine->ops->prepare_io_req) return 0; return nand->ecc.engine->ops->prepare_io_req(nand, req); @@ -145,7 +146,7 @@ EXPORT_SYMBOL(nand_ecc_prepare_io_req); int nand_ecc_finish_io_req(struct nand_device *nand, struct nand_page_io_req *req) { - if (!nand->ecc.engine->ops->finish_io_req) + if (!nand->ecc.engine || !nand->ecc.engine->ops->finish_io_req) return 0; return nand->ecc.engine->ops->finish_io_req(nand, req); @@ -479,6 +480,137 @@ bool nand_ecc_is_strong_enough(struct nand_device *nand) } EXPORT_SYMBOL(nand_ecc_is_strong_enough); +/* ECC engine driver internal helpers */ +int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx, + struct nand_device *nand) +{ + unsigned int total_buffer_size; + + ctx->nand = nand; + + /* Let the user decide the exact length of each buffer */ + if (!ctx->page_buffer_size) + ctx->page_buffer_size = nanddev_page_size(nand); + if (!ctx->oob_buffer_size) + ctx->oob_buffer_size = nanddev_per_page_oobsize(nand); + + total_buffer_size = ctx->page_buffer_size + ctx->oob_buffer_size; + + ctx->spare_databuf = kzalloc(total_buffer_size, GFP_KERNEL); + if (!ctx->spare_databuf) + return -ENOMEM; + + ctx->spare_oobbuf = ctx->spare_databuf + ctx->page_buffer_size; + + return 0; +} +EXPORT_SYMBOL_GPL(nand_ecc_init_req_tweaking); + +void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx) +{ + kfree(ctx->spare_databuf); +} +EXPORT_SYMBOL_GPL(nand_ecc_cleanup_req_tweaking); + +/* + * Ensure data and OOB area is fully read/written otherwise the correction might + * not work as expected. 
+ */ +void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx, + struct nand_page_io_req *req) +{ + struct nand_device *nand = ctx->nand; + struct nand_page_io_req *orig, *tweak; + + /* Save the original request */ + ctx->orig_req = *req; + ctx->bounce_data = false; + ctx->bounce_oob = false; + orig = &ctx->orig_req; + tweak = req; + + /* Ensure the request covers the entire page */ + if (orig->datalen < nanddev_page_size(nand)) { + ctx->bounce_data = true; + tweak->dataoffs = 0; + tweak->datalen = nanddev_page_size(nand); + tweak->databuf.in = ctx->spare_databuf; + memset(tweak->databuf.in, 0xFF, ctx->page_buffer_size); + } + + if (orig->ooblen < nanddev_per_page_oobsize(nand)) { + ctx->bounce_oob = true; + tweak->ooboffs = 0; + tweak->ooblen = nanddev_per_page_oobsize(nand); + tweak->oobbuf.in = ctx->spare_oobbuf; + memset(tweak->oobbuf.in, 0xFF, ctx->oob_buffer_size); + } + + /* Copy the data that must be writen in the bounce buffers, if needed */ + if (orig->type == NAND_PAGE_WRITE) { + if (ctx->bounce_data) + memcpy((void *)tweak->databuf.out + orig->dataoffs, + orig->databuf.out, orig->datalen); + + if (ctx->bounce_oob) + memcpy((void *)tweak->oobbuf.out + orig->ooboffs, + orig->oobbuf.out, orig->ooblen); + } +} +EXPORT_SYMBOL_GPL(nand_ecc_tweak_req); + +void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx, + struct nand_page_io_req *req) +{ + struct nand_page_io_req *orig, *tweak; + + orig = &ctx->orig_req; + tweak = req; + + /* Restore the data read from the bounce buffers, if needed */ + if (orig->type == NAND_PAGE_READ) { + if (ctx->bounce_data) + memcpy(orig->databuf.in, + tweak->databuf.in + orig->dataoffs, + orig->datalen); + + if (ctx->bounce_oob) + memcpy(orig->oobbuf.in, + tweak->oobbuf.in + orig->ooboffs, + orig->ooblen); + } + + /* Ensure the original request is restored */ + *req = *orig; +} +EXPORT_SYMBOL_GPL(nand_ecc_restore_req); + +struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand) +{ + unsigned int algo = nand->ecc.user_conf.algo; + + if (algo == NAND_ECC_ALGO_UNKNOWN) + algo = nand->ecc.defaults.algo; + + switch (algo) { + case NAND_ECC_ALGO_HAMMING: + return nand_ecc_sw_hamming_get_engine(); + case NAND_ECC_ALGO_BCH: + return nand_ecc_sw_bch_get_engine(); + default: + break; + } + + return NULL; +} +EXPORT_SYMBOL(nand_ecc_get_sw_engine); + +struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand) +{ + return nand->ecc.ondie_engine; +} +EXPORT_SYMBOL(nand_ecc_get_on_die_hw_engine); + MODULE_LICENSE("GPL"); MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>"); MODULE_DESCRIPTION("Generic ECC engine"); diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c index 188b8061e1f7..a9fdea26ea46 100644 --- a/drivers/mtd/nand/onenand/onenand_base.c +++ b/drivers/mtd/nand/onenand/onenand_base.c @@ -132,7 +132,7 @@ static const struct mtd_ooblayout_ops onenand_oob_128_ooblayout_ops = { .free = onenand_ooblayout_128_free, }; -/** +/* * onenand_oob_32_64 - oob info for large (2KB) page */ static int onenand_ooblayout_32_64_ecc(struct mtd_info *mtd, int section, @@ -192,7 +192,7 @@ static const unsigned char ffchars[] = { /** * onenand_readw - [OneNAND Interface] Read OneNAND register - * @param addr address to read + * @addr: address to read * * Read OneNAND register */ @@ -203,8 +203,8 @@ static unsigned short onenand_readw(void __iomem *addr) /** * onenand_writew - [OneNAND Interface] Write OneNAND register with value - * @param value value to write - * @param addr address to 
write + * @value: value to write + * @addr: address to write * * Write OneNAND register with value */ @@ -215,8 +215,8 @@ static void onenand_writew(unsigned short value, void __iomem *addr) /** * onenand_block_address - [DEFAULT] Get block address - * @param this onenand chip data structure - * @param block the block + * @this: onenand chip data structure + * @block: the block * @return translated block address if DDP, otherwise same * * Setup Start Address 1 Register (F100h) @@ -232,8 +232,8 @@ static int onenand_block_address(struct onenand_chip *this, int block) /** * onenand_bufferram_address - [DEFAULT] Get bufferram address - * @param this onenand chip data structure - * @param block the block + * @this: onenand chip data structure + * @block: the block * @return set DBS value if DDP, otherwise 0 * * Setup Start Address 2 Register (F101h) for DDP @@ -249,8 +249,8 @@ static int onenand_bufferram_address(struct onenand_chip *this, int block) /** * onenand_page_address - [DEFAULT] Get page address - * @param page the page address - * @param sector the sector address + * @page: the page address + * @sector: the sector address * @return combined page and sector address * * Setup Start Address 8 Register (F107h) @@ -268,10 +268,10 @@ static int onenand_page_address(int page, int sector) /** * onenand_buffer_address - [DEFAULT] Get buffer address - * @param dataram1 DataRAM index - * @param sectors the sector address - * @param count the number of sectors - * @return the start buffer value + * @dataram1: DataRAM index + * @sectors: the sector address + * @count: the number of sectors + * Return: the start buffer value * * Setup Start Buffer Register (F200h) */ @@ -295,8 +295,8 @@ static int onenand_buffer_address(int dataram1, int sectors, int count) /** * flexonenand_block- For given address return block number - * @param this - OneNAND device structure - * @param addr - Address for which block number is needed + * @this: - OneNAND device structure + * @addr: - Address for which block number is needed */ static unsigned flexonenand_block(struct onenand_chip *this, loff_t addr) { @@ -359,7 +359,7 @@ EXPORT_SYMBOL(onenand_addr); /** * onenand_get_density - [DEFAULT] Get OneNAND density - * @param dev_id OneNAND device ID + * @dev_id: OneNAND device ID * * Get OneNAND density from device ID */ @@ -371,8 +371,8 @@ static inline int onenand_get_density(int dev_id) /** * flexonenand_region - [Flex-OneNAND] Return erase region of addr - * @param mtd MTD device structure - * @param addr address whose erase region needs to be identified + * @mtd: MTD device structure + * @addr: address whose erase region needs to be identified */ int flexonenand_region(struct mtd_info *mtd, loff_t addr) { @@ -387,10 +387,10 @@ EXPORT_SYMBOL(flexonenand_region); /** * onenand_command - [DEFAULT] Send command to OneNAND device - * @param mtd MTD device structure - * @param cmd the command to be sent - * @param addr offset to read from or write to - * @param len number of bytes to read or write + * @mtd: MTD device structure + * @cmd: the command to be sent + * @addr: offset to read from or write to + * @len: number of bytes to read or write * * Send command to OneNAND device. 
This function is used for middle/large page * devices (1KB/2KB Bytes per page) @@ -519,7 +519,7 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le /** * onenand_read_ecc - return ecc status - * @param this onenand chip structure + * @this: onenand chip structure */ static inline int onenand_read_ecc(struct onenand_chip *this) { @@ -543,8 +543,8 @@ static inline int onenand_read_ecc(struct onenand_chip *this) /** * onenand_wait - [DEFAULT] wait until the command is done - * @param mtd MTD device structure - * @param state state to select the max. timeout value + * @mtd: MTD device structure + * @state: state to select the max. timeout value * * Wait for command done. This applies to all OneNAND command * Read can take up to 30us, erase up to 2ms and program up to 350us @@ -625,8 +625,8 @@ static int onenand_wait(struct mtd_info *mtd, int state) /* * onenand_interrupt - [DEFAULT] onenand interrupt handler - * @param irq onenand interrupt number - * @param dev_id interrupt data + * @irq: onenand interrupt number + * @dev_id: interrupt data * * complete the work */ @@ -643,8 +643,8 @@ static irqreturn_t onenand_interrupt(int irq, void *data) /* * onenand_interrupt_wait - [DEFAULT] wait until the command is done - * @param mtd MTD device structure - * @param state state to select the max. timeout value + * @mtd: MTD device structure + * @state: state to select the max. timeout value * * Wait for command done. */ @@ -659,8 +659,8 @@ static int onenand_interrupt_wait(struct mtd_info *mtd, int state) /* * onenand_try_interrupt_wait - [DEFAULT] try interrupt wait - * @param mtd MTD device structure - * @param state state to select the max. timeout value + * @mtd: MTD device structure + * @state: state to select the max. timeout value * * Try interrupt based wait (It is used one-time) */ @@ -689,7 +689,7 @@ static int onenand_try_interrupt_wait(struct mtd_info *mtd, int state) /* * onenand_setup_wait - [OneNAND Interface] setup onenand wait method - * @param mtd MTD device structure + * @mtd: MTD device structure * * There's two method to wait onenand work * 1. polling - read interrupt status register @@ -724,8 +724,8 @@ static void onenand_setup_wait(struct mtd_info *mtd) /** * onenand_bufferram_offset - [DEFAULT] BufferRAM offset - * @param mtd MTD data structure - * @param area BufferRAM area + * @mtd: MTD data structure + * @area: BufferRAM area * @return offset given area * * Return BufferRAM offset given area @@ -747,11 +747,11 @@ static inline int onenand_bufferram_offset(struct mtd_info *mtd, int area) /** * onenand_read_bufferram - [OneNAND Interface] Read the bufferram area - * @param mtd MTD data structure - * @param area BufferRAM area - * @param buffer the databuffer to put/get data - * @param offset offset to read from or write to - * @param count number of bytes to read/write + * @mtd: MTD data structure + * @area: BufferRAM area + * @buffer: the databuffer to put/get data + * @offset: offset to read from or write to + * @count: number of bytes to read/write * * Read the BufferRAM area */ @@ -783,11 +783,11 @@ static int onenand_read_bufferram(struct mtd_info *mtd, int area, /** * onenand_sync_read_bufferram - [OneNAND Interface] Read the bufferram area with Sync. 
Burst mode - * @param mtd MTD data structure - * @param area BufferRAM area - * @param buffer the databuffer to put/get data - * @param offset offset to read from or write to - * @param count number of bytes to read/write + * @mtd: MTD data structure + * @area: BufferRAM area + * @buffer: the databuffer to put/get data + * @offset: offset to read from or write to + * @count: number of bytes to read/write * * Read the BufferRAM area with Sync. Burst Mode */ @@ -823,11 +823,11 @@ static int onenand_sync_read_bufferram(struct mtd_info *mtd, int area, /** * onenand_write_bufferram - [OneNAND Interface] Write the bufferram area - * @param mtd MTD data structure - * @param area BufferRAM area - * @param buffer the databuffer to put/get data - * @param offset offset to read from or write to - * @param count number of bytes to read/write + * @mtd: MTD data structure + * @area: BufferRAM area + * @buffer: the databuffer to put/get data + * @offset: offset to read from or write to + * @count: number of bytes to read/write * * Write the BufferRAM area */ @@ -864,8 +864,8 @@ static int onenand_write_bufferram(struct mtd_info *mtd, int area, /** * onenand_get_2x_blockpage - [GENERIC] Get blockpage at 2x program mode - * @param mtd MTD data structure - * @param addr address to check + * @mtd: MTD data structure + * @addr: address to check * @return blockpage address * * Get blockpage address at 2x program mode @@ -888,8 +888,8 @@ static int onenand_get_2x_blockpage(struct mtd_info *mtd, loff_t addr) /** * onenand_check_bufferram - [GENERIC] Check BufferRAM information - * @param mtd MTD data structure - * @param addr address to check + * @mtd: MTD data structure + * @addr: address to check * @return 1 if there are valid data, otherwise 0 * * Check bufferram if there is data we required @@ -930,9 +930,9 @@ static int onenand_check_bufferram(struct mtd_info *mtd, loff_t addr) /** * onenand_update_bufferram - [GENERIC] Update BufferRAM information - * @param mtd MTD data structure - * @param addr address to update - * @param valid valid flag + * @mtd: MTD data structure + * @addr: address to update + * @valid: valid flag * * Update BufferRAM information */ @@ -963,9 +963,9 @@ static void onenand_update_bufferram(struct mtd_info *mtd, loff_t addr, /** * onenand_invalidate_bufferram - [GENERIC] Invalidate BufferRAM information - * @param mtd MTD data structure - * @param addr start address to invalidate - * @param len length to invalidate + * @mtd: MTD data structure + * @addr: start address to invalidate + * @len: length to invalidate * * Invalidate BufferRAM information */ @@ -986,8 +986,8 @@ static void onenand_invalidate_bufferram(struct mtd_info *mtd, loff_t addr, /** * onenand_get_device - [GENERIC] Get chip for selected access - * @param mtd MTD device structure - * @param new_state the state which is requested + * @mtd: MTD device structure + * @new_state: the state which is requested * * Get the device and lock it for exclusive access */ @@ -1024,7 +1024,7 @@ static int onenand_get_device(struct mtd_info *mtd, int new_state) /** * onenand_release_device - [GENERIC] release chip - * @param mtd MTD device structure + * @mtd: MTD device structure * * Deselect, release chip lock and wake up anyone waiting on the device */ @@ -1043,10 +1043,10 @@ static void onenand_release_device(struct mtd_info *mtd) /** * onenand_transfer_auto_oob - [INTERN] oob auto-placement transfer - * @param mtd MTD device structure - * @param buf destination address - * @param column oob offset to read from - * @param thislen 
oob length to read + * @mtd: MTD device structure + * @buf: destination address + * @column: oob offset to read from + * @thislen: oob length to read */ static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int column, int thislen) @@ -1061,9 +1061,9 @@ static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int col /** * onenand_recover_lsb - [Flex-OneNAND] Recover LSB page data - * @param mtd MTD device structure - * @param addr address to recover - * @param status return value from onenand_wait / onenand_bbt_wait + * @mtd: MTD device structure + * @addr: address to recover + * @status: return value from onenand_wait / onenand_bbt_wait * * MLC NAND Flash cell has paired pages - LSB page and MSB page. LSB page has * lower page address and MSB page has higher page address in paired pages. @@ -1104,9 +1104,9 @@ static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status) /** * onenand_mlc_read_ops_nolock - MLC OneNAND read main and/or out-of-band - * @param mtd MTD device structure - * @param from offset to read from - * @param ops: oob operation description structure + * @mtd: MTD device structure + * @from: offset to read from + * @ops: oob operation description structure * * MLC OneNAND / Flex-OneNAND has 4KB page size and 4KB dataram. * So, read-while-load is not present. @@ -1206,9 +1206,9 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from, /** * onenand_read_ops_nolock - [OneNAND Interface] OneNAND read main and/or out-of-band - * @param mtd MTD device structure - * @param from offset to read from - * @param ops: oob operation description structure + * @mtd: MTD device structure + * @from: offset to read from + * @ops: oob operation description structure * * OneNAND read main and/or out-of-band data */ @@ -1335,9 +1335,9 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from, /** * onenand_read_oob_nolock - [MTD Interface] OneNAND read out-of-band - * @param mtd MTD device structure - * @param from offset to read from - * @param ops: oob operation description structure + * @mtd: MTD device structure + * @from: offset to read from + * @ops: oob operation description structure * * OneNAND read out-of-band data from the spare area */ @@ -1430,10 +1430,10 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from, /** * onenand_read_oob - [MTD Interface] Read main and/or out-of-band - * @param mtd: MTD device structure - * @param from: offset to read from - * @param ops: oob operation description structure - + * @mtd: MTD device structure + * @from: offset to read from + * @ops: oob operation description structure + * * Read main and/or out-of-band */ static int onenand_read_oob(struct mtd_info *mtd, loff_t from, @@ -1466,8 +1466,8 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from, /** * onenand_bbt_wait - [DEFAULT] wait until the command is done - * @param mtd MTD device structure - * @param state state to select the max. timeout value + * @mtd: MTD device structure + * @state: state to select the max. timeout value * * Wait for command done. 
*/ @@ -1517,9 +1517,9 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state) /** * onenand_bbt_read_oob - [MTD Interface] OneNAND read out-of-band for bbt scan - * @param mtd MTD device structure - * @param from offset to read from - * @param ops oob operation description structure + * @mtd: MTD device structure + * @from: offset to read from + * @ops: oob operation description structure * * OneNAND read out-of-band data from the spare area for bbt scan */ @@ -1594,9 +1594,9 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from, #ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE /** * onenand_verify_oob - [GENERIC] verify the oob contents after a write - * @param mtd MTD device structure - * @param buf the databuffer to verify - * @param to offset to read from + * @mtd: MTD device structure + * @buf: the databuffer to verify + * @to: offset to read from */ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to) { @@ -1622,10 +1622,10 @@ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to /** * onenand_verify - [GENERIC] verify the chip contents after a write - * @param mtd MTD device structure - * @param buf the databuffer to verify - * @param addr offset to read from - * @param len number of bytes to read and compare + * @mtd: MTD device structure + * @buf: the databuffer to verify + * @addr: offset to read from + * @len: number of bytes to read and compare */ static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, size_t len) { @@ -1684,11 +1684,11 @@ static void onenand_panic_wait(struct mtd_info *mtd) /** * onenand_panic_write - [MTD Interface] write buffer to FLASH in a panic context - * @param mtd MTD device structure - * @param to offset to write to - * @param len number of bytes to write - * @param retlen pointer to variable to store the number of written bytes - * @param buf the data to write + * @mtd: MTD device structure + * @to: offset to write to + * @len: number of bytes to write + * @retlen: pointer to variable to store the number of written bytes + * @buf: the data to write * * Write with ECC */ @@ -1762,11 +1762,11 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len, /** * onenand_fill_auto_oob - [INTERN] oob auto-placement transfer - * @param mtd MTD device structure - * @param oob_buf oob buffer - * @param buf source address - * @param column oob offset to write to - * @param thislen oob length to write + * @mtd: MTD device structure + * @oob_buf: oob buffer + * @buf: source address + * @column: oob offset to write to + * @thislen: oob length to write */ static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf, const u_char *buf, int column, int thislen) @@ -1776,9 +1776,9 @@ static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf, /** * onenand_write_ops_nolock - [OneNAND Interface] write main and/or out-of-band - * @param mtd MTD device structure - * @param to offset to write to - * @param ops oob operation description structure + * @mtd: MTD device structure + * @to: offset to write to + * @ops: oob operation description structure * * Write main and/or oob with ECC */ @@ -1957,12 +1957,9 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to, /** * onenand_write_oob_nolock - [INTERN] OneNAND write out-of-band - * @param mtd MTD device structure - * @param to offset to write to - * @param len number of bytes to write - * @param retlen pointer to variable to store the number of written bytes - * @param buf the data to 
write - * @param mode operation mode + * @mtd: MTD device structure + * @to: offset to write to + * @ops: oob operation description structure * * OneNAND write out-of-band */ @@ -2070,9 +2067,9 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to, /** * onenand_write_oob - [MTD Interface] NAND write data and/or out-of-band - * @param mtd: MTD device structure - * @param to: offset to write - * @param ops: oob operation description structure + * @mtd: MTD device structure + * @to: offset to write + * @ops: oob operation description structure */ static int onenand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) @@ -2101,9 +2098,9 @@ static int onenand_write_oob(struct mtd_info *mtd, loff_t to, /** * onenand_block_isbad_nolock - [GENERIC] Check if a block is marked bad - * @param mtd MTD device structure - * @param ofs offset from device start - * @param allowbbt 1, if its allowed to access the bbt area + * @mtd: MTD device structure + * @ofs: offset from device start + * @allowbbt: 1, if its allowed to access the bbt area * * Check, if the block is bad. Either by reading the bad block table or * calling of the scan function. @@ -2144,9 +2141,9 @@ static int onenand_multiblock_erase_verify(struct mtd_info *mtd, /** * onenand_multiblock_erase - [INTERN] erase block(s) using multiblock erase - * @param mtd MTD device structure - * @param instr erase instruction - * @param region erase region + * @mtd: MTD device structure + * @instr: erase instruction + * @block_size: block size * * Erase one or more blocks up to 64 block at a time */ @@ -2254,10 +2251,10 @@ static int onenand_multiblock_erase(struct mtd_info *mtd, /** * onenand_block_by_block_erase - [INTERN] erase block(s) using regular erase - * @param mtd MTD device structure - * @param instr erase instruction - * @param region erase region - * @param block_size erase block size + * @mtd: MTD device structure + * @instr: erase instruction + * @region: erase region + * @block_size: erase block size * * Erase one or more blocks one block at a time */ @@ -2326,8 +2323,8 @@ static int onenand_block_by_block_erase(struct mtd_info *mtd, /** * onenand_erase - [MTD Interface] erase block(s) - * @param mtd MTD device structure - * @param instr erase instruction + * @mtd: MTD device structure + * @instr: erase instruction * * Erase one or more blocks */ @@ -2391,7 +2388,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr) /** * onenand_sync - [MTD Interface] sync - * @param mtd MTD device structure + * @mtd: MTD device structure * * Sync is actually a wait for chip ready function */ @@ -2408,8 +2405,8 @@ static void onenand_sync(struct mtd_info *mtd) /** * onenand_block_isbad - [MTD Interface] Check whether the block at the given offset is bad - * @param mtd MTD device structure - * @param ofs offset relative to mtd start + * @mtd: MTD device structure + * @ofs: offset relative to mtd start * * Check whether the block is bad */ @@ -2425,8 +2422,8 @@ static int onenand_block_isbad(struct mtd_info *mtd, loff_t ofs) /** * onenand_default_block_markbad - [DEFAULT] mark a block bad - * @param mtd MTD device structure - * @param ofs offset from device start + * @mtd: MTD device structure + * @ofs: offset from device start * * This is the default implementation, which can be overridden by * a hardware specific driver. 
@@ -2460,8 +2457,8 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) /** * onenand_block_markbad - [MTD Interface] Mark the block at the given offset as bad - * @param mtd MTD device structure - * @param ofs offset relative to mtd start + * @mtd: MTD device structure + * @ofs: offset relative to mtd start * * Mark the block as bad */ @@ -2486,10 +2483,10 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs) /** * onenand_do_lock_cmd - [OneNAND Interface] Lock or unlock block(s) - * @param mtd MTD device structure - * @param ofs offset relative to mtd start - * @param len number of bytes to lock or unlock - * @param cmd lock or unlock command + * @mtd: MTD device structure + * @ofs: offset relative to mtd start + * @len: number of bytes to lock or unlock + * @cmd: lock or unlock command * * Lock or unlock one or more blocks */ @@ -2566,9 +2563,9 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int /** * onenand_lock - [MTD Interface] Lock block(s) - * @param mtd MTD device structure - * @param ofs offset relative to mtd start - * @param len number of bytes to unlock + * @mtd: MTD device structure + * @ofs: offset relative to mtd start + * @len: number of bytes to unlock * * Lock one or more blocks */ @@ -2584,9 +2581,9 @@ static int onenand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) /** * onenand_unlock - [MTD Interface] Unlock block(s) - * @param mtd MTD device structure - * @param ofs offset relative to mtd start - * @param len number of bytes to unlock + * @mtd: MTD device structure + * @ofs: offset relative to mtd start + * @len: number of bytes to unlock * * Unlock one or more blocks */ @@ -2602,7 +2599,7 @@ static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) /** * onenand_check_lock_status - [OneNAND Interface] Check lock status - * @param this onenand chip data structure + * @this: onenand chip data structure * * Check lock status */ @@ -2636,7 +2633,7 @@ static int onenand_check_lock_status(struct onenand_chip *this) /** * onenand_unlock_all - [OneNAND Interface] unlock all blocks - * @param mtd MTD device structure + * @mtd: MTD device structure * * Unlock all blocks */ @@ -2683,10 +2680,10 @@ static void onenand_unlock_all(struct mtd_info *mtd) /** * onenand_otp_command - Send OTP specific command to OneNAND device - * @param mtd MTD device structure - * @param cmd the command to be sent - * @param addr offset to read from or write to - * @param len number of bytes to read or write + * @mtd: MTD device structure + * @cmd: the command to be sent + * @addr: offset to read from or write to + * @len: number of bytes to read or write */ static int onenand_otp_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t len) @@ -2758,11 +2755,9 @@ static int onenand_otp_command(struct mtd_info *mtd, int cmd, loff_t addr, /** * onenand_otp_write_oob_nolock - [INTERN] OneNAND write out-of-band, specific to OTP - * @param mtd MTD device structure - * @param to offset to write to - * @param len number of bytes to write - * @param retlen pointer to variable to store the number of written bytes - * @param buf the data to write + * @mtd: MTD device structure + * @to: offset to write to + * @ops: oob operation description structure * * OneNAND write out-of-band only for OTP */ @@ -2889,11 +2884,11 @@ typedef int (*otp_op_t)(struct mtd_info *mtd, loff_t form, size_t len, /** * do_otp_read - [DEFAULT] Read OTP block area - * @param mtd MTD device structure - * @param from The offset to read - 
* @param len number of bytes to read - * @param retlen pointer to variable to store the number of readbytes - * @param buf the databuffer to put/get data + * @mtd: MTD device structure + * @from: The offset to read + * @len: number of bytes to read + * @retlen: pointer to variable to store the number of readbytes + * @buf: the databuffer to put/get data * * Read OTP block area. */ @@ -2926,11 +2921,11 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len, /** * do_otp_write - [DEFAULT] Write OTP block area - * @param mtd MTD device structure - * @param to The offset to write - * @param len number of bytes to write - * @param retlen pointer to variable to store the number of write bytes - * @param buf the databuffer to put/get data + * @mtd: MTD device structure + * @to: The offset to write + * @len: number of bytes to write + * @retlen: pointer to variable to store the number of write bytes + * @buf: the databuffer to put/get data * * Write OTP block area. */ @@ -2970,11 +2965,11 @@ static int do_otp_write(struct mtd_info *mtd, loff_t to, size_t len, /** * do_otp_lock - [DEFAULT] Lock OTP block area - * @param mtd MTD device structure - * @param from The offset to lock - * @param len number of bytes to lock - * @param retlen pointer to variable to store the number of lock bytes - * @param buf the databuffer to put/get data + * @mtd: MTD device structure + * @from: The offset to lock + * @len: number of bytes to lock + * @retlen: pointer to variable to store the number of lock bytes + * @buf: the databuffer to put/get data * * Lock OTP block area. */ @@ -3018,13 +3013,13 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len, /** * onenand_otp_walk - [DEFAULT] Handle OTP operation - * @param mtd MTD device structure - * @param from The offset to read/write - * @param len number of bytes to read/write - * @param retlen pointer to variable to store the number of read bytes - * @param buf the databuffer to put/get data - * @param action do given action - * @param mode specify user and factory + * @mtd: MTD device structure + * @from: The offset to read/write + * @len: number of bytes to read/write + * @retlen: pointer to variable to store the number of read bytes + * @buf: the databuffer to put/get data + * @action: do given action + * @mode: specify user and factory * * Handle OTP operation. */ @@ -3099,10 +3094,10 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len, /** * onenand_get_fact_prot_info - [MTD Interface] Read factory OTP info - * @param mtd MTD device structure - * @param len number of bytes to read - * @param retlen pointer to variable to store the number of read bytes - * @param buf the databuffer to put/get data + * @mtd: MTD device structure + * @len: number of bytes to read + * @retlen: pointer to variable to store the number of read bytes + * @buf: the databuffer to put/get data * * Read factory OTP info. 
*/ @@ -3115,11 +3110,11 @@ static int onenand_get_fact_prot_info(struct mtd_info *mtd, size_t len, /** * onenand_read_fact_prot_reg - [MTD Interface] Read factory OTP area - * @param mtd MTD device structure - * @param from The offset to read - * @param len number of bytes to read - * @param retlen pointer to variable to store the number of read bytes - * @param buf the databuffer to put/get data + * @mtd: MTD device structure + * @from: The offset to read + * @len: number of bytes to read + * @retlen: pointer to variable to store the number of read bytes + * @buf: the databuffer to put/get data * * Read factory OTP area. */ @@ -3131,10 +3126,10 @@ static int onenand_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, /** * onenand_get_user_prot_info - [MTD Interface] Read user OTP info - * @param mtd MTD device structure - * @param retlen pointer to variable to store the number of read bytes - * @param len number of bytes to read - * @param buf the databuffer to put/get data + * @mtd: MTD device structure + * @retlen: pointer to variable to store the number of read bytes + * @len: number of bytes to read + * @buf: the databuffer to put/get data * * Read user OTP info. */ @@ -3147,11 +3142,11 @@ static int onenand_get_user_prot_info(struct mtd_info *mtd, size_t len, /** * onenand_read_user_prot_reg - [MTD Interface] Read user OTP area - * @param mtd MTD device structure - * @param from The offset to read - * @param len number of bytes to read - * @param retlen pointer to variable to store the number of read bytes - * @param buf the databuffer to put/get data + * @mtd: MTD device structure + * @from: The offset to read + * @len: number of bytes to read + * @retlen: pointer to variable to store the number of read bytes + * @buf: the databuffer to put/get data * * Read user OTP area. */ @@ -3163,11 +3158,11 @@ static int onenand_read_user_prot_reg(struct mtd_info *mtd, loff_t from, /** * onenand_write_user_prot_reg - [MTD Interface] Write user OTP area - * @param mtd MTD device structure - * @param from The offset to write - * @param len number of bytes to write - * @param retlen pointer to variable to store the number of write bytes - * @param buf the databuffer to put/get data + * @mtd: MTD device structure + * @from: The offset to write + * @len: number of bytes to write + * @retlen: pointer to variable to store the number of write bytes + * @buf: the databuffer to put/get data * * Write user OTP area. 
*/ @@ -3179,9 +3174,9 @@ static int onenand_write_user_prot_reg(struct mtd_info *mtd, loff_t from, /** * onenand_lock_user_prot_reg - [MTD Interface] Lock user OTP area - * @param mtd MTD device structure - * @param from The offset to lock - * @param len number of bytes to unlock + * @mtd: MTD device structure + * @from: The offset to lock + * @len: number of bytes to unlock * * Write lock mark on spare area in page 0 in OTP block */ @@ -3234,7 +3229,7 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, /** * onenand_check_features - Check and set OneNAND features - * @param mtd MTD data structure + * @mtd: MTD data structure * * Check and set OneNAND features * - lock scheme @@ -3324,8 +3319,8 @@ static void onenand_check_features(struct mtd_info *mtd) /** * onenand_print_device_info - Print device & version ID - * @param device device ID - * @param version version ID + * @device: device ID + * @version: version ID * * Print device & version ID */ @@ -3355,7 +3350,7 @@ static const struct onenand_manufacturers onenand_manuf_ids[] = { /** * onenand_check_maf - Check manufacturer ID - * @param manuf manufacturer ID + * @manuf: manufacturer ID * * Check manufacturer ID */ @@ -3380,9 +3375,9 @@ static int onenand_check_maf(int manuf) } /** -* flexonenand_get_boundary - Reads the SLC boundary -* @param onenand_info - onenand info structure -**/ + * flexonenand_get_boundary - Reads the SLC boundary + * @mtd: MTD data structure + */ static int flexonenand_get_boundary(struct mtd_info *mtd) { struct onenand_chip *this = mtd->priv; @@ -3422,7 +3417,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd) /** * flexonenand_get_size - Fill up fields in onenand_chip and mtd_info * boundary[], diesize[], mtd->size, mtd->erasesize - * @param mtd - MTD device structure + * @mtd: - MTD device structure */ static void flexonenand_get_size(struct mtd_info *mtd) { @@ -3493,9 +3488,9 @@ static void flexonenand_get_size(struct mtd_info *mtd) /** * flexonenand_check_blocks_erased - Check if blocks are erased - * @param mtd_info - mtd info structure - * @param start - first erase block to check - * @param end - last erase block to check + * @mtd: mtd info structure + * @start: first erase block to check + * @end: last erase block to check * * Converting an unerased block from MLC to SLC * causes byte values to change. 
Since both data and its ECC @@ -3548,9 +3543,8 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int return 0; } -/** +/* * flexonenand_set_boundary - Writes the SLC boundary - * @param mtd - mtd info structure */ static int flexonenand_set_boundary(struct mtd_info *mtd, int die, int boundary, int lock) @@ -3640,7 +3634,7 @@ out: /** * onenand_chip_probe - [OneNAND Interface] The generic chip probe - * @param mtd MTD device structure + * @mtd: MTD device structure * * OneNAND detection method: * Compare the values from command with ones from register @@ -3688,7 +3682,7 @@ static int onenand_chip_probe(struct mtd_info *mtd) /** * onenand_probe - [OneNAND Interface] Probe the OneNAND device - * @param mtd MTD device structure + * @mtd: MTD device structure */ static int onenand_probe(struct mtd_info *mtd) { @@ -3783,7 +3777,7 @@ static int onenand_probe(struct mtd_info *mtd) /** * onenand_suspend - [MTD Interface] Suspend the OneNAND flash - * @param mtd MTD device structure + * @mtd: MTD device structure */ static int onenand_suspend(struct mtd_info *mtd) { @@ -3792,7 +3786,7 @@ static int onenand_suspend(struct mtd_info *mtd) /** * onenand_resume - [MTD Interface] Resume the OneNAND flash - * @param mtd MTD device structure + * @mtd: MTD device structure */ static void onenand_resume(struct mtd_info *mtd) { @@ -3807,8 +3801,8 @@ static void onenand_resume(struct mtd_info *mtd) /** * onenand_scan - [OneNAND Interface] Scan for the OneNAND device - * @param mtd MTD device structure - * @param maxchips Number of chips to scan for + * @mtd: MTD device structure + * @maxchips: Number of chips to scan for * * This fills out all the not initialized function pointers * with the defaults. @@ -3985,7 +3979,7 @@ int onenand_scan(struct mtd_info *mtd, int maxchips) /** * onenand_release - [OneNAND Interface] Free resources held by the OneNAND device - * @param mtd MTD device structure + * @mtd: MTD device structure */ void onenand_release(struct mtd_info *mtd) { diff --git a/drivers/mtd/nand/onenand/onenand_bbt.c b/drivers/mtd/nand/onenand/onenand_bbt.c index 57c31c81be18..def89f108007 100644 --- a/drivers/mtd/nand/onenand/onenand_bbt.c +++ b/drivers/mtd/nand/onenand/onenand_bbt.c @@ -18,10 +18,10 @@ /** * check_short_pattern - [GENERIC] check if a pattern is in the buffer - * @param buf the buffer to search - * @param len the length of buffer to search - * @param paglen the pagelength - * @param td search pattern descriptor + * @buf: the buffer to search + * @len: the length of buffer to search + * @paglen: the pagelength + * @td: search pattern descriptor * * Check for a pattern at the given place. Used to search bad block * tables and good / bad block identifiers. Same as check_pattern, but @@ -44,10 +44,10 @@ static int check_short_pattern(uint8_t *buf, int len, int paglen, struct nand_bb /** * create_bbt - [GENERIC] Create a bad block table by scanning the device - * @param mtd MTD device structure - * @param buf temporary buffer - * @param bd descriptor for the good/bad block search pattern - * @param chip create the table for a specific chip, -1 read all chips. + * @mtd: MTD device structure + * @buf: temporary buffer + * @bd: descriptor for the good/bad block search pattern + * @chip: create the table for a specific chip, -1 read all chips. 
* Applies only if NAND_BBT_PERCHIP option is set * * Create a bad block table by scanning the device @@ -122,8 +122,8 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr /** * onenand_memory_bbt - [GENERIC] create a memory based bad block table - * @param mtd MTD device structure - * @param bd descriptor for the good/bad block search pattern + * @mtd: MTD device structure + * @bd: descriptor for the good/bad block search pattern * * The function creates a memory based bbt by scanning the device * for manufacturer / software marked good / bad blocks @@ -137,9 +137,9 @@ static inline int onenand_memory_bbt (struct mtd_info *mtd, struct nand_bbt_desc /** * onenand_isbad_bbt - [OneNAND Interface] Check if a block is bad - * @param mtd MTD device structure - * @param offs offset in the device - * @param allowbbt allow access to bad block table region + * @mtd: MTD device structure + * @offs: offset in the device + * @allowbbt: allow access to bad block table region */ static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt) { @@ -166,8 +166,8 @@ static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt) /** * onenand_scan_bbt - [OneNAND Interface] scan, find, read and maybe create bad block table(s) - * @param mtd MTD device structure - * @param bd descriptor for the good/bad block search pattern + * @mtd: MTD device structure + * @bd: descriptor for the good/bad block search pattern * * The function checks, if a bad block table(s) is/are already * available. If not it scans the device for manufacturer @@ -221,7 +221,7 @@ static struct nand_bbt_descr largepage_memorybased = { /** * onenand_default_bbt - [OneNAND Interface] Select a default bad block table for the device - * @param mtd MTD device structure + * @mtd: MTD device structure * * This function selects the default bad block table * support for the device and calls the onenand_scan_bbt function diff --git a/drivers/mtd/nand/onenand/onenand_omap2.c b/drivers/mtd/nand/onenand/onenand_omap2.c index d8c0bd002c2b..12825eb97938 100644 --- a/drivers/mtd/nand/onenand/onenand_omap2.c +++ b/drivers/mtd/nand/onenand/onenand_omap2.c @@ -371,12 +371,12 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area, bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset; /* - * If the buffer address is not DMA-able, len is not long enough to make - * DMA transfers profitable or panic_write() may be in an interrupt - * context fallback to PIO mode. + * If the buffer address is not DMA-able, len is not long enough to + * make DMA transfers profitable or if invoked from panic_write() + * fallback to PIO mode. */ if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 || - count < 384 || in_interrupt() || oops_in_progress) + count < 384 || mtd->oops_panic_write) goto out_copy; xtra = count & 3; @@ -418,12 +418,12 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area, bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset; /* - * If the buffer address is not DMA-able, len is not long enough to make - * DMA transfers profitable or panic_write() may be in an interrupt - * context fallback to PIO mode. + * If the buffer address is not DMA-able, len is not long enough to + * make DMA transfers profitable or if invoked from panic_write() + * fallback to PIO mode. 
*/ if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 || - count < 384 || in_interrupt() || oops_in_progress) + count < 384 || mtd->oops_panic_write) goto out_copy; dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE); diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index 6c46f25b57e2..442a039b92f3 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -1,20 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only -config MTD_NAND_ECC_SW_HAMMING - tristate - -config MTD_NAND_ECC_SW_HAMMING_SMC - bool "NAND ECC Smart Media byte order" - depends on MTD_NAND_ECC_SW_HAMMING - default n - help - Software ECC according to the Smart Media Specification. - The original Linux implementation had byte 0 and 1 swapped. - menuconfig MTD_RAW_NAND tristate "Raw/Parallel NAND Device Support" select MTD_NAND_CORE select MTD_NAND_ECC - select MTD_NAND_ECC_SW_HAMMING help This enables support for accessing all type of raw/parallel NAND flash devices. For further information see @@ -22,16 +10,6 @@ menuconfig MTD_RAW_NAND if MTD_RAW_NAND -config MTD_NAND_ECC_SW_BCH - bool "Support software BCH ECC" - select BCH - default n - help - This enables support for software BCH error correction. Binary BCH - codes are more powerful and cpu intensive than traditional Hamming - ECC codes. They are used with NAND devices requiring more than 1 bit - of error correction. - comment "Raw/parallel NAND flash controllers" config MTD_NAND_DENALI @@ -93,6 +71,7 @@ config MTD_NAND_AU1550 config MTD_NAND_NDFC tristate "IBM/MCC 4xx NAND controller" depends on 4xx + select MTD_NAND_ECC_SW_HAMMING select MTD_NAND_ECC_SW_HAMMING_SMC help NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs @@ -313,7 +292,7 @@ config MTD_NAND_VF610_NFC config MTD_NAND_MXC tristate "Freescale MXC NAND controller" depends on ARCH_MXC || COMPILE_TEST - depends on HAS_IOMEM + depends on HAS_IOMEM && OF help This enables the driver for the NAND flash controller on the MXC processors. @@ -462,6 +441,26 @@ config MTD_NAND_ARASAN Enables the driver for the Arasan NAND flash controller on Zynq Ultrascale+ MPSoC. +config MTD_NAND_INTEL_LGM + tristate "Support for NAND controller on Intel LGM SoC" + depends on OF || COMPILE_TEST + depends on HAS_IOMEM + help + Enables support for NAND Flash chips on Intel's LGM SoC. + NAND flash controller interfaced through the External Bus Unit. + +config MTD_NAND_ROCKCHIP + tristate "Rockchip NAND controller" + depends on ARCH_ROCKCHIP && HAS_IOMEM + help + Enables support for NAND controller on Rockchip SoCs. 
+ There are four different versions of NAND FLASH Controllers, + including: + NFC v600: RK2928, RK3066, RK3188 + NFC v622: RK3036, RK3128 + NFC v800: RK3308, RV1108 + NFC v900: PX30, RK3326 + comment "Misc" config MTD_SM_COMMON diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile index 2930f5b9015d..32475a28d8f8 100644 --- a/drivers/mtd/nand/raw/Makefile +++ b/drivers/mtd/nand/raw/Makefile @@ -1,8 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_MTD_RAW_NAND) += nand.o -obj-$(CONFIG_MTD_NAND_ECC_SW_HAMMING) += nand_ecc.o -nand-$(CONFIG_MTD_NAND_ECC_SW_BCH) += nand_bch.o obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o obj-$(CONFIG_MTD_NAND_CAFE) += cafe_nand.o @@ -58,6 +56,8 @@ obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o obj-$(CONFIG_MTD_NAND_MESON) += meson_nand.o obj-$(CONFIG_MTD_NAND_CADENCE) += cadence-nand-controller.o obj-$(CONFIG_MTD_NAND_ARASAN) += arasan-nand-controller.o +obj-$(CONFIG_MTD_NAND_INTEL_LGM) += intel-nand-controller.o +obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rockchip-nand-controller.o nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o nand-objs += nand_onfi.o diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c index 0c352b39ad4b..ff1697f899ba 100644 --- a/drivers/mtd/nand/raw/ams-delta.c +++ b/drivers/mtd/nand/raw/ams-delta.c @@ -218,7 +218,9 @@ static int gpio_nand_setup_interface(struct nand_chip *this, int csline, static int gpio_nand_attach_chip(struct nand_chip *chip) { chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; + + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; return 0; } diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c index fbb4ea751be8..549aac00228e 100644 --- a/drivers/mtd/nand/raw/arasan-nand-controller.c +++ b/drivers/mtd/nand/raw/arasan-nand-controller.c @@ -118,6 +118,7 @@ * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin * @len: Data transfer length * @read: Data transfer direction from the controller point of view + * @buf: Data buffer */ struct anfc_op { u32 pkt_reg; diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c index 7892022bd6dd..99116896cfd6 100644 --- a/drivers/mtd/nand/raw/au1550nd.c +++ b/drivers/mtd/nand/raw/au1550nd.c @@ -3,6 +3,7 @@ * Copyright (C) 2004 Embedded Edge, LLC */ +#include <linux/delay.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/interrupt.h> @@ -239,7 +240,9 @@ static int au1550nd_exec_op(struct nand_chip *this, static int au1550nd_attach_chip(struct nand_chip *chip) { chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; + + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; return 0; } diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 2da39ab89286..659eaa6f0980 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -1846,7 +1846,7 @@ static void brcmnand_write_buf(struct nand_chip *chip, const uint8_t *buf, } } -/** +/* * Kick EDU engine */ static int brcmnand_edu_trans(struct brcmnand_host *host, u64 addr, u32 *buf, @@ -1937,7 +1937,7 @@ static int brcmnand_edu_trans(struct brcmnand_host *host, u64 addr, u32 *buf, return ret; } -/** +/* * Construct a FLASH_DMA descriptor as part of a linked list. 
You must know the * following ahead of time: * - Is this descriptor the beginning or end of a linked list? @@ -1970,7 +1970,7 @@ static int brcmnand_fill_dma_desc(struct brcmnand_host *host, return 0; } -/** +/* * Kick the FLASH_DMA engine, with a given DMA descriptor */ static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc) diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c index 2b94f385a1a8..d0e8ffd55c22 100644 --- a/drivers/mtd/nand/raw/cafe_nand.c +++ b/drivers/mtd/nand/raw/cafe_nand.c @@ -359,10 +359,10 @@ static int cafe_nand_read_oob(struct nand_chip *chip, int page) } /** * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read - * @mtd: mtd info structure * @chip: nand chip info structure * @buf: buffer to store read data * @oob_required: caller expects OOB data read to chip->oob_poi + * @page: page number to read * * The hw generator calculates the error syndrome automatically. Therefore * we need a special oob layout and handling. diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c index 282203debd0c..6edf78c16fc8 100644 --- a/drivers/mtd/nand/raw/cs553x_nand.c +++ b/drivers/mtd/nand/raw/cs553x_nand.c @@ -19,7 +19,6 @@ #include <linux/delay.h> #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <linux/iopoll.h> @@ -252,7 +251,7 @@ static int cs553x_attach_chip(struct nand_chip *chip) chip->ecc.bytes = 3; chip->ecc.hwctl = cs_enable_hwecc; chip->ecc.calculate = cs_calculate_ecc; - chip->ecc.correct = nand_correct_data; + chip->ecc.correct = rawnand_sw_hamming_correct; chip->ecc.strength = 1; return 0; diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c index f8c36d19ab47..118da9944e3b 100644 --- a/drivers/mtd/nand/raw/davinci_nand.c +++ b/drivers/mtd/nand/raw/davinci_nand.c @@ -586,10 +586,10 @@ static int davinci_nand_attach_chip(struct nand_chip *chip) return PTR_ERR(pdata); /* Use board-specific ECC config */ - info->chip.ecc.engine_type = pdata->engine_type; - info->chip.ecc.placement = pdata->ecc_placement; + chip->ecc.engine_type = pdata->engine_type; + chip->ecc.placement = pdata->ecc_placement; - switch (info->chip.ecc.engine_type) { + switch (chip->ecc.engine_type) { case NAND_ECC_ENGINE_TYPE_NONE: pdata->ecc_bits = 0; break; @@ -601,7 +601,7 @@ static int davinci_nand_attach_chip(struct nand_chip *chip) * NAND_ECC_ALGO_HAMMING to avoid adding an extra ->ecc_algo * field to davinci_nand_pdata. */ - info->chip.ecc.algo = NAND_ECC_ALGO_HAMMING; + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; break; case NAND_ECC_ENGINE_TYPE_ON_HOST: if (pdata->ecc_bits == 4) { @@ -628,12 +628,12 @@ static int davinci_nand_attach_chip(struct nand_chip *chip) if (ret == -EBUSY) return ret; - info->chip.ecc.calculate = nand_davinci_calculate_4bit; - info->chip.ecc.correct = nand_davinci_correct_4bit; - info->chip.ecc.hwctl = nand_davinci_hwctl_4bit; - info->chip.ecc.bytes = 10; - info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK; - info->chip.ecc.algo = NAND_ECC_ALGO_BCH; + chip->ecc.calculate = nand_davinci_calculate_4bit; + chip->ecc.correct = nand_davinci_correct_4bit; + chip->ecc.hwctl = nand_davinci_hwctl_4bit; + chip->ecc.bytes = 10; + chip->ecc.options = NAND_ECC_GENERIC_ERASED_CHECK; + chip->ecc.algo = NAND_ECC_ALGO_BCH; /* * Update ECC layout if needed ... 
for 1-bit HW ECC, the @@ -651,20 +651,20 @@ static int davinci_nand_attach_chip(struct nand_chip *chip) } else if (chunks == 4 || chunks == 8) { mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout()); - info->chip.ecc.read_page = nand_davinci_read_page_hwecc_oob_first; + chip->ecc.read_page = nand_davinci_read_page_hwecc_oob_first; } else { return -EIO; } } else { /* 1bit ecc hamming */ - info->chip.ecc.calculate = nand_davinci_calculate_1bit; - info->chip.ecc.correct = nand_davinci_correct_1bit; - info->chip.ecc.hwctl = nand_davinci_hwctl_1bit; - info->chip.ecc.bytes = 3; - info->chip.ecc.algo = NAND_ECC_ALGO_HAMMING; + chip->ecc.calculate = nand_davinci_calculate_1bit; + chip->ecc.correct = nand_davinci_correct_1bit; + chip->ecc.hwctl = nand_davinci_hwctl_1bit; + chip->ecc.bytes = 3; + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; } - info->chip.ecc.size = 512; - info->chip.ecc.strength = pdata->ecc_bits; + chip->ecc.size = 512; + chip->ecc.strength = pdata->ecc_bits; break; default: return -EINVAL; @@ -899,7 +899,7 @@ static int nand_davinci_remove(struct platform_device *pdev) int ret; spin_lock_irq(&davinci_nand_lock); - if (info->chip.ecc.placement == NAND_ECC_PLACEMENT_INTERLEAVED) + if (chip->ecc.placement == NAND_ECC_PLACEMENT_INTERLEAVED) ecc4_busy = false; spin_unlock_irq(&davinci_nand_lock); diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c index 26b265e4384a..5d2ddb037a9a 100644 --- a/drivers/mtd/nand/raw/diskonchip.c +++ b/drivers/mtd/nand/raw/diskonchip.c @@ -216,7 +216,7 @@ static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc) static void DoC_Delay(struct doc_priv *doc, unsigned short cycles) { - volatile char dummy; + volatile char __always_unused dummy; int i; for (i = 0; i < cycles; i++) { @@ -703,7 +703,7 @@ static int doc200x_calculate_ecc(struct nand_chip *this, const u_char *dat, struct doc_priv *doc = nand_get_controller_data(this); void __iomem *docptr = doc->virtadr; int i; - int emptymatch = 1; + int __always_unused emptymatch = 1; /* flush the pipeline */ if (DoC_is_2000(doc)) { diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c index b2af7f81fdf8..aab93b9e6052 100644 --- a/drivers/mtd/nand/raw/fsl_elbc_nand.c +++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c @@ -22,7 +22,6 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <asm/io.h> diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c index e345f9d9f8e8..02d500176838 100644 --- a/drivers/mtd/nand/raw/fsl_ifc_nand.c +++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c @@ -15,7 +15,6 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> #include <linux/mtd/partitions.h> -#include <linux/mtd/nand_ecc.h> #include <linux/fsl_ifc.h> #include <linux/iopoll.h> diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c index d5813b9abc8e..b3cc427100a2 100644 --- a/drivers/mtd/nand/raw/fsl_upm.c +++ b/drivers/mtd/nand/raw/fsl_upm.c @@ -11,7 +11,6 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <linux/mtd/mtd.h> #include <linux/of_platform.h> diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index c88421a1c078..0101c0fab50a 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -26,7 +26,6 @@ #include <linux/types.h> #include 
<linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/mtd/partitions.h> @@ -918,7 +917,7 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand) case NAND_ECC_ENGINE_TYPE_ON_HOST: dev_info(host->dev, "Using 1-bit HW ECC scheme\n"); nand->ecc.calculate = fsmc_read_hwecc_ecc1; - nand->ecc.correct = nand_correct_data; + nand->ecc.correct = rawnand_sw_hamming_correct; nand->ecc.hwctl = fsmc_enable_hwecc; nand->ecc.bytes = 3; nand->ecc.strength = 1; @@ -942,7 +941,7 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand) /* * Don't set layout for BCH4 SW ECC. This will be - * generated later in nand_bch_init() later. + * generated later during BCH initialization. */ if (nand->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) { switch (mtd->oobsize) { diff --git a/drivers/mtd/nand/raw/gpio.c b/drivers/mtd/nand/raw/gpio.c index eb03b8cea1cb..fb7a086de35e 100644 --- a/drivers/mtd/nand/raw/gpio.c +++ b/drivers/mtd/nand/raw/gpio.c @@ -164,7 +164,9 @@ static int gpio_nand_exec_op(struct nand_chip *chip, static int gpio_nand_attach_chip(struct nand_chip *chip) { chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; + + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; return 0; } diff --git a/drivers/mtd/nand/raw/gpmi-nand/Makefile b/drivers/mtd/nand/raw/gpmi-nand/Makefile index 9bd81a31e02e..247cbfceaa19 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/Makefile +++ b/drivers/mtd/nand/raw/gpmi-nand/Makefile @@ -1,3 +1,2 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi_nand.o -gpmi_nand-objs += gpmi-nand.o +obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand.o diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index dc8104e67506..5cdf05bcbf8f 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -149,8 +149,10 @@ static int gpmi_init(struct gpmi_nand_data *this) int ret; ret = pm_runtime_get_sync(this->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(this->dev); return ret; + } ret = gpmi_reset_block(r->gpmi_regs, false); if (ret) @@ -179,9 +181,11 @@ static int gpmi_init(struct gpmi_nand_data *this) /* * Decouple the chip select from dma channel. We use dma0 for all - * the chips. + * the chips, force all NAND RDY_BUSY inputs to be sourced from + * RDY_BUSY0. */ - writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET); + writel(BM_GPMI_CTRL1_DECOUPLE_CS | BM_GPMI_CTRL1_GANGED_RDYBUSY, + r->gpmi_regs + HW_GPMI_CTRL1_SET); err_out: pm_runtime_mark_last_busy(this->dev); @@ -2252,7 +2256,7 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, void *buf_read = NULL; const void *buf_write = NULL; bool direct = false; - struct completion *completion; + struct completion *dma_completion, *bch_completion; unsigned long to; if (check_only) @@ -2263,8 +2267,10 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, this->transfers[i].direction = DMA_NONE; ret = pm_runtime_get_sync(this->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(this->dev); return ret; + } /* * This driver currently supports only one NAND chip. 
Plus, dies share @@ -2347,22 +2353,24 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1); } + desc->callback = dma_irq_callback; + desc->callback_param = this; + dma_completion = &this->dma_done; + bch_completion = NULL; + + init_completion(dma_completion); + if (this->bch && buf_read) { writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, this->resources.bch_regs + HW_BCH_CTRL_SET); - completion = &this->bch_done; - } else { - desc->callback = dma_irq_callback; - desc->callback_param = this; - completion = &this->dma_done; + bch_completion = &this->bch_done; + init_completion(bch_completion); } - init_completion(completion); - dmaengine_submit(desc); dma_async_issue_pending(get_dma_chan(this)); - to = wait_for_completion_timeout(completion, msecs_to_jiffies(1000)); + to = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000)); if (!to) { dev_err(this->dev, "DMA timeout, last DMA\n"); gpmi_dump_info(this); @@ -2370,6 +2378,16 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip, goto unmap; } + if (this->bch && buf_read) { + to = wait_for_completion_timeout(bch_completion, msecs_to_jiffies(1000)); + if (!to) { + dev_err(this->dev, "BCH timeout, last DMA\n"); + gpmi_dump_info(this); + ret = -ETIMEDOUT; + goto unmap; + } + } + writel(BM_BCH_CTRL_COMPLETE_IRQ_EN, this->resources.bch_regs + HW_BCH_CTRL_CLR); gpmi_clear_bch(this); @@ -2461,43 +2479,25 @@ err_out: } static const struct of_device_id gpmi_nand_id_table[] = { - { - .compatible = "fsl,imx23-gpmi-nand", - .data = &gpmi_devdata_imx23, - }, { - .compatible = "fsl,imx28-gpmi-nand", - .data = &gpmi_devdata_imx28, - }, { - .compatible = "fsl,imx6q-gpmi-nand", - .data = &gpmi_devdata_imx6q, - }, { - .compatible = "fsl,imx6sx-gpmi-nand", - .data = &gpmi_devdata_imx6sx, - }, { - .compatible = "fsl,imx7d-gpmi-nand", - .data = &gpmi_devdata_imx7d, - }, {} + { .compatible = "fsl,imx23-gpmi-nand", .data = &gpmi_devdata_imx23, }, + { .compatible = "fsl,imx28-gpmi-nand", .data = &gpmi_devdata_imx28, }, + { .compatible = "fsl,imx6q-gpmi-nand", .data = &gpmi_devdata_imx6q, }, + { .compatible = "fsl,imx6sx-gpmi-nand", .data = &gpmi_devdata_imx6sx, }, + { .compatible = "fsl,imx7d-gpmi-nand", .data = &gpmi_devdata_imx7d,}, + {} }; MODULE_DEVICE_TABLE(of, gpmi_nand_id_table); static int gpmi_nand_probe(struct platform_device *pdev) { struct gpmi_nand_data *this; - const struct of_device_id *of_id; int ret; this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL); if (!this) return -ENOMEM; - of_id = of_match_device(gpmi_nand_id_table, &pdev->dev); - if (of_id) { - this->devdata = of_id->data; - } else { - dev_err(&pdev->dev, "Failed to find the right device id.\n"); - return -ENODEV; - } - + this->devdata = of_device_get_match_data(&pdev->dev); platform_set_drvdata(pdev, this); this->pdev = pdev; this->dev = &pdev->dev; diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-regs.h index f5e4f26c34da..fc31fd084dcf 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-regs.h +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-regs.h @@ -107,6 +107,7 @@ #define BV_GPMI_CTRL1_WRN_DLY_SEL_7_TO_12NS 0x2 #define BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY 0x3 +#define BM_GPMI_CTRL1_GANGED_RDYBUSY (1 << 19) #define BM_GPMI_CTRL1_BCH_MODE (1 << 18) #define BP_GPMI_CTRL1_DLL_ENABLE 17 diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c index 8e22cd6ec71f..efe0ffe4f1ab 100644 --- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c +++ 
b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c @@ -71,8 +71,6 @@ static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np) if (!pdev || !platform_get_drvdata(pdev)) return ERR_PTR(-EPROBE_DEFER); - get_device(&pdev->dev); - ecc = platform_get_drvdata(pdev); clk_prepare_enable(ecc->clk); diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c new file mode 100644 index 000000000000..fdb112e8a90d --- /dev/null +++ b/drivers/mtd/nand/raw/intel-nand-controller.c @@ -0,0 +1,721 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2020 Intel Corporation. */ + +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/dmaengine.h> +#include <linux/dma-direction.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> + +#include <linux/mtd/mtd.h> +#include <linux/mtd/rawnand.h> +#include <linux/mtd/nand.h> + +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <asm/unaligned.h> + +#define EBU_CLC 0x000 +#define EBU_CLC_RST 0x00000000u + +#define EBU_ADDR_SEL(n) (0x020 + (n) * 4) +/* 5 bits 26:22 included for comparison in the ADDR_SELx */ +#define EBU_ADDR_MASK(x) ((x) << 4) +#define EBU_ADDR_SEL_REGEN 0x1 + +#define EBU_BUSCON(n) (0x060 + (n) * 4) +#define EBU_BUSCON_CMULT_V4 0x1 +#define EBU_BUSCON_RECOVC(n) ((n) << 2) +#define EBU_BUSCON_HOLDC(n) ((n) << 4) +#define EBU_BUSCON_WAITRDC(n) ((n) << 6) +#define EBU_BUSCON_WAITWRC(n) ((n) << 8) +#define EBU_BUSCON_BCGEN_CS 0x0 +#define EBU_BUSCON_SETUP_EN BIT(22) +#define EBU_BUSCON_ALEC 0xC000 + +#define EBU_CON 0x0B0 +#define EBU_CON_NANDM_EN BIT(0) +#define EBU_CON_NANDM_DIS 0x0 +#define EBU_CON_CSMUX_E_EN BIT(1) +#define EBU_CON_ALE_P_LOW BIT(2) +#define EBU_CON_CLE_P_LOW BIT(3) +#define EBU_CON_CS_P_LOW BIT(4) +#define EBU_CON_SE_P_LOW BIT(5) +#define EBU_CON_WP_P_LOW BIT(6) +#define EBU_CON_PRE_P_LOW BIT(7) +#define EBU_CON_IN_CS_S(n) ((n) << 8) +#define EBU_CON_OUT_CS_S(n) ((n) << 10) +#define EBU_CON_LAT_EN_CS_P ((0x3D) << 18) + +#define EBU_WAIT 0x0B4 +#define EBU_WAIT_RDBY BIT(0) +#define EBU_WAIT_WR_C BIT(3) + +#define HSNAND_CTL1 0x110 +#define HSNAND_CTL1_ADDR_SHIFT 24 + +#define HSNAND_CTL2 0x114 +#define HSNAND_CTL2_ADDR_SHIFT 8 +#define HSNAND_CTL2_CYC_N_V5 (0x2 << 16) + +#define HSNAND_INT_MSK_CTL 0x124 +#define HSNAND_INT_MSK_CTL_WR_C BIT(4) + +#define HSNAND_INT_STA 0x128 +#define HSNAND_INT_STA_WR_C BIT(4) + +#define HSNAND_CTL 0x130 +#define HSNAND_CTL_ENABLE_ECC BIT(0) +#define HSNAND_CTL_GO BIT(2) +#define HSNAND_CTL_CE_SEL_CS(n) BIT(3 + (n)) +#define HSNAND_CTL_RW_READ 0x0 +#define HSNAND_CTL_RW_WRITE BIT(10) +#define HSNAND_CTL_ECC_OFF_V8TH BIT(11) +#define HSNAND_CTL_CKFF_EN 0x0 +#define HSNAND_CTL_MSG_EN BIT(17) + +#define HSNAND_PARA0 0x13c +#define HSNAND_PARA0_PAGE_V8192 0x3 +#define HSNAND_PARA0_PIB_V256 (0x3 << 4) +#define HSNAND_PARA0_BYP_EN_NP 0x0 +#define HSNAND_PARA0_BYP_DEC_NP 0x0 +#define HSNAND_PARA0_TYPE_ONFI BIT(18) +#define HSNAND_PARA0_ADEP_EN BIT(21) + +#define HSNAND_CMSG_0 0x150 +#define HSNAND_CMSG_1 0x154 + +#define HSNAND_ALE_OFFS BIT(2) +#define HSNAND_CLE_OFFS BIT(3) +#define HSNAND_CS_OFFS BIT(4) + +#define HSNAND_ECC_OFFSET 0x008 + +#define NAND_DATA_IFACE_CHECK_ONLY -1 + +#define MAX_CS 2 + +#define HZ_PER_MHZ 1000000L +#define USEC_PER_SEC 1000000L + +struct ebu_nand_cs { + void __iomem *chipaddr; + dma_addr_t nand_pa; + u32 addr_sel; +}; + 
+struct ebu_nand_controller { + struct nand_controller controller; + struct nand_chip chip; + struct device *dev; + void __iomem *ebu; + void __iomem *hsnand; + struct dma_chan *dma_tx; + struct dma_chan *dma_rx; + struct completion dma_access_complete; + unsigned long clk_rate; + struct clk *clk; + u32 nd_para0; + u8 cs_num; + struct ebu_nand_cs cs[MAX_CS]; +}; + +static inline struct ebu_nand_controller *nand_to_ebu(struct nand_chip *chip) +{ + return container_of(chip, struct ebu_nand_controller, chip); +} + +static int ebu_nand_waitrdy(struct nand_chip *chip, int timeout_ms) +{ + struct ebu_nand_controller *ctrl = nand_to_ebu(chip); + u32 status; + + return readl_poll_timeout(ctrl->ebu + EBU_WAIT, status, + (status & EBU_WAIT_RDBY) || + (status & EBU_WAIT_WR_C), 20, timeout_ms); +} + +static u8 ebu_nand_readb(struct nand_chip *chip) +{ + struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip); + u8 cs_num = ebu_host->cs_num; + u8 val; + + val = readb(ebu_host->cs[cs_num].chipaddr + HSNAND_CS_OFFS); + ebu_nand_waitrdy(chip, 1000); + return val; +} + +static void ebu_nand_writeb(struct nand_chip *chip, u32 offset, u8 value) +{ + struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip); + u8 cs_num = ebu_host->cs_num; + + writeb(value, ebu_host->cs[cs_num].chipaddr + offset); + ebu_nand_waitrdy(chip, 1000); +} + +static void ebu_read_buf(struct nand_chip *chip, u_char *buf, unsigned int len) +{ + int i; + + for (i = 0; i < len; i++) + buf[i] = ebu_nand_readb(chip); +} + +static void ebu_write_buf(struct nand_chip *chip, const u_char *buf, int len) +{ + int i; + + for (i = 0; i < len; i++) + ebu_nand_writeb(chip, HSNAND_CS_OFFS, buf[i]); +} + +static void ebu_nand_disable(struct nand_chip *chip) +{ + struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip); + + writel(0, ebu_host->ebu + EBU_CON); +} + +static void ebu_select_chip(struct nand_chip *chip) +{ + struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip); + void __iomem *nand_con = ebu_host->ebu + EBU_CON; + u32 cs = ebu_host->cs_num; + + writel(EBU_CON_NANDM_EN | EBU_CON_CSMUX_E_EN | EBU_CON_CS_P_LOW | + EBU_CON_SE_P_LOW | EBU_CON_WP_P_LOW | EBU_CON_PRE_P_LOW | + EBU_CON_IN_CS_S(cs) | EBU_CON_OUT_CS_S(cs) | + EBU_CON_LAT_EN_CS_P, nand_con); +} + +static int ebu_nand_set_timings(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) +{ + struct ebu_nand_controller *ctrl = nand_to_ebu(chip); + unsigned int rate = clk_get_rate(ctrl->clk) / HZ_PER_MHZ; + unsigned int period = DIV_ROUND_UP(USEC_PER_SEC, rate); + const struct nand_sdr_timings *timings; + u32 trecov, thold, twrwait, trdwait; + u32 reg = 0; + + timings = nand_get_sdr_timings(conf); + if (IS_ERR(timings)) + return PTR_ERR(timings); + + if (csline == NAND_DATA_IFACE_CHECK_ONLY) + return 0; + + trecov = DIV_ROUND_UP(max(timings->tREA_max, timings->tREH_min), + period); + reg |= EBU_BUSCON_RECOVC(trecov); + + thold = DIV_ROUND_UP(max(timings->tDH_min, timings->tDS_min), period); + reg |= EBU_BUSCON_HOLDC(thold); + + trdwait = DIV_ROUND_UP(max(timings->tRC_min, timings->tREH_min), + period); + reg |= EBU_BUSCON_WAITRDC(trdwait); + + twrwait = DIV_ROUND_UP(max(timings->tWC_min, timings->tWH_min), period); + reg |= EBU_BUSCON_WAITWRC(twrwait); + + reg |= EBU_BUSCON_CMULT_V4 | EBU_BUSCON_BCGEN_CS | EBU_BUSCON_ALEC | + EBU_BUSCON_SETUP_EN; + + writel(reg, ctrl->ebu + EBU_BUSCON(ctrl->cs_num)); + + return 0; +} + +static int ebu_nand_ooblayout_ecc(struct mtd_info *mtd, int section, + struct 
mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + + if (section) + return -ERANGE; + + oobregion->offset = HSNAND_ECC_OFFSET; + oobregion->length = chip->ecc.total; + + return 0; +} + +static int ebu_nand_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + + if (section) + return -ERANGE; + + oobregion->offset = chip->ecc.total + HSNAND_ECC_OFFSET; + oobregion->length = mtd->oobsize - oobregion->offset; + + return 0; +} + +static const struct mtd_ooblayout_ops ebu_nand_ooblayout_ops = { + .ecc = ebu_nand_ooblayout_ecc, + .free = ebu_nand_ooblayout_free, +}; + +static void ebu_dma_rx_callback(void *cookie) +{ + struct ebu_nand_controller *ebu_host = cookie; + + dmaengine_terminate_async(ebu_host->dma_rx); + + complete(&ebu_host->dma_access_complete); +} + +static void ebu_dma_tx_callback(void *cookie) +{ + struct ebu_nand_controller *ebu_host = cookie; + + dmaengine_terminate_async(ebu_host->dma_tx); + + complete(&ebu_host->dma_access_complete); +} + +static int ebu_dma_start(struct ebu_nand_controller *ebu_host, u32 dir, + const u8 *buf, u32 len) +{ + struct dma_async_tx_descriptor *tx; + struct completion *dma_completion; + dma_async_tx_callback callback; + struct dma_chan *chan; + dma_cookie_t cookie; + unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; + dma_addr_t buf_dma; + int ret; + u32 timeout; + + if (dir == DMA_DEV_TO_MEM) { + chan = ebu_host->dma_rx; + dma_completion = &ebu_host->dma_access_complete; + callback = ebu_dma_rx_callback; + } else { + chan = ebu_host->dma_tx; + dma_completion = &ebu_host->dma_access_complete; + callback = ebu_dma_tx_callback; + } + + buf_dma = dma_map_single(chan->device->dev, (void *)buf, len, dir); + if (dma_mapping_error(chan->device->dev, buf_dma)) { + dev_err(ebu_host->dev, "Failed to map DMA buffer\n"); + ret = -EIO; + goto err_unmap; + } + + tx = dmaengine_prep_slave_single(chan, buf_dma, len, dir, flags); + if (!tx) + return -ENXIO; + + tx->callback = callback; + tx->callback_param = ebu_host; + cookie = tx->tx_submit(tx); + + ret = dma_submit_error(cookie); + if (ret) { + dev_err(ebu_host->dev, "dma_submit_error %d\n", cookie); + ret = -EIO; + goto err_unmap; + } + + init_completion(dma_completion); + dma_async_issue_pending(chan); + + /* Wait DMA to finish the data transfer.*/ + timeout = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000)); + if (!timeout) { + dev_err(ebu_host->dev, "I/O Error in DMA RX (status %d)\n", + dmaengine_tx_status(chan, cookie, NULL)); + dmaengine_terminate_sync(chan); + ret = -ETIMEDOUT; + goto err_unmap; + } + + return 0; + +err_unmap: + dma_unmap_single(ebu_host->dev, buf_dma, len, dir); + + return ret; +} + +static void ebu_nand_trigger(struct ebu_nand_controller *ebu_host, + int page, u32 cmd) +{ + unsigned int val; + + val = cmd | (page & 0xFF) << HSNAND_CTL1_ADDR_SHIFT; + writel(val, ebu_host->hsnand + HSNAND_CTL1); + val = (page & 0xFFFF00) >> 8 | HSNAND_CTL2_CYC_N_V5; + writel(val, ebu_host->hsnand + HSNAND_CTL2); + + writel(ebu_host->nd_para0, ebu_host->hsnand + HSNAND_PARA0); + + /* clear first, will update later */ + writel(0xFFFFFFFF, ebu_host->hsnand + HSNAND_CMSG_0); + writel(0xFFFFFFFF, ebu_host->hsnand + HSNAND_CMSG_1); + + writel(HSNAND_INT_MSK_CTL_WR_C, + ebu_host->hsnand + HSNAND_INT_MSK_CTL); + + if (!cmd) + val = HSNAND_CTL_RW_READ; + else + val = HSNAND_CTL_RW_WRITE; + + writel(HSNAND_CTL_MSG_EN | HSNAND_CTL_CKFF_EN | + HSNAND_CTL_ECC_OFF_V8TH | 
HSNAND_CTL_CE_SEL_CS(ebu_host->cs_num) | + HSNAND_CTL_ENABLE_ECC | HSNAND_CTL_GO | val, + ebu_host->hsnand + HSNAND_CTL); +} + +static int ebu_nand_read_page_hwecc(struct nand_chip *chip, u8 *buf, + int oob_required, int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip); + int ret, reg_data; + + ebu_nand_trigger(ebu_host, page, NAND_CMD_READ0); + + ret = ebu_dma_start(ebu_host, DMA_DEV_TO_MEM, buf, mtd->writesize); + if (ret) + return ret; + + if (oob_required) + chip->ecc.read_oob(chip, page); + + reg_data = readl(ebu_host->hsnand + HSNAND_CTL); + reg_data &= ~HSNAND_CTL_GO; + writel(reg_data, ebu_host->hsnand + HSNAND_CTL); + + return 0; +} + +static int ebu_nand_write_page_hwecc(struct nand_chip *chip, const u8 *buf, + int oob_required, int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip); + void __iomem *int_sta = ebu_host->hsnand + HSNAND_INT_STA; + int reg_data, ret, val; + u32 reg; + + ebu_nand_trigger(ebu_host, page, NAND_CMD_SEQIN); + + ret = ebu_dma_start(ebu_host, DMA_MEM_TO_DEV, buf, mtd->writesize); + if (ret) + return ret; + + if (oob_required) { + reg = get_unaligned_le32(chip->oob_poi); + writel(reg, ebu_host->hsnand + HSNAND_CMSG_0); + + reg = get_unaligned_le32(chip->oob_poi + 4); + writel(reg, ebu_host->hsnand + HSNAND_CMSG_1); + } + + ret = readl_poll_timeout_atomic(int_sta, val, !(val & HSNAND_INT_STA_WR_C), + 10, 1000); + if (ret) + return ret; + + reg_data = readl(ebu_host->hsnand + HSNAND_CTL); + reg_data &= ~HSNAND_CTL_GO; + writel(reg_data, ebu_host->hsnand + HSNAND_CTL); + + return 0; +} + +static const u8 ecc_strength[] = { 1, 1, 4, 8, 24, 32, 40, 60, }; + +static int ebu_nand_attach_chip(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct ebu_nand_controller *ebu_host = nand_get_controller_data(chip); + u32 ecc_steps, ecc_bytes, ecc_total, pagesize, pg_per_blk; + u32 ecc_strength_ds = chip->ecc.strength; + u32 ecc_size = chip->ecc.size; + u32 writesize = mtd->writesize; + u32 blocksize = mtd->erasesize; + int bch_algo, start, val; + + /* Default to an ECC size of 512 */ + if (!chip->ecc.size) + chip->ecc.size = 512; + + switch (ecc_size) { + case 512: + start = 1; + if (!ecc_strength_ds) + ecc_strength_ds = 4; + break; + case 1024: + start = 4; + if (!ecc_strength_ds) + ecc_strength_ds = 32; + break; + default: + return -EINVAL; + } + + /* BCH ECC algorithm Settings for number of bits per 512B/1024B */ + bch_algo = round_up(start + 1, 4); + for (val = start; val < bch_algo; val++) { + if (ecc_strength_ds == ecc_strength[val]) + break; + } + if (val == bch_algo) + return -EINVAL; + + if (ecc_strength_ds == 8) + ecc_bytes = 14; + else + ecc_bytes = DIV_ROUND_UP(ecc_strength_ds * fls(8 * ecc_size), 8); + + ecc_steps = writesize / ecc_size; + ecc_total = ecc_steps * ecc_bytes; + if ((ecc_total + 8) > mtd->oobsize) + return -ERANGE; + + chip->ecc.total = ecc_total; + pagesize = fls(writesize >> 11); + if (pagesize > HSNAND_PARA0_PAGE_V8192) + return -ERANGE; + + pg_per_blk = fls((blocksize / writesize) >> 6) / 8; + if (pg_per_blk > HSNAND_PARA0_PIB_V256) + return -ERANGE; + + ebu_host->nd_para0 = pagesize | pg_per_blk | HSNAND_PARA0_BYP_EN_NP | + HSNAND_PARA0_BYP_DEC_NP | HSNAND_PARA0_ADEP_EN | + HSNAND_PARA0_TYPE_ONFI | (val << 29); + + mtd_set_ooblayout(mtd, &ebu_nand_ooblayout_ops); + chip->ecc.read_page = ebu_nand_read_page_hwecc; + chip->ecc.write_page = ebu_nand_write_page_hwecc; + + 
return 0; +} + +static int ebu_nand_exec_op(struct nand_chip *chip, + const struct nand_operation *op, bool check_only) +{ + const struct nand_op_instr *instr = NULL; + unsigned int op_id; + int i, timeout_ms, ret = 0; + + if (check_only) + return 0; + + ebu_select_chip(chip); + for (op_id = 0; op_id < op->ninstrs; op_id++) { + instr = &op->instrs[op_id]; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + ebu_nand_writeb(chip, HSNAND_CLE_OFFS | HSNAND_CS_OFFS, + instr->ctx.cmd.opcode); + break; + + case NAND_OP_ADDR_INSTR: + for (i = 0; i < instr->ctx.addr.naddrs; i++) + ebu_nand_writeb(chip, + HSNAND_ALE_OFFS | HSNAND_CS_OFFS, + instr->ctx.addr.addrs[i]); + break; + + case NAND_OP_DATA_IN_INSTR: + ebu_read_buf(chip, instr->ctx.data.buf.in, + instr->ctx.data.len); + break; + + case NAND_OP_DATA_OUT_INSTR: + ebu_write_buf(chip, instr->ctx.data.buf.out, + instr->ctx.data.len); + break; + + case NAND_OP_WAITRDY_INSTR: + timeout_ms = instr->ctx.waitrdy.timeout_ms * 1000; + ret = ebu_nand_waitrdy(chip, timeout_ms); + break; + } + } + + return ret; +} + +static const struct nand_controller_ops ebu_nand_controller_ops = { + .attach_chip = ebu_nand_attach_chip, + .setup_interface = ebu_nand_set_timings, + .exec_op = ebu_nand_exec_op, +}; + +static void ebu_dma_cleanup(struct ebu_nand_controller *ebu_host) +{ + if (ebu_host->dma_rx) + dma_release_channel(ebu_host->dma_rx); + + if (ebu_host->dma_tx) + dma_release_channel(ebu_host->dma_tx); +} + +static int ebu_nand_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ebu_nand_controller *ebu_host; + struct nand_chip *nand; + struct mtd_info *mtd = NULL; + struct resource *res; + char *resname; + int ret; + u32 cs; + + ebu_host = devm_kzalloc(dev, sizeof(*ebu_host), GFP_KERNEL); + if (!ebu_host) + return -ENOMEM; + + ebu_host->dev = dev; + nand_controller_init(&ebu_host->controller); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ebunand"); + ebu_host->ebu = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ebu_host->ebu)) + return PTR_ERR(ebu_host->ebu); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hsnand"); + ebu_host->hsnand = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ebu_host->hsnand)) + return PTR_ERR(ebu_host->hsnand); + + ret = device_property_read_u32(dev, "reg", &cs); + if (ret) { + dev_err(dev, "failed to get chip select: %d\n", ret); + return ret; + } + ebu_host->cs_num = cs; + + resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname); + ebu_host->cs[cs].chipaddr = devm_ioremap_resource(dev, res); + ebu_host->cs[cs].nand_pa = res->start; + if (IS_ERR(ebu_host->cs[cs].chipaddr)) + return PTR_ERR(ebu_host->cs[cs].chipaddr); + + ebu_host->clk = devm_clk_get(dev, NULL); + if (IS_ERR(ebu_host->clk)) + return dev_err_probe(dev, PTR_ERR(ebu_host->clk), + "failed to get clock\n"); + + ret = clk_prepare_enable(ebu_host->clk); + if (ret) { + dev_err(dev, "failed to enable clock: %d\n", ret); + return ret; + } + ebu_host->clk_rate = clk_get_rate(ebu_host->clk); + + ebu_host->dma_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(ebu_host->dma_tx)) + return dev_err_probe(dev, PTR_ERR(ebu_host->dma_tx), + "failed to request DMA tx chan!.\n"); + + ebu_host->dma_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(ebu_host->dma_rx)) + return dev_err_probe(dev, PTR_ERR(ebu_host->dma_rx), + "failed to request DMA rx chan!.\n"); + + resname = devm_kasprintf(dev, GFP_KERNEL, "addr_sel%d", cs); + res = 
platform_get_resource_byname(pdev, IORESOURCE_MEM, resname); + if (!res) + return -EINVAL; + ebu_host->cs[cs].addr_sel = res->start; + writel(ebu_host->cs[cs].addr_sel | EBU_ADDR_MASK(5) | EBU_ADDR_SEL_REGEN, + ebu_host->ebu + EBU_ADDR_SEL(cs)); + + nand_set_flash_node(&ebu_host->chip, dev->of_node); + if (!mtd->name) { + dev_err(ebu_host->dev, "NAND label property is mandatory\n"); + return -EINVAL; + } + + mtd = nand_to_mtd(&ebu_host->chip); + mtd->dev.parent = dev; + ebu_host->dev = dev; + + platform_set_drvdata(pdev, ebu_host); + nand_set_controller_data(&ebu_host->chip, ebu_host); + + nand = &ebu_host->chip; + nand->controller = &ebu_host->controller; + nand->controller->ops = &ebu_nand_controller_ops; + + /* Scan to find existence of the device */ + ret = nand_scan(&ebu_host->chip, 1); + if (ret) + goto err_cleanup_dma; + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) + goto err_clean_nand; + + return 0; + +err_clean_nand: + nand_cleanup(&ebu_host->chip); +err_cleanup_dma: + ebu_dma_cleanup(ebu_host); + clk_disable_unprepare(ebu_host->clk); + + return ret; +} + +static int ebu_nand_remove(struct platform_device *pdev) +{ + struct ebu_nand_controller *ebu_host = platform_get_drvdata(pdev); + int ret; + + ret = mtd_device_unregister(nand_to_mtd(&ebu_host->chip)); + WARN_ON(ret); + nand_cleanup(&ebu_host->chip); + ebu_nand_disable(&ebu_host->chip); + ebu_dma_cleanup(ebu_host); + clk_disable_unprepare(ebu_host->clk); + + return 0; +} + +static const struct of_device_id ebu_nand_match[] = { + { .compatible = "intel,nand-controller" }, + { .compatible = "intel,lgm-ebunand" }, + {} +}; +MODULE_DEVICE_TABLE(of, ebu_nand_match); + +static struct platform_driver ebu_nand_driver = { + .probe = ebu_nand_probe, + .remove = ebu_nand_remove, + .driver = { + .name = "intel-nand-controller", + .of_match_table = ebu_nand_match, + }, + +}; +module_platform_driver(ebu_nand_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@intel.com>"); +MODULE_DESCRIPTION("Intel's LGM External Bus NAND Controller driver"); diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c index 9e728c731795..452ecaf7775a 100644 --- a/drivers/mtd/nand/raw/lpc32xx_mlc.c +++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c @@ -31,7 +31,6 @@ #include <linux/mm.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> -#include <linux/mtd/nand_ecc.h> #define DRV_NAME "lpc32xx_mlc" diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c index dc7785e30d2f..6b7269cfb7d8 100644 --- a/drivers/mtd/nand/raw/lpc32xx_slc.c +++ b/drivers/mtd/nand/raw/lpc32xx_slc.c @@ -23,7 +23,6 @@ #include <linux/mm.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> -#include <linux/mtd/nand_ecc.h> #include <linux/gpio.h> #include <linux/of.h> #include <linux/of_gpio.h> @@ -803,7 +802,7 @@ static int lpc32xx_nand_attach_chip(struct nand_chip *chip) chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome; chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome; chip->ecc.calculate = lpc32xx_nand_ecc_calculate; - chip->ecc.correct = nand_correct_data; + chip->ecc.correct = rawnand_sw_hamming_correct; chip->ecc.hwctl = lpc32xx_nand_ecc_enable; /* diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index f5ca2002d08e..42d4881d598d 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -2679,12 +2679,6 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc 
*nfc, mtd->dev.parent = dev; /* - * Default to HW ECC engine mode. If the nand-ecc-mode property is given - * in the DT node, this entry will be overwritten in nand_scan_ident(). - */ - chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; - - /* * Save a reference value for timing registers before * ->setup_interface() is called. */ diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index 48e6dac96be6..817bddccb775 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -510,7 +510,7 @@ static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, void *databuf, } static void meson_nfc_dma_buffer_release(struct nand_chip *nand, - int infolen, int datalen, + int datalen, int infolen, enum dma_data_direction dir) { struct meson_nfc *nfc = nand_get_controller_data(nand); @@ -1044,9 +1044,12 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc) ret = clk_set_rate(nfc->device_clk, 24000000); if (ret) - goto err_phase_rx; + goto err_disable_rx; return 0; + +err_disable_rx: + clk_disable_unprepare(nfc->phase_rx); err_phase_rx: clk_disable_unprepare(nfc->phase_tx); err_phase_tx: diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c index fb4c0b11689f..bcd4a556c959 100644 --- a/drivers/mtd/nand/raw/mpc5121_nfc.c +++ b/drivers/mtd/nand/raw/mpc5121_nfc.c @@ -606,7 +606,9 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd) static int mpc5121_nfc_attach_chip(struct nand_chip *chip) { chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; + + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; return 0; } diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c index 684c51e5e60d..fd705dd1768d 100644 --- a/drivers/mtd/nand/raw/mxc_nand.c +++ b/drivers/mtd/nand/raw/mxc_nand.c @@ -21,7 +21,6 @@ #include <linux/completion.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/platform_data/mtd-mxc_nand.h> #define DRIVER_NAME "mxc_nand" @@ -184,7 +183,6 @@ struct mxc_nand_host { unsigned int buf_start; const struct mxc_nand_devtype_data *devtype_data; - struct mxc_nand_platform_data pdata; }; static const char * const part_probes[] = { @@ -1611,70 +1609,16 @@ static inline int is_imx53_nfc(struct mxc_nand_host *host) return host->devtype_data == &imx53_nand_devtype_data; } -static const struct platform_device_id mxcnd_devtype[] = { - { - .name = "imx21-nand", - .driver_data = (kernel_ulong_t) &imx21_nand_devtype_data, - }, { - .name = "imx27-nand", - .driver_data = (kernel_ulong_t) &imx27_nand_devtype_data, - }, { - .name = "imx25-nand", - .driver_data = (kernel_ulong_t) &imx25_nand_devtype_data, - }, { - .name = "imx51-nand", - .driver_data = (kernel_ulong_t) &imx51_nand_devtype_data, - }, { - .name = "imx53-nand", - .driver_data = (kernel_ulong_t) &imx53_nand_devtype_data, - }, { - /* sentinel */ - } -}; -MODULE_DEVICE_TABLE(platform, mxcnd_devtype); - -#ifdef CONFIG_OF static const struct of_device_id mxcnd_dt_ids[] = { - { - .compatible = "fsl,imx21-nand", - .data = &imx21_nand_devtype_data, - }, { - .compatible = "fsl,imx27-nand", - .data = &imx27_nand_devtype_data, - }, { - .compatible = "fsl,imx25-nand", - .data = &imx25_nand_devtype_data, - }, { - .compatible = "fsl,imx51-nand", - .data = &imx51_nand_devtype_data, - }, { - .compatible = "fsl,imx53-nand", - .data = &imx53_nand_devtype_data, - }, + { .compatible = "fsl,imx21-nand", .data = &imx21_nand_devtype_data, }, + { 
.compatible = "fsl,imx27-nand", .data = &imx27_nand_devtype_data, }, + { .compatible = "fsl,imx25-nand", .data = &imx25_nand_devtype_data, }, + { .compatible = "fsl,imx51-nand", .data = &imx51_nand_devtype_data, }, + { .compatible = "fsl,imx53-nand", .data = &imx53_nand_devtype_data, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mxcnd_dt_ids); -static int mxcnd_probe_dt(struct mxc_nand_host *host) -{ - struct device_node *np = host->dev->of_node; - const struct of_device_id *of_id = - of_match_device(mxcnd_dt_ids, host->dev); - - if (!np) - return 1; - - host->devtype_data = of_id->data; - - return 0; -} -#else -static int mxcnd_probe_dt(struct mxc_nand_host *host) -{ - return 1; -} -#endif - static int mxcnd_attach_chip(struct nand_chip *chip) { struct mtd_info *mtd = nand_to_mtd(chip); @@ -1800,20 +1744,7 @@ static int mxcnd_probe(struct platform_device *pdev) if (IS_ERR(host->clk)) return PTR_ERR(host->clk); - err = mxcnd_probe_dt(host); - if (err > 0) { - struct mxc_nand_platform_data *pdata = - dev_get_platdata(&pdev->dev); - if (pdata) { - host->pdata = *pdata; - host->devtype_data = (struct mxc_nand_devtype_data *) - pdev->id_entry->driver_data; - } else { - err = -ENODEV; - } - } - if (err < 0) - return err; + host->devtype_data = device_get_match_data(&pdev->dev); if (!host->devtype_data->setup_interface) this->options |= NAND_KEEP_TIMINGS; @@ -1843,14 +1774,6 @@ static int mxcnd_probe(struct platform_device *pdev) this->legacy.select_chip = host->devtype_data->select_chip; - /* NAND bus width determines access functions used by upper layer */ - if (host->pdata.width == 2) - this->options |= NAND_BUSWIDTH_16; - - /* update flash based bbt */ - if (host->pdata.flash_bbt) - this->bbt_options |= NAND_BBT_USE_FLASH; - init_completion(&host->op_completion); host->irq = platform_get_irq(pdev, 0); @@ -1891,9 +1814,7 @@ static int mxcnd_probe(struct platform_device *pdev) goto escan; /* Register the partitions */ - err = mtd_device_parse_register(mtd, part_probes, NULL, - host->pdata.parts, - host->pdata.nr_parts); + err = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0); if (err) goto cleanup_nand; @@ -1930,7 +1851,6 @@ static struct platform_driver mxcnd_driver = { .name = DRIVER_NAME, .of_match_table = of_match_ptr(mxcnd_dt_ids), }, - .id_table = mxcnd_devtype, .probe = mxcnd_probe, .remove = mxcnd_remove, }; diff --git a/drivers/mtd/nand/raw/mxic_nand.c b/drivers/mtd/nand/raw/mxic_nand.c index d66b5b0971fa..da1070993994 100644 --- a/drivers/mtd/nand/raw/mxic_nand.c +++ b/drivers/mtd/nand/raw/mxic_nand.c @@ -12,8 +12,8 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/mtd/mtd.h> +#include <linux/mtd/nand-ecc-sw-hamming.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/platform_device.h> #include "internals.h" diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 1f0d542d5923..c33fa1b1847f 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -35,8 +35,8 @@ #include <linux/types.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> -#include <linux/mtd/nand_ecc.h> -#include <linux/mtd/nand_bch.h> +#include <linux/mtd/nand-ecc-sw-hamming.h> +#include <linux/mtd/nand-ecc-sw-bch.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/io.h> @@ -5139,6 +5139,118 @@ static void nand_scan_ident_cleanup(struct nand_chip *chip) kfree(chip->parameters.onfi); } +int rawnand_sw_hamming_init(struct nand_chip *chip) +{ + struct 
nand_ecc_sw_hamming_conf *engine_conf; + struct nand_device *base = &chip->base; + int ret; + + base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; + base->ecc.user_conf.algo = NAND_ECC_ALGO_HAMMING; + base->ecc.user_conf.strength = chip->ecc.strength; + base->ecc.user_conf.step_size = chip->ecc.size; + + ret = nand_ecc_sw_hamming_init_ctx(base); + if (ret) + return ret; + + engine_conf = base->ecc.ctx.priv; + + if (chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER) + engine_conf->sm_order = true; + + chip->ecc.size = base->ecc.ctx.conf.step_size; + chip->ecc.strength = base->ecc.ctx.conf.strength; + chip->ecc.total = base->ecc.ctx.total; + chip->ecc.steps = engine_conf->nsteps; + chip->ecc.bytes = engine_conf->code_size; + + return 0; +} +EXPORT_SYMBOL(rawnand_sw_hamming_init); + +int rawnand_sw_hamming_calculate(struct nand_chip *chip, + const unsigned char *buf, + unsigned char *code) +{ + struct nand_device *base = &chip->base; + + return nand_ecc_sw_hamming_calculate(base, buf, code); +} +EXPORT_SYMBOL(rawnand_sw_hamming_calculate); + +int rawnand_sw_hamming_correct(struct nand_chip *chip, + unsigned char *buf, + unsigned char *read_ecc, + unsigned char *calc_ecc) +{ + struct nand_device *base = &chip->base; + + return nand_ecc_sw_hamming_correct(base, buf, read_ecc, calc_ecc); +} +EXPORT_SYMBOL(rawnand_sw_hamming_correct); + +void rawnand_sw_hamming_cleanup(struct nand_chip *chip) +{ + struct nand_device *base = &chip->base; + + nand_ecc_sw_hamming_cleanup_ctx(base); +} +EXPORT_SYMBOL(rawnand_sw_hamming_cleanup); + +int rawnand_sw_bch_init(struct nand_chip *chip) +{ + struct nand_device *base = &chip->base; + struct nand_ecc_sw_bch_conf *engine_conf; + int ret; + + base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; + base->ecc.user_conf.algo = NAND_ECC_ALGO_BCH; + base->ecc.user_conf.step_size = chip->ecc.size; + base->ecc.user_conf.strength = chip->ecc.strength; + + ret = nand_ecc_sw_bch_init_ctx(base); + if (ret) + return ret; + + engine_conf = base->ecc.ctx.priv; + + chip->ecc.size = base->ecc.ctx.conf.step_size; + chip->ecc.strength = base->ecc.ctx.conf.strength; + chip->ecc.total = base->ecc.ctx.total; + chip->ecc.steps = engine_conf->nsteps; + chip->ecc.bytes = engine_conf->code_size; + + return 0; +} +EXPORT_SYMBOL(rawnand_sw_bch_init); + +static int rawnand_sw_bch_calculate(struct nand_chip *chip, + const unsigned char *buf, + unsigned char *code) +{ + struct nand_device *base = &chip->base; + + return nand_ecc_sw_bch_calculate(base, buf, code); +} + +int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf, + unsigned char *read_ecc, unsigned char *calc_ecc) +{ + struct nand_device *base = &chip->base; + + return nand_ecc_sw_bch_correct(base, buf, read_ecc, calc_ecc); +} +EXPORT_SYMBOL(rawnand_sw_bch_correct); + +void rawnand_sw_bch_cleanup(struct nand_chip *chip) +{ + struct nand_device *base = &chip->base; + + nand_ecc_sw_bch_cleanup_ctx(base); +} +EXPORT_SYMBOL(rawnand_sw_bch_cleanup); + static int nand_set_ecc_on_host_ops(struct nand_chip *chip) { struct nand_ecc_ctrl *ecc = &chip->ecc; @@ -5203,14 +5315,15 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip) struct mtd_info *mtd = nand_to_mtd(chip); struct nand_device *nanddev = mtd_to_nanddev(mtd); struct nand_ecc_ctrl *ecc = &chip->ecc; + int ret; if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT)) return -EINVAL; switch (ecc->algo) { case NAND_ECC_ALGO_HAMMING: - ecc->calculate = nand_calculate_ecc; - ecc->correct = nand_correct_data; + ecc->calculate = 
rawnand_sw_hamming_calculate; + ecc->correct = rawnand_sw_hamming_correct; ecc->read_page = nand_read_page_swecc; ecc->read_subpage = nand_read_subpage; ecc->write_page = nand_write_page_swecc; @@ -5228,14 +5341,20 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip) if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC)) ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER; + ret = rawnand_sw_hamming_init(chip); + if (ret) { + WARN(1, "Hamming ECC initialization failed!\n"); + return ret; + } + return 0; case NAND_ECC_ALGO_BCH: - if (!mtd_nand_has_bch()) { + if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) { WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n"); return -EINVAL; } - ecc->calculate = nand_bch_calculate_ecc; - ecc->correct = nand_bch_correct_data; + ecc->calculate = rawnand_sw_bch_calculate; + ecc->correct = rawnand_sw_bch_correct; ecc->read_page = nand_read_page_swecc; ecc->read_subpage = nand_read_subpage; ecc->write_page = nand_write_page_swecc; @@ -5247,55 +5366,20 @@ static int nand_set_ecc_soft_ops(struct nand_chip *chip) ecc->write_oob = nand_write_oob_std; /* - * Board driver should supply ecc.size and ecc.strength - * values to select how many bits are correctable. - * Otherwise, default to 4 bits for large page devices. - */ - if (!ecc->size && (mtd->oobsize >= 64)) { - ecc->size = 512; - ecc->strength = 4; - } - - /* - * if no ecc placement scheme was provided pickup the default - * large page one. - */ - if (!mtd->ooblayout) { - /* handle large page devices only */ - if (mtd->oobsize < 64) { - WARN(1, "OOB layout is required when using software BCH on small pages\n"); - return -EINVAL; - } - - mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout()); - - } - - /* * We can only maximize ECC config when the default layout is * used, otherwise we don't know how many bytes can really be * used. */ - if (mtd->ooblayout == nand_get_large_page_ooblayout() && - nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) { - int steps, bytes; - - /* Always prefer 1k blocks over 512bytes ones */ - ecc->size = 1024; - steps = mtd->writesize / ecc->size; + if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH && + mtd->ooblayout != nand_get_large_page_ooblayout()) + nanddev->ecc.user_conf.flags &= ~NAND_ECC_MAXIMIZE_STRENGTH; - /* Reserve 2 bytes for the BBM */ - bytes = (mtd->oobsize - 2) / steps; - ecc->strength = bytes * 8 / fls(8 * ecc->size); - } - - /* See nand_bch_init() for details. */ - ecc->bytes = 0; - ecc->priv = nand_bch_init(mtd); - if (!ecc->priv) { + ret = rawnand_sw_bch_init(chip); + if (ret) { WARN(1, "BCH ECC initialization failed!\n"); - return -EINVAL; + return ret; } + return 0; default: WARN(1, "Unsupported ECC algorithm!\n"); @@ -5639,7 +5723,9 @@ static int nand_scan_tail(struct nand_chip *chip) */ if (!mtd->ooblayout && !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT && - ecc->algo == NAND_ECC_ALGO_BCH)) { + ecc->algo == NAND_ECC_ALGO_BCH) && + !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT && + ecc->algo == NAND_ECC_ALGO_HAMMING)) { switch (mtd->oobsize) { case 8: case 16: @@ -5756,15 +5842,18 @@ static int nand_scan_tail(struct nand_chip *chip) * Set the number of read / write steps for one page depending on ECC * mode. 
*/ - ecc->steps = mtd->writesize / ecc->size; + if (!ecc->steps) + ecc->steps = mtd->writesize / ecc->size; if (ecc->steps * ecc->size != mtd->writesize) { WARN(1, "Invalid ECC parameters\n"); ret = -EINVAL; goto err_nand_manuf_cleanup; } - ecc->total = ecc->steps * ecc->bytes; - chip->base.ecc.ctx.total = ecc->total; + if (!ecc->total) { + ecc->total = ecc->steps * ecc->bytes; + chip->base.ecc.ctx.total = ecc->total; + } if (ecc->total > mtd->oobsize) { WARN(1, "Total number of ECC bytes exceeded oobsize\n"); @@ -5953,9 +6042,12 @@ EXPORT_SYMBOL(nand_scan_with_ids); */ void nand_cleanup(struct nand_chip *chip) { - if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT && - chip->ecc.algo == NAND_ECC_ALGO_BCH) - nand_bch_free((struct nand_bch_control *)chip->ecc.priv); + if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT) { + if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING) + rawnand_sw_hamming_cleanup(chip); + else if (chip->ecc.algo == NAND_ECC_ALGO_BCH) + rawnand_sw_bch_cleanup(chip); + } nanddev_cleanup(&chip->base); diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c index 344a24fd2ca8..dced32a126d9 100644 --- a/drivers/mtd/nand/raw/nand_bbt.c +++ b/drivers/mtd/nand/raw/nand_bbt.c @@ -1087,7 +1087,7 @@ static int nand_update_bbt(struct nand_chip *this, loff_t offs) } /** - * mark_bbt_regions - [GENERIC] mark the bad block table regions + * mark_bbt_region - [GENERIC] mark the bad block table regions * @this: the NAND device * @td: bad block table descriptor * diff --git a/drivers/mtd/nand/raw/nand_bch.c b/drivers/mtd/nand/raw/nand_bch.c deleted file mode 100644 index 9d19ac14c196..000000000000 --- a/drivers/mtd/nand/raw/nand_bch.c +++ /dev/null @@ -1,219 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * This file provides ECC correction for more than 1 bit per block of data, - * using binary BCH codes. It relies on the generic BCH library lib/bch.c. 
- * - * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com> - */ - -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/bitops.h> -#include <linux/mtd/mtd.h> -#include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_bch.h> -#include <linux/bch.h> - -/** - * struct nand_bch_control - private NAND BCH control structure - * @bch: BCH control structure - * @errloc: error location array - * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid - */ -struct nand_bch_control { - struct bch_control *bch; - unsigned int *errloc; - unsigned char *eccmask; -}; - -/** - * nand_bch_calculate_ecc - [NAND Interface] Calculate ECC for data block - * @chip: NAND chip object - * @buf: input buffer with raw data - * @code: output buffer with ECC - */ -int nand_bch_calculate_ecc(struct nand_chip *chip, const unsigned char *buf, - unsigned char *code) -{ - struct nand_bch_control *nbc = chip->ecc.priv; - unsigned int i; - - memset(code, 0, chip->ecc.bytes); - bch_encode(nbc->bch, buf, chip->ecc.size, code); - - /* apply mask so that an erased page is a valid codeword */ - for (i = 0; i < chip->ecc.bytes; i++) - code[i] ^= nbc->eccmask[i]; - - return 0; -} -EXPORT_SYMBOL(nand_bch_calculate_ecc); - -/** - * nand_bch_correct_data - [NAND Interface] Detect and correct bit error(s) - * @chip: NAND chip object - * @buf: raw data read from the chip - * @read_ecc: ECC from the chip - * @calc_ecc: the ECC calculated from raw data - * - * Detect and correct bit errors for a data byte block - */ -int nand_bch_correct_data(struct nand_chip *chip, unsigned char *buf, - unsigned char *read_ecc, unsigned char *calc_ecc) -{ - struct nand_bch_control *nbc = chip->ecc.priv; - unsigned int *errloc = nbc->errloc; - int i, count; - - count = bch_decode(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc, - NULL, errloc); - if (count > 0) { - for (i = 0; i < count; i++) { - if (errloc[i] < (chip->ecc.size*8)) - /* error is located in data, correct it */ - buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7)); - /* else error in ecc, no action needed */ - - pr_debug("%s: corrected bitflip %u\n", __func__, - errloc[i]); - } - } else if (count < 0) { - pr_err("ecc unrecoverable error\n"); - count = -EBADMSG; - } - return count; -} -EXPORT_SYMBOL(nand_bch_correct_data); - -/** - * nand_bch_init - [NAND Interface] Initialize NAND BCH error correction - * @mtd: MTD block structure - * - * Returns: - * a pointer to a new NAND BCH control structure, or NULL upon failure - * - * Initialize NAND BCH error correction. Parameters @eccsize and @eccbytes - * are used to compute BCH parameters m (Galois field order) and t (error - * correction capability). @eccbytes should be equal to the number of bytes - * required to store m*t bits, where m is such that 2^m-1 > @eccsize*8. 
- * - * Example: to configure 4 bit correction per 512 bytes, you should pass - * @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8) - * @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits) - */ -struct nand_bch_control *nand_bch_init(struct mtd_info *mtd) -{ - struct nand_chip *nand = mtd_to_nand(mtd); - unsigned int m, t, eccsteps, i; - struct nand_bch_control *nbc = NULL; - unsigned char *erased_page; - unsigned int eccsize = nand->ecc.size; - unsigned int eccbytes = nand->ecc.bytes; - unsigned int eccstrength = nand->ecc.strength; - - if (!eccbytes && eccstrength) { - eccbytes = DIV_ROUND_UP(eccstrength * fls(8 * eccsize), 8); - nand->ecc.bytes = eccbytes; - } - - if (!eccsize || !eccbytes) { - pr_warn("ecc parameters not supplied\n"); - goto fail; - } - - m = fls(1+8*eccsize); - t = (eccbytes*8)/m; - - nbc = kzalloc(sizeof(*nbc), GFP_KERNEL); - if (!nbc) - goto fail; - - nbc->bch = bch_init(m, t, 0, false); - if (!nbc->bch) - goto fail; - - /* verify that eccbytes has the expected value */ - if (nbc->bch->ecc_bytes != eccbytes) { - pr_warn("invalid eccbytes %u, should be %u\n", - eccbytes, nbc->bch->ecc_bytes); - goto fail; - } - - eccsteps = mtd->writesize/eccsize; - - /* Check that we have an oob layout description. */ - if (!mtd->ooblayout) { - pr_warn("missing oob scheme"); - goto fail; - } - - /* sanity checks */ - if (8*(eccsize+eccbytes) >= (1 << m)) { - pr_warn("eccsize %u is too large\n", eccsize); - goto fail; - } - - /* - * ecc->steps and ecc->total might be used by mtd->ooblayout->ecc(), - * which is called by mtd_ooblayout_count_eccbytes(). - * Make sure they are properly initialized before calling - * mtd_ooblayout_count_eccbytes(). - * FIXME: we should probably rework the sequencing in nand_scan_tail() - * to avoid setting those fields twice. 
- */ - nand->ecc.steps = eccsteps; - nand->ecc.total = eccsteps * eccbytes; - nand->base.ecc.ctx.total = nand->ecc.total; - if (mtd_ooblayout_count_eccbytes(mtd) != (eccsteps*eccbytes)) { - pr_warn("invalid ecc layout\n"); - goto fail; - } - - nbc->eccmask = kzalloc(eccbytes, GFP_KERNEL); - nbc->errloc = kmalloc_array(t, sizeof(*nbc->errloc), GFP_KERNEL); - if (!nbc->eccmask || !nbc->errloc) - goto fail; - /* - * compute and store the inverted ecc of an erased ecc block - */ - erased_page = kmalloc(eccsize, GFP_KERNEL); - if (!erased_page) - goto fail; - - memset(erased_page, 0xff, eccsize); - bch_encode(nbc->bch, erased_page, eccsize, nbc->eccmask); - kfree(erased_page); - - for (i = 0; i < eccbytes; i++) - nbc->eccmask[i] ^= 0xff; - - if (!eccstrength) - nand->ecc.strength = (eccbytes * 8) / fls(8 * eccsize); - - return nbc; -fail: - nand_bch_free(nbc); - return NULL; -} -EXPORT_SYMBOL(nand_bch_init); - -/** - * nand_bch_free - [NAND Interface] Release NAND BCH ECC resources - * @nbc: NAND BCH control structure - */ -void nand_bch_free(struct nand_bch_control *nbc) -{ - if (nbc) { - bch_free(nbc->bch); - kfree(nbc->errloc); - kfree(nbc->eccmask); - kfree(nbc); - } -} -EXPORT_SYMBOL(nand_bch_free); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>"); -MODULE_DESCRIPTION("NAND software BCH ECC support"); diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c index 2bcc03714432..eccc18b266d5 100644 --- a/drivers/mtd/nand/raw/nand_legacy.c +++ b/drivers/mtd/nand/raw/nand_legacy.c @@ -192,9 +192,10 @@ static void panic_nand_wait_ready(struct nand_chip *chip, unsigned long timeo) */ void nand_wait_ready(struct nand_chip *chip) { + struct mtd_info *mtd = nand_to_mtd(chip); unsigned long timeo = 400; - if (in_interrupt() || oops_in_progress) + if (mtd->oops_panic_write) return panic_nand_wait_ready(chip, timeo); /* Wait until command is processed or timeout occurs */ @@ -531,7 +532,7 @@ EXPORT_SYMBOL(nand_get_set_features_notsupp); */ static int nand_wait(struct nand_chip *chip) { - + struct mtd_info *mtd = nand_to_mtd(chip); unsigned long timeo = 400; u8 status; int ret; @@ -546,9 +547,9 @@ static int nand_wait(struct nand_chip *chip) if (ret) return ret; - if (in_interrupt() || oops_in_progress) + if (mtd->oops_panic_write) { panic_nand_wait(chip, timeo); - else { + } else { timeo = jiffies + msecs_to_jiffies(timeo); do { if (chip->legacy.dev_ready) { diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index a8048cb8d220..f2b9250c0ea8 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -23,7 +23,6 @@ #include <linux/string.h> #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_bch.h> #include <linux/mtd/partitions.h> #include <linux/delay.h> #include <linux/list.h> @@ -2214,7 +2213,7 @@ static int ns_attach_chip(struct nand_chip *chip) if (!bch) return 0; - if (!mtd_nand_has_bch()) { + if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) { NS_ERR("BCH ECC support is disabled\n"); return -EINVAL; } diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c index 0fb4ba93c41e..338d6b1a189e 100644 --- a/drivers/mtd/nand/raw/ndfc.c +++ b/drivers/mtd/nand/raw/ndfc.c @@ -18,7 +18,6 @@ */ #include <linux/module.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <linux/mtd/ndfc.h> #include <linux/slab.h> @@ -146,7 +145,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc, 
chip->controller = &ndfc->ndfc_control; chip->legacy.read_buf = ndfc_read_buf; chip->legacy.write_buf = ndfc_write_buf; - chip->ecc.correct = nand_correct_data; + chip->ecc.correct = rawnand_sw_hamming_correct; chip->ecc.hwctl = ndfc_enable_hwecc; chip->ecc.calculate = ndfc_calculate_ecc; chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c index 512f60780a50..fbb9955f2467 100644 --- a/drivers/mtd/nand/raw/omap2.c +++ b/drivers/mtd/nand/raw/omap2.c @@ -23,7 +23,6 @@ #include <linux/of.h> #include <linux/of_device.h> -#include <linux/mtd/nand_bch.h> #include <linux/platform_data/elm.h> #include <linux/omap-gpmc.h> @@ -185,6 +184,7 @@ static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd) * @dma_mode: dma mode enable (1) or disable (0) * @u32_count: number of bytes to be transferred * @is_write: prefetch read(0) or write post(1) mode + * @info: NAND device structure containing platform data */ static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode, unsigned int u32_count, int is_write, struct omap_nand_info *info) @@ -214,7 +214,7 @@ static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode, return 0; } -/** +/* * omap_prefetch_reset - disables and stops the prefetch engine */ static int omap_prefetch_reset(int cs, struct omap_nand_info *info) @@ -939,7 +939,7 @@ static int omap_calculate_ecc(struct nand_chip *chip, const u_char *dat, /** * omap_enable_hwecc - This function enables the hardware ecc functionality - * @mtd: MTD device structure + * @chip: NAND chip object * @mode: Read/Write mode */ static void omap_enable_hwecc(struct nand_chip *chip, int mode) @@ -1009,7 +1009,7 @@ static int omap_wait(struct nand_chip *this) /** * omap_dev_ready - checks the NAND Ready GPIO line - * @mtd: MTD device structure + * @chip: NAND chip object * * Returns true if ready and false if busy. */ @@ -1022,7 +1022,7 @@ static int omap_dev_ready(struct nand_chip *chip) /** * omap_enable_hwecc_bch - Program GPMC to perform BCH ECC calculation - * @mtd: MTD device structure + * @chip: NAND chip object * @mode: Read/Write mode * * When using BCH with SW correction (i.e. no ELM), sector size is set @@ -1131,7 +1131,7 @@ static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2, * _omap_calculate_ecc_bch - Generate ECC bytes for one sector * @mtd: MTD device structure * @dat: The pointer to data on which ecc is computed - * @ecc_code: The ecc_code buffer + * @ecc_calc: The ecc_code buffer * @i: The sector number (for a multi sector page) * * Support calculating of BCH4/8/16 ECC vectors for one sector @@ -1259,7 +1259,7 @@ static int _omap_calculate_ecc_bch(struct mtd_info *mtd, * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction * @chip: NAND chip object * @dat: The pointer to data on which ecc is computed - * @ecc_code: The ecc_code buffer + * @ecc_calc: Buffer storing the calculated ECC bytes * * Support calculating of BCH4/8/16 ECC vectors for one sector. This is used * when SW based correction is required as ECC is required for one sector @@ -1275,7 +1275,7 @@ static int omap_calculate_ecc_bch_sw(struct nand_chip *chip, * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors * @mtd: MTD device structure * @dat: The pointer to data on which ecc is computed - * @ecc_code: The ecc_code buffer + * @ecc_calc: Buffer storing the calculated ECC bytes * * Support calculating of BCH4/8/16 ecc vectors for the entire page in one go. 
*/ @@ -1674,7 +1674,8 @@ static int omap_read_page_bch(struct nand_chip *chip, uint8_t *buf, /** * is_elm_present - checks for presence of ELM module by scanning DT nodes - * @omap_nand_info: NAND device structure containing platform data + * @info: NAND device structure containing platform data + * @elm_node: ELM's DT node */ static bool is_elm_present(struct omap_nand_info *info, struct device_node *elm_node) @@ -2041,16 +2042,16 @@ static int omap_nand_attach_chip(struct nand_chip *chip) chip->ecc.bytes = 7; chip->ecc.strength = 4; chip->ecc.hwctl = omap_enable_hwecc_bch; - chip->ecc.correct = nand_bch_correct_data; + chip->ecc.correct = rawnand_sw_bch_correct; chip->ecc.calculate = omap_calculate_ecc_bch_sw; mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); /* Reserve one byte for the OMAP marker */ oobbytes_per_step = chip->ecc.bytes + 1; /* Software BCH library is used for locating errors */ - chip->ecc.priv = nand_bch_init(mtd); - if (!chip->ecc.priv) { + err = rawnand_sw_bch_init(chip); + if (err) { dev_err(dev, "Unable to use BCH library\n"); - return -EINVAL; + return err; } break; @@ -2083,16 +2084,16 @@ static int omap_nand_attach_chip(struct nand_chip *chip) chip->ecc.bytes = 13; chip->ecc.strength = 8; chip->ecc.hwctl = omap_enable_hwecc_bch; - chip->ecc.correct = nand_bch_correct_data; + chip->ecc.correct = rawnand_sw_bch_correct; chip->ecc.calculate = omap_calculate_ecc_bch_sw; mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); /* Reserve one byte for the OMAP marker */ oobbytes_per_step = chip->ecc.bytes + 1; /* Software BCH library is used for locating errors */ - chip->ecc.priv = nand_bch_init(mtd); - if (!chip->ecc.priv) { + err = rawnand_sw_bch_init(chip); + if (err) { dev_err(dev, "unable to use BCH library\n"); - return -EINVAL; + return err; } break; @@ -2195,7 +2196,6 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip = &info->nand; mtd = nand_to_mtd(nand_chip); mtd->dev.parent = &pdev->dev; - nand_chip->ecc.priv = NULL; nand_set_flash_node(nand_chip, dev->of_node); if (!mtd->name) { @@ -2271,10 +2271,9 @@ cleanup_nand: return_error: if (!IS_ERR_OR_NULL(info->dma)) dma_release_channel(info->dma); - if (nand_chip->ecc.priv) { - nand_bch_free(nand_chip->ecc.priv); - nand_chip->ecc.priv = NULL; - } + + rawnand_sw_bch_cleanup(nand_chip); + return err; } @@ -2285,10 +2284,8 @@ static int omap_nand_remove(struct platform_device *pdev) struct omap_nand_info *info = mtd_to_omap(mtd); int ret; - if (nand_chip->ecc.priv) { - nand_bch_free(nand_chip->ecc.priv); - nand_chip->ecc.priv = NULL; - } + rawnand_sw_bch_cleanup(nand_chip); + if (info->dma) dma_release_channel(info->dma); ret = mtd_device_unregister(mtd); diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c index 4b799521a427..550695a4c1ab 100644 --- a/drivers/mtd/nand/raw/omap_elm.c +++ b/drivers/mtd/nand/raw/omap_elm.c @@ -96,6 +96,9 @@ static u32 elm_read_reg(struct elm_info *info, int offset) * elm_config - Configure ELM module * @dev: ELM device * @bch_type: Type of BCH ecc + * @ecc_steps: ECC steps to assign to config + * @ecc_step_size: ECC step size to assign to config + * @ecc_syndrome_size: ECC syndrome size to assign to config */ int elm_config(struct device *dev, enum bch_ecc bch_type, int ecc_steps, int ecc_step_size, int ecc_syndrome_size) @@ -432,7 +435,7 @@ static int elm_remove(struct platform_device *pdev) } #ifdef CONFIG_PM_SLEEP -/** +/* * elm_context_save * saves ELM configurations to preserve them across Hardware powered-down */ @@ -480,7 +483,7 @@ static 
int elm_context_save(struct elm_info *info) return 0; } -/** +/* * elm_context_restore * writes configurations saved duing power-down back into ELM registers */ diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c index e3bb65fd3ab2..66211c9311d2 100644 --- a/drivers/mtd/nand/raw/orion_nand.c +++ b/drivers/mtd/nand/raw/orion_nand.c @@ -86,7 +86,9 @@ static void orion_nand_read_buf(struct nand_chip *chip, uint8_t *buf, int len) static int orion_nand_attach_chip(struct nand_chip *chip) { chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; + + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; return 0; } diff --git a/drivers/mtd/nand/raw/pasemi_nand.c b/drivers/mtd/nand/raw/pasemi_nand.c index 4dfff34800f4..789f33312c15 100644 --- a/drivers/mtd/nand/raw/pasemi_nand.c +++ b/drivers/mtd/nand/raw/pasemi_nand.c @@ -14,7 +14,6 @@ #include <linux/module.h> #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> @@ -77,7 +76,9 @@ static int pasemi_device_ready(struct nand_chip *chip) static int pasemi_attach_chip(struct nand_chip *chip) { chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; + + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; return 0; } diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c index 93d9f1694dc1..7711e1020c21 100644 --- a/drivers/mtd/nand/raw/plat_nand.c +++ b/drivers/mtd/nand/raw/plat_nand.c @@ -22,7 +22,9 @@ struct plat_nand_data { static int plat_nand_attach_chip(struct nand_chip *chip) { chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; + + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; return 0; } diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index 777fb0de0680..667e4bfe369f 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -145,6 +145,7 @@ #define OP_PAGE_READ 0x2 #define OP_PAGE_READ_WITH_ECC 0x3 #define OP_PAGE_READ_WITH_ECC_SPARE 0x4 +#define OP_PAGE_READ_ONFI_READ 0x5 #define OP_PROGRAM_PAGE 0x6 #define OP_PAGE_PROGRAM_WITH_ECC 0x7 #define OP_PROGRAM_PAGE_SPARE 0x9 @@ -460,12 +461,14 @@ struct qcom_nand_host { * @ecc_modes - ecc mode for NAND * @is_bam - whether NAND controller is using BAM * @is_qpic - whether NAND CTRL is part of qpic IP + * @qpic_v2 - flag to indicate QPIC IP version 2 * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset */ struct qcom_nandc_props { u32 ecc_modes; bool is_bam; bool is_qpic; + bool qpic_v2; u32 dev_cmd_reg_start; }; @@ -1164,7 +1167,13 @@ static int nandc_param(struct qcom_nand_host *host) * in use. 
we configure the controller to perform a raw read of 512 * bytes to read onfi params */ - nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE); + if (nandc->props->qpic_v2) + nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ_ONFI_READ | + PAGE_ACC | LAST_PAGE); + else + nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | + PAGE_ACC | LAST_PAGE); + nandc_set_reg(nandc, NAND_ADDR0, 0); nandc_set_reg(nandc, NAND_ADDR1, 0); nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE @@ -1180,21 +1189,28 @@ static int nandc_param(struct qcom_nand_host *host) | 1 << DEV0_CFG1_ECC_DISABLE); nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE); - /* configure CMD1 and VLD for ONFI param probing */ - nandc_set_reg(nandc, NAND_DEV_CMD_VLD, - (nandc->vld & ~READ_START_VLD)); - nandc_set_reg(nandc, NAND_DEV_CMD1, - (nandc->cmd1 & ~(0xFF << READ_ADDR)) - | NAND_CMD_PARAM << READ_ADDR); + /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */ + if (!nandc->props->qpic_v2) { + nandc_set_reg(nandc, NAND_DEV_CMD_VLD, + (nandc->vld & ~READ_START_VLD)); + nandc_set_reg(nandc, NAND_DEV_CMD1, + (nandc->cmd1 & ~(0xFF << READ_ADDR)) + | NAND_CMD_PARAM << READ_ADDR); + } nandc_set_reg(nandc, NAND_EXEC_CMD, 1); - nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1); - nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld); + if (!nandc->props->qpic_v2) { + nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1); + nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld); + } + nandc_set_read_loc(nandc, 0, 0, 512, 1); - write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0); - write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL); + if (!nandc->props->qpic_v2) { + write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0); + write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL); + } nandc->buf_count = 512; memset(nandc->data_buffer, 0xff, nandc->buf_count); @@ -1205,8 +1221,10 @@ static int nandc_param(struct qcom_nand_host *host) nandc->buf_count, 0); /* restore CMD1 and VLD regs */ - write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0); - write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL); + if (!nandc->props->qpic_v2) { + write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0); + write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL); + } return 0; } @@ -1570,6 +1588,8 @@ static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt) struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); int i; + nandc_read_buffer_sync(nandc, true); + for (i = 0; i < cw_cnt; i++) { u32 flash = le32_to_cpu(nandc->reg_read_buf[i]); @@ -2770,8 +2790,10 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc) /* kill onenand */ if (!nandc->props->is_qpic) nandc_write(nandc, SFLASHC_BURST_CFG, 0); - nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD), - NAND_DEV_CMD_VLD_VAL); + + if (!nandc->props->qpic_v2) + nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD), + NAND_DEV_CMD_VLD_VAL); /* enable ADM or BAM DMA */ if (nandc->props->is_bam) { @@ -2791,8 +2813,10 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc) } /* save the original values of these registers */ - nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1)); - nandc->vld = NAND_DEV_CMD_VLD_VAL; + if (!nandc->props->qpic_v2) { + nandc->cmd1 = nandc_read(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD1)); + nandc->vld = NAND_DEV_CMD_VLD_VAL; + } return 0; } @@ -3050,6 +3074,14 @@ static const struct qcom_nandc_props ipq8074_nandc_props = { 
.dev_cmd_reg_start = 0x7000, }; +static const struct qcom_nandc_props sdx55_nandc_props = { + .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT), + .is_bam = true, + .is_qpic = true, + .qpic_v2 = true, + .dev_cmd_reg_start = 0x7000, +}; + /* * data will hold a struct pointer containing more differences once we support * more controller variants @@ -3064,9 +3096,17 @@ static const struct of_device_id qcom_nandc_of_match[] = { .data = &ipq4019_nandc_props, }, { + .compatible = "qcom,ipq6018-nand", + .data = &ipq8074_nandc_props, + }, + { .compatible = "qcom,ipq8074-nand", .data = &ipq8074_nandc_props, }, + { + .compatible = "qcom,sdx55-nand", + .data = &sdx55_nandc_props, + }, {} }; MODULE_DEVICE_TABLE(of, qcom_nandc_of_match); diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c new file mode 100644 index 000000000000..796b678cb108 --- /dev/null +++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c @@ -0,0 +1,1495 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Rockchip NAND Flash controller driver. + * Copyright (C) 2020 Rockchip Inc. + * Author: Yifeng Zhao <yifeng.zhao@rock-chips.com> + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/interrupt.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/rawnand.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +/* + * NFC Page Data Layout: + * 1024 bytes data + 4Bytes sys data + 28Bytes~124Bytes ECC data + + * 1024 bytes data + 4Bytes sys data + 28Bytes~124Bytes ECC data + + * ...... + * NAND Page Data Layout: + * 1024 * n data + m Bytes oob + * Original Bad Block Mask Location: + * First byte of oob(spare). + * nand_chip->oob_poi data layout: + * 4Bytes sys data + .... + 4Bytes sys data + ECC data. + */ + +/* NAND controller register definition */ +#define NFC_READ (0) +#define NFC_WRITE (1) + +#define NFC_FMCTL (0x00) +#define FMCTL_CE_SEL_M 0xFF +#define FMCTL_CE_SEL(x) (1 << (x)) +#define FMCTL_WP BIT(8) +#define FMCTL_RDY BIT(9) + +#define NFC_FMWAIT (0x04) +#define FLCTL_RST BIT(0) +#define FLCTL_WR (1) /* 0: read, 1: write */ +#define FLCTL_XFER_ST BIT(2) +#define FLCTL_XFER_EN BIT(3) +#define FLCTL_ACORRECT BIT(10) /* Auto correct error bits. */ +#define FLCTL_XFER_READY BIT(20) +#define FLCTL_XFER_SECTOR (22) +#define FLCTL_TOG_FIX BIT(29) + +#define BCHCTL_BANK_M (7 << 5) +#define BCHCTL_BANK (5) + +#define DMA_ST BIT(0) +#define DMA_WR (1) /* 0: write, 1: read */ +#define DMA_EN BIT(2) +#define DMA_AHB_SIZE (3) /* 0: 1, 1: 2, 2: 4 */ +#define DMA_BURST_SIZE (6) /* 0: 1, 3: 4, 5: 8, 7: 16 */ +#define DMA_INC_NUM (9) /* 1 - 16 */ + +#define ECC_ERR_CNT(x, e) ((((x) >> (e).low) & (e).low_mask) |\ + (((x) >> (e).high) & (e).high_mask) << (e).low_bn) +#define INT_DMA BIT(0) +#define NFC_BANK (0x800) +#define NFC_BANK_STEP (0x100) +#define BANK_DATA (0x00) +#define BANK_ADDR (0x04) +#define BANK_CMD (0x08) +#define NFC_SRAM0 (0x1000) +#define NFC_SRAM1 (0x1400) +#define NFC_SRAM_SIZE (0x400) +#define NFC_TIMEOUT (500000) +#define NFC_MAX_OOB_PER_STEP 128 +#define NFC_MIN_OOB_PER_STEP 64 +#define MAX_DATA_SIZE 0xFFFC +#define MAX_ADDRESS_CYC 6 +#define NFC_ECC_MAX_MODES 4 +#define NFC_MAX_NSELS (8) /* Some Socs only have 1 or 2 CSs. 
*/ +#define NFC_SYS_DATA_SIZE (4) /* 4 bytes sys data in oob pre 1024 data.*/ +#define RK_DEFAULT_CLOCK_RATE (150 * 1000 * 1000) /* 150 Mhz */ +#define ACCTIMING(csrw, rwpw, rwcs) ((csrw) << 12 | (rwpw) << 5 | (rwcs)) + +enum nfc_type { + NFC_V6, + NFC_V8, + NFC_V9, +}; + +/** + * struct rk_ecc_cnt_status: represent a ecc status data. + * @err_flag_bit: error flag bit index at register. + * @low: ECC count low bit index at register. + * @low_mask: mask bit. + * @low_bn: ECC count low bit number. + * @high: ECC count high bit index at register. + * @high_mask: mask bit + */ +struct ecc_cnt_status { + u8 err_flag_bit; + u8 low; + u8 low_mask; + u8 low_bn; + u8 high; + u8 high_mask; +}; + +/** + * @type: NFC version + * @ecc_strengths: ECC strengths + * @ecc_cfgs: ECC config values + * @flctl_off: FLCTL register offset + * @bchctl_off: BCHCTL register offset + * @dma_data_buf_off: DMA_DATA_BUF register offset + * @dma_oob_buf_off: DMA_OOB_BUF register offset + * @dma_cfg_off: DMA_CFG register offset + * @dma_st_off: DMA_ST register offset + * @bch_st_off: BCG_ST register offset + * @randmz_off: RANDMZ register offset + * @int_en_off: interrupt enable register offset + * @int_clr_off: interrupt clean register offset + * @int_st_off: interrupt status register offset + * @oob0_off: oob0 register offset + * @oob1_off: oob1 register offset + * @ecc0: represent ECC0 status data + * @ecc1: represent ECC1 status data + */ +struct nfc_cfg { + enum nfc_type type; + u8 ecc_strengths[NFC_ECC_MAX_MODES]; + u32 ecc_cfgs[NFC_ECC_MAX_MODES]; + u32 flctl_off; + u32 bchctl_off; + u32 dma_cfg_off; + u32 dma_data_buf_off; + u32 dma_oob_buf_off; + u32 dma_st_off; + u32 bch_st_off; + u32 randmz_off; + u32 int_en_off; + u32 int_clr_off; + u32 int_st_off; + u32 oob0_off; + u32 oob1_off; + struct ecc_cnt_status ecc0; + struct ecc_cnt_status ecc1; +}; + +struct rk_nfc_nand_chip { + struct list_head node; + struct nand_chip chip; + + u16 boot_blks; + u16 metadata_size; + u32 boot_ecc; + u32 timing; + + u8 nsels; + u8 sels[0]; + /* Nothing after this field. 
*/ +}; + +struct rk_nfc { + struct nand_controller controller; + const struct nfc_cfg *cfg; + struct device *dev; + + struct clk *nfc_clk; + struct clk *ahb_clk; + void __iomem *regs; + + u32 selected_bank; + u32 band_offset; + u32 cur_ecc; + u32 cur_timing; + + struct completion done; + struct list_head chips; + + u8 *page_buf; + u32 *oob_buf; + u32 page_buf_size; + u32 oob_buf_size; + + unsigned long assigned_cs; +}; + +static inline struct rk_nfc_nand_chip *rk_nfc_to_rknand(struct nand_chip *chip) +{ + return container_of(chip, struct rk_nfc_nand_chip, chip); +} + +static inline u8 *rk_nfc_buf_to_data_ptr(struct nand_chip *chip, const u8 *p, int i) +{ + return (u8 *)p + i * chip->ecc.size; +} + +static inline u8 *rk_nfc_buf_to_oob_ptr(struct nand_chip *chip, int i) +{ + u8 *poi; + + poi = chip->oob_poi + i * NFC_SYS_DATA_SIZE; + + return poi; +} + +static inline u8 *rk_nfc_buf_to_oob_ecc_ptr(struct nand_chip *chip, int i) +{ + struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip); + u8 *poi; + + poi = chip->oob_poi + rknand->metadata_size + chip->ecc.bytes * i; + + return poi; +} + +static inline int rk_nfc_data_len(struct nand_chip *chip) +{ + return chip->ecc.size + chip->ecc.bytes + NFC_SYS_DATA_SIZE; +} + +static inline u8 *rk_nfc_data_ptr(struct nand_chip *chip, int i) +{ + struct rk_nfc *nfc = nand_get_controller_data(chip); + + return nfc->page_buf + i * rk_nfc_data_len(chip); +} + +static inline u8 *rk_nfc_oob_ptr(struct nand_chip *chip, int i) +{ + struct rk_nfc *nfc = nand_get_controller_data(chip); + + return nfc->page_buf + i * rk_nfc_data_len(chip) + chip->ecc.size; +} + +static int rk_nfc_hw_ecc_setup(struct nand_chip *chip, u32 strength) +{ + struct rk_nfc *nfc = nand_get_controller_data(chip); + u32 reg, i; + + for (i = 0; i < NFC_ECC_MAX_MODES; i++) { + if (strength == nfc->cfg->ecc_strengths[i]) { + reg = nfc->cfg->ecc_cfgs[i]; + break; + } + } + + if (i >= NFC_ECC_MAX_MODES) + return -EINVAL; + + writel(reg, nfc->regs + nfc->cfg->bchctl_off); + + /* Save chip ECC setting */ + nfc->cur_ecc = strength; + + return 0; +} + +static void rk_nfc_select_chip(struct nand_chip *chip, int cs) +{ + struct rk_nfc *nfc = nand_get_controller_data(chip); + struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + u32 val; + + if (cs < 0) { + nfc->selected_bank = -1; + /* Deselect the currently selected target. */ + val = readl_relaxed(nfc->regs + NFC_FMCTL); + val &= ~FMCTL_CE_SEL_M; + writel(val, nfc->regs + NFC_FMCTL); + return; + } + + nfc->selected_bank = rknand->sels[cs]; + nfc->band_offset = NFC_BANK + nfc->selected_bank * NFC_BANK_STEP; + + val = readl_relaxed(nfc->regs + NFC_FMCTL); + val &= ~FMCTL_CE_SEL_M; + val |= FMCTL_CE_SEL(nfc->selected_bank); + + writel(val, nfc->regs + NFC_FMCTL); + + /* + * Compare current chip timing with selected chip timing and + * change if needed. + */ + if (nfc->cur_timing != rknand->timing) { + writel(rknand->timing, nfc->regs + NFC_FMWAIT); + nfc->cur_timing = rknand->timing; + } + + /* + * Compare current chip ECC setting with selected chip ECC setting and + * change if needed. 
+ */ + if (nfc->cur_ecc != ecc->strength) + rk_nfc_hw_ecc_setup(chip, ecc->strength); +} + +static inline int rk_nfc_wait_ioready(struct rk_nfc *nfc) +{ + int rc; + u32 val; + + rc = readl_relaxed_poll_timeout(nfc->regs + NFC_FMCTL, val, + val & FMCTL_RDY, 10, NFC_TIMEOUT); + + return rc; +} + +static void rk_nfc_read_buf(struct rk_nfc *nfc, u8 *buf, int len) +{ + int i; + + for (i = 0; i < len; i++) + buf[i] = readb_relaxed(nfc->regs + nfc->band_offset + + BANK_DATA); +} + +static void rk_nfc_write_buf(struct rk_nfc *nfc, const u8 *buf, int len) +{ + int i; + + for (i = 0; i < len; i++) + writeb(buf[i], nfc->regs + nfc->band_offset + BANK_DATA); +} + +static int rk_nfc_cmd(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct rk_nfc *nfc = nand_get_controller_data(chip); + unsigned int i, j, remaining, start; + int reg_offset = nfc->band_offset; + u8 *inbuf = NULL; + const u8 *outbuf; + u32 cnt = 0; + int ret = 0; + + for (i = 0; i < subop->ninstrs; i++) { + const struct nand_op_instr *instr = &subop->instrs[i]; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + writeb(instr->ctx.cmd.opcode, + nfc->regs + reg_offset + BANK_CMD); + break; + + case NAND_OP_ADDR_INSTR: + remaining = nand_subop_get_num_addr_cyc(subop, i); + start = nand_subop_get_addr_start_off(subop, i); + + for (j = 0; j < 8 && j + start < remaining; j++) + writeb(instr->ctx.addr.addrs[j + start], + nfc->regs + reg_offset + BANK_ADDR); + break; + + case NAND_OP_DATA_IN_INSTR: + case NAND_OP_DATA_OUT_INSTR: + start = nand_subop_get_data_start_off(subop, i); + cnt = nand_subop_get_data_len(subop, i); + + if (instr->type == NAND_OP_DATA_OUT_INSTR) { + outbuf = instr->ctx.data.buf.out + start; + rk_nfc_write_buf(nfc, outbuf, cnt); + } else { + inbuf = instr->ctx.data.buf.in + start; + rk_nfc_read_buf(nfc, inbuf, cnt); + } + break; + + case NAND_OP_WAITRDY_INSTR: + if (rk_nfc_wait_ioready(nfc) < 0) { + ret = -ETIMEDOUT; + dev_err(nfc->dev, "IO not ready\n"); + } + break; + } + } + + return ret; +} + +static const struct nand_op_parser rk_nfc_op_parser = NAND_OP_PARSER( + NAND_OP_PARSER_PATTERN( + rk_nfc_cmd, + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC), + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, MAX_DATA_SIZE)), + NAND_OP_PARSER_PATTERN( + rk_nfc_cmd, + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC), + NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, MAX_DATA_SIZE), + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)), +); + +static int rk_nfc_exec_op(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only) +{ + if (!check_only) + rk_nfc_select_chip(chip, op->cs); + + return nand_op_parser_exec_op(chip, &rk_nfc_op_parser, op, + check_only); +} + +static int rk_nfc_setup_interface(struct nand_chip *chip, int target, + const struct nand_interface_config *conf) +{ + struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip); + struct rk_nfc *nfc = nand_get_controller_data(chip); + const struct nand_sdr_timings *timings; + u32 rate, tc2rw, trwpw, trw2c; + u32 temp; + + if (target < 0) + return 0; + + timings = nand_get_sdr_timings(conf); + if (IS_ERR(timings)) + return -EOPNOTSUPP; + + if (IS_ERR(nfc->nfc_clk)) + rate = clk_get_rate(nfc->ahb_clk); + else + rate = clk_get_rate(nfc->nfc_clk); + + /* Turn clock rate into kHz. 
*/ + rate /= 1000; + + tc2rw = 1; + trw2c = 1; + + trwpw = max(timings->tWC_min, timings->tRC_min) / 1000; + trwpw = DIV_ROUND_UP(trwpw * rate, 1000000); + + temp = timings->tREA_max / 1000; + temp = DIV_ROUND_UP(temp * rate, 1000000); + + if (trwpw < temp) + trwpw = temp; + + /* + * ACCON: access timing control register + * ------------------------------------- + * 31:18: reserved + * 17:12: csrw, clock cycles from the falling edge of CSn to the + * falling edge of RDn or WRn + * 11:11: reserved + * 10:05: rwpw, the width of RDn or WRn in processor clock cycles + * 04:00: rwcs, clock cycles from the rising edge of RDn or WRn to the + * rising edge of CSn + */ + + /* Save chip timing */ + rknand->timing = ACCTIMING(tc2rw, trwpw, trw2c); + + return 0; +} + +static void rk_nfc_xfer_start(struct rk_nfc *nfc, u8 rw, u8 n_KB, + dma_addr_t dma_data, dma_addr_t dma_oob) +{ + u32 dma_reg, fl_reg, bch_reg; + + dma_reg = DMA_ST | ((!rw) << DMA_WR) | DMA_EN | (2 << DMA_AHB_SIZE) | + (7 << DMA_BURST_SIZE) | (16 << DMA_INC_NUM); + + fl_reg = (rw << FLCTL_WR) | FLCTL_XFER_EN | FLCTL_ACORRECT | + (n_KB << FLCTL_XFER_SECTOR) | FLCTL_TOG_FIX; + + if (nfc->cfg->type == NFC_V6 || nfc->cfg->type == NFC_V8) { + bch_reg = readl_relaxed(nfc->regs + nfc->cfg->bchctl_off); + bch_reg = (bch_reg & (~BCHCTL_BANK_M)) | + (nfc->selected_bank << BCHCTL_BANK); + writel(bch_reg, nfc->regs + nfc->cfg->bchctl_off); + } + + writel(dma_reg, nfc->regs + nfc->cfg->dma_cfg_off); + writel((u32)dma_data, nfc->regs + nfc->cfg->dma_data_buf_off); + writel((u32)dma_oob, nfc->regs + nfc->cfg->dma_oob_buf_off); + writel(fl_reg, nfc->regs + nfc->cfg->flctl_off); + fl_reg |= FLCTL_XFER_ST; + writel(fl_reg, nfc->regs + nfc->cfg->flctl_off); +} + +static int rk_nfc_wait_for_xfer_done(struct rk_nfc *nfc) +{ + void __iomem *ptr; + u32 reg; + + ptr = nfc->regs + nfc->cfg->flctl_off; + + return readl_relaxed_poll_timeout(ptr, reg, + reg & FLCTL_XFER_READY, + 10, NFC_TIMEOUT); +} + +static int rk_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf, + int oob_on, int page) +{ + struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip); + struct rk_nfc *nfc = nand_get_controller_data(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + int i, pages_per_blk; + + pages_per_blk = mtd->erasesize / mtd->writesize; + if ((chip->options & NAND_IS_BOOT_MEDIUM) && + (page < (pages_per_blk * rknand->boot_blks)) && + rknand->boot_ecc != ecc->strength) { + /* + * There's currently no method to notify the MTD framework that + * a different ECC strength is in use for the boot blocks. + */ + return -EIO; + } + + if (!buf) + memset(nfc->page_buf, 0xff, mtd->writesize + mtd->oobsize); + + for (i = 0; i < ecc->steps; i++) { + /* Copy data to the NFC buffer. */ + if (buf) + memcpy(rk_nfc_data_ptr(chip, i), + rk_nfc_buf_to_data_ptr(chip, buf, i), + ecc->size); + /* + * The first four bytes of OOB are reserved for the + * boot ROM. In some debugging cases, such as with a + * read, erase and write back test these 4 bytes stored + * in OOB also need to be written back. + * + * The function nand_block_bad detects bad blocks like: + * + * bad = chip->oob_poi[chip->badblockpos]; + * + * chip->badblockpos == 0 for a large page NAND Flash, + * so chip->oob_poi[0] is the bad block mask (BBM). + * + * The OOB data layout on the NFC is: + * + * PA0 PA1 PA2 PA3 | BBM OOB1 OOB2 OOB3 | ... + * + * or + * + * 0xFF 0xFF 0xFF 0xFF | BBM OOB1 OOB2 OOB3 | ... 
+ * + * The code here just swaps the first 4 bytes with the last + * 4 bytes without losing any data. + * + * The chip->oob_poi data layout: + * + * BBM OOB1 OOB2 OOB3 |......| PA0 PA1 PA2 PA3 + * + * The rk_nfc_ooblayout_free() function already has reserved + * these 4 bytes with: + * + * oob_region->offset = NFC_SYS_DATA_SIZE + 2; + */ + if (!i) + memcpy(rk_nfc_oob_ptr(chip, i), + rk_nfc_buf_to_oob_ptr(chip, ecc->steps - 1), + NFC_SYS_DATA_SIZE); + else + memcpy(rk_nfc_oob_ptr(chip, i), + rk_nfc_buf_to_oob_ptr(chip, i - 1), + NFC_SYS_DATA_SIZE); + /* Copy ECC data to the NFC buffer. */ + memcpy(rk_nfc_oob_ptr(chip, i) + NFC_SYS_DATA_SIZE, + rk_nfc_buf_to_oob_ecc_ptr(chip, i), + ecc->bytes); + } + + nand_prog_page_begin_op(chip, page, 0, NULL, 0); + rk_nfc_write_buf(nfc, buf, mtd->writesize + mtd->oobsize); + return nand_prog_page_end_op(chip); +} + +static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf, + int oob_on, int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct rk_nfc *nfc = nand_get_controller_data(chip); + struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + int oob_step = (ecc->bytes > 60) ? NFC_MAX_OOB_PER_STEP : + NFC_MIN_OOB_PER_STEP; + int pages_per_blk = mtd->erasesize / mtd->writesize; + int ret = 0, i, boot_rom_mode = 0; + dma_addr_t dma_data, dma_oob; + u32 reg; + u8 *oob; + + nand_prog_page_begin_op(chip, page, 0, NULL, 0); + + if (buf) + memcpy(nfc->page_buf, buf, mtd->writesize); + else + memset(nfc->page_buf, 0xFF, mtd->writesize); + + /* + * The first blocks (4, 8 or 16 depending on the device) are used + * by the boot ROM and the first 32 bits of OOB need to link to + * the next page address in the same block. We can't directly copy + * OOB data from the MTD framework, because this page address + * conflicts for example with the bad block marker (BBM), + * so we shift all OOB data including the BBM with 4 byte positions. + * As a consequence the OOB size available to the MTD framework is + * also reduced with 4 bytes. + * + * PA0 PA1 PA2 PA3 | BBM OOB1 OOB2 OOB3 | ... + * + * If a NAND is not a boot medium or the page is not a boot block, + * the first 4 bytes are left untouched by writing 0xFF to them. + * + * 0xFF 0xFF 0xFF 0xFF | BBM OOB1 OOB2 OOB3 | ... + * + * Configure the ECC algorithm supported by the boot ROM. + */ + if ((page < (pages_per_blk * rknand->boot_blks)) && + (chip->options & NAND_IS_BOOT_MEDIUM)) { + boot_rom_mode = 1; + if (rknand->boot_ecc != ecc->strength) + rk_nfc_hw_ecc_setup(chip, rknand->boot_ecc); + } + + for (i = 0; i < ecc->steps; i++) { + if (!i) { + reg = 0xFFFFFFFF; + } else { + oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE; + reg = oob[0] | oob[1] << 8 | oob[2] << 16 | + oob[3] << 24; + } + + if (!i && boot_rom_mode) + reg = (page & (pages_per_blk - 1)) * 4; + + if (nfc->cfg->type == NFC_V9) + nfc->oob_buf[i] = reg; + else + nfc->oob_buf[i * (oob_step / 4)] = reg; + } + + dma_data = dma_map_single(nfc->dev, (void *)nfc->page_buf, + mtd->writesize, DMA_TO_DEVICE); + dma_oob = dma_map_single(nfc->dev, nfc->oob_buf, + ecc->steps * oob_step, + DMA_TO_DEVICE); + + reinit_completion(&nfc->done); + writel(INT_DMA, nfc->regs + nfc->cfg->int_en_off); + + rk_nfc_xfer_start(nfc, NFC_WRITE, ecc->steps, dma_data, + dma_oob); + ret = wait_for_completion_timeout(&nfc->done, + msecs_to_jiffies(100)); + if (!ret) + dev_warn(nfc->dev, "write: wait dma done timeout.\n"); + /* + * Whether the DMA transfer is completed or not. 
The driver + * needs to check the NFC`s status register to see if the data + * transfer was completed. + */ + ret = rk_nfc_wait_for_xfer_done(nfc); + + dma_unmap_single(nfc->dev, dma_data, mtd->writesize, + DMA_TO_DEVICE); + dma_unmap_single(nfc->dev, dma_oob, ecc->steps * oob_step, + DMA_TO_DEVICE); + + if (boot_rom_mode && rknand->boot_ecc != ecc->strength) + rk_nfc_hw_ecc_setup(chip, ecc->strength); + + if (ret) { + dev_err(nfc->dev, "write: wait transfer done timeout.\n"); + return -ETIMEDOUT; + } + + return nand_prog_page_end_op(chip); +} + +static int rk_nfc_write_oob(struct nand_chip *chip, int page) +{ + return rk_nfc_write_page_hwecc(chip, NULL, 1, page); +} + +static int rk_nfc_read_page_raw(struct nand_chip *chip, u8 *buf, int oob_on, + int page) +{ + struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip); + struct rk_nfc *nfc = nand_get_controller_data(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + int i, pages_per_blk; + + pages_per_blk = mtd->erasesize / mtd->writesize; + if ((chip->options & NAND_IS_BOOT_MEDIUM) && + (page < (pages_per_blk * rknand->boot_blks)) && + rknand->boot_ecc != ecc->strength) { + /* + * There's currently no method to notify the MTD framework that + * a different ECC strength is in use for the boot blocks. + */ + return -EIO; + } + + nand_read_page_op(chip, page, 0, NULL, 0); + rk_nfc_read_buf(nfc, nfc->page_buf, mtd->writesize + mtd->oobsize); + for (i = 0; i < ecc->steps; i++) { + /* + * The first four bytes of OOB are reserved for the + * boot ROM. In some debugging cases, such as with a read, + * erase and write back test, these 4 bytes also must be + * saved somewhere, otherwise this information will be + * lost during a write back. + */ + if (!i) + memcpy(rk_nfc_buf_to_oob_ptr(chip, ecc->steps - 1), + rk_nfc_oob_ptr(chip, i), + NFC_SYS_DATA_SIZE); + else + memcpy(rk_nfc_buf_to_oob_ptr(chip, i - 1), + rk_nfc_oob_ptr(chip, i), + NFC_SYS_DATA_SIZE); + + /* Copy ECC data from the NFC buffer. */ + memcpy(rk_nfc_buf_to_oob_ecc_ptr(chip, i), + rk_nfc_oob_ptr(chip, i) + NFC_SYS_DATA_SIZE, + ecc->bytes); + + /* Copy data from the NFC buffer. */ + if (buf) + memcpy(rk_nfc_buf_to_data_ptr(chip, buf, i), + rk_nfc_data_ptr(chip, i), + ecc->size); + } + + return 0; +} + +static int rk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *buf, int oob_on, + int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct rk_nfc *nfc = nand_get_controller_data(chip); + struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + int oob_step = (ecc->bytes > 60) ? NFC_MAX_OOB_PER_STEP : + NFC_MIN_OOB_PER_STEP; + int pages_per_blk = mtd->erasesize / mtd->writesize; + dma_addr_t dma_data, dma_oob; + int ret = 0, i, cnt, boot_rom_mode = 0; + int max_bitflips = 0, bch_st, ecc_fail = 0; + u8 *oob; + u32 tmp; + + nand_read_page_op(chip, page, 0, NULL, 0); + + dma_data = dma_map_single(nfc->dev, nfc->page_buf, + mtd->writesize, + DMA_FROM_DEVICE); + dma_oob = dma_map_single(nfc->dev, nfc->oob_buf, + ecc->steps * oob_step, + DMA_FROM_DEVICE); + + /* + * The first blocks (4, 8 or 16 depending on the device) + * are used by the boot ROM. + * Configure the ECC algorithm supported by the boot ROM. 
+ */ + if ((page < (pages_per_blk * rknand->boot_blks)) && + (chip->options & NAND_IS_BOOT_MEDIUM)) { + boot_rom_mode = 1; + if (rknand->boot_ecc != ecc->strength) + rk_nfc_hw_ecc_setup(chip, rknand->boot_ecc); + } + + reinit_completion(&nfc->done); + writel(INT_DMA, nfc->regs + nfc->cfg->int_en_off); + rk_nfc_xfer_start(nfc, NFC_READ, ecc->steps, dma_data, + dma_oob); + ret = wait_for_completion_timeout(&nfc->done, + msecs_to_jiffies(100)); + if (!ret) + dev_warn(nfc->dev, "read: wait dma done timeout.\n"); + /* + * Whether the DMA transfer is completed or not. The driver + * needs to check the NFC`s status register to see if the data + * transfer was completed. + */ + ret = rk_nfc_wait_for_xfer_done(nfc); + + dma_unmap_single(nfc->dev, dma_data, mtd->writesize, + DMA_FROM_DEVICE); + dma_unmap_single(nfc->dev, dma_oob, ecc->steps * oob_step, + DMA_FROM_DEVICE); + + if (ret) { + ret = -ETIMEDOUT; + dev_err(nfc->dev, "read: wait transfer done timeout.\n"); + goto timeout_err; + } + + for (i = 1; i < ecc->steps; i++) { + oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE; + if (nfc->cfg->type == NFC_V9) + tmp = nfc->oob_buf[i]; + else + tmp = nfc->oob_buf[i * (oob_step / 4)]; + *oob++ = (u8)tmp; + *oob++ = (u8)(tmp >> 8); + *oob++ = (u8)(tmp >> 16); + *oob++ = (u8)(tmp >> 24); + } + + for (i = 0; i < (ecc->steps / 2); i++) { + bch_st = readl_relaxed(nfc->regs + + nfc->cfg->bch_st_off + i * 4); + if (bch_st & BIT(nfc->cfg->ecc0.err_flag_bit) || + bch_st & BIT(nfc->cfg->ecc1.err_flag_bit)) { + mtd->ecc_stats.failed++; + ecc_fail = 1; + } else { + cnt = ECC_ERR_CNT(bch_st, nfc->cfg->ecc0); + mtd->ecc_stats.corrected += cnt; + max_bitflips = max_t(u32, max_bitflips, cnt); + + cnt = ECC_ERR_CNT(bch_st, nfc->cfg->ecc1); + mtd->ecc_stats.corrected += cnt; + max_bitflips = max_t(u32, max_bitflips, cnt); + } + } + + if (buf) + memcpy(buf, nfc->page_buf, mtd->writesize); + +timeout_err: + if (boot_rom_mode && rknand->boot_ecc != ecc->strength) + rk_nfc_hw_ecc_setup(chip, ecc->strength); + + if (ret) + return ret; + + if (ecc_fail) { + dev_err(nfc->dev, "read page: %x ecc error!\n", page); + return 0; + } + + return max_bitflips; +} + +static int rk_nfc_read_oob(struct nand_chip *chip, int page) +{ + return rk_nfc_read_page_hwecc(chip, NULL, 1, page); +} + +static inline void rk_nfc_hw_init(struct rk_nfc *nfc) +{ + /* Disable flash wp. */ + writel(FMCTL_WP, nfc->regs + NFC_FMCTL); + /* Config default timing 40ns at 150 Mhz NFC clock. */ + writel(0x1081, nfc->regs + NFC_FMWAIT); + nfc->cur_timing = 0x1081; + /* Disable randomizer and DMA. 
*/ + writel(0, nfc->regs + nfc->cfg->randmz_off); + writel(0, nfc->regs + nfc->cfg->dma_cfg_off); + writel(FLCTL_RST, nfc->regs + nfc->cfg->flctl_off); +} + +static irqreturn_t rk_nfc_irq(int irq, void *id) +{ + struct rk_nfc *nfc = id; + u32 sta, ien; + + sta = readl_relaxed(nfc->regs + nfc->cfg->int_st_off); + ien = readl_relaxed(nfc->regs + nfc->cfg->int_en_off); + + if (!(sta & ien)) + return IRQ_NONE; + + writel(sta, nfc->regs + nfc->cfg->int_clr_off); + writel(~sta & ien, nfc->regs + nfc->cfg->int_en_off); + + complete(&nfc->done); + + return IRQ_HANDLED; +} + +static int rk_nfc_enable_clks(struct device *dev, struct rk_nfc *nfc) +{ + int ret; + + if (!IS_ERR(nfc->nfc_clk)) { + ret = clk_prepare_enable(nfc->nfc_clk); + if (ret) { + dev_err(dev, "failed to enable NFC clk\n"); + return ret; + } + } + + ret = clk_prepare_enable(nfc->ahb_clk); + if (ret) { + dev_err(dev, "failed to enable ahb clk\n"); + if (!IS_ERR(nfc->nfc_clk)) + clk_disable_unprepare(nfc->nfc_clk); + return ret; + } + + return 0; +} + +static void rk_nfc_disable_clks(struct rk_nfc *nfc) +{ + if (!IS_ERR(nfc->nfc_clk)) + clk_disable_unprepare(nfc->nfc_clk); + clk_disable_unprepare(nfc->ahb_clk); +} + +static int rk_nfc_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oob_region) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip); + + if (section) + return -ERANGE; + + /* + * The beginning of the OOB area stores the reserved data for the NFC, + * the size of the reserved data is NFC_SYS_DATA_SIZE bytes. + */ + oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2; + oob_region->offset = NFC_SYS_DATA_SIZE + 2; + + return 0; +} + +static int rk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oob_region) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip); + + if (section) + return -ERANGE; + + oob_region->length = mtd->oobsize - rknand->metadata_size; + oob_region->offset = rknand->metadata_size; + + return 0; +} + +static const struct mtd_ooblayout_ops rk_nfc_ooblayout_ops = { + .free = rk_nfc_ooblayout_free, + .ecc = rk_nfc_ooblayout_ecc, +}; + +static int rk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct rk_nfc *nfc = nand_get_controller_data(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + const u8 *strengths = nfc->cfg->ecc_strengths; + u8 max_strength, nfc_max_strength; + int i; + + nfc_max_strength = nfc->cfg->ecc_strengths[0]; + /* If optional dt settings not present. */ + if (!ecc->size || !ecc->strength || + ecc->strength > nfc_max_strength) { + chip->ecc.size = 1024; + ecc->steps = mtd->writesize / ecc->size; + + /* + * HW ECC always requests the number of ECC bytes per 1024 byte + * blocks. The first 4 OOB bytes are reserved for sys data. 
+ */ + max_strength = ((mtd->oobsize / ecc->steps) - 4) * 8 / + fls(8 * 1024); + if (max_strength > nfc_max_strength) + max_strength = nfc_max_strength; + + for (i = 0; i < 4; i++) { + if (max_strength >= strengths[i]) + break; + } + + if (i >= 4) { + dev_err(nfc->dev, "unsupported ECC strength\n"); + return -EOPNOTSUPP; + } + + ecc->strength = strengths[i]; + } + ecc->steps = mtd->writesize / ecc->size; + ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * chip->ecc.size), 8); + + return 0; +} + +static int rk_nfc_attach_chip(struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct device *dev = mtd->dev.parent; + struct rk_nfc *nfc = nand_get_controller_data(chip); + struct rk_nfc_nand_chip *rknand = rk_nfc_to_rknand(chip); + struct nand_ecc_ctrl *ecc = &chip->ecc; + int new_page_len, new_oob_len; + void *buf; + int ret; + + if (chip->options & NAND_BUSWIDTH_16) { + dev_err(dev, "16 bits bus width not supported"); + return -EINVAL; + } + + if (ecc->engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) + return 0; + + ret = rk_nfc_ecc_init(dev, mtd); + if (ret) + return ret; + + rknand->metadata_size = NFC_SYS_DATA_SIZE * ecc->steps; + + if (rknand->metadata_size < NFC_SYS_DATA_SIZE + 2) { + dev_err(dev, + "driver needs at least %d bytes of meta data\n", + NFC_SYS_DATA_SIZE + 2); + return -EIO; + } + + /* Check buffer first, avoid duplicate alloc buffer. */ + new_page_len = mtd->writesize + mtd->oobsize; + if (nfc->page_buf && new_page_len > nfc->page_buf_size) { + buf = krealloc(nfc->page_buf, new_page_len, + GFP_KERNEL | GFP_DMA); + if (!buf) + return -ENOMEM; + nfc->page_buf = buf; + nfc->page_buf_size = new_page_len; + } + + new_oob_len = ecc->steps * NFC_MAX_OOB_PER_STEP; + if (nfc->oob_buf && new_oob_len > nfc->oob_buf_size) { + buf = krealloc(nfc->oob_buf, new_oob_len, + GFP_KERNEL | GFP_DMA); + if (!buf) { + kfree(nfc->page_buf); + nfc->page_buf = NULL; + return -ENOMEM; + } + nfc->oob_buf = buf; + nfc->oob_buf_size = new_oob_len; + } + + if (!nfc->page_buf) { + nfc->page_buf = kzalloc(new_page_len, GFP_KERNEL | GFP_DMA); + if (!nfc->page_buf) + return -ENOMEM; + nfc->page_buf_size = new_page_len; + } + + if (!nfc->oob_buf) { + nfc->oob_buf = kzalloc(new_oob_len, GFP_KERNEL | GFP_DMA); + if (!nfc->oob_buf) { + kfree(nfc->page_buf); + nfc->page_buf = NULL; + return -ENOMEM; + } + nfc->oob_buf_size = new_oob_len; + } + + chip->ecc.write_page_raw = rk_nfc_write_page_raw; + chip->ecc.write_page = rk_nfc_write_page_hwecc; + chip->ecc.write_oob = rk_nfc_write_oob; + + chip->ecc.read_page_raw = rk_nfc_read_page_raw; + chip->ecc.read_page = rk_nfc_read_page_hwecc; + chip->ecc.read_oob = rk_nfc_read_oob; + + return 0; +} + +static const struct nand_controller_ops rk_nfc_controller_ops = { + .attach_chip = rk_nfc_attach_chip, + .exec_op = rk_nfc_exec_op, + .setup_interface = rk_nfc_setup_interface, +}; + +static int rk_nfc_nand_chip_init(struct device *dev, struct rk_nfc *nfc, + struct device_node *np) +{ + struct rk_nfc_nand_chip *rknand; + struct nand_chip *chip; + struct mtd_info *mtd; + int nsels; + u32 tmp; + int ret; + int i; + + if (!of_get_property(np, "reg", &nsels)) + return -ENODEV; + nsels /= sizeof(u32); + if (!nsels || nsels > NFC_MAX_NSELS) { + dev_err(dev, "invalid reg property size %d\n", nsels); + return -EINVAL; + } + + rknand = devm_kzalloc(dev, sizeof(*rknand) + nsels * sizeof(u8), + GFP_KERNEL); + if (!rknand) + return -ENOMEM; + + rknand->nsels = nsels; + for (i = 0; i < nsels; i++) { + ret = of_property_read_u32_index(np, "reg", i, &tmp); + if (ret) { + 
dev_err(dev, "reg property failure : %d\n", ret); + return ret; + } + + if (tmp >= NFC_MAX_NSELS) { + dev_err(dev, "invalid CS: %u\n", tmp); + return -EINVAL; + } + + if (test_and_set_bit(tmp, &nfc->assigned_cs)) { + dev_err(dev, "CS %u already assigned\n", tmp); + return -EINVAL; + } + + rknand->sels[i] = tmp; + } + + chip = &rknand->chip; + chip->controller = &nfc->controller; + + nand_set_flash_node(chip, np); + + nand_set_controller_data(chip, nfc); + + chip->options |= NAND_USES_DMA | NAND_NO_SUBPAGE_WRITE; + chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB; + + /* Set default mode in case dt entry is missing. */ + chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; + + mtd = nand_to_mtd(chip); + mtd->owner = THIS_MODULE; + mtd->dev.parent = dev; + + if (!mtd->name) { + dev_err(nfc->dev, "NAND label property is mandatory\n"); + return -EINVAL; + } + + mtd_set_ooblayout(mtd, &rk_nfc_ooblayout_ops); + rk_nfc_hw_init(nfc); + ret = nand_scan(chip, nsels); + if (ret) + return ret; + + if (chip->options & NAND_IS_BOOT_MEDIUM) { + ret = of_property_read_u32(np, "rockchip,boot-blks", &tmp); + rknand->boot_blks = ret ? 0 : tmp; + + ret = of_property_read_u32(np, "rockchip,boot-ecc-strength", + &tmp); + rknand->boot_ecc = ret ? chip->ecc.strength : tmp; + } + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { + dev_err(dev, "MTD parse partition error\n"); + nand_cleanup(chip); + return ret; + } + + list_add_tail(&rknand->node, &nfc->chips); + + return 0; +} + +static void rk_nfc_chips_cleanup(struct rk_nfc *nfc) +{ + struct rk_nfc_nand_chip *rknand, *tmp; + struct nand_chip *chip; + int ret; + + list_for_each_entry_safe(rknand, tmp, &nfc->chips, node) { + chip = &rknand->chip; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + list_del(&rknand->node); + } +} + +static int rk_nfc_nand_chips_init(struct device *dev, struct rk_nfc *nfc) +{ + struct device_node *np = dev->of_node, *nand_np; + int nchips = of_get_child_count(np); + int ret; + + if (!nchips || nchips > NFC_MAX_NSELS) { + dev_err(nfc->dev, "incorrect number of NAND chips (%d)\n", + nchips); + return -EINVAL; + } + + for_each_child_of_node(np, nand_np) { + ret = rk_nfc_nand_chip_init(dev, nfc, nand_np); + if (ret) { + of_node_put(nand_np); + rk_nfc_chips_cleanup(nfc); + return ret; + } + } + + return 0; +} + +static struct nfc_cfg nfc_v6_cfg = { + .type = NFC_V6, + .ecc_strengths = {60, 40, 24, 16}, + .ecc_cfgs = { + 0x00040011, 0x00040001, 0x00000011, 0x00000001, + }, + .flctl_off = 0x08, + .bchctl_off = 0x0C, + .dma_cfg_off = 0x10, + .dma_data_buf_off = 0x14, + .dma_oob_buf_off = 0x18, + .dma_st_off = 0x1C, + .bch_st_off = 0x20, + .randmz_off = 0x150, + .int_en_off = 0x16C, + .int_clr_off = 0x170, + .int_st_off = 0x174, + .oob0_off = 0x200, + .oob1_off = 0x230, + .ecc0 = { + .err_flag_bit = 2, + .low = 3, + .low_mask = 0x1F, + .low_bn = 5, + .high = 27, + .high_mask = 0x1, + }, + .ecc1 = { + .err_flag_bit = 15, + .low = 16, + .low_mask = 0x1F, + .low_bn = 5, + .high = 29, + .high_mask = 0x1, + }, +}; + +static struct nfc_cfg nfc_v8_cfg = { + .type = NFC_V8, + .ecc_strengths = {16, 16, 16, 16}, + .ecc_cfgs = { + 0x00000001, 0x00000001, 0x00000001, 0x00000001, + }, + .flctl_off = 0x08, + .bchctl_off = 0x0C, + .dma_cfg_off = 0x10, + .dma_data_buf_off = 0x14, + .dma_oob_buf_off = 0x18, + .dma_st_off = 0x1C, + .bch_st_off = 0x20, + .randmz_off = 0x150, + .int_en_off = 0x16C, + .int_clr_off = 0x170, + .int_st_off = 0x174, + .oob0_off = 0x200, + .oob1_off = 0x230, + .ecc0 = { + 
.err_flag_bit = 2, + .low = 3, + .low_mask = 0x1F, + .low_bn = 5, + .high = 27, + .high_mask = 0x1, + }, + .ecc1 = { + .err_flag_bit = 15, + .low = 16, + .low_mask = 0x1F, + .low_bn = 5, + .high = 29, + .high_mask = 0x1, + }, +}; + +static struct nfc_cfg nfc_v9_cfg = { + .type = NFC_V9, + .ecc_strengths = {70, 60, 40, 16}, + .ecc_cfgs = { + 0x00000001, 0x06000001, 0x04000001, 0x02000001, + }, + .flctl_off = 0x10, + .bchctl_off = 0x20, + .dma_cfg_off = 0x30, + .dma_data_buf_off = 0x34, + .dma_oob_buf_off = 0x38, + .dma_st_off = 0x3C, + .bch_st_off = 0x150, + .randmz_off = 0x208, + .int_en_off = 0x120, + .int_clr_off = 0x124, + .int_st_off = 0x128, + .oob0_off = 0x200, + .oob1_off = 0x204, + .ecc0 = { + .err_flag_bit = 2, + .low = 3, + .low_mask = 0x7F, + .low_bn = 7, + .high = 0, + .high_mask = 0x0, + }, + .ecc1 = { + .err_flag_bit = 18, + .low = 19, + .low_mask = 0x7F, + .low_bn = 7, + .high = 0, + .high_mask = 0x0, + }, +}; + +static const struct of_device_id rk_nfc_id_table[] = { + { + .compatible = "rockchip,px30-nfc", + .data = &nfc_v9_cfg + }, + { + .compatible = "rockchip,rk2928-nfc", + .data = &nfc_v6_cfg + }, + { + .compatible = "rockchip,rv1108-nfc", + .data = &nfc_v8_cfg + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, rk_nfc_id_table); + +static int rk_nfc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rk_nfc *nfc; + int ret, irq; + + nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL); + if (!nfc) + return -ENOMEM; + + nand_controller_init(&nfc->controller); + INIT_LIST_HEAD(&nfc->chips); + nfc->controller.ops = &rk_nfc_controller_ops; + + nfc->cfg = of_device_get_match_data(dev); + nfc->dev = dev; + + init_completion(&nfc->done); + + nfc->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(nfc->regs)) { + ret = PTR_ERR(nfc->regs); + goto release_nfc; + } + + nfc->nfc_clk = devm_clk_get(dev, "nfc"); + if (IS_ERR(nfc->nfc_clk)) { + dev_dbg(dev, "no NFC clk\n"); + /* Some earlier models, such as rk3066, have no NFC clk. */ + } + + nfc->ahb_clk = devm_clk_get(dev, "ahb"); + if (IS_ERR(nfc->ahb_clk)) { + dev_err(dev, "no ahb clk\n"); + ret = PTR_ERR(nfc->ahb_clk); + goto release_nfc; + } + + ret = rk_nfc_enable_clks(dev, nfc); + if (ret) + goto release_nfc; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "no NFC irq resource\n"); + ret = -EINVAL; + goto clk_disable; + } + + writel(0, nfc->regs + nfc->cfg->int_en_off); + ret = devm_request_irq(dev, irq, rk_nfc_irq, 0x0, "rk-nand", nfc); + if (ret) { + dev_err(dev, "failed to request NFC irq\n"); + goto clk_disable; + } + + platform_set_drvdata(pdev, nfc); + + ret = rk_nfc_nand_chips_init(dev, nfc); + if (ret) { + dev_err(dev, "failed to init NAND chips\n"); + goto clk_disable; + } + return 0; + +clk_disable: + rk_nfc_disable_clks(nfc); +release_nfc: + return ret; +} + +static int rk_nfc_remove(struct platform_device *pdev) +{ + struct rk_nfc *nfc = platform_get_drvdata(pdev); + + kfree(nfc->page_buf); + kfree(nfc->oob_buf); + rk_nfc_chips_cleanup(nfc); + rk_nfc_disable_clks(nfc); + + return 0; +} + +static int __maybe_unused rk_nfc_suspend(struct device *dev) +{ + struct rk_nfc *nfc = dev_get_drvdata(dev); + + rk_nfc_disable_clks(nfc); + + return 0; +} + +static int __maybe_unused rk_nfc_resume(struct device *dev) +{ + struct rk_nfc *nfc = dev_get_drvdata(dev); + struct rk_nfc_nand_chip *rknand; + struct nand_chip *chip; + int ret; + u32 i; + + ret = rk_nfc_enable_clks(dev, nfc); + if (ret) + return ret; + + /* Reset NAND chip if VCC was powered off. 
*/ + list_for_each_entry(rknand, &nfc->chips, node) { + chip = &rknand->chip; + for (i = 0; i < rknand->nsels; i++) + nand_reset(chip, i); + } + + return 0; +} + +static const struct dev_pm_ops rk_nfc_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(rk_nfc_suspend, rk_nfc_resume) +}; + +static struct platform_driver rk_nfc_driver = { + .probe = rk_nfc_probe, + .remove = rk_nfc_remove, + .driver = { + .name = "rockchip-nfc", + .of_match_table = rk_nfc_id_table, + .pm = &rk_nfc_pm_ops, + }, +}; + +module_platform_driver(rk_nfc_driver); + +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_AUTHOR("Yifeng Zhao <yifeng.zhao@rock-chips.com>"); +MODULE_DESCRIPTION("Rockchip Nand Flash Controller Driver"); +MODULE_ALIAS("platform:rockchip-nand-controller"); diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c index fbd0fa48e063..f0a4535c812a 100644 --- a/drivers/mtd/nand/raw/s3c2410.c +++ b/drivers/mtd/nand/raw/s3c2410.c @@ -30,7 +30,6 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <linux/platform_data/mtd-nand-s3c2410.h> @@ -134,7 +133,8 @@ enum s3c_nand_clk_state { /** * struct s3c2410_nand_info - NAND controller state. - * @mtds: An array of MTD instances on this controoler. + * @controller: Base controller structure. + * @mtds: An array of MTD instances on this controller. * @platform: The platform data for this board. * @device: The platform device we bound to. * @clk: The clock resource for this controller. @@ -146,6 +146,7 @@ enum s3c_nand_clk_state { * @clk_rate: The clock rate from @clk. * @clk_state: The current clock state. * @cpu_type: The exact type of this controller. + * @freq_transition: CPUFreq notifier block */ struct s3c2410_nand_info { /* mtd info */ diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c index af98bcc9d689..5612ee628425 100644 --- a/drivers/mtd/nand/raw/sharpsl.c +++ b/drivers/mtd/nand/raw/sharpsl.c @@ -12,7 +12,6 @@ #include <linux/delay.h> #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <linux/mtd/sharpsl.h> #include <linux/interrupt.h> @@ -107,7 +106,7 @@ static int sharpsl_attach_chip(struct nand_chip *chip) chip->ecc.strength = 1; chip->ecc.hwctl = sharpsl_nand_enable_hwecc; chip->ecc.calculate = sharpsl_nand_calculate_ecc; - chip->ecc.correct = nand_correct_data; + chip->ecc.correct = rawnand_sw_hamming_correct; return 0; } diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c index 107208311987..70f8305c9b6e 100644 --- a/drivers/mtd/nand/raw/socrates_nand.c +++ b/drivers/mtd/nand/raw/socrates_nand.c @@ -120,7 +120,9 @@ static int socrates_nand_device_ready(struct nand_chip *nand_chip) static int socrates_attach_chip(struct nand_chip *chip) { chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; + + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; return 0; } diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index 2a7ca3072f35..923a9e236fcf 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -51,6 +51,7 @@ #define NFC_REG_USER_DATA(x) (0x0050 + ((x) * 4)) #define NFC_REG_SPARE_AREA 0x00A0 #define NFC_REG_PAT_ID 0x00A4 +#define NFC_REG_MDMA_ADDR 0x00C0 #define NFC_REG_MDMA_CNT 0x00C4 #define NFC_RAM0_BASE 0x0400 #define NFC_RAM1_BASE 0x0800 @@ -182,6 +183,7 @@ struct 
sunxi_nand_hw_ecc { * * @node: used to store NAND chips into a list * @nand: base NAND chip structure + * @ecc: ECC controller structure * @clk_rate: clk_rate required for this NAND chip * @timing_cfg: TIMING_CFG register value for this NAND chip * @timing_ctl: TIMING_CTL register value for this NAND chip @@ -191,6 +193,7 @@ struct sunxi_nand_hw_ecc { struct sunxi_nand_chip { struct list_head node; struct nand_chip nand; + struct sunxi_nand_hw_ecc *ecc; unsigned long clk_rate; u32 timing_cfg; u32 timing_ctl; @@ -207,13 +210,13 @@ static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand) * NAND Controller capabilities structure: stores NAND controller capabilities * for distinction between compatible strings. * - * @extra_mbus_conf: Contrary to A10, A10s and A13, accessing internal RAM + * @has_mdma: Use mbus dma mode, otherwise general dma * through MBUS on A23/A33 needs extra configuration. * @reg_io_data: I/O data register * @dma_maxburst: DMA maxburst */ struct sunxi_nfc_caps { - bool extra_mbus_conf; + bool has_mdma; unsigned int reg_io_data; unsigned int dma_maxburst; }; @@ -233,6 +236,7 @@ struct sunxi_nfc_caps { * controller * @complete: a completion object used to wait for NAND controller events * @dmac: the DMA channel attached to the NAND controller + * @caps: NAND Controller capabilities */ struct sunxi_nfc { struct nand_controller controller; @@ -363,24 +367,31 @@ static int sunxi_nfc_dma_op_prepare(struct sunxi_nfc *nfc, const void *buf, if (!ret) return -ENOMEM; - dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK); - if (!dmad) { - ret = -EINVAL; - goto err_unmap_buf; + if (!nfc->caps->has_mdma) { + dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK); + if (!dmad) { + ret = -EINVAL; + goto err_unmap_buf; + } } writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD, nfc->regs + NFC_REG_CTL); writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM); writel(chunksize, nfc->regs + NFC_REG_CNT); - if (nfc->caps->extra_mbus_conf) - writel(chunksize * nchunks, nfc->regs + NFC_REG_MDMA_CNT); - dmat = dmaengine_submit(dmad); + if (nfc->caps->has_mdma) { + writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_DMA_TYPE_NORMAL, + nfc->regs + NFC_REG_CTL); + writel(chunksize * nchunks, nfc->regs + NFC_REG_MDMA_CNT); + writel(sg_dma_address(sg), nfc->regs + NFC_REG_MDMA_ADDR); + } else { + dmat = dmaengine_submit(dmad); - ret = dma_submit_error(dmat); - if (ret) - goto err_clr_dma_flag; + ret = dma_submit_error(dmat); + if (ret) + goto err_clr_dma_flag; + } return 0; @@ -676,15 +687,15 @@ static void sunxi_nfc_randomizer_read_buf(struct nand_chip *nand, uint8_t *buf, static void sunxi_nfc_hw_ecc_enable(struct nand_chip *nand) { + struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); - struct sunxi_nand_hw_ecc *data = nand->ecc.priv; u32 ecc_ctl; ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL); ecc_ctl &= ~(NFC_ECC_MODE_MSK | NFC_ECC_PIPELINE | NFC_ECC_BLOCK_SIZE_MSK); - ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION | - NFC_ECC_PIPELINE; + ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(sunxi_nand->ecc->mode) | + NFC_ECC_EXCEPTION | NFC_ECC_PIPELINE; if (nand->ecc.size == 512) ecc_ctl |= NFC_ECC_BLOCK_512; @@ -911,7 +922,7 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct nand_chip *nand, uint8_t *buf unsigned int max_bitflips = 0; int ret, i, raw_mode = 0; struct scatterlist sg; - u32 status; + u32 status, wait; ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); if (ret) @@ -929,13 +940,18 @@ 
static int sunxi_nfc_hw_ecc_read_chunks_dma(struct nand_chip *nand, uint8_t *buf writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) | NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET); - dma_async_issue_pending(nfc->dmac); + wait = NFC_CMD_INT_FLAG; + + if (nfc->caps->has_mdma) + wait |= NFC_DMA_INT_FLAG; + else + dma_async_issue_pending(nfc->dmac); writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS, nfc->regs + NFC_REG_CMD); - ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0); - if (ret) + ret = sunxi_nfc_wait_events(nfc, wait, false, 0); + if (ret && !nfc->caps->has_mdma) dmaengine_terminate_all(nfc->dmac); sunxi_nfc_randomizer_disable(nand); @@ -1276,6 +1292,7 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *nand, struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); struct nand_ecc_ctrl *ecc = &nand->ecc; struct scatterlist sg; + u32 wait; int ret, i; sunxi_nfc_select_chip(nand, nand->cur_cs); @@ -1304,14 +1321,19 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *nand, writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG, nfc->regs + NFC_REG_WCMD_SET); - dma_async_issue_pending(nfc->dmac); + wait = NFC_CMD_INT_FLAG; + + if (nfc->caps->has_mdma) + wait |= NFC_DMA_INT_FLAG; + else + dma_async_issue_pending(nfc->dmac); writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS | NFC_ACCESS_DIR, nfc->regs + NFC_REG_CMD); - ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0); - if (ret) + ret = sunxi_nfc_wait_events(nfc, wait, false, 0); + if (ret && !nfc->caps->has_mdma) dmaengine_terminate_all(nfc->dmac); sunxi_nfc_randomizer_disable(nand); @@ -1597,9 +1619,9 @@ static const struct mtd_ooblayout_ops sunxi_nand_ooblayout_ops = { .free = sunxi_nand_ooblayout_free, }; -static void sunxi_nand_hw_ecc_ctrl_cleanup(struct nand_ecc_ctrl *ecc) +static void sunxi_nand_hw_ecc_ctrl_cleanup(struct sunxi_nand_chip *sunxi_nand) { - kfree(ecc->priv); + kfree(sunxi_nand->ecc); } static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, @@ -1607,10 +1629,10 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, struct device_node *np) { static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 }; + struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); struct mtd_info *mtd = nand_to_mtd(nand); struct nand_device *nanddev = mtd_to_nanddev(mtd); - struct sunxi_nand_hw_ecc *data; int nsectors; int ret; int i; @@ -1647,8 +1669,8 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, if (ecc->size != 512 && ecc->size != 1024) return -EINVAL; - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) + sunxi_nand->ecc = kzalloc(sizeof(*sunxi_nand->ecc), GFP_KERNEL); + if (!sunxi_nand->ecc) return -ENOMEM; /* Prefer 1k ECC chunk over 512 ones */ @@ -1675,7 +1697,7 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, goto err; } - data->mode = i; + sunxi_nand->ecc->mode = i; /* HW ECC always request ECC bytes for 1024 bytes blocks */ ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * 1024), 8); @@ -1693,9 +1715,8 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, ecc->read_oob = sunxi_nfc_hw_ecc_read_oob; ecc->write_oob = sunxi_nfc_hw_ecc_write_oob; mtd_set_ooblayout(mtd, &sunxi_nand_ooblayout_ops); - ecc->priv = data; - if (nfc->dmac) { + if (nfc->dmac || nfc->caps->has_mdma) { ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma; ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma; ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma; 
@@ -1714,16 +1735,18 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, return 0; err: - kfree(data); + kfree(sunxi_nand->ecc); return ret; } -static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc) +static void sunxi_nand_ecc_cleanup(struct sunxi_nand_chip *sunxi_nand) { + struct nand_ecc_ctrl *ecc = &sunxi_nand->nand.ecc; + switch (ecc->engine_type) { case NAND_ECC_ENGINE_TYPE_ON_HOST: - sunxi_nand_hw_ecc_ctrl_cleanup(ecc); + sunxi_nand_hw_ecc_ctrl_cleanup(sunxi_nand); break; case NAND_ECC_ENGINE_TYPE_NONE: default: @@ -2053,11 +2076,41 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc) ret = mtd_device_unregister(nand_to_mtd(chip)); WARN_ON(ret); nand_cleanup(chip); - sunxi_nand_ecc_cleanup(&chip->ecc); + sunxi_nand_ecc_cleanup(sunxi_nand); list_del(&sunxi_nand->node); } } +static int sunxi_nfc_dma_init(struct sunxi_nfc *nfc, struct resource *r) +{ + int ret; + + if (nfc->caps->has_mdma) + return 0; + + nfc->dmac = dma_request_chan(nfc->dev, "rxtx"); + if (IS_ERR(nfc->dmac)) { + ret = PTR_ERR(nfc->dmac); + if (ret == -EPROBE_DEFER) + return ret; + + /* Ignore errors to fall back to PIO mode */ + dev_warn(nfc->dev, "failed to request rxtx DMA channel: %d\n", ret); + nfc->dmac = NULL; + } else { + struct dma_slave_config dmac_cfg = { }; + + dmac_cfg.src_addr = r->start + nfc->caps->reg_io_data; + dmac_cfg.dst_addr = dmac_cfg.src_addr; + dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width; + dmac_cfg.src_maxburst = nfc->caps->dma_maxburst; + dmac_cfg.dst_maxburst = nfc->caps->dma_maxburst; + dmaengine_slave_config(nfc->dmac, &dmac_cfg); + } + return 0; +} + static int sunxi_nfc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -2132,30 +2185,10 @@ static int sunxi_nfc_probe(struct platform_device *pdev) if (ret) goto out_ahb_reset_reassert; - nfc->dmac = dma_request_chan(dev, "rxtx"); - if (IS_ERR(nfc->dmac)) { - ret = PTR_ERR(nfc->dmac); - if (ret == -EPROBE_DEFER) - goto out_ahb_reset_reassert; - - /* Ignore errors to fall back to PIO mode */ - dev_warn(dev, "failed to request rxtx DMA channel: %d\n", ret); - nfc->dmac = NULL; - } else { - struct dma_slave_config dmac_cfg = { }; - - dmac_cfg.src_addr = r->start + nfc->caps->reg_io_data; - dmac_cfg.dst_addr = dmac_cfg.src_addr; - dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width; - dmac_cfg.src_maxburst = nfc->caps->dma_maxburst; - dmac_cfg.dst_maxburst = nfc->caps->dma_maxburst; - dmaengine_slave_config(nfc->dmac, &dmac_cfg); + ret = sunxi_nfc_dma_init(nfc, r); - if (nfc->caps->extra_mbus_conf) - writel(readl(nfc->regs + NFC_REG_CTL) | - NFC_DMA_TYPE_NORMAL, nfc->regs + NFC_REG_CTL); - } + if (ret) + goto out_ahb_reset_reassert; platform_set_drvdata(pdev, nfc); @@ -2202,7 +2235,7 @@ static const struct sunxi_nfc_caps sunxi_nfc_a10_caps = { }; static const struct sunxi_nfc_caps sunxi_nfc_a23_caps = { - .extra_mbus_conf = true, + .has_mdma = true, .reg_io_data = NFC_REG_A23_IO_DATA, .dma_maxburst = 8, }; diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c index aa6c7e7bbf1b..de8e919d0ebe 100644 --- a/drivers/mtd/nand/raw/tmio_nand.c +++ b/drivers/mtd/nand/raw/tmio_nand.c @@ -35,7 +35,6 @@ #include <linux/ioport.h> #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <linux/slab.h> @@ -293,11 +292,11 @@ static int tmio_nand_correct_data(struct nand_chip *chip, unsigned 
char *buf, int r0, r1; /* assume ecc.size = 512 and ecc.bytes = 6 */ - r0 = __nand_correct_data(buf, read_ecc, calc_ecc, 256, false); + r0 = rawnand_sw_hamming_correct(chip, buf, read_ecc, calc_ecc); if (r0 < 0) return r0; - r1 = __nand_correct_data(buf + 256, read_ecc + 3, calc_ecc + 3, 256, - false); + r1 = rawnand_sw_hamming_correct(chip, buf + 256, read_ecc + 3, + calc_ecc + 3); if (r1 < 0) return r1; return r0 + r1; diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c index fe8ed2441588..1a9449e53bf9 100644 --- a/drivers/mtd/nand/raw/txx9ndfmc.c +++ b/drivers/mtd/nand/raw/txx9ndfmc.c @@ -14,7 +14,6 @@ #include <linux/delay.h> #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> -#include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <linux/io.h> #include <linux/platform_data/txx9/ndfmc.h> @@ -194,8 +193,8 @@ static int txx9ndfmc_correct_data(struct nand_chip *chip, unsigned char *buf, int stat; for (eccsize = chip->ecc.size; eccsize > 0; eccsize -= 256) { - stat = __nand_correct_data(buf, read_ecc, calc_ecc, 256, - false); + stat = rawnand_sw_hamming_correct(chip, buf, read_ecc, + calc_ecc); if (stat < 0) return stat; corrected += stat; diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c index efc5bf5434e0..26751976e502 100644 --- a/drivers/mtd/nand/raw/xway_nand.c +++ b/drivers/mtd/nand/raw/xway_nand.c @@ -149,7 +149,9 @@ static void xway_write_buf(struct nand_chip *chip, const u_char *buf, int len) static int xway_attach_chip(struct nand_chip *chip) { chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; + + if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; return 0; } diff --git a/drivers/mtd/nand/spi/Kconfig b/drivers/mtd/nand/spi/Kconfig index da89b250df7c..3d7649a2dd72 100644 --- a/drivers/mtd/nand/spi/Kconfig +++ b/drivers/mtd/nand/spi/Kconfig @@ -2,6 +2,7 @@ menuconfig MTD_SPI_NAND tristate "SPI NAND device Support" select MTD_NAND_CORE + select MTD_NAND_ECC depends on SPI_MASTER select SPI_MEM help diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index c35221794645..8ea545bb924d 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -193,6 +193,135 @@ static int spinand_ecc_enable(struct spinand_device *spinand, enable ? CFG_ECC_ENABLE : 0); } +static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status) +{ + struct nand_device *nand = spinand_to_nand(spinand); + + if (spinand->eccinfo.get_status) + return spinand->eccinfo.get_status(spinand, status); + + switch (status & STATUS_ECC_MASK) { + case STATUS_ECC_NO_BITFLIPS: + return 0; + + case STATUS_ECC_HAS_BITFLIPS: + /* + * We have no way to know exactly how many bitflips have been + * fixed, so let's return the maximum possible value so that + * wear-leveling layers move the data immediately. + */ + return nanddev_get_ecc_conf(nand)->strength; + + case STATUS_ECC_UNCOR_ERROR: + return -EBADMSG; + + default: + break; + } + + return -EINVAL; +} + +static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + return -ERANGE; +} + +static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + if (section) + return -ERANGE; + + /* Reserve 2 bytes for the BBM. 
*/ + region->offset = 2; + region->length = 62; + + return 0; +} + +static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = { + .ecc = spinand_noecc_ooblayout_ecc, + .free = spinand_noecc_ooblayout_free, +}; + +static int spinand_ondie_ecc_init_ctx(struct nand_device *nand) +{ + struct spinand_device *spinand = nand_to_spinand(nand); + struct mtd_info *mtd = nanddev_to_mtd(nand); + struct spinand_ondie_ecc_conf *engine_conf; + + nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE; + nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size; + nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength; + + engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL); + if (!engine_conf) + return -ENOMEM; + + nand->ecc.ctx.priv = engine_conf; + + if (spinand->eccinfo.ooblayout) + mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout); + else + mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout); + + return 0; +} + +static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand) +{ + kfree(nand->ecc.ctx.priv); +} + +static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand, + struct nand_page_io_req *req) +{ + struct spinand_device *spinand = nand_to_spinand(nand); + bool enable = (req->mode != MTD_OPS_RAW); + + /* Only enable or disable the engine */ + return spinand_ecc_enable(spinand, enable); +} + +static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand, + struct nand_page_io_req *req) +{ + struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv; + struct spinand_device *spinand = nand_to_spinand(nand); + + if (req->mode == MTD_OPS_RAW) + return 0; + + /* Nothing to do when finishing a page write */ + if (req->type == NAND_PAGE_WRITE) + return 0; + + /* Finish a page read: check the status, report errors/bitflips */ + return spinand_check_ecc_status(spinand, engine_conf->status); +} + +static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = { + .init_ctx = spinand_ondie_ecc_init_ctx, + .cleanup_ctx = spinand_ondie_ecc_cleanup_ctx, + .prepare_io_req = spinand_ondie_ecc_prepare_io_req, + .finish_io_req = spinand_ondie_ecc_finish_io_req, +}; + +static struct nand_ecc_engine spinand_ondie_ecc_engine = { + .ops = &spinand_ondie_ecc_engine_ops, +}; + +static void spinand_ondie_ecc_save_status(struct nand_device *nand, u8 status) +{ + struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv; + + if (nand->ecc.ctx.conf.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE && + engine_conf) + engine_conf->status = status; +} + static int spinand_write_enable_op(struct spinand_device *spinand) { struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true); @@ -214,7 +343,6 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand, const struct nand_page_io_req *req) { struct nand_device *nand = spinand_to_nand(spinand); - struct mtd_info *mtd = nanddev_to_mtd(nand); struct spi_mem_dirmap_desc *rdesc; unsigned int nbytes = 0; void *buf = NULL; @@ -254,16 +382,9 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand, memcpy(req->databuf.in, spinand->databuf + req->dataoffs, req->datalen); - if (req->ooblen) { - if (req->mode == MTD_OPS_AUTO_OOB) - mtd_ooblayout_get_databytes(mtd, req->oobbuf.in, - spinand->oobbuf, - req->ooboffs, - req->ooblen); - else - memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs, - req->ooblen); - } + if (req->ooblen) + memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs, + req->ooblen); return 0; } @@ -272,7 +393,7 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, const
struct nand_page_io_req *req) { struct nand_device *nand = spinand_to_nand(spinand); - struct mtd_info *mtd = nanddev_to_mtd(nand); + struct mtd_info *mtd = spinand_to_mtd(spinand); struct spi_mem_dirmap_desc *wdesc; unsigned int nbytes, column = 0; void *buf = spinand->databuf; @@ -284,9 +405,12 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, * must fill the page cache entirely even if we only want to program * the data portion of the page, otherwise we might corrupt the BBM or * user data previously programmed in OOB area. + * + * Only reset the data buffer manually, the OOB buffer is prepared by + * ECC engines ->prepare_io_req() callback. */ nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand); - memset(spinand->databuf, 0xff, nbytes); + memset(spinand->databuf, 0xff, nanddev_page_size(nand)); if (req->datalen) memcpy(spinand->databuf + req->dataoffs, req->databuf.out, @@ -402,42 +526,17 @@ static int spinand_lock_block(struct spinand_device *spinand, u8 lock) return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock); } -static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status) -{ - struct nand_device *nand = spinand_to_nand(spinand); - - if (spinand->eccinfo.get_status) - return spinand->eccinfo.get_status(spinand, status); - - switch (status & STATUS_ECC_MASK) { - case STATUS_ECC_NO_BITFLIPS: - return 0; - - case STATUS_ECC_HAS_BITFLIPS: - /* - * We have no way to know exactly how many bitflips have been - * fixed, so let's return the maximum possible value so that - * wear-leveling layers move the data immediately. - */ - return nanddev_get_ecc_conf(nand)->strength; - - case STATUS_ECC_UNCOR_ERROR: - return -EBADMSG; - - default: - break; - } - - return -EINVAL; -} - static int spinand_read_page(struct spinand_device *spinand, - const struct nand_page_io_req *req, - bool ecc_enabled) + const struct nand_page_io_req *req) { + struct nand_device *nand = spinand_to_nand(spinand); u8 status; int ret; + ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req); + if (ret) + return ret; + ret = spinand_load_page_op(spinand, req); if (ret) return ret; @@ -446,22 +545,26 @@ static int spinand_read_page(struct spinand_device *spinand, if (ret < 0) return ret; + spinand_ondie_ecc_save_status(nand, status); + ret = spinand_read_from_cache_op(spinand, req); if (ret) return ret; - if (!ecc_enabled) - return 0; - - return spinand_check_ecc_status(spinand, status); + return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req); } static int spinand_write_page(struct spinand_device *spinand, const struct nand_page_io_req *req) { + struct nand_device *nand = spinand_to_nand(spinand); u8 status; int ret; + ret = nand_ecc_prepare_io_req(nand, (struct nand_page_io_req *)req); + if (ret) + return ret; + ret = spinand_write_enable_op(spinand); if (ret) return ret; @@ -476,9 +579,9 @@ static int spinand_write_page(struct spinand_device *spinand, ret = spinand_wait(spinand, &status); if (!ret && (status & STATUS_PROG_FAILED)) - ret = -EIO; + return -EIO; - return ret; + return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req); } static int spinand_mtd_read(struct mtd_info *mtd, loff_t from, @@ -488,25 +591,24 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from, struct nand_device *nand = mtd_to_nanddev(mtd); unsigned int max_bitflips = 0; struct nand_io_iter iter; - bool enable_ecc = false; + bool disable_ecc = false; bool ecc_failed = false; int ret = 0; - if (ops->mode != MTD_OPS_RAW && 
spinand->eccinfo.ooblayout) - enable_ecc = true; + if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout) + disable_ecc = true; mutex_lock(&spinand->lock); nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) { - ret = spinand_select_target(spinand, iter.req.pos.target); - if (ret) - break; + if (disable_ecc) + iter.req.mode = MTD_OPS_RAW; - ret = spinand_ecc_enable(spinand, enable_ecc); + ret = spinand_select_target(spinand, iter.req.pos.target); if (ret) break; - ret = spinand_read_page(spinand, &iter.req, enable_ecc); + ret = spinand_read_page(spinand, &iter.req); if (ret < 0 && ret != -EBADMSG) break; @@ -537,20 +639,19 @@ static int spinand_mtd_write(struct mtd_info *mtd, loff_t to, struct spinand_device *spinand = mtd_to_spinand(mtd); struct nand_device *nand = mtd_to_nanddev(mtd); struct nand_io_iter iter; - bool enable_ecc = false; + bool disable_ecc = false; int ret = 0; - if (ops->mode != MTD_OPS_RAW && mtd->ooblayout) - enable_ecc = true; + if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout) + disable_ecc = true; mutex_lock(&spinand->lock); nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) { - ret = spinand_select_target(spinand, iter.req.pos.target); - if (ret) - break; + if (disable_ecc) + iter.req.mode = MTD_OPS_RAW; - ret = spinand_ecc_enable(spinand, enable_ecc); + ret = spinand_select_target(spinand, iter.req.pos.target); if (ret) break; @@ -580,7 +681,7 @@ static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos) }; spinand_select_target(spinand, pos->target); - spinand_read_page(spinand, &req, false); + spinand_read_page(spinand, &req); if (marker[0] != 0xff || marker[1] != 0xff) return true; @@ -965,30 +1066,6 @@ static int spinand_detect(struct spinand_device *spinand) return 0; } -static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section, - struct mtd_oob_region *region) -{ - return -ERANGE; -} - -static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section, - struct mtd_oob_region *region) -{ - if (section) - return -ERANGE; - - /* Reserve 2 bytes for the BBM. */ - region->offset = 2; - region->length = 62; - - return 0; -} - -static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = { - .ecc = spinand_noecc_ooblayout_ecc, - .free = spinand_noecc_ooblayout_free, -}; - static int spinand_init(struct spinand_device *spinand) { struct device *dev = &spinand->spimem->spi->dev; @@ -1066,10 +1143,15 @@ static int spinand_init(struct spinand_device *spinand) if (ret) goto err_manuf_cleanup; - /* - * Right now, we don't support ECC, so let the whole oob - * area is available for user. 
- */ + /* SPI-NAND default ECC engine is on-die */ + nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE; + nand->ecc.ondie_engine = &spinand_ondie_ecc_engine; + + spinand_ecc_enable(spinand, false); + ret = nanddev_ecc_engine_init(nand); + if (ret) + goto err_cleanup_nanddev; + mtd->_read_oob = spinand_mtd_read; mtd->_write_oob = spinand_mtd_write; mtd->_block_isbad = spinand_mtd_block_isbad; @@ -1078,14 +1160,11 @@ static int spinand_init(struct spinand_device *spinand) mtd->_erase = spinand_mtd_erase; mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks; - if (spinand->eccinfo.ooblayout) - mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout); - else - mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout); - - ret = mtd_ooblayout_count_freebytes(mtd); - if (ret < 0) - goto err_cleanup_nanddev; + if (nand->ecc.engine) { + ret = mtd_ooblayout_count_freebytes(mtd); + if (ret < 0) + goto err_cleanup_ecc_engine; + } mtd->oobavail = ret; @@ -1095,6 +1174,9 @@ static int spinand_init(struct spinand_device *spinand) return 0; +err_cleanup_ecc_engine: + nanddev_ecc_engine_cleanup(nand); + err_cleanup_nanddev: nanddev_cleanup(nand); diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c index 8e801e4c3a00..6701aaa21a49 100644 --- a/drivers/mtd/nand/spi/macronix.c +++ b/drivers/mtd/nand/spi/macronix.c @@ -119,6 +119,53 @@ static const struct spinand_info macronix_spinand_table[] = { &update_cache_variants), SPINAND_HAS_QE_BIT, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), + SPINAND_INFO("MX35LF2GE4AD", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x26), + NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, + mx35lf1ge4ab_ecc_get_status)), + SPINAND_INFO("MX35LF4GE4AD", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x37), + NAND_MEMORG(1, 4096, 128, 64, 2048, 40, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, + mx35lf1ge4ab_ecc_get_status)), + SPINAND_INFO("MX35LF1G24AD", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14), + NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), + SPINAND_INFO("MX35LF2G24AD", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24), + NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), + SPINAND_INFO("MX35LF4G24AD", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35), + NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, NULL)), SPINAND_INFO("MX31LF1GE4BC", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x1e), NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c index 5d370cfcdaaa..50b7295bc922 100644 --- a/drivers/mtd/nand/spi/micron.c +++ b/drivers/mtd/nand/spi/micron.c @@ -28,7 +28,7 @@ #define MICRON_SELECT_DIE(x) ((x) << 6) -static 
SPINAND_OP_VARIANTS(read_cache_variants, +static SPINAND_OP_VARIANTS(quadio_read_cache_variants, SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0), @@ -36,14 +36,27 @@ static SPINAND_OP_VARIANTS(read_cache_variants, SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); -static SPINAND_OP_VARIANTS(write_cache_variants, +static SPINAND_OP_VARIANTS(x4_write_cache_variants, SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), SPINAND_PROG_LOAD(true, 0, NULL, 0)); -static SPINAND_OP_VARIANTS(update_cache_variants, +static SPINAND_OP_VARIANTS(x4_update_cache_variants, SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), SPINAND_PROG_LOAD(false, 0, NULL, 0)); +/* Micron MT29F2G01AAAED Device */ +static SPINAND_OP_VARIANTS(x4_read_cache_variants, + SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), + SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); + +static SPINAND_OP_VARIANTS(x1_write_cache_variants, + SPINAND_PROG_LOAD(true, 0, NULL, 0)); + +static SPINAND_OP_VARIANTS(x1_update_cache_variants, + SPINAND_PROG_LOAD(false, 0, NULL, 0)); + static int micron_8_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) { @@ -74,6 +87,47 @@ static const struct mtd_ooblayout_ops micron_8_ooblayout = { .free = micron_8_ooblayout_free, }; +static int micron_4_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + struct spinand_device *spinand = mtd_to_spinand(mtd); + + if (section >= spinand->base.memorg.pagesize / + mtd->ecc_step_size) + return -ERANGE; + + region->offset = (section * 16) + 8; + region->length = 8; + + return 0; +} + +static int micron_4_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + struct spinand_device *spinand = mtd_to_spinand(mtd); + + if (section >= spinand->base.memorg.pagesize / + mtd->ecc_step_size) + return -ERANGE; + + if (section) { + region->offset = 16 * section; + region->length = 8; + } else { + /* section 0 has two bytes reserved for the BBM */ + region->offset = 2; + region->length = 6; + } + + return 0; +} + +static const struct mtd_ooblayout_ops micron_4_ooblayout = { + .ecc = micron_4_ooblayout_ecc, + .free = micron_4_ooblayout_free, +}; + static int micron_select_target(struct spinand_device *spinand, unsigned int target) { @@ -120,9 +174,9 @@ static const struct spinand_info micron_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x24), NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1), NAND_ECCREQ(8, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_variants, - &write_cache_variants, - &update_cache_variants), + SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants, + &x4_write_cache_variants, + &x4_update_cache_variants), 0, SPINAND_ECCINFO(µn_8_ooblayout, micron_8_ecc_get_status)), @@ -131,9 +185,9 @@ static const struct spinand_info micron_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x25), NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1), NAND_ECCREQ(8, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_variants, - &write_cache_variants, - &update_cache_variants), + SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants, + &x4_write_cache_variants, + &x4_update_cache_variants), 0, SPINAND_ECCINFO(µn_8_ooblayout, micron_8_ecc_get_status)), @@ -142,9 +196,9 @@ static const struct 
spinand_info micron_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x14), NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), NAND_ECCREQ(8, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_variants, - &write_cache_variants, - &update_cache_variants), + SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants, + &x4_write_cache_variants, + &x4_update_cache_variants), 0, SPINAND_ECCINFO(µn_8_ooblayout, micron_8_ecc_get_status)), @@ -153,9 +207,9 @@ static const struct spinand_info micron_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x15), NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), NAND_ECCREQ(8, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_variants, - &write_cache_variants, - &update_cache_variants), + SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants, + &x4_write_cache_variants, + &x4_update_cache_variants), 0, SPINAND_ECCINFO(µn_8_ooblayout, micron_8_ecc_get_status)), @@ -164,9 +218,9 @@ static const struct spinand_info micron_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x36), NAND_MEMORG(1, 2048, 128, 64, 2048, 80, 2, 1, 2), NAND_ECCREQ(8, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_variants, - &write_cache_variants, - &update_cache_variants), + SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants, + &x4_write_cache_variants, + &x4_update_cache_variants), 0, SPINAND_ECCINFO(µn_8_ooblayout, micron_8_ecc_get_status), @@ -176,9 +230,9 @@ static const struct spinand_info micron_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x34), NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), NAND_ECCREQ(8, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_variants, - &write_cache_variants, - &update_cache_variants), + SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants, + &x4_write_cache_variants, + &x4_update_cache_variants), SPINAND_HAS_CR_FEAT_BIT, SPINAND_ECCINFO(µn_8_ooblayout, micron_8_ecc_get_status)), @@ -187,9 +241,9 @@ static const struct spinand_info micron_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x35), NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), NAND_ECCREQ(8, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_variants, - &write_cache_variants, - &update_cache_variants), + SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants, + &x4_write_cache_variants, + &x4_update_cache_variants), SPINAND_HAS_CR_FEAT_BIT, SPINAND_ECCINFO(µn_8_ooblayout, micron_8_ecc_get_status)), @@ -198,9 +252,9 @@ static const struct spinand_info micron_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x46), NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2), NAND_ECCREQ(8, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_variants, - &write_cache_variants, - &update_cache_variants), + SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants, + &x4_write_cache_variants, + &x4_update_cache_variants), SPINAND_HAS_CR_FEAT_BIT, SPINAND_ECCINFO(µn_8_ooblayout, micron_8_ecc_get_status), @@ -210,13 +264,23 @@ static const struct spinand_info micron_spinand_table[] = { SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x47), NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 2), NAND_ECCREQ(8, 512), - SPINAND_INFO_OP_VARIANTS(&read_cache_variants, - &write_cache_variants, - &update_cache_variants), + SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants, + &x4_write_cache_variants, + &x4_update_cache_variants), SPINAND_HAS_CR_FEAT_BIT, SPINAND_ECCINFO(µn_8_ooblayout, micron_8_ecc_get_status), SPINAND_SELECT_TARGET(micron_select_target)), + /* M69A 2Gb 3.3V */ + SPINAND_INFO("MT29F2G01AAAED", + 
SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x9F), + NAND_MEMORG(1, 2048, 64, 64, 2048, 80, 2, 1, 1), + NAND_ECCREQ(4, 512), + SPINAND_INFO_OP_VARIANTS(&x4_read_cache_variants, + &x1_write_cache_variants, + &x1_update_cache_variants), + 0, + SPINAND_ECCINFO(µn_4_ooblayout, NULL)), }; static int micron_spinand_init(struct spinand_device *spinand) diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c index 21fde2875674..7380b1ebaccd 100644 --- a/drivers/mtd/nand/spi/toshiba.c +++ b/drivers/mtd/nand/spi/toshiba.c @@ -28,7 +28,7 @@ static SPINAND_OP_VARIANTS(update_cache_x4_variants, SPINAND_PROG_LOAD_X4(false, 0, NULL, 0), SPINAND_PROG_LOAD(false, 0, NULL, 0)); -/** +/* * Backward compatibility for 1st generation Serial NAND devices * which don't support Quad Program Load operation. */ |
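The sharpsl, tmio_nand and txx9ndfmc hunks above all follow the same pattern: correction that previously went through nand_correct_data()/__nand_correct_data() from <linux/mtd/nand_ecc.h> is now reached through rawnand_sw_hamming_correct(), declared in <linux/mtd/rawnand.h>, which is either assigned directly to ecc.correct or called once per 256-byte chunk from a driver-specific corrector. The sketch below is illustrative only; it assumes a hypothetical "foo" controller that computes a 3-byte Hamming code per 256-byte step in hardware, and the foo_* callbacks are placeholder names, not functions from this series.

    #include <linux/mtd/rawnand.h>

    /* Placeholder controller callbacks, present only so the sketch is complete. */
    static void foo_nand_enable_hwecc(struct nand_chip *chip, int mode)
    {
            /* A real driver would arm its hardware ECC calculation unit here. */
    }

    static int foo_nand_calculate_ecc(struct nand_chip *chip, const u_char *dat,
                                      u_char *ecc_code)
    {
            /* A real driver would read the 3 ECC bytes back from its registers. */
            return 0;
    }

    static int foo_attach_chip(struct nand_chip *chip)
    {
            if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
                    return 0;

            chip->ecc.size = 256;
            chip->ecc.bytes = 3;
            chip->ecc.strength = 1;
            chip->ecc.hwctl = foo_nand_enable_hwecc;
            chip->ecc.calculate = foo_nand_calculate_ecc;
            /* The renamed software helper matches the ecc.correct prototype. */
            chip->ecc.correct = rawnand_sw_hamming_correct;

            return 0;
    }

Drivers whose ECC step spans more than one 256-byte chunk (tmio_nand and txx9ndfmc above) instead call rawnand_sw_hamming_correct(chip, buf, read_ecc, calc_ecc) once per chunk and sum the corrected-bit counts.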