author:    Masahiro Yamada <yamada.masahiro@socionext.com>  2017-03-30 11:15:04 +0300
committer: Boris Brezillon <boris.brezillon@free-electrons.com>  2017-04-25 15:18:37 +0300
commit:    3deb9979c7319bc7846d1aac528a9db85162960a (patch)
tree:      461a23ff5a0761e5940c3b6db229bd85f5dc1082 /drivers/mtd/nand
parent:    e7beeeec854c40c28caa53bd84fdf26e9e459f06 (diff)
mtd: nand: allocate aligned buffers if NAND_OWN_BUFFERS is unset
Some NAND controllers use a DMA engine that requires a specific buffer alignment. The core provides no alignment guarantee on the nand_buffers pointers, which forces some drivers to allocate their own buffers and pass the NAND_OWN_BUFFERS flag.

Rework the nand_buffers allocation logic to allocate each buffer independently. This should make most NAND controller/DMA engine combinations happy, and allow us to get rid of the custom buffer allocations in NAND controller drivers.

Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
Diffstat (limited to 'drivers/mtd/nand')
-rw-r--r--  drivers/mtd/nand/nand_base.c | 41
1 file changed, 32 insertions(+), 9 deletions(-)
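For illustration, here is a minimal, self-contained userspace C sketch of the allocation/cleanup pattern the diff below introduces: a small container struct is allocated first, each buffer is then allocated independently, any failure unwinds through a single error label, and teardown frees the members before the container. The names demo_buffers, demo_buffers_alloc and demo_buffers_free, and the malloc()/free() calls, are stand-ins invented for this sketch and are not the kernel API; the kernel code uses kzalloc()/kmalloc()/kfree(), whose returned buffers are aligned to at least ARCH_KMALLOC_MINALIGN, which is what makes per-buffer allocation friendly to DMA-capable controllers.

#include <stdio.h>
#include <stdlib.h>

struct demo_buffers {
        unsigned char *ecccalc;
        unsigned char *ecccode;
        unsigned char *databuf;
};

static struct demo_buffers *demo_buffers_alloc(size_t writesize, size_t oobsize)
{
        struct demo_buffers *nbuf;

        /* Zero the container so unset pointers stay NULL (like kzalloc). */
        nbuf = calloc(1, sizeof(*nbuf));
        if (!nbuf)
                return NULL;

        /* Each buffer gets its own allocation instead of slicing one blob. */
        nbuf->ecccalc = malloc(oobsize);
        if (!nbuf->ecccalc)
                goto err_free;

        nbuf->ecccode = malloc(oobsize);
        if (!nbuf->ecccode)
                goto err_free;

        nbuf->databuf = malloc(writesize + oobsize);
        if (!nbuf->databuf)
                goto err_free;

        return nbuf;

err_free:
        /* free(NULL) is a no-op, so freeing every member is safe here. */
        free(nbuf->databuf);
        free(nbuf->ecccode);
        free(nbuf->ecccalc);
        free(nbuf);
        return NULL;
}

static void demo_buffers_free(struct demo_buffers *nbuf)
{
        if (!nbuf)
                return;
        free(nbuf->databuf);
        free(nbuf->ecccode);
        free(nbuf->ecccalc);
        free(nbuf);
}

int main(void)
{
        struct demo_buffers *nbuf = demo_buffers_alloc(4096, 224);

        if (!nbuf) {
                fprintf(stderr, "allocation failed\n");
                return 1;
        }
        printf("databuf=%p ecccalc=%p ecccode=%p\n",
               (void *)nbuf->databuf, (void *)nbuf->ecccalc,
               (void *)nbuf->ecccode);
        demo_buffers_free(nbuf);
        return 0;
}

The error and teardown paths free each member unconditionally, mirroring the patch: free(NULL), like kfree(NULL), is a no-op, so a partially initialized structure unwinds safely.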
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index de6c8045c85b..c796d0e4039a 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -4495,7 +4495,7 @@ int nand_scan_tail(struct mtd_info *mtd)
 {
         struct nand_chip *chip = mtd_to_nand(mtd);
         struct nand_ecc_ctrl *ecc = &chip->ecc;
-        struct nand_buffers *nbuf;
+        struct nand_buffers *nbuf = NULL;
         int ret;

         /* New bad blocks should be marked in OOB, flash-based BBT, or both */
@@ -4509,13 +4509,28 @@ int nand_scan_tail(struct mtd_info *mtd)
         }

         if (!(chip->options & NAND_OWN_BUFFERS)) {
-                nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize
-                                + mtd->oobsize * 3, GFP_KERNEL);
+                nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL);
                 if (!nbuf)
                         return -ENOMEM;
-                nbuf->ecccalc = (uint8_t *)(nbuf + 1);
-                nbuf->ecccode = nbuf->ecccalc + mtd->oobsize;
-                nbuf->databuf = nbuf->ecccode + mtd->oobsize;
+
+                nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL);
+                if (!nbuf->ecccalc) {
+                        ret = -ENOMEM;
+                        goto err_free;
+                }
+
+                nbuf->ecccode = kmalloc(mtd->oobsize, GFP_KERNEL);
+                if (!nbuf->ecccode) {
+                        ret = -ENOMEM;
+                        goto err_free;
+                }
+
+                nbuf->databuf = kmalloc(mtd->writesize + mtd->oobsize,
+                                        GFP_KERNEL);
+                if (!nbuf->databuf) {
+                        ret = -ENOMEM;
+                        goto err_free;
+                }

                 chip->buffers = nbuf;
         } else {
@@ -4755,8 +4770,12 @@ int nand_scan_tail(struct mtd_info *mtd)
         /* Build bad block table */
         return chip->scan_bbt(mtd);
 err_free:
-        if (!(chip->options & NAND_OWN_BUFFERS))
-                kfree(chip->buffers);
+        if (nbuf) {
+                kfree(nbuf->databuf);
+                kfree(nbuf->ecccode);
+                kfree(nbuf->ecccalc);
+                kfree(nbuf);
+        }
         return ret;
 }
 EXPORT_SYMBOL(nand_scan_tail);
@@ -4807,8 +4826,12 @@ void nand_cleanup(struct nand_chip *chip)
         /* Free bad block table memory */
         kfree(chip->bbt);
-        if (!(chip->options & NAND_OWN_BUFFERS))
+        if (!(chip->options & NAND_OWN_BUFFERS) && chip->buffers) {
+                kfree(chip->buffers->databuf);
+                kfree(chip->buffers->ecccode);
+                kfree(chip->buffers->ecccalc);
                 kfree(chip->buffers);
+        }

         /* Free bad block descriptor memory */
         if (chip->badblock_pattern && chip->badblock_pattern->options