Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/mtdchar.c            |  48
-rw-r--r--  drivers/mtd/mtdoops.c            |   4
-rw-r--r--  drivers/mtd/mtdpart.c            |  12
-rw-r--r--  drivers/mtd/nand/Kconfig         |  40
-rw-r--r--  drivers/mtd/nand/ams-delta.c     |  10
-rw-r--r--  drivers/mtd/nand/bcm_umi_nand.c  |  12
-rw-r--r--  drivers/mtd/nand/davinci_nand.c  |   4
-rw-r--r--  drivers/mtd/nand/fsl_ifc_nand.c  |  56
-rw-r--r--  drivers/mtd/nand/mxc_nand.c      |   2
-rw-r--r--  drivers/mtd/nand/nand_bcm_umi.h  |  73
-rw-r--r--  drivers/mtd/nand/nomadik_nand.c  |   2
-rw-r--r--  drivers/mtd/nand/omap2.c         | 303
-rw-r--r--  drivers/mtd/nand/orion_nand.c    |   2
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c   |  89
-rw-r--r--  drivers/mtd/nand/s3c2410.c       |   2
-rw-r--r--  drivers/mtd/onenand/omap2.c      |  34
-rw-r--r--  drivers/mtd/ubi/Kconfig          |  40
-rw-r--r--  drivers/mtd/ubi/attach.c         |  46
-rw-r--r--  drivers/mtd/ubi/build.c          | 204
-rw-r--r--  drivers/mtd/ubi/cdev.c           |  18
-rw-r--r--  drivers/mtd/ubi/debug.c          | 153
-rw-r--r--  drivers/mtd/ubi/debug.h          |  12
-rw-r--r--  drivers/mtd/ubi/eba.c            |  33
-rw-r--r--  drivers/mtd/ubi/gluebi.c         |  30
-rw-r--r--  drivers/mtd/ubi/io.c             |  80
-rw-r--r--  drivers/mtd/ubi/misc.c           |  14
-rw-r--r--  drivers/mtd/ubi/ubi.h            |  16
-rw-r--r--  drivers/mtd/ubi/vtbl.c           |  10
-rw-r--r--  drivers/mtd/ubi/wl.c             |  48
29 files changed, 865 insertions(+), 532 deletions(-)
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index f2f482bec573..a6e74514e662 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1123,6 +1123,33 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,
}
#endif
+static inline unsigned long get_vm_size(struct vm_area_struct *vma)
+{
+ return vma->vm_end - vma->vm_start;
+}
+
+static inline resource_size_t get_vm_offset(struct vm_area_struct *vma)
+{
+ return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT;
+}
+
+/*
+ * Set a new vm offset.
+ *
+ * Verify that the incoming offset really works as a page offset,
+ * and that the offset and size fit in a resource_size_t.
+ */
+static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off)
+{
+ pgoff_t pgoff = off >> PAGE_SHIFT;
+ if (off != (resource_size_t) pgoff << PAGE_SHIFT)
+ return -EINVAL;
+ if (off + get_vm_size(vma) - 1 < off)
+ return -EINVAL;
+ vma->vm_pgoff = pgoff;
+ return 0;
+}
+
/*
* set up a mapping for shared memory segments
*/
@@ -1132,20 +1159,29 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
struct map_info *map = mtd->priv;
- unsigned long start;
- unsigned long off;
- u32 len;
+ resource_size_t start, off;
+ unsigned long len, vma_len;
if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
- off = vma->vm_pgoff << PAGE_SHIFT;
+ off = get_vm_offset(vma);
start = map->phys;
len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
start &= PAGE_MASK;
- if ((vma->vm_end - vma->vm_start + off) > len)
+ vma_len = get_vm_size(vma);
+
+ /* Overflow in off+len? */
+ if (vma_len + off < off)
+ return -EINVAL;
+ /* Does it fit in the mapping? */
+ if (vma_len + off > len)
return -EINVAL;
off += start;
- vma->vm_pgoff = off >> PAGE_SHIFT;
+ /* Did that overflow? */
+ if (off < start)
+ return -EINVAL;
+ if (set_vm_offset(vma, off) < 0)
+ return -EINVAL;
vma->vm_flags |= VM_IO | VM_RESERVED;
#ifdef pgprot_noncached
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 551e316e4454..438737a1f59a 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -387,8 +387,8 @@ static void mtdoops_notify_remove(struct mtd_info *mtd)
printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
cxt->mtd = NULL;
- flush_work_sync(&cxt->work_erase);
- flush_work_sync(&cxt->work_write);
+ flush_work(&cxt->work_erase);
+ flush_work(&cxt->work_write);
}
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index d518e4db8a0b..3a49e6de5e60 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -744,7 +744,7 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
return ret;
}
-int mtd_is_partition(struct mtd_info *mtd)
+int mtd_is_partition(const struct mtd_info *mtd)
{
struct mtd_part *part;
int ispart = 0;
@@ -760,3 +760,13 @@ int mtd_is_partition(struct mtd_info *mtd)
return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);
+
+/* Returns the size of the entire flash chip */
+uint64_t mtd_get_device_size(const struct mtd_info *mtd)
+{
+ if (!mtd_is_partition(mtd))
+ return mtd->size;
+
+ return PART(mtd)->master->size;
+}
+EXPORT_SYMBOL_GPL(mtd_get_device_size);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 8ca417614c57..598cd0a3adee 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -406,46 +406,6 @@ config MTD_NAND_ATMEL
help
Enables support for NAND Flash / Smart Media Card interface
on Atmel AT91 and AVR32 processors.
-choice
- prompt "ECC management for NAND Flash / SmartMedia on AT91 / AVR32"
- depends on MTD_NAND_ATMEL
-
-config MTD_NAND_ATMEL_ECC_HW
- bool "Hardware ECC"
- depends on ARCH_AT91SAM9263 || ARCH_AT91SAM9260 || AVR32
- help
- Use hardware ECC instead of software ECC when the chip
- supports it.
-
- The hardware ECC controller is capable of single bit error
- correction and 2-bit random detection per page.
-
- NB : hardware and software ECC schemes are incompatible.
- If you switch from one to another, you'll have to erase your
- mtd partition.
-
- If unsure, say Y
-
-config MTD_NAND_ATMEL_ECC_SOFT
- bool "Software ECC"
- help
- Use software ECC.
-
- NB : hardware and software ECC schemes are incompatible.
- If you switch from one to another, you'll have to erase your
- mtd partition.
-
-config MTD_NAND_ATMEL_ECC_NONE
- bool "No ECC (testing only, DANGEROUS)"
- depends on DEBUG_KERNEL
- help
- No ECC will be used.
- It's not a good idea and it should be reserved for testing
- purpose only.
-
- If unsure, say N
-
-endchoice
config MTD_NAND_PXA3xx
tristate "Support for NAND flash devices on PXA3xx"
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 861ca8f7e47d..a7040af08536 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -23,11 +23,15 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/gpio.h>
+#include <linux/platform_data/gpio-omap.h>
+
#include <asm/io.h>
-#include <mach/hardware.h>
#include <asm/sizes.h>
-#include <linux/gpio.h>
-#include <plat/board-ams-delta.h>
+
+#include <mach/board-ams-delta.h>
+
+#include <mach/hardware.h>
/*
* MTD structure for E3 (Delta)
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index c855e7cd337b..d0d1bd4d0e7d 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -249,20 +249,20 @@ static int nand_dev_ready(struct mtd_info *mtd)
int bcm_umi_nand_inithw(void)
{
/* Configure nand timing parameters */
- REG_UMI_NAND_TCR &= ~0x7ffff;
- REG_UMI_NAND_TCR |= HW_CFG_NAND_TCR;
+ writel(readl(&REG_UMI_NAND_TCR) & ~0x7ffff, &REG_UMI_NAND_TCR);
+ writel(readl(&REG_UMI_NAND_TCR) | HW_CFG_NAND_TCR, &REG_UMI_NAND_TCR);
#if !defined(CONFIG_MTD_NAND_BCM_UMI_HWCS)
/* enable software control of CS */
- REG_UMI_NAND_TCR |= REG_UMI_NAND_TCR_CS_SWCTRL;
+ writel(readl(&REG_UMI_NAND_TCR) | REG_UMI_NAND_TCR_CS_SWCTRL, &REG_UMI_NAND_TCR);
#endif
/* keep NAND chip select asserted */
- REG_UMI_NAND_RCSR |= REG_UMI_NAND_RCSR_CS_ASSERTED;
+ writel(readl(&REG_UMI_NAND_RCSR) | REG_UMI_NAND_RCSR_CS_ASSERTED, &REG_UMI_NAND_RCSR);
- REG_UMI_NAND_TCR &= ~REG_UMI_NAND_TCR_WORD16;
+ writel(readl(&REG_UMI_NAND_TCR) & ~REG_UMI_NAND_TCR_WORD16, &REG_UMI_NAND_TCR);
/* enable writes to flash */
- REG_UMI_MMD_ICR |= REG_UMI_MMD_ICR_FLASH_WP;
+ writel(readl(&REG_UMI_MMD_ICR) | REG_UMI_MMD_ICR_FLASH_WP, &REG_UMI_MMD_ICR);
writel(NAND_CMD_RESET, bcm_umi_io_base + REG_NAND_CMD_OFFSET);
nand_bcm_umi_wait_till_ready();
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index d94b03c207af..f1deb1ee2c95 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -34,8 +34,8 @@
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
-#include <mach/nand.h>
-#include <mach/aemif.h>
+#include <linux/platform_data/mtd-davinci.h>
+#include <linux/platform_data/mtd-davinci-aemif.h>
/*
* This is a device driver for the NAND flash controller found on the
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 9602c1b7e27e..01e2f2e87d8c 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -31,6 +31,7 @@
#include <linux/mtd/nand_ecc.h>
#include <asm/fsl_ifc.h>
+#define FSL_IFC_V1_1_0 0x01010000
#define ERR_BYTE 0xFF /* Value returned for read
bytes when read failed */
#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait
@@ -773,13 +774,62 @@ static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
return 0;
}
+static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
+{
+ struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ uint32_t csor = 0, csor_8k = 0, csor_ext = 0;
+ uint32_t cs = priv->bank;
+
+ /* Save CSOR and CSOR_ext */
+ csor = in_be32(&ifc->csor_cs[cs].csor);
+ csor_ext = in_be32(&ifc->csor_cs[cs].csor_ext);
+
+ /* change PageSize to 8K and SpareSize to 1K */
+ csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
+ out_be32(&ifc->csor_cs[cs].csor, csor_8k);
+ out_be32(&ifc->csor_cs[cs].csor_ext, 0x0000400);
+
+ /* READID */
+ out_be32(&ifc->ifc_nand.nand_fir0,
+ (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
+ out_be32(&ifc->ifc_nand.nand_fcr0,
+ NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT);
+ out_be32(&ifc->ifc_nand.row3, 0x0);
+
+ out_be32(&ifc->ifc_nand.nand_fbcr, 0x0);
+
+ /* Program ROW0/COL0 */
+ out_be32(&ifc->ifc_nand.row0, 0x0);
+ out_be32(&ifc->ifc_nand.col0, 0x0);
+
+ /* set the chip select for NAND Transaction */
+ out_be32(&ifc->ifc_nand.nand_csel, cs << IFC_NAND_CSEL_SHIFT);
+
+ /* start read seq */
+ out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT);
+
+ /* wait for command complete flag or timeout */
+ wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
+ IFC_TIMEOUT_MSECS * HZ/1000);
+
+ if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
+ printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
+
+ /* Restore CSOR and CSOR_ext */
+ out_be32(&ifc->csor_cs[cs].csor, csor);
+ out_be32(&ifc->csor_cs[cs].csor_ext, csor_ext);
+}
+
static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
{
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
struct nand_chip *chip = &priv->chip;
struct nand_ecclayout *layout;
- u32 csor;
+ u32 csor, ver;
/* Fill in fsl_ifc_mtd structure */
priv->mtd.priv = chip;
@@ -874,6 +924,10 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->ecc.mode = NAND_ECC_SOFT;
}
+ ver = in_be32(&ifc->ifc_rev);
+ if (ver == FSL_IFC_V1_1_0)
+ fsl_ifc_sram_init(priv);
+
return 0;
}
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 6acc790c2fbb..5683604967d7 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -36,7 +36,7 @@
#include <linux/of_mtd.h>
#include <asm/mach/flash.h>
-#include <mach/mxc_nand.h>
+#include <linux/platform_data/mtd-mxc_nand.h>
#include <mach/hardware.h>
#define DRIVER_NAME "mxc_nand"
diff --git a/drivers/mtd/nand/nand_bcm_umi.h b/drivers/mtd/nand/nand_bcm_umi.h
index 198b304d6f72..d90186684db8 100644
--- a/drivers/mtd/nand/nand_bcm_umi.h
+++ b/drivers/mtd/nand/nand_bcm_umi.h
@@ -17,7 +17,7 @@
/* ---- Include Files ---------------------------------------------------- */
#include <mach/reg_umi.h>
#include <mach/reg_nand.h>
-#include <cfg_global.h>
+#include <mach/cfg_global.h>
/* ---- Constants and Types ---------------------------------------------- */
#if (CFG_GLOBAL_CHIP_FAMILY == CFG_GLOBAL_CHIP_FAMILY_BCMRING)
@@ -48,7 +48,7 @@ int nand_bcm_umi_bch_correct_page(uint8_t *datap, uint8_t *readEccData,
/* Check in device is ready */
static inline int nand_bcm_umi_dev_ready(void)
{
- return REG_UMI_NAND_RCSR & REG_UMI_NAND_RCSR_RDY;
+ return readl(&REG_UMI_NAND_RCSR) & REG_UMI_NAND_RCSR_RDY;
}
/* Wait until device is ready */
@@ -62,10 +62,11 @@ static inline void nand_bcm_umi_wait_till_ready(void)
static inline void nand_bcm_umi_hamming_enable_hwecc(void)
{
/* disable and reset ECC, 512 byte page */
- REG_UMI_NAND_ECC_CSR &= ~(REG_UMI_NAND_ECC_CSR_ECC_ENABLE |
- REG_UMI_NAND_ECC_CSR_256BYTE);
+ writel(readl(&REG_UMI_NAND_ECC_CSR) & ~(REG_UMI_NAND_ECC_CSR_ECC_ENABLE |
+ REG_UMI_NAND_ECC_CSR_256BYTE), &REG_UMI_NAND_ECC_CSR);
/* enable ECC */
- REG_UMI_NAND_ECC_CSR |= REG_UMI_NAND_ECC_CSR_ECC_ENABLE;
+ writel(readl(&REG_UMI_NAND_ECC_CSR) | REG_UMI_NAND_ECC_CSR_ECC_ENABLE,
+ &REG_UMI_NAND_ECC_CSR);
}
#if NAND_ECC_BCH
@@ -76,18 +77,18 @@ static inline void nand_bcm_umi_hamming_enable_hwecc(void)
static inline void nand_bcm_umi_bch_enable_read_hwecc(void)
{
/* disable and reset ECC */
- REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID;
+ writel(REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID, &REG_UMI_BCH_CTRL_STATUS);
/* Turn on ECC */
- REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN;
+ writel(REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN, &REG_UMI_BCH_CTRL_STATUS);
}
/* Enable BCH Write ECC */
static inline void nand_bcm_umi_bch_enable_write_hwecc(void)
{
/* disable and reset ECC */
- REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID;
+ writel(REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID, &REG_UMI_BCH_CTRL_STATUS);
/* Turn on ECC */
- REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_WR_EN;
+ writel(REG_UMI_BCH_CTRL_STATUS_ECC_WR_EN, &REG_UMI_BCH_CTRL_STATUS);
}
/* Config number of BCH ECC bytes */
@@ -99,9 +100,9 @@ static inline void nand_bcm_umi_bch_config_ecc(uint8_t numEccBytes)
uint32_t numBits = numEccBytes * 8;
/* disable and reset ECC */
- REG_UMI_BCH_CTRL_STATUS =
- REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID |
- REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID;
+ writel(REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID |
+ REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID,
+ &REG_UMI_BCH_CTRL_STATUS);
/* Every correctible bit requires 13 ECC bits */
tValue = (uint32_t) (numBits / ECC_BITS_PER_CORRECTABLE_BIT);
@@ -113,23 +114,21 @@ static inline void nand_bcm_umi_bch_config_ecc(uint8_t numEccBytes)
kValue = nValue - (tValue * ECC_BITS_PER_CORRECTABLE_BIT);
/* Write the settings */
- REG_UMI_BCH_N = nValue;
- REG_UMI_BCH_T = tValue;
- REG_UMI_BCH_K = kValue;
+ writel(nValue, &REG_UMI_BCH_N);
+ writel(tValue, &REG_UMI_BCH_T);
+ writel(kValue, &REG_UMI_BCH_K);
}
/* Pause during ECC read calculation to skip bytes in OOB */
static inline void nand_bcm_umi_bch_pause_read_ecc_calc(void)
{
- REG_UMI_BCH_CTRL_STATUS =
- REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN |
- REG_UMI_BCH_CTRL_STATUS_PAUSE_ECC_DEC;
+ writel(REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN | REG_UMI_BCH_CTRL_STATUS_PAUSE_ECC_DEC, &REG_UMI_BCH_CTRL_STATUS);
}
/* Resume during ECC read calculation after skipping bytes in OOB */
static inline void nand_bcm_umi_bch_resume_read_ecc_calc(void)
{
- REG_UMI_BCH_CTRL_STATUS = REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN;
+ writel(REG_UMI_BCH_CTRL_STATUS_ECC_RD_EN, &REG_UMI_BCH_CTRL_STATUS);
}
/* Poll read ECC calc to check when hardware completes */
@@ -139,7 +138,7 @@ static inline uint32_t nand_bcm_umi_bch_poll_read_ecc_calc(void)
do {
/* wait for ECC to be valid */
- regVal = REG_UMI_BCH_CTRL_STATUS;
+ regVal = readl(&REG_UMI_BCH_CTRL_STATUS);
} while ((regVal & REG_UMI_BCH_CTRL_STATUS_RD_ECC_VALID) == 0);
return regVal;
@@ -149,7 +148,7 @@ static inline uint32_t nand_bcm_umi_bch_poll_read_ecc_calc(void)
static inline void nand_bcm_umi_bch_poll_write_ecc_calc(void)
{
/* wait for ECC to be valid */
- while ((REG_UMI_BCH_CTRL_STATUS & REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID)
+ while ((readl(&REG_UMI_BCH_CTRL_STATUS) & REG_UMI_BCH_CTRL_STATUS_WR_ECC_VALID)
== 0)
;
}
@@ -170,9 +169,9 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
if (pageSize != NAND_DATA_ACCESS_SIZE) {
/* skip BI */
#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp++ = REG_NAND_DATA8;
+ *oobp++ = readb(&REG_NAND_DATA8);
#else
- REG_NAND_DATA8;
+ readb(&REG_NAND_DATA8);
#endif
numToRead--;
}
@@ -180,9 +179,9 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
while (numToRead > numEccBytes) {
/* skip free oob region */
#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp++ = REG_NAND_DATA8;
+ *oobp++ = readb(&REG_NAND_DATA8);
#else
- REG_NAND_DATA8;
+ readb(&REG_NAND_DATA8);
#endif
numToRead--;
}
@@ -193,11 +192,11 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
while (numToRead > 11) {
#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp = REG_NAND_DATA8;
+ *oobp = readb(&REG_NAND_DATA8);
eccCalc[eccPos++] = *oobp;
oobp++;
#else
- eccCalc[eccPos++] = REG_NAND_DATA8;
+ eccCalc[eccPos++] = readb(&REG_NAND_DATA8);
#endif
numToRead--;
}
@@ -207,9 +206,9 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
if (numToRead == 11) {
/* read BI */
#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp++ = REG_NAND_DATA8;
+ *oobp++ = readb(&REG_NAND_DATA8);
#else
- REG_NAND_DATA8;
+ readb(&REG_NAND_DATA8);
#endif
numToRead--;
}
@@ -219,11 +218,11 @@ static inline void nand_bcm_umi_bch_read_oobEcc(uint32_t pageSize,
nand_bcm_umi_bch_resume_read_ecc_calc();
while (numToRead) {
#if defined(__KERNEL__) && !defined(STANDALONE)
- *oobp = REG_NAND_DATA8;
+ *oobp = readb(&REG_NAND_DATA8);
eccCalc[eccPos++] = *oobp;
oobp++;
#else
- eccCalc[eccPos++] = REG_NAND_DATA8;
+ eccCalc[eccPos++] = readb(&REG_NAND_DATA8);
#endif
numToRead--;
}
@@ -255,7 +254,7 @@ static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize,
if (pageSize == NAND_DATA_ACCESS_SIZE) {
/* Now fill in the ECC bytes */
if (numEccBytes >= 13)
- eccVal = REG_UMI_BCH_WR_ECC_3;
+ eccVal = readl(&REG_UMI_BCH_WR_ECC_3);
/* Usually we skip CM in oob[0,1] */
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[0],
@@ -268,7 +267,7 @@ static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize,
eccVal & 0xff); /* ECC 12 */
if (numEccBytes >= 9)
- eccVal = REG_UMI_BCH_WR_ECC_2;
+ eccVal = readl(&REG_UMI_BCH_WR_ECC_2);
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[3],
(eccVal >> 24) & 0xff); /* ECC11 */
@@ -281,7 +280,7 @@ static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize,
/* Now fill in the ECC bytes */
if (numEccBytes >= 13)
- eccVal = REG_UMI_BCH_WR_ECC_3;
+ eccVal = readl(&REG_UMI_BCH_WR_ECC_3);
/* Usually skip CM in oob[1,2] */
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 15, &oobp[1],
@@ -294,7 +293,7 @@ static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize,
eccVal & 0xff); /* ECC12 */
if (numEccBytes >= 9)
- eccVal = REG_UMI_BCH_WR_ECC_2;
+ eccVal = readl(&REG_UMI_BCH_WR_ECC_2);
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 12, &oobp[4],
(eccVal >> 24) & 0xff); /* ECC11 */
@@ -309,7 +308,7 @@ static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize,
eccVal & 0xff); /* ECC8 */
if (numEccBytes >= 5)
- eccVal = REG_UMI_BCH_WR_ECC_1;
+ eccVal = readl(&REG_UMI_BCH_WR_ECC_1);
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 8, &oobp[8],
(eccVal >> 24) & 0xff); /* ECC7 */
@@ -321,7 +320,7 @@ static inline void nand_bcm_umi_bch_write_oobEcc(uint32_t pageSize,
eccVal & 0xff); /* ECC4 */
if (numEccBytes >= 1)
- eccVal = REG_UMI_BCH_WR_ECC_0;
+ eccVal = readl(&REG_UMI_BCH_WR_ECC_0);
NAND_BCM_UMI_ECC_WRITE(numEccBytes, 4, &oobp[12],
(eccVal >> 24) & 0xff); /* ECC3 */
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index a86aa812ca13..9ee0c4edfacf 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -31,7 +31,7 @@
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <mach/nand.h>
+#include <linux/platform_data/mtd-nomadik-nand.h>
#include <mach/fsmc.h>
#include <mtd/mtd-abi.h>
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index ac4fd756eda3..fc8111278d12 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -29,7 +29,7 @@
#include <plat/dma.h>
#include <plat/gpmc.h>
-#include <plat/nand.h>
+#include <linux/platform_data/mtd-nand-omap2.h>
#define DRIVER_NAME "omap2-nand"
#define OMAP_NAND_TIMEOUT_MS 5000
@@ -101,6 +101,16 @@
#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
+#define PREFETCH_CONFIG1_CS_SHIFT 24
+#define ECC_CONFIG_CS_SHIFT 1
+#define CS_MASK 0x7
+#define ENABLE_PREFETCH (0x1 << 7)
+#define DMA_MPU_MODE_SHIFT 2
+#define ECCSIZE1_SHIFT 22
+#define ECC1RESULTSIZE 0x1
+#define ECCCLEAR 0x100
+#define ECC1 0x1
+
/* oob info generated runtime depending on ecc algorithm and layout selected */
static struct nand_ecclayout omap_oobinfo;
/* Define some generic bad / good block scan pattern which are used
@@ -124,15 +134,18 @@ struct omap_nand_info {
int gpmc_cs;
unsigned long phys_base;
+ unsigned long mem_size;
struct completion comp;
struct dma_chan *dma;
- int gpmc_irq;
+ int gpmc_irq_fifo;
+ int gpmc_irq_count;
enum {
OMAP_NAND_IO_READ = 0, /* read */
OMAP_NAND_IO_WRITE, /* write */
} iomode;
u_char *buf;
int buf_len;
+ struct gpmc_nand_regs reg;
#ifdef CONFIG_MTD_NAND_OMAP_BCH
struct bch_control *bch;
@@ -141,6 +154,63 @@ struct omap_nand_info {
};
/**
+ * omap_prefetch_enable - configures and starts prefetch transfer
+ * @cs: cs (chip select) number
+ * @fifo_th: fifo threshold to be used for read/ write
+ * @dma_mode: dma mode enable (1) or disable (0)
+ * @u32_count: number of bytes to be transferred
+ * @is_write: prefetch read(0) or write post(1) mode
+ */
+static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
+ unsigned int u32_count, int is_write, struct omap_nand_info *info)
+{
+ u32 val;
+
+ if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
+ return -1;
+
+ if (readl(info->reg.gpmc_prefetch_control))
+ return -EBUSY;
+
+ /* Set the amount of bytes to be prefetched */
+ writel(u32_count, info->reg.gpmc_prefetch_config2);
+
+ /* Set dma/mpu mode, the prefetch read / post write and
+ * enable the engine. Set which cs has requested it.
+ */
+ val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
+ PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
+ (dma_mode << DMA_MPU_MODE_SHIFT) | (0x1 & is_write));
+ writel(val, info->reg.gpmc_prefetch_config1);
+
+ /* Start the prefetch engine */
+ writel(0x1, info->reg.gpmc_prefetch_control);
+
+ return 0;
+}
+
+/**
+ * omap_prefetch_reset - disables and stops the prefetch engine
+ */
+static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
+{
+ u32 config1;
+
+ /* check if the same module/cs is trying to reset */
+ config1 = readl(info->reg.gpmc_prefetch_config1);
+ if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
+ return -EINVAL;
+
+ /* Stop the PFPW engine */
+ writel(0x0, info->reg.gpmc_prefetch_control);
+
+ /* Reset/disable the PFPW engine */
+ writel(0x0, info->reg.gpmc_prefetch_config1);
+
+ return 0;
+}
+
+/**
* omap_hwcontrol - hardware specific access to control-lines
* @mtd: MTD device structure
* @cmd: command to device
@@ -158,13 +228,13 @@ static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
if (cmd != NAND_CMD_NONE) {
if (ctrl & NAND_CLE)
- gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);
+ writeb(cmd, info->reg.gpmc_nand_command);
else if (ctrl & NAND_ALE)
- gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);
+ writeb(cmd, info->reg.gpmc_nand_address);
else /* NAND_NCE */
- gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
+ writeb(cmd, info->reg.gpmc_nand_data);
}
}
@@ -198,7 +268,8 @@ static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
iowrite8(*p++, info->nand.IO_ADDR_W);
/* wait until buffer is available for write */
do {
- status = gpmc_read_status(GPMC_STATUS_BUFFER);
+ status = readl(info->reg.gpmc_status) &
+ GPMC_STATUS_BUFF_EMPTY;
} while (!status);
}
}
@@ -235,7 +306,8 @@ static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
iowrite16(*p++, info->nand.IO_ADDR_W);
/* wait until buffer is available for write */
do {
- status = gpmc_read_status(GPMC_STATUS_BUFFER);
+ status = readl(info->reg.gpmc_status) &
+ GPMC_STATUS_BUFF_EMPTY;
} while (!status);
}
}
@@ -265,8 +337,8 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
}
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
@@ -275,14 +347,15 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
omap_read_buf8(mtd, (u_char *)p, len);
} else {
do {
- r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ r_count = readl(info->reg.gpmc_prefetch_status);
+ r_count = GPMC_PREFETCH_STATUS_FIFO_CNT(r_count);
r_count = r_count >> 2;
ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
p += r_count;
len -= r_count << 2;
} while (len);
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset(info->gpmc_cs);
+ omap_prefetch_reset(info->gpmc_cs, info);
}
}
@@ -301,6 +374,7 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
int i = 0, ret = 0;
u16 *p = (u16 *)buf;
unsigned long tim, limit;
+ u32 val;
/* take care of subpage writes */
if (len % 2 != 0) {
@@ -310,8 +384,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
}
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
if (ret) {
/* PFPW engine is busy, use cpu copy method */
if (info->nand.options & NAND_BUSWIDTH_16)
@@ -320,7 +394,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
omap_write_buf8(mtd, (u_char *)p, len);
} else {
while (len) {
- w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ w_count = readl(info->reg.gpmc_prefetch_status);
+ w_count = GPMC_PREFETCH_STATUS_FIFO_CNT(w_count);
w_count = w_count >> 1;
for (i = 0; (i < w_count) && len; i++, len -= 2)
iowrite16(*p++, info->nand.IO_ADDR_W);
@@ -329,11 +404,14 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
tim = 0;
limit = (loops_per_jiffy *
msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
- while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ do {
cpu_relax();
+ val = readl(info->reg.gpmc_prefetch_status);
+ val = GPMC_PREFETCH_STATUS_COUNT(val);
+ } while (val && (tim++ < limit));
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset(info->gpmc_cs);
+ omap_prefetch_reset(info->gpmc_cs, info);
}
}
@@ -365,6 +443,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
unsigned long tim, limit;
unsigned n;
int ret;
+ u32 val;
if (addr >= high_memory) {
struct page *p1;
@@ -396,9 +475,9 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
tx->callback_param = &info->comp;
dmaengine_submit(tx);
- /* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
+ /* configure and start prefetch transfer */
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
if (ret)
/* PFPW engine is busy, use cpu copy method */
goto out_copy_unmap;
@@ -410,11 +489,15 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
wait_for_completion(&info->comp);
tim = 0;
limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
- while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+
+ do {
cpu_relax();
+ val = readl(info->reg.gpmc_prefetch_status);
+ val = GPMC_PREFETCH_STATUS_COUNT(val);
+ } while (val && (tim++ < limit));
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset(info->gpmc_cs);
+ omap_prefetch_reset(info->gpmc_cs, info);
dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
return 0;
@@ -471,13 +554,12 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
{
struct omap_nand_info *info = (struct omap_nand_info *) dev;
u32 bytes;
- u32 irq_stat;
- irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
- bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+ bytes = readl(info->reg.gpmc_prefetch_status);
+ bytes = GPMC_PREFETCH_STATUS_FIFO_CNT(bytes);
bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
- if (irq_stat & 0x2)
+ if (this_irq == info->gpmc_irq_count)
goto done;
if (info->buf_len && (info->buf_len < bytes))
@@ -494,20 +576,17 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
(u32 *)info->buf, bytes >> 2);
info->buf = info->buf + bytes;
- if (irq_stat & 0x2)
+ if (this_irq == info->gpmc_irq_count)
goto done;
}
- gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
return IRQ_HANDLED;
done:
complete(&info->comp);
- /* disable irq */
- gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
- /* clear status */
- gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+ disable_irq_nosync(info->gpmc_irq_fifo);
+ disable_irq_nosync(info->gpmc_irq_count);
return IRQ_HANDLED;
}
@@ -534,22 +613,22 @@ static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
init_completion(&info->comp);
/* configure and start prefetch transfer */
- ret = gpmc_prefetch_enable(info->gpmc_cs,
- PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
if (ret)
/* PFPW engine is busy, use cpu copy method */
goto out_copy;
info->buf_len = len;
- /* enable irq */
- gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
- (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+ enable_irq(info->gpmc_irq_count);
+ enable_irq(info->gpmc_irq_fifo);
/* waiting for read to complete */
wait_for_completion(&info->comp);
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset(info->gpmc_cs);
+ omap_prefetch_reset(info->gpmc_cs, info);
return;
out_copy:
@@ -572,6 +651,7 @@ static void omap_write_buf_irq_pref(struct mtd_info *mtd,
struct omap_nand_info, mtd);
int ret = 0;
unsigned long tim, limit;
+ u32 val;
if (len <= mtd->oobsize) {
omap_write_buf_pref(mtd, buf, len);
@@ -583,27 +663,31 @@ static void omap_write_buf_irq_pref(struct mtd_info *mtd,
init_completion(&info->comp);
/* configure and start prefetch transfer : size=24 */
- ret = gpmc_prefetch_enable(info->gpmc_cs,
- (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
+ ret = omap_prefetch_enable(info->gpmc_cs,
+ (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
if (ret)
/* PFPW engine is busy, use cpu copy method */
goto out_copy;
info->buf_len = len;
- /* enable irq */
- gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
- (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+ enable_irq(info->gpmc_irq_count);
+ enable_irq(info->gpmc_irq_fifo);
/* waiting for write to complete */
wait_for_completion(&info->comp);
+
/* wait for data to flushed-out before reset the prefetch */
tim = 0;
limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
- while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+ do {
+ val = readl(info->reg.gpmc_prefetch_status);
+ val = GPMC_PREFETCH_STATUS_COUNT(val);
cpu_relax();
+ } while (val && (tim++ < limit));
/* disable and stop the PFPW engine */
- gpmc_prefetch_reset(info->gpmc_cs);
+ omap_prefetch_reset(info->gpmc_cs, info);
return;
out_copy:
@@ -843,7 +927,20 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
{
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
- return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code);
+ u32 val;
+
+ val = readl(info->reg.gpmc_ecc_config);
+ if (((val >> ECC_CONFIG_CS_SHIFT) & ~CS_MASK) != info->gpmc_cs)
+ return -EINVAL;
+
+ /* read ecc result */
+ val = readl(info->reg.gpmc_ecc1_result);
+ *ecc_code++ = val; /* P128e, ..., P1e */
+ *ecc_code++ = val >> 16; /* P128o, ..., P1o */
+ /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
+ *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
+
+ return 0;
}
/**
@@ -857,8 +954,34 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
mtd);
struct nand_chip *chip = mtd->priv;
unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+ u32 val;
+
+ /* clear ecc and enable bits */
+ val = ECCCLEAR | ECC1;
+ writel(val, info->reg.gpmc_ecc_control);
+
+ /* program ecc and result sizes */
+ val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
+ ECC1RESULTSIZE);
+ writel(val, info->reg.gpmc_ecc_size_config);
- gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
+ switch (mode) {
+ case NAND_ECC_READ:
+ case NAND_ECC_WRITE:
+ writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
+ break;
+ case NAND_ECC_READSYN:
+ writel(ECCCLEAR, info->reg.gpmc_ecc_control);
+ break;
+ default:
+ dev_info(&info->pdev->dev,
+ "error: unrecognized Mode[%d]!\n", mode);
+ break;
+ }
+
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ writel(val, info->reg.gpmc_ecc_config);
}
/**
@@ -886,10 +1009,9 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
else
timeo += (HZ * 20) / 1000;
- gpmc_nand_write(info->gpmc_cs,
- GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
+ writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
while (time_before(jiffies, timeo)) {
- status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
+ status = readb(info->reg.gpmc_nand_data);
if (status & NAND_STATUS_READY)
break;
cond_resched();
@@ -909,22 +1031,13 @@ static int omap_dev_ready(struct mtd_info *mtd)
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
- val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
+ val = readl(info->reg.gpmc_status);
+
if ((val & 0x100) == 0x100) {
- /* Clear IRQ Interrupt */
- val |= 0x100;
- val &= ~(0x0);
- gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
+ return 1;
} else {
- unsigned int cnt = 0;
- while (cnt++ < 0x1FF) {
- if ((val & 0x100) == 0x100)
- return 0;
- val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
- }
+ return 0;
}
-
- return 1;
}
#ifdef CONFIG_MTD_NAND_OMAP_BCH
@@ -1155,6 +1268,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
int i, offset;
dma_cap_mask_t mask;
unsigned sig;
+ struct resource *res;
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
@@ -1174,7 +1288,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->pdev = pdev;
info->gpmc_cs = pdata->cs;
- info->phys_base = pdata->phys_base;
+ info->reg = pdata->reg;
info->mtd.priv = &info->nand;
info->mtd.name = dev_name(&pdev->dev);
@@ -1183,16 +1297,23 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->nand.options = pdata->devsize;
info->nand.options |= NAND_SKIP_BBTSCAN;
- /* NAND write protect off */
- gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "error getting memory resource\n");
+ goto out_free_info;
+ }
+
+ info->phys_base = res->start;
+ info->mem_size = resource_size(res);
- if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
+ if (!request_mem_region(info->phys_base, info->mem_size,
pdev->dev.driver->name)) {
err = -EBUSY;
goto out_free_info;
}
- info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
+ info->nand.IO_ADDR_R = ioremap(info->phys_base, info->mem_size);
if (!info->nand.IO_ADDR_R) {
err = -ENOMEM;
goto out_release_mem_region;
@@ -1265,17 +1386,39 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
break;
case NAND_OMAP_PREFETCH_IRQ:
- err = request_irq(pdata->gpmc_irq,
- omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
+ info->gpmc_irq_fifo = platform_get_irq(pdev, 0);
+ if (info->gpmc_irq_fifo <= 0) {
+ dev_err(&pdev->dev, "error getting fifo irq\n");
+ err = -ENODEV;
+ goto out_release_mem_region;
+ }
+ err = request_irq(info->gpmc_irq_fifo, omap_nand_irq,
+ IRQF_SHARED, "gpmc-nand-fifo", info);
if (err) {
dev_err(&pdev->dev, "requesting irq(%d) error:%d",
- pdata->gpmc_irq, err);
+ info->gpmc_irq_fifo, err);
+ info->gpmc_irq_fifo = 0;
+ goto out_release_mem_region;
+ }
+
+ info->gpmc_irq_count = platform_get_irq(pdev, 1);
+ if (info->gpmc_irq_count <= 0) {
+ dev_err(&pdev->dev, "error getting count irq\n");
+ err = -ENODEV;
+ goto out_release_mem_region;
+ }
+ err = request_irq(info->gpmc_irq_count, omap_nand_irq,
+ IRQF_SHARED, "gpmc-nand-count", info);
+ if (err) {
+ dev_err(&pdev->dev, "requesting irq(%d) error:%d",
+ info->gpmc_irq_count, err);
+ info->gpmc_irq_count = 0;
goto out_release_mem_region;
- } else {
- info->gpmc_irq = pdata->gpmc_irq;
- info->nand.read_buf = omap_read_buf_irq_pref;
- info->nand.write_buf = omap_write_buf_irq_pref;
}
+
+ info->nand.read_buf = omap_read_buf_irq_pref;
+ info->nand.write_buf = omap_write_buf_irq_pref;
+
break;
default:
@@ -1363,7 +1506,11 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
out_release_mem_region:
if (info->dma)
dma_release_channel(info->dma);
- release_mem_region(info->phys_base, NAND_IO_SIZE);
+ if (info->gpmc_irq_count > 0)
+ free_irq(info->gpmc_irq_count, info);
+ if (info->gpmc_irq_fifo > 0)
+ free_irq(info->gpmc_irq_fifo, info);
+ release_mem_region(info->phys_base, info->mem_size);
out_free_info:
kfree(info);
@@ -1381,8 +1528,10 @@ static int omap_nand_remove(struct platform_device *pdev)
if (info->dma)
dma_release_channel(info->dma);
- if (info->gpmc_irq)
- free_irq(info->gpmc_irq, info);
+ if (info->gpmc_irq_count > 0)
+ free_irq(info->gpmc_irq_count, info);
+ if (info->gpmc_irq_fifo > 0)
+ free_irq(info->gpmc_irq_fifo, info);
/* Release NAND device, its internal structures and partitions */
nand_release(&info->mtd);
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index fc5a868c436e..131b58a133f1 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -22,7 +22,7 @@
#include <asm/io.h>
#include <asm/sizes.h>
#include <mach/hardware.h>
-#include <plat/orion_nand.h>
+#include <linux/platform_data/mtd-orion_nand.h>
static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 252aaefcacfa..c45227173efd 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -22,9 +22,11 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <mach/dma.h>
-#include <plat/pxa3xx_nand.h>
+#include <linux/platform_data/mtd-nand-pxa3xx.h>
#define CHIP_DELAY_TIMEOUT (2 * HZ/10)
#define NAND_STOP_DELAY (2 * HZ/50)
@@ -1032,7 +1034,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
struct pxa3xx_nand_platform_data *pdata;
struct pxa3xx_nand_info *info;
struct pxa3xx_nand_host *host;
- struct nand_chip *chip;
+ struct nand_chip *chip = NULL;
struct mtd_info *mtd;
struct resource *r;
int ret, irq, cs;
@@ -1081,21 +1083,31 @@ static int alloc_nand_resource(struct platform_device *pdev)
}
clk_enable(info->clk);
- r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (r == NULL) {
- dev_err(&pdev->dev, "no resource defined for data DMA\n");
- ret = -ENXIO;
- goto fail_put_clk;
- }
- info->drcmr_dat = r->start;
+ /*
+ * This is a dirty hack to make this driver work from devicetree
+ * bindings. It can be removed once we have a proper DMA controller
+ * framework for DT.
+ */
+ if (pdev->dev.of_node && cpu_is_pxa3xx()) {
+ info->drcmr_dat = 97;
+ info->drcmr_cmd = 99;
+ } else {
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no resource defined for data DMA\n");
+ ret = -ENXIO;
+ goto fail_put_clk;
+ }
+ info->drcmr_dat = r->start;
- r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (r == NULL) {
- dev_err(&pdev->dev, "no resource defined for command DMA\n");
- ret = -ENXIO;
- goto fail_put_clk;
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no resource defined for command DMA\n");
+ ret = -ENXIO;
+ goto fail_put_clk;
+ }
+ info->drcmr_cmd = r->start;
}
- info->drcmr_cmd = r->start;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
@@ -1200,12 +1212,55 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static struct of_device_id pxa3xx_nand_dt_ids[] = {
+ { .compatible = "marvell,pxa3xx-nand" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
+
+static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
+{
+ struct pxa3xx_nand_platform_data *pdata;
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_id =
+ of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
+
+ if (!of_id)
+ return 0;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
+ pdata->enable_arbiter = 1;
+ if (of_get_property(np, "marvell,nand-keep-config", NULL))
+ pdata->keep_config = 1;
+ of_property_read_u32(np, "num-cs", &pdata->num_cs);
+
+ pdev->dev.platform_data = pdata;
+
+ return 0;
+}
+#else
+static inline int pxa3xx_nand_probe_dt(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
static int pxa3xx_nand_probe(struct platform_device *pdev)
{
struct pxa3xx_nand_platform_data *pdata;
+ struct mtd_part_parser_data ppdata = {};
struct pxa3xx_nand_info *info;
int ret, cs, probe_success;
+ ret = pxa3xx_nand_probe_dt(pdev);
+ if (ret)
+ return ret;
+
pdata = pdev->dev.platform_data;
if (!pdata) {
dev_err(&pdev->dev, "no platform data defined\n");
@@ -1229,8 +1284,9 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
continue;
}
+ ppdata.of_node = pdev->dev.of_node;
ret = mtd_device_parse_register(info->host[cs]->mtd, NULL,
- NULL, pdata->parts[cs],
+ &ppdata, pdata->parts[cs],
pdata->nr_parts[cs]);
if (!ret)
probe_success = 1;
@@ -1306,6 +1362,7 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)
static struct platform_driver pxa3xx_nand_driver = {
.driver = {
.name = "pxa3xx-nand",
+ .of_match_table = of_match_ptr(pxa3xx_nand_dt_ids),
},
.probe = pxa3xx_nand_probe,
.remove = pxa3xx_nand_remove,
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 91121f33f743..d8040619ad8d 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -46,7 +46,7 @@
#include <asm/io.h>
#include <plat/regs-nand.h>
-#include <plat/nand.h>
+#include <linux/platform_data/mtd-nand-s3c2410.h>
#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
static int hardware_ecc = 1;
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 398a82783848..1961be985171 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -39,22 +39,21 @@
#include <asm/mach/flash.h>
#include <plat/gpmc.h>
-#include <plat/onenand.h>
+#include <linux/platform_data/mtd-onenand-omap2.h>
#include <asm/gpio.h>
#include <plat/dma.h>
-
-#include <plat/board.h>
+#include <plat/cpu.h>
#define DRIVER_NAME "omap2-onenand"
-#define ONENAND_IO_SIZE SZ_128K
#define ONENAND_BUFRAM_SIZE (1024 * 5)
struct omap2_onenand {
struct platform_device *pdev;
int gpmc_cs;
unsigned long phys_base;
+ unsigned int mem_size;
int gpio_irq;
struct mtd_info mtd;
struct onenand_chip onenand;
@@ -626,6 +625,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
struct omap2_onenand *c;
struct onenand_chip *this;
int r;
+ struct resource *res;
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
@@ -647,20 +647,24 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
c->gpio_irq = 0;
}
- r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
- if (r < 0) {
- dev_err(&pdev->dev, "Cannot request GPMC CS\n");
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ r = -EINVAL;
+ dev_err(&pdev->dev, "error getting memory resource\n");
goto err_kfree;
}
- if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
+ c->phys_base = res->start;
+ c->mem_size = resource_size(res);
+
+ if (request_mem_region(c->phys_base, c->mem_size,
pdev->dev.driver->name) == NULL) {
- dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
- "size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE);
+ dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, size: 0x%x\n",
+ c->phys_base, c->mem_size);
r = -EBUSY;
- goto err_free_cs;
+ goto err_kfree;
}
- c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
+ c->onenand.base = ioremap(c->phys_base, c->mem_size);
if (c->onenand.base == NULL) {
r = -ENOMEM;
goto err_release_mem_region;
@@ -776,9 +780,7 @@ err_release_gpio:
err_iounmap:
iounmap(c->onenand.base);
err_release_mem_region:
- release_mem_region(c->phys_base, ONENAND_IO_SIZE);
-err_free_cs:
- gpmc_cs_free(c->gpmc_cs);
+ release_mem_region(c->phys_base, c->mem_size);
err_kfree:
kfree(c);
@@ -800,7 +802,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
gpio_free(c->gpio_irq);
}
iounmap(c->onenand.base);
- release_mem_region(c->phys_base, ONENAND_IO_SIZE);
+ release_mem_region(c->phys_base, c->mem_size);
gpmc_cs_free(c->gpmc_cs);
kfree(c);
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index ea4b95b5451c..271a842f8c39 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -27,20 +27,34 @@ config MTD_UBI_WL_THRESHOLD
life-cycle less than 10000, the threshold should be lessened (e.g.,
to 128 or 256, although it does not have to be power of 2).
-config MTD_UBI_BEB_RESERVE
- int "Percentage of reserved eraseblocks for bad eraseblocks handling"
- default 2
- range 0 25
+config MTD_UBI_BEB_LIMIT
+ int "Maximum expected bad eraseblock count per 1024 eraseblocks"
+ default 20
+ range 0 768
help
- If the MTD device admits of bad eraseblocks (e.g. NAND flash), UBI
- reserves some amount of physical eraseblocks to handle new bad
- eraseblocks. For example, if a flash physical eraseblock becomes bad,
- UBI uses these reserved physical eraseblocks to relocate the bad one.
- This option specifies how many physical eraseblocks will be reserved
- for bad eraseblock handling (percents of total number of good flash
- eraseblocks). If the underlying flash does not admit of bad
- eraseblocks (e.g. NOR flash), this value is ignored and nothing is
- reserved. Leave the default value if unsure.
+ This option specifies the maximum bad physical eraseblocks UBI
+ expects on the MTD device (per 1024 eraseblocks). If the underlying
+ flash does not admit of bad eraseblocks (e.g. NOR flash), this value
+ is ignored.
+
+ NAND datasheets often specify the minimum and maximum NVB (Number of
+ Valid Blocks) for the flashes' endurance lifetime. The maximum
+ expected bad eraseblocks per 1024 eraseblocks then can be calculated
+ as "1024 * (1 - MinNVB / MaxNVB)", which gives 20 for most NANDs
+ (MaxNVB is basically the total count of eraseblocks on the chip).
+
+ To put it differently, if this value is 20, UBI will try to reserve
+ about 1.9% of physical eraseblocks for bad blocks handling. And that
+ will be 1.9% of eraseblocks on the entire NAND chip, not just the MTD
+ partition UBI attaches. This means that if you have, say, a NAND
+ flash chip which admits a maximum of 40 bad eraseblocks, and it is
+ split into two MTD partitions of the same size, UBI will reserve 40
+ eraseblocks when attaching either partition.
+
+ This option can be overridden by the "mtd=" UBI module parameter or
+ by the "attach" ioctl.
+
+ Leave the default value if unsure.
config MTD_UBI_GLUEBI
tristate "MTD devices emulation driver (gluebi)"
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index bd27cbbb4066..f7adf53e4f45 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -79,7 +79,7 @@
* NAND), it is probably a PEB which was being erased when power cut
* happened, so this is corruption type 1. However, this is just a guess,
* which might be wrong.
- * o Otherwise this it corruption type 2.
+ * o Otherwise this is corruption type 2.
*/
#include <linux/err.h>
@@ -378,8 +378,8 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
if (err == UBI_IO_BITFLIPS)
bitflips = 1;
else {
- ubi_err("VID of PEB %d header is bad, but it "
- "was OK earlier, err %d", pnum, err);
+ ubi_err("VID of PEB %d header is bad, but it was OK earlier, err %d",
+ pnum, err);
if (err > 0)
err = -EIO;
@@ -790,12 +790,12 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
goto out_unlock;
- ubi_err("PEB %d contains corrupted VID header, and the data does not "
- "contain all 0xFF, this may be a non-UBI PEB or a severe VID "
- "header corruption which requires manual inspection", pnum);
+ ubi_err("PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
+ pnum);
+ ubi_err("this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
ubi_dump_vid_hdr(vid_hdr);
- dbg_msg("hexdump of PEB %d offset %d, length %d",
- pnum, ubi->leb_start, ubi->leb_size);
+ pr_err("hexdump of PEB %d offset %d, length %d",
+ pnum, ubi->leb_start, ubi->leb_size);
ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
ubi->peb_buf, ubi->leb_size, 1);
err = 1;
@@ -907,8 +907,8 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
ubi->image_seq = image_seq;
if (ubi->image_seq && image_seq &&
ubi->image_seq != image_seq) {
- ubi_err("bad image sequence number %d in PEB %d, "
- "expected %d", image_seq, pnum, ubi->image_seq);
+ ubi_err("bad image sequence number %d in PEB %d, expected %d",
+ image_seq, pnum, ubi->image_seq);
ubi_dump_ec_hdr(ech);
return -EINVAL;
}
@@ -975,7 +975,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
return err;
goto adjust_mean_ec;
case UBI_IO_FF:
- if (ec_err)
+ if (ec_err || bitflips)
err = add_to_list(ai, pnum, UBI_UNKNOWN,
UBI_UNKNOWN, ec, 1, &ai->erase);
else
@@ -997,8 +997,8 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
/* Unsupported internal volume */
switch (vidh->compat) {
case UBI_COMPAT_DELETE:
- ubi_msg("\"delete\" compatible internal volume %d:%d"
- " found, will remove it", vol_id, lnum);
+ ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
+ vol_id, lnum);
err = add_to_list(ai, pnum, vol_id, lnum,
ec, 1, &ai->erase);
if (err)
@@ -1006,15 +1006,14 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
return 0;
case UBI_COMPAT_RO:
- ubi_msg("read-only compatible internal volume %d:%d"
- " found, switch to read-only mode",
+ ubi_msg("read-only compatible internal volume %d:%d found, switch to read-only mode",
vol_id, lnum);
ubi->ro_mode = 1;
break;
case UBI_COMPAT_PRESERVE:
- ubi_msg("\"preserve\" compatible internal volume %d:%d"
- " found", vol_id, lnum);
+ ubi_msg("\"preserve\" compatible internal volume %d:%d found",
+ vol_id, lnum);
err = add_to_list(ai, pnum, vol_id, lnum,
ec, 0, &ai->alien);
if (err)
@@ -1075,10 +1074,10 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
if (ai->corr_peb_count) {
ubi_err("%d PEBs are corrupted and preserved",
ai->corr_peb_count);
- printk(KERN_ERR "Corrupted PEBs are:");
+ pr_err("Corrupted PEBs are:");
list_for_each_entry(aeb, &ai->corr, u.list)
- printk(KERN_CONT " %d", aeb->pnum);
- printk(KERN_CONT "\n");
+ pr_cont(" %d", aeb->pnum);
+ pr_cont("\n");
/*
* If too many PEBs are corrupted, we refuse attaching,
@@ -1112,8 +1111,7 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
get_random_bytes(&ubi->image_seq,
sizeof(ubi->image_seq));
} else {
- ubi_err("MTD device is not UBI-formatted and possibly "
- "contains non-UBI data - refusing it");
+ ubi_err("MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
return -EINVAL;
}
@@ -1172,7 +1170,7 @@ static struct ubi_attach_info *scan_all(struct ubi_device *ubi)
goto out_vidh;
}
- dbg_msg("scanning is finished");
+ ubi_msg("scanning is finished");
/* Calculate mean erase counter */
if (ai->ec_count)
@@ -1244,7 +1242,7 @@ int ubi_attach(struct ubi_device *ubi)
ubi->corr_peb_count = ai->corr_peb_count;
ubi->max_ec = ai->max_ec;
ubi->mean_ec = ai->mean_ec;
- ubi_msg("max. sequence number: %llu", ai->max_sqnum);
+ dbg_gen("max. sequence number: %llu", ai->max_sqnum);
err = ubi_read_volume_table(ubi, ai);
if (err)
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 2c5ed5ca9c33..34977039850c 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -36,6 +36,7 @@
#include <linux/namei.h>
#include <linux/stat.h>
#include <linux/miscdevice.h>
+#include <linux/mtd/partitions.h>
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
@@ -45,6 +46,12 @@
/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64
+/* Maximum number of comma-separated items in the 'mtd=' parameter */
+#define MTD_PARAM_MAX_COUNT 3
+
+/* Maximum value for the number of bad PEBs per 1024 PEBs */
+#define MAX_MTD_UBI_BEB_LIMIT 768
+
#ifdef CONFIG_MTD_UBI_MODULE
#define ubi_is_module() 1
#else
@@ -56,10 +63,12 @@
* @name: MTD character device node path, MTD device name, or MTD device number
* string
* @vid_hdr_offs: VID header offset
+ * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
*/
struct mtd_dev_param {
char name[MTD_PARAM_LEN_MAX];
int vid_hdr_offs;
+ int max_beb_per1024;
};
/* Numbers of elements set in the @mtd_dev_param array */
@@ -564,9 +573,38 @@ void ubi_free_internal_volumes(struct ubi_device *ubi)
}
}
+static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
+{
+ int limit, device_pebs;
+ uint64_t device_size;
+
+ if (!max_beb_per1024)
+ return 0;
+
+ /*
+ * Here we are using size of the entire flash chip and
+ * not just the MTD partition size because the maximum
+ * number of bad eraseblocks is a percentage of the
+ * whole device and bad eraseblocks are not fairly
+ * distributed over the flash chip. So the worst case
+ * is that all the bad eraseblocks of the chip are in
+ * the MTD partition we are attaching (ubi->mtd).
+ */
+ device_size = mtd_get_device_size(ubi->mtd);
+ device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
+ limit = mult_frac(device_pebs, max_beb_per1024, 1024);
+
+ /* Round it up */
+ if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
+ limit += 1;
+
+ return limit;
+}
+
/**
* io_init - initialize I/O sub-system for a given UBI device.
* @ubi: UBI device description object
+ * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
*
* If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
* assumed:
@@ -579,8 +617,11 @@ void ubi_free_internal_volumes(struct ubi_device *ubi)
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-static int io_init(struct ubi_device *ubi)
+static int io_init(struct ubi_device *ubi, int max_beb_per1024)
{
+ dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
+ dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
+
if (ubi->mtd->numeraseregions != 0) {
/*
* Some flashes have several erase regions. Different regions
@@ -607,8 +648,10 @@ static int io_init(struct ubi_device *ubi)
ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
ubi->flash_size = ubi->mtd->size;
- if (mtd_can_have_bb(ubi->mtd))
+ if (mtd_can_have_bb(ubi->mtd)) {
ubi->bad_allowed = 1;
+ ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
+ }
if (ubi->mtd->type == MTD_NORFLASH) {
ubi_assert(ubi->mtd->writesize == 1);
@@ -650,11 +693,11 @@ static int io_init(struct ubi_device *ubi)
ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
- dbg_msg("min_io_size %d", ubi->min_io_size);
- dbg_msg("max_write_size %d", ubi->max_write_size);
- dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
- dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
- dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
+ dbg_gen("min_io_size %d", ubi->min_io_size);
+ dbg_gen("max_write_size %d", ubi->max_write_size);
+ dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
+ dbg_gen("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
+ dbg_gen("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
if (ubi->vid_hdr_offset == 0)
/* Default offset */
@@ -671,10 +714,10 @@ static int io_init(struct ubi_device *ubi)
ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
- dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
- dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
- dbg_msg("vid_hdr_shift %d", ubi->vid_hdr_shift);
- dbg_msg("leb_start %d", ubi->leb_start);
+ dbg_gen("vid_hdr_offset %d", ubi->vid_hdr_offset);
+ dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
+ dbg_gen("vid_hdr_shift %d", ubi->vid_hdr_shift);
+ dbg_gen("leb_start %d", ubi->leb_start);
/* The shift must be aligned to 32-bit boundary */
if (ubi->vid_hdr_shift % 4) {
@@ -700,7 +743,7 @@ static int io_init(struct ubi_device *ubi)
ubi->max_erroneous = ubi->peb_count / 10;
if (ubi->max_erroneous < 16)
ubi->max_erroneous = 16;
- dbg_msg("max_erroneous %d", ubi->max_erroneous);
+ dbg_gen("max_erroneous %d", ubi->max_erroneous);
/*
* It may happen that EC and VID headers are situated in one minimal
@@ -708,30 +751,18 @@ static int io_init(struct ubi_device *ubi)
* read-only mode.
*/
if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
- ubi_warn("EC and VID headers are in the same minimal I/O unit, "
- "switch to read-only mode");
+ ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
ubi->ro_mode = 1;
}
ubi->leb_size = ubi->peb_size - ubi->leb_start;
if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
- ubi_msg("MTD device %d is write-protected, attach in "
- "read-only mode", ubi->mtd->index);
+ ubi_msg("MTD device %d is write-protected, attach in read-only mode",
+ ubi->mtd->index);
ubi->ro_mode = 1;
}
- ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
- ubi->peb_size, ubi->peb_size >> 10);
- ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size);
- ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size);
- if (ubi->hdrs_min_io_size != ubi->min_io_size)
- ubi_msg("sub-page size: %d",
- ubi->hdrs_min_io_size);
- ubi_msg("VID header offset: %d (aligned %d)",
- ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
- ubi_msg("data offset: %d", ubi->leb_start);
-
/*
* Note, ideally, we have to initialize @ubi->bad_peb_count here. But
* unfortunately, MTD does not provide this information. We should loop
@@ -759,6 +790,11 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
struct ubi_volume *vol = ubi->volumes[vol_id];
int err, old_reserved_pebs = vol->reserved_pebs;
+ if (ubi->ro_mode) {
+ ubi_warn("skip auto-resize because of R/O mode");
+ return 0;
+ }
+
/*
* Clear the auto-resize flag in the volume in-memory copy of the
* volume table, and 'ubi_resize_volume()' will propagate this change
@@ -800,6 +836,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* @mtd: MTD device description object
* @ubi_num: number to assign to the new UBI device
* @vid_hdr_offset: VID header offset
+ * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
*
* This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number
* to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
@@ -810,11 +847,18 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
* Note, the invocations of this function has to be serialized by the
* @ubi_devices_mutex.
*/
-int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ int vid_hdr_offset, int max_beb_per1024)
{
struct ubi_device *ubi;
int i, err, ref = 0;
+ if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
+ return -EINVAL;
+
+ if (!max_beb_per1024)
+ max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;
+
/*
* Check if we already have the same MTD device attached.
*
@@ -839,8 +883,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
* no sense to attach emulated MTD devices, so we prohibit this.
*/
if (mtd->type == MTD_UBIVOLUME) {
- ubi_err("refuse attaching mtd%d - it is already emulated on "
- "top of UBI", mtd->index);
+ ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI",
+ mtd->index);
return -EINVAL;
}
@@ -880,10 +924,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
spin_lock_init(&ubi->volumes_lock);
ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
- dbg_msg("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
- dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
- err = io_init(ubi);
+ err = io_init(ubi, max_beb_per1024);
if (err)
goto out_free;
@@ -924,23 +966,24 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
goto out_debugfs;
}
- ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
- ubi_msg("MTD device name: \"%s\"", mtd->name);
- ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
- ubi_msg("number of good PEBs: %d", ubi->good_peb_count);
- ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count);
- ubi_msg("number of corrupted PEBs: %d", ubi->corr_peb_count);
- ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots);
- ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD);
- ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT);
- ubi_msg("number of user volumes: %d",
- ubi->vol_count - UBI_INT_VOL_COUNT);
- ubi_msg("available PEBs: %d", ubi->avail_pebs);
- ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs);
- ubi_msg("number of PEBs reserved for bad PEB handling: %d",
- ubi->beb_rsvd_pebs);
- ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
- ubi_msg("image sequence number: %d", ubi->image_seq);
+ ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d",
+ mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num);
+ ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes",
+ ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
+ ubi_msg("min./max. I/O unit sizes: %d/%d, sub-page size %d",
+ ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
+ ubi_msg("VID header offset: %d (aligned %d), data offset: %d",
+ ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
+ ubi_msg("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
+ ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
+ ubi_msg("user volume: %d, internal volumes: %d, max. volumes count: %d",
+ ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
+ ubi->vtbl_slots);
+ ubi_msg("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
+ ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
+ ubi->image_seq);
+ ubi_msg("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
+ ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
/*
* The below lock makes sure we do not race with 'ubi_thread()' which
@@ -1017,7 +1060,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_assert(ubi_num == ubi->ubi_num);
ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
- dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
+ ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
/*
* Before freeing anything, we have to stop the background thread to
@@ -1172,7 +1215,7 @@ static int __init ubi_init(void)
mutex_lock(&ubi_devices_mutex);
err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
- p->vid_hdr_offs);
+ p->vid_hdr_offs, p->max_beb_per1024);
mutex_unlock(&ubi_devices_mutex);
if (err < 0) {
ubi_err("cannot attach mtd%d", mtd->index);
@@ -1218,7 +1261,7 @@ out:
ubi_err("UBI error: cannot initialize UBI, error %d", err);
return err;
}
-module_init(ubi_init);
+late_initcall(ubi_init);
static void __exit ubi_exit(void)
{
@@ -1252,8 +1295,7 @@ static int __init bytes_str_to_int(const char *str)
result = simple_strtoul(str, &endp, 0);
if (str == endp || result >= INT_MAX) {
- printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
- str);
+ ubi_err("UBI error: incorrect bytes count: \"%s\"\n", str);
return -EINVAL;
}
@@ -1269,8 +1311,7 @@ static int __init bytes_str_to_int(const char *str)
case '\0':
break;
default:
- printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
- str);
+ ubi_err("UBI error: incorrect bytes count: \"%s\"\n", str);
return -EINVAL;
}
@@ -1291,27 +1332,26 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
struct mtd_dev_param *p;
char buf[MTD_PARAM_LEN_MAX];
char *pbuf = &buf[0];
- char *tokens[2] = {NULL, NULL};
+ char *tokens[MTD_PARAM_MAX_COUNT];
if (!val)
return -EINVAL;
if (mtd_devs == UBI_MAX_DEVICES) {
- printk(KERN_ERR "UBI error: too many parameters, max. is %d\n",
- UBI_MAX_DEVICES);
+ ubi_err("UBI error: too many parameters, max. is %d\n",
+ UBI_MAX_DEVICES);
return -EINVAL;
}
len = strnlen(val, MTD_PARAM_LEN_MAX);
if (len == MTD_PARAM_LEN_MAX) {
- printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
- "max. is %d\n", val, MTD_PARAM_LEN_MAX);
+ ubi_err("UBI error: parameter \"%s\" is too long, max. is %d\n",
+ val, MTD_PARAM_LEN_MAX);
return -EINVAL;
}
if (len == 0) {
- printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
- "ignored\n");
+ pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
return 0;
}
@@ -1321,12 +1361,11 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
if (buf[len - 1] == '\n')
buf[len - 1] = '\0';
- for (i = 0; i < 2; i++)
+ for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
tokens[i] = strsep(&pbuf, ",");
if (pbuf) {
- printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
- val);
+ ubi_err("UBI error: too many arguments at \"%s\"\n", val);
return -EINVAL;
}
@@ -1339,23 +1378,32 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
if (p->vid_hdr_offs < 0)
return p->vid_hdr_offs;
+ if (tokens[2]) {
+ int err = kstrtoint(tokens[2], 10, &p->max_beb_per1024);
+
+ if (err) {
+ ubi_err("UBI error: bad value for max_beb_per1024 parameter: %s",
+ tokens[2]);
+ return -EINVAL;
+ }
+ }
+
mtd_devs += 1;
return 0;
}
module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
-MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
- "mtd=<name|num|path>[,<vid_hdr_offs>].\n"
+MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024]].\n"
"Multiple \"mtd\" parameters may be specified.\n"
- "MTD devices may be specified by their number, name, or "
- "path to the MTD character device node.\n"
- "Optional \"vid_hdr_offs\" parameter specifies UBI VID "
- "header position to be used by UBI.\n"
- "Example 1: mtd=/dev/mtd0 - attach MTD device "
- "/dev/mtd0.\n"
- "Example 2: mtd=content,1984 mtd=4 - attach MTD device "
- "with name \"content\" using VID header offset 1984, and "
- "MTD device number 4 with default VID header offset.");
+ "MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
+ "Optional \"vid_hdr_offs\" parameter specifies UBI VID header position to be used by UBI. (default value if 0)\n"
+ "Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value ("
+ __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
+ "\n"
+ "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
+ "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
+ "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
+ "\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).");
MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
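Illustration only (not from the patch): the parser above splits the mtd= value on commas into up to MTD_PARAM_MAX_COUNT tokens, the optional third token feeding max_beb_per1024. A rough user-space sketch of that tokenisation, using atoi() where the kernel uses kstrtoint() and a local stand-in for the extended mtd_dev_param layout:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mtd_dev_param {              /* local stand-in for the kernel structure */
    char name[64];
    int vid_hdr_offs;
    int max_beb_per1024;
};

int main(void)
{
    char buf[] = "content,1984,20";     /* hypothetical "mtd=" value */
    char *pbuf = buf, *tokens[3] = { NULL, NULL, NULL };
    struct mtd_dev_param p = { "", 0, 0 };
    int i;

    for (i = 0; i < 3; i++)
        tokens[i] = strsep(&pbuf, ",");

    strncpy(p.name, tokens[0], sizeof(p.name) - 1);
    if (tokens[1])
        p.vid_hdr_offs = atoi(tokens[1]);
    if (tokens[2])
        p.max_beb_per1024 = atoi(tokens[2]);

    printf("name=%s vid_hdr_offs=%d max_beb_per1024=%d\n",
           p.name, p.vid_hdr_offs, p.max_beb_per1024);
    return 0;
}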
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index fb5567878181..dfcc65b33e99 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -140,9 +140,9 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
vol->updating = 0;
vfree(vol->upd_buf);
} else if (vol->changing_leb) {
- dbg_gen("only %lld of %lld bytes received for atomic LEB change"
- " for volume %d:%d, cancel", vol->upd_received,
- vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id);
+ dbg_gen("only %lld of %lld bytes received for atomic LEB change for volume %d:%d, cancel",
+ vol->upd_received, vol->upd_bytes, vol->ubi->ubi_num,
+ vol->vol_id);
vol->changing_leb = 0;
vfree(vol->upd_buf);
}
@@ -189,7 +189,8 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
return new_offset;
}
-static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end,
+ int datasync)
{
struct ubi_volume_desc *desc = file->private_data;
struct ubi_device *ubi = desc->vol->ubi;
@@ -753,7 +754,7 @@ static int rename_volumes(struct ubi_device *ubi,
re->new_name_len = name_len;
memcpy(re->new_name, name, name_len);
list_add_tail(&re->list, &rename_list);
- dbg_msg("will rename volume %d from \"%s\" to \"%s\"",
+ dbg_gen("will rename volume %d from \"%s\" to \"%s\"",
vol_id, re->desc->vol->name, name);
}
@@ -811,7 +812,7 @@ static int rename_volumes(struct ubi_device *ubi,
re1->remove = 1;
re1->desc = desc;
list_add(&re1->list, &rename_list);
- dbg_msg("will remove volume %d, name \"%s\"",
+ dbg_gen("will remove volume %d, name \"%s\"",
re1->desc->vol->vol_id, re1->desc->vol->name);
}
@@ -942,7 +943,7 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
{
struct ubi_rnvol_req *req;
- dbg_msg("re-name volumes");
+ dbg_gen("re-name volumes");
req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
if (!req) {
err = -ENOMEM;
@@ -1010,7 +1011,8 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
* 'ubi_attach_mtd_dev()'.
*/
mutex_lock(&ubi_devices_mutex);
- err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset);
+ err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset,
+ req.max_beb_per1024);
mutex_unlock(&ubi_devices_mutex);
if (err < 0)
put_mtd_device(mtd);
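From user space the same knob reaches the kernel through the attach ioctl handled above. A hedged sketch of such a caller, assuming the ubi-user.h update that accompanies this series adds the max_beb_per1024 member referenced as req.max_beb_per1024 in the hunk; the remaining fields, UBI_IOCATT and /dev/ubi_ctrl are the existing UBI control interface:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

int main(void)
{
    struct ubi_attach_req req;
    int fd = open("/dev/ubi_ctrl", O_RDWR);

    if (fd < 0) {
        perror("open /dev/ubi_ctrl");
        return 1;
    }
    memset(&req, 0, sizeof(req));
    req.ubi_num = UBI_DEV_NUM_AUTO;  /* let UBI pick a device number */
    req.mtd_num = 1;                 /* hypothetical MTD device number */
    req.vid_hdr_offset = 0;          /* default VID header offset */
    req.max_beb_per1024 = 20;        /* expect at most 20 bad PEBs per 1024 */

    if (ioctl(fd, UBI_IOCATT, &req) < 0)
        perror("UBI_IOCATT");
    return 0;
}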
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 7c1380305219..26908a59506b 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -43,8 +43,8 @@ void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
return;
err = mtd_read(ubi->mtd, addr, len, &read, buf);
if (err && err != -EUCLEAN) {
- ubi_err("error %d while reading %d bytes from PEB %d:%d, "
- "read %zd bytes", err, len, pnum, offset, read);
+ ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, len, pnum, offset, read);
goto out;
}
@@ -62,21 +62,15 @@ out:
*/
void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
{
- printk(KERN_DEBUG "Erase counter header dump:\n");
- printk(KERN_DEBUG "\tmagic %#08x\n",
- be32_to_cpu(ec_hdr->magic));
- printk(KERN_DEBUG "\tversion %d\n", (int)ec_hdr->version);
- printk(KERN_DEBUG "\tec %llu\n",
- (long long)be64_to_cpu(ec_hdr->ec));
- printk(KERN_DEBUG "\tvid_hdr_offset %d\n",
- be32_to_cpu(ec_hdr->vid_hdr_offset));
- printk(KERN_DEBUG "\tdata_offset %d\n",
- be32_to_cpu(ec_hdr->data_offset));
- printk(KERN_DEBUG "\timage_seq %d\n",
- be32_to_cpu(ec_hdr->image_seq));
- printk(KERN_DEBUG "\thdr_crc %#08x\n",
- be32_to_cpu(ec_hdr->hdr_crc));
- printk(KERN_DEBUG "erase counter header hexdump:\n");
+ pr_err("Erase counter header dump:\n");
+ pr_err("\tmagic %#08x\n", be32_to_cpu(ec_hdr->magic));
+ pr_err("\tversion %d\n", (int)ec_hdr->version);
+ pr_err("\tec %llu\n", (long long)be64_to_cpu(ec_hdr->ec));
+ pr_err("\tvid_hdr_offset %d\n", be32_to_cpu(ec_hdr->vid_hdr_offset));
+ pr_err("\tdata_offset %d\n", be32_to_cpu(ec_hdr->data_offset));
+ pr_err("\timage_seq %d\n", be32_to_cpu(ec_hdr->image_seq));
+ pr_err("\thdr_crc %#08x\n", be32_to_cpu(ec_hdr->hdr_crc));
+ pr_err("erase counter header hexdump:\n");
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
ec_hdr, UBI_EC_HDR_SIZE, 1);
}
@@ -87,21 +81,21 @@ void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
*/
void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
{
- printk(KERN_DEBUG "Volume identifier header dump:\n");
- printk(KERN_DEBUG "\tmagic %08x\n", be32_to_cpu(vid_hdr->magic));
- printk(KERN_DEBUG "\tversion %d\n", (int)vid_hdr->version);
- printk(KERN_DEBUG "\tvol_type %d\n", (int)vid_hdr->vol_type);
- printk(KERN_DEBUG "\tcopy_flag %d\n", (int)vid_hdr->copy_flag);
- printk(KERN_DEBUG "\tcompat %d\n", (int)vid_hdr->compat);
- printk(KERN_DEBUG "\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id));
- printk(KERN_DEBUG "\tlnum %d\n", be32_to_cpu(vid_hdr->lnum));
- printk(KERN_DEBUG "\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size));
- printk(KERN_DEBUG "\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs));
- printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad));
- printk(KERN_DEBUG "\tsqnum %llu\n",
+ pr_err("Volume identifier header dump:\n");
+ pr_err("\tmagic %08x\n", be32_to_cpu(vid_hdr->magic));
+ pr_err("\tversion %d\n", (int)vid_hdr->version);
+ pr_err("\tvol_type %d\n", (int)vid_hdr->vol_type);
+ pr_err("\tcopy_flag %d\n", (int)vid_hdr->copy_flag);
+ pr_err("\tcompat %d\n", (int)vid_hdr->compat);
+ pr_err("\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id));
+ pr_err("\tlnum %d\n", be32_to_cpu(vid_hdr->lnum));
+ pr_err("\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size));
+ pr_err("\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs));
+ pr_err("\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad));
+ pr_err("\tsqnum %llu\n",
(unsigned long long)be64_to_cpu(vid_hdr->sqnum));
- printk(KERN_DEBUG "\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
- printk(KERN_DEBUG "Volume identifier header hexdump:\n");
+ pr_err("\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
+ pr_err("Volume identifier header hexdump:\n");
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
vid_hdr, UBI_VID_HDR_SIZE, 1);
}
@@ -112,25 +106,25 @@ void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
*/
void ubi_dump_vol_info(const struct ubi_volume *vol)
{
- printk(KERN_DEBUG "Volume information dump:\n");
- printk(KERN_DEBUG "\tvol_id %d\n", vol->vol_id);
- printk(KERN_DEBUG "\treserved_pebs %d\n", vol->reserved_pebs);
- printk(KERN_DEBUG "\talignment %d\n", vol->alignment);
- printk(KERN_DEBUG "\tdata_pad %d\n", vol->data_pad);
- printk(KERN_DEBUG "\tvol_type %d\n", vol->vol_type);
- printk(KERN_DEBUG "\tname_len %d\n", vol->name_len);
- printk(KERN_DEBUG "\tusable_leb_size %d\n", vol->usable_leb_size);
- printk(KERN_DEBUG "\tused_ebs %d\n", vol->used_ebs);
- printk(KERN_DEBUG "\tused_bytes %lld\n", vol->used_bytes);
- printk(KERN_DEBUG "\tlast_eb_bytes %d\n", vol->last_eb_bytes);
- printk(KERN_DEBUG "\tcorrupted %d\n", vol->corrupted);
- printk(KERN_DEBUG "\tupd_marker %d\n", vol->upd_marker);
+ pr_err("Volume information dump:\n");
+ pr_err("\tvol_id %d\n", vol->vol_id);
+ pr_err("\treserved_pebs %d\n", vol->reserved_pebs);
+ pr_err("\talignment %d\n", vol->alignment);
+ pr_err("\tdata_pad %d\n", vol->data_pad);
+ pr_err("\tvol_type %d\n", vol->vol_type);
+ pr_err("\tname_len %d\n", vol->name_len);
+ pr_err("\tusable_leb_size %d\n", vol->usable_leb_size);
+ pr_err("\tused_ebs %d\n", vol->used_ebs);
+ pr_err("\tused_bytes %lld\n", vol->used_bytes);
+ pr_err("\tlast_eb_bytes %d\n", vol->last_eb_bytes);
+ pr_err("\tcorrupted %d\n", vol->corrupted);
+ pr_err("\tupd_marker %d\n", vol->upd_marker);
if (vol->name_len <= UBI_VOL_NAME_MAX &&
strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
- printk(KERN_DEBUG "\tname %s\n", vol->name);
+ pr_err("\tname %s\n", vol->name);
} else {
- printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n",
+ pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
vol->name[0], vol->name[1], vol->name[2],
vol->name[3], vol->name[4]);
}
@@ -145,29 +139,28 @@ void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
{
int name_len = be16_to_cpu(r->name_len);
- printk(KERN_DEBUG "Volume table record %d dump:\n", idx);
- printk(KERN_DEBUG "\treserved_pebs %d\n",
- be32_to_cpu(r->reserved_pebs));
- printk(KERN_DEBUG "\talignment %d\n", be32_to_cpu(r->alignment));
- printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(r->data_pad));
- printk(KERN_DEBUG "\tvol_type %d\n", (int)r->vol_type);
- printk(KERN_DEBUG "\tupd_marker %d\n", (int)r->upd_marker);
- printk(KERN_DEBUG "\tname_len %d\n", name_len);
+ pr_err("Volume table record %d dump:\n", idx);
+ pr_err("\treserved_pebs %d\n", be32_to_cpu(r->reserved_pebs));
+ pr_err("\talignment %d\n", be32_to_cpu(r->alignment));
+ pr_err("\tdata_pad %d\n", be32_to_cpu(r->data_pad));
+ pr_err("\tvol_type %d\n", (int)r->vol_type);
+ pr_err("\tupd_marker %d\n", (int)r->upd_marker);
+ pr_err("\tname_len %d\n", name_len);
if (r->name[0] == '\0') {
- printk(KERN_DEBUG "\tname NULL\n");
+ pr_err("\tname NULL\n");
return;
}
if (name_len <= UBI_VOL_NAME_MAX &&
strnlen(&r->name[0], name_len + 1) == name_len) {
- printk(KERN_DEBUG "\tname %s\n", &r->name[0]);
+ pr_err("\tname %s\n", &r->name[0]);
} else {
- printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n",
+ pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
r->name[0], r->name[1], r->name[2], r->name[3],
r->name[4]);
}
- printk(KERN_DEBUG "\tcrc %#08x\n", be32_to_cpu(r->crc));
+ pr_err("\tcrc %#08x\n", be32_to_cpu(r->crc));
}
/**
@@ -176,15 +169,15 @@ void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
*/
void ubi_dump_av(const struct ubi_ainf_volume *av)
{
- printk(KERN_DEBUG "Volume attaching information dump:\n");
- printk(KERN_DEBUG "\tvol_id %d\n", av->vol_id);
- printk(KERN_DEBUG "\thighest_lnum %d\n", av->highest_lnum);
- printk(KERN_DEBUG "\tleb_count %d\n", av->leb_count);
- printk(KERN_DEBUG "\tcompat %d\n", av->compat);
- printk(KERN_DEBUG "\tvol_type %d\n", av->vol_type);
- printk(KERN_DEBUG "\tused_ebs %d\n", av->used_ebs);
- printk(KERN_DEBUG "\tlast_data_size %d\n", av->last_data_size);
- printk(KERN_DEBUG "\tdata_pad %d\n", av->data_pad);
+ pr_err("Volume attaching information dump:\n");
+ pr_err("\tvol_id %d\n", av->vol_id);
+ pr_err("\thighest_lnum %d\n", av->highest_lnum);
+ pr_err("\tleb_count %d\n", av->leb_count);
+ pr_err("\tcompat %d\n", av->compat);
+ pr_err("\tvol_type %d\n", av->vol_type);
+ pr_err("\tused_ebs %d\n", av->used_ebs);
+ pr_err("\tlast_data_size %d\n", av->last_data_size);
+ pr_err("\tdata_pad %d\n", av->data_pad);
}
/**
@@ -194,13 +187,13 @@ void ubi_dump_av(const struct ubi_ainf_volume *av)
*/
void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type)
{
- printk(KERN_DEBUG "eraseblock attaching information dump:\n");
- printk(KERN_DEBUG "\tec %d\n", aeb->ec);
- printk(KERN_DEBUG "\tpnum %d\n", aeb->pnum);
+ pr_err("eraseblock attaching information dump:\n");
+ pr_err("\tec %d\n", aeb->ec);
+ pr_err("\tpnum %d\n", aeb->pnum);
if (type == 0) {
- printk(KERN_DEBUG "\tlnum %d\n", aeb->lnum);
- printk(KERN_DEBUG "\tscrub %d\n", aeb->scrub);
- printk(KERN_DEBUG "\tsqnum %llu\n", aeb->sqnum);
+ pr_err("\tlnum %d\n", aeb->lnum);
+ pr_err("\tscrub %d\n", aeb->scrub);
+ pr_err("\tsqnum %llu\n", aeb->sqnum);
}
}
@@ -212,16 +205,16 @@ void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
{
char nm[17];
- printk(KERN_DEBUG "Volume creation request dump:\n");
- printk(KERN_DEBUG "\tvol_id %d\n", req->vol_id);
- printk(KERN_DEBUG "\talignment %d\n", req->alignment);
- printk(KERN_DEBUG "\tbytes %lld\n", (long long)req->bytes);
- printk(KERN_DEBUG "\tvol_type %d\n", req->vol_type);
- printk(KERN_DEBUG "\tname_len %d\n", req->name_len);
+ pr_err("Volume creation request dump:\n");
+ pr_err("\tvol_id %d\n", req->vol_id);
+ pr_err("\talignment %d\n", req->alignment);
+ pr_err("\tbytes %lld\n", (long long)req->bytes);
+ pr_err("\tvol_type %d\n", req->vol_type);
+ pr_err("\tname_len %d\n", req->name_len);
memcpy(nm, req->name, 16);
nm[16] = 0;
- printk(KERN_DEBUG "\t1st 16 characters of name: %s\n", nm);
+ pr_err("\t1st 16 characters of name: %s\n", nm);
}
/**
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index d5d2645b51a7..3dbc877d9663 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -29,22 +29,18 @@ void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
#define ubi_assert(expr) do { \
if (unlikely(!(expr))) { \
- printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
+ pr_crit("UBI assert failed in %s at %u (pid %d)\n", \
__func__, __LINE__, current->pid); \
dump_stack(); \
} \
} while (0)
-#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \
+#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \
print_hex_dump(l, ps, pt, r, g, b, len, a)
#define ubi_dbg_msg(type, fmt, ...) \
- pr_debug("UBI DBG " type ": " fmt "\n", ##__VA_ARGS__)
-
-/* Just a debugging messages not related to any specific UBI subsystem */
-#define dbg_msg(fmt, ...) \
- printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
- current->pid, __func__, ##__VA_ARGS__)
+ pr_debug("UBI DBG " type " (pid %d): " fmt "\n", current->pid, \
+ ##__VA_ARGS__)
/* General debugging messages */
#define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
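To make the conversion above concrete (illustration only): with the reworked ubi_dbg_msg(), a former dbg_msg("leb_start %d", x) call site becomes dbg_gen("leb_start %d", x) and, when the message is enabled, prints something like "UBI DBG gen (pid 1234): leb_start 2048". A user-space stand-in for the expansion, with printf() in place of pr_debug() and dynamic debug:

#include <stdio.h>
#include <unistd.h>

/* User-space stand-in: the kernel macro uses pr_debug() and current->pid. */
#define ubi_dbg_msg(type, fmt, ...) \
    printf("UBI DBG " type " (pid %d): " fmt "\n", (int)getpid(), ##__VA_ARGS__)
#define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)

int main(void)
{
    dbg_gen("leb_start %d", 2048);  /* hypothetical value */
    return 0;
}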
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index b703ac7729cf..a26d7d253174 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -420,9 +420,8 @@ retry:
*/
if (err == UBI_IO_BAD_HDR_EBADMSG ||
err == UBI_IO_BAD_HDR) {
- ubi_warn("corrupted VID header at PEB "
- "%d, LEB %d:%d", pnum, vol_id,
- lnum);
+ ubi_warn("corrupted VID header at PEB %d, LEB %d:%d",
+ pnum, vol_id, lnum);
err = -EBADMSG;
} else
ubi_ro_mode(ubi);
@@ -660,9 +659,8 @@ retry:
if (len) {
err = ubi_io_write_data(ubi, buf, pnum, offset, len);
if (err) {
- ubi_warn("failed to write %d bytes at offset %d of "
- "LEB %d:%d, PEB %d", len, offset, vol_id,
- lnum, pnum);
+ ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
goto write_error;
}
}
@@ -1040,9 +1038,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* cancel it.
*/
if (vol->eba_tbl[lnum] != from) {
- dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to "
- "PEB %d, cancel", vol_id, lnum, from,
- vol->eba_tbl[lnum]);
+ dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
+ vol_id, lnum, from, vol->eba_tbl[lnum]);
err = MOVE_CANCEL_RACE;
goto out_unlock_leb;
}
@@ -1107,8 +1104,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
if (err) {
if (err != UBI_IO_BITFLIPS) {
- ubi_warn("error %d while reading VID header back from "
- "PEB %d", err, to);
+ ubi_warn("error %d while reading VID header back from PEB %d",
+ err, to);
if (is_error_sane(err))
err = MOVE_TARGET_RD_ERR;
} else
@@ -1134,8 +1131,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
if (err) {
if (err != UBI_IO_BITFLIPS) {
- ubi_warn("error %d while reading data back "
- "from PEB %d", err, to);
+ ubi_warn("error %d while reading data back from PEB %d",
+ err, to);
if (is_error_sane(err))
err = MOVE_TARGET_RD_ERR;
} else
@@ -1146,8 +1143,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
cond_resched();
if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
- ubi_warn("read data back from PEB %d and it is "
- "different", to);
+ ubi_warn("read data back from PEB %d and it is different",
+ to);
err = -EINVAL;
goto out_unlock_buf;
}
@@ -1197,11 +1194,11 @@ static void print_rsvd_warning(struct ubi_device *ubi,
return;
}
- ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d,"
- " need %d", ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
+ ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
+ ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
if (ubi->corr_peb_count)
ubi_warn("%d PEBs are corrupted and not used",
- ubi->corr_peb_count);
+ ubi->corr_peb_count);
}
/**
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 4e44bee4c564..4bd4db8c84c9 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -41,7 +41,7 @@
#include "ubi-media.h"
#define err_msg(fmt, ...) \
- printk(KERN_DEBUG "gluebi (pid %d): %s: " fmt "\n", \
+ pr_err("gluebi (pid %d): %s: " fmt "\n", \
current->pid, __func__, ##__VA_ARGS__)
/**
@@ -341,9 +341,8 @@ static int gluebi_create(struct ubi_device_info *di,
mutex_lock(&devices_mutex);
g = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
if (g)
- err_msg("gluebi MTD device %d form UBI device %d volume %d "
- "already exists", g->mtd.index, vi->ubi_num,
- vi->vol_id);
+ err_msg("gluebi MTD device %d form UBI device %d volume %d already exists",
+ g->mtd.index, vi->ubi_num, vi->vol_id);
mutex_unlock(&devices_mutex);
if (mtd_device_register(mtd, NULL, 0)) {
@@ -376,8 +375,8 @@ static int gluebi_remove(struct ubi_volume_info *vi)
mutex_lock(&devices_mutex);
gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
if (!gluebi) {
- err_msg("got remove notification for unknown UBI device %d "
- "volume %d", vi->ubi_num, vi->vol_id);
+ err_msg("got remove notification for unknown UBI device %d volume %d",
+ vi->ubi_num, vi->vol_id);
err = -ENOENT;
} else if (gluebi->refcnt)
err = -EBUSY;
@@ -390,9 +389,8 @@ static int gluebi_remove(struct ubi_volume_info *vi)
mtd = &gluebi->mtd;
err = mtd_device_unregister(mtd);
if (err) {
- err_msg("cannot remove fake MTD device %d, UBI device %d, "
- "volume %d, error %d", mtd->index, gluebi->ubi_num,
- gluebi->vol_id, err);
+ err_msg("cannot remove fake MTD device %d, UBI device %d, volume %d, error %d",
+ mtd->index, gluebi->ubi_num, gluebi->vol_id, err);
mutex_lock(&devices_mutex);
list_add_tail(&gluebi->list, &gluebi_devices);
mutex_unlock(&devices_mutex);
@@ -422,8 +420,8 @@ static int gluebi_updated(struct ubi_volume_info *vi)
gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
if (!gluebi) {
mutex_unlock(&devices_mutex);
- err_msg("got update notification for unknown UBI device %d "
- "volume %d", vi->ubi_num, vi->vol_id);
+ err_msg("got update notification for unknown UBI device %d volume %d",
+ vi->ubi_num, vi->vol_id);
return -ENOENT;
}
@@ -449,8 +447,8 @@ static int gluebi_resized(struct ubi_volume_info *vi)
gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
if (!gluebi) {
mutex_unlock(&devices_mutex);
- err_msg("got update notification for unknown UBI device %d "
- "volume %d", vi->ubi_num, vi->vol_id);
+ err_msg("got update notification for unknown UBI device %d volume %d",
+ vi->ubi_num, vi->vol_id);
return -ENOENT;
}
gluebi->mtd.size = vi->used_bytes;
@@ -507,9 +505,9 @@ static void __exit ubi_gluebi_exit(void)
err = mtd_device_unregister(mtd);
if (err)
- err_msg("error %d while removing gluebi MTD device %d, "
- "UBI device %d, volume %d - ignoring", err,
- mtd->index, gluebi->ubi_num, gluebi->vol_id);
+ err_msg("error %d while removing gluebi MTD device %d, UBI device %d, volume %d - ignoring",
+ err, mtd->index, gluebi->ubi_num,
+ gluebi->vol_id);
kfree(mtd->name);
kfree(gluebi);
}
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index a8d523794b52..78a1dcbf2107 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -177,21 +177,20 @@ retry:
* enabled. A corresponding message will be printed
* later, when it is has been scrubbed.
*/
- dbg_msg("fixable bit-flip detected at PEB %d", pnum);
+ ubi_msg("fixable bit-flip detected at PEB %d", pnum);
ubi_assert(len == read);
return UBI_IO_BITFLIPS;
}
if (retries++ < UBI_IO_RETRIES) {
- ubi_warn("error %d%s while reading %d bytes from PEB "
- "%d:%d, read only %zd bytes, retry",
+ ubi_warn("error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
err, errstr, len, pnum, offset, read);
yield();
goto retry;
}
- ubi_err("error %d%s while reading %d bytes from PEB %d:%d, "
- "read %zd bytes", err, errstr, len, pnum, offset, read);
+ ubi_err("error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, errstr, len, pnum, offset, read);
dump_stack();
/*
@@ -274,8 +273,8 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
}
if (ubi_dbg_is_write_failure(ubi)) {
- ubi_err("cannot write %d bytes to PEB %d:%d "
- "(emulated)", len, pnum, offset);
+ ubi_err("cannot write %d bytes to PEB %d:%d (emulated)",
+ len, pnum, offset);
dump_stack();
return -EIO;
}
@@ -283,8 +282,8 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
addr = (loff_t)pnum * ubi->peb_size + offset;
err = mtd_write(ubi->mtd, addr, len, &written, buf);
if (err) {
- ubi_err("error %d while writing %d bytes to PEB %d:%d, written "
- "%zd bytes", err, len, pnum, offset, written);
+ ubi_err("error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
+ err, len, pnum, offset, written);
dump_stack();
ubi_dump_flash(ubi, pnum, offset, len);
} else
@@ -685,8 +684,7 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
leb_start = be32_to_cpu(ec_hdr->data_offset);
if (ec_hdr->version != UBI_VERSION) {
- ubi_err("node with incompatible UBI version found: "
- "this UBI version is %d, image version is %d",
+ ubi_err("node with incompatible UBI version found: this UBI version is %d, image version is %d",
UBI_VERSION, (int)ec_hdr->version);
goto bad;
}
@@ -777,10 +775,10 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
/* The physical eraseblock is supposedly empty */
if (verbose)
- ubi_warn("no EC header found at PEB %d, "
- "only 0xFF bytes", pnum);
- dbg_bld("no EC header found at PEB %d, "
- "only 0xFF bytes", pnum);
+ ubi_warn("no EC header found at PEB %d, only 0xFF bytes",
+ pnum);
+ dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
+ pnum);
if (!read_err)
return UBI_IO_FF;
else
@@ -792,12 +790,12 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
* 0xFF bytes. Report that the header is corrupted.
*/
if (verbose) {
- ubi_warn("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
+ ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_EC_HDR_MAGIC);
ubi_dump_ec_hdr(ec_hdr);
}
- dbg_bld("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
+ dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_EC_HDR_MAGIC);
return UBI_IO_BAD_HDR;
}
@@ -806,12 +804,12 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
if (hdr_crc != crc) {
if (verbose) {
- ubi_warn("bad EC header CRC at PEB %d, calculated "
- "%#08x, read %#08x", pnum, crc, hdr_crc);
+ ubi_warn("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
ubi_dump_ec_hdr(ec_hdr);
}
- dbg_bld("bad EC header CRC at PEB %d, calculated "
- "%#08x, read %#08x", pnum, crc, hdr_crc);
+ dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
if (!read_err)
return UBI_IO_BAD_HDR;
@@ -1032,10 +1030,10 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
if (verbose)
- ubi_warn("no VID header found at PEB %d, "
- "only 0xFF bytes", pnum);
- dbg_bld("no VID header found at PEB %d, "
- "only 0xFF bytes", pnum);
+ ubi_warn("no VID header found at PEB %d, only 0xFF bytes",
+ pnum);
+ dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
+ pnum);
if (!read_err)
return UBI_IO_FF;
else
@@ -1043,12 +1041,12 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
}
if (verbose) {
- ubi_warn("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
+ ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_VID_HDR_MAGIC);
ubi_dump_vid_hdr(vid_hdr);
}
- dbg_bld("bad magic number at PEB %d: %08x instead of "
- "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
+ dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
+ pnum, magic, UBI_VID_HDR_MAGIC);
return UBI_IO_BAD_HDR;
}
@@ -1057,12 +1055,12 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
if (hdr_crc != crc) {
if (verbose) {
- ubi_warn("bad CRC at PEB %d, calculated %#08x, "
- "read %#08x", pnum, crc, hdr_crc);
+ ubi_warn("bad CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
ubi_dump_vid_hdr(vid_hdr);
}
- dbg_bld("bad CRC at PEB %d, calculated %#08x, "
- "read %#08x", pnum, crc, hdr_crc);
+ dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
if (!read_err)
return UBI_IO_BAD_HDR;
else
@@ -1300,8 +1298,8 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
if (hdr_crc != crc) {
- ubi_err("bad VID header CRC at PEB %d, calculated %#08x, "
- "read %#08x", pnum, crc, hdr_crc);
+ ubi_err("bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
+ pnum, crc, hdr_crc);
ubi_err("self-check failed for PEB %d", pnum);
ubi_dump_vid_hdr(vid_hdr);
dump_stack();
@@ -1411,15 +1409,15 @@ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
err = mtd_read(ubi->mtd, addr, len, &read, buf);
if (err && !mtd_is_bitflip(err)) {
- ubi_err("error %d while reading %d bytes from PEB %d:%d, "
- "read %zd bytes", err, len, pnum, offset, read);
+ ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
+ err, len, pnum, offset, read);
goto error;
}
err = ubi_check_pattern(buf, 0xFF, len);
if (err == 0) {
- ubi_err("flash region at PEB %d:%d, length %d does not "
- "contain all 0xFF bytes", pnum, offset, len);
+ ubi_err("flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
+ pnum, offset, len);
goto fail;
}
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index 8bbfb444b895..f913d701a5b3 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -121,10 +121,16 @@ void ubi_update_reserved(struct ubi_device *ubi)
*/
void ubi_calculate_reserved(struct ubi_device *ubi)
{
- ubi->beb_rsvd_level = ubi->good_peb_count/100;
- ubi->beb_rsvd_level *= CONFIG_MTD_UBI_BEB_RESERVE;
- if (ubi->beb_rsvd_level < MIN_RESEVED_PEBS)
- ubi->beb_rsvd_level = MIN_RESEVED_PEBS;
+ /*
+ * Calculate the actual number of PEBs currently needed to be reserved
+ * for future bad eraseblock handling.
+ */
+ ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count;
+ if (ubi->beb_rsvd_level < 0) {
+ ubi->beb_rsvd_level = 0;
+ ubi_warn("number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)",
+ ubi->bad_peb_count, ubi->bad_peb_limit);
+ }
}
/**
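Worked example for the new reserve calculation above (numbers hypothetical): with bad_peb_limit = 80 and bad_peb_count = 3, beb_rsvd_level becomes 77; once the bad-PEB count reaches or passes the limit, the level is clamped to 0 and the warning is printed. A stand-alone sketch:

#include <stdio.h>

/* Sketch of the reworked ubi_calculate_reserved() arithmetic only. */
static int beb_rsvd_level(int bad_peb_limit, int bad_peb_count)
{
    int level = bad_peb_limit - bad_peb_count;

    if (level < 0)
        level = 0;  /* more bad PEBs than expected: nothing left to reserve */
    return level;
}

int main(void)
{
    printf("%d\n", beb_rsvd_level(80, 3));   /* 77 */
    printf("%d\n", beb_rsvd_level(80, 85));  /* 0, limit already exceeded */
    return 0;
}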
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 84f66e3fa05d..383ee43d2425 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -51,17 +51,14 @@
#define UBI_NAME_STR "ubi"
/* Normal UBI messages */
-#define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__)
+#define ubi_msg(fmt, ...) pr_notice("UBI: " fmt "\n", ##__VA_ARGS__)
/* UBI warning messages */
-#define ubi_warn(fmt, ...) printk(KERN_WARNING "UBI warning: %s: " fmt "\n", \
- __func__, ##__VA_ARGS__)
+#define ubi_warn(fmt, ...) pr_warn("UBI warning: %s: " fmt "\n", \
+ __func__, ##__VA_ARGS__)
/* UBI error messages */
-#define ubi_err(fmt, ...) printk(KERN_ERR "UBI error: %s: " fmt "\n", \
+#define ubi_err(fmt, ...) pr_err("UBI error: %s: " fmt "\n", \
__func__, ##__VA_ARGS__)
-/* Lowest number PEBs reserved for bad PEB handling */
-#define MIN_RESEVED_PEBS 2
-
/* Background thread name pattern */
#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
@@ -363,6 +360,7 @@ struct ubi_wl_entry;
* @flash_size: underlying MTD device size (in bytes)
* @peb_count: count of physical eraseblocks on the MTD device
* @peb_size: physical eraseblock size
+ * @bad_peb_limit: top limit of expected bad physical eraseblocks
* @bad_peb_count: count of bad physical eraseblocks
* @good_peb_count: count of good physical eraseblocks
* @corr_peb_count: count of corrupted physical eraseblocks (preserved and not
@@ -410,6 +408,7 @@ struct ubi_device {
int avail_pebs;
int beb_rsvd_pebs;
int beb_rsvd_level;
+ int bad_peb_limit;
int autoresize_vol_id;
int vtbl_slots;
@@ -694,7 +693,8 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
struct ubi_vid_hdr *vid_hdr);
/* build.c */
-int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset);
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ int vid_hdr_offset, int max_beb_per1024);
int ubi_detach_mtd_dev(int ubi_num, int anyway);
struct ubi_device *ubi_get_device(int ubi_num);
void ubi_put_device(struct ubi_device *ubi);
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 568307cc7caf..926e3df14fb2 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -270,8 +270,8 @@ static int vtbl_check(const struct ubi_device *ubi,
if (len1 > 0 && len1 == len2 &&
!strncmp(vtbl[i].name, vtbl[n].name, len1)) {
- ubi_err("volumes %d and %d have the same name"
- " \"%s\"", i, n, vtbl[i].name);
+ ubi_err("volumes %d and %d have the same name \"%s\"",
+ i, n, vtbl[i].name);
ubi_dump_vtbl_record(&vtbl[i], i);
ubi_dump_vtbl_record(&vtbl[n], n);
return -EINVAL;
@@ -304,7 +304,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai,
struct ubi_vid_hdr *vid_hdr;
struct ubi_ainf_peb *new_aeb;
- ubi_msg("create volume table (copy #%d)", copy + 1);
+ dbg_gen("create volume table (copy #%d)", copy + 1);
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vid_hdr)
@@ -562,8 +562,8 @@ static int init_volumes(struct ubi_device *ubi,
if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
/* Auto re-size flag may be set only for one volume */
if (ubi->autoresize_vol_id != -1) {
- ubi_err("more than one auto-resize volume (%d "
- "and %d)", ubi->autoresize_vol_id, i);
+ ubi_err("more than one auto-resize volume (%d and %d)",
+ ubi->autoresize_vol_id, i);
kfree(vol);
return -EINVAL;
}
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index b6be644e7b85..032fc57f1090 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -978,9 +978,10 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int cancel)
{
struct ubi_wl_entry *e = wl_wrk->e;
- int pnum = e->pnum, err, need;
+ int pnum = e->pnum;
int vol_id = wl_wrk->vol_id;
int lnum = wl_wrk->lnum;
+ int err, available_consumed = 0;
if (cancel) {
dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
@@ -1045,20 +1046,14 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
}
spin_lock(&ubi->volumes_lock);
- need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
- if (need > 0) {
- need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
- ubi->avail_pebs -= need;
- ubi->rsvd_pebs += need;
- ubi->beb_rsvd_pebs += need;
- if (need > 0)
- ubi_msg("reserve more %d PEBs", need);
- }
-
if (ubi->beb_rsvd_pebs == 0) {
- spin_unlock(&ubi->volumes_lock);
- ubi_err("no reserved physical eraseblocks");
- goto out_ro;
+ if (ubi->avail_pebs == 0) {
+ spin_unlock(&ubi->volumes_lock);
+ ubi_err("no reserved/available physical eraseblocks");
+ goto out_ro;
+ }
+ ubi->avail_pebs -= 1;
+ available_consumed = 1;
}
spin_unlock(&ubi->volumes_lock);
@@ -1068,19 +1063,36 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
goto out_ro;
spin_lock(&ubi->volumes_lock);
- ubi->beb_rsvd_pebs -= 1;
+ if (ubi->beb_rsvd_pebs > 0) {
+ if (available_consumed) {
+ /*
+ * The amount of reserved PEBs increased since we last
+ * checked.
+ */
+ ubi->avail_pebs += 1;
+ available_consumed = 0;
+ }
+ ubi->beb_rsvd_pebs -= 1;
+ }
ubi->bad_peb_count += 1;
ubi->good_peb_count -= 1;
ubi_calculate_reserved(ubi);
- if (ubi->beb_rsvd_pebs)
+ if (available_consumed)
+ ubi_warn("no PEBs in the reserved pool, used an available PEB");
+ else if (ubi->beb_rsvd_pebs)
ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
else
- ubi_warn("last PEB from the reserved pool was used");
+ ubi_warn("last PEB from the reserve was used");
spin_unlock(&ubi->volumes_lock);
return err;
out_ro:
+ if (available_consumed) {
+ spin_lock(&ubi->volumes_lock);
+ ubi->avail_pebs += 1;
+ spin_unlock(&ubi->volumes_lock);
+ }
ubi_ro_mode(ubi);
return err;
}
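A simplified, single-threaded model of the new accounting in erase_worker() above (illustration only, hypothetical numbers): the reserve pool is preferred, an available PEB is borrowed when the pool is empty, and the operation fails, switching the device to R/O mode, when neither exists. The second beb_rsvd_pebs check mirrors the driver re-checking after it drops and retakes ubi->volumes_lock around mtd_block_markbad():

#include <stdio.h>

struct counts {
    int beb_rsvd_pebs, avail_pebs, bad_peb_count, good_peb_count;
};

/* Returns -1 on failure, 1 if an available PEB was consumed, 0 otherwise. */
static int account_new_bad_peb(struct counts *c)
{
    int available_consumed = 0;

    if (c->beb_rsvd_pebs == 0) {
        if (c->avail_pebs == 0)
            return -1;              /* the driver would go read-only here */
        c->avail_pebs -= 1;
        available_consumed = 1;
    }
    /* ...the driver marks the block bad here, outside the lock... */
    if (c->beb_rsvd_pebs > 0) {
        if (available_consumed) {   /* reserve grew while unlocked */
            c->avail_pebs += 1;
            available_consumed = 0;
        }
        c->beb_rsvd_pebs -= 1;
    }
    c->bad_peb_count += 1;
    c->good_peb_count -= 1;
    return available_consumed;
}

int main(void)
{
    struct counts c = { 0, 5, 2, 100 };         /* empty reserve, 5 available */
    printf("%d %d\n", account_new_bad_peb(&c), c.avail_pebs);  /* prints: 1 4 */
    return 0;
}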
@@ -1189,7 +1201,7 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
struct ubi_wl_entry *e;
- dbg_msg("schedule PEB %d for scrubbing", pnum);
+ ubi_msg("schedule PEB %d for scrubbing", pnum);
retry:
spin_lock(&ubi->wl_lock);