Diffstat (limited to 'drivers/mtd/spi-nor/controllers')
-rw-r--r--	drivers/mtd/spi-nor/controllers/Kconfig			  75
-rw-r--r--	drivers/mtd/spi-nor/controllers/Makefile		   8
-rw-r--r--	drivers/mtd/spi-nor/controllers/aspeed-smc.c		 910
-rw-r--r--	drivers/mtd/spi-nor/controllers/cadence-quadspi.c	1540
-rw-r--r--	drivers/mtd/spi-nor/controllers/hisi-sfc.c		 499
-rw-r--r--	drivers/mtd/spi-nor/controllers/intel-spi-pci.c		  94
-rw-r--r--	drivers/mtd/spi-nor/controllers/intel-spi-platform.c	  54
-rw-r--r--	drivers/mtd/spi-nor/controllers/intel-spi.c		 960
-rw-r--r--	drivers/mtd/spi-nor/controllers/intel-spi.h		  21
-rw-r--r--	drivers/mtd/spi-nor/controllers/nxp-spifi.c		 486
10 files changed, 4647 insertions(+), 0 deletions(-)
diff --git a/drivers/mtd/spi-nor/controllers/Kconfig b/drivers/mtd/spi-nor/controllers/Kconfig
new file mode 100644
index 000000000000..10b86660b821
--- /dev/null
+++ b/drivers/mtd/spi-nor/controllers/Kconfig
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config SPI_ASPEED_SMC
+	tristate "Aspeed flash controllers in SPI mode"
+	depends on ARCH_ASPEED || COMPILE_TEST
+	depends on HAS_IOMEM && OF
+	help
+	  This enables support for the Firmware Memory controller (FMC)
+	  in the Aspeed AST2500/AST2400 SoCs when attached to SPI NOR chips,
+	  and support for the SPI flash memory controller (SPI) for
+	  the host firmware. The implementation only supports SPI NOR.
+
+config SPI_CADENCE_QUADSPI
+	tristate "Cadence Quad SPI controller"
+	depends on OF && (ARM || ARM64 || COMPILE_TEST)
+	help
+	  Enable support for the Cadence Quad SPI Flash controller.
+
+	  Cadence QSPI is a specialized controller for connecting an SPI
+	  Flash over a 1/2/4-bit wide bus. Enable this option if you have a
+	  device with a Cadence QSPI controller and want to access the
+	  Flash as an MTD device.
+
+config SPI_HISI_SFC
+	tristate "HiSilicon FMC SPI-NOR Flash Controller (SFC)"
+	depends on ARCH_HISI || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  This enables support for the HiSilicon FMC SPI-NOR flash controller.
+
+config SPI_NXP_SPIFI
+	tristate "NXP SPI Flash Interface (SPIFI)"
+	depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
+	depends on HAS_IOMEM
+	help
+	  Enable support for the NXP LPC SPI Flash Interface controller.
+
+	  SPIFI is a specialized controller for connecting serial SPI
+	  Flash. Enable this option if you have a device with a SPIFI
+	  controller and want to access the Flash as an MTD device.
+
+config SPI_INTEL_SPI
+	tristate
+
+config SPI_INTEL_SPI_PCI
+	tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)"
+	depends on X86 && PCI
+	select SPI_INTEL_SPI
+	help
+	  This enables PCI support for the Intel PCH/PCU SPI controller in
+	  master mode. This controller is present in modern Intel hardware
+	  and is used to hold BIOS and other persistent settings. Using
+	  this driver it is possible to upgrade the BIOS directly from Linux.
+
+	  Say N here unless you know what you are doing. Overwriting the
+	  SPI flash may render the system unbootable.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called intel-spi-pci.
+
+config SPI_INTEL_SPI_PLATFORM
+	tristate "Intel PCH/PCU SPI flash platform driver (DANGEROUS)"
+	depends on X86
+	select SPI_INTEL_SPI
+	help
+	  This enables platform support for the Intel PCH/PCU SPI
+	  controller in master mode. This controller is present in modern
+	  Intel hardware and is used to hold BIOS and other persistent
+	  settings. Using this driver it is possible to upgrade the BIOS
+	  directly from Linux.
+
+	  Say N here unless you know what you are doing. Overwriting the
+	  SPI flash may render the system unbootable.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called intel-spi-platform.
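Every help text above ends the same way: the flash behind the controller is accessed as an MTD device. As a minimal user-space sketch of what that means (not taken from the patch; it assumes the chip registered as /dev/mtd0, an index that depends on the board), the first bytes of the flash can be read through the MTD character device:

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char buf[16];
		/* Hypothetical device node; the mtd index is board-dependent. */
		int fd = open("/dev/mtd0", O_RDONLY);

		if (fd < 0) {
			perror("open /dev/mtd0");
			return EXIT_FAILURE;
		}
		/* Read the first 16 bytes of the flash through the MTD layer. */
		if (read(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
			perror("read");
			close(fd);
			return EXIT_FAILURE;
		}
		for (size_t i = 0; i < sizeof(buf); i++)
			printf("%02x ", buf[i]);
		printf("\n");
		close(fd);
		return EXIT_SUCCESS;
	}
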
diff --git a/drivers/mtd/spi-nor/controllers/Makefile b/drivers/mtd/spi-nor/controllers/Makefile new file mode 100644 index 000000000000..46e6fbe586e3 --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o +obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o +obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o +obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o +obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o +obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o +obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o diff --git a/drivers/mtd/spi-nor/controllers/aspeed-smc.c b/drivers/mtd/spi-nor/controllers/aspeed-smc.c new file mode 100644 index 000000000000..ae85e4c0e114 --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/aspeed-smc.c @@ -0,0 +1,910 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * ASPEED Static Memory Controller driver + * + * Copyright (c) 2015-2016, IBM Corporation. + */ + +#include <linux/bug.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/mtd/spi-nor.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/sizes.h> +#include <linux/sysfs.h> + +#define DEVICE_NAME "aspeed-smc" + +/* + * The driver only support SPI flash + */ +enum aspeed_smc_flash_type { + smc_type_nor = 0, + smc_type_nand = 1, + smc_type_spi = 2, +}; + +struct aspeed_smc_chip; + +struct aspeed_smc_info { + u32 maxsize; /* maximum size of chip window */ + u8 nce; /* number of chip enables */ + bool hastype; /* flash type field exists in config reg */ + u8 we0; /* shift for write enable bit for CE0 */ + u8 ctl0; /* offset in regs of ctl for CE0 */ + + void (*set_4b)(struct aspeed_smc_chip *chip); +}; + +static void aspeed_smc_chip_set_4b_spi_2400(struct aspeed_smc_chip *chip); +static void aspeed_smc_chip_set_4b(struct aspeed_smc_chip *chip); + +static const struct aspeed_smc_info fmc_2400_info = { + .maxsize = 64 * 1024 * 1024, + .nce = 5, + .hastype = true, + .we0 = 16, + .ctl0 = 0x10, + .set_4b = aspeed_smc_chip_set_4b, +}; + +static const struct aspeed_smc_info spi_2400_info = { + .maxsize = 64 * 1024 * 1024, + .nce = 1, + .hastype = false, + .we0 = 0, + .ctl0 = 0x04, + .set_4b = aspeed_smc_chip_set_4b_spi_2400, +}; + +static const struct aspeed_smc_info fmc_2500_info = { + .maxsize = 256 * 1024 * 1024, + .nce = 3, + .hastype = true, + .we0 = 16, + .ctl0 = 0x10, + .set_4b = aspeed_smc_chip_set_4b, +}; + +static const struct aspeed_smc_info spi_2500_info = { + .maxsize = 128 * 1024 * 1024, + .nce = 2, + .hastype = false, + .we0 = 16, + .ctl0 = 0x10, + .set_4b = aspeed_smc_chip_set_4b, +}; + +enum aspeed_smc_ctl_reg_value { + smc_base, /* base value without mode for other commands */ + smc_read, /* command reg for (maybe fast) reads */ + smc_write, /* command reg for writes */ + smc_max, +}; + +struct aspeed_smc_controller; + +struct aspeed_smc_chip { + int cs; + struct aspeed_smc_controller *controller; + void __iomem *ctl; /* control register */ + void __iomem *ahb_base; /* base of chip window */ + u32 ahb_window_size; /* chip mapping window size */ + u32 ctl_val[smc_max]; /* control settings */ + enum aspeed_smc_flash_type type; /* what type of flash */ + struct spi_nor nor; +}; + +struct aspeed_smc_controller { + struct device *dev; + + struct mutex mutex; /* controller access mutex */ + const struct aspeed_smc_info *info; /* type info of controller 
 */
+	void __iomem *regs;		/* controller registers */
+	void __iomem *ahb_base;		/* per-chip windows resource */
+	u32 ahb_window_size;		/* full mapping window size */
+
+	struct aspeed_smc_chip *chips[];	/* pointers to attached chips */
+};
+
+/*
+ * SPI Flash Configuration Register (AST2500 SPI)
+ * or
+ * Type setting Register (AST2500 FMC).
+ * CE0 and CE1 can only be of type SPI. CE2 can be of type NOR, but the
+ * driver does not support it.
+ */
+#define CONFIG_REG			0x0
+#define CONFIG_DISABLE_LEGACY		BIT(31) /* 1 */
+
+#define CONFIG_CE2_WRITE		BIT(18)
+#define CONFIG_CE1_WRITE		BIT(17)
+#define CONFIG_CE0_WRITE		BIT(16)
+
+#define CONFIG_CE2_TYPE			BIT(4) /* AST2500 FMC only */
+#define CONFIG_CE1_TYPE			BIT(2) /* AST2500 FMC only */
+#define CONFIG_CE0_TYPE			BIT(0) /* AST2500 FMC only */
+
+/*
+ * CE Control Register
+ */
+#define CE_CONTROL_REG			0x4
+
+/*
+ * CEx Control Register
+ */
+#define CONTROL_AAF_MODE		BIT(31)
+#define CONTROL_IO_MODE_MASK		GENMASK(30, 28)
+#define CONTROL_IO_DUAL_DATA		BIT(29)
+#define CONTROL_IO_DUAL_ADDR_DATA	(BIT(29) | BIT(28))
+#define CONTROL_IO_QUAD_DATA		BIT(30)
+#define CONTROL_IO_QUAD_ADDR_DATA	(BIT(30) | BIT(28))
+#define CONTROL_CE_INACTIVE_SHIFT	24
+#define CONTROL_CE_INACTIVE_MASK	GENMASK(27, \
+					CONTROL_CE_INACTIVE_SHIFT)
+/* 0 = 16T ... 15 = 1T   T=HCLK */
+#define CONTROL_COMMAND_SHIFT		16
+#define CONTROL_DUMMY_COMMAND_OUT	BIT(15)
+#define CONTROL_IO_DUMMY_HI		BIT(14)
+#define CONTROL_IO_DUMMY_HI_SHIFT	14
+#define CONTROL_CLK_DIV4		BIT(13) /* others */
+#define CONTROL_IO_ADDRESS_4B		BIT(13) /* AST2400 SPI */
+#define CONTROL_RW_MERGE		BIT(12)
+#define CONTROL_IO_DUMMY_LO_SHIFT	6
+#define CONTROL_IO_DUMMY_LO		GENMASK(7, \
+					CONTROL_IO_DUMMY_LO_SHIFT)
+#define CONTROL_IO_DUMMY_MASK		(CONTROL_IO_DUMMY_HI | \
+					 CONTROL_IO_DUMMY_LO)
+#define CONTROL_IO_DUMMY_SET(dummy)				 \
+	(((((dummy) >> 2) & 0x1) << CONTROL_IO_DUMMY_HI_SHIFT) | \
+	 (((dummy) & 0x3) << CONTROL_IO_DUMMY_LO_SHIFT))
+
+#define CONTROL_CLOCK_FREQ_SEL_SHIFT	8
+#define CONTROL_CLOCK_FREQ_SEL_MASK	GENMASK(11, \
+					CONTROL_CLOCK_FREQ_SEL_SHIFT)
+#define CONTROL_LSB_FIRST		BIT(5)
+#define CONTROL_CLOCK_MODE_3		BIT(4)
+#define CONTROL_IN_DUAL_DATA		BIT(3)
+#define CONTROL_CE_STOP_ACTIVE_CONTROL	BIT(2)
+#define CONTROL_COMMAND_MODE_MASK	GENMASK(1, 0)
+#define CONTROL_COMMAND_MODE_NORMAL	0
+#define CONTROL_COMMAND_MODE_FREAD	1
+#define CONTROL_COMMAND_MODE_WRITE	2
+#define CONTROL_COMMAND_MODE_USER	3
+
+#define CONTROL_KEEP_MASK						\
+	(CONTROL_AAF_MODE | CONTROL_CE_INACTIVE_MASK | CONTROL_CLK_DIV4 | \
+	 CONTROL_CLOCK_FREQ_SEL_MASK | CONTROL_LSB_FIRST | CONTROL_CLOCK_MODE_3)
+
+/*
+ * The Segment Register uses an 8MB unit to encode the start address
+ * and the end address of the mapping window of a flash SPI slave:
+ *
+ *        | byte 1 | byte 2 | byte 3 | byte 4 |
+ *        +--------+--------+--------+--------+
+ *        |  end   |  start |   0    |   0    |
+ */
+#define SEGMENT_ADDR_REG0		0x30
+#define SEGMENT_ADDR_START(_r)		((((_r) >> 16) & 0xFF) << 23)
+#define SEGMENT_ADDR_END(_r)		((((_r) >> 24) & 0xFF) << 23)
+#define SEGMENT_ADDR_VALUE(start, end)					\
+	(((((start) >> 23) & 0xFF) << 16) | ((((end) >> 23) & 0xFF) << 24))
+#define SEGMENT_ADDR_REG(controller, cs)	\
+	((controller)->regs + SEGMENT_ADDR_REG0 + (cs) * 4)
+
+/*
+ * In user mode all data bytes read or written to the chip decode address
+ * range are transferred to or from the SPI bus. The range is treated as a
+ * fifo of arbitrary 1, 2, or 4 byte width but each write has to be aligned
+ * to its size. The address within the multiple 8kB range is ignored when
+ * sending bytes to the SPI bus.
+ *
+ * On the arm architecture, as of Linux version 4.3, memcpy_fromio and
+ * memcpy_toio on little endian targets use the optimized memcpy routines
+ * that were designed for well-behaved memory storage. These routines
+ * have a stutter if the source and destination are not both word aligned,
+ * once with a duplicate access to the source after aligning the
+ * destination to a word boundary, and again with a duplicate access to
+ * the source when the final byte count is not word aligned.
+ *
+ * When writing or reading the fifo this stutter discards data or sends
+ * too much data to the fifo and cannot be used by this driver.
+ *
+ * While the low level io string routines that implement the insl family do
+ * the desired accesses and memory increments, the cross architecture io
+ * macros make them essentially impossible to use on a memory mapped address
+ * instead of a token from the call to iomap of an io port.
+ *
+ * These fifo routines use readl and friends to a constant io port and update
+ * the memory buffer pointer and count via explicit code. The final updates
+ * to len are optimistically suppressed.
+ */
+static int aspeed_smc_read_from_ahb(void *buf, void __iomem *src, size_t len)
+{
+	size_t offset = 0;
+
+	if (IS_ALIGNED((uintptr_t)src, sizeof(uintptr_t)) &&
+	    IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
+		ioread32_rep(src, buf, len >> 2);
+		offset = len & ~0x3;
+		len -= offset;
+	}
+	ioread8_rep(src, (u8 *)buf + offset, len);
+	return 0;
+}
+
+static int aspeed_smc_write_to_ahb(void __iomem *dst, const void *buf,
+				   size_t len)
+{
+	size_t offset = 0;
+
+	if (IS_ALIGNED((uintptr_t)dst, sizeof(uintptr_t)) &&
+	    IS_ALIGNED((uintptr_t)buf, sizeof(uintptr_t))) {
+		iowrite32_rep(dst, buf, len >> 2);
+		offset = len & ~0x3;
+		len -= offset;
+	}
+	iowrite8_rep(dst, (const u8 *)buf + offset, len);
+	return 0;
+}
+
+static inline u32 aspeed_smc_chip_write_bit(struct aspeed_smc_chip *chip)
+{
+	return BIT(chip->controller->info->we0 + chip->cs);
+}
+
+static void aspeed_smc_chip_check_config(struct aspeed_smc_chip *chip)
+{
+	struct aspeed_smc_controller *controller = chip->controller;
+	u32 reg;
+
+	reg = readl(controller->regs + CONFIG_REG);
+
+	if (reg & aspeed_smc_chip_write_bit(chip))
+		return;
+
+	dev_dbg(controller->dev, "config write is not set ! @%p: 0x%08x\n",
+		controller->regs + CONFIG_REG, reg);
+	reg |= aspeed_smc_chip_write_bit(chip);
+	writel(reg, controller->regs + CONFIG_REG);
+}
+
+static void aspeed_smc_start_user(struct spi_nor *nor)
+{
+	struct aspeed_smc_chip *chip = nor->priv;
+	u32 ctl = chip->ctl_val[smc_base];
+
+	/*
+	 * When the chip is controlled in user mode, we need write
+	 * access to send the opcodes to it. So check the config.
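+	 * Below, user command mode is first selected with
+	 * CONTROL_CE_STOP_ACTIVE_CONTROL held set (chip select kept
+	 * inactive), then that bit is cleared so the chip select
+	 * becomes active and the user-mode transfer can begin.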
+ */ + aspeed_smc_chip_check_config(chip); + + ctl |= CONTROL_COMMAND_MODE_USER | + CONTROL_CE_STOP_ACTIVE_CONTROL; + writel(ctl, chip->ctl); + + ctl &= ~CONTROL_CE_STOP_ACTIVE_CONTROL; + writel(ctl, chip->ctl); +} + +static void aspeed_smc_stop_user(struct spi_nor *nor) +{ + struct aspeed_smc_chip *chip = nor->priv; + + u32 ctl = chip->ctl_val[smc_read]; + u32 ctl2 = ctl | CONTROL_COMMAND_MODE_USER | + CONTROL_CE_STOP_ACTIVE_CONTROL; + + writel(ctl2, chip->ctl); /* stop user CE control */ + writel(ctl, chip->ctl); /* default to fread or read mode */ +} + +static int aspeed_smc_prep(struct spi_nor *nor) +{ + struct aspeed_smc_chip *chip = nor->priv; + + mutex_lock(&chip->controller->mutex); + return 0; +} + +static void aspeed_smc_unprep(struct spi_nor *nor) +{ + struct aspeed_smc_chip *chip = nor->priv; + + mutex_unlock(&chip->controller->mutex); +} + +static int aspeed_smc_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, + size_t len) +{ + struct aspeed_smc_chip *chip = nor->priv; + + aspeed_smc_start_user(nor); + aspeed_smc_write_to_ahb(chip->ahb_base, &opcode, 1); + aspeed_smc_read_from_ahb(buf, chip->ahb_base, len); + aspeed_smc_stop_user(nor); + return 0; +} + +static int aspeed_smc_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf, + size_t len) +{ + struct aspeed_smc_chip *chip = nor->priv; + + aspeed_smc_start_user(nor); + aspeed_smc_write_to_ahb(chip->ahb_base, &opcode, 1); + aspeed_smc_write_to_ahb(chip->ahb_base, buf, len); + aspeed_smc_stop_user(nor); + return 0; +} + +static void aspeed_smc_send_cmd_addr(struct spi_nor *nor, u8 cmd, u32 addr) +{ + struct aspeed_smc_chip *chip = nor->priv; + __be32 temp; + u32 cmdaddr; + + switch (nor->addr_width) { + default: + WARN_ONCE(1, "Unexpected address width %u, defaulting to 3\n", + nor->addr_width); + fallthrough; + case 3: + cmdaddr = addr & 0xFFFFFF; + cmdaddr |= cmd << 24; + + temp = cpu_to_be32(cmdaddr); + aspeed_smc_write_to_ahb(chip->ahb_base, &temp, 4); + break; + case 4: + temp = cpu_to_be32(addr); + aspeed_smc_write_to_ahb(chip->ahb_base, &cmd, 1); + aspeed_smc_write_to_ahb(chip->ahb_base, &temp, 4); + break; + } +} + +static ssize_t aspeed_smc_read_user(struct spi_nor *nor, loff_t from, + size_t len, u_char *read_buf) +{ + struct aspeed_smc_chip *chip = nor->priv; + int i; + u8 dummy = 0xFF; + + aspeed_smc_start_user(nor); + aspeed_smc_send_cmd_addr(nor, nor->read_opcode, from); + for (i = 0; i < chip->nor.read_dummy / 8; i++) + aspeed_smc_write_to_ahb(chip->ahb_base, &dummy, sizeof(dummy)); + + aspeed_smc_read_from_ahb(read_buf, chip->ahb_base, len); + aspeed_smc_stop_user(nor); + return len; +} + +static ssize_t aspeed_smc_write_user(struct spi_nor *nor, loff_t to, + size_t len, const u_char *write_buf) +{ + struct aspeed_smc_chip *chip = nor->priv; + + aspeed_smc_start_user(nor); + aspeed_smc_send_cmd_addr(nor, nor->program_opcode, to); + aspeed_smc_write_to_ahb(chip->ahb_base, write_buf, len); + aspeed_smc_stop_user(nor); + return len; +} + +static int aspeed_smc_unregister(struct aspeed_smc_controller *controller) +{ + struct aspeed_smc_chip *chip; + int n; + + for (n = 0; n < controller->info->nce; n++) { + chip = controller->chips[n]; + if (chip) + mtd_device_unregister(&chip->nor.mtd); + } + + return 0; +} + +static int aspeed_smc_remove(struct platform_device *dev) +{ + return aspeed_smc_unregister(platform_get_drvdata(dev)); +} + +static const struct of_device_id aspeed_smc_matches[] = { + { .compatible = "aspeed,ast2400-fmc", .data = &fmc_2400_info }, + { .compatible = "aspeed,ast2400-spi", .data = 
&spi_2400_info }, + { .compatible = "aspeed,ast2500-fmc", .data = &fmc_2500_info }, + { .compatible = "aspeed,ast2500-spi", .data = &spi_2500_info }, + { } +}; +MODULE_DEVICE_TABLE(of, aspeed_smc_matches); + +/* + * Each chip has a mapping window defined by a segment address + * register defining a start and an end address on the AHB bus. These + * addresses can be configured to fit the chip size and offer a + * contiguous memory region across chips. For the moment, we only + * check that each chip segment is valid. + */ +static void __iomem *aspeed_smc_chip_base(struct aspeed_smc_chip *chip, + struct resource *res) +{ + struct aspeed_smc_controller *controller = chip->controller; + u32 offset = 0; + u32 reg; + + if (controller->info->nce > 1) { + reg = readl(SEGMENT_ADDR_REG(controller, chip->cs)); + + if (SEGMENT_ADDR_START(reg) >= SEGMENT_ADDR_END(reg)) + return NULL; + + offset = SEGMENT_ADDR_START(reg) - res->start; + } + + return controller->ahb_base + offset; +} + +static u32 aspeed_smc_ahb_base_phy(struct aspeed_smc_controller *controller) +{ + u32 seg0_val = readl(SEGMENT_ADDR_REG(controller, 0)); + + return SEGMENT_ADDR_START(seg0_val); +} + +static u32 chip_set_segment(struct aspeed_smc_chip *chip, u32 cs, u32 start, + u32 size) +{ + struct aspeed_smc_controller *controller = chip->controller; + void __iomem *seg_reg; + u32 seg_oldval, seg_newval, ahb_base_phy, end; + + ahb_base_phy = aspeed_smc_ahb_base_phy(controller); + + seg_reg = SEGMENT_ADDR_REG(controller, cs); + seg_oldval = readl(seg_reg); + + /* + * If the chip size is not specified, use the default segment + * size, but take into account the possible overlap with the + * previous segment + */ + if (!size) + size = SEGMENT_ADDR_END(seg_oldval) - start; + + /* + * The segment cannot exceed the maximum window size of the + * controller. + */ + if (start + size > ahb_base_phy + controller->ahb_window_size) { + size = ahb_base_phy + controller->ahb_window_size - start; + dev_warn(chip->nor.dev, "CE%d window resized to %dMB", + cs, size >> 20); + } + + end = start + size; + seg_newval = SEGMENT_ADDR_VALUE(start, end); + writel(seg_newval, seg_reg); + + /* + * Restore default value if something goes wrong. The chip + * might have set some bogus value and we would loose access + * to the chip. + */ + if (seg_newval != readl(seg_reg)) { + dev_err(chip->nor.dev, "CE%d window invalid", cs); + writel(seg_oldval, seg_reg); + start = SEGMENT_ADDR_START(seg_oldval); + end = SEGMENT_ADDR_END(seg_oldval); + size = end - start; + } + + dev_info(chip->nor.dev, "CE%d window [ 0x%.8x - 0x%.8x ] %dMB", + cs, start, end, size >> 20); + + return size; +} + +/* + * The segment register defines the mapping window on the AHB bus and + * it needs to be configured depending on the chip size. The segment + * register of the following CE also needs to be tuned in order to + * provide a contiguous window across multiple chips. + * + * This is expected to be called in increasing CE order + */ +static u32 aspeed_smc_chip_set_segment(struct aspeed_smc_chip *chip) +{ + struct aspeed_smc_controller *controller = chip->controller; + u32 ahb_base_phy, start; + u32 size = chip->nor.mtd.size; + + /* + * Each controller has a chip size limit for direct memory + * access + */ + if (size > controller->info->maxsize) + size = controller->info->maxsize; + + /* + * The AST2400 SPI controller only handles one chip and does + * not have segment registers. Let's use the chip size for the + * AHB window. 
+ */ + if (controller->info == &spi_2400_info) + goto out; + + /* + * The AST2500 SPI controller has a HW bug when the CE0 chip + * size reaches 128MB. Enforce a size limit of 120MB to + * prevent the controller from using bogus settings in the + * segment register. + */ + if (chip->cs == 0 && controller->info == &spi_2500_info && + size == SZ_128M) { + size = 120 << 20; + dev_info(chip->nor.dev, + "CE%d window resized to %dMB (AST2500 HW quirk)", + chip->cs, size >> 20); + } + + ahb_base_phy = aspeed_smc_ahb_base_phy(controller); + + /* + * As a start address for the current segment, use the default + * start address if we are handling CE0 or use the previous + * segment ending address + */ + if (chip->cs) { + u32 prev = readl(SEGMENT_ADDR_REG(controller, chip->cs - 1)); + + start = SEGMENT_ADDR_END(prev); + } else { + start = ahb_base_phy; + } + + size = chip_set_segment(chip, chip->cs, start, size); + + /* Update chip base address on the AHB bus */ + chip->ahb_base = controller->ahb_base + (start - ahb_base_phy); + + /* + * Now, make sure the next segment does not overlap with the + * current one we just configured, even if there is no + * available chip. That could break access in Command Mode. + */ + if (chip->cs < controller->info->nce - 1) + chip_set_segment(chip, chip->cs + 1, start + size, 0); + +out: + if (size < chip->nor.mtd.size) + dev_warn(chip->nor.dev, + "CE%d window too small for chip %dMB", + chip->cs, (u32)chip->nor.mtd.size >> 20); + + return size; +} + +static void aspeed_smc_chip_enable_write(struct aspeed_smc_chip *chip) +{ + struct aspeed_smc_controller *controller = chip->controller; + u32 reg; + + reg = readl(controller->regs + CONFIG_REG); + + reg |= aspeed_smc_chip_write_bit(chip); + writel(reg, controller->regs + CONFIG_REG); +} + +static void aspeed_smc_chip_set_type(struct aspeed_smc_chip *chip, int type) +{ + struct aspeed_smc_controller *controller = chip->controller; + u32 reg; + + chip->type = type; + + reg = readl(controller->regs + CONFIG_REG); + reg &= ~(3 << (chip->cs * 2)); + reg |= chip->type << (chip->cs * 2); + writel(reg, controller->regs + CONFIG_REG); +} + +/* + * The first chip of the AST2500 FMC flash controller is strapped by + * hardware, or autodetected, but other chips need to be set. Enforce + * the 4B setting for all chips. + */ +static void aspeed_smc_chip_set_4b(struct aspeed_smc_chip *chip) +{ + struct aspeed_smc_controller *controller = chip->controller; + u32 reg; + + reg = readl(controller->regs + CE_CONTROL_REG); + reg |= 1 << chip->cs; + writel(reg, controller->regs + CE_CONTROL_REG); +} + +/* + * The AST2400 SPI flash controller does not have a CE Control + * register. It uses the CE0 control register to set 4Byte mode at the + * controller level. + */ +static void aspeed_smc_chip_set_4b_spi_2400(struct aspeed_smc_chip *chip) +{ + chip->ctl_val[smc_base] |= CONTROL_IO_ADDRESS_4B; + chip->ctl_val[smc_read] |= CONTROL_IO_ADDRESS_4B; +} + +static int aspeed_smc_chip_setup_init(struct aspeed_smc_chip *chip, + struct resource *res) +{ + struct aspeed_smc_controller *controller = chip->controller; + const struct aspeed_smc_info *info = controller->info; + u32 reg, base_reg; + + /* + * Always turn on the write enable bit to allow opcodes to be + * sent in user mode. 
+ */ + aspeed_smc_chip_enable_write(chip); + + /* The driver only supports SPI type flash */ + if (info->hastype) + aspeed_smc_chip_set_type(chip, smc_type_spi); + + /* + * Configure chip base address in memory + */ + chip->ahb_base = aspeed_smc_chip_base(chip, res); + if (!chip->ahb_base) { + dev_warn(chip->nor.dev, "CE%d window closed", chip->cs); + return -EINVAL; + } + + /* + * Get value of the inherited control register. U-Boot usually + * does some timing calibration on the FMC chip, so it's good + * to keep them. In the future, we should handle calibration + * from Linux. + */ + reg = readl(chip->ctl); + dev_dbg(controller->dev, "control register: %08x\n", reg); + + base_reg = reg & CONTROL_KEEP_MASK; + if (base_reg != reg) { + dev_dbg(controller->dev, + "control register changed to: %08x\n", + base_reg); + } + chip->ctl_val[smc_base] = base_reg; + + /* + * Retain the prior value of the control register as the + * default if it was normal access mode. Otherwise start with + * the sanitized base value set to read mode. + */ + if ((reg & CONTROL_COMMAND_MODE_MASK) == + CONTROL_COMMAND_MODE_NORMAL) + chip->ctl_val[smc_read] = reg; + else + chip->ctl_val[smc_read] = chip->ctl_val[smc_base] | + CONTROL_COMMAND_MODE_NORMAL; + + dev_dbg(controller->dev, "default control register: %08x\n", + chip->ctl_val[smc_read]); + return 0; +} + +static int aspeed_smc_chip_setup_finish(struct aspeed_smc_chip *chip) +{ + struct aspeed_smc_controller *controller = chip->controller; + const struct aspeed_smc_info *info = controller->info; + u32 cmd; + + if (chip->nor.addr_width == 4 && info->set_4b) + info->set_4b(chip); + + /* This is for direct AHB access when using Command Mode. */ + chip->ahb_window_size = aspeed_smc_chip_set_segment(chip); + + /* + * base mode has not been optimized yet. use it for writes. + */ + chip->ctl_val[smc_write] = chip->ctl_val[smc_base] | + chip->nor.program_opcode << CONTROL_COMMAND_SHIFT | + CONTROL_COMMAND_MODE_WRITE; + + dev_dbg(controller->dev, "write control register: %08x\n", + chip->ctl_val[smc_write]); + + /* + * TODO: Adjust clocks if fast read is supported and interpret + * SPI-NOR flags to adjust controller settings. + */ + if (chip->nor.read_proto == SNOR_PROTO_1_1_1) { + if (chip->nor.read_dummy == 0) + cmd = CONTROL_COMMAND_MODE_NORMAL; + else + cmd = CONTROL_COMMAND_MODE_FREAD; + } else { + dev_err(chip->nor.dev, "unsupported SPI read mode\n"); + return -EINVAL; + } + + chip->ctl_val[smc_read] |= cmd | + CONTROL_IO_DUMMY_SET(chip->nor.read_dummy / 8); + + dev_dbg(controller->dev, "base control register: %08x\n", + chip->ctl_val[smc_read]); + return 0; +} + +static const struct spi_nor_controller_ops aspeed_smc_controller_ops = { + .prepare = aspeed_smc_prep, + .unprepare = aspeed_smc_unprep, + .read_reg = aspeed_smc_read_reg, + .write_reg = aspeed_smc_write_reg, + .read = aspeed_smc_read_user, + .write = aspeed_smc_write_user, +}; + +static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller, + struct device_node *np, struct resource *r) +{ + const struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ | + SNOR_HWCAPS_READ_FAST | + SNOR_HWCAPS_PP, + }; + const struct aspeed_smc_info *info = controller->info; + struct device *dev = controller->dev; + struct device_node *child; + unsigned int cs; + int ret = -ENODEV; + + for_each_available_child_of_node(np, child) { + struct aspeed_smc_chip *chip; + struct spi_nor *nor; + struct mtd_info *mtd; + + /* This driver does not support NAND or NOR flash devices. 
*/ + if (!of_device_is_compatible(child, "jedec,spi-nor")) + continue; + + ret = of_property_read_u32(child, "reg", &cs); + if (ret) { + dev_err(dev, "Couldn't not read chip select.\n"); + break; + } + + if (cs >= info->nce) { + dev_err(dev, "Chip select %d out of range.\n", + cs); + ret = -ERANGE; + break; + } + + if (controller->chips[cs]) { + dev_err(dev, "Chip select %d already in use by %s\n", + cs, dev_name(controller->chips[cs]->nor.dev)); + ret = -EBUSY; + break; + } + + chip = devm_kzalloc(controller->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) { + ret = -ENOMEM; + break; + } + + chip->controller = controller; + chip->ctl = controller->regs + info->ctl0 + cs * 4; + chip->cs = cs; + + nor = &chip->nor; + mtd = &nor->mtd; + + nor->dev = dev; + nor->priv = chip; + spi_nor_set_flash_node(nor, child); + nor->controller_ops = &aspeed_smc_controller_ops; + + ret = aspeed_smc_chip_setup_init(chip, r); + if (ret) + break; + + /* + * TODO: Add support for Dual and Quad SPI protocols + * attach when board support is present as determined + * by of property. + */ + ret = spi_nor_scan(nor, NULL, &hwcaps); + if (ret) + break; + + ret = aspeed_smc_chip_setup_finish(chip); + if (ret) + break; + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) + break; + + controller->chips[cs] = chip; + } + + if (ret) { + of_node_put(child); + aspeed_smc_unregister(controller); + } + + return ret; +} + +static int aspeed_smc_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct aspeed_smc_controller *controller; + const struct of_device_id *match; + const struct aspeed_smc_info *info; + struct resource *res; + int ret; + + match = of_match_device(aspeed_smc_matches, &pdev->dev); + if (!match || !match->data) + return -ENODEV; + info = match->data; + + controller = devm_kzalloc(&pdev->dev, + struct_size(controller, chips, info->nce), + GFP_KERNEL); + if (!controller) + return -ENOMEM; + controller->info = info; + controller->dev = dev; + + mutex_init(&controller->mutex); + platform_set_drvdata(pdev, controller); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + controller->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(controller->regs)) + return PTR_ERR(controller->regs); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + controller->ahb_base = devm_ioremap_resource(dev, res); + if (IS_ERR(controller->ahb_base)) + return PTR_ERR(controller->ahb_base); + + controller->ahb_window_size = resource_size(res); + + ret = aspeed_smc_setup_flash(controller, np, res); + if (ret) + dev_err(dev, "Aspeed SMC probe failed %d\n", ret); + + return ret; +} + +static struct platform_driver aspeed_smc_driver = { + .probe = aspeed_smc_probe, + .remove = aspeed_smc_remove, + .driver = { + .name = DEVICE_NAME, + .of_match_table = aspeed_smc_matches, + } +}; + +module_platform_driver(aspeed_smc_driver); + +MODULE_DESCRIPTION("ASPEED Static Memory Controller Driver"); +MODULE_AUTHOR("Cedric Le Goater <clg@kaod.org>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/spi-nor/controllers/cadence-quadspi.c b/drivers/mtd/spi-nor/controllers/cadence-quadspi.c new file mode 100644 index 000000000000..494dcab4aaaa --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/cadence-quadspi.c @@ -0,0 +1,1540 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Driver for Cadence QSPI Controller + * + * Copyright Altera Corporation (C) 2012-2014. All rights reserved. 
+ */ +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/mtd/spi-nor.h> +#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> +#include <linux/sched.h> +#include <linux/spi/spi.h> +#include <linux/timer.h> + +#define CQSPI_NAME "cadence-qspi" +#define CQSPI_MAX_CHIPSELECT 16 + +/* Quirks */ +#define CQSPI_NEEDS_WR_DELAY BIT(0) + +/* Capabilities mask */ +#define CQSPI_BASE_HWCAPS_MASK \ + (SNOR_HWCAPS_READ | SNOR_HWCAPS_READ_FAST | \ + SNOR_HWCAPS_READ_1_1_2 | SNOR_HWCAPS_READ_1_1_4 | \ + SNOR_HWCAPS_PP) + +struct cqspi_st; + +struct cqspi_flash_pdata { + struct spi_nor nor; + struct cqspi_st *cqspi; + u32 clk_rate; + u32 read_delay; + u32 tshsl_ns; + u32 tsd2d_ns; + u32 tchsh_ns; + u32 tslch_ns; + u8 inst_width; + u8 addr_width; + u8 data_width; + u8 cs; + bool registered; + bool use_direct_mode; +}; + +struct cqspi_st { + struct platform_device *pdev; + + struct clk *clk; + unsigned int sclk; + + void __iomem *iobase; + void __iomem *ahb_base; + resource_size_t ahb_size; + struct completion transfer_complete; + struct mutex bus_mutex; + + struct dma_chan *rx_chan; + struct completion rx_dma_complete; + dma_addr_t mmap_phys_base; + + int current_cs; + int current_page_size; + int current_erase_size; + int current_addr_width; + unsigned long master_ref_clk_hz; + bool is_decoded_cs; + u32 fifo_depth; + u32 fifo_width; + bool rclk_en; + u32 trigger_address; + u32 wr_delay; + struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT]; +}; + +struct cqspi_driver_platdata { + u32 hwcaps_mask; + u8 quirks; +}; + +/* Operation timeout value */ +#define CQSPI_TIMEOUT_MS 500 +#define CQSPI_READ_TIMEOUT_MS 10 + +/* Instruction type */ +#define CQSPI_INST_TYPE_SINGLE 0 +#define CQSPI_INST_TYPE_DUAL 1 +#define CQSPI_INST_TYPE_QUAD 2 +#define CQSPI_INST_TYPE_OCTAL 3 + +#define CQSPI_DUMMY_CLKS_PER_BYTE 8 +#define CQSPI_DUMMY_BYTES_MAX 4 +#define CQSPI_DUMMY_CLKS_MAX 31 + +#define CQSPI_STIG_DATA_LEN_MAX 8 + +/* Register map */ +#define CQSPI_REG_CONFIG 0x00 +#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0) +#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL BIT(7) +#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9) +#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10 +#define CQSPI_REG_CONFIG_DMA_MASK BIT(15) +#define CQSPI_REG_CONFIG_BAUD_LSB 19 +#define CQSPI_REG_CONFIG_IDLE_LSB 31 +#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF +#define CQSPI_REG_CONFIG_BAUD_MASK 0xF + +#define CQSPI_REG_RD_INSTR 0x04 +#define CQSPI_REG_RD_INSTR_OPCODE_LSB 0 +#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB 8 +#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB 12 +#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB 16 +#define CQSPI_REG_RD_INSTR_MODE_EN_LSB 20 +#define CQSPI_REG_RD_INSTR_DUMMY_LSB 24 +#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK 0x3 +#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK 0x3 +#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK 0x3 +#define CQSPI_REG_RD_INSTR_DUMMY_MASK 0x1F + +#define CQSPI_REG_WR_INSTR 0x08 +#define CQSPI_REG_WR_INSTR_OPCODE_LSB 0 +#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB 12 +#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16 + +#define CQSPI_REG_DELAY 0x0C +#define CQSPI_REG_DELAY_TSLCH_LSB 0 
+#define CQSPI_REG_DELAY_TCHSH_LSB 8 +#define CQSPI_REG_DELAY_TSD2D_LSB 16 +#define CQSPI_REG_DELAY_TSHSL_LSB 24 +#define CQSPI_REG_DELAY_TSLCH_MASK 0xFF +#define CQSPI_REG_DELAY_TCHSH_MASK 0xFF +#define CQSPI_REG_DELAY_TSD2D_MASK 0xFF +#define CQSPI_REG_DELAY_TSHSL_MASK 0xFF + +#define CQSPI_REG_READCAPTURE 0x10 +#define CQSPI_REG_READCAPTURE_BYPASS_LSB 0 +#define CQSPI_REG_READCAPTURE_DELAY_LSB 1 +#define CQSPI_REG_READCAPTURE_DELAY_MASK 0xF + +#define CQSPI_REG_SIZE 0x14 +#define CQSPI_REG_SIZE_ADDRESS_LSB 0 +#define CQSPI_REG_SIZE_PAGE_LSB 4 +#define CQSPI_REG_SIZE_BLOCK_LSB 16 +#define CQSPI_REG_SIZE_ADDRESS_MASK 0xF +#define CQSPI_REG_SIZE_PAGE_MASK 0xFFF +#define CQSPI_REG_SIZE_BLOCK_MASK 0x3F + +#define CQSPI_REG_SRAMPARTITION 0x18 +#define CQSPI_REG_INDIRECTTRIGGER 0x1C + +#define CQSPI_REG_DMA 0x20 +#define CQSPI_REG_DMA_SINGLE_LSB 0 +#define CQSPI_REG_DMA_BURST_LSB 8 +#define CQSPI_REG_DMA_SINGLE_MASK 0xFF +#define CQSPI_REG_DMA_BURST_MASK 0xFF + +#define CQSPI_REG_REMAP 0x24 +#define CQSPI_REG_MODE_BIT 0x28 + +#define CQSPI_REG_SDRAMLEVEL 0x2C +#define CQSPI_REG_SDRAMLEVEL_RD_LSB 0 +#define CQSPI_REG_SDRAMLEVEL_WR_LSB 16 +#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF +#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF + +#define CQSPI_REG_IRQSTATUS 0x40 +#define CQSPI_REG_IRQMASK 0x44 + +#define CQSPI_REG_INDIRECTRD 0x60 +#define CQSPI_REG_INDIRECTRD_START_MASK BIT(0) +#define CQSPI_REG_INDIRECTRD_CANCEL_MASK BIT(1) +#define CQSPI_REG_INDIRECTRD_DONE_MASK BIT(5) + +#define CQSPI_REG_INDIRECTRDWATERMARK 0x64 +#define CQSPI_REG_INDIRECTRDSTARTADDR 0x68 +#define CQSPI_REG_INDIRECTRDBYTES 0x6C + +#define CQSPI_REG_CMDCTRL 0x90 +#define CQSPI_REG_CMDCTRL_EXECUTE_MASK BIT(0) +#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK BIT(1) +#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12 +#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15 +#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16 +#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB 19 +#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB 20 +#define CQSPI_REG_CMDCTRL_RD_EN_LSB 23 +#define CQSPI_REG_CMDCTRL_OPCODE_LSB 24 +#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7 +#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3 +#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7 + +#define CQSPI_REG_INDIRECTWR 0x70 +#define CQSPI_REG_INDIRECTWR_START_MASK BIT(0) +#define CQSPI_REG_INDIRECTWR_CANCEL_MASK BIT(1) +#define CQSPI_REG_INDIRECTWR_DONE_MASK BIT(5) + +#define CQSPI_REG_INDIRECTWRWATERMARK 0x74 +#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78 +#define CQSPI_REG_INDIRECTWRBYTES 0x7C + +#define CQSPI_REG_CMDADDRESS 0x94 +#define CQSPI_REG_CMDREADDATALOWER 0xA0 +#define CQSPI_REG_CMDREADDATAUPPER 0xA4 +#define CQSPI_REG_CMDWRITEDATALOWER 0xA8 +#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC + +/* Interrupt status bits */ +#define CQSPI_REG_IRQ_MODE_ERR BIT(0) +#define CQSPI_REG_IRQ_UNDERFLOW BIT(1) +#define CQSPI_REG_IRQ_IND_COMP BIT(2) +#define CQSPI_REG_IRQ_IND_RD_REJECT BIT(3) +#define CQSPI_REG_IRQ_WR_PROTECTED_ERR BIT(4) +#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR BIT(5) +#define CQSPI_REG_IRQ_WATERMARK BIT(6) +#define CQSPI_REG_IRQ_IND_SRAM_FULL BIT(12) + +#define CQSPI_IRQ_MASK_RD (CQSPI_REG_IRQ_WATERMARK | \ + CQSPI_REG_IRQ_IND_SRAM_FULL | \ + CQSPI_REG_IRQ_IND_COMP) + +#define CQSPI_IRQ_MASK_WR (CQSPI_REG_IRQ_IND_COMP | \ + CQSPI_REG_IRQ_WATERMARK | \ + CQSPI_REG_IRQ_UNDERFLOW) + +#define CQSPI_IRQ_STATUS_MASK 0x1FFFF + +static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clr) +{ + u32 val; + + return readl_relaxed_poll_timeout(reg, val, + (((clr ? 
~val : val) & mask) == mask), + 10, CQSPI_TIMEOUT_MS * 1000); +} + +static bool cqspi_is_idle(struct cqspi_st *cqspi) +{ + u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); + + return reg & (1 << CQSPI_REG_CONFIG_IDLE_LSB); +} + +static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi) +{ + u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL); + + reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB; + return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK; +} + +static irqreturn_t cqspi_irq_handler(int this_irq, void *dev) +{ + struct cqspi_st *cqspi = dev; + unsigned int irq_status; + + /* Read interrupt status */ + irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS); + + /* Clear interrupt */ + writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS); + + irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR; + + if (irq_status) + complete(&cqspi->transfer_complete); + + return IRQ_HANDLED; +} + +static unsigned int cqspi_calc_rdreg(struct spi_nor *nor) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + u32 rdreg = 0; + + rdreg |= f_pdata->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB; + rdreg |= f_pdata->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB; + rdreg |= f_pdata->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB; + + return rdreg; +} + +static int cqspi_wait_idle(struct cqspi_st *cqspi) +{ + const unsigned int poll_idle_retry = 3; + unsigned int count = 0; + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS); + while (1) { + /* + * Read few times in succession to ensure the controller + * is indeed idle, that is, the bit does not transition + * low again. + */ + if (cqspi_is_idle(cqspi)) + count++; + else + count = 0; + + if (count >= poll_idle_retry) + return 0; + + if (time_after(jiffies, timeout)) { + /* Timeout, in busy mode. */ + dev_err(&cqspi->pdev->dev, + "QSPI is still busy after %dms timeout.\n", + CQSPI_TIMEOUT_MS); + return -ETIMEDOUT; + } + + cpu_relax(); + } +} + +static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg) +{ + void __iomem *reg_base = cqspi->iobase; + int ret; + + /* Write the CMDCTRL without start execution. */ + writel(reg, reg_base + CQSPI_REG_CMDCTRL); + /* Start execute */ + reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK; + writel(reg, reg_base + CQSPI_REG_CMDCTRL); + + /* Polling for completion. */ + ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL, + CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1); + if (ret) { + dev_err(&cqspi->pdev->dev, + "Flash command execution timed out.\n"); + return ret; + } + + /* Polling QSPI idle status. */ + return cqspi_wait_idle(cqspi); +} + +static int cqspi_command_read(struct spi_nor *nor, u8 opcode, + u8 *rxbuf, size_t n_rx) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + void __iomem *reg_base = cqspi->iobase; + unsigned int rdreg; + unsigned int reg; + size_t read_len; + int status; + + if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) { + dev_err(nor->dev, + "Invalid input argument, len %zu rxbuf 0x%p\n", + n_rx, rxbuf); + return -EINVAL; + } + + reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB; + + rdreg = cqspi_calc_rdreg(nor); + writel(rdreg, reg_base + CQSPI_REG_RD_INSTR); + + reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB); + + /* 0 means 1 byte. */ + reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK) + << CQSPI_REG_CMDCTRL_RD_BYTES_LSB); + status = cqspi_exec_flash_cmd(cqspi, reg); + if (status) + return status; + + reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER); + + /* Put the read value into rx_buf */ + read_len = (n_rx > 4) ? 
4 : n_rx; + memcpy(rxbuf, ®, read_len); + rxbuf += read_len; + + if (n_rx > 4) { + reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER); + + read_len = n_rx - read_len; + memcpy(rxbuf, ®, read_len); + } + + return 0; +} + +static int cqspi_command_write(struct spi_nor *nor, const u8 opcode, + const u8 *txbuf, size_t n_tx) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + void __iomem *reg_base = cqspi->iobase; + unsigned int reg; + unsigned int data; + size_t write_len; + int ret; + + if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) { + dev_err(nor->dev, + "Invalid input argument, cmdlen %zu txbuf 0x%p\n", + n_tx, txbuf); + return -EINVAL; + } + + reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB; + if (n_tx) { + reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB); + reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK) + << CQSPI_REG_CMDCTRL_WR_BYTES_LSB; + data = 0; + write_len = (n_tx > 4) ? 4 : n_tx; + memcpy(&data, txbuf, write_len); + txbuf += write_len; + writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER); + + if (n_tx > 4) { + data = 0; + write_len = n_tx - 4; + memcpy(&data, txbuf, write_len); + writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER); + } + } + ret = cqspi_exec_flash_cmd(cqspi, reg); + return ret; +} + +static int cqspi_command_write_addr(struct spi_nor *nor, + const u8 opcode, const unsigned int addr) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + void __iomem *reg_base = cqspi->iobase; + unsigned int reg; + + reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB; + reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB); + reg |= ((nor->addr_width - 1) & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) + << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB; + + writel(addr, reg_base + CQSPI_REG_CMDADDRESS); + + return cqspi_exec_flash_cmd(cqspi, reg); +} + +static int cqspi_read_setup(struct spi_nor *nor) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + void __iomem *reg_base = cqspi->iobase; + unsigned int dummy_clk = 0; + unsigned int reg; + + reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB; + reg |= cqspi_calc_rdreg(nor); + + /* Setup dummy clock cycles */ + dummy_clk = nor->read_dummy; + if (dummy_clk > CQSPI_DUMMY_CLKS_MAX) + dummy_clk = CQSPI_DUMMY_CLKS_MAX; + + if (dummy_clk / 8) { + reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB); + /* Set mode bits high to ensure chip doesn't enter XIP */ + writel(0xFF, reg_base + CQSPI_REG_MODE_BIT); + + /* Need to subtract the mode byte (8 clocks). 
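+		 * For example, with nor->read_dummy = 8 and a non-quad
+		 * instruction width, the mode byte accounts for all
+		 * eight dummy clocks and the DUMMY field stays zero.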
*/ + if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD) + dummy_clk -= 8; + + if (dummy_clk) + reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK) + << CQSPI_REG_RD_INSTR_DUMMY_LSB; + } + + writel(reg, reg_base + CQSPI_REG_RD_INSTR); + + /* Set address width */ + reg = readl(reg_base + CQSPI_REG_SIZE); + reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; + reg |= (nor->addr_width - 1); + writel(reg, reg_base + CQSPI_REG_SIZE); + return 0; +} + +static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf, + loff_t from_addr, const size_t n_rx) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + void __iomem *reg_base = cqspi->iobase; + void __iomem *ahb_base = cqspi->ahb_base; + unsigned int remaining = n_rx; + unsigned int mod_bytes = n_rx % 4; + unsigned int bytes_to_read = 0; + u8 *rxbuf_end = rxbuf + n_rx; + int ret = 0; + + writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR); + writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES); + + /* Clear all interrupts. */ + writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS); + + writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK); + + reinit_completion(&cqspi->transfer_complete); + writel(CQSPI_REG_INDIRECTRD_START_MASK, + reg_base + CQSPI_REG_INDIRECTRD); + + while (remaining > 0) { + if (!wait_for_completion_timeout(&cqspi->transfer_complete, + msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS))) + ret = -ETIMEDOUT; + + bytes_to_read = cqspi_get_rd_sram_level(cqspi); + + if (ret && bytes_to_read == 0) { + dev_err(nor->dev, "Indirect read timeout, no bytes\n"); + goto failrd; + } + + while (bytes_to_read != 0) { + unsigned int word_remain = round_down(remaining, 4); + + bytes_to_read *= cqspi->fifo_width; + bytes_to_read = bytes_to_read > remaining ? + remaining : bytes_to_read; + bytes_to_read = round_down(bytes_to_read, 4); + /* Read 4 byte word chunks then single bytes */ + if (bytes_to_read) { + ioread32_rep(ahb_base, rxbuf, + (bytes_to_read / 4)); + } else if (!word_remain && mod_bytes) { + unsigned int temp = ioread32(ahb_base); + + bytes_to_read = mod_bytes; + memcpy(rxbuf, &temp, min((unsigned int) + (rxbuf_end - rxbuf), + bytes_to_read)); + } + rxbuf += bytes_to_read; + remaining -= bytes_to_read; + bytes_to_read = cqspi_get_rd_sram_level(cqspi); + } + + if (remaining > 0) + reinit_completion(&cqspi->transfer_complete); + } + + /* Check indirect done status */ + ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD, + CQSPI_REG_INDIRECTRD_DONE_MASK, 0); + if (ret) { + dev_err(nor->dev, + "Indirect read completion error (%i)\n", ret); + goto failrd; + } + + /* Disable interrupt */ + writel(0, reg_base + CQSPI_REG_IRQMASK); + + /* Clear indirect completion status */ + writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD); + + return 0; + +failrd: + /* Disable interrupt */ + writel(0, reg_base + CQSPI_REG_IRQMASK); + + /* Cancel the indirect read */ + writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK, + reg_base + CQSPI_REG_INDIRECTRD); + return ret; +} + +static int cqspi_write_setup(struct spi_nor *nor) +{ + unsigned int reg; + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + void __iomem *reg_base = cqspi->iobase; + + /* Set opcode. 
*/ + reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB; + writel(reg, reg_base + CQSPI_REG_WR_INSTR); + reg = cqspi_calc_rdreg(nor); + writel(reg, reg_base + CQSPI_REG_RD_INSTR); + + reg = readl(reg_base + CQSPI_REG_SIZE); + reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; + reg |= (nor->addr_width - 1); + writel(reg, reg_base + CQSPI_REG_SIZE); + return 0; +} + +static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr, + const u8 *txbuf, const size_t n_tx) +{ + const unsigned int page_size = nor->page_size; + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + void __iomem *reg_base = cqspi->iobase; + unsigned int remaining = n_tx; + unsigned int write_bytes; + int ret; + + writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR); + writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES); + + /* Clear all interrupts. */ + writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS); + + writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK); + + reinit_completion(&cqspi->transfer_complete); + writel(CQSPI_REG_INDIRECTWR_START_MASK, + reg_base + CQSPI_REG_INDIRECTWR); + /* + * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access + * Controller programming sequence, couple of cycles of + * QSPI_REF_CLK delay is required for the above bit to + * be internally synchronized by the QSPI module. Provide 5 + * cycles of delay. + */ + if (cqspi->wr_delay) + ndelay(cqspi->wr_delay); + + while (remaining > 0) { + size_t write_words, mod_bytes; + + write_bytes = remaining > page_size ? page_size : remaining; + write_words = write_bytes / 4; + mod_bytes = write_bytes % 4; + /* Write 4 bytes at a time then single bytes. */ + if (write_words) { + iowrite32_rep(cqspi->ahb_base, txbuf, write_words); + txbuf += (write_words * 4); + } + if (mod_bytes) { + unsigned int temp = 0xFFFFFFFF; + + memcpy(&temp, txbuf, mod_bytes); + iowrite32(temp, cqspi->ahb_base); + txbuf += mod_bytes; + } + + if (!wait_for_completion_timeout(&cqspi->transfer_complete, + msecs_to_jiffies(CQSPI_TIMEOUT_MS))) { + dev_err(nor->dev, "Indirect write timeout\n"); + ret = -ETIMEDOUT; + goto failwr; + } + + remaining -= write_bytes; + + if (remaining > 0) + reinit_completion(&cqspi->transfer_complete); + } + + /* Check indirect done status */ + ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR, + CQSPI_REG_INDIRECTWR_DONE_MASK, 0); + if (ret) { + dev_err(nor->dev, + "Indirect write completion error (%i)\n", ret); + goto failwr; + } + + /* Disable interrupt. */ + writel(0, reg_base + CQSPI_REG_IRQMASK); + + /* Clear indirect completion status */ + writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR); + + cqspi_wait_idle(cqspi); + + return 0; + +failwr: + /* Disable interrupt. */ + writel(0, reg_base + CQSPI_REG_IRQMASK); + + /* Cancel the indirect write */ + writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK, + reg_base + CQSPI_REG_INDIRECTWR); + return ret; +} + +static void cqspi_chipselect(struct spi_nor *nor) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + void __iomem *reg_base = cqspi->iobase; + unsigned int chip_select = f_pdata->cs; + unsigned int reg; + + reg = readl(reg_base + CQSPI_REG_CONFIG); + if (cqspi->is_decoded_cs) { + reg |= CQSPI_REG_CONFIG_DECODE_MASK; + } else { + reg &= ~CQSPI_REG_CONFIG_DECODE_MASK; + + /* Convert CS if without decoder. 
+ * CS0 to 4b'1110 + * CS1 to 4b'1101 + * CS2 to 4b'1011 + * CS3 to 4b'0111 + */ + chip_select = 0xF & ~(1 << chip_select); + } + + reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK + << CQSPI_REG_CONFIG_CHIPSELECT_LSB); + reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK) + << CQSPI_REG_CONFIG_CHIPSELECT_LSB; + writel(reg, reg_base + CQSPI_REG_CONFIG); +} + +static void cqspi_configure_cs_and_sizes(struct spi_nor *nor) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + void __iomem *iobase = cqspi->iobase; + unsigned int reg; + + /* configure page size and block size. */ + reg = readl(iobase + CQSPI_REG_SIZE); + reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB); + reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB); + reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; + reg |= (nor->page_size << CQSPI_REG_SIZE_PAGE_LSB); + reg |= (ilog2(nor->mtd.erasesize) << CQSPI_REG_SIZE_BLOCK_LSB); + reg |= (nor->addr_width - 1); + writel(reg, iobase + CQSPI_REG_SIZE); + + /* configure the chip select */ + cqspi_chipselect(nor); + + /* Store the new configuration of the controller */ + cqspi->current_page_size = nor->page_size; + cqspi->current_erase_size = nor->mtd.erasesize; + cqspi->current_addr_width = nor->addr_width; +} + +static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz, + const unsigned int ns_val) +{ + unsigned int ticks; + + ticks = ref_clk_hz / 1000; /* kHz */ + ticks = DIV_ROUND_UP(ticks * ns_val, 1000000); + + return ticks; +} + +static void cqspi_delay(struct spi_nor *nor) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + void __iomem *iobase = cqspi->iobase; + const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz; + unsigned int tshsl, tchsh, tslch, tsd2d; + unsigned int reg; + unsigned int tsclk; + + /* calculate the number of ref ticks for one sclk tick */ + tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk); + + tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns); + /* this particular value must be at least one sclk */ + if (tshsl < tsclk) + tshsl = tsclk; + + tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns); + tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns); + tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns); + + reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK) + << CQSPI_REG_DELAY_TSHSL_LSB; + reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK) + << CQSPI_REG_DELAY_TCHSH_LSB; + reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK) + << CQSPI_REG_DELAY_TSLCH_LSB; + reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK) + << CQSPI_REG_DELAY_TSD2D_LSB; + writel(reg, iobase + CQSPI_REG_DELAY); +} + +static void cqspi_config_baudrate_div(struct cqspi_st *cqspi) +{ + const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz; + void __iomem *reg_base = cqspi->iobase; + u32 reg, div; + + /* Recalculate the baudrate divisor based on QSPI specification. 
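+	 * For example, ref_clk_hz = 384 MHz with a requested sclk of
+	 * 50 MHz gives div = DIV_ROUND_UP(384000000, 100000000) - 1 = 3,
+	 * i.e. an actual SCLK of 384 MHz / (2 * (3 + 1)) = 48 MHz, the
+	 * fastest rate not exceeding the request.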
*/ + div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1; + + reg = readl(reg_base + CQSPI_REG_CONFIG); + reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB); + reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB; + writel(reg, reg_base + CQSPI_REG_CONFIG); +} + +static void cqspi_readdata_capture(struct cqspi_st *cqspi, + const bool bypass, + const unsigned int delay) +{ + void __iomem *reg_base = cqspi->iobase; + unsigned int reg; + + reg = readl(reg_base + CQSPI_REG_READCAPTURE); + + if (bypass) + reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB); + else + reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB); + + reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK + << CQSPI_REG_READCAPTURE_DELAY_LSB); + + reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK) + << CQSPI_REG_READCAPTURE_DELAY_LSB; + + writel(reg, reg_base + CQSPI_REG_READCAPTURE); +} + +static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable) +{ + void __iomem *reg_base = cqspi->iobase; + unsigned int reg; + + reg = readl(reg_base + CQSPI_REG_CONFIG); + + if (enable) + reg |= CQSPI_REG_CONFIG_ENABLE_MASK; + else + reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK; + + writel(reg, reg_base + CQSPI_REG_CONFIG); +} + +static void cqspi_configure(struct spi_nor *nor) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + const unsigned int sclk = f_pdata->clk_rate; + int switch_cs = (cqspi->current_cs != f_pdata->cs); + int switch_ck = (cqspi->sclk != sclk); + + if ((cqspi->current_page_size != nor->page_size) || + (cqspi->current_erase_size != nor->mtd.erasesize) || + (cqspi->current_addr_width != nor->addr_width)) + switch_cs = 1; + + if (switch_cs || switch_ck) + cqspi_controller_enable(cqspi, 0); + + /* Switch chip select. */ + if (switch_cs) { + cqspi->current_cs = f_pdata->cs; + cqspi_configure_cs_and_sizes(nor); + } + + /* Setup baudrate divisor and delays */ + if (switch_ck) { + cqspi->sclk = sclk; + cqspi_config_baudrate_div(cqspi); + cqspi_delay(nor); + cqspi_readdata_capture(cqspi, !cqspi->rclk_en, + f_pdata->read_delay); + } + + if (switch_cs || switch_ck) + cqspi_controller_enable(cqspi, 1); +} + +static int cqspi_set_protocol(struct spi_nor *nor, const int read) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + + f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE; + f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE; + f_pdata->data_width = CQSPI_INST_TYPE_SINGLE; + + if (read) { + switch (nor->read_proto) { + case SNOR_PROTO_1_1_1: + f_pdata->data_width = CQSPI_INST_TYPE_SINGLE; + break; + case SNOR_PROTO_1_1_2: + f_pdata->data_width = CQSPI_INST_TYPE_DUAL; + break; + case SNOR_PROTO_1_1_4: + f_pdata->data_width = CQSPI_INST_TYPE_QUAD; + break; + case SNOR_PROTO_1_1_8: + f_pdata->data_width = CQSPI_INST_TYPE_OCTAL; + break; + default: + return -EINVAL; + } + } + + cqspi_configure(nor); + + return 0; +} + +static ssize_t cqspi_write(struct spi_nor *nor, loff_t to, + size_t len, const u_char *buf) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + int ret; + + ret = cqspi_set_protocol(nor, 0); + if (ret) + return ret; + + ret = cqspi_write_setup(nor); + if (ret) + return ret; + + if (f_pdata->use_direct_mode) { + memcpy_toio(cqspi->ahb_base + to, buf, len); + ret = cqspi_wait_idle(cqspi); + } else { + ret = cqspi_indirect_write_execute(nor, to, buf, len); + } + if (ret) + return ret; + + return len; +} + +static void cqspi_rx_dma_callback(void *param) +{ + struct cqspi_st *cqspi = param; + + 
complete(&cqspi->rx_dma_complete); +} + +static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf, + loff_t from, size_t len) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; + dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from; + int ret = 0; + struct dma_async_tx_descriptor *tx; + dma_cookie_t cookie; + dma_addr_t dma_dst; + + if (!cqspi->rx_chan || !virt_addr_valid(buf)) { + memcpy_fromio(buf, cqspi->ahb_base + from, len); + return 0; + } + + dma_dst = dma_map_single(nor->dev, buf, len, DMA_FROM_DEVICE); + if (dma_mapping_error(nor->dev, dma_dst)) { + dev_err(nor->dev, "dma mapping failed\n"); + return -ENOMEM; + } + tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, dma_dst, dma_src, + len, flags); + if (!tx) { + dev_err(nor->dev, "device_prep_dma_memcpy error\n"); + ret = -EIO; + goto err_unmap; + } + + tx->callback = cqspi_rx_dma_callback; + tx->callback_param = cqspi; + cookie = tx->tx_submit(tx); + reinit_completion(&cqspi->rx_dma_complete); + + ret = dma_submit_error(cookie); + if (ret) { + dev_err(nor->dev, "dma_submit_error %d\n", cookie); + ret = -EIO; + goto err_unmap; + } + + dma_async_issue_pending(cqspi->rx_chan); + if (!wait_for_completion_timeout(&cqspi->rx_dma_complete, + msecs_to_jiffies(len))) { + dmaengine_terminate_sync(cqspi->rx_chan); + dev_err(nor->dev, "DMA wait_for_completion_timeout\n"); + ret = -ETIMEDOUT; + goto err_unmap; + } + +err_unmap: + dma_unmap_single(nor->dev, dma_dst, len, DMA_FROM_DEVICE); + + return ret; +} + +static ssize_t cqspi_read(struct spi_nor *nor, loff_t from, + size_t len, u_char *buf) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + int ret; + + ret = cqspi_set_protocol(nor, 1); + if (ret) + return ret; + + ret = cqspi_read_setup(nor); + if (ret) + return ret; + + if (f_pdata->use_direct_mode) + ret = cqspi_direct_read_execute(nor, buf, from, len); + else + ret = cqspi_indirect_read_execute(nor, buf, from, len); + if (ret) + return ret; + + return len; +} + +static int cqspi_erase(struct spi_nor *nor, loff_t offs) +{ + int ret; + + ret = cqspi_set_protocol(nor, 0); + if (ret) + return ret; + + /* Send write enable, then erase commands. */ + ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN, NULL, 0); + if (ret) + return ret; + + /* Set up command buffer. 
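+ * The erase opcode and sector address are issued through the
+ * controller's STIG (software triggered instruction generator)
+ * command registers.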
*/ + ret = cqspi_command_write_addr(nor, nor->erase_opcode, offs); + if (ret) + return ret; + + return 0; +} + +static int cqspi_prep(struct spi_nor *nor) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + + mutex_lock(&cqspi->bus_mutex); + + return 0; +} + +static void cqspi_unprep(struct spi_nor *nor) +{ + struct cqspi_flash_pdata *f_pdata = nor->priv; + struct cqspi_st *cqspi = f_pdata->cqspi; + + mutex_unlock(&cqspi->bus_mutex); +} + +static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len) +{ + int ret; + + ret = cqspi_set_protocol(nor, 0); + if (!ret) + ret = cqspi_command_read(nor, opcode, buf, len); + + return ret; +} + +static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf, + size_t len) +{ + int ret; + + ret = cqspi_set_protocol(nor, 0); + if (!ret) + ret = cqspi_command_write(nor, opcode, buf, len); + + return ret; +} + +static int cqspi_of_get_flash_pdata(struct platform_device *pdev, + struct cqspi_flash_pdata *f_pdata, + struct device_node *np) +{ + if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) { + dev_err(&pdev->dev, "couldn't determine read-delay\n"); + return -ENXIO; + } + + if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) { + dev_err(&pdev->dev, "couldn't determine tshsl-ns\n"); + return -ENXIO; + } + + if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) { + dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n"); + return -ENXIO; + } + + if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) { + dev_err(&pdev->dev, "couldn't determine tchsh-ns\n"); + return -ENXIO; + } + + if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) { + dev_err(&pdev->dev, "couldn't determine tslch-ns\n"); + return -ENXIO; + } + + if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) { + dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n"); + return -ENXIO; + } + + return 0; +} + +static int cqspi_of_get_pdata(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct cqspi_st *cqspi = platform_get_drvdata(pdev); + + cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs"); + + if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) { + dev_err(&pdev->dev, "couldn't determine fifo-depth\n"); + return -ENXIO; + } + + if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) { + dev_err(&pdev->dev, "couldn't determine fifo-width\n"); + return -ENXIO; + } + + if (of_property_read_u32(np, "cdns,trigger-address", + &cqspi->trigger_address)) { + dev_err(&pdev->dev, "couldn't determine trigger-address\n"); + return -ENXIO; + } + + cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en"); + + return 0; +} + +static void cqspi_controller_init(struct cqspi_st *cqspi) +{ + u32 reg; + + cqspi_controller_enable(cqspi, 0); + + /* Configure the remap address register, no remap */ + writel(0, cqspi->iobase + CQSPI_REG_REMAP); + + /* Disable all interrupts. */ + writel(0, cqspi->iobase + CQSPI_REG_IRQMASK); + + /* Configure the SRAM split to 1:1 . */ + writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION); + + /* Load indirect trigger address. */ + writel(cqspi->trigger_address, + cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER); + + /* Program read watermark -- 1/2 of the FIFO. */ + writel(cqspi->fifo_depth * cqspi->fifo_width / 2, + cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK); + /* Program write watermark -- 1/8 of the FIFO. 
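+ * For example, with an (assumed) 128-entry, 32-bit wide FIFO as
+ * described by the DT properties, this is a 64-byte write watermark,
+ * while the read watermark programmed above is 256 bytes.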
*/ + writel(cqspi->fifo_depth * cqspi->fifo_width / 8, + cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK); + + /* Enable Direct Access Controller */ + reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); + reg |= CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL; + writel(reg, cqspi->iobase + CQSPI_REG_CONFIG); + + cqspi_controller_enable(cqspi, 1); +} + +static void cqspi_request_mmap_dma(struct cqspi_st *cqspi) +{ + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_MEMCPY, mask); + + cqspi->rx_chan = dma_request_chan_by_mask(&mask); + if (IS_ERR(cqspi->rx_chan)) { + dev_err(&cqspi->pdev->dev, "No Rx DMA available\n"); + cqspi->rx_chan = NULL; + } + init_completion(&cqspi->rx_dma_complete); +} + +static const struct spi_nor_controller_ops cqspi_controller_ops = { + .prepare = cqspi_prep, + .unprepare = cqspi_unprep, + .read_reg = cqspi_read_reg, + .write_reg = cqspi_write_reg, + .read = cqspi_read, + .write = cqspi_write, + .erase = cqspi_erase, +}; + +static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np) +{ + struct platform_device *pdev = cqspi->pdev; + struct device *dev = &pdev->dev; + const struct cqspi_driver_platdata *ddata; + struct spi_nor_hwcaps hwcaps; + struct cqspi_flash_pdata *f_pdata; + struct spi_nor *nor; + struct mtd_info *mtd; + unsigned int cs; + int i, ret; + + ddata = of_device_get_match_data(dev); + if (!ddata) { + dev_err(dev, "Couldn't find driver data\n"); + return -EINVAL; + } + hwcaps.mask = ddata->hwcaps_mask; + + /* Get flash device data */ + for_each_available_child_of_node(dev->of_node, np) { + ret = of_property_read_u32(np, "reg", &cs); + if (ret) { + dev_err(dev, "Couldn't determine chip select.\n"); + goto err; + } + + if (cs >= CQSPI_MAX_CHIPSELECT) { + ret = -EINVAL; + dev_err(dev, "Chip select %d out of range.\n", cs); + goto err; + } + + f_pdata = &cqspi->f_pdata[cs]; + f_pdata->cqspi = cqspi; + f_pdata->cs = cs; + + ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np); + if (ret) + goto err; + + nor = &f_pdata->nor; + mtd = &nor->mtd; + + mtd->priv = nor; + + nor->dev = dev; + spi_nor_set_flash_node(nor, np); + nor->priv = f_pdata; + nor->controller_ops = &cqspi_controller_ops; + + mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d", + dev_name(dev), cs); + if (!mtd->name) { + ret = -ENOMEM; + goto err; + } + + ret = spi_nor_scan(nor, NULL, &hwcaps); + if (ret) + goto err; + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) + goto err; + + f_pdata->registered = true; + + if (mtd->size <= cqspi->ahb_size) { + f_pdata->use_direct_mode = true; + dev_dbg(nor->dev, "using direct mode for %s\n", + mtd->name); + + if (!cqspi->rx_chan) + cqspi_request_mmap_dma(cqspi); + } + } + + return 0; + +err: + for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++) + if (cqspi->f_pdata[i].registered) + mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd); + return ret; +} + +static int cqspi_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct cqspi_st *cqspi; + struct resource *res; + struct resource *res_ahb; + struct reset_control *rstc, *rstc_ocp; + const struct cqspi_driver_platdata *ddata; + int ret; + int irq; + + cqspi = devm_kzalloc(dev, sizeof(*cqspi), GFP_KERNEL); + if (!cqspi) + return -ENOMEM; + + mutex_init(&cqspi->bus_mutex); + cqspi->pdev = pdev; + platform_set_drvdata(pdev, cqspi); + + /* Obtain configuration from OF. */ + ret = cqspi_of_get_pdata(pdev); + if (ret) { + dev_err(dev, "Cannot get mandatory OF data.\n"); + return -ENODEV; + } + + /* Obtain QSPI clock. 
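+ * A NULL clock id requests the device's default (first) clock.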
*/ + cqspi->clk = devm_clk_get(dev, NULL); + if (IS_ERR(cqspi->clk)) { + dev_err(dev, "Cannot claim QSPI clock.\n"); + return PTR_ERR(cqspi->clk); + } + + /* Obtain and remap controller address. */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + cqspi->iobase = devm_ioremap_resource(dev, res); + if (IS_ERR(cqspi->iobase)) { + dev_err(dev, "Cannot remap controller address.\n"); + return PTR_ERR(cqspi->iobase); + } + + /* Obtain and remap AHB address. */ + res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1); + cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb); + if (IS_ERR(cqspi->ahb_base)) { + dev_err(dev, "Cannot remap AHB address.\n"); + return PTR_ERR(cqspi->ahb_base); + } + cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start; + cqspi->ahb_size = resource_size(res_ahb); + + init_completion(&cqspi->transfer_complete); + + /* Obtain IRQ line. */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -ENXIO; + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + return ret; + } + + ret = clk_prepare_enable(cqspi->clk); + if (ret) { + dev_err(dev, "Cannot enable QSPI clock.\n"); + goto probe_clk_failed; + } + + /* Obtain QSPI reset control */ + rstc = devm_reset_control_get_optional_exclusive(dev, "qspi"); + if (IS_ERR(rstc)) { + dev_err(dev, "Cannot get QSPI reset.\n"); + return PTR_ERR(rstc); + } + + rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp"); + if (IS_ERR(rstc_ocp)) { + dev_err(dev, "Cannot get QSPI OCP reset.\n"); + return PTR_ERR(rstc_ocp); + } + + reset_control_assert(rstc); + reset_control_deassert(rstc); + + reset_control_assert(rstc_ocp); + reset_control_deassert(rstc_ocp); + + cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk); + ddata = of_device_get_match_data(dev); + if (ddata && (ddata->quirks & CQSPI_NEEDS_WR_DELAY)) + cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC, + cqspi->master_ref_clk_hz); + + ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0, + pdev->name, cqspi); + if (ret) { + dev_err(dev, "Cannot request IRQ.\n"); + goto probe_irq_failed; + } + + cqspi_wait_idle(cqspi); + cqspi_controller_init(cqspi); + cqspi->current_cs = -1; + cqspi->sclk = 0; + + ret = cqspi_setup_flash(cqspi, np); + if (ret) { + dev_err(dev, "Cadence QSPI NOR probe failed %d\n", ret); + goto probe_setup_failed; + } + + return ret; +probe_setup_failed: + cqspi_controller_enable(cqspi, 0); +probe_irq_failed: + clk_disable_unprepare(cqspi->clk); +probe_clk_failed: + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + return ret; +} + +static int cqspi_remove(struct platform_device *pdev) +{ + struct cqspi_st *cqspi = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++) + if (cqspi->f_pdata[i].registered) + mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd); + + cqspi_controller_enable(cqspi, 0); + + if (cqspi->rx_chan) + dma_release_channel(cqspi->rx_chan); + + clk_disable_unprepare(cqspi->clk); + + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int cqspi_suspend(struct device *dev) +{ + struct cqspi_st *cqspi = dev_get_drvdata(dev); + + cqspi_controller_enable(cqspi, 0); + return 0; +} + +static int cqspi_resume(struct device *dev) +{ + struct cqspi_st *cqspi = dev_get_drvdata(dev); + + cqspi_controller_enable(cqspi, 1); + return 0; +} + +static const struct dev_pm_ops cqspi__dev_pm_ops = { + .suspend = cqspi_suspend, + .resume = cqspi_resume, +}; + +#define CQSPI_DEV_PM_OPS 
(&cqspi__dev_pm_ops) +#else +#define CQSPI_DEV_PM_OPS NULL +#endif + +static const struct cqspi_driver_platdata cdns_qspi = { + .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK, +}; + +static const struct cqspi_driver_platdata k2g_qspi = { + .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK, + .quirks = CQSPI_NEEDS_WR_DELAY, +}; + +static const struct cqspi_driver_platdata am654_ospi = { + .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK | SNOR_HWCAPS_READ_1_1_8, + .quirks = CQSPI_NEEDS_WR_DELAY, +}; + +static const struct of_device_id cqspi_dt_ids[] = { + { + .compatible = "cdns,qspi-nor", + .data = &cdns_qspi, + }, + { + .compatible = "ti,k2g-qspi", + .data = &k2g_qspi, + }, + { + .compatible = "ti,am654-ospi", + .data = &am654_ospi, + }, + { /* end of table */ } +}; + +MODULE_DEVICE_TABLE(of, cqspi_dt_ids); + +static struct platform_driver cqspi_platform_driver = { + .probe = cqspi_probe, + .remove = cqspi_remove, + .driver = { + .name = CQSPI_NAME, + .pm = CQSPI_DEV_PM_OPS, + .of_match_table = cqspi_dt_ids, + }, +}; + +module_platform_driver(cqspi_platform_driver); + +MODULE_DESCRIPTION("Cadence QSPI Controller Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" CQSPI_NAME); +MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>"); +MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>"); diff --git a/drivers/mtd/spi-nor/controllers/hisi-sfc.c b/drivers/mtd/spi-nor/controllers/hisi-sfc.c new file mode 100644 index 000000000000..6c7a4118752e --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/hisi-sfc.c @@ -0,0 +1,499 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * HiSilicon FMC SPI-NOR flash controller driver + * + * Copyright (c) 2015-2016 HiSilicon Technologies Co., Ltd. + */ +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/spi-nor.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +/* Hardware register offsets and field definitions */ +#define FMC_CFG 0x00 +#define FMC_CFG_OP_MODE_MASK BIT_MASK(0) +#define FMC_CFG_OP_MODE_BOOT 0 +#define FMC_CFG_OP_MODE_NORMAL 1 +#define FMC_CFG_FLASH_SEL(type) (((type) & 0x3) << 1) +#define FMC_CFG_FLASH_SEL_MASK 0x6 +#define FMC_ECC_TYPE(type) (((type) & 0x7) << 5) +#define FMC_ECC_TYPE_MASK GENMASK(7, 5) +#define SPI_NOR_ADDR_MODE_MASK BIT_MASK(10) +#define SPI_NOR_ADDR_MODE_3BYTES (0x0 << 10) +#define SPI_NOR_ADDR_MODE_4BYTES (0x1 << 10) +#define FMC_GLOBAL_CFG 0x04 +#define FMC_GLOBAL_CFG_WP_ENABLE BIT(6) +#define FMC_SPI_TIMING_CFG 0x08 +#define TIMING_CFG_TCSH(nr) (((nr) & 0xf) << 8) +#define TIMING_CFG_TCSS(nr) (((nr) & 0xf) << 4) +#define TIMING_CFG_TSHSL(nr) ((nr) & 0xf) +#define CS_HOLD_TIME 0x6 +#define CS_SETUP_TIME 0x6 +#define CS_DESELECT_TIME 0xf +#define FMC_INT 0x18 +#define FMC_INT_OP_DONE BIT(0) +#define FMC_INT_CLR 0x20 +#define FMC_CMD 0x24 +#define FMC_CMD_CMD1(cmd) ((cmd) & 0xff) +#define FMC_ADDRL 0x2c +#define FMC_OP_CFG 0x30 +#define OP_CFG_FM_CS(cs) ((cs) << 11) +#define OP_CFG_MEM_IF_TYPE(type) (((type) & 0x7) << 7) +#define OP_CFG_ADDR_NUM(addr) (((addr) & 0x7) << 4) +#define OP_CFG_DUMMY_NUM(dummy) ((dummy) & 0xf) +#define FMC_DATA_NUM 0x38 +#define FMC_DATA_NUM_CNT(cnt) ((cnt) & GENMASK(13, 0)) +#define FMC_OP 0x3c +#define FMC_OP_DUMMY_EN BIT(8) +#define FMC_OP_CMD1_EN BIT(7) +#define FMC_OP_ADDR_EN BIT(6) +#define FMC_OP_WRITE_DATA_EN BIT(5) +#define FMC_OP_READ_DATA_EN BIT(2) +#define FMC_OP_READ_STATUS_EN BIT(1) +#define FMC_OP_REG_OP_START BIT(0) +#define 
FMC_DMA_LEN 0x40 +#define FMC_DMA_LEN_SET(len) ((len) & GENMASK(27, 0)) +#define FMC_DMA_SADDR_D0 0x4c +#define HIFMC_DMA_MAX_LEN (4096) +#define HIFMC_DMA_MASK (HIFMC_DMA_MAX_LEN - 1) +#define FMC_OP_DMA 0x68 +#define OP_CTRL_RD_OPCODE(code) (((code) & 0xff) << 16) +#define OP_CTRL_WR_OPCODE(code) (((code) & 0xff) << 8) +#define OP_CTRL_RW_OP(op) ((op) << 1) +#define OP_CTRL_DMA_OP_READY BIT(0) +#define FMC_OP_READ 0x0 +#define FMC_OP_WRITE 0x1 +#define FMC_WAIT_TIMEOUT 1000000 + +enum hifmc_iftype { + IF_TYPE_STD, + IF_TYPE_DUAL, + IF_TYPE_DIO, + IF_TYPE_QUAD, + IF_TYPE_QIO, +}; + +struct hifmc_priv { + u32 chipselect; + u32 clkrate; + struct hifmc_host *host; +}; + +#define HIFMC_MAX_CHIP_NUM 2 +struct hifmc_host { + struct device *dev; + struct mutex lock; + + void __iomem *regbase; + void __iomem *iobase; + struct clk *clk; + void *buffer; + dma_addr_t dma_buffer; + + struct spi_nor *nor[HIFMC_MAX_CHIP_NUM]; + u32 num_chip; +}; + +static inline int hisi_spi_nor_wait_op_finish(struct hifmc_host *host) +{ + u32 reg; + + return readl_poll_timeout(host->regbase + FMC_INT, reg, + (reg & FMC_INT_OP_DONE), 0, FMC_WAIT_TIMEOUT); +} + +static int hisi_spi_nor_get_if_type(enum spi_nor_protocol proto) +{ + enum hifmc_iftype if_type; + + switch (proto) { + case SNOR_PROTO_1_1_2: + if_type = IF_TYPE_DUAL; + break; + case SNOR_PROTO_1_2_2: + if_type = IF_TYPE_DIO; + break; + case SNOR_PROTO_1_1_4: + if_type = IF_TYPE_QUAD; + break; + case SNOR_PROTO_1_4_4: + if_type = IF_TYPE_QIO; + break; + case SNOR_PROTO_1_1_1: + default: + if_type = IF_TYPE_STD; + break; + } + + return if_type; +} + +static void hisi_spi_nor_init(struct hifmc_host *host) +{ + u32 reg; + + reg = TIMING_CFG_TCSH(CS_HOLD_TIME) + | TIMING_CFG_TCSS(CS_SETUP_TIME) + | TIMING_CFG_TSHSL(CS_DESELECT_TIME); + writel(reg, host->regbase + FMC_SPI_TIMING_CFG); +} + +static int hisi_spi_nor_prep(struct spi_nor *nor) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + int ret; + + mutex_lock(&host->lock); + + ret = clk_set_rate(host->clk, priv->clkrate); + if (ret) + goto out; + + ret = clk_prepare_enable(host->clk); + if (ret) + goto out; + + return 0; + +out: + mutex_unlock(&host->lock); + return ret; +} + +static void hisi_spi_nor_unprep(struct spi_nor *nor) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + + clk_disable_unprepare(host->clk); + mutex_unlock(&host->lock); +} + +static int hisi_spi_nor_op_reg(struct spi_nor *nor, + u8 opcode, size_t len, u8 optype) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + u32 reg; + + reg = FMC_CMD_CMD1(opcode); + writel(reg, host->regbase + FMC_CMD); + + reg = FMC_DATA_NUM_CNT(len); + writel(reg, host->regbase + FMC_DATA_NUM); + + reg = OP_CFG_FM_CS(priv->chipselect); + writel(reg, host->regbase + FMC_OP_CFG); + + writel(0xff, host->regbase + FMC_INT_CLR); + reg = FMC_OP_CMD1_EN | FMC_OP_REG_OP_START | optype; + writel(reg, host->regbase + FMC_OP); + + return hisi_spi_nor_wait_op_finish(host); +} + +static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, + size_t len) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + int ret; + + ret = hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_READ_DATA_EN); + if (ret) + return ret; + + memcpy_fromio(buf, host->iobase, len); + return 0; +} + +static int hisi_spi_nor_write_reg(struct spi_nor *nor, u8 opcode, + const u8 *buf, size_t len) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = 
priv->host; + + if (len) + memcpy_toio(host->iobase, buf, len); + + return hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_WRITE_DATA_EN); +} + +static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off, + dma_addr_t dma_buf, size_t len, u8 op_type) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + u8 if_type = 0; + u32 reg; + + reg = readl(host->regbase + FMC_CFG); + reg &= ~(FMC_CFG_OP_MODE_MASK | SPI_NOR_ADDR_MODE_MASK); + reg |= FMC_CFG_OP_MODE_NORMAL; + reg |= (nor->addr_width == 4) ? SPI_NOR_ADDR_MODE_4BYTES + : SPI_NOR_ADDR_MODE_3BYTES; + writel(reg, host->regbase + FMC_CFG); + + writel(start_off, host->regbase + FMC_ADDRL); + writel(dma_buf, host->regbase + FMC_DMA_SADDR_D0); + writel(FMC_DMA_LEN_SET(len), host->regbase + FMC_DMA_LEN); + + reg = OP_CFG_FM_CS(priv->chipselect); + if (op_type == FMC_OP_READ) + if_type = hisi_spi_nor_get_if_type(nor->read_proto); + else + if_type = hisi_spi_nor_get_if_type(nor->write_proto); + reg |= OP_CFG_MEM_IF_TYPE(if_type); + if (op_type == FMC_OP_READ) + reg |= OP_CFG_DUMMY_NUM(nor->read_dummy >> 3); + writel(reg, host->regbase + FMC_OP_CFG); + + writel(0xff, host->regbase + FMC_INT_CLR); + reg = OP_CTRL_RW_OP(op_type) | OP_CTRL_DMA_OP_READY; + reg |= (op_type == FMC_OP_READ) + ? OP_CTRL_RD_OPCODE(nor->read_opcode) + : OP_CTRL_WR_OPCODE(nor->program_opcode); + writel(reg, host->regbase + FMC_OP_DMA); + + return hisi_spi_nor_wait_op_finish(host); +} + +static ssize_t hisi_spi_nor_read(struct spi_nor *nor, loff_t from, size_t len, + u_char *read_buf) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + size_t offset; + int ret; + + for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) { + size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset); + + ret = hisi_spi_nor_dma_transfer(nor, + from + offset, host->dma_buffer, trans, FMC_OP_READ); + if (ret) { + dev_warn(nor->dev, "DMA read timeout\n"); + return ret; + } + memcpy(read_buf + offset, host->buffer, trans); + } + + return len; +} + +static ssize_t hisi_spi_nor_write(struct spi_nor *nor, loff_t to, + size_t len, const u_char *write_buf) +{ + struct hifmc_priv *priv = nor->priv; + struct hifmc_host *host = priv->host; + size_t offset; + int ret; + + for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) { + size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset); + + memcpy(host->buffer, write_buf + offset, trans); + ret = hisi_spi_nor_dma_transfer(nor, + to + offset, host->dma_buffer, trans, FMC_OP_WRITE); + if (ret) { + dev_warn(nor->dev, "DMA write timeout\n"); + return ret; + } + } + + return len; +} + +static const struct spi_nor_controller_ops hisi_controller_ops = { + .prepare = hisi_spi_nor_prep, + .unprepare = hisi_spi_nor_unprep, + .read_reg = hisi_spi_nor_read_reg, + .write_reg = hisi_spi_nor_write_reg, + .read = hisi_spi_nor_read, + .write = hisi_spi_nor_write, +}; + +/** + * Get spi flash device information and register it as a mtd device. 
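+ * The child node's "reg" property selects the chipselect and its
+ * "spi-max-frequency" property sets the clock rate used to access
+ * the chip.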
+ */ +static int hisi_spi_nor_register(struct device_node *np, + struct hifmc_host *host) +{ + const struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ | + SNOR_HWCAPS_READ_FAST | + SNOR_HWCAPS_READ_1_1_2 | + SNOR_HWCAPS_READ_1_1_4 | + SNOR_HWCAPS_PP, + }; + struct device *dev = host->dev; + struct spi_nor *nor; + struct hifmc_priv *priv; + struct mtd_info *mtd; + int ret; + + nor = devm_kzalloc(dev, sizeof(*nor), GFP_KERNEL); + if (!nor) + return -ENOMEM; + + nor->dev = dev; + spi_nor_set_flash_node(nor, np); + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + ret = of_property_read_u32(np, "reg", &priv->chipselect); + if (ret) { + dev_err(dev, "There's no reg property for %pOF\n", + np); + return ret; + } + + ret = of_property_read_u32(np, "spi-max-frequency", + &priv->clkrate); + if (ret) { + dev_err(dev, "There's no spi-max-frequency property for %pOF\n", + np); + return ret; + } + priv->host = host; + nor->priv = priv; + nor->controller_ops = &hisi_controller_ops; + + ret = spi_nor_scan(nor, NULL, &hwcaps); + if (ret) + return ret; + + mtd = &nor->mtd; + mtd->name = np->name; + ret = mtd_device_register(mtd, NULL, 0); + if (ret) + return ret; + + host->nor[host->num_chip] = nor; + host->num_chip++; + return 0; +} + +static void hisi_spi_nor_unregister_all(struct hifmc_host *host) +{ + int i; + + for (i = 0; i < host->num_chip; i++) + mtd_device_unregister(&host->nor[i]->mtd); +} + +static int hisi_spi_nor_register_all(struct hifmc_host *host) +{ + struct device *dev = host->dev; + struct device_node *np; + int ret; + + for_each_available_child_of_node(dev->of_node, np) { + ret = hisi_spi_nor_register(np, host); + if (ret) + goto fail; + + if (host->num_chip == HIFMC_MAX_CHIP_NUM) { + dev_warn(dev, "Flash device number exceeds the maximum chipselect number\n"); + of_node_put(np); + break; + } + } + + return 0; + +fail: + hisi_spi_nor_unregister_all(host); + return ret; +} + +static int hisi_spi_nor_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct hifmc_host *host; + int ret; + + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + if (!host) + return -ENOMEM; + + platform_set_drvdata(pdev, host); + host->dev = dev; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control"); + host->regbase = devm_ioremap_resource(dev, res); + if (IS_ERR(host->regbase)) + return PTR_ERR(host->regbase); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "memory"); + host->iobase = devm_ioremap_resource(dev, res); + if (IS_ERR(host->iobase)) + return PTR_ERR(host->iobase); + + host->clk = devm_clk_get(dev, NULL); + if (IS_ERR(host->clk)) + return PTR_ERR(host->clk); + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) { + dev_warn(dev, "Unable to set dma mask\n"); + return ret; + } + + host->buffer = dmam_alloc_coherent(dev, HIFMC_DMA_MAX_LEN, + &host->dma_buffer, GFP_KERNEL); + if (!host->buffer) + return -ENOMEM; + + ret = clk_prepare_enable(host->clk); + if (ret) + return ret; + + mutex_init(&host->lock); + hisi_spi_nor_init(host); + ret = hisi_spi_nor_register_all(host); + if (ret) + mutex_destroy(&host->lock); + + clk_disable_unprepare(host->clk); + return ret; +} + +static int hisi_spi_nor_remove(struct platform_device *pdev) +{ + struct hifmc_host *host = platform_get_drvdata(pdev); + + hisi_spi_nor_unregister_all(host); + mutex_destroy(&host->lock); + clk_disable_unprepare(host->clk); + return 0; +} + +static const struct of_device_id 
hisi_spi_nor_dt_ids[] = { + { .compatible = "hisilicon,fmc-spi-nor"}, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, hisi_spi_nor_dt_ids); + +static struct platform_driver hisi_spi_nor_driver = { + .driver = { + .name = "hisi-sfc", + .of_match_table = hisi_spi_nor_dt_ids, + }, + .probe = hisi_spi_nor_probe, + .remove = hisi_spi_nor_remove, +}; +module_platform_driver(hisi_spi_nor_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("HiSilicon SPI Nor Flash Controller Driver"); diff --git a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c new file mode 100644 index 000000000000..81329f680bec --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Intel PCH/PCU SPI flash PCI driver. + * + * Copyright (C) 2016, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/ioport.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "intel-spi.h" + +#define BCR 0xdc +#define BCR_WPD BIT(0) + +static const struct intel_spi_boardinfo bxt_info = { + .type = INTEL_SPI_BXT, +}; + +static const struct intel_spi_boardinfo cnl_info = { + .type = INTEL_SPI_CNL, +}; + +static int intel_spi_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct intel_spi_boardinfo *info; + struct intel_spi *ispi; + u32 bcr; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info), + GFP_KERNEL); + if (!info) + return -ENOMEM; + + /* Try to make the chip read/write */ + pci_read_config_dword(pdev, BCR, &bcr); + if (!(bcr & BCR_WPD)) { + bcr |= BCR_WPD; + pci_write_config_dword(pdev, BCR, bcr); + pci_read_config_dword(pdev, BCR, &bcr); + } + info->writeable = !!(bcr & BCR_WPD); + + ispi = intel_spi_probe(&pdev->dev, &pdev->resource[0], info); + if (IS_ERR(ispi)) + return PTR_ERR(ispi); + + pci_set_drvdata(pdev, ispi); + return 0; +} + +static void intel_spi_pci_remove(struct pci_dev *pdev) +{ + intel_spi_remove(pci_get_drvdata(pdev)); +} + +static const struct pci_device_id intel_spi_pci_ids[] = { + { PCI_VDEVICE(INTEL, 0x02a4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x06a4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info }, + { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&bxt_info }, + { }, +}; +MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids); + +static struct pci_driver intel_spi_pci_driver = { + .name = "intel-spi", + .id_table = intel_spi_pci_ids, + .probe = intel_spi_pci_probe, + .remove = intel_spi_pci_remove, +}; + +module_pci_driver(intel_spi_pci_driver); + +MODULE_DESCRIPTION("Intel PCH/PCU SPI flash PCI driver"); +MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/spi-nor/controllers/intel-spi-platform.c b/drivers/mtd/spi-nor/controllers/intel-spi-platform.c new file mode 100644 index 
000000000000..f80f1086f928 --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/intel-spi-platform.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Intel PCH/PCU SPI flash platform driver. + * + * Copyright (C) 2016, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/ioport.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include "intel-spi.h" + +static int intel_spi_platform_probe(struct platform_device *pdev) +{ + struct intel_spi_boardinfo *info; + struct intel_spi *ispi; + struct resource *mem; + + info = dev_get_platdata(&pdev->dev); + if (!info) + return -EINVAL; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ispi = intel_spi_probe(&pdev->dev, mem, info); + if (IS_ERR(ispi)) + return PTR_ERR(ispi); + + platform_set_drvdata(pdev, ispi); + return 0; +} + +static int intel_spi_platform_remove(struct platform_device *pdev) +{ + struct intel_spi *ispi = platform_get_drvdata(pdev); + + return intel_spi_remove(ispi); +} + +static struct platform_driver intel_spi_platform_driver = { + .probe = intel_spi_platform_probe, + .remove = intel_spi_platform_remove, + .driver = { + .name = "intel-spi", + }, +}; + +module_platform_driver(intel_spi_platform_driver); + +MODULE_DESCRIPTION("Intel PCH/PCU SPI flash platform driver"); +MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:intel-spi"); diff --git a/drivers/mtd/spi-nor/controllers/intel-spi.c b/drivers/mtd/spi-nor/controllers/intel-spi.c new file mode 100644 index 000000000000..61d2a0ad2131 --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/intel-spi.c @@ -0,0 +1,960 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Intel PCH/PCU SPI flash driver. 
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/platform_data/intel-spi.h>
+
+#include "intel-spi.h"
+
+/* Offsets are from @ispi->base */
+#define BFPREG				0x00
+
+#define HSFSTS_CTL			0x04
+#define HSFSTS_CTL_FSMIE		BIT(31)
+#define HSFSTS_CTL_FDBC_SHIFT		24
+#define HSFSTS_CTL_FDBC_MASK		(0x3f << HSFSTS_CTL_FDBC_SHIFT)
+
+#define HSFSTS_CTL_FCYCLE_SHIFT		17
+#define HSFSTS_CTL_FCYCLE_MASK		(0x0f << HSFSTS_CTL_FCYCLE_SHIFT)
+/* HW sequencer opcodes */
+#define HSFSTS_CTL_FCYCLE_READ		(0x00 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_WRITE		(0x02 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_ERASE		(0x03 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_ERASE_64K	(0x04 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_RDID		(0x06 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_WRSR		(0x07 << HSFSTS_CTL_FCYCLE_SHIFT)
+#define HSFSTS_CTL_FCYCLE_RDSR		(0x08 << HSFSTS_CTL_FCYCLE_SHIFT)
+
+#define HSFSTS_CTL_FGO			BIT(16)
+#define HSFSTS_CTL_FLOCKDN		BIT(15)
+#define HSFSTS_CTL_FDV			BIT(14)
+#define HSFSTS_CTL_SCIP			BIT(5)
+#define HSFSTS_CTL_AEL			BIT(2)
+#define HSFSTS_CTL_FCERR		BIT(1)
+#define HSFSTS_CTL_FDONE		BIT(0)
+
+#define FADDR				0x08
+#define DLOCK				0x0c
+#define FDATA(n)			(0x10 + ((n) * 4))
+
+#define FRACC				0x50
+
+#define FREG(n)				(0x54 + ((n) * 4))
+#define FREG_BASE_MASK			0x3fff
+#define FREG_LIMIT_SHIFT		16
+#define FREG_LIMIT_MASK			(0x03fff << FREG_LIMIT_SHIFT)
+
+/* Offset is from @ispi->pregs */
+#define PR(n)				((n) * 4)
+#define PR_WPE				BIT(31)
+#define PR_LIMIT_SHIFT			16
+#define PR_LIMIT_MASK			(0x3fff << PR_LIMIT_SHIFT)
+#define PR_RPE				BIT(15)
+#define PR_BASE_MASK			0x3fff
+
+/* Offsets are from @ispi->sregs */
+#define SSFSTS_CTL			0x00
+#define SSFSTS_CTL_FSMIE		BIT(23)
+#define SSFSTS_CTL_DS			BIT(22)
+#define SSFSTS_CTL_DBC_SHIFT		16
+#define SSFSTS_CTL_SPOP			BIT(11)
+#define SSFSTS_CTL_ACS			BIT(10)
+#define SSFSTS_CTL_SCGO			BIT(9)
+#define SSFSTS_CTL_COP_SHIFT		12
+#define SSFSTS_CTL_FRS			BIT(7)
+#define SSFSTS_CTL_DOFRS		BIT(6)
+#define SSFSTS_CTL_AEL			BIT(4)
+#define SSFSTS_CTL_FCERR		BIT(3)
+#define SSFSTS_CTL_FDONE		BIT(2)
+#define SSFSTS_CTL_SCIP			BIT(0)
+
+#define PREOP_OPTYPE			0x04
+#define OPMENU0				0x08
+#define OPMENU1				0x0c
+
+#define OPTYPE_READ_NO_ADDR		0
+#define OPTYPE_WRITE_NO_ADDR		1
+#define OPTYPE_READ_WITH_ADDR		2
+#define OPTYPE_WRITE_WITH_ADDR		3
+
+/* CPU specifics */
+#define BYT_PR				0x74
+#define BYT_SSFSTS_CTL			0x90
+#define BYT_BCR				0xfc
+#define BYT_BCR_WPD			BIT(0)
+#define BYT_FREG_NUM			5
+#define BYT_PR_NUM			5
+
+#define LPT_PR				0x74
+#define LPT_SSFSTS_CTL			0x90
+#define LPT_FREG_NUM			5
+#define LPT_PR_NUM			5
+
+#define BXT_PR				0x84
+#define BXT_SSFSTS_CTL			0xa0
+#define BXT_FREG_NUM			12
+#define BXT_PR_NUM			6
+
+#define CNL_PR				0x84
+#define CNL_FREG_NUM			6
+#define CNL_PR_NUM			5
+
+#define LVSCC				0xc4
+#define UVSCC				0xc8
+#define ERASE_OPCODE_SHIFT		8
+#define ERASE_OPCODE_MASK		(0xff << ERASE_OPCODE_SHIFT)
+#define ERASE_64K_OPCODE_SHIFT		16
+#define ERASE_64K_OPCODE_MASK		(0xff << ERASE_64K_OPCODE_SHIFT)
+
+#define INTEL_SPI_TIMEOUT		5000 /* ms */
+#define INTEL_SPI_FIFO_SZ		64
+
+/**
+ * struct intel_spi - Driver private data
+ * @dev: Device pointer
+ * @info: Pointer to board specific info
+ * @nor: SPI NOR layer structure
+ * @base: Beginning of MMIO
space
+ * @pregs: Start of protection registers
+ * @sregs: Start of software sequencer registers
+ * @nregions: Maximum number of regions
+ * @pr_num: Maximum number of protected range registers
+ * @writeable: Is the chip writeable
+ * @locked: Is SPI setting locked
+ * @swseq_reg: Use SW sequencer in register reads/writes
+ * @swseq_erase: Use SW sequencer in erase operation
+ * @erase_64k: 64k erase supported
+ * @atomic_preopcode: Holds preopcode when atomic sequence is requested
+ * @opcodes: Opcodes which are supported. These are programmed by the BIOS
+ *           before it locks down the controller.
+ */
+struct intel_spi {
+	struct device *dev;
+	const struct intel_spi_boardinfo *info;
+	struct spi_nor nor;
+	void __iomem *base;
+	void __iomem *pregs;
+	void __iomem *sregs;
+	size_t nregions;
+	size_t pr_num;
+	bool writeable;
+	bool locked;
+	bool swseq_reg;
+	bool swseq_erase;
+	bool erase_64k;
+	u8 atomic_preopcode;
+	u8 opcodes[8];
+};
+
+static bool writeable;
+module_param(writeable, bool, 0);
+MODULE_PARM_DESC(writeable, "Enable write access to SPI flash chip (default=0)");
+
+static void intel_spi_dump_regs(struct intel_spi *ispi)
+{
+	u32 value;
+	int i;
+
+	dev_dbg(ispi->dev, "BFPREG=0x%08x\n", readl(ispi->base + BFPREG));
+
+	value = readl(ispi->base + HSFSTS_CTL);
+	dev_dbg(ispi->dev, "HSFSTS_CTL=0x%08x\n", value);
+	if (value & HSFSTS_CTL_FLOCKDN)
+		dev_dbg(ispi->dev, "-> Locked\n");
+
+	dev_dbg(ispi->dev, "FADDR=0x%08x\n", readl(ispi->base + FADDR));
+	dev_dbg(ispi->dev, "DLOCK=0x%08x\n", readl(ispi->base + DLOCK));
+
+	for (i = 0; i < 16; i++)
+		dev_dbg(ispi->dev, "FDATA(%d)=0x%08x\n",
+			i, readl(ispi->base + FDATA(i)));
+
+	dev_dbg(ispi->dev, "FRACC=0x%08x\n", readl(ispi->base + FRACC));
+
+	for (i = 0; i < ispi->nregions; i++)
+		dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i,
+			readl(ispi->base + FREG(i)));
+	for (i = 0; i < ispi->pr_num; i++)
+		dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i,
+			readl(ispi->pregs + PR(i)));
+
+	if (ispi->sregs) {
+		value = readl(ispi->sregs + SSFSTS_CTL);
+		dev_dbg(ispi->dev, "SSFSTS_CTL=0x%08x\n", value);
+		dev_dbg(ispi->dev, "PREOP_OPTYPE=0x%08x\n",
+			readl(ispi->sregs + PREOP_OPTYPE));
+		dev_dbg(ispi->dev, "OPMENU0=0x%08x\n",
+			readl(ispi->sregs + OPMENU0));
+		dev_dbg(ispi->dev, "OPMENU1=0x%08x\n",
+			readl(ispi->sregs + OPMENU1));
+	}
+
+	if (ispi->info->type == INTEL_SPI_BYT)
+		dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR));
+
+	dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC));
+	dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC));
+
+	dev_dbg(ispi->dev, "Protected regions:\n");
+	for (i = 0; i < ispi->pr_num; i++) {
+		u32 base, limit;
+
+		value = readl(ispi->pregs + PR(i));
+		if (!(value & (PR_WPE | PR_RPE)))
+			continue;
+
+		limit = (value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT;
+		base = value & PR_BASE_MASK;
+
+		dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x [%c%c]\n",
+			i, base << 12, (limit << 12) | 0xfff,
+			value & PR_WPE ? 'W' : '.',
+			value & PR_RPE ? 'R' : '.');
+	}
+
+	dev_dbg(ispi->dev, "Flash regions:\n");
+	for (i = 0; i < ispi->nregions; i++) {
+		u32 region, base, limit;
+
+		region = readl(ispi->base + FREG(i));
+		base = region & FREG_BASE_MASK;
+		limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT;
+
+		if (base >= limit || (i > 0 && limit == 0))
+			dev_dbg(ispi->dev, " %02d disabled\n", i);
+		else
+			dev_dbg(ispi->dev, " %02d base: 0x%08x limit: 0x%08x\n",
+				i, base << 12, (limit << 12) | 0xfff);
+	}
+
+	dev_dbg(ispi->dev, "Using %cW sequencer for register access\n",
+		ispi->swseq_reg ?
'S' : 'H'); + dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n", + ispi->swseq_erase ? 'S' : 'H'); +} + +/* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */ +static int intel_spi_read_block(struct intel_spi *ispi, void *buf, size_t size) +{ + size_t bytes; + int i = 0; + + if (size > INTEL_SPI_FIFO_SZ) + return -EINVAL; + + while (size > 0) { + bytes = min_t(size_t, size, 4); + memcpy_fromio(buf, ispi->base + FDATA(i), bytes); + size -= bytes; + buf += bytes; + i++; + } + + return 0; +} + +/* Writes max INTEL_SPI_FIFO_SZ bytes to the device fifo */ +static int intel_spi_write_block(struct intel_spi *ispi, const void *buf, + size_t size) +{ + size_t bytes; + int i = 0; + + if (size > INTEL_SPI_FIFO_SZ) + return -EINVAL; + + while (size > 0) { + bytes = min_t(size_t, size, 4); + memcpy_toio(ispi->base + FDATA(i), buf, bytes); + size -= bytes; + buf += bytes; + i++; + } + + return 0; +} + +static int intel_spi_wait_hw_busy(struct intel_spi *ispi) +{ + u32 val; + + return readl_poll_timeout(ispi->base + HSFSTS_CTL, val, + !(val & HSFSTS_CTL_SCIP), 40, + INTEL_SPI_TIMEOUT * 1000); +} + +static int intel_spi_wait_sw_busy(struct intel_spi *ispi) +{ + u32 val; + + return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val, + !(val & SSFSTS_CTL_SCIP), 40, + INTEL_SPI_TIMEOUT * 1000); +} + +static int intel_spi_init(struct intel_spi *ispi) +{ + u32 opmenu0, opmenu1, lvscc, uvscc, val; + int i; + + switch (ispi->info->type) { + case INTEL_SPI_BYT: + ispi->sregs = ispi->base + BYT_SSFSTS_CTL; + ispi->pregs = ispi->base + BYT_PR; + ispi->nregions = BYT_FREG_NUM; + ispi->pr_num = BYT_PR_NUM; + ispi->swseq_reg = true; + + if (writeable) { + /* Disable write protection */ + val = readl(ispi->base + BYT_BCR); + if (!(val & BYT_BCR_WPD)) { + val |= BYT_BCR_WPD; + writel(val, ispi->base + BYT_BCR); + val = readl(ispi->base + BYT_BCR); + } + + ispi->writeable = !!(val & BYT_BCR_WPD); + } + + break; + + case INTEL_SPI_LPT: + ispi->sregs = ispi->base + LPT_SSFSTS_CTL; + ispi->pregs = ispi->base + LPT_PR; + ispi->nregions = LPT_FREG_NUM; + ispi->pr_num = LPT_PR_NUM; + ispi->swseq_reg = true; + break; + + case INTEL_SPI_BXT: + ispi->sregs = ispi->base + BXT_SSFSTS_CTL; + ispi->pregs = ispi->base + BXT_PR; + ispi->nregions = BXT_FREG_NUM; + ispi->pr_num = BXT_PR_NUM; + ispi->erase_64k = true; + break; + + case INTEL_SPI_CNL: + ispi->sregs = NULL; + ispi->pregs = ispi->base + CNL_PR; + ispi->nregions = CNL_FREG_NUM; + ispi->pr_num = CNL_PR_NUM; + break; + + default: + return -EINVAL; + } + + /* Disable #SMI generation from HW sequencer */ + val = readl(ispi->base + HSFSTS_CTL); + val &= ~HSFSTS_CTL_FSMIE; + writel(val, ispi->base + HSFSTS_CTL); + + /* + * Determine whether erase operation should use HW or SW sequencer. + * + * The HW sequencer has a predefined list of opcodes, with only the + * erase opcode being programmable in LVSCC and UVSCC registers. + * If these registers don't contain a valid erase opcode, erase + * cannot be done using HW sequencer. 
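+	 * (Both LVSCC and UVSCC must advertise an erase opcode; if either
+	 * one lacks it, the check below falls back to the SW sequencer.)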
+ */ + lvscc = readl(ispi->base + LVSCC); + uvscc = readl(ispi->base + UVSCC); + if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK)) + ispi->swseq_erase = true; + /* SPI controller on Intel BXT supports 64K erase opcode */ + if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase) + if (!(lvscc & ERASE_64K_OPCODE_MASK) || + !(uvscc & ERASE_64K_OPCODE_MASK)) + ispi->erase_64k = false; + + if (ispi->sregs == NULL && (ispi->swseq_reg || ispi->swseq_erase)) { + dev_err(ispi->dev, "software sequencer not supported, but required\n"); + return -EINVAL; + } + + /* + * Some controllers can only do basic operations using hardware + * sequencer. All other operations are supposed to be carried out + * using software sequencer. + */ + if (ispi->swseq_reg) { + /* Disable #SMI generation from SW sequencer */ + val = readl(ispi->sregs + SSFSTS_CTL); + val &= ~SSFSTS_CTL_FSMIE; + writel(val, ispi->sregs + SSFSTS_CTL); + } + + /* Check controller's lock status */ + val = readl(ispi->base + HSFSTS_CTL); + ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN); + + if (ispi->locked && ispi->sregs) { + /* + * BIOS programs allowed opcodes and then locks down the + * register. So read back what opcodes it decided to support. + * That's the set we are going to support as well. + */ + opmenu0 = readl(ispi->sregs + OPMENU0); + opmenu1 = readl(ispi->sregs + OPMENU1); + + if (opmenu0 && opmenu1) { + for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) { + ispi->opcodes[i] = opmenu0 >> i * 8; + ispi->opcodes[i + 4] = opmenu1 >> i * 8; + } + } + } + + intel_spi_dump_regs(ispi); + + return 0; +} + +static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype) +{ + int i; + int preop; + + if (ispi->locked) { + for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) + if (ispi->opcodes[i] == opcode) + return i; + + return -EINVAL; + } + + /* The lock is off, so just use index 0 */ + writel(opcode, ispi->sregs + OPMENU0); + preop = readw(ispi->sregs + PREOP_OPTYPE); + writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE); + + return 0; +} + +static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, size_t len) +{ + u32 val, status; + int ret; + + val = readl(ispi->base + HSFSTS_CTL); + val &= ~(HSFSTS_CTL_FCYCLE_MASK | HSFSTS_CTL_FDBC_MASK); + + switch (opcode) { + case SPINOR_OP_RDID: + val |= HSFSTS_CTL_FCYCLE_RDID; + break; + case SPINOR_OP_WRSR: + val |= HSFSTS_CTL_FCYCLE_WRSR; + break; + case SPINOR_OP_RDSR: + val |= HSFSTS_CTL_FCYCLE_RDSR; + break; + default: + return -EINVAL; + } + + if (len > INTEL_SPI_FIFO_SZ) + return -EINVAL; + + val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT; + val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; + val |= HSFSTS_CTL_FGO; + writel(val, ispi->base + HSFSTS_CTL); + + ret = intel_spi_wait_hw_busy(ispi); + if (ret) + return ret; + + status = readl(ispi->base + HSFSTS_CTL); + if (status & HSFSTS_CTL_FCERR) + return -EIO; + else if (status & HSFSTS_CTL_AEL) + return -EACCES; + + return 0; +} + +static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, size_t len, + int optype) +{ + u32 val = 0, status; + u8 atomic_preopcode; + int ret; + + ret = intel_spi_opcode_index(ispi, opcode, optype); + if (ret < 0) + return ret; + + if (len > INTEL_SPI_FIFO_SZ) + return -EINVAL; + + /* + * Always clear it after each SW sequencer operation regardless + * of whether it is successful or not. 
+ */ + atomic_preopcode = ispi->atomic_preopcode; + ispi->atomic_preopcode = 0; + + /* Only mark 'Data Cycle' bit when there is data to be transferred */ + if (len > 0) + val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS; + val |= ret << SSFSTS_CTL_COP_SHIFT; + val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE; + val |= SSFSTS_CTL_SCGO; + if (atomic_preopcode) { + u16 preop; + + switch (optype) { + case OPTYPE_WRITE_NO_ADDR: + case OPTYPE_WRITE_WITH_ADDR: + /* Pick matching preopcode for the atomic sequence */ + preop = readw(ispi->sregs + PREOP_OPTYPE); + if ((preop & 0xff) == atomic_preopcode) + ; /* Do nothing */ + else if ((preop >> 8) == atomic_preopcode) + val |= SSFSTS_CTL_SPOP; + else + return -EINVAL; + + /* Enable atomic sequence */ + val |= SSFSTS_CTL_ACS; + break; + + default: + return -EINVAL; + } + + } + writel(val, ispi->sregs + SSFSTS_CTL); + + ret = intel_spi_wait_sw_busy(ispi); + if (ret) + return ret; + + status = readl(ispi->sregs + SSFSTS_CTL); + if (status & SSFSTS_CTL_FCERR) + return -EIO; + else if (status & SSFSTS_CTL_AEL) + return -EACCES; + + return 0; +} + +static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, + size_t len) +{ + struct intel_spi *ispi = nor->priv; + int ret; + + /* Address of the first chip */ + writel(0, ispi->base + FADDR); + + if (ispi->swseq_reg) + ret = intel_spi_sw_cycle(ispi, opcode, len, + OPTYPE_READ_NO_ADDR); + else + ret = intel_spi_hw_cycle(ispi, opcode, len); + + if (ret) + return ret; + + return intel_spi_read_block(ispi, buf, len); +} + +static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf, + size_t len) +{ + struct intel_spi *ispi = nor->priv; + int ret; + + /* + * This is handled with atomic operation and preop code in Intel + * controller so we only verify that it is available. If the + * controller is not locked, program the opcode to the PREOP + * register for later use. + * + * When hardware sequencer is used there is no need to program + * any opcodes (it handles them automatically as part of a command). + */ + if (opcode == SPINOR_OP_WREN) { + u16 preop; + + if (!ispi->swseq_reg) + return 0; + + preop = readw(ispi->sregs + PREOP_OPTYPE); + if ((preop & 0xff) != opcode && (preop >> 8) != opcode) { + if (ispi->locked) + return -EINVAL; + writel(opcode, ispi->sregs + PREOP_OPTYPE); + } + + /* + * This enables atomic sequence on next SW sycle. Will + * be cleared after next operation. + */ + ispi->atomic_preopcode = opcode; + return 0; + } + + writel(0, ispi->base + FADDR); + + /* Write the value beforehand */ + ret = intel_spi_write_block(ispi, buf, len); + if (ret) + return ret; + + if (ispi->swseq_reg) + return intel_spi_sw_cycle(ispi, opcode, len, + OPTYPE_WRITE_NO_ADDR); + return intel_spi_hw_cycle(ispi, opcode, len); +} + +static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len, + u_char *read_buf) +{ + struct intel_spi *ispi = nor->priv; + size_t block_size, retlen = 0; + u32 val, status; + ssize_t ret; + + /* + * Atomic sequence is not expected with HW sequencer reads. Make + * sure it is cleared regardless. 
+ */ + if (WARN_ON_ONCE(ispi->atomic_preopcode)) + ispi->atomic_preopcode = 0; + + switch (nor->read_opcode) { + case SPINOR_OP_READ: + case SPINOR_OP_READ_FAST: + case SPINOR_OP_READ_4B: + case SPINOR_OP_READ_FAST_4B: + break; + default: + return -EINVAL; + } + + while (len > 0) { + block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ); + + /* Read cannot cross 4K boundary */ + block_size = min_t(loff_t, from + block_size, + round_up(from + 1, SZ_4K)) - from; + + writel(from, ispi->base + FADDR); + + val = readl(ispi->base + HSFSTS_CTL); + val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK); + val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; + val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT; + val |= HSFSTS_CTL_FCYCLE_READ; + val |= HSFSTS_CTL_FGO; + writel(val, ispi->base + HSFSTS_CTL); + + ret = intel_spi_wait_hw_busy(ispi); + if (ret) + return ret; + + status = readl(ispi->base + HSFSTS_CTL); + if (status & HSFSTS_CTL_FCERR) + ret = -EIO; + else if (status & HSFSTS_CTL_AEL) + ret = -EACCES; + + if (ret < 0) { + dev_err(ispi->dev, "read error: %llx: %#x\n", from, + status); + return ret; + } + + ret = intel_spi_read_block(ispi, read_buf, block_size); + if (ret) + return ret; + + len -= block_size; + from += block_size; + retlen += block_size; + read_buf += block_size; + } + + return retlen; +} + +static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len, + const u_char *write_buf) +{ + struct intel_spi *ispi = nor->priv; + size_t block_size, retlen = 0; + u32 val, status; + ssize_t ret; + + /* Not needed with HW sequencer write, make sure it is cleared */ + ispi->atomic_preopcode = 0; + + while (len > 0) { + block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ); + + /* Write cannot cross 4K boundary */ + block_size = min_t(loff_t, to + block_size, + round_up(to + 1, SZ_4K)) - to; + + writel(to, ispi->base + FADDR); + + val = readl(ispi->base + HSFSTS_CTL); + val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK); + val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; + val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT; + val |= HSFSTS_CTL_FCYCLE_WRITE; + + ret = intel_spi_write_block(ispi, write_buf, block_size); + if (ret) { + dev_err(ispi->dev, "failed to write block\n"); + return ret; + } + + /* Start the write now */ + val |= HSFSTS_CTL_FGO; + writel(val, ispi->base + HSFSTS_CTL); + + ret = intel_spi_wait_hw_busy(ispi); + if (ret) { + dev_err(ispi->dev, "timeout\n"); + return ret; + } + + status = readl(ispi->base + HSFSTS_CTL); + if (status & HSFSTS_CTL_FCERR) + ret = -EIO; + else if (status & HSFSTS_CTL_AEL) + ret = -EACCES; + + if (ret < 0) { + dev_err(ispi->dev, "write error: %llx: %#x\n", to, + status); + return ret; + } + + len -= block_size; + to += block_size; + retlen += block_size; + write_buf += block_size; + } + + return retlen; +} + +static int intel_spi_erase(struct spi_nor *nor, loff_t offs) +{ + size_t erase_size, len = nor->mtd.erasesize; + struct intel_spi *ispi = nor->priv; + u32 val, status, cmd; + int ret; + + /* If the hardware can do 64k erase use that when possible */ + if (len >= SZ_64K && ispi->erase_64k) { + cmd = HSFSTS_CTL_FCYCLE_ERASE_64K; + erase_size = SZ_64K; + } else { + cmd = HSFSTS_CTL_FCYCLE_ERASE; + erase_size = SZ_4K; + } + + if (ispi->swseq_erase) { + while (len > 0) { + writel(offs, ispi->base + FADDR); + + ret = intel_spi_sw_cycle(ispi, nor->erase_opcode, + 0, OPTYPE_WRITE_WITH_ADDR); + if (ret) + return ret; + + offs += erase_size; + len -= erase_size; + } + + return 0; + } + + /* Not needed with HW 
sequencer erase, make sure it is cleared */ + ispi->atomic_preopcode = 0; + + while (len > 0) { + writel(offs, ispi->base + FADDR); + + val = readl(ispi->base + HSFSTS_CTL); + val &= ~(HSFSTS_CTL_FDBC_MASK | HSFSTS_CTL_FCYCLE_MASK); + val |= HSFSTS_CTL_AEL | HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; + val |= cmd; + val |= HSFSTS_CTL_FGO; + writel(val, ispi->base + HSFSTS_CTL); + + ret = intel_spi_wait_hw_busy(ispi); + if (ret) + return ret; + + status = readl(ispi->base + HSFSTS_CTL); + if (status & HSFSTS_CTL_FCERR) + return -EIO; + else if (status & HSFSTS_CTL_AEL) + return -EACCES; + + offs += erase_size; + len -= erase_size; + } + + return 0; +} + +static bool intel_spi_is_protected(const struct intel_spi *ispi, + unsigned int base, unsigned int limit) +{ + int i; + + for (i = 0; i < ispi->pr_num; i++) { + u32 pr_base, pr_limit, pr_value; + + pr_value = readl(ispi->pregs + PR(i)); + if (!(pr_value & (PR_WPE | PR_RPE))) + continue; + + pr_limit = (pr_value & PR_LIMIT_MASK) >> PR_LIMIT_SHIFT; + pr_base = pr_value & PR_BASE_MASK; + + if (pr_base >= base && pr_limit <= limit) + return true; + } + + return false; +} + +/* + * There will be a single partition holding all enabled flash regions. We + * call this "BIOS". + */ +static void intel_spi_fill_partition(struct intel_spi *ispi, + struct mtd_partition *part) +{ + u64 end; + int i; + + memset(part, 0, sizeof(*part)); + + /* Start from the mandatory descriptor region */ + part->size = 4096; + part->name = "BIOS"; + + /* + * Now try to find where this partition ends based on the flash + * region registers. + */ + for (i = 1; i < ispi->nregions; i++) { + u32 region, base, limit; + + region = readl(ispi->base + FREG(i)); + base = region & FREG_BASE_MASK; + limit = (region & FREG_LIMIT_MASK) >> FREG_LIMIT_SHIFT; + + if (base >= limit || limit == 0) + continue; + + /* + * If any of the regions have protection bits set, make the + * whole partition read-only to be on the safe side. 
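+	 * A region counts as protected when a PR range with its write or
+	 * read protection bit set lies inside the region, as checked by
+	 * intel_spi_is_protected().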
+ */ + if (intel_spi_is_protected(ispi, base, limit)) + ispi->writeable = false; + + end = (limit << 12) + 4096; + if (end > part->size) + part->size = end; + } +} + +static const struct spi_nor_controller_ops intel_spi_controller_ops = { + .read_reg = intel_spi_read_reg, + .write_reg = intel_spi_write_reg, + .read = intel_spi_read, + .write = intel_spi_write, + .erase = intel_spi_erase, +}; + +struct intel_spi *intel_spi_probe(struct device *dev, + struct resource *mem, const struct intel_spi_boardinfo *info) +{ + const struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ | + SNOR_HWCAPS_READ_FAST | + SNOR_HWCAPS_PP, + }; + struct mtd_partition part; + struct intel_spi *ispi; + int ret; + + if (!info || !mem) + return ERR_PTR(-EINVAL); + + ispi = devm_kzalloc(dev, sizeof(*ispi), GFP_KERNEL); + if (!ispi) + return ERR_PTR(-ENOMEM); + + ispi->base = devm_ioremap_resource(dev, mem); + if (IS_ERR(ispi->base)) + return ERR_CAST(ispi->base); + + ispi->dev = dev; + ispi->info = info; + ispi->writeable = info->writeable; + + ret = intel_spi_init(ispi); + if (ret) + return ERR_PTR(ret); + + ispi->nor.dev = ispi->dev; + ispi->nor.priv = ispi; + ispi->nor.controller_ops = &intel_spi_controller_ops; + + ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps); + if (ret) { + dev_info(dev, "failed to locate the chip\n"); + return ERR_PTR(ret); + } + + intel_spi_fill_partition(ispi, &part); + + /* Prevent writes if not explicitly enabled */ + if (!ispi->writeable || !writeable) + ispi->nor.mtd.flags &= ~MTD_WRITEABLE; + + ret = mtd_device_register(&ispi->nor.mtd, &part, 1); + if (ret) + return ERR_PTR(ret); + + return ispi; +} +EXPORT_SYMBOL_GPL(intel_spi_probe); + +int intel_spi_remove(struct intel_spi *ispi) +{ + return mtd_device_unregister(&ispi->nor.mtd); +} +EXPORT_SYMBOL_GPL(intel_spi_remove); + +MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver"); +MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/spi-nor/controllers/intel-spi.h b/drivers/mtd/spi-nor/controllers/intel-spi.h new file mode 100644 index 000000000000..e2f41b8827bf --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/intel-spi.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Intel PCH/PCU SPI flash driver. + * + * Copyright (C) 2016, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#ifndef INTEL_SPI_H +#define INTEL_SPI_H + +#include <linux/platform_data/intel-spi.h> + +struct intel_spi; +struct resource; + +struct intel_spi *intel_spi_probe(struct device *dev, + struct resource *mem, const struct intel_spi_boardinfo *info); +int intel_spi_remove(struct intel_spi *ispi); + +#endif /* INTEL_SPI_H */ diff --git a/drivers/mtd/spi-nor/controllers/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c new file mode 100644 index 000000000000..9a5b1a7c636a --- /dev/null +++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c @@ -0,0 +1,486 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * SPI-NOR driver for NXP SPI Flash Interface (SPIFI) + * + * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com> + * + * Based on Freescale QuadSPI driver: + * Copyright (C) 2013 Freescale Semiconductor, Inc. 
+struct intel_spi *intel_spi_probe(struct device *dev,
+	struct resource *mem, const struct intel_spi_boardinfo *info)
+{
+	const struct spi_nor_hwcaps hwcaps = {
+		.mask = SNOR_HWCAPS_READ |
+			SNOR_HWCAPS_READ_FAST |
+			SNOR_HWCAPS_PP,
+	};
+	struct mtd_partition part;
+	struct intel_spi *ispi;
+	int ret;
+
+	if (!info || !mem)
+		return ERR_PTR(-EINVAL);
+
+	ispi = devm_kzalloc(dev, sizeof(*ispi), GFP_KERNEL);
+	if (!ispi)
+		return ERR_PTR(-ENOMEM);
+
+	ispi->base = devm_ioremap_resource(dev, mem);
+	if (IS_ERR(ispi->base))
+		return ERR_CAST(ispi->base);
+
+	ispi->dev = dev;
+	ispi->info = info;
+	ispi->writeable = info->writeable;
+
+	ret = intel_spi_init(ispi);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ispi->nor.dev = ispi->dev;
+	ispi->nor.priv = ispi;
+	ispi->nor.controller_ops = &intel_spi_controller_ops;
+
+	ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps);
+	if (ret) {
+		dev_info(dev, "failed to locate the chip\n");
+		return ERR_PTR(ret);
+	}
+
+	intel_spi_fill_partition(ispi, &part);
+
+	/* Prevent writes if not explicitly enabled */
+	if (!ispi->writeable || !writeable)
+		ispi->nor.mtd.flags &= ~MTD_WRITEABLE;
+
+	ret = mtd_device_register(&ispi->nor.mtd, &part, 1);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return ispi;
+}
+EXPORT_SYMBOL_GPL(intel_spi_probe);
+
+int intel_spi_remove(struct intel_spi *ispi)
+{
+	return mtd_device_unregister(&ispi->nor.mtd);
+}
+EXPORT_SYMBOL_GPL(intel_spi_remove);
+
+MODULE_DESCRIPTION("Intel PCH/PCU SPI flash core driver");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/controllers/intel-spi.h b/drivers/mtd/spi-nor/controllers/intel-spi.h
new file mode 100644
index 000000000000..e2f41b8827bf
--- /dev/null
+++ b/drivers/mtd/spi-nor/controllers/intel-spi.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel PCH/PCU SPI flash driver.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#ifndef INTEL_SPI_H
+#define INTEL_SPI_H
+
+#include <linux/platform_data/intel-spi.h>
+
+struct intel_spi;
+struct resource;
+
+struct intel_spi *intel_spi_probe(struct device *dev,
+	struct resource *mem, const struct intel_spi_boardinfo *info);
+int intel_spi_remove(struct intel_spi *ispi);
+
+#endif /* INTEL_SPI_H */
diff --git a/drivers/mtd/spi-nor/controllers/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
new file mode 100644
index 000000000000..9a5b1a7c636a
--- /dev/null
+++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
@@ -0,0 +1,486 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SPI-NOR driver for NXP SPI Flash Interface (SPIFI)
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * Based on Freescale QuadSPI driver:
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+/* NXP SPIFI registers, bits and macros */
+#define SPIFI_CTRL				0x000
+#define SPIFI_CTRL_TIMEOUT(timeout)		(timeout)
+#define SPIFI_CTRL_CSHIGH(cshigh)		((cshigh) << 16)
+#define SPIFI_CTRL_MODE3			BIT(23)
+#define SPIFI_CTRL_DUAL				BIT(28)
+#define SPIFI_CTRL_FBCLK			BIT(30)
+#define SPIFI_CMD				0x004
+#define SPIFI_CMD_DATALEN(dlen)			((dlen) & 0x3fff)
+#define SPIFI_CMD_DOUT				BIT(15)
+#define SPIFI_CMD_INTLEN(ilen)			((ilen) << 16)
+#define SPIFI_CMD_FIELDFORM(field)		((field) << 19)
+#define SPIFI_CMD_FIELDFORM_ALL_SERIAL		SPIFI_CMD_FIELDFORM(0x0)
+#define SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA	SPIFI_CMD_FIELDFORM(0x1)
+#define SPIFI_CMD_FRAMEFORM(frame)		((frame) << 21)
+#define SPIFI_CMD_FRAMEFORM_OPCODE_ONLY		SPIFI_CMD_FRAMEFORM(0x1)
+#define SPIFI_CMD_OPCODE(op)			((op) << 24)
+#define SPIFI_ADDR				0x008
+#define SPIFI_IDATA				0x00c
+#define SPIFI_CLIMIT				0x010
+#define SPIFI_DATA				0x014
+#define SPIFI_MCMD				0x018
+#define SPIFI_STAT				0x01c
+#define SPIFI_STAT_MCINIT			BIT(0)
+#define SPIFI_STAT_CMD				BIT(1)
+#define SPIFI_STAT_RESET			BIT(4)
+
+#define SPI_NOR_MAX_ID_LEN	6
+
+struct nxp_spifi {
+	struct device *dev;
+	struct clk *clk_spifi;
+	struct clk *clk_reg;
+	void __iomem *io_base;
+	void __iomem *flash_base;
+	struct spi_nor nor;
+	bool memory_mode;
+	u32 mcmd;
+};
+
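+/*
+ * Wait for the command in the CMD register to finish. The CMD bit in
+ * the status register stays set while a command is still in progress.
+ */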
+static int nxp_spifi_wait_for_cmd(struct nxp_spifi *spifi)
+{
+	u8 stat;
+	int ret;
+
+	ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
+				 !(stat & SPIFI_STAT_CMD), 10, 30);
+	if (ret)
+		dev_warn(spifi->dev, "command timed out\n");
+
+	return ret;
+}
+
+static int nxp_spifi_reset(struct nxp_spifi *spifi)
+{
+	u8 stat;
+	int ret;
+
+	writel(SPIFI_STAT_RESET, spifi->io_base + SPIFI_STAT);
+	ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
+				 !(stat & SPIFI_STAT_RESET), 10, 30);
+	if (ret)
+		dev_warn(spifi->dev, "state reset timed out\n");
+
+	return ret;
+}
+
+static int nxp_spifi_set_memory_mode_off(struct nxp_spifi *spifi)
+{
+	int ret;
+
+	if (!spifi->memory_mode)
+		return 0;
+
+	ret = nxp_spifi_reset(spifi);
+	if (ret)
+		dev_err(spifi->dev, "unable to enter command mode\n");
+	else
+		spifi->memory_mode = false;
+
+	return ret;
+}
+
+static int nxp_spifi_set_memory_mode_on(struct nxp_spifi *spifi)
+{
+	u8 stat;
+	int ret;
+
+	if (spifi->memory_mode)
+		return 0;
+
+	writel(spifi->mcmd, spifi->io_base + SPIFI_MCMD);
+	ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
+				 stat & SPIFI_STAT_MCINIT, 10, 30);
+	if (ret)
+		dev_err(spifi->dev, "unable to enter memory mode\n");
+	else
+		spifi->memory_mode = true;
+
+	return ret;
+}
+
+static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+			      size_t len)
+{
+	struct nxp_spifi *spifi = nor->priv;
+	u32 cmd;
+	int ret;
+
+	ret = nxp_spifi_set_memory_mode_off(spifi);
+	if (ret)
+		return ret;
+
+	cmd = SPIFI_CMD_DATALEN(len) |
+	      SPIFI_CMD_OPCODE(opcode) |
+	      SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+	      SPIFI_CMD_FRAMEFORM_OPCODE_ONLY;
+	writel(cmd, spifi->io_base + SPIFI_CMD);
+
+	while (len--)
+		*buf++ = readb(spifi->io_base + SPIFI_DATA);
+
+	return nxp_spifi_wait_for_cmd(spifi);
+}
+
+static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
+			       size_t len)
+{
+	struct nxp_spifi *spifi = nor->priv;
+	u32 cmd;
+	int ret;
+
+	ret = nxp_spifi_set_memory_mode_off(spifi);
+	if (ret)
+		return ret;
+
+	cmd = SPIFI_CMD_DOUT |
+	      SPIFI_CMD_DATALEN(len) |
+	      SPIFI_CMD_OPCODE(opcode) |
+	      SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+	      SPIFI_CMD_FRAMEFORM_OPCODE_ONLY;
+	writel(cmd, spifi->io_base + SPIFI_CMD);
+
+	while (len--)
+		writeb(*buf++, spifi->io_base + SPIFI_DATA);
+
+	return nxp_spifi_wait_for_cmd(spifi);
+}
+
+static ssize_t nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len,
+			      u_char *buf)
+{
+	struct nxp_spifi *spifi = nor->priv;
+	int ret;
+
+	ret = nxp_spifi_set_memory_mode_on(spifi);
+	if (ret)
+		return ret;
+
+	memcpy_fromio(buf, spifi->flash_base + from, len);
+
+	return len;
+}
+
+static ssize_t nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
+			       const u_char *buf)
+{
+	struct nxp_spifi *spifi = nor->priv;
+	u32 cmd;
+	int ret;
+	size_t i;
+
+	ret = nxp_spifi_set_memory_mode_off(spifi);
+	if (ret)
+		return ret;
+
+	writel(to, spifi->io_base + SPIFI_ADDR);
+
+	cmd = SPIFI_CMD_DOUT |
+	      SPIFI_CMD_DATALEN(len) |
+	      SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+	      SPIFI_CMD_OPCODE(nor->program_opcode) |
+	      SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+	writel(cmd, spifi->io_base + SPIFI_CMD);
+
+	for (i = 0; i < len; i++)
+		writeb(buf[i], spifi->io_base + SPIFI_DATA);
+
+	ret = nxp_spifi_wait_for_cmd(spifi);
+	if (ret)
+		return ret;
+
+	return len;
+}
+
+static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs)
+{
+	struct nxp_spifi *spifi = nor->priv;
+	u32 cmd;
+	int ret;
+
+	ret = nxp_spifi_set_memory_mode_off(spifi);
+	if (ret)
+		return ret;
+
+	writel(offs, spifi->io_base + SPIFI_ADDR);
+
+	cmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+	      SPIFI_CMD_OPCODE(nor->erase_opcode) |
+	      SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+	writel(cmd, spifi->io_base + SPIFI_CMD);
+
+	return nxp_spifi_wait_for_cmd(spifi);
+}
+
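+/*
+ * Build the command word written to MCMD when entering memory mode:
+ * the field form from the negotiated read protocol, the read opcode,
+ * dummy cycles expressed as intermediate bytes, and a frame form
+ * derived from the address width.
+ */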
+static int nxp_spifi_setup_memory_cmd(struct nxp_spifi *spifi)
+{
+	switch (spifi->nor.read_proto) {
+	case SNOR_PROTO_1_1_1:
+		spifi->mcmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL;
+		break;
+	case SNOR_PROTO_1_1_2:
+	case SNOR_PROTO_1_1_4:
+		spifi->mcmd = SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA;
+		break;
+	default:
+		dev_err(spifi->dev, "unsupported SPI read mode\n");
+		return -EINVAL;
+	}
+
+	/* Memory mode supports address length between 1 and 4 */
+	if (spifi->nor.addr_width < 1 || spifi->nor.addr_width > 4)
+		return -EINVAL;
+
+	spifi->mcmd |= SPIFI_CMD_OPCODE(spifi->nor.read_opcode) |
+		       SPIFI_CMD_INTLEN(spifi->nor.read_dummy / 8) |
+		       SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+
+	return 0;
+}
+
+static void nxp_spifi_dummy_id_read(struct spi_nor *nor)
+{
+	u8 id[SPI_NOR_MAX_ID_LEN];
+
+	nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
+				      SPI_NOR_MAX_ID_LEN);
+}
+
+static const struct spi_nor_controller_ops nxp_spifi_controller_ops = {
+	.read_reg = nxp_spifi_read_reg,
+	.write_reg = nxp_spifi_write_reg,
+	.read = nxp_spifi_read,
+	.write = nxp_spifi_write,
+	.erase = nxp_spifi_erase,
+};
+
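+/*
+ * Parse the flash child node, program the control register (rx bus
+ * width and SPI mode), scan the attached chip and register it as an
+ * MTD device.
+ */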
+static int nxp_spifi_setup_flash(struct nxp_spifi *spifi,
+				 struct device_node *np)
+{
+	struct spi_nor_hwcaps hwcaps = {
+		.mask = SNOR_HWCAPS_READ |
+			SNOR_HWCAPS_READ_FAST |
+			SNOR_HWCAPS_PP,
+	};
+	u32 ctrl, property;
+	u16 mode = 0;
+	int ret;
+
+	if (!of_property_read_u32(np, "spi-rx-bus-width", &property)) {
+		switch (property) {
+		case 1:
+			break;
+		case 2:
+			mode |= SPI_RX_DUAL;
+			break;
+		case 4:
+			mode |= SPI_RX_QUAD;
+			break;
+		default:
+			dev_err(spifi->dev, "unsupported rx-bus-width\n");
+			return -EINVAL;
+		}
+	}
+
+	if (of_find_property(np, "spi-cpha", NULL))
+		mode |= SPI_CPHA;
+
+	if (of_find_property(np, "spi-cpol", NULL))
+		mode |= SPI_CPOL;
+
+	/* Setup control register defaults */
+	ctrl = SPIFI_CTRL_TIMEOUT(1000) |
+	       SPIFI_CTRL_CSHIGH(15) |
+	       SPIFI_CTRL_FBCLK;
+
+	if (mode & SPI_RX_DUAL) {
+		ctrl |= SPIFI_CTRL_DUAL;
+		hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
+	} else if (mode & SPI_RX_QUAD) {
+		ctrl &= ~SPIFI_CTRL_DUAL;
+		hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
+	} else {
+		ctrl |= SPIFI_CTRL_DUAL;
+	}
+
+	switch (mode & (SPI_CPHA | SPI_CPOL)) {
+	case SPI_MODE_0:
+		ctrl &= ~SPIFI_CTRL_MODE3;
+		break;
+	case SPI_MODE_3:
+		ctrl |= SPIFI_CTRL_MODE3;
+		break;
+	default:
+		dev_err(spifi->dev, "only mode 0 and 3 supported\n");
+		return -EINVAL;
+	}
+
+	writel(ctrl, spifi->io_base + SPIFI_CTRL);
+
+	spifi->nor.dev = spifi->dev;
+	spi_nor_set_flash_node(&spifi->nor, np);
+	spifi->nor.priv = spifi;
+	spifi->nor.controller_ops = &nxp_spifi_controller_ops;
+
+	/*
+	 * The first read on a hard reset isn't reliable so do a
+	 * dummy read of the id before calling spi_nor_scan().
+	 * The reason for this problem is unknown.
+	 *
+	 * The official NXP spifilib uses more or less the same
+	 * workaround that is applied here by reading the device
+	 * id multiple times.
+	 */
+	nxp_spifi_dummy_id_read(&spifi->nor);
+
+	ret = spi_nor_scan(&spifi->nor, NULL, &hwcaps);
+	if (ret) {
+		dev_err(spifi->dev, "device scan failed\n");
+		return ret;
+	}
+
+	ret = nxp_spifi_setup_memory_cmd(spifi);
+	if (ret) {
+		dev_err(spifi->dev, "memory command setup failed\n");
+		return ret;
+	}
+
+	ret = mtd_device_register(&spifi->nor.mtd, NULL, 0);
+	if (ret) {
+		dev_err(spifi->dev, "mtd device register failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int nxp_spifi_probe(struct platform_device *pdev)
+{
+	struct device_node *flash_np;
+	struct nxp_spifi *spifi;
+	struct resource *res;
+	int ret;
+
+	spifi = devm_kzalloc(&pdev->dev, sizeof(*spifi), GFP_KERNEL);
+	if (!spifi)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "spifi");
+	spifi->io_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(spifi->io_base))
+		return PTR_ERR(spifi->io_base);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash");
+	spifi->flash_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(spifi->flash_base))
+		return PTR_ERR(spifi->flash_base);
+
+	spifi->clk_spifi = devm_clk_get(&pdev->dev, "spifi");
+	if (IS_ERR(spifi->clk_spifi)) {
+		dev_err(&pdev->dev, "spifi clock not found\n");
+		return PTR_ERR(spifi->clk_spifi);
+	}
+
+	spifi->clk_reg = devm_clk_get(&pdev->dev, "reg");
+	if (IS_ERR(spifi->clk_reg)) {
+		dev_err(&pdev->dev, "reg clock not found\n");
+		return PTR_ERR(spifi->clk_reg);
+	}
+
+	ret = clk_prepare_enable(spifi->clk_reg);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to enable reg clock\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(spifi->clk_spifi);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to enable spifi clock\n");
+		goto dis_clk_reg;
+	}
+
+	spifi->dev = &pdev->dev;
+	platform_set_drvdata(pdev, spifi);
+
+	/* Initialize and reset device */
+	nxp_spifi_reset(spifi);
+	writel(0, spifi->io_base + SPIFI_IDATA);
+	writel(0, spifi->io_base + SPIFI_MCMD);
+	nxp_spifi_reset(spifi);
+
+	flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
+	if (!flash_np) {
+		dev_err(&pdev->dev, "no SPI flash device to configure\n");
+		ret = -ENODEV;
+		goto dis_clks;
+	}
+
+	ret = nxp_spifi_setup_flash(spifi, flash_np);
+	of_node_put(flash_np);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to setup flash chip\n");
+		goto dis_clks;
+	}
+
+	return 0;
+
+dis_clks:
+	clk_disable_unprepare(spifi->clk_spifi);
+dis_clk_reg:
+	clk_disable_unprepare(spifi->clk_reg);
+	return ret;
+}
+
+static int nxp_spifi_remove(struct platform_device *pdev)
+{
+	struct nxp_spifi *spifi = platform_get_drvdata(pdev);
+
+	mtd_device_unregister(&spifi->nor.mtd);
+	clk_disable_unprepare(spifi->clk_spifi);
+	clk_disable_unprepare(spifi->clk_reg);
+
+	return 0;
+}
+
+static const struct of_device_id nxp_spifi_match[] = {
+	{.compatible = "nxp,lpc1773-spifi"},
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, nxp_spifi_match);
+
+static struct platform_driver nxp_spifi_driver = {
+	.probe  = nxp_spifi_probe,
+	.remove = nxp_spifi_remove,
+	.driver = {
+		.name = "nxp-spifi",
+		.of_match_table = nxp_spifi_match,
+	},
+};
+module_platform_driver(nxp_spifi_driver);
+
+MODULE_DESCRIPTION("NXP SPI Flash Interface driver");
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_LICENSE("GPL v2");