Diffstat (limited to 'drivers/spi')
55 files changed, 6382 insertions, 600 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index f51f9466e518..ed38f6d41f47 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -55,6 +55,9 @@ config SPI_MEM
	  This extension is meant to simplify interaction with SPI memories
	  by providing a high-level interface to send memory-like commands.

+config SPI_OFFLOAD
+	bool
+
 comment "SPI Master Controller Drivers"

 config SPI_AIROHA_SNFI
@@ -176,6 +179,7 @@ config SPI_AU1550
 config SPI_AXI_SPI_ENGINE
	tristate "Analog Devices AXI SPI Engine controller"
	depends on HAS_IOMEM
+	select SPI_OFFLOAD
	help
	  This enables support for the Analog Devices AXI SPI Engine SPI controller.
	  It is part of the SPI Engine framework that is used in some Analog Devices
@@ -542,6 +546,18 @@ config SPI_JCORE
	  This enables support for the SPI master controller in the J-Core
	  synthesizable, open source SoC.

+config SPI_KSPI2
+	tristate "Support for KEBA SPI master type 2 hardware"
+	depends on HAS_IOMEM
+	depends on KEBA_CP500 || COMPILE_TEST
+	select AUXILIARY_BUS
+	help
+	  This driver supports KEBA SPI master type 2 FPGA implementation,
+	  as found on CP500 devices for example.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called spi-kspi2.
+
 config SPI_LM70_LLP
	tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
	depends on PARPORT
@@ -920,6 +936,15 @@ config SPI_QCOM_QSPI
	help
	  QSPI(Quad SPI) driver for Qualcomm QSPI controller.

+config SPI_QPIC_SNAND
+	tristate "QPIC SNAND controller"
+	depends on ARCH_QCOM || COMPILE_TEST
+	depends on MTD
+	help
+	  QPIC_SNAND (QPIC SPI NAND) driver for Qualcomm QPIC controller.
+	  QPIC controller supports both parallel nand and serial nand.
+	  This config will enable serial nand driver for QPIC controller.
+
 config SPI_QUP
	tristate "Qualcomm SPI controller with QUP interface"
	depends on ARCH_QCOM || COMPILE_TEST
@@ -1009,6 +1034,15 @@ config SPI_SN_F_OSPI
	  for connecting an SPI Flash memory over up to 8-bit wide bus.
	  It supports indirect access mode only.

+config SPI_SG2044_NOR
+	tristate "SG2044 SPI NOR Controller"
+	depends on ARCH_SOPHGO || COMPILE_TEST
+	help
+	  This enables support for the SG2044 SPI NOR controller,
+	  which supports Dual/Quad read and write operations while
+	  also supporting 3Byte address devices and 4Byte address
+	  devices.
+
 config SPI_SPRD
	tristate "Spreadtrum SPI controller"
	depends on ARCH_SPRD || COMPILE_TEST
@@ -1033,6 +1067,16 @@ config SPI_STM32
	  is not available, the driver automatically falls back to
	  PIO mode.

+config SPI_STM32_OSPI
+	tristate "STMicroelectronics STM32 OCTO SPI controller"
+	depends on ARCH_STM32 || COMPILE_TEST
+	depends on OF
+	depends on SPI_MEM
+	help
+	  This enables support for the Octo SPI controller in master mode.
+	  This driver does not support generic SPI. The implementation only
+	  supports spi-mem interface.
+
 config SPI_STM32_QSPI
	tristate "STMicroelectronics STM32 QUAD SPI controller"
	depends on ARCH_STM32 || COMPILE_TEST
@@ -1305,4 +1349,16 @@ endif # SPI_SLAVE
 config SPI_DYNAMIC
	def_bool ACPI || OF_DYNAMIC || SPI_SLAVE

+if SPI_OFFLOAD
+
+comment "SPI Offload triggers"
+
+config SPI_OFFLOAD_TRIGGER_PWM
+	tristate "SPI offload trigger using PWM"
+	depends on PWM
+	help
+	  Generic SPI offload trigger implemented using PWM output.
+
+endif # SPI_OFFLOAD
+
 endif # SPI
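As an editorial aside, here is a minimal sketch of how a peripheral driver might consume the new offload core together with a PWM-backed trigger. The consumer-side names (devm_spi_offload_get(), devm_spi_offload_trigger_get(), SPI_OFFLOAD_TRIGGER_PERIODIC, the header path) are assumptions inferred from this series, not verbatim from the patch; only the SPI_OFFLOAD_CAP_* flags appear in the diff itself.

#include <linux/spi/offload/consumer.h> /* assumed header */
#include <linux/spi/spi.h>

static int example_acquire_offload(struct device *dev, struct spi_device *spi)
{
	/* Hypothetical consumer; capability flags are from this series. */
	static const struct spi_offload_config config = {
		.capability_flags = SPI_OFFLOAD_CAP_TRIGGER |
				    SPI_OFFLOAD_CAP_RX_STREAM_DMA,
	};
	struct spi_offload_trigger *trigger;
	struct spi_offload *offload;

	offload = devm_spi_offload_get(dev, spi, &config);
	if (IS_ERR(offload))
		return PTR_ERR(offload);

	/* A PWM-backed trigger registers as a periodic trigger source. */
	trigger = devm_spi_offload_trigger_get(dev, offload,
					       SPI_OFFLOAD_TRIGGER_PERIODIC);
	if (IS_ERR(trigger))
		return PTR_ERR(trigger);

	return 0;
}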
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index aea5e54de195..c3a1a47b3bf4 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -10,6 +10,7 @@ ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG
 obj-$(CONFIG_SPI_MASTER) += spi.o
 obj-$(CONFIG_SPI_MEM) += spi-mem.o
 obj-$(CONFIG_SPI_MUX) += spi-mux.o
+obj-$(CONFIG_SPI_OFFLOAD) += spi-offload.o
 obj-$(CONFIG_SPI_SPIDEV) += spidev.o
 obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o
@@ -74,6 +75,7 @@ obj-$(CONFIG_SPI_INTEL_PCI) += spi-intel-pci.o
 obj-$(CONFIG_SPI_INTEL_PLATFORM) += spi-intel-platform.o
 obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o
 obj-$(CONFIG_SPI_JCORE) += spi-jcore.o
+obj-$(CONFIG_SPI_KSPI2) += spi-kspi2.o
 obj-$(CONFIG_SPI_LJCA) += spi-ljca.o
 obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
 obj-$(CONFIG_SPI_LOONGSON_CORE) += spi-loongson-core.o
@@ -115,6 +117,7 @@ obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
 obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
 obj-$(CONFIG_SPI_QCOM_GENI) += spi-geni-qcom.o
 obj-$(CONFIG_SPI_QCOM_QSPI) += spi-qcom-qspi.o
+obj-$(CONFIG_SPI_QPIC_SNAND) += spi-qpic-snand.o
 obj-$(CONFIG_SPI_QUP) += spi-qup.o
 obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o
 obj-$(CONFIG_SPI_ROCKCHIP_SFC) += spi-rockchip-sfc.o
@@ -133,9 +136,11 @@ obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
 obj-$(CONFIG_SPI_SIFIVE) += spi-sifive.o
 obj-$(CONFIG_SPI_SLAVE_MT27XX) += spi-slave-mt27xx.o
 obj-$(CONFIG_SPI_SN_F_OSPI) += spi-sn-f-ospi.o
+obj-$(CONFIG_SPI_SG2044_NOR) += spi-sg2044-nor.o
 obj-$(CONFIG_SPI_SPRD) += spi-sprd.o
 obj-$(CONFIG_SPI_SPRD_ADI) += spi-sprd-adi.o
 obj-$(CONFIG_SPI_STM32) += spi-stm32.o
+obj-$(CONFIG_SPI_STM32_OSPI) += spi-stm32-ospi.o
 obj-$(CONFIG_SPI_STM32_QSPI) += spi-stm32-qspi.o
 obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
 obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
@@ -162,3 +167,6 @@ obj-$(CONFIG_SPI_AMD) += spi-amd.o
 # SPI slave protocol handlers
 obj-$(CONFIG_SPI_SLAVE_TIME) += spi-slave-time.o
 obj-$(CONFIG_SPI_SLAVE_SYSTEM_CONTROL) += spi-slave-system-control.o
+
+# SPI offload triggers
+obj-$(CONFIG_SPI_OFFLOAD_TRIGGER_PWM) += spi-offload-trigger-pwm.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 8887f6bd41c7..e7b61dc4ce67 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -11,11 +11,15 @@
  * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
  */

+#include <linux/bitfield.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
@@ -34,6 +38,7 @@
 #define QSPI_IDR 0x0018 /* Interrupt Disable Register */
 #define QSPI_IMR 0x001c /* Interrupt Mask Register */
 #define QSPI_SCR 0x0020 /* Serial Clock Register */
+#define QSPI_SR2 0x0024 /* SAMA7G5 Status Register */

 #define QSPI_IAR 0x0030 /* Instruction Address Register */
 #define QSPI_ICR 0x0034 /* Instruction Code Register */
@@ -44,16 +49,32 @@
 #define QSPI_SMR 0x0040 /* Scrambling Mode Register */
 #define QSPI_SKR 0x0044 /* Scrambling Key Register */

+#define QSPI_REFRESH 0x0050 /* Refresh Register */
+#define QSPI_WRACNT 0x0054 /* Write Access Counter Register */
+#define QSPI_DLLCFG 0x0058 /* DLL Configuration Register */
+#define QSPI_PCALCFG 0x005C /* Pad Calibration Configuration Register */
+#define QSPI_PCALBP 0x0060 /* Pad Calibration Bypass Register */
+#define QSPI_TOUT 0x0064 /* Timeout Register */
+
 #define QSPI_WPMR 0x00E4 /* Write Protection Mode Register */
 #define QSPI_WPSR 0x00E8 /* Write Protection Status Register */
 #define QSPI_VERSION 0x00FC /* Version Register */

+#define SAMA7G5_QSPI0_MAX_SPEED_HZ 200000000
+#define SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ 133000000
+
 /* Bitfields in QSPI_CR (Control Register) */
 #define QSPI_CR_QSPIEN BIT(0)
 #define QSPI_CR_QSPIDIS BIT(1)
+#define QSPI_CR_DLLON BIT(2)
+#define QSPI_CR_DLLOFF BIT(3)
+#define QSPI_CR_STPCAL BIT(4)
+#define QSPI_CR_SRFRSH BIT(5)
 #define QSPI_CR_SWRST BIT(7)
+#define QSPI_CR_UPDCFG BIT(8)
+#define QSPI_CR_STTFR BIT(9)
+#define QSPI_CR_RTOUT BIT(10)
 #define QSPI_CR_LASTXFER BIT(24)

 /* Bitfields in QSPI_MR (Mode Register) */
@@ -61,12 +82,14 @@
 #define QSPI_MR_LLB BIT(1)
 #define QSPI_MR_WDRBT BIT(2)
 #define QSPI_MR_SMRM BIT(3)
+#define QSPI_MR_DQSDLYEN BIT(3)
 #define QSPI_MR_CSMODE_MASK GENMASK(5, 4)
 #define QSPI_MR_CSMODE_NOT_RELOADED (0 << 4)
 #define QSPI_MR_CSMODE_LASTXFER (1 << 4)
 #define QSPI_MR_CSMODE_SYSTEMATICALLY (2 << 4)
 #define QSPI_MR_NBBITS_MASK GENMASK(11, 8)
 #define QSPI_MR_NBBITS(n) ((((n) - 8) << 8) & QSPI_MR_NBBITS_MASK)
+#define QSPI_MR_OENSD BIT(15)
 #define QSPI_MR_DLYBCT_MASK GENMASK(23, 16)
 #define QSPI_MR_DLYBCT(n) (((n) << 16) & QSPI_MR_DLYBCT_MASK)
 #define QSPI_MR_DLYCS_MASK GENMASK(31, 24)
@@ -80,6 +103,13 @@
 #define QSPI_SR_CSR BIT(8)
 #define QSPI_SR_CSS BIT(9)
 #define QSPI_SR_INSTRE BIT(10)
+#define QSPI_SR_LWRA BIT(11)
+#define QSPI_SR_QITF BIT(12)
+#define QSPI_SR_QITR BIT(13)
+#define QSPI_SR_CSFA BIT(14)
+#define QSPI_SR_CSRA BIT(15)
+#define QSPI_SR_RFRSHD BIT(16)
+#define QSPI_SR_TOUT BIT(17)
 #define QSPI_SR_QSPIENS BIT(24)

 #define QSPI_SR_CMD_COMPLETED (QSPI_SR_INSTRE | QSPI_SR_CSR)
@@ -92,9 +122,22 @@
 #define QSPI_SCR_DLYBS_MASK GENMASK(23, 16)
 #define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK)

+/* Bitfields in QSPI_SR2 (SAMA7G5 Status Register) */
+#define QSPI_SR2_SYNCBSY BIT(0)
+#define QSPI_SR2_QSPIENS BIT(1)
+#define QSPI_SR2_CSS BIT(2)
+#define QSPI_SR2_RBUSY BIT(3)
+#define QSPI_SR2_HIDLE BIT(4)
+#define QSPI_SR2_DLOCK BIT(5)
+#define QSPI_SR2_CALBSY BIT(6)
+
+/* Bitfields in QSPI_IAR (Instruction Address Register) */
+#define QSPI_IAR_ADDR GENMASK(31, 0)
+
 /* Bitfields in QSPI_ICR (Read/Write Instruction Code Register) */
 #define QSPI_ICR_INST_MASK GENMASK(7, 0)
 #define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK)
+#define QSPI_ICR_INST_MASK_SAMA7G5 GENMASK(15, 0)
 #define QSPI_ICR_OPT_MASK GENMASK(23, 16)
 #define QSPI_ICR_OPT(opt) (((opt) << 16) & QSPI_ICR_OPT_MASK)
@@ -107,6 +150,9 @@
 #define QSPI_IFR_WIDTH_QUAD_IO (4 << 0)
 #define QSPI_IFR_WIDTH_DUAL_CMD (5 << 0)
 #define QSPI_IFR_WIDTH_QUAD_CMD (6 << 0)
+#define QSPI_IFR_WIDTH_OCT_OUTPUT (7 << 0)
+#define QSPI_IFR_WIDTH_OCT_IO (8 << 0)
+#define QSPI_IFR_WIDTH_OCT_CMD (9 << 0)
 #define QSPI_IFR_INSTEN BIT(4)
 #define QSPI_IFR_ADDREN BIT(5)
 #define QSPI_IFR_OPTEN BIT(6)
@@ -117,19 +163,60 @@
 #define QSPI_IFR_OPTL_4BIT (2 << 8)
 #define QSPI_IFR_OPTL_8BIT (3 << 8)
 #define QSPI_IFR_ADDRL BIT(10)
+#define QSPI_IFR_ADDRL_SAMA7G5 GENMASK(11, 10)
 #define QSPI_IFR_TFRTYP_MEM BIT(12)
 #define QSPI_IFR_SAMA5D2_WRITE_TRSFR BIT(13)
 #define QSPI_IFR_CRM BIT(14)
+#define QSPI_IFR_DDREN BIT(15)
 #define QSPI_IFR_NBDUM_MASK GENMASK(20, 16)
 #define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK)
+#define QSPI_IFR_END BIT(22)
+#define QSPI_IFR_SMRM BIT(23)
 #define QSPI_IFR_APBTFRTYP_READ BIT(24) /* Defined in SAM9X60 */
+#define QSPI_IFR_DQSEN BIT(25)
+#define QSPI_IFR_DDRCMDEN BIT(26)
+#define QSPI_IFR_HFWBEN BIT(27)
+#define QSPI_IFR_PROTTYP GENMASK(29, 28)
+#define QSPI_IFR_PROTTYP_STD_SPI 0
+#define QSPI_IFR_PROTTYP_TWIN_QUAD 1
+#define QSPI_IFR_PROTTYP_OCTAFLASH 2
+#define QSPI_IFR_PROTTYP_HYPERFLASH 3

 /* Bitfields in QSPI_SMR (Scrambling Mode Register) */
 #define QSPI_SMR_SCREN BIT(0)
 #define QSPI_SMR_RVDIS BIT(1)
+#define QSPI_SMR_SCRKL BIT(2)
+
+/* Bitfields in QSPI_REFRESH (Refresh Register) */
+#define QSPI_REFRESH_DELAY_COUNTER GENMASK(31, 0)
+
+/* Bitfields in QSPI_WRACNT (Write Access Counter Register) */
+#define QSPI_WRACNT_NBWRA GENMASK(31, 0)
+
+/* Bitfields in QSPI_DLLCFG (DLL Configuration Register) */
+#define QSPI_DLLCFG_RANGE BIT(0)
+
+/* Bitfields in QSPI_PCALCFG (DLL Pad Calibration Configuration Register) */
+#define QSPI_PCALCFG_AAON BIT(0)
+#define QSPI_PCALCFG_DAPCAL BIT(1)
+#define QSPI_PCALCFG_DIFFPM BIT(2)
+#define QSPI_PCALCFG_CLKDIV GENMASK(6, 4)
+#define QSPI_PCALCFG_CALCNT GENMASK(16, 8)
+#define QSPI_PCALCFG_CALP GENMASK(27, 24)
+#define QSPI_PCALCFG_CALN GENMASK(31, 28)
+
+/* Bitfields in QSPI_PCALBP (DLL Pad Calibration Bypass Register) */
+#define QSPI_PCALBP_BPEN BIT(0)
+#define QSPI_PCALBP_CALPBP GENMASK(11, 8)
+#define QSPI_PCALBP_CALNBP GENMASK(19, 16)
+
+/* Bitfields in QSPI_TOUT (Timeout Register) */
+#define QSPI_TOUT_TCNTM GENMASK(15, 0)

 /* Bitfields in QSPI_WPMR (Write Protection Mode Register) */
 #define QSPI_WPMR_WPEN BIT(0)
+#define QSPI_WPMR_WPITEN BIT(1)
+#define QSPI_WPMR_WPCREN BIT(2)
 #define QSPI_WPMR_WPKEY_MASK GENMASK(31, 8)
 #define QSPI_WPMR_WPKEY(wpkey) (((wpkey) << 8) & QSPI_WPMR_WPKEY_MASK)
@@ -139,10 +226,42 @@
 #define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC)

 #define ATMEL_QSPI_TIMEOUT 1000 /* ms */
+#define ATMEL_QSPI_SYNC_TIMEOUT 300 /* ms */
+#define QSPI_DLLCFG_THRESHOLD_FREQ 90000000U
+#define QSPI_CALIB_TIME 2000 /* 2 us */
+
+/* Use PIO for small transfers. */
+#define ATMEL_QSPI_DMA_MIN_BYTES 16
+/**
+ * struct atmel_qspi_pcal - Pad Calibration Clock Division
+ * @pclk_rate: peripheral clock rate.
+ * @pclk_div: calibration clock division. The clock applied to the calibration
+ *	      cell is divided by pclk_div + 1.
+ */
+struct atmel_qspi_pcal {
+	u32 pclk_rate;
+	u8 pclk_div;
+};
+
+#define ATMEL_QSPI_PCAL_ARRAY_SIZE 8
+static const struct atmel_qspi_pcal pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE] = {
+	{25000000, 0},
+	{50000000, 1},
+	{75000000, 2},
+	{100000000, 3},
+	{125000000, 4},
+	{150000000, 5},
+	{175000000, 6},
+	{200000000, 7},
+};
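A worked example of the table above (editorial, not part of the patch): with a 90 MHz peripheral clock, the first entry whose pclk_rate is >= 90 MHz is {100000000, 3}, so the calibration cell is clocked at 90 MHz / (3 + 1) = 22.5 MHz. Every row keeps that derived clock at or below 25 MHz, and rates above 200 MHz fall back to the largest divider in atmel_qspi_set_pad_calibration() further down.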

 struct atmel_qspi_caps {
+	u32 max_speed_hz;
	bool has_qspick;
+	bool has_gclk;
	bool has_ricr;
+	bool octal;
+	bool has_dma;
 };

 struct atmel_qspi_ops;
@@ -152,6 +271,7 @@ struct atmel_qspi {
	void __iomem *mem;
	struct clk *pclk;
	struct clk *qspick;
+	struct clk *gclk;
	struct platform_device *pdev;
	const struct atmel_qspi_caps *caps;
	const struct atmel_qspi_ops *ops;
@@ -160,7 +280,12 @@ struct atmel_qspi {
	u32 irq_mask;
	u32 mr;
	u32 scr;
+	u32 target_max_speed_hz;
	struct completion cmd_completion;
+	struct completion dma_completion;
+	dma_addr_t mmap_phys_base;
+	struct dma_chan *rx_chan;
+	struct dma_chan *tx_chan;
 };

 struct atmel_qspi_ops {
@@ -187,6 +312,19 @@ static const struct atmel_qspi_mode atmel_qspi_modes[] = {
	{ 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
 };

+static const struct atmel_qspi_mode atmel_qspi_sama7g5_modes[] = {
+	{ 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
+	{ 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
+	{ 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
+	{ 1, 2, 2, QSPI_IFR_WIDTH_DUAL_IO },
+	{ 1, 4, 4, QSPI_IFR_WIDTH_QUAD_IO },
+	{ 2, 2, 2, QSPI_IFR_WIDTH_DUAL_CMD },
+	{ 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
+	{ 1, 1, 8, QSPI_IFR_WIDTH_OCT_OUTPUT },
+	{ 1, 8, 8, QSPI_IFR_WIDTH_OCT_IO },
+	{ 8, 8, 8, QSPI_IFR_WIDTH_OCT_CMD },
+};
+
 #ifdef VERBOSE_DEBUG
 static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
 {
@@ -209,6 +347,8 @@ static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
		return "IMR";
	case QSPI_SCR:
		return "SCR";
+	case QSPI_SR2:
+		return "SR2";
	case QSPI_IAR:
		return "IAR";
	case QSPI_ICR:
@@ -221,6 +361,18 @@ static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
		return "SMR";
	case QSPI_SKR:
		return "SKR";
+	case QSPI_REFRESH:
+		return "REFRESH";
+	case QSPI_WRACNT:
+		return "WRACNT";
+	case QSPI_DLLCFG:
+		return "DLLCFG";
+	case QSPI_PCALCFG:
+		return "PCALCFG";
+	case QSPI_PCALBP:
+		return "PCALBP";
+	case QSPI_TOUT:
+		return "TOUT";
	case QSPI_WPMR:
		return "WPMR";
	case QSPI_WPSR:
@@ -262,6 +414,28 @@ static void atmel_qspi_write(u32 value, struct atmel_qspi *aq, u32 offset)
	writel_relaxed(value, aq->regs + offset);
 }

+static int atmel_qspi_reg_sync(struct atmel_qspi *aq)
+{
+	u32 val;
+	int ret;
+
+	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+				 !(val & QSPI_SR2_SYNCBSY), 40,
+				 ATMEL_QSPI_SYNC_TIMEOUT);
+	return ret;
+}
+
+static int atmel_qspi_update_config(struct atmel_qspi *aq)
+{
+	int ret;
+
+	ret = atmel_qspi_reg_sync(aq);
+	if (ret)
+		return ret;
+	atmel_qspi_write(QSPI_CR_UPDCFG, aq, QSPI_CR);
+	return atmel_qspi_reg_sync(aq);
+}
+
 static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op,
					    const struct atmel_qspi_mode *mode)
 {
@@ -288,12 +462,31 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op)
	return -EOPNOTSUPP;
 }

+static int atmel_qspi_sama7g5_find_mode(const struct spi_mem_op *op)
+{
+	u32 i;
+
+	for (i = 0; i < ARRAY_SIZE(atmel_qspi_sama7g5_modes); i++)
+		if (atmel_qspi_is_compatible(op, &atmel_qspi_sama7g5_modes[i]))
+			return i;
+
+	return -EOPNOTSUPP;
+}
+
 static bool atmel_qspi_supports_op(struct spi_mem *mem,
				   const struct spi_mem_op *op)
 {
+	struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
+
	if (!spi_mem_default_supports_op(mem, op))
		return false;

+	if (aq->caps->octal) {
+		if (atmel_qspi_sama7g5_find_mode(op) < 0)
+			return false;
+		else
+			return true;
+	}
+
	if (atmel_qspi_find_mode(op) < 0)
		return false;

@@ -305,6 +498,25 @@ static bool atmel_qspi_supports_op(struct spi_mem *mem,
	return true;
 }

+/*
+ * If the QSPI controller is set in regular SPI mode, set it in
+ * Serial Memory Mode (SMM).
+ */
+static int atmel_qspi_set_serial_memory_mode(struct atmel_qspi *aq)
+{
+	int ret = 0;
+
+	if (!(aq->mr & QSPI_MR_SMM)) {
+		aq->mr |= QSPI_MR_SMM;
+		atmel_qspi_write(aq->mr, aq, QSPI_MR);
+
+		if (aq->caps->has_gclk)
+			ret = atmel_qspi_update_config(aq);
+	}
+
+	return ret;
+}
+
 static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
			      const struct spi_mem_op *op, u32 *offset)
 {
@@ -384,14 +596,9 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
		ifr |= QSPI_IFR_TFRTYP_MEM;
	}

-	/*
-	 * If the QSPI controller is set in regular SPI mode, set it in
-	 * Serial Memory Mode (SMM).
-	 */
-	if (!(aq->mr & QSPI_MR_SMM)) {
-		aq->mr |= QSPI_MR_SMM;
-		atmel_qspi_write(aq->mr, aq, QSPI_MR);
-	}
+	mode = atmel_qspi_set_serial_memory_mode(aq);
+	if (mode < 0)
+		return mode;

	/* Clear pending interrupts */
	(void)atmel_qspi_read(aq, QSPI_SR);
@@ -474,6 +681,262 @@ static int atmel_qspi_transfer(struct spi_mem *mem,
	return atmel_qspi_wait_for_completion(aq, QSPI_SR_CMD_COMPLETED);
 }

+static int atmel_qspi_sama7g5_set_cfg(struct atmel_qspi *aq,
+				      const struct spi_mem_op *op, u32 *offset)
+{
+	u32 iar, icr, ifr;
+	int mode, ret;
+
+	iar = 0;
+	icr = FIELD_PREP(QSPI_ICR_INST_MASK_SAMA7G5, op->cmd.opcode);
+	ifr = QSPI_IFR_INSTEN;
+
+	mode = atmel_qspi_sama7g5_find_mode(op);
+	if (mode < 0)
+		return mode;
+	ifr |= atmel_qspi_sama7g5_modes[mode].config;
+
+	if (op->dummy.buswidth && op->dummy.nbytes) {
+		if (op->addr.dtr && op->dummy.dtr && op->data.dtr)
+			ifr |= QSPI_IFR_NBDUM(op->dummy.nbytes * 8 /
+					      (2 * op->dummy.buswidth));
+		else
+			ifr |= QSPI_IFR_NBDUM(op->dummy.nbytes * 8 /
+					      op->dummy.buswidth);
+	}
+
+	if (op->addr.buswidth && op->addr.nbytes) {
+		ifr |= FIELD_PREP(QSPI_IFR_ADDRL_SAMA7G5, op->addr.nbytes - 1) |
+		       QSPI_IFR_ADDREN;
+		iar = FIELD_PREP(QSPI_IAR_ADDR, op->addr.val);
+	}
+
+	if (op->addr.dtr && op->dummy.dtr && op->data.dtr) {
+		ifr |= QSPI_IFR_DDREN;
+		if (op->cmd.dtr)
+			ifr |= QSPI_IFR_DDRCMDEN;
+
+		ifr |= QSPI_IFR_DQSEN;
+	}
+
+	if (op->cmd.buswidth == 8 || op->addr.buswidth == 8 ||
+	    op->data.buswidth == 8)
+		ifr |= FIELD_PREP(QSPI_IFR_PROTTYP, QSPI_IFR_PROTTYP_OCTAFLASH);
+
+	/* offset of the data access in the QSPI memory space */
+	*offset = iar;
+
+	/* Set data enable */
+	if (op->data.nbytes) {
+		ifr |= QSPI_IFR_DATAEN;
+
+		if (op->addr.nbytes)
+			ifr |= QSPI_IFR_TFRTYP_MEM;
+	}
+
+	ret = atmel_qspi_set_serial_memory_mode(aq);
+	if (ret < 0)
+		return ret;
+
+	/* Clear pending interrupts */
+	(void)atmel_qspi_read(aq, QSPI_SR);
+
+	/* Set QSPI Instruction Frame registers */
+	if (op->addr.nbytes && !op->data.nbytes)
+		atmel_qspi_write(iar, aq, QSPI_IAR);
+
+	if (op->data.dir == SPI_MEM_DATA_IN) {
+		atmel_qspi_write(icr, aq, QSPI_RICR);
+	} else {
+		atmel_qspi_write(icr, aq, QSPI_WICR);
+		if (op->data.nbytes)
+			atmel_qspi_write(FIELD_PREP(QSPI_WRACNT_NBWRA,
+						    op->data.nbytes),
+					 aq, QSPI_WRACNT);
+	}
+
+	atmel_qspi_write(ifr, aq, QSPI_IFR);
+
+	return atmel_qspi_update_config(aq);
+}
+
+static void atmel_qspi_dma_callback(void *param)
+{
+	struct atmel_qspi *aq = param;
+
+	complete(&aq->dma_completion);
+}
+
+static int atmel_qspi_dma_xfer(struct atmel_qspi *aq, struct dma_chan *chan,
+			       dma_addr_t dma_dst, dma_addr_t dma_src,
+			       unsigned int len)
+{
+	struct dma_async_tx_descriptor *tx;
+	dma_cookie_t cookie;
+	int ret;
+
+	tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!tx) {
+		dev_err(&aq->pdev->dev, "device_prep_dma_memcpy error\n");
+		return -EIO;
+	}
+
+	reinit_completion(&aq->dma_completion);
+	tx->callback = atmel_qspi_dma_callback;
+	tx->callback_param = aq;
+	cookie = tx->tx_submit(tx);
+	ret = dma_submit_error(cookie);
+	if (ret) {
+		dev_err(&aq->pdev->dev, "dma_submit_error %d\n", cookie);
+		return ret;
+	}
+
+	dma_async_issue_pending(chan);
+	ret = wait_for_completion_timeout(&aq->dma_completion,
+					  msecs_to_jiffies(20 * ATMEL_QSPI_TIMEOUT));
+	if (ret == 0) {
+		dmaengine_terminate_sync(chan);
+		dev_err(&aq->pdev->dev, "DMA wait_for_completion_timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static int atmel_qspi_dma_rx_xfer(struct spi_mem *mem,
+				  const struct spi_mem_op *op,
+				  struct sg_table *sgt, loff_t loff)
+{
+	struct atmel_qspi *aq =
+		spi_controller_get_devdata(mem->spi->controller);
+	struct scatterlist *sg;
+	dma_addr_t dma_src;
+	unsigned int i, len;
+	int ret;
+
+	dma_src = aq->mmap_phys_base + loff;
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		len = sg_dma_len(sg);
+		ret = atmel_qspi_dma_xfer(aq, aq->rx_chan, sg_dma_address(sg),
+					  dma_src, len);
+		if (ret)
+			return ret;
+		dma_src += len;
+	}
+
+	return 0;
+}
+
+static int atmel_qspi_dma_tx_xfer(struct spi_mem *mem,
+				  const struct spi_mem_op *op,
+				  struct sg_table *sgt, loff_t loff)
+{
+	struct atmel_qspi *aq =
+		spi_controller_get_devdata(mem->spi->controller);
+	struct scatterlist *sg;
+	dma_addr_t dma_dst;
+	unsigned int i, len;
+	int ret;
+
+	dma_dst = aq->mmap_phys_base + loff;
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		len = sg_dma_len(sg);
+		ret = atmel_qspi_dma_xfer(aq, aq->tx_chan, dma_dst,
+					  sg_dma_address(sg), len);
+		if (ret)
+			return ret;
+		dma_dst += len;
+	}
+
+	return 0;
+}
+
+static int atmel_qspi_dma_transfer(struct spi_mem *mem,
+				   const struct spi_mem_op *op, loff_t loff)
+{
+	struct sg_table sgt;
+	int ret;
+
+	ret = spi_controller_dma_map_mem_op_data(mem->spi->controller, op,
+						 &sgt);
+	if (ret)
+		return ret;
+
+	if (op->data.dir == SPI_MEM_DATA_IN)
+		ret = atmel_qspi_dma_rx_xfer(mem, op, &sgt, loff);
+	else
+		ret = atmel_qspi_dma_tx_xfer(mem, op, &sgt, loff);
+
+	spi_controller_dma_unmap_mem_op_data(mem->spi->controller, op, &sgt);
+
+	return ret;
+}
+
+static int atmel_qspi_sama7g5_transfer(struct spi_mem *mem,
+				       const struct spi_mem_op *op, u32 offset)
+{
+	struct atmel_qspi *aq =
+		spi_controller_get_devdata(mem->spi->controller);
+	u32 val;
+	int ret;
+
+	if (!op->data.nbytes) {
+		/* Start the transfer. */
+		ret = atmel_qspi_reg_sync(aq);
+		if (ret)
+			return ret;
+		atmel_qspi_write(QSPI_CR_STTFR, aq, QSPI_CR);
+
+		return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA);
+	}
+
+	/* Send/Receive data. */
+	if (op->data.dir == SPI_MEM_DATA_IN) {
+		if (aq->rx_chan && op->addr.nbytes &&
+		    op->data.nbytes > ATMEL_QSPI_DMA_MIN_BYTES) {
+			ret = atmel_qspi_dma_transfer(mem, op, offset);
+			if (ret)
+				return ret;
+		} else {
+			memcpy_fromio(op->data.buf.in, aq->mem + offset,
+				      op->data.nbytes);
+		}
+
+		if (op->addr.nbytes) {
+			ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+						 !(val & QSPI_SR2_RBUSY), 40,
+						 ATMEL_QSPI_SYNC_TIMEOUT);
+			if (ret)
+				return ret;
+		}
+	} else {
+		if (aq->tx_chan && op->addr.nbytes &&
+		    op->data.nbytes > ATMEL_QSPI_DMA_MIN_BYTES) {
+			ret = atmel_qspi_dma_transfer(mem, op, offset);
+			if (ret)
+				return ret;
+		} else {
+			memcpy_toio(aq->mem + offset, op->data.buf.out,
+				    op->data.nbytes);
+		}
+
+		ret = atmel_qspi_wait_for_completion(aq, QSPI_SR_LWRA);
+		if (ret)
+			return ret;
+	}
+
+	/* Release the chip-select. */
+	ret = atmel_qspi_reg_sync(aq);
+	if (ret)
+		return ret;
+	atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
+
+	return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA);
+}
+
 static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
 {
	struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
@@ -518,6 +981,159 @@ static const struct spi_controller_mem_ops atmel_qspi_mem_ops = {
	.get_name = atmel_qspi_get_name
 };

+static int atmel_qspi_set_pad_calibration(struct atmel_qspi *aq)
+{
+	unsigned long pclk_rate;
+	u32 status, val;
+	int i, ret;
+	u8 pclk_div = 0;
+
+	pclk_rate = clk_get_rate(aq->pclk);
+	if (!pclk_rate)
+		return -EINVAL;
+
+	for (i = 0; i < ATMEL_QSPI_PCAL_ARRAY_SIZE; i++) {
+		if (pclk_rate <= pcal[i].pclk_rate) {
+			pclk_div = pcal[i].pclk_div;
+			break;
+		}
+	}
+
+	/*
+	 * Use the biggest divider in case the peripheral clock exceeds
+	 * 200MHZ.
+	 */
+	if (pclk_rate > pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE - 1].pclk_rate)
+		pclk_div = pcal[ATMEL_QSPI_PCAL_ARRAY_SIZE - 1].pclk_div;
+
+	/* Disable QSPI while configuring the pad calibration. */
+	status = atmel_qspi_read(aq, QSPI_SR2);
+	if (status & QSPI_SR2_QSPIENS) {
+		ret = atmel_qspi_reg_sync(aq);
+		if (ret)
+			return ret;
+		atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
+	}
+
+	/*
+	 * The analog circuitry is not shut down at the end of the calibration
+	 * and the start-up time is only required for the first calibration
+	 * sequence, thus increasing performance. Set the delay between the Pad
+	 * calibration analog circuitry and the calibration request to 2us.
+	 */
+	atmel_qspi_write(QSPI_PCALCFG_AAON |
+			 FIELD_PREP(QSPI_PCALCFG_CLKDIV, pclk_div) |
+			 FIELD_PREP(QSPI_PCALCFG_CALCNT,
+				    2 * (pclk_rate / 1000000)),
+			 aq, QSPI_PCALCFG);
+
+	/* DLL On + start calibration. */
+	atmel_qspi_write(QSPI_CR_DLLON | QSPI_CR_STPCAL, aq, QSPI_CR);
+
+	/* Check synchronization status before updating configuration. */
+	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+				 (val & QSPI_SR2_DLOCK) &&
+				 !(val & QSPI_SR2_CALBSY), 40,
+				 ATMEL_QSPI_TIMEOUT);
+
+	/* Refresh analogic blocks every 1 ms.*/
+	atmel_qspi_write(FIELD_PREP(QSPI_REFRESH_DELAY_COUNTER,
+				    aq->target_max_speed_hz / 1000),
+			 aq, QSPI_REFRESH);
+
+	return ret;
+}
+
+static int atmel_qspi_set_gclk(struct atmel_qspi *aq)
+{
+	u32 status, val;
+	int ret;
+
+	/* Disable DLL before setting GCLK */
+	status = atmel_qspi_read(aq, QSPI_SR2);
+	if (status & QSPI_SR2_DLOCK) {
+		atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR);
+
+		ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+					 !(val & QSPI_SR2_DLOCK), 40,
+					 ATMEL_QSPI_TIMEOUT);
+		if (ret)
+			return ret;
+	}
+
+	if (aq->target_max_speed_hz > QSPI_DLLCFG_THRESHOLD_FREQ)
+		atmel_qspi_write(QSPI_DLLCFG_RANGE, aq, QSPI_DLLCFG);
+	else
+		atmel_qspi_write(0, aq, QSPI_DLLCFG);
+
+	ret = clk_set_rate(aq->gclk, aq->target_max_speed_hz);
+	if (ret) {
+		dev_err(&aq->pdev->dev, "Failed to set generic clock rate.\n");
+		return ret;
+	}
+
+	/* Enable the QSPI generic clock */
+	ret = clk_prepare_enable(aq->gclk);
+	if (ret)
+		dev_err(&aq->pdev->dev, "Failed to enable generic clock.\n");
+
+	return ret;
+}
+
+static int atmel_qspi_sama7g5_init(struct atmel_qspi *aq)
+{
+	u32 val;
+	int ret;
+
+	ret = atmel_qspi_set_gclk(aq);
+	if (ret)
+		return ret;
+
+	if (aq->caps->octal) {
+		ret = atmel_qspi_set_pad_calibration(aq);
+		if (ret)
+			return ret;
+	} else {
+		atmel_qspi_write(QSPI_CR_DLLON, aq, QSPI_CR);
+		ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+					 (val & QSPI_SR2_DLOCK), 40,
+					 ATMEL_QSPI_TIMEOUT);
+	}
+
+	/* Set the QSPI controller by default in Serial Memory Mode */
+	aq->mr |= QSPI_MR_DQSDLYEN;
+	ret = atmel_qspi_set_serial_memory_mode(aq);
+	if (ret < 0)
+		return ret;
+
+	/* Enable the QSPI controller. */
+	atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
+	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+				 val & QSPI_SR2_QSPIENS, 40,
+				 ATMEL_QSPI_SYNC_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (aq->caps->octal) {
+		ret = readl_poll_timeout(aq->regs + QSPI_SR, val,
+					 val & QSPI_SR_RFRSHD, 40,
+					 ATMEL_QSPI_TIMEOUT);
+	}
+
+	atmel_qspi_write(QSPI_TOUT_TCNTM, aq, QSPI_TOUT);
+	return ret;
+}
+
+static int atmel_qspi_sama7g5_setup(struct spi_device *spi)
+{
+	struct atmel_qspi *aq = spi_controller_get_devdata(spi->controller);
+
+	/* The controller can communicate with a single peripheral device (target). */
+	aq->target_max_speed_hz = spi->max_speed_hz;
+
+	return atmel_qspi_sama7g5_init(aq);
+}
+
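For orientation, here is an illustrative spi-mem operation that would exercise the octal DTR path configured above. The 0xee13 opcode, phase layout and the helper name example_octal_dtr_read() are hypothetical; only the SPI_MEM_OP() helpers and the .dtr/.nbytes fields are standard spi-mem API.

#include <linux/spi/spi-mem.h>

/* Hypothetical 8D-8D-8D read; opcode and layout are illustrative only. */
static int example_octal_dtr_read(struct spi_mem *mem, void *buf, u64 addr,
				  size_t len)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0xee13, 8),
			   SPI_MEM_OP_ADDR(4, addr, 8),
			   SPI_MEM_OP_DUMMY(16, 8),
			   SPI_MEM_OP_DATA_IN(len, buf, 8));

	/* DTR on every phase selects the DDREN/DDRCMDEN/DQSEN path above. */
	op.cmd.dtr = op.addr.dtr = op.dummy.dtr = op.data.dtr = true;
	op.cmd.nbytes = 2; /* two-byte opcode, as QSPI_ICR_INST_MASK_SAMA7G5 allows */

	return spi_mem_exec_op(mem, &op);
}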
 static int atmel_qspi_setup(struct spi_device *spi)
 {
	struct spi_controller *ctrl = spi->controller;
@@ -532,6 +1148,9 @@ static int atmel_qspi_setup(struct spi_device *spi)
	if (!spi->max_speed_hz)
		return -EINVAL;

+	if (aq->caps->has_gclk)
+		return atmel_qspi_sama7g5_setup(spi);
+
	src_rate = clk_get_rate(aq->pclk);
	if (!src_rate)
		return -EINVAL;
@@ -617,17 +1236,29 @@ static int atmel_qspi_set_cs_timing(struct spi_device *spi)
	return 0;
 }

-static void atmel_qspi_init(struct atmel_qspi *aq)
+static int atmel_qspi_init(struct atmel_qspi *aq)
 {
+	int ret;
+
+	if (aq->caps->has_gclk) {
+		ret = atmel_qspi_reg_sync(aq);
+		if (ret)
+			return ret;
+		atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
+		return 0;
+	}
+
	/* Reset the QSPI controller */
	atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);

	/* Set the QSPI controller by default in Serial Memory Mode */
-	aq->mr |= QSPI_MR_SMM;
-	atmel_qspi_write(aq->mr, aq, QSPI_MR);
+	ret = atmel_qspi_set_serial_memory_mode(aq);
+	if (ret < 0)
+		return ret;

	/* Enable the QSPI controller */
	atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
+	return 0;
 }

 static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
@@ -649,11 +1280,59 @@ static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
	return IRQ_HANDLED;
 }

+static int atmel_qspi_dma_init(struct spi_controller *ctrl)
+{
+	struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
+	int ret;
+
+	aq->rx_chan = dma_request_chan(&aq->pdev->dev, "rx");
+	if (IS_ERR(aq->rx_chan)) {
+		aq->rx_chan = NULL;
+		return dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->rx_chan),
+				     "RX DMA channel is not available\n");
+	}
+
+	aq->tx_chan = dma_request_chan(&aq->pdev->dev, "tx");
+	if (IS_ERR(aq->tx_chan)) {
+		ret = dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->tx_chan),
+				    "TX DMA channel is not available\n");
+		goto release_rx_chan;
+	}
+
+	ctrl->dma_rx = aq->rx_chan;
+	ctrl->dma_tx = aq->tx_chan;
+	init_completion(&aq->dma_completion);
+
+	dev_info(&aq->pdev->dev, "Using %s (tx) and %s (rx) for DMA transfers\n",
+		 dma_chan_name(aq->tx_chan), dma_chan_name(aq->rx_chan));
+
+	return 0;
+
+release_rx_chan:
+	dma_release_channel(aq->rx_chan);
+	aq->rx_chan = NULL;
+	aq->tx_chan = NULL;
+	return ret;
+}
+
+static void atmel_qspi_dma_release(struct atmel_qspi *aq)
+{
+	if (aq->rx_chan)
+		dma_release_channel(aq->rx_chan);
+	if (aq->tx_chan)
+		dma_release_channel(aq->tx_chan);
+}
+
 static const struct atmel_qspi_ops atmel_qspi_ops = {
	.set_cfg = atmel_qspi_set_cfg,
	.transfer = atmel_qspi_transfer,
 };

+static const struct atmel_qspi_ops atmel_qspi_sama7g5_ops = {
+	.set_cfg = atmel_qspi_sama7g5_set_cfg,
+	.transfer = atmel_qspi_sama7g5_transfer,
+};
+
 static int atmel_qspi_probe(struct platform_device *pdev)
 {
	struct spi_controller *ctrl;
@@ -665,7 +1344,27 @@ static int atmel_qspi_probe(struct platform_device *pdev)
	if (!ctrl)
		return -ENOMEM;

+	aq = spi_controller_get_devdata(ctrl);
+
+	aq->caps = of_device_get_match_data(&pdev->dev);
+	if (!aq->caps) {
+		dev_err(&pdev->dev, "Could not retrieve QSPI caps\n");
+		return -EINVAL;
+	}
+
+	init_completion(&aq->cmd_completion);
+	aq->pdev = pdev;
+
	ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
+	if (aq->caps->octal)
+		ctrl->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
+
+	if (aq->caps->has_gclk)
+		aq->ops = &atmel_qspi_sama7g5_ops;
+	else
+		aq->ops = &atmel_qspi_ops;
+
+	ctrl->max_speed_hz = aq->caps->max_speed_hz;
	ctrl->setup = atmel_qspi_setup;
	ctrl->set_cs_timing = atmel_qspi_set_cs_timing;
	ctrl->bus_num = -1;
@@ -674,12 +1373,6 @@ static int atmel_qspi_probe(struct platform_device *pdev)
	ctrl->dev.of_node = pdev->dev.of_node;
	platform_set_drvdata(pdev, ctrl);

-	aq = spi_controller_get_devdata(ctrl);
-
-	init_completion(&aq->cmd_completion);
-	aq->pdev = pdev;
-	aq->ops = &atmel_qspi_ops;
-
	/* Map the registers */
	aq->regs = devm_platform_ioremap_resource_byname(pdev, "qspi_base");
	if (IS_ERR(aq->regs))
@@ -694,87 +1387,115 @@ static int atmel_qspi_probe(struct platform_device *pdev)
				     "missing AHB memory\n");

	aq->mmap_size = resource_size(res);
+	aq->mmap_phys_base = (dma_addr_t)res->start;

	/* Get the peripheral clock */
-	aq->pclk = devm_clk_get(&pdev->dev, "pclk");
+	aq->pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
	if (IS_ERR(aq->pclk))
-		aq->pclk = devm_clk_get(&pdev->dev, NULL);
+		aq->pclk = devm_clk_get_enabled(&pdev->dev, NULL);

	if (IS_ERR(aq->pclk))
		return dev_err_probe(&pdev->dev, PTR_ERR(aq->pclk),
				     "missing peripheral clock\n");

-	/* Enable the peripheral clock */
-	err = clk_prepare_enable(aq->pclk);
-	if (err)
-		return dev_err_probe(&pdev->dev, err,
-				     "failed to enable the peripheral clock\n");
-
-	aq->caps = of_device_get_match_data(&pdev->dev);
-	if (!aq->caps) {
-		dev_err(&pdev->dev, "Could not retrieve QSPI caps\n");
-		err = -EINVAL;
-		goto disable_pclk;
-	}
-
	if (aq->caps->has_qspick) {
		/* Get the QSPI system clock */
-		aq->qspick = devm_clk_get(&pdev->dev, "qspick");
+		aq->qspick = devm_clk_get_enabled(&pdev->dev, "qspick");
		if (IS_ERR(aq->qspick)) {
			dev_err(&pdev->dev, "missing system clock\n");
			err = PTR_ERR(aq->qspick);
-			goto disable_pclk;
+			return err;
		}

-		/* Enable the QSPI system clock */
-		err = clk_prepare_enable(aq->qspick);
-		if (err) {
-			dev_err(&pdev->dev,
-				"failed to enable the QSPI system clock\n");
-			goto disable_pclk;
+	} else if (aq->caps->has_gclk) {
+		/* Get the QSPI generic clock */
+		aq->gclk = devm_clk_get(&pdev->dev, "gclk");
+		if (IS_ERR(aq->gclk)) {
+			dev_err(&pdev->dev, "missing Generic clock\n");
+			err = PTR_ERR(aq->gclk);
+			return err;
		}
	}

+	if (aq->caps->has_dma) {
+		err = atmel_qspi_dma_init(ctrl);
+		if (err == -EPROBE_DEFER)
+			return err;
+	}
+
	/* Request the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = irq;
-		goto disable_qspick;
+		goto dma_release;
	}
	err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt,
			       0, dev_name(&pdev->dev), aq);
	if (err)
-		goto disable_qspick;
+		goto dma_release;

	pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
	pm_runtime_use_autosuspend(&pdev->dev);
-	pm_runtime_set_active(&pdev->dev);
-	pm_runtime_enable(&pdev->dev);
-	pm_runtime_get_noresume(&pdev->dev);
+	devm_pm_runtime_set_active_enabled(&pdev->dev);
+	devm_pm_runtime_get_noresume(&pdev->dev);

-	atmel_qspi_init(aq);
+	err = atmel_qspi_init(aq);
+	if (err)
+		goto dma_release;

	err = spi_register_controller(ctrl);
-	if (err) {
-		pm_runtime_put_noidle(&pdev->dev);
-		pm_runtime_disable(&pdev->dev);
-		pm_runtime_set_suspended(&pdev->dev);
-		pm_runtime_dont_use_autosuspend(&pdev->dev);
-		goto disable_qspick;
-	}
+	if (err)
+		goto dma_release;
+
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

-disable_qspick:
-	clk_disable_unprepare(aq->qspick);
-disable_pclk:
-	clk_disable_unprepare(aq->pclk);
+dma_release:
+	if (aq->caps->has_dma)
+		atmel_qspi_dma_release(aq);

	return err;
 }

+static int atmel_qspi_sama7g5_suspend(struct atmel_qspi *aq)
+{
+	int ret;
+	u32 val;
+
+	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+				 !(val & QSPI_SR2_RBUSY) &&
+				 (val & QSPI_SR2_HIDLE), 40,
+				 ATMEL_QSPI_SYNC_TIMEOUT);
+	if (ret)
+		return ret;
+
+	atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
+	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+				 !(val & QSPI_SR2_QSPIENS), 40,
+				 ATMEL_QSPI_SYNC_TIMEOUT);
+	if (ret)
+		return ret;
+
+	clk_disable_unprepare(aq->gclk);
+
+	atmel_qspi_write(QSPI_CR_DLLOFF, aq, QSPI_CR);
+	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+				 !(val & QSPI_SR2_DLOCK), 40,
+				 ATMEL_QSPI_TIMEOUT);
+	if (ret)
+		return ret;
+
+	ret = readl_poll_timeout(aq->regs + QSPI_SR2, val,
+				 !(val & QSPI_SR2_CALBSY), 40,
+				 ATMEL_QSPI_TIMEOUT);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 static void atmel_qspi_remove(struct platform_device *pdev)
 {
	struct spi_controller *ctrl = platform_get_drvdata(pdev);
@@ -785,9 +1506,17 @@ static void atmel_qspi_remove(struct platform_device *pdev)

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret >= 0) {
+		if (aq->caps->has_dma)
+			atmel_qspi_dma_release(aq);
+
+		if (aq->caps->has_gclk) {
+			ret = atmel_qspi_sama7g5_suspend(aq);
+			if (ret)
+				dev_warn(&pdev->dev, "Failed to de-init device on remove: %d\n", ret);
+			return;
+		}
+
		atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
-		clk_disable(aq->qspick);
-		clk_disable(aq->pclk);
	} else {
		/*
		 * atmel_qspi_runtime_{suspend,resume} just disable and enable
@@ -796,13 +1525,6 @@ static void atmel_qspi_remove(struct platform_device *pdev)
		 */
		dev_warn(&pdev->dev, "Failed to resume device on remove\n");
	}
-
-	clk_unprepare(aq->qspick);
-	clk_unprepare(aq->pclk);
-
-	pm_runtime_disable(&pdev->dev);
-	pm_runtime_dont_use_autosuspend(&pdev->dev);
-	pm_runtime_put_noidle(&pdev->dev);
 }

 static int __maybe_unused atmel_qspi_suspend(struct device *dev)
@@ -815,6 +1537,12 @@ static int __maybe_unused atmel_qspi_suspend(struct device *dev)
	if (ret < 0)
		return ret;

+	if (aq->caps->has_gclk) {
+		ret = atmel_qspi_sama7g5_suspend(aq);
+		clk_disable_unprepare(aq->pclk);
+		return ret;
+	}
+
	atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);

	pm_runtime_mark_last_busy(dev);
@@ -842,6 +1570,9 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev)
		return ret;
	}

+	if (aq->caps->has_gclk)
+		return atmel_qspi_sama7g5_init(aq);
+
	ret = pm_runtime_force_resume(dev);
	if (ret < 0)
		return ret;
@@ -897,6 +1628,19 @@ static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps = {
	.has_ricr = true,
 };

+static const struct atmel_qspi_caps atmel_sama7g5_ospi_caps = {
+	.max_speed_hz = SAMA7G5_QSPI0_MAX_SPEED_HZ,
+	.has_gclk = true,
+	.octal = true,
+	.has_dma = true,
+};
+
+static const struct atmel_qspi_caps atmel_sama7g5_qspi_caps = {
+	.max_speed_hz = SAMA7G5_QSPI1_SDR_MAX_SPEED_HZ,
+	.has_gclk = true,
+	.has_dma = true,
+};
+
 static const struct of_device_id atmel_qspi_dt_ids[] = {
	{
		.compatible = "atmel,sama5d2-qspi",
@@ -906,6 +1650,15 @@ static const struct of_device_id atmel_qspi_dt_ids[] = {
		.compatible = "microchip,sam9x60-qspi",
		.data = &atmel_sam9x60_qspi_caps,
	},
+	{
+		.compatible = "microchip,sama7g5-ospi",
+		.data = &atmel_sama7g5_ospi_caps,
+	},
+	{
+		.compatible = "microchip,sama7g5-qspi",
+		.data = &atmel_sama7g5_qspi_caps,
+	},
+
	{ /* sentinel */ }
 };
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
index d30a21b0b05f..17fc0b17e756 100644
--- a/drivers/spi/spi-amd.c
+++ b/drivers/spi/spi-amd.c
@@ -298,19 +298,16 @@ static const struct amd_spi_freq amd_spi_freq[] = {
	{ AMD_SPI_MIN_HZ, F_800KHz, 0},
 };

-static int amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz)
+static void amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz)
 {
	unsigned int i, spd7_val, alt_spd;

-	if (speed_hz < AMD_SPI_MIN_HZ)
-		return -EINVAL;
-
-	for (i = 0; i < ARRAY_SIZE(amd_spi_freq); i++)
+	for (i = 0; i < ARRAY_SIZE(amd_spi_freq)-1; i++)
		if (speed_hz >= amd_spi_freq[i].speed_hz)
			break;

	if (amd_spi->speed_hz == amd_spi_freq[i].speed_hz)
-		return 0;
+		return;

	amd_spi->speed_hz = amd_spi_freq[i].speed_hz;

@@ -329,8 +326,6 @@ static int amd_set_spi_freq(struct amd_spi *amd_spi, u32 speed_hz)
		amd_spi_setclear_reg32(amd_spi, AMD_SPI_SPEED_REG, spd7_val,
				       AMD_SPI_SPD7_MASK);
	}
-
-	return 0;
 }

 static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi,
@@ -479,6 +474,9 @@ static bool amd_spi_supports_op(struct spi_mem *mem,
			return false;
	}

+	if (op->max_freq < mem->spi->controller->min_speed_hz)
+		return false;
+
	return spi_mem_default_supports_op(mem, op);
 }

@@ -672,13 +670,10 @@ static int amd_spi_exec_mem_op(struct spi_mem *mem,
			       const struct spi_mem_op *op)
 {
	struct amd_spi *amd_spi;
-	int ret;

	amd_spi = spi_controller_get_devdata(mem->spi->controller);

-	ret = amd_set_spi_freq(amd_spi, mem->spi->max_speed_hz);
-	if (ret)
-		return ret;
+	amd_set_spi_freq(amd_spi, op->max_freq);

	if (amd_spi->version == AMD_SPI_V2)
		amd_set_spi_addr_mode(amd_spi, op);
@@ -693,10 +688,10 @@ static int amd_spi_exec_mem_op(struct spi_mem *mem,
		amd_spi_mem_data_out(amd_spi, op);
		break;
	default:
-		ret = -EOPNOTSUPP;
+		return -EOPNOTSUPP;
	}

-	return ret;
+	return 0;
 }

 static const struct spi_controller_mem_ops amd_spi_mem_ops = {
@@ -705,6 +700,10 @@ static const struct spi_controller_mem_ops amd_spi_mem_ops = {
	.supports_op = amd_spi_supports_op,
 };

+static const struct spi_controller_mem_caps amd_spi_mem_caps = {
+	.per_op_freq = true,
+};
+
 static int amd_spi_host_transfer(struct spi_controller *host,
				 struct spi_message *msg)
 {
@@ -782,6 +781,7 @@ static int amd_spi_probe(struct platform_device *pdev)
	host->setup = amd_spi_host_setup;
	host->transfer_one_message = amd_spi_host_transfer;
	host->mem_ops = &amd_spi_mem_ops;
+	host->mem_caps = &amd_spi_mem_caps;
	host->max_transfer_size = amd_spi_max_transfer_size;
	host->max_message_size = amd_spi_max_transfer_size;

diff --git a/drivers/spi/spi-amlogic-spifc-a1.c b/drivers/spi/spi-amlogic-spifc-a1.c
index fadf6667cd51..18c9aa2cbc29 100644
--- a/drivers/spi/spi-amlogic-spifc-a1.c
+++ b/drivers/spi/spi-amlogic-spifc-a1.c
@@ -259,7 +259,7 @@ static int amlogic_spifc_a1_exec_op(struct spi_mem *mem,
	size_t data_size = op->data.nbytes;
	int ret;

-	ret = amlogic_spifc_a1_set_freq(spifc, mem->spi->max_speed_hz);
+	ret = amlogic_spifc_a1_set_freq(spifc, op->max_freq);
	if (ret)
		return ret;

@@ -320,6 +320,10 @@ static const struct spi_controller_mem_ops amlogic_spifc_a1_mem_ops = {
	.adjust_op_size = amlogic_spifc_a1_adjust_op_size,
 };

+static const struct spi_controller_mem_caps amlogic_spifc_a1_mem_caps = {
+	.per_op_freq = true,
+};
+
 static int amlogic_spifc_a1_probe(struct platform_device *pdev)
 {
	struct spi_controller *ctrl;
@@ -356,6 +360,7 @@ static int amlogic_spifc_a1_probe(struct platform_device *pdev)
	ctrl->bits_per_word_mask = SPI_BPW_MASK(8);
	ctrl->auto_runtime_pm = true;
	ctrl->mem_ops = &amlogic_spifc_a1_mem_ops;
+	ctrl->mem_caps = &amlogic_spifc_a1_mem_caps;
	ctrl->min_speed_hz = SPIFC_A1_MIN_HZ;
	ctrl->max_speed_hz = SPIFC_A1_MAX_HZ;
	ctrl->mode_bits = (SPI_RX_DUAL | SPI_TX_DUAL |
diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c
index e9beae95dded..62a11142bd63 100644
--- a/drivers/spi/spi-aspeed-smc.c
+++ b/drivers/spi/spi-aspeed-smc.c
@@ -303,13 +303,6 @@ static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
	u32 ctl_val;
	int ret = 0;

-	dev_dbg(aspi->dev,
-		"CE%d %s OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x len:%#x",
-		chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write",
-		op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
-		op->dummy.buswidth, op->data.buswidth,
-		op->addr.nbytes, op->dummy.nbytes, op->data.nbytes);
-
	addr_mode = readl(aspi->regs + CE_CTRL_REG);
	addr_mode_backup = addr_mode;
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 7c252126b33e..da9840957778 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -2,11 +2,15 @@
 /*
  * SPI-Engine SPI controller driver
  * Copyright 2015 Analog Devices Inc.
+ * Copyright 2024 BayLibre, SAS
  * Author: Lars-Peter Clausen <lars@metafoo.de>
  */

+#include <linux/bitfield.h>
+#include <linux/bitops.h>
 #include <linux/clk.h>
 #include <linux/completion.h>
+#include <linux/dmaengine.h>
 #include <linux/fpga/adi-axi-common.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -14,9 +18,11 @@
 #include <linux/module.h>
 #include <linux/overflow.h>
 #include <linux/platform_device.h>
+#include <linux/spi/offload/provider.h>
 #include <linux/spi/spi.h>
 #include <trace/events/spi.h>

+#define SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH 0x10
 #define SPI_ENGINE_REG_RESET 0x40

 #define SPI_ENGINE_REG_INT_ENABLE 0x80
@@ -24,6 +30,7 @@
 #define SPI_ENGINE_REG_INT_SOURCE 0x88

 #define SPI_ENGINE_REG_SYNC_ID 0xc0
+#define SPI_ENGINE_REG_OFFLOAD_SYNC_ID 0xc4

 #define SPI_ENGINE_REG_CMD_FIFO_ROOM 0xd0
 #define SPI_ENGINE_REG_SDO_FIFO_ROOM 0xd4
@@ -34,10 +41,24 @@
 #define SPI_ENGINE_REG_SDI_DATA_FIFO 0xe8
 #define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK 0xec

+#define SPI_ENGINE_MAX_NUM_OFFLOADS 32
+
+#define SPI_ENGINE_REG_OFFLOAD_CTRL(x) (0x100 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+#define SPI_ENGINE_REG_OFFLOAD_STATUS(x) (0x104 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+#define SPI_ENGINE_REG_OFFLOAD_RESET(x) (0x108 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+#define SPI_ENGINE_REG_OFFLOAD_CMD_FIFO(x) (0x110 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+#define SPI_ENGINE_REG_OFFLOAD_SDO_FIFO(x) (0x114 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x))
+
+#define SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_SDO GENMASK(15, 8)
+#define SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_CMD GENMASK(7, 0)
+
 #define SPI_ENGINE_INT_CMD_ALMOST_EMPTY BIT(0)
 #define SPI_ENGINE_INT_SDO_ALMOST_EMPTY BIT(1)
 #define SPI_ENGINE_INT_SDI_ALMOST_FULL BIT(2)
 #define SPI_ENGINE_INT_SYNC BIT(3)
+#define SPI_ENGINE_INT_OFFLOAD_SYNC BIT(4)
+
+#define SPI_ENGINE_OFFLOAD_CTRL_ENABLE BIT(0)

 #define SPI_ENGINE_CONFIG_CPHA BIT(0)
 #define SPI_ENGINE_CONFIG_CPOL BIT(1)
@@ -79,6 +100,10 @@
 #define SPI_ENGINE_CMD_CS_INV(flags) \
	SPI_ENGINE_CMD(SPI_ENGINE_INST_CS_INV, 0, (flags))

+/* default sizes - can be changed when SPI Engine firmware is compiled */
+#define SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE 16
+#define SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE 16
+
 struct spi_engine_program {
	unsigned int length;
	uint16_t instructions[] __counted_by(length);
@@ -106,6 +131,17 @@ struct spi_engine_message_state {
	uint8_t *rx_buf;
 };

+enum {
+	SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED,
+	SPI_ENGINE_OFFLOAD_FLAG_PREPARED,
+};
+
+struct spi_engine_offload {
+	struct spi_engine *spi_engine;
+	unsigned long flags;
+	unsigned int offload_num;
+};
+
 struct spi_engine {
	struct clk *clk;
	struct clk *ref_clk;
@@ -118,6 +154,11 @@ struct spi_engine {
	unsigned int int_enable;
	/* shadows hardware CS inversion flag state */
	u8 cs_inv;
+
+	unsigned int offload_ctrl_mem_size;
+	unsigned int offload_sdo_mem_size;
+	struct spi_offload *offload;
+	u32 offload_caps;
 };

 static void spi_engine_program_add_cmd(struct spi_engine_program *p,
@@ -163,9 +204,9 @@ static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
		unsigned int n = min(len, 256U);
		unsigned int flags = 0;

-		if (xfer->tx_buf)
+		if (xfer->tx_buf || (xfer->offload_flags & SPI_OFFLOAD_XFER_TX_STREAM))
			flags |= SPI_ENGINE_TRANSFER_WRITE;
-		if (xfer->rx_buf)
+		if (xfer->rx_buf || (xfer->offload_flags & SPI_OFFLOAD_XFER_RX_STREAM))
			flags |= SPI_ENGINE_TRANSFER_READ;

		spi_engine_program_add_cmd(p, dry,
@@ -217,16 +258,24 @@ static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
  *
  * NB: This is separate from spi_engine_compile_message() because the latter
  * is called twice and would otherwise result in double-evaluation.
+ *
+ * Returns 0 on success, -EINVAL on failure.
  */
-static void spi_engine_precompile_message(struct spi_message *msg)
+static int spi_engine_precompile_message(struct spi_message *msg)
 {
	unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz;
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		/* If we have an offload transfer, we can't rx to buffer */
+		if (msg->offload && xfer->rx_buf)
+			return -EINVAL;
+
		clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz);
		xfer->effective_speed_hz = max_hz / min(clk_div, 256U);
	}
+
+	return 0;
 }

 static void spi_engine_compile_message(struct spi_message *msg, bool dry,
@@ -521,11 +570,105 @@ static irqreturn_t spi_engine_irq(int irq, void *devid)
	return IRQ_HANDLED;
 }

+static int spi_engine_offload_prepare(struct spi_message *msg)
+{
+	struct spi_controller *host = msg->spi->controller;
+	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
+	struct spi_engine_program *p = msg->opt_state;
+	struct spi_engine_offload *priv = msg->offload->priv;
+	struct spi_transfer *xfer;
+	void __iomem *cmd_addr;
+	void __iomem *sdo_addr;
+	size_t tx_word_count = 0;
+	unsigned int i;
+
+	if (p->length > spi_engine->offload_ctrl_mem_size)
+		return -EINVAL;
+
+	/* count total number of tx words in message */
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		/* no support for reading to rx_buf */
+		if (xfer->rx_buf)
+			return -EINVAL;
+
+		if (!xfer->tx_buf)
+			continue;
+
+		if (xfer->bits_per_word <= 8)
+			tx_word_count += xfer->len;
+		else if (xfer->bits_per_word <= 16)
+			tx_word_count += xfer->len / 2;
+		else
+			tx_word_count += xfer->len / 4;
+	}
+
+	if (tx_word_count && !(spi_engine->offload_caps & SPI_OFFLOAD_CAP_TX_STATIC_DATA))
+		return -EINVAL;
+
+	if (tx_word_count > spi_engine->offload_sdo_mem_size)
+		return -EINVAL;
+
+	/*
+	 * This protects against calling spi_optimize_message() with an offload
+	 * that has already been prepared with a different message.
+	 */
+	if (test_and_set_bit_lock(SPI_ENGINE_OFFLOAD_FLAG_PREPARED, &priv->flags))
+		return -EBUSY;
+
+	cmd_addr = spi_engine->base +
+		   SPI_ENGINE_REG_OFFLOAD_CMD_FIFO(priv->offload_num);
+	sdo_addr = spi_engine->base +
+		   SPI_ENGINE_REG_OFFLOAD_SDO_FIFO(priv->offload_num);
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		if (!xfer->tx_buf)
+			continue;
+
+		if (xfer->bits_per_word <= 8) {
+			const u8 *buf = xfer->tx_buf;
+
+			for (i = 0; i < xfer->len; i++)
+				writel_relaxed(buf[i], sdo_addr);
+		} else if (xfer->bits_per_word <= 16) {
+			const u16 *buf = xfer->tx_buf;
+
+			for (i = 0; i < xfer->len / 2; i++)
+				writel_relaxed(buf[i], sdo_addr);
+		} else {
+			const u32 *buf = xfer->tx_buf;
+
+			for (i = 0; i < xfer->len / 4; i++)
+				writel_relaxed(buf[i], sdo_addr);
+		}
+	}
+
+	for (i = 0; i < p->length; i++)
+		writel_relaxed(p->instructions[i], cmd_addr);
+
+	return 0;
+}
+
+static void spi_engine_offload_unprepare(struct spi_offload *offload)
+{
+	struct spi_engine_offload *priv = offload->priv;
+	struct spi_engine *spi_engine = priv->spi_engine;
+
+	writel_relaxed(1, spi_engine->base +
+			  SPI_ENGINE_REG_OFFLOAD_RESET(priv->offload_num));
+	writel_relaxed(0, spi_engine->base +
+			  SPI_ENGINE_REG_OFFLOAD_RESET(priv->offload_num));
+
+	clear_bit_unlock(SPI_ENGINE_OFFLOAD_FLAG_PREPARED, &priv->flags);
+}
+
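A sketch of the consumer-side lifecycle that reaches the prepare/unprepare pair above, assuming a driver that already holds a struct spi_offload from the controller. msg->offload, SPI_OFFLOAD_XFER_RX_STREAM and spi_optimize_message() are taken from this series and the existing SPI core; the surrounding function is illustrative only.

/*
 * Bind a message to the offload and optimize it once; optimization
 * writes the compiled program (and any static TX words) into the
 * offload FIFOs, after which the hardware trigger can replay it with
 * no further CPU involvement.
 */
static int example_offload_message(struct spi_device *spi,
				   struct spi_offload *offload)
{
	static struct spi_transfer xfer = {
		.len = 2,
		.bits_per_word = 16,
		/* capture each sample into the RX DMA stream */
		.offload_flags = SPI_OFFLOAD_XFER_RX_STREAM,
	};
	static struct spi_message msg;

	spi_message_init_with_transfers(&msg, &xfer, 1);
	msg.offload = offload;

	/* ends up in spi_engine_optimize_message() -> spi_engine_offload_prepare() */
	return spi_optimize_message(spi, &msg);
}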
 static int spi_engine_optimize_message(struct spi_message *msg)
 {
	struct spi_engine_program p_dry, *p;
+	int ret;

-	spi_engine_precompile_message(msg);
+	ret = spi_engine_precompile_message(msg);
+	if (ret)
+		return ret;

	p_dry.length = 0;
	spi_engine_compile_message(msg, true, &p_dry);
@@ -537,20 +680,61 @@ static int spi_engine_optimize_message(struct spi_message *msg)
	spi_engine_compile_message(msg, false, p);

	spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC(
-						AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));
+		msg->offload ? 0 : AXI_SPI_ENGINE_CUR_MSG_SYNC_ID));

	msg->opt_state = p;

+	if (msg->offload) {
+		ret = spi_engine_offload_prepare(msg);
+		if (ret) {
+			msg->opt_state = NULL;
+			kfree(p);
+			return ret;
+		}
+	}
+
	return 0;
 }

 static int spi_engine_unoptimize_message(struct spi_message *msg)
 {
+	if (msg->offload)
+		spi_engine_offload_unprepare(msg->offload);
+
	kfree(msg->opt_state);

	return 0;
 }

+static struct spi_offload
+*spi_engine_get_offload(struct spi_device *spi,
+			const struct spi_offload_config *config)
+{
+	struct spi_controller *host = spi->controller;
+	struct spi_engine *spi_engine = spi_controller_get_devdata(host);
+	struct spi_engine_offload *priv;
+
+	if (!spi_engine->offload)
+		return ERR_PTR(-ENODEV);
+
+	if (config->capability_flags & ~spi_engine->offload_caps)
+		return ERR_PTR(-EINVAL);
+
+	priv = spi_engine->offload->priv;
+
+	if (test_and_set_bit_lock(SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED, &priv->flags))
+		return ERR_PTR(-EBUSY);
+
+	return spi_engine->offload;
+}
+
+static void spi_engine_put_offload(struct spi_offload *offload)
+{
+	struct spi_engine_offload *priv = offload->priv;
+
+	clear_bit_unlock(SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED, &priv->flags);
+}
+
 static int spi_engine_setup(struct spi_device *device)
 {
	struct spi_controller *host = device->controller;
@@ -583,6 +767,12 @@ static int spi_engine_transfer_one_message(struct spi_controller *host,
	unsigned int int_enable = 0;
	unsigned long flags;

+	if (msg->offload) {
+		dev_err(&host->dev, "Single transfer offload not supported\n");
+		msg->status = -EOPNOTSUPP;
+		goto out;
+	}
+
	/* reinitialize message state for this transfer */
	memset(st, 0, sizeof(*st));
	st->cmd_buf = p->instructions;
@@ -632,11 +822,68 @@ static int spi_engine_transfer_one_message(struct spi_controller *host,
		trace_spi_transfer_stop(msg, xfer);
	}

+out:
	spi_finalize_current_message(host);

	return msg->status;
 }

+static int spi_engine_trigger_enable(struct spi_offload *offload)
+{
+	struct spi_engine_offload *priv = offload->priv;
+	struct spi_engine *spi_engine = priv->spi_engine;
+	unsigned int reg;
+
+	reg = readl_relaxed(spi_engine->base +
+			    SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
+	reg |= SPI_ENGINE_OFFLOAD_CTRL_ENABLE;
+	writel_relaxed(reg, spi_engine->base +
+			    SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
+	return 0;
+}
+
+static void spi_engine_trigger_disable(struct spi_offload *offload)
+{
+	struct spi_engine_offload *priv = offload->priv;
+	struct spi_engine *spi_engine = priv->spi_engine;
+	unsigned int reg;
+
+	reg = readl_relaxed(spi_engine->base +
+			    SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
+	reg &= ~SPI_ENGINE_OFFLOAD_CTRL_ENABLE;
+	writel_relaxed(reg, spi_engine->base +
+			    SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num));
+}
+
+static struct dma_chan
+*spi_engine_tx_stream_request_dma_chan(struct spi_offload *offload)
+{
+	struct spi_engine_offload *priv = offload->priv;
+	char name[16];
+
+	snprintf(name, sizeof(name), "offload%u-tx", priv->offload_num);
+
+	return dma_request_chan(offload->provider_dev, name);
+}
+
+static struct dma_chan
+*spi_engine_rx_stream_request_dma_chan(struct spi_offload *offload)
+{
+	struct spi_engine_offload *priv = offload->priv;
+	char name[16];
+
+	snprintf(name, sizeof(name), "offload%u-rx", priv->offload_num);
+
+	return dma_request_chan(offload->provider_dev, name);
+}
+
+static const struct spi_offload_ops spi_engine_offload_ops = {
+	.trigger_enable = spi_engine_trigger_enable,
+	.trigger_disable = spi_engine_trigger_disable,
+	.tx_stream_request_dma_chan = spi_engine_tx_stream_request_dma_chan,
+	.rx_stream_request_dma_chan = spi_engine_rx_stream_request_dma_chan,
+};
+
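Note that the two stream helpers in the ops table above resolve DMA channels against the offload provider's own device using the devicetree dma-names entries "offload0-rx" and "offload0-tx" — the same names the probe code below matches with device_property_match_string() to decide which streaming capabilities were compiled into the HDL.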
 static void spi_engine_release_hw(void *p)
 {
	struct spi_engine *spi_engine = p;
@@ -651,8 +898,7 @@ static int spi_engine_probe(struct platform_device *pdev)
	struct spi_engine *spi_engine;
	struct spi_controller *host;
	unsigned int version;
-	int irq;
-	int ret;
+	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
@@ -667,6 +913,46 @@ static int spi_engine_probe(struct platform_device *pdev)
	spin_lock_init(&spi_engine->lock);
	init_completion(&spi_engine->msg_complete);

+	/*
+	 * REVISIT: for now, all SPI Engines only have one offload. In the
+	 * future, this should be read from a memory mapped register to
+	 * determine the number of offloads enabled at HDL compile time. For
+	 * now, we can tell if an offload is present if there is a trigger
+	 * source wired up to it.
+	 */
+	if (device_property_present(&pdev->dev, "trigger-sources")) {
+		struct spi_engine_offload *priv;
+
+		spi_engine->offload =
+			devm_spi_offload_alloc(&pdev->dev,
+					       sizeof(struct spi_engine_offload));
+		if (IS_ERR(spi_engine->offload))
+			return PTR_ERR(spi_engine->offload);
+
+		priv = spi_engine->offload->priv;
+		priv->spi_engine = spi_engine;
+		priv->offload_num = 0;
+
+		spi_engine->offload->ops = &spi_engine_offload_ops;
+		spi_engine->offload_caps = SPI_OFFLOAD_CAP_TRIGGER;
+
+		if (device_property_match_string(&pdev->dev, "dma-names", "offload0-rx") >= 0) {
+			spi_engine->offload_caps |= SPI_OFFLOAD_CAP_RX_STREAM_DMA;
+			spi_engine->offload->xfer_flags |= SPI_OFFLOAD_XFER_RX_STREAM;
+		}
+
+		if (device_property_match_string(&pdev->dev, "dma-names", "offload0-tx") >= 0) {
+			spi_engine->offload_caps |= SPI_OFFLOAD_CAP_TX_STREAM_DMA;
+			spi_engine->offload->xfer_flags |= SPI_OFFLOAD_XFER_TX_STREAM;
+		} else {
+			/*
+			 * HDL compile option to enable TX DMA stream also disables
+			 * the SDO memory, so can't do both at the same time.
+			 */
+			spi_engine->offload_caps |= SPI_OFFLOAD_CAP_TX_STATIC_DATA;
+		}
+	}
+
	spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
	if (IS_ERR(spi_engine->clk))
		return PTR_ERR(spi_engine->clk);
@@ -688,6 +974,19 @@ static int spi_engine_probe(struct platform_device *pdev)
		return -ENODEV;
	}

+	if (ADI_AXI_PCORE_VER_MINOR(version) >= 1) {
+		unsigned int sizes = readl(spi_engine->base +
+					   SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH);
+
+		spi_engine->offload_ctrl_mem_size = 1 <<
+			FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_CMD, sizes);
+		spi_engine->offload_sdo_mem_size = 1 <<
+			FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_SDO, sizes);
+	} else {
+		spi_engine->offload_ctrl_mem_size = SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE;
+		spi_engine->offload_sdo_mem_size = SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE;
+	}
+
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
	writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
	writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
@@ -709,6 +1008,8 @@ static int spi_engine_probe(struct platform_device *pdev)
	host->transfer_one_message = spi_engine_transfer_one_message;
	host->optimize_message = spi_engine_optimize_message;
	host->unoptimize_message = spi_engine_unoptimize_message;
+	host->get_offload = spi_engine_get_offload;
+	host->put_offload = spi_engine_put_offload;
	host->num_chipselect = 8;

	/* Some features depend of the IP core version. */
*/ diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index 0d1aa6592484..77de5a07639a 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -1162,7 +1162,8 @@ static void bcm2835_spi_cleanup(struct spi_device *spi) sizeof(u32), DMA_TO_DEVICE); - gpiod_put(bs->cs_gpio); + if (!IS_ERR(bs->cs_gpio)) + gpiod_put(bs->cs_gpio); spi_set_csgpiod(spi, 0, NULL); kfree(target); @@ -1225,7 +1226,12 @@ static int bcm2835_spi_setup(struct spi_device *spi) struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); struct bcm2835_spidev *target = spi_get_ctldata(spi); struct gpiod_lookup_table *lookup __free(kfree) = NULL; - int ret; + const char *pinctrl_compats[] = { + "brcm,bcm2835-gpio", + "brcm,bcm2711-gpio", + "brcm,bcm7211-gpio", + }; + int ret, i; u32 cs; if (!target) { @@ -1290,6 +1296,14 @@ static int bcm2835_spi_setup(struct spi_device *spi) goto err_cleanup; } + for (i = 0; i < ARRAY_SIZE(pinctrl_compats); i++) { + if (of_find_compatible_node(NULL, NULL, pinctrl_compats[i])) + break; + } + + if (i == ARRAY_SIZE(pinctrl_compats)) + return 0; + /* * TODO: The code below is a slightly better alternative to the utter * abuse of the GPIO API that I found here before. It creates a diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c index 644b44d2aef2..18261cbd413b 100644 --- a/drivers/spi/spi-bcm63xx-hsspi.c +++ b/drivers/spi/spi-bcm63xx-hsspi.c @@ -745,7 +745,7 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev) if (IS_ERR(clk)) return PTR_ERR(clk); - reset = devm_reset_control_get_optional_exclusive(dev, NULL); + reset = devm_reset_control_get_optional_shared(dev, NULL); if (IS_ERR(reset)) return PTR_ERR(reset); diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index c8f64ec69344..b56210734caa 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c @@ -523,7 +523,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev) return PTR_ERR(clk); } - reset = devm_reset_control_get_optional_exclusive(dev, NULL); + reset = devm_reset_control_get_optional_shared(dev, NULL); if (IS_ERR(reset)) return PTR_ERR(reset); diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index a031ecb358e0..b3a0bc811593 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -43,10 +43,13 @@ static_assert(CQSPI_MAX_CHIPSELECT <= SPI_CS_CNT_MAX); #define CQSPI_SLOW_SRAM BIT(4) #define CQSPI_NEEDS_APB_AHB_HAZARD_WAR BIT(5) #define CQSPI_RD_NO_IRQ BIT(6) -#define CQSPI_DISABLE_STIG_MODE BIT(7) +#define CQSPI_DMA_SET_MASK BIT(7) +#define CQSPI_SUPPORT_DEVICE_RESET BIT(8) +#define CQSPI_DISABLE_STIG_MODE BIT(9) /* Capabilities */ #define CQSPI_SUPPORTS_OCTAL BIT(0) +#define CQSPI_SUPPORTS_QUAD BIT(1) #define CQSPI_OP_WIDTH(part) ((part).nbytes ? 
ilog2((part).buswidth) : 0) @@ -111,7 +114,7 @@ struct cqspi_st { struct cqspi_driver_platdata { u32 hwcaps_mask; - u8 quirks; + u16 quirks; int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata, u_char *rxbuf, loff_t from_addr, size_t n_rx); u32 (*get_dma_status)(struct cqspi_st *cqspi); @@ -146,6 +149,8 @@ struct cqspi_driver_platdata { #define CQSPI_REG_CONFIG_IDLE_LSB 31 #define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF #define CQSPI_REG_CONFIG_BAUD_MASK 0xF +#define CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK BIT(5) +#define CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK BIT(6) #define CQSPI_REG_RD_INSTR 0x04 #define CQSPI_REG_RD_INSTR_OPCODE_LSB 0 @@ -832,6 +837,25 @@ failrd: return ret; } +static void cqspi_device_reset(struct cqspi_st *cqspi) +{ + u32 reg; + + reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); + reg |= CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK; + writel(reg, cqspi->iobase + CQSPI_REG_CONFIG); + /* + * NOTE: Delay timing implementation is derived from + * spi_nor_hw_reset() + */ + writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG); + usleep_range(1, 5); + writel(reg | CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG); + usleep_range(100, 150); + writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG); + usleep_range(1000, 1200); +} + static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable) { void __iomem *reg_base = cqspi->iobase; @@ -1409,7 +1433,7 @@ static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op) struct cqspi_flash_pdata *f_pdata; f_pdata = &cqspi->f_pdata[spi_get_chipselect(mem->spi, 0)]; - cqspi_configure(f_pdata, mem->spi->max_speed_hz); + cqspi_configure(f_pdata, op->max_freq); if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) { /* @@ -1634,6 +1658,12 @@ static int cqspi_request_mmap_dma(struct cqspi_st *cqspi) int ret = PTR_ERR(cqspi->rx_chan); cqspi->rx_chan = NULL; + if (ret == -ENODEV) { + /* DMA support is not mandatory */ + dev_info(&cqspi->pdev->dev, "No Rx DMA available\n"); + return 0; + } + return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n"); } init_completion(&cqspi->rx_dma_complete); @@ -1658,6 +1688,7 @@ static const struct spi_controller_mem_ops cqspi_mem_ops = { static const struct spi_controller_mem_caps cqspi_mem_caps = { .dtr = true, + .per_op_freq = true, }; static int cqspi_setup_flash(struct cqspi_st *cqspi) @@ -1865,6 +1896,8 @@ static int cqspi_probe(struct platform_device *pdev) cqspi->master_ref_clk_hz); if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL) host->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL; + if (ddata->hwcaps_mask & CQSPI_SUPPORTS_QUAD) + host->mode_bits |= SPI_TX_QUAD; if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE)) { cqspi->use_direct_mode = true; cqspi->use_direct_mode_wr = true; @@ -1886,8 +1919,7 @@ static int cqspi_probe(struct platform_device *pdev) if (ddata->quirks & CQSPI_DISABLE_STIG_MODE) cqspi->disable_stig_mode = true; - if (of_device_is_compatible(pdev->dev.of_node, - "xlnx,versal-ospi-1.0")) { + if (ddata->quirks & CQSPI_DMA_SET_MASK) { ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); if (ret) goto probe_reset_failed; @@ -1917,18 +1949,16 @@ static int cqspi_probe(struct platform_device *pdev) host->num_chipselect = cqspi->num_chipselect; + if (ddata->quirks & CQSPI_SUPPORT_DEVICE_RESET) + cqspi_device_reset(cqspi); + if (cqspi->use_direct_mode) { ret = cqspi_request_mmap_dma(cqspi); if (ret == -EPROBE_DEFER) goto probe_setup_failed; } - ret = devm_pm_runtime_enable(dev); - if (ret) { - 
if (cqspi->rx_chan) - dma_release_channel(cqspi->rx_chan); - goto probe_setup_failed; - } + pm_runtime_enable(dev); pm_runtime_set_autosuspend_delay(dev, CQSPI_AUTOSUSPEND_TIMEOUT); pm_runtime_use_autosuspend(dev); @@ -1946,6 +1976,7 @@ static int cqspi_probe(struct platform_device *pdev) return 0; probe_setup_failed: cqspi_controller_enable(cqspi, 0); + pm_runtime_disable(dev); probe_reset_failed: if (cqspi->is_jh7110) cqspi_jh7110_disable_clk(pdev, cqspi); @@ -1964,7 +1995,8 @@ static void cqspi_remove(struct platform_device *pdev) if (cqspi->rx_chan) dma_release_channel(cqspi->rx_chan); - clk_disable_unprepare(cqspi->clk); + if (pm_runtime_get_sync(&pdev->dev) >= 0) + clk_disable(cqspi->clk); if (cqspi->is_jh7110) cqspi_jh7110_disable_clk(pdev, cqspi); @@ -2037,7 +2069,7 @@ static const struct cqspi_driver_platdata k2g_qspi = { }; static const struct cqspi_driver_platdata am654_ospi = { - .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, + .hwcaps_mask = CQSPI_SUPPORTS_OCTAL | CQSPI_SUPPORTS_QUAD, .quirks = CQSPI_NEEDS_WR_DELAY, }; @@ -2054,7 +2086,17 @@ static const struct cqspi_driver_platdata socfpga_qspi = { static const struct cqspi_driver_platdata versal_ospi = { .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, - .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA, + .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA + | CQSPI_DMA_SET_MASK, + .indirect_read_dma = cqspi_versal_indirect_read_dma, + .get_dma_status = cqspi_get_versal_dma_status, +}; + +static const struct cqspi_driver_platdata versal2_ospi = { + .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, + .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA + | CQSPI_DMA_SET_MASK + | CQSPI_SUPPORT_DEVICE_RESET, .indirect_read_dma = cqspi_versal_indirect_read_dma, .get_dma_status = cqspi_get_versal_dma_status, }; @@ -2111,6 +2153,10 @@ static const struct of_device_id cqspi_dt_ids[] = { .compatible = "mobileye,eyeq5-ospi", .data = &mobileye_eyeq5_ospi, }, + { + .compatible = "amd,versal2-ospi", + .data = &versal2_ospi, + }, { /* end of table */ } }; diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c index aed98ab14334..6dcba0e0ddaa 100644 --- a/drivers/spi/spi-cadence-xspi.c +++ b/drivers/spi/spi-cadence-xspi.c @@ -432,7 +432,7 @@ static bool cdns_mrvl_xspi_setup_clock(struct cdns_xspi_dev *cdns_xspi, u32 clk_reg; bool update_clk = false; - while (i < ARRAY_SIZE(cdns_mrvl_xspi_clk_div_list)) { + while (i < (ARRAY_SIZE(cdns_mrvl_xspi_clk_div_list) - 1)) { clk_val = MRVL_XSPI_CLOCK_DIVIDED( cdns_mrvl_xspi_clk_div_list[i]); if (clk_val <= requested_clk) diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c index ea517af9435f..941ecc6f59f8 100644 --- a/drivers/spi/spi-dw-core.c +++ b/drivers/spi/spi-dw-core.c @@ -677,7 +677,7 @@ static int dw_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) * operation. Transmit-only mode is suitable for the rest of them. 
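 *
 * [Editor's aside — not part of the patch. The hunk below is one instance of
 * a pattern this series applies across several spi-mem drivers
 * (cadence-quadspi, dw-core, fsl-qspi, microchip-core-qspi, mt65xx, mxic,
 * nxp-fspi): the controller advertises per-operation frequency support and
 * then clocks each operation from op->max_freq, which spi_mem_exec_op() has
 * already bounded by the device limit via spi_mem_adjust_op_freq(). A
 * condensed sketch, with "example_" names purely illustrative:
 *
 *	static const struct spi_controller_mem_caps example_mem_caps = {
 *		.per_op_freq = true,
 *	};
 *
 *	static int example_exec_op(struct spi_mem *mem,
 *				   const struct spi_mem_op *op)
 *	{
 *		unsigned long rate = op->max_freq;
 *
 *		return example_program_clk(mem->spi->controller, rate);
 *	}
 *
 * where example_program_clk() stands in for the driver's actual divider
 * programming, and &example_mem_caps is assigned to host->mem_caps at probe
 * time.]
 *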
*/ cfg.dfs = 8; - cfg.freq = clamp(mem->spi->max_speed_hz, 0U, dws->max_mem_freq); + cfg.freq = clamp(op->max_freq, 0U, dws->max_mem_freq); if (op->data.dir == SPI_MEM_DATA_IN) { cfg.tmode = DW_SPI_CTRLR0_TMOD_EPROMREAD; cfg.ndf = op->data.nbytes; @@ -894,6 +894,10 @@ static void dw_spi_hw_init(struct device *dev, struct dw_spi *dws) dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF); } +static const struct spi_controller_mem_caps dw_spi_mem_caps = { + .per_op_freq = true, +}; + int dw_spi_add_host(struct device *dev, struct dw_spi *dws) { struct spi_controller *host; @@ -941,8 +945,10 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) host->set_cs = dw_spi_set_cs; host->transfer_one = dw_spi_transfer_one; host->handle_err = dw_spi_handle_err; - if (dws->mem_ops.exec_op) + if (dws->mem_ops.exec_op) { host->mem_ops = &dws->mem_ops; + host->mem_caps = &dw_spi_mem_caps; + } host->max_speed_hz = dws->max_freq; host->flags = SPI_CONTROLLER_GPIO_SS; host->auto_runtime_pm = true; diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c index fc9e33be1e0e..e01c63d23b64 100644 --- a/drivers/spi/spi-fsi.c +++ b/drivers/spi/spi-fsi.c @@ -479,6 +479,19 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr, shift = SPI_FSI_SEQUENCE_SHIFT_IN(next->len); fsi_spi_sequence_add(&seq, shift); + } else if (next->tx_buf) { + if ((next->len + transfer->len) > (SPI_FSI_MAX_TX_SIZE + 8)) { + rc = -EINVAL; + goto error; + } + + len = next->len; + while (len > 8) { + fsi_spi_sequence_add(&seq, + SPI_FSI_SEQUENCE_SHIFT_OUT(8)); + len -= 8; + } + fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SHIFT_OUT(len)); } else { next = NULL; } diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 067c954cb6ea..0dcd49114095 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0+ // // Copyright 2013 Freescale Semiconductor, Inc. -// Copyright 2020 NXP +// Copyright 2020-2025 NXP // // Freescale DSPI driver // This file contains a driver for the Freescale DSPI @@ -62,6 +62,7 @@ #define SPI_SR_TFIWF BIT(18) #define SPI_SR_RFDF BIT(17) #define SPI_SR_CMDFFF BIT(16) +#define SPI_SR_TXRXS BIT(30) #define SPI_SR_CLEAR (SPI_SR_TCFQF | \ SPI_SR_TFUF | SPI_SR_TFFF | \ SPI_SR_CMDTCF | SPI_SR_SPEF | \ @@ -921,9 +922,20 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, struct spi_transfer *transfer; bool cs = false; int status = 0; + u32 val = 0; + bool cs_change = false; message->actual_length = 0; + /* Put DSPI in running mode if halted. 
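 * [Editor's note — not part of the patch: the fsl-dspi change gates the
 * module around each message. Before transferring, it clears SPI_MCR[HALT]
 * and polls SPI_SR until TXRXS sets; at the end of the message (on error, or
 * when the final transfer did not request cs_change) it sets HALT again and
 * polls until TXRXS clears, as the hunk further down shows, so the module is
 * kept halted between messages.]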
*/ + regmap_read(dspi->regmap, SPI_MCR, &val); + if (val & SPI_MCR_HALT) { + regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, 0); + while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 && + !(val & SPI_SR_TXRXS)) + ; + } + list_for_each_entry(transfer, &message->transfers, transfer_list) { dspi->cur_transfer = transfer; dspi->cur_msg = message; @@ -953,6 +965,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, dspi->tx_cmd |= SPI_PUSHR_CMD_CONT; } + cs_change = transfer->cs_change; dspi->tx = transfer->tx_buf; dspi->rx = transfer->rx_buf; dspi->len = transfer->len; @@ -962,17 +975,28 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF); + regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); + spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer, dspi->progress, !dspi->irq); if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { status = dspi_dma_xfer(dspi); } else { + /* + * Reinitialize the completion before transferring data + * to avoid the case where it might remain in the done + * state due to a spurious interrupt from a previous + * transfer. This could falsely signal that the current + * transfer has completed. + */ + if (dspi->irq) + reinit_completion(&dspi->xfer_done); + dspi_fifo_write(dspi); if (dspi->irq) { wait_for_completion(&dspi->xfer_done); - reinit_completion(&dspi->xfer_done); } else { do { status = dspi_poll(dspi); @@ -988,6 +1012,15 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, dspi_deassert_cs(spi, &cs); } + if (status || !cs_change) { + /* Put DSPI in stop mode */ + regmap_update_bits(dspi->regmap, SPI_MCR, + SPI_MCR_HALT, SPI_MCR_HALT); + while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 && + val & SPI_SR_TXRXS) + ; + } + message->status = status; spi_finalize_current_message(ctlr); @@ -1167,6 +1200,20 @@ static int dspi_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume); +static const struct regmap_range dspi_yes_ranges[] = { + regmap_reg_range(SPI_MCR, SPI_MCR), + regmap_reg_range(SPI_TCR, SPI_CTAR(3)), + regmap_reg_range(SPI_SR, SPI_TXFR3), + regmap_reg_range(SPI_RXFR0, SPI_RXFR3), + regmap_reg_range(SPI_CTARE(0), SPI_CTARE(3)), + regmap_reg_range(SPI_SREX, SPI_SREX), +}; + +static const struct regmap_access_table dspi_access_table = { + .yes_ranges = dspi_yes_ranges, + .n_yes_ranges = ARRAY_SIZE(dspi_yes_ranges), +}; + static const struct regmap_range dspi_volatile_ranges[] = { regmap_reg_range(SPI_MCR, SPI_TCR), regmap_reg_range(SPI_SR, SPI_SR), @@ -1184,6 +1231,8 @@ static const struct regmap_config dspi_regmap_config = { .reg_stride = 4, .max_register = 0x88, .volatile_table = &dspi_volatile_table, + .rd_table = &dspi_access_table, + .wr_table = &dspi_access_table, }; static const struct regmap_range dspi_xspi_volatile_ranges[] = { @@ -1205,6 +1254,8 @@ static const struct regmap_config dspi_xspi_regmap_config[] = { .reg_stride = 4, .max_register = 0x13c, .volatile_table = &dspi_xspi_volatile_table, + .rd_table = &dspi_access_table, + .wr_table = &dspi_access_table, }, { .name = "pushr", @@ -1227,6 +1278,8 @@ static int dspi_init(struct fsl_dspi *dspi) if (!spi_controller_is_target(dspi->ctlr)) mcr |= SPI_MCR_HOST; + mcr |= SPI_MCR_HALT; + regmap_write(dspi->regmap, SPI_MCR, mcr); regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 40f5c8fdba76..5e3818445234 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ 
b/drivers/spi/spi-fsl-lpspi.c @@ -572,7 +572,7 @@ static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi, timeout += 1; /* Double calculated timeout */ - return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC); + return secs_to_jiffies(2 * timeout); } static int fsl_lpspi_dma_transfer(struct spi_controller *controller, diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c index 9ec53bf0dda8..b5ecffcaf795 100644 --- a/drivers/spi/spi-fsl-qspi.c +++ b/drivers/spi/spi-fsl-qspi.c @@ -522,9 +522,10 @@ static void fsl_qspi_invalidate(struct fsl_qspi *q) qspi_writel(q, reg, q->iobase + QUADSPI_MCR); } -static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi) +static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi, + const struct spi_mem_op *op) { - unsigned long rate = spi->max_speed_hz; + unsigned long rate = op->max_freq; int ret; if (q->selected == spi_get_chipselect(spi, 0)) @@ -652,7 +653,7 @@ static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR, (QUADSPI_SR_IP_ACC_MASK | QUADSPI_SR_AHB_ACC_MASK), 10, 1000); - fsl_qspi_select_mem(q, mem->spi); + fsl_qspi_select_mem(q, mem->spi, op); if (needs_amba_base_offset(q)) addr_offset = q->memmap_phy; @@ -839,6 +840,23 @@ static const struct spi_controller_mem_ops fsl_qspi_mem_ops = { .get_name = fsl_qspi_get_name, }; +static const struct spi_controller_mem_caps fsl_qspi_mem_caps = { + .per_op_freq = true, +}; + +static void fsl_qspi_cleanup(void *data) +{ + struct fsl_qspi *q = data; + + /* disable the hardware */ + qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); + qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER); + + fsl_qspi_clk_disable_unprep(q); + + mutex_destroy(&q->lock); +} + static int fsl_qspi_probe(struct platform_device *pdev) { struct spi_controller *ctlr; @@ -923,43 +941,31 @@ static int fsl_qspi_probe(struct platform_device *pdev) ctlr->bus_num = -1; ctlr->num_chipselect = 4; ctlr->mem_ops = &fsl_qspi_mem_ops; + ctlr->mem_caps = &fsl_qspi_mem_caps; fsl_qspi_default_setup(q); ctlr->dev.of_node = np; + ret = devm_add_action_or_reset(dev, fsl_qspi_cleanup, q); + if (ret) + goto err_put_ctrl; + ret = devm_spi_register_controller(dev, ctlr); if (ret) - goto err_destroy_mutex; + goto err_put_ctrl; return 0; -err_destroy_mutex: - mutex_destroy(&q->lock); - err_disable_clk: fsl_qspi_clk_disable_unprep(q); err_put_ctrl: spi_controller_put(ctlr); - dev_err(dev, "Freescale QuadSPI probe failed\n"); return ret; } -static void fsl_qspi_remove(struct platform_device *pdev) -{ - struct fsl_qspi *q = platform_get_drvdata(pdev); - - /* disable the hardware */ - qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); - qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER); - - fsl_qspi_clk_disable_unprep(q); - - mutex_destroy(&q->lock); -} - static int fsl_qspi_suspend(struct device *dev) { return 0; @@ -997,7 +1003,6 @@ static struct platform_driver fsl_qspi_driver = { .pm = &fsl_qspi_pm_ops, }, .probe = fsl_qspi_probe, - .remove = fsl_qspi_remove, }; module_platform_driver(fsl_qspi_driver); diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index 856a4a9def66..2f2082652a1a 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c @@ -618,7 +618,7 @@ static struct spi_controller *fsl_spi_probe(struct device *dev, if (ret < 0) goto err_probe; - dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base, + dev_info(dev, "at MMIO %pa (irq = %d), %s mode\n", &mem->start, 
mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags)); return host; diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index 4f192e013cd6..405deb6677c1 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c @@ -39,36 +39,8 @@ struct spi_gpio { /*----------------------------------------------------------------------*/ -/* - * Because the overhead of going through four GPIO procedure calls - * per transferred bit can make performance a problem, this code - * is set up so that you can use it in either of two ways: - * - * - The slow generic way: set up platform_data to hold the GPIO - * numbers used for MISO/MOSI/SCK, and issue procedure calls for - * each of them. This driver can handle several such busses. - * - * - The quicker inlined way: only helps with platform GPIO code - * that inlines operations for constant GPIOs. This can give - * you tight (fast!) inner loops, but each such bus needs a - * new driver. You'll define a new C file, with Makefile and - * Kconfig support; the C code can be a total of six lines: - * - * #define DRIVER_NAME "myboard_spi2" - * #define SPI_MISO_GPIO 119 - * #define SPI_MOSI_GPIO 120 - * #define SPI_SCK_GPIO 121 - * #define SPI_N_CHIPSEL 4 - * #include "spi-gpio.c" - */ - -#ifndef DRIVER_NAME #define DRIVER_NAME "spi_gpio" -#define GENERIC_BITBANG /* vs tight inlines */ - -#endif - /*----------------------------------------------------------------------*/ static inline struct spi_gpio *__pure @@ -341,16 +313,14 @@ static int spi_gpio_probe_pdata(struct platform_device *pdev, struct spi_gpio *spi_gpio = spi_controller_get_devdata(host); int i; -#ifdef GENERIC_BITBANG - if (!pdata || !pdata->num_chipselect) + if (!pdata) return -ENODEV; -#endif - /* - * The host needs to think there is a chipselect even if not - * connected - */ - host->num_chipselect = pdata->num_chipselect ?: 1; + /* It's just one always-selected device, fine to continue */ + if (!pdata->num_chipselect) + return 0; + + host->num_chipselect = pdata->num_chipselect; spi_gpio->cs_gpios = devm_kcalloc(dev, host->num_chipselect, sizeof(*spi_gpio->cs_gpios), GFP_KERNEL); @@ -445,8 +415,6 @@ static int spi_gpio_probe(struct platform_device *pdev) return devm_spi_register_controller(&pdev->dev, host); } -MODULE_ALIAS("platform:" DRIVER_NAME); - static const struct of_device_id spi_gpio_dt_ids[] = { { .compatible = "spi-gpio" }, {} @@ -465,3 +433,4 @@ module_platform_driver(spi_gpio_driver); MODULE_DESCRIPTION("SPI host driver using generic bitbanged GPIO "); MODULE_AUTHOR("David Brownell"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index eeb7d082c247..c93d80a4d734 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -1449,7 +1449,7 @@ static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size) timeout += 1; /* Double calculated timeout */ - return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC); + return secs_to_jiffies(2 * timeout); } static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, @@ -1695,9 +1695,12 @@ static int spi_imx_transfer_one(struct spi_controller *controller, struct spi_device *spi, struct spi_transfer *transfer) { + int ret; struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller); - spi_imx_setupxfer(spi, transfer); + ret = spi_imx_setupxfer(spi, transfer); + if (ret < 0) + return ret; transfer->effective_speed_hz = spi_imx->spi_bus_clk; /* flush rxfifo before transfer */ diff --git a/drivers/spi/spi-kspi2.c 
b/drivers/spi/spi-kspi2.c new file mode 100644 index 000000000000..ca73ec52ce63 --- /dev/null +++ b/drivers/spi/spi-kspi2.c @@ -0,0 +1,431 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) KEBA Industrial Automation Gmbh 2024 + * + * Driver for KEBA SPI host controller type 2 FPGA IP core + */ + +#include <linux/iopoll.h> +#include <linux/misc/keba.h> +#include <linux/spi/spi.h> + +#define KSPI2 "kspi2" + +#define KSPI2_CLK_FREQ_REG 0x03 +#define KSPI2_CLK_FREQ_MASK 0x0f +#define KSPI2_CLK_FREQ_62_5M 0x0 +#define KSPI2_CLK_FREQ_33_3M 0x1 +#define KSPI2_CLK_FREQ_125M 0x2 +#define KSPI2_CLK_FREQ_50M 0x3 +#define KSPI2_CLK_FREQ_100M 0x4 + +#define KSPI2_CONTROL_REG 0x04 +#define KSPI2_CONTROL_CLK_DIV_MAX 0x0f +#define KSPI2_CONTROL_CLK_DIV_MASK 0x0f +#define KSPI2_CONTROL_CPHA 0x10 +#define KSPI2_CONTROL_CPOL 0x20 +#define KSPI2_CONTROL_CLK_MODE_MASK 0x30 +#define KSPI2_CONTROL_INIT KSPI2_CONTROL_CLK_DIV_MAX + +#define KSPI2_STATUS_REG 0x08 +#define KSPI2_STATUS_IN_USE 0x01 +#define KSPI2_STATUS_BUSY 0x02 + +#define KSPI2_DATA_REG 0x0c + +#define KSPI2_CS_NR_REG 0x10 +#define KSPI2_CS_NR_NONE 0xff + +#define KSPI2_MODE_BITS (SPI_CPHA | SPI_CPOL) +#define KSPI2_NUM_CS 255 + +#define KSPI2_SPEED_HZ_MIN(kspi) (kspi->base_speed_hz / 65536) +#define KSPI2_SPEED_HZ_MAX(kspi) (kspi->base_speed_hz / 2) + +/* timeout is 10 times the time to transfer one byte at slowest clock */ +#define KSPI2_XFER_TIMEOUT_US(kspi) (USEC_PER_SEC / \ + KSPI2_SPEED_HZ_MIN(kspi) * 8 * 10) + +#define KSPI2_INUSE_SLEEP_US (2 * USEC_PER_MSEC) +#define KSPI2_INUSE_TIMEOUT_US (10 * USEC_PER_SEC) + +struct kspi2 { + struct keba_spi_auxdev *auxdev; + void __iomem *base; + struct spi_controller *host; + + u32 base_speed_hz; /* SPI base clock frequency in HZ */ + u8 control_shadow; + + struct spi_device **device; + int device_size; +}; + +static int kspi2_inuse_lock(struct kspi2 *kspi) +{ + u8 sts; + int ret; + + /* + * The SPI controller has an IN_USE bit for locking access to the + * controller. This enables the use of the SPI controller by other non-Linux + * processors. + * + * If the SPI controller is free, then the first read returns + * IN_USE == 0. After that the SPI controller is locked and further + * reads of IN_USE return 1. + * + * The SPI controller is unlocked by writing 1 into IN_USE. + * + * The IN_USE bit acts as a hardware semaphore for the SPI controller. + * Poll for semaphore, but sleep while polling to free the CPU. + */ + ret = readb_poll_timeout(kspi->base + KSPI2_STATUS_REG, + sts, (sts & KSPI2_STATUS_IN_USE) == 0, + KSPI2_INUSE_SLEEP_US, KSPI2_INUSE_TIMEOUT_US); + if (ret != 0) + dev_warn(&kspi->auxdev->auxdev.dev, "%s err!\n", __func__); + + return ret; +} + +static void kspi2_inuse_unlock(struct kspi2 *kspi) +{ + /* unlock the controller by writing 1 into IN_USE */ + iowrite8(KSPI2_STATUS_IN_USE, kspi->base + KSPI2_STATUS_REG); +} + +static int kspi2_prepare_hardware(struct spi_controller *host) +{ + struct kspi2 *kspi = spi_controller_get_devdata(host); + + /* lock hardware semaphore before actual use of controller */ + return kspi2_inuse_lock(kspi); +} + +static int kspi2_unprepare_hardware(struct spi_controller *host) +{ + struct kspi2 *kspi = spi_controller_get_devdata(host); + + /* unlock hardware semaphore after actual use of controller */ + kspi2_inuse_unlock(kspi); + + return 0; +} + +static u8 kspi2_calc_minimal_divider(struct kspi2 *kspi, u32 max_speed_hz) +{ + u8 div; + + /* + * Divider values 2, 4, 8, 16, ..., 65536 are possible.
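 *
 * [Editor's worked example, using the encoding described next: the loop
 * below tests kspi->base_speed_hz >> (div + 1) against the requested limit.
 * With a 50 MHz base clock and a 10 MHz device limit, div 0 gives 25 MHz and
 * div 1 gives 12.5 MHz (both too fast), while div 2 gives 6.25 MHz, so 2 is
 * returned.]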
They are coded + * as 0, 1, 2, 3, ..., 15 in the CONTROL_CLK_DIV bit. + */ + for (div = 0; div < KSPI2_CONTROL_CLK_DIV_MAX; div++) { + if ((kspi->base_speed_hz >> (div + 1)) <= max_speed_hz) + return div; + } + + /* return divider for slowest clock if loop fails to find one */ + return KSPI2_CONTROL_CLK_DIV_MAX; +} + +static void kspi2_write_control_reg(struct kspi2 *kspi, u8 val, u8 mask) +{ + /* write control register only when necessary to improve performance */ + if (val != (kspi->control_shadow & mask)) { + kspi->control_shadow = (kspi->control_shadow & ~mask) | val; + iowrite8(kspi->control_shadow, kspi->base + KSPI2_CONTROL_REG); + } +} + +static int kspi2_txrx_byte(struct kspi2 *kspi, u8 tx, u8 *rx) +{ + u8 sts; + int ret; + + /* start transfer by writing TX byte */ + iowrite8(tx, kspi->base + KSPI2_DATA_REG); + + /* wait till finished (BUSY == 0) */ + ret = readb_poll_timeout(kspi->base + KSPI2_STATUS_REG, + sts, (sts & KSPI2_STATUS_BUSY) == 0, + 0, KSPI2_XFER_TIMEOUT_US(kspi)); + if (ret != 0) + return ret; + + /* read RX byte */ + if (rx) + *rx = ioread8(kspi->base + KSPI2_DATA_REG); + + return 0; +} + +static int kspi2_process_transfer(struct kspi2 *kspi, struct spi_transfer *t) +{ + u8 tx = 0; + u8 rx; + int i; + int ret; + + for (i = 0; i < t->len; i++) { + if (t->tx_buf) + tx = ((const u8 *)t->tx_buf)[i]; + + ret = kspi2_txrx_byte(kspi, tx, &rx); + if (ret) + return ret; + + if (t->rx_buf) + ((u8 *)t->rx_buf)[i] = rx; + } + + return 0; +} + +static int kspi2_setup_transfer(struct kspi2 *kspi, + struct spi_device *spi, + struct spi_transfer *t) +{ + u32 max_speed_hz = spi->max_speed_hz; + u8 clk_div; + + /* + * spi_device (spi) has default parameters. Some of these can be + * overwritten by parameters in spi_transfer (t). + */ + if (t->bits_per_word && ((t->bits_per_word % 8) != 0)) { + dev_err(&spi->dev, "Word width %d not supported!\n", + t->bits_per_word); + + return -EINVAL; + } + + if (t->speed_hz && (t->speed_hz < max_speed_hz)) + max_speed_hz = t->speed_hz; + + clk_div = kspi2_calc_minimal_divider(kspi, max_speed_hz); + kspi2_write_control_reg(kspi, clk_div, KSPI2_CONTROL_CLK_DIV_MASK); + + return 0; +} + +static int kspi2_transfer_one(struct spi_controller *host, + struct spi_device *spi, + struct spi_transfer *t) +{ + struct kspi2 *kspi = spi_controller_get_devdata(host); + int ret; + + ret = kspi2_setup_transfer(kspi, spi, t); + if (ret != 0) + return ret; + + if (t->len) { + ret = kspi2_process_transfer(kspi, t); + if (ret != 0) + return ret; + } + + return 0; +} + +static void kspi2_set_cs(struct spi_device *spi, bool enable) +{ + struct spi_controller *host = spi->controller; + struct kspi2 *kspi = spi_controller_get_devdata(host); + + /* controller is using active low chip select signals by design */ + if (!enable) + iowrite8(spi_get_chipselect(spi, 0), kspi->base + KSPI2_CS_NR_REG); + else + iowrite8(KSPI2_CS_NR_NONE, kspi->base + KSPI2_CS_NR_REG); +} + +static int kspi2_prepare_message(struct spi_controller *host, + struct spi_message *msg) +{ + struct kspi2 *kspi = spi_controller_get_devdata(host); + struct spi_device *spi = msg->spi; + u8 mode = 0; + + /* setup SPI clock phase and polarity */ + if (spi->mode & SPI_CPHA) + mode |= KSPI2_CONTROL_CPHA; + if (spi->mode & SPI_CPOL) + mode |= KSPI2_CONTROL_CPOL; + kspi2_write_control_reg(kspi, mode, KSPI2_CONTROL_CLK_MODE_MASK); + + return 0; +} + +static int kspi2_setup(struct spi_device *spi) +{ + struct kspi2 *kspi = spi_controller_get_devdata(spi->controller); + + /* + * Check only parameters. 
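 *
 * [Editor's worked example for the checks below: with a 50 MHz base clock,
 * KSPI2_SPEED_HZ_MAX is 25 MHz and KSPI2_SPEED_HZ_MIN is
 * 50000000 / 65536 = 762 Hz. A max_speed_hz of 0 or above 25 MHz is clamped
 * to 25 MHz, and a limit below 762 Hz is rejected with -EINVAL.]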
Actual setup is done in kspi2_prepare_message + * and directly before the SPI transfer starts. + */ + + if (spi->mode & ~KSPI2_MODE_BITS) { + dev_err(&spi->dev, "Mode %d not supported!\n", spi->mode); + + return -EINVAL; + } + + if ((spi->bits_per_word % 8) != 0) { + dev_err(&spi->dev, "Word width %d not supported!\n", + spi->bits_per_word); + + return -EINVAL; + } + + if ((spi->max_speed_hz == 0) || + (spi->max_speed_hz > KSPI2_SPEED_HZ_MAX(kspi))) + spi->max_speed_hz = KSPI2_SPEED_HZ_MAX(kspi); + + if (spi->max_speed_hz < KSPI2_SPEED_HZ_MIN(kspi)) { + dev_err(&spi->dev, "Requested speed of %d Hz is too low!\n", + spi->max_speed_hz); + + return -EINVAL; + } + + return 0; +} + +static void kspi2_unregister_devices(struct kspi2 *kspi) +{ + int i; + + for (i = 0; i < kspi->device_size; i++) { + struct spi_device *device = kspi->device[i]; + + if (device) + spi_unregister_device(device); + } +} + +static int kspi2_register_devices(struct kspi2 *kspi) +{ + struct spi_board_info *info = kspi->auxdev->info; + int i; + + /* register all known SPI devices */ + for (i = 0; i < kspi->auxdev->info_size; i++) { + struct spi_device *device = spi_new_device(kspi->host, &info[i]); + + if (!device) { + kspi2_unregister_devices(kspi); + + return -ENODEV; + } + kspi->device[i] = device; + } + + return 0; +} + +static void kspi2_init(struct kspi2 *kspi) +{ + iowrite8(KSPI2_CONTROL_INIT, kspi->base + KSPI2_CONTROL_REG); + kspi->control_shadow = KSPI2_CONTROL_INIT; + + iowrite8(KSPI2_CS_NR_NONE, kspi->base + KSPI2_CS_NR_REG); +} + +static int kspi2_probe(struct auxiliary_device *auxdev, + const struct auxiliary_device_id *id) +{ + struct device *dev = &auxdev->dev; + struct spi_controller *host; + struct kspi2 *kspi; + u8 clk_reg; + int ret; + + host = devm_spi_alloc_host(dev, sizeof(struct kspi2)); + if (!host) + return -ENOMEM; + kspi = spi_controller_get_devdata(host); + kspi->auxdev = container_of(auxdev, struct keba_spi_auxdev, auxdev); + kspi->host = host; + kspi->device = devm_kcalloc(dev, kspi->auxdev->info_size, + sizeof(*kspi->device), GFP_KERNEL); + if (!kspi->device) + return -ENOMEM; + kspi->device_size = kspi->auxdev->info_size; + auxiliary_set_drvdata(auxdev, kspi); + + kspi->base = devm_ioremap_resource(dev, &kspi->auxdev->io); + if (IS_ERR(kspi->base)) + return PTR_ERR(kspi->base); + + /* read the SPI base clock frequency */ + clk_reg = ioread8(kspi->base + KSPI2_CLK_FREQ_REG); + switch (clk_reg & KSPI2_CLK_FREQ_MASK) { + case KSPI2_CLK_FREQ_62_5M: + kspi->base_speed_hz = 62500000; break; + case KSPI2_CLK_FREQ_33_3M: + kspi->base_speed_hz = 33333333; break; + case KSPI2_CLK_FREQ_125M: + kspi->base_speed_hz = 125000000; break; + case KSPI2_CLK_FREQ_50M: + kspi->base_speed_hz = 50000000; break; + case KSPI2_CLK_FREQ_100M: + kspi->base_speed_hz = 100000000; break; + default: + dev_err(dev, "Undefined SPI base clock frequency!\n"); + return -ENODEV; + } + + kspi2_init(kspi); + + host->bus_num = -1; + host->num_chipselect = KSPI2_NUM_CS; + host->mode_bits = KSPI2_MODE_BITS; + host->setup = kspi2_setup; + host->prepare_transfer_hardware = kspi2_prepare_hardware; + host->unprepare_transfer_hardware = kspi2_unprepare_hardware; + host->prepare_message = kspi2_prepare_message; + host->set_cs = kspi2_set_cs; + host->transfer_one = kspi2_transfer_one; + ret = devm_spi_register_controller(dev, host); + if (ret) { + dev_err(dev, "Failed to register host (%d)!\n", ret); + return ret; + } + + ret = kspi2_register_devices(kspi); + if (ret) { + dev_err(dev, "Failed to register devices (%d)!\n", ret); + return 
ret; + } + + return 0; +} + +static void kspi2_remove(struct auxiliary_device *auxdev) +{ + struct kspi2 *kspi = auxiliary_get_drvdata(auxdev); + + kspi2_unregister_devices(kspi); +} + +static const struct auxiliary_device_id kspi2_devtype_aux[] = { + { .name = "keba.spi" }, + { }, +}; +MODULE_DEVICE_TABLE(auxiliary, kspi2_devtype_aux); + +static struct auxiliary_driver kspi2_driver_aux = { + .name = KSPI2, + .id_table = kspi2_devtype_aux, + .probe = kspi2_probe, + .remove = kspi2_remove, +}; +module_auxiliary_driver(kspi2_driver_aux); + +MODULE_AUTHOR("Gerhard Engleder <eg@keba.com>"); +MODULE_DESCRIPTION("KEBA SPI host controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c index 31a878d9458d..7740f94847a8 100644 --- a/drivers/spi/spi-loopback-test.c +++ b/drivers/spi/spi-loopback-test.c @@ -420,7 +420,7 @@ MODULE_LICENSE("GPL"); static void spi_test_print_hex_dump(char *pre, const void *ptr, size_t len) { /* limit the hex_dump */ - if (len < 1024) { + if (len <= 1024) { print_hex_dump(KERN_INFO, pre, DUMP_PREFIX_OFFSET, 16, 1, ptr, len, 0); diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index abc6792e738c..5db0639d3b01 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -187,6 +187,16 @@ bool spi_mem_default_supports_op(struct spi_mem *mem, return false; } + if (op->max_freq && mem->spi->controller->min_speed_hz && + op->max_freq < mem->spi->controller->min_speed_hz) + return false; + + if (op->max_freq && + op->max_freq < mem->spi->max_speed_hz) { + if (!spi_mem_controller_is_capable(ctlr, per_op_freq)) + return false; + } + + return spi_mem_check_buswidth(mem, op); } EXPORT_SYMBOL_GPL(spi_mem_default_supports_op); @@ -364,6 +374,20 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) u8 *tmpbuf; int ret; + /* Make sure the operation frequency is correct before going further */ + spi_mem_adjust_op_freq(mem, (struct spi_mem_op *)op); + + dev_vdbg(&mem->spi->dev, "[cmd: 0x%02x][%dB addr: %#8llx][%2dB dummy][%4dB data %s] %d%c-%d%c-%d%c-%d%c @ %uHz\n", + op->cmd.opcode, + op->addr.nbytes, (op->addr.nbytes ? op->addr.val : 0), + op->dummy.nbytes, + op->data.nbytes, (op->data.nbytes ? (op->data.dir == SPI_MEM_DATA_IN ? " read" : "write") : " "), + op->cmd.buswidth, op->cmd.dtr ? 'D' : 'S', + op->addr.buswidth, op->addr.dtr ? 'D' : 'S', + op->dummy.buswidth, op->dummy.dtr ? 'D' : 'S', + op->data.buswidth, op->data.dtr ? 'D' : 'S', + op->max_freq ?
op->max_freq : mem->spi->max_speed_hz); + ret = spi_mem_check_op(op); if (ret) return ret; @@ -410,6 +434,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) xfers[xferpos].tx_buf = tmpbuf; xfers[xferpos].len = op->cmd.nbytes; xfers[xferpos].tx_nbits = op->cmd.buswidth; + xfers[xferpos].speed_hz = op->max_freq; spi_message_add_tail(&xfers[xferpos], &msg); xferpos++; totalxferlen++; @@ -424,6 +449,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) xfers[xferpos].tx_buf = tmpbuf + 1; xfers[xferpos].len = op->addr.nbytes; xfers[xferpos].tx_nbits = op->addr.buswidth; + xfers[xferpos].speed_hz = op->max_freq; spi_message_add_tail(&xfers[xferpos], &msg); xferpos++; totalxferlen += op->addr.nbytes; @@ -435,6 +461,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) xfers[xferpos].len = op->dummy.nbytes; xfers[xferpos].tx_nbits = op->dummy.buswidth; xfers[xferpos].dummy_data = 1; + xfers[xferpos].speed_hz = op->max_freq; spi_message_add_tail(&xfers[xferpos], &msg); xferpos++; totalxferlen += op->dummy.nbytes; @@ -450,6 +477,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) } xfers[xferpos].len = op->data.nbytes; + xfers[xferpos].speed_hz = op->max_freq; spi_message_add_tail(&xfers[xferpos], &msg); xferpos++; totalxferlen += op->data.nbytes; @@ -528,6 +556,57 @@ int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) } EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size); +/** + * spi_mem_adjust_op_freq() - Adjust the frequency of a SPI mem operation to + * match controller, PCB and chip limitations + * @mem: the SPI memory + * @op: the operation to adjust + * + * Some chips have per-op frequency limitations and must adapt the maximum + * speed. This function allows SPI mem drivers to set @op->max_freq to the + * maximum supported value. + */ +void spi_mem_adjust_op_freq(struct spi_mem *mem, struct spi_mem_op *op) +{ + if (!op->max_freq || op->max_freq > mem->spi->max_speed_hz) + op->max_freq = mem->spi->max_speed_hz; +} +EXPORT_SYMBOL_GPL(spi_mem_adjust_op_freq); + +/** + * spi_mem_calc_op_duration() - Derives the theoretical length (in ns) of an + * operation. This helps find the best variant + * among a list of possible choices. + * @op: the operation to benchmark + * + * Some chips have per-op frequency limitations, PCBs usually have their own + * limitations as well, and controllers can support dual, quad or even octal + * modes, sometimes in DTR. All these combinations make it impossible to + * statically list the best combination for all situations. If we want something + * accurate, all these combinations should be rated (e.g. with a time estimate) + * and the best pick should be taken based on these calculations. + * + * Returns a ns estimate for the time this op would take. + */ +u64 spi_mem_calc_op_duration(struct spi_mem_op *op) +{ + u64 ncycles = 0; + u32 ns_per_cycles; + + ns_per_cycles = 1000000000 / op->max_freq; + ncycles += ((op->cmd.nbytes * 8) / op->cmd.buswidth) / (op->cmd.dtr ? 2 : 1); + ncycles += ((op->addr.nbytes * 8) / op->addr.buswidth) / (op->addr.dtr ? 2 : 1); + + /* Dummy bytes are optional for some SPI flash memory operations */ + if (op->dummy.nbytes) + ncycles += ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1); + + ncycles += ((op->data.nbytes * 8) / op->data.buswidth) / (op->data.dtr ?
2 : 1); + + return ncycles * ns_per_cycles; +} +EXPORT_SYMBOL_GPL(spi_mem_calc_op_duration); + static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc, u64 offs, size_t len, void *buf) { diff --git a/drivers/spi/spi-microchip-core-qspi.c b/drivers/spi/spi-microchip-core-qspi.c index ad2b5ffa6153..fa828fcaaef2 100644 --- a/drivers/spi/spi-microchip-core-qspi.c +++ b/drivers/spi/spi-microchip-core-qspi.c @@ -265,7 +265,8 @@ static irqreturn_t mchp_coreqspi_isr(int irq, void *dev_id) return ret; } -static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_device *spi) +static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_device *spi, + const struct spi_mem_op *op) { unsigned long clk_hz; u32 control, baud_rate_val = 0; @@ -274,11 +275,11 @@ static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_devi if (!clk_hz) return -EINVAL; - baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * spi->max_speed_hz); + baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * op->max_freq); if (baud_rate_val > MAX_DIVIDER || baud_rate_val < MIN_DIVIDER) { dev_err(&spi->dev, "could not configure the clock for spi clock %d Hz & system clock %ld Hz\n", - spi->max_speed_hz, clk_hz); + op->max_freq, clk_hz); return -EINVAL; } @@ -399,7 +400,7 @@ static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o if (err) goto error; - err = mchp_coreqspi_setup_clock(qspi, mem->spi); + err = mchp_coreqspi_setup_clock(qspi, mem->spi, op); if (err) goto error; @@ -457,6 +458,10 @@ error: static bool mchp_coreqspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) { + struct mchp_coreqspi *qspi = spi_controller_get_devdata(mem->spi->controller); + unsigned long clk_hz; + u32 baud_rate_val; + if (!spi_mem_default_supports_op(mem, op)) return false; @@ -479,6 +484,14 @@ static bool mchp_coreqspi_supports_op(struct spi_mem *mem, const struct spi_mem_ return false; } + clk_hz = clk_get_rate(qspi->clk); + if (!clk_hz) + return false; + + baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * op->max_freq); + if (baud_rate_val > MAX_DIVIDER || baud_rate_val < MIN_DIVIDER) + return false; + return true; } @@ -498,6 +511,10 @@ static const struct spi_controller_mem_ops mchp_coreqspi_mem_ops = { .exec_op = mchp_coreqspi_exec_op, }; +static const struct spi_controller_mem_caps mchp_coreqspi_mem_caps = { + .per_op_freq = true, +}; + static int mchp_coreqspi_probe(struct platform_device *pdev) { struct spi_controller *ctlr; @@ -540,6 +557,7 @@ static int mchp_coreqspi_probe(struct platform_device *pdev) ctlr->bits_per_word_mask = SPI_BPW_MASK(8); ctlr->mem_ops = &mchp_coreqspi_mem_ops; + ctlr->mem_caps = &mchp_coreqspi_mem_caps; ctlr->setup = mchp_coreqspi_setup_op; ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD; diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c index 5b6af55855ef..62ba0bd9cbb7 100644 --- a/drivers/spi/spi-microchip-core.c +++ b/drivers/spi/spi-microchip-core.c @@ -70,8 +70,7 @@ #define INT_RX_CHANNEL_OVERFLOW BIT(2) #define INT_TX_CHANNEL_UNDERRUN BIT(3) -#define INT_ENABLE_MASK (CONTROL_RX_DATA_INT | CONTROL_TX_DATA_INT | \ - CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT) +#define INT_ENABLE_MASK (CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT) #define REG_CONTROL (0x00) #define REG_FRAME_SIZE (0x04) @@ -133,10 +132,15 @@ static inline void mchp_corespi_disable(struct mchp_corespi *spi) mchp_corespi_write(spi, REG_CONTROL, control); } -static inline void mchp_corespi_read_fifo(struct 
mchp_corespi *spi) +static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi, int fifo_max) { - while (spi->rx_len >= spi->n_bytes && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)) { - u32 data = mchp_corespi_read(spi, REG_RX_DATA); + for (int i = 0; i < fifo_max; i++) { + u32 data; + + while (mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY) + ; + + data = mchp_corespi_read(spi, REG_RX_DATA); spi->rx_len -= spi->n_bytes; @@ -211,11 +215,10 @@ static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len) mchp_corespi_write(spi, REG_FRAMESUP, len); } -static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi) +static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi, int fifo_max) { - int fifo_max, i = 0; + int i = 0; - fifo_max = DIV_ROUND_UP(min(spi->tx_len, FIFO_DEPTH), spi->n_bytes); mchp_corespi_set_xfer_size(spi, fifo_max); while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_TXFIFO_FULL)) { @@ -413,19 +416,6 @@ static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id) if (intfield == 0) return IRQ_NONE; - if (intfield & INT_TXDONE) - mchp_corespi_write(spi, REG_INT_CLEAR, INT_TXDONE); - - if (intfield & INT_RXRDY) { - mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY); - - if (spi->rx_len) - mchp_corespi_read_fifo(spi); - } - - if (!spi->rx_len && !spi->tx_len) - finalise = true; - if (intfield & INT_RX_CHANNEL_OVERFLOW) { mchp_corespi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW); finalise = true; @@ -512,9 +502,14 @@ static int mchp_corespi_transfer_one(struct spi_controller *host, mchp_corespi_write(spi, REG_SLAVE_SELECT, spi->pending_slave_select); - while (spi->tx_len) - mchp_corespi_write_fifo(spi); + while (spi->tx_len) { + int fifo_max = DIV_ROUND_UP(min(spi->tx_len, FIFO_DEPTH), spi->n_bytes); + + mchp_corespi_write_fifo(spi, fifo_max); + mchp_corespi_read_fifo(spi, fifo_max); + } + spi_finalize_current_transfer(host); return 1; } diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 85f3bafc975d..4b0a1c0db041 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -20,6 +20,7 @@ #include <linux/spi/spi.h> #include <linux/spi/spi-mem.h> #include <linux/dma-mapping.h> +#include <linux/pm_qos.h> #define SPI_CFG0_REG 0x0000 #define SPI_CFG1_REG 0x0004 @@ -147,6 +148,7 @@ struct mtk_spi_compatible { * @tx_sgl_len: Size of TX DMA transfer * @rx_sgl_len: Size of RX DMA transfer * @dev_comp: Device data structure + * @qos_request: QoS request * @spi_clk_hz: Current SPI clock in Hz * @spimem_done: SPI-MEM operation completion * @use_spimem: Enables SPI-MEM @@ -166,6 +168,7 @@ struct mtk_spi { struct scatterlist *tx_sgl, *rx_sgl; u32 tx_sgl_len, rx_sgl_len; const struct mtk_spi_compatible *dev_comp; + struct pm_qos_request qos_request; u32 spi_clk_hz; struct completion spimem_done; bool use_spimem; @@ -356,6 +359,7 @@ static int mtk_spi_hw_init(struct spi_controller *host, struct mtk_chip_config *chip_config = spi->controller_data; struct mtk_spi *mdata = spi_controller_get_devdata(host); + cpu_latency_qos_update_request(&mdata->qos_request, 500); cpha = spi->mode & SPI_CPHA ? 1 : 0; cpol = spi->mode & SPI_CPOL ? 
1 : 0; @@ -459,6 +463,15 @@ static int mtk_spi_prepare_message(struct spi_controller *host, return mtk_spi_hw_init(host, msg->spi); } +static int mtk_spi_unprepare_message(struct spi_controller *host, + struct spi_message *message) +{ + struct mtk_spi *mdata = spi_controller_get_devdata(host); + + cpu_latency_qos_update_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE); + return 0; +} + static void mtk_spi_set_cs(struct spi_device *spi, bool enable) { u32 reg_val; @@ -961,7 +974,7 @@ static int mtk_spi_mem_exec_op(struct spi_mem *mem, mtk_spi_reset(mdata); mtk_spi_hw_init(mem->spi->controller, mem->spi); - mtk_spi_prepare_transfer(mem->spi->controller, mem->spi->max_speed_hz); + mtk_spi_prepare_transfer(mem->spi->controller, op->max_freq); reg_val = readl(mdata->base + SPI_CFG3_IPM_REG); /* opcode byte len */ @@ -1122,6 +1135,10 @@ static const struct spi_controller_mem_ops mtk_spi_mem_ops = { .exec_op = mtk_spi_mem_exec_op, }; +static const struct spi_controller_mem_caps mtk_spi_mem_caps = { + .per_op_freq = true, +}; + static int mtk_spi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1139,6 +1156,7 @@ static int mtk_spi_probe(struct platform_device *pdev) host->set_cs = mtk_spi_set_cs; host->prepare_message = mtk_spi_prepare_message; + host->unprepare_message = mtk_spi_unprepare_message; host->transfer_one = mtk_spi_transfer_one; host->can_dma = mtk_spi_can_dma; host->setup = mtk_spi_setup; @@ -1160,6 +1178,7 @@ static int mtk_spi_probe(struct platform_device *pdev) if (mdata->dev_comp->ipm_design) { mdata->dev = dev; host->mem_ops = &mtk_spi_mem_ops; + host->mem_caps = &mtk_spi_mem_caps; init_completion(&mdata->spimem_done); } @@ -1244,6 +1263,8 @@ static int mtk_spi_probe(struct platform_device *pdev) clk_disable_unprepare(mdata->spi_hclk); } + cpu_latency_qos_add_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE); + if (mdata->dev_comp->need_pad_sel) { if (mdata->pad_num != host->num_chipselect) return dev_err_probe(dev, -EINVAL, @@ -1287,6 +1308,7 @@ static void mtk_spi_remove(struct platform_device *pdev) struct mtk_spi *mdata = spi_controller_get_devdata(host); int ret; + cpu_latency_qos_remove_request(&mdata->qos_request); if (mdata->use_spimem && !completion_done(&mdata->spimem_done)) complete(&mdata->spimem_done); diff --git a/drivers/spi/spi-mtk-snfi.c b/drivers/spi/spi-mtk-snfi.c index fdbea9dffb62..e82ee6dcf498 100644 --- a/drivers/spi/spi-mtk-snfi.c +++ b/drivers/spi/spi-mtk-snfi.c @@ -1284,9 +1284,6 @@ static int mtk_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) { struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->controller); - dev_dbg(ms->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode, - op->addr.val, op->addr.buswidth, op->addr.nbytes, - op->data.buswidth, op->data.nbytes); if (mtk_snand_is_page_ops(op)) { if (op->data.dir == SPI_MEM_DATA_IN) return mtk_snand_read_page_cache(ms, op); diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c index c02c4204442f..0eb35c4e3987 100644 --- a/drivers/spi/spi-mux.c +++ b/drivers/spi/spi-mux.c @@ -68,9 +68,7 @@ static int spi_mux_select(struct spi_device *spi) priv->current_cs = spi_get_chipselect(spi, 0); - spi_setup(priv->spi); - - return 0; + return spi_setup(priv->spi); } static int spi_mux_setup(struct spi_device *spi) diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c index 809767d3145c..eeaea6a5e310 100644 --- a/drivers/spi/spi-mxic.c +++ b/drivers/spi/spi-mxic.c @@ -522,7 +522,7 @@ static int mxic_spi_mem_exec_op(struct spi_mem *mem, int i, 
ret; u8 addr[8], cmd[2]; - ret = mxic_spi_set_freq(mxic, mem->spi->max_speed_hz); + ret = mxic_spi_set_freq(mxic, op->max_freq); if (ret) return ret; @@ -582,6 +582,7 @@ static const struct spi_controller_mem_caps mxic_spi_mem_caps = { .dtr = true, .ecc = true, .swap16 = true, + .per_op_freq = true, }; static void mxic_spi_set_cs(struct spi_device *spi, bool lvl) diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index e6d955d964f4..43455305fdf4 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c @@ -381,6 +381,8 @@ static int mxs_spi_transfer_one(struct spi_controller *host, if (status) break; + t->effective_speed_hz = ssp->clk_rate; + /* De-assert on last transfer, inverted by cs_change flag */ flag = (&t->transfer_list == m->transfers.prev) ^ t->cs_change ? TXRX_DEASSERT_CS : 0; diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c index 958bab27a081..67cc1d86de42 100644 --- a/drivers/spi/spi-npcm-fiu.c +++ b/drivers/spi/spi-npcm-fiu.c @@ -550,11 +550,6 @@ static int npcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) int ret = 0; u8 *buf; - dev_dbg(fiu->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n", - op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth, - op->dummy.buswidth, op->data.buswidth, op->addr.val, - op->data.nbytes); - if (fiu->spix_mode || op->addr.nbytes > 4) return -EOPNOTSUPP; diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c index 1161b9e5a4dc..bad6b30bab0e 100644 --- a/drivers/spi/spi-nxp-fspi.c +++ b/drivers/spi/spi-nxp-fspi.c @@ -705,9 +705,10 @@ static void nxp_fspi_dll_calibration(struct nxp_fspi *f) * Value for rest of the CS FLSHxxCR0 register would be zero. * */ -static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi) +static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi, + const struct spi_mem_op *op) { - unsigned long rate = spi->max_speed_hz; + unsigned long rate = op->max_freq; int ret; uint64_t size_kb; @@ -931,7 +932,7 @@ static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) FSPI_STS0_ARB_IDLE, 1, POLL_TOUT, true); WARN_ON(err); - nxp_fspi_select_mem(f, mem->spi); + nxp_fspi_select_mem(f, mem->spi, op); nxp_fspi_prepare_lut(f, op); /* @@ -1149,6 +1150,10 @@ static const struct spi_controller_mem_ops nxp_fspi_mem_ops = { .get_name = nxp_fspi_get_name, }; +static const struct spi_controller_mem_caps nxp_fspi_mem_caps = { + .per_op_freq = true, +}; + static int nxp_fspi_probe(struct platform_device *pdev) { struct spi_controller *ctlr; @@ -1246,6 +1251,7 @@ static int nxp_fspi_probe(struct platform_device *pdev) ctlr->bus_num = -1; ctlr->num_chipselect = NXP_FSPI_MAX_CHIPSELECT; ctlr->mem_ops = &nxp_fspi_mem_ops; + ctlr->mem_caps = &nxp_fspi_mem_caps; nxp_fspi_default_setup(f); diff --git a/drivers/spi/spi-offload-trigger-pwm.c b/drivers/spi/spi-offload-trigger-pwm.c new file mode 100644 index 000000000000..805ed41560df --- /dev/null +++ b/drivers/spi/spi-offload-trigger-pwm.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 Analog Devices Inc. + * Copyright (C) 2024 BayLibre, SAS + * + * Generic PWM trigger for SPI offload. 
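 *
 * [Editor's worked example — not part of the patch: for a requested periodic
 * trigger of 250 kHz, the code below computes
 * period_length_ns = DIV_ROUND_UP_ULL(NSEC_PER_SEC, 250000) = 4000 ns and
 * duty_length_ns = 2000 ns (a fixed 50% duty cycle for now).
 * pwm_round_waveform_might_sleep() then reports what the PWM can actually
 * generate, and validate writes the achievable frequency back into
 * config->periodic.frequency_hz. Disabling does not stop the PWM outright:
 * it keeps the current period and sets duty_length_ns to 0.]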
+ */ + +#include <linux/device.h> +#include <linux/err.h> +#include <linux/math.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/pwm.h> +#include <linux/spi/offload/provider.h> +#include <linux/spi/offload/types.h> +#include <linux/time.h> +#include <linux/types.h> + +struct spi_offload_trigger_pwm_state { + struct device *dev; + struct pwm_device *pwm; +}; + +static bool spi_offload_trigger_pwm_match(struct spi_offload_trigger *trigger, + enum spi_offload_trigger_type type, + u64 *args, u32 nargs) +{ + if (nargs) + return false; + + return type == SPI_OFFLOAD_TRIGGER_PERIODIC; +} + +static int spi_offload_trigger_pwm_validate(struct spi_offload_trigger *trigger, + struct spi_offload_trigger_config *config) +{ + struct spi_offload_trigger_pwm_state *st = spi_offload_trigger_get_priv(trigger); + struct spi_offload_trigger_periodic *periodic = &config->periodic; + struct pwm_waveform wf = { }; + int ret; + + if (config->type != SPI_OFFLOAD_TRIGGER_PERIODIC) + return -EINVAL; + + if (!periodic->frequency_hz) + return -EINVAL; + + wf.period_length_ns = DIV_ROUND_UP_ULL(NSEC_PER_SEC, periodic->frequency_hz); + /* REVISIT: 50% duty-cycle for now - may add config parameter later */ + wf.duty_length_ns = wf.period_length_ns / 2; + + ret = pwm_round_waveform_might_sleep(st->pwm, &wf); + if (ret < 0) + return ret; + + periodic->frequency_hz = DIV_ROUND_UP_ULL(NSEC_PER_SEC, wf.period_length_ns); + + return 0; +} + +static int spi_offload_trigger_pwm_enable(struct spi_offload_trigger *trigger, + struct spi_offload_trigger_config *config) +{ + struct spi_offload_trigger_pwm_state *st = spi_offload_trigger_get_priv(trigger); + struct spi_offload_trigger_periodic *periodic = &config->periodic; + struct pwm_waveform wf = { }; + + if (config->type != SPI_OFFLOAD_TRIGGER_PERIODIC) + return -EINVAL; + + if (!periodic->frequency_hz) + return -EINVAL; + + wf.period_length_ns = DIV_ROUND_UP_ULL(NSEC_PER_SEC, periodic->frequency_hz); + /* REVISIT: 50% duty-cycle for now - may add config parameter later */ + wf.duty_length_ns = wf.period_length_ns / 2; + + return pwm_set_waveform_might_sleep(st->pwm, &wf, false); +} + +static void spi_offload_trigger_pwm_disable(struct spi_offload_trigger *trigger) +{ + struct spi_offload_trigger_pwm_state *st = spi_offload_trigger_get_priv(trigger); + struct pwm_waveform wf; + int ret; + + ret = pwm_get_waveform_might_sleep(st->pwm, &wf); + if (ret < 0) { + dev_err(st->dev, "failed to get waveform: %d\n", ret); + return; + } + + wf.duty_length_ns = 0; + + ret = pwm_set_waveform_might_sleep(st->pwm, &wf, false); + if (ret < 0) + dev_err(st->dev, "failed to disable PWM: %d\n", ret); +} + +static const struct spi_offload_trigger_ops spi_offload_trigger_pwm_ops = { + .match = spi_offload_trigger_pwm_match, + .validate = spi_offload_trigger_pwm_validate, + .enable = spi_offload_trigger_pwm_enable, + .disable = spi_offload_trigger_pwm_disable, +}; + +static void spi_offload_trigger_pwm_release(void *data) +{ + pwm_disable(data); +} + +static int spi_offload_trigger_pwm_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct spi_offload_trigger_info info = { + .fwnode = dev_fwnode(dev), + .ops = &spi_offload_trigger_pwm_ops, + }; + struct spi_offload_trigger_pwm_state *st; + struct pwm_state state; + int ret; + + st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + info.priv = st; + st->dev = dev; + + st->pwm = devm_pwm_get(dev, 
NULL); + if (IS_ERR(st->pwm)) + return dev_err_probe(dev, PTR_ERR(st->pwm), "failed to get PWM\n"); + + /* init with duty_cycle = 0, output enabled to ensure trigger off */ + pwm_init_state(st->pwm, &state); + state.enabled = true; + + ret = pwm_apply_might_sleep(st->pwm, &state); + if (ret < 0) + return dev_err_probe(dev, ret, "failed to apply PWM state\n"); + + ret = devm_add_action_or_reset(dev, spi_offload_trigger_pwm_release, st->pwm); + if (ret) + return ret; + + return devm_spi_offload_trigger_register(dev, &info); +} + +static const struct of_device_id spi_offload_trigger_pwm_of_match_table[] = { + { .compatible = "pwm-trigger" }, + { } +}; +MODULE_DEVICE_TABLE(of, spi_offload_trigger_pwm_of_match_table); + +static struct platform_driver spi_offload_trigger_pwm_driver = { + .driver = { + .name = "pwm-trigger", + .of_match_table = spi_offload_trigger_pwm_of_match_table, + }, + .probe = spi_offload_trigger_pwm_probe, +}; +module_platform_driver(spi_offload_trigger_pwm_driver); + +MODULE_AUTHOR("David Lechner <dlechner@baylibre.com>"); +MODULE_DESCRIPTION("Generic PWM trigger"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-offload.c b/drivers/spi/spi-offload.c new file mode 100644 index 000000000000..6bad042fe437 --- /dev/null +++ b/drivers/spi/spi-offload.c @@ -0,0 +1,468 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 Analog Devices Inc. + * Copyright (C) 2024 BayLibre, SAS + */ + +/* + * SPI Offloading support. + * + * Some SPI controllers support offloading of SPI transfers. Essentially, this + * is the ability for a SPI controller to perform SPI transfers with minimal + * or even no CPU intervention, e.g. via a specialized SPI controller with a + * hardware trigger or via a conventional SPI controller using a non-Linux MCU + * processor core to offload the work. + */ + +#define DEFAULT_SYMBOL_NAMESPACE "SPI_OFFLOAD" + +#include <linux/cleanup.h> +#include <linux/device.h> +#include <linux/dmaengine.h> +#include <linux/export.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/property.h> +#include <linux/spi/offload/consumer.h> +#include <linux/spi/offload/provider.h> +#include <linux/spi/offload/types.h> +#include <linux/spi/spi.h> +#include <linux/types.h> + +struct spi_controller_and_offload { + struct spi_controller *controller; + struct spi_offload *offload; +}; + +struct spi_offload_trigger { + struct list_head list; + struct kref ref; + struct fwnode_handle *fwnode; + /* synchronizes calling ops and driver registration */ + struct mutex lock; + /* + * If the provider goes away while the consumer still has a reference, + * ops and priv will be set to NULL and all calls will fail with -ENODEV. + */ + const struct spi_offload_trigger_ops *ops; + void *priv; +}; + +static LIST_HEAD(spi_offload_triggers); +static DEFINE_MUTEX(spi_offload_triggers_lock); + +/** + * devm_spi_offload_alloc() - Allocate offload instance + * @dev: Device for devm purposes and assigned to &struct spi_offload.provider_dev + * @priv_size: Size of private data to allocate + * + * Offload providers should use this to allocate offload instances. + * + * Return: Pointer to new offload instance or error on failure. 
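 *
 * [Editor's aside — not part of the patch: provider-side usage, modeled on
 * the spi-axi-spi-engine probe hunk earlier in this diff; the "my_" names
 * are illustrative:
 *
 *	struct my_offload_priv {
 *		unsigned int offload_num;
 *	};
 *
 *	static const struct spi_offload_ops my_offload_ops;
 *
 *	offload = devm_spi_offload_alloc(&pdev->dev,
 *					 sizeof(struct my_offload_priv));
 *	if (IS_ERR(offload))
 *		return PTR_ERR(offload);
 *
 *	priv = offload->priv;
 *	priv->offload_num = 0;
 *	offload->ops = &my_offload_ops;
 *	offload->xfer_flags = SPI_OFFLOAD_XFER_RX_STREAM;
 *
 * after which the controller hands the instance out from its get_offload()
 * callback.]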
+ */ +struct spi_offload *devm_spi_offload_alloc(struct device *dev, + size_t priv_size) +{ + struct spi_offload *offload; + void *priv; + + offload = devm_kzalloc(dev, sizeof(*offload), GFP_KERNEL); + if (!offload) + return ERR_PTR(-ENOMEM); + + priv = devm_kzalloc(dev, priv_size, GFP_KERNEL); + if (!priv) + return ERR_PTR(-ENOMEM); + + offload->provider_dev = dev; + offload->priv = priv; + + return offload; +} +EXPORT_SYMBOL_GPL(devm_spi_offload_alloc); + +static void spi_offload_put(void *data) +{ + struct spi_controller_and_offload *resource = data; + + resource->controller->put_offload(resource->offload); + kfree(resource); +} + +/** + * devm_spi_offload_get() - Get an offload instance + * @dev: Device for devm purposes + * @spi: SPI device to use for the transfers + * @config: Offload configuration + * + * Peripheral drivers call this function to get an offload instance that meets + * the requirements specified in @config. If no suitable offload instance is + * available, -ENODEV is returned. + * + * Return: Offload instance or error on failure. + */ +struct spi_offload *devm_spi_offload_get(struct device *dev, + struct spi_device *spi, + const struct spi_offload_config *config) +{ + struct spi_controller_and_offload *resource; + struct spi_offload *offload; + int ret; + + if (!spi || !config) + return ERR_PTR(-EINVAL); + + if (!spi->controller->get_offload) + return ERR_PTR(-ENODEV); + + resource = kzalloc(sizeof(*resource), GFP_KERNEL); + if (!resource) + return ERR_PTR(-ENOMEM); + + offload = spi->controller->get_offload(spi, config); + if (IS_ERR(offload)) { + kfree(resource); + return offload; + } + + resource->controller = spi->controller; + resource->offload = offload; + + ret = devm_add_action_or_reset(dev, spi_offload_put, resource); + if (ret) + return ERR_PTR(ret); + + return offload; +} +EXPORT_SYMBOL_GPL(devm_spi_offload_get); + +static void spi_offload_trigger_free(struct kref *ref) +{ + struct spi_offload_trigger *trigger = + container_of(ref, struct spi_offload_trigger, ref); + + mutex_destroy(&trigger->lock); + fwnode_handle_put(trigger->fwnode); + kfree(trigger); +} + +static void spi_offload_trigger_put(void *data) +{ + struct spi_offload_trigger *trigger = data; + + scoped_guard(mutex, &trigger->lock) + if (trigger->ops && trigger->ops->release) + trigger->ops->release(trigger); + + kref_put(&trigger->ref, spi_offload_trigger_free); +} + +static struct spi_offload_trigger +*spi_offload_trigger_get(enum spi_offload_trigger_type type, + struct fwnode_reference_args *args) +{ + struct spi_offload_trigger *trigger; + bool match = false; + int ret; + + guard(mutex)(&spi_offload_triggers_lock); + + list_for_each_entry(trigger, &spi_offload_triggers, list) { + if (trigger->fwnode != args->fwnode) + continue; + + match = trigger->ops->match(trigger, type, args->args, args->nargs); + if (match) + break; + } + + if (!match) + return ERR_PTR(-EPROBE_DEFER); + + guard(mutex)(&trigger->lock); + + if (!trigger->ops) + return ERR_PTR(-ENODEV); + + if (trigger->ops->request) { + ret = trigger->ops->request(trigger, type, args->args, args->nargs); + if (ret) + return ERR_PTR(ret); + } + + kref_get(&trigger->ref); + + return trigger; +} + +/** + * devm_spi_offload_trigger_get() - Get an offload trigger instance + * @dev: Device for devm purposes. + * @offload: Offload instance connected to a trigger. + * @type: Trigger type to get. + * + * Return: Offload trigger instance or error on failure. 
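+ *
+ * Typical consumer usage, requesting the periodic trigger wired up via the
+ * provider's "trigger-sources" fwnode reference (sketch; surrounding driver
+ * code is assumed):
+ *
+ *	trigger = devm_spi_offload_trigger_get(dev, offload,
+ *					       SPI_OFFLOAD_TRIGGER_PERIODIC);
+ *	if (IS_ERR(trigger))
+ *		return PTR_ERR(trigger);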
+ */
+struct spi_offload_trigger
+*devm_spi_offload_trigger_get(struct device *dev,
+			      struct spi_offload *offload,
+			      enum spi_offload_trigger_type type)
+{
+	struct spi_offload_trigger *trigger;
+	struct fwnode_reference_args args;
+	int ret;
+
+	ret = fwnode_property_get_reference_args(dev_fwnode(offload->provider_dev),
+						 "trigger-sources",
+						 "#trigger-source-cells", 0, 0,
+						 &args);
+	if (ret)
+		return ERR_PTR(ret);
+
+	trigger = spi_offload_trigger_get(type, &args);
+	fwnode_handle_put(args.fwnode);
+	if (IS_ERR(trigger))
+		return trigger;
+
+	ret = devm_add_action_or_reset(dev, spi_offload_trigger_put, trigger);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return trigger;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_trigger_get);
+
+/**
+ * spi_offload_trigger_validate() - Validate the requested trigger
+ * @trigger: Offload trigger instance
+ * @config: Trigger config to validate
+ *
+ * On success, @config may be modified to reflect what the hardware can do.
+ * For example, the frequency of a periodic trigger may be adjusted to the
+ * nearest supported value.
+ *
+ * Callers will likely need to do additional validation of the modified trigger
+ * parameters.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int spi_offload_trigger_validate(struct spi_offload_trigger *trigger,
+				 struct spi_offload_trigger_config *config)
+{
+	guard(mutex)(&trigger->lock);
+
+	if (!trigger->ops)
+		return -ENODEV;
+
+	if (!trigger->ops->validate)
+		return -EOPNOTSUPP;
+
+	return trigger->ops->validate(trigger, config);
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_validate);
+
+/**
+ * spi_offload_trigger_enable() - Enable the trigger for an offload instance
+ * @offload: Offload instance
+ * @trigger: Offload trigger instance
+ * @config: Trigger config to apply
+ *
+ * There must be a prepared offload instance (i.e. spi_optimize_message() was
+ * called with the same offload assigned to the message). This will also
+ * reserve the bus for exclusive use by the offload instance until the trigger
+ * is disabled. Any other attempts to send a transfer or lock the bus will
+ * fail with -EBUSY during this time.
+ *
+ * Calls must be balanced with spi_offload_trigger_disable().
+ *
+ * Context: can sleep
+ * Return: 0 on success, else a negative error code.
+ */
+int spi_offload_trigger_enable(struct spi_offload *offload,
+			       struct spi_offload_trigger *trigger,
+			       struct spi_offload_trigger_config *config)
+{
+	int ret;
+
+	guard(mutex)(&trigger->lock);
+
+	if (!trigger->ops)
+		return -ENODEV;
+
+	if (offload->ops && offload->ops->trigger_enable) {
+		ret = offload->ops->trigger_enable(offload);
+		if (ret)
+			return ret;
+	}
+
+	if (trigger->ops->enable) {
+		ret = trigger->ops->enable(trigger, config);
+		if (ret) {
+			if (offload->ops && offload->ops->trigger_disable)
+				offload->ops->trigger_disable(offload);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_enable);
+
+/**
+ * spi_offload_trigger_disable() - Disable the hardware trigger for an offload
+ * @offload: Offload instance
+ * @trigger: Offload trigger instance
+ *
+ * Disables the hardware trigger for the offload instance and releases the bus
+ * for use by other clients.
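+ *
+ * A balanced enable/disable pair might look like this (sketch; the 1 MHz
+ * rate is an arbitrary example value):
+ *
+ *	struct spi_offload_trigger_config cfg = {
+ *		.type = SPI_OFFLOAD_TRIGGER_PERIODIC,
+ *		.periodic = { .frequency_hz = 1000000 },
+ *	};
+ *
+ *	ret = spi_offload_trigger_enable(offload, trigger, &cfg);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	spi_offload_trigger_disable(offload, trigger);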
+ *
+ * Context: can sleep
+ */
+void spi_offload_trigger_disable(struct spi_offload *offload,
+				 struct spi_offload_trigger *trigger)
+{
+	if (offload->ops && offload->ops->trigger_disable)
+		offload->ops->trigger_disable(offload);
+
+	guard(mutex)(&trigger->lock);
+
+	if (!trigger->ops)
+		return;
+
+	if (trigger->ops->disable)
+		trigger->ops->disable(trigger);
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_disable);
+
+static void spi_offload_release_dma_chan(void *chan)
+{
+	dma_release_channel(chan);
+}
+
+/**
+ * devm_spi_offload_tx_stream_request_dma_chan() - Get the DMA channel info for the TX stream
+ * @dev: Device for devm purposes.
+ * @offload: Offload instance
+ *
+ * This is the DMA channel that will provide data to transfers that use the
+ * %SPI_OFFLOAD_XFER_TX_STREAM offload flag.
+ *
+ * Return: Pointer to DMA channel info, or an ERR_PTR() on failure
+ */
+struct dma_chan
+*devm_spi_offload_tx_stream_request_dma_chan(struct device *dev,
+					     struct spi_offload *offload)
+{
+	struct dma_chan *chan;
+	int ret;
+
+	if (!offload->ops || !offload->ops->tx_stream_request_dma_chan)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	chan = offload->ops->tx_stream_request_dma_chan(offload);
+	if (IS_ERR(chan))
+		return chan;
+
+	ret = devm_add_action_or_reset(dev, spi_offload_release_dma_chan, chan);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return chan;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_tx_stream_request_dma_chan);
+
+/**
+ * devm_spi_offload_rx_stream_request_dma_chan() - Get the DMA channel info for the RX stream
+ * @dev: Device for devm purposes.
+ * @offload: Offload instance
+ *
+ * This is the DMA channel that will receive data from transfers that use the
+ * %SPI_OFFLOAD_XFER_RX_STREAM offload flag.
+ *
+ * Return: Pointer to DMA channel info, or an ERR_PTR() on failure
+ */
+struct dma_chan
+*devm_spi_offload_rx_stream_request_dma_chan(struct device *dev,
+					     struct spi_offload *offload)
+{
+	struct dma_chan *chan;
+	int ret;
+
+	if (!offload->ops || !offload->ops->rx_stream_request_dma_chan)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	chan = offload->ops->rx_stream_request_dma_chan(offload);
+	if (IS_ERR(chan))
+		return chan;
+
+	ret = devm_add_action_or_reset(dev, spi_offload_release_dma_chan, chan);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return chan;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_rx_stream_request_dma_chan);
+
+/* Trigger providers */
+
+static void spi_offload_trigger_unregister(void *data)
+{
+	struct spi_offload_trigger *trigger = data;
+
+	scoped_guard(mutex, &spi_offload_triggers_lock)
+		list_del(&trigger->list);
+
+	scoped_guard(mutex, &trigger->lock) {
+		trigger->priv = NULL;
+		trigger->ops = NULL;
+	}
+
+	kref_put(&trigger->ref, spi_offload_trigger_free);
+}
+
+/**
+ * devm_spi_offload_trigger_register() - Allocate and register an offload trigger
+ * @dev: Device for devm purposes.
+ * @info: Provider-specific trigger info.
+ *
+ * Return: 0 on success, else a negative error code.
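+ *
+ * A provider typically registers from probe(), as the PWM trigger driver
+ * above does (sketch; my_trigger_ops is an illustrative name):
+ *
+ *	struct spi_offload_trigger_info info = {
+ *		.fwnode = dev_fwnode(dev),
+ *		.ops = &my_trigger_ops,
+ *		.priv = st,
+ *	};
+ *
+ *	return devm_spi_offload_trigger_register(dev, &info);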
+ */ +int devm_spi_offload_trigger_register(struct device *dev, + struct spi_offload_trigger_info *info) +{ + struct spi_offload_trigger *trigger; + + if (!info->fwnode || !info->ops) + return -EINVAL; + + trigger = kzalloc(sizeof(*trigger), GFP_KERNEL); + if (!trigger) + return -ENOMEM; + + kref_init(&trigger->ref); + mutex_init(&trigger->lock); + trigger->fwnode = fwnode_handle_get(info->fwnode); + trigger->ops = info->ops; + trigger->priv = info->priv; + + scoped_guard(mutex, &spi_offload_triggers_lock) + list_add_tail(&trigger->list, &spi_offload_triggers); + + return devm_add_action_or_reset(dev, spi_offload_trigger_unregister, trigger); +} +EXPORT_SYMBOL_GPL(devm_spi_offload_trigger_register); + +/** + * spi_offload_trigger_get_priv() - Get the private data for the trigger + * + * @trigger: Offload trigger instance. + * + * Return: Private data for the trigger. + */ +void *spi_offload_trigger_get_priv(struct spi_offload_trigger *trigger) +{ + return trigger->priv; +} +EXPORT_SYMBOL_GPL(spi_offload_trigger_get_priv); diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 29c616e2c408..70bb74b3bd9c 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -134,6 +134,7 @@ struct omap2_mcspi { size_t max_xfer_len; u32 ref_clk_hz; bool use_multi_mode; + bool last_msg_kept_cs; }; struct omap2_mcspi_cs { @@ -1269,6 +1270,10 @@ static int omap2_mcspi_prepare_message(struct spi_controller *ctlr, * multi-mode is applicable. */ mcspi->use_multi_mode = true; + + if (mcspi->last_msg_kept_cs) + mcspi->use_multi_mode = false; + list_for_each_entry(tr, &msg->transfers, transfer_list) { if (!tr->bits_per_word) bits_per_word = msg->spi->bits_per_word; @@ -1287,18 +1292,19 @@ static int omap2_mcspi_prepare_message(struct spi_controller *ctlr, mcspi->use_multi_mode = false; } - /* Check if transfer asks to change the CS status after the transfer */ - if (!tr->cs_change) - mcspi->use_multi_mode = false; - - /* - * If at least one message is not compatible, switch back to single mode - * - * The bits_per_word of certain transfer can be different, but it will have no - * impact on the signal itself. 
- */ - if (!mcspi->use_multi_mode) - break; + if (list_is_last(&tr->transfer_list, &msg->transfers)) { + /* Check if transfer asks to keep the CS status after the whole message */ + if (tr->cs_change) { + mcspi->use_multi_mode = false; + mcspi->last_msg_kept_cs = true; + } else { + mcspi->last_msg_kept_cs = false; + } + } else { + /* Check if transfer asks to change the CS status after the transfer */ + if (!tr->cs_change) + mcspi->use_multi_mode = false; + } } omap2_mcspi_set_mode(ctlr); diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 903d76145272..06711a62fa3d 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -73,8 +73,9 @@ struct chip_data { #define LPSS_CAPS_CS_EN_MASK (0xf << LPSS_CAPS_CS_EN_SHIFT) #define LPSS_PRIV_CLOCK_GATE 0x38 -#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK 0x3 -#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON 0x3 +#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK 0x3 +#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON 0x3 +#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_OFF 0x0 struct lpss_config { /* LPSS offset from drv_data->ioaddr */ @@ -321,6 +322,20 @@ static void __lpss_ssp_write_priv(struct driver_data *drv_data, writel(value, drv_data->lpss_base + offset); } +static bool __lpss_ssp_update_priv(struct driver_data *drv_data, unsigned int offset, + u32 mask, u32 value) +{ + u32 new, curr; + + curr = __lpss_ssp_read_priv(drv_data, offset); + new = (curr & ~mask) | (value & mask); + if (new == curr) + return false; + + __lpss_ssp_write_priv(drv_data, offset, new); + return true; +} + /* * lpss_ssp_setup - perform LPSS SSP specific setup * @drv_data: pointer to the driver private data @@ -337,21 +352,16 @@ static void lpss_ssp_setup(struct driver_data *drv_data) drv_data->lpss_base = drv_data->ssp->mmio_base + config->offset; /* Enable software chip select control */ - value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl); - value &= ~(LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH); - value |= LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH; - __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value); + value = LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH; + __lpss_ssp_update_priv(drv_data, config->reg_cs_ctrl, value, value); /* Enable multiblock DMA transfers */ if (drv_data->controller_info->enable_dma) { - __lpss_ssp_write_priv(drv_data, config->reg_ssp, 1); + __lpss_ssp_update_priv(drv_data, config->reg_ssp, BIT(0), BIT(0)); if (config->reg_general >= 0) { - value = __lpss_ssp_read_priv(drv_data, - config->reg_general); - value |= LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE; - __lpss_ssp_write_priv(drv_data, - config->reg_general, value); + value = LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE; + __lpss_ssp_update_priv(drv_data, config->reg_general, value, value); } } } @@ -361,30 +371,19 @@ static void lpss_ssp_select_cs(struct spi_device *spi, { struct driver_data *drv_data = spi_controller_get_devdata(spi->controller); - u32 value, cs; + u32 cs; - if (!config->cs_sel_mask) + cs = spi_get_chipselect(spi, 0) << config->cs_sel_shift; + if (!__lpss_ssp_update_priv(drv_data, config->reg_cs_ctrl, config->cs_sel_mask, cs)) return; - value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl); - - cs = spi_get_chipselect(spi, 0); - cs <<= config->cs_sel_shift; - if (cs != (value & config->cs_sel_mask)) { - /* - * When switching another chip select output active the - * output must be selected first and wait 2 ssp_clk cycles - * before changing state to active. 
Otherwise a short - * glitch will occur on the previous chip select since - * output select is latched but state control is not. - */ - value &= ~config->cs_sel_mask; - value |= cs; - __lpss_ssp_write_priv(drv_data, - config->reg_cs_ctrl, value); - ndelay(1000000000 / - (drv_data->controller->max_speed_hz / 2)); - } + /* + * When switching another chip select output active the output must be + * selected first and wait 2 ssp_clk cycles before changing state to + * active. Otherwise a short glitch will occur on the previous chip + * select since output select is latched but state control is not. + */ + ndelay(1000000000 / (drv_data->controller->max_speed_hz / 2)); } static void lpss_ssp_cs_control(struct spi_device *spi, bool enable) @@ -392,34 +391,27 @@ static void lpss_ssp_cs_control(struct spi_device *spi, bool enable) struct driver_data *drv_data = spi_controller_get_devdata(spi->controller); const struct lpss_config *config; - u32 value; + u32 mask; config = lpss_get_config(drv_data); if (enable) lpss_ssp_select_cs(spi, config); - value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl); - if (enable) - value &= ~LPSS_CS_CONTROL_CS_HIGH; - else - value |= LPSS_CS_CONTROL_CS_HIGH; - __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value); + mask = LPSS_CS_CONTROL_CS_HIGH; + __lpss_ssp_update_priv(drv_data, config->reg_cs_ctrl, mask, enable ? 0 : mask); if (config->cs_clk_stays_gated) { - u32 clkgate; - /* * Changing CS alone when dynamic clock gating is on won't * actually flip CS at that time. This ruins SPI transfers * that specify delays, or have no data. Toggle the clock mode * to force on briefly to poke the CS pin to move. */ - clkgate = __lpss_ssp_read_priv(drv_data, LPSS_PRIV_CLOCK_GATE); - value = (clkgate & ~LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK) | - LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON; - - __lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, value); - __lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, clkgate); + mask = LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK; + if (__lpss_ssp_update_priv(drv_data, LPSS_PRIV_CLOCK_GATE, mask, + LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON)) + __lpss_ssp_update_priv(drv_data, LPSS_PRIV_CLOCK_GATE, mask, + LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_OFF); } } diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c new file mode 100644 index 000000000000..80856d2fa35c --- /dev/null +++ b/drivers/spi/spi-qpic-snand.c @@ -0,0 +1,1681 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ * + * Authors: + * Md Sadre Alam <quic_mdalam@quicinc.com> + * Sricharan R <quic_srichara@quicinc.com> + * Varadarajan Narayanan <quic_varada@quicinc.com> + */ +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/dma/qcom_adm.h> +#include <linux/dma/qcom_bam_dma.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/mtd/nand-qpic-common.h> +#include <linux/mtd/spinand.h> +#include <linux/bitfield.h> + +#define NAND_FLASH_SPI_CFG 0xc0 +#define NAND_NUM_ADDR_CYCLES 0xc4 +#define NAND_BUSY_CHECK_WAIT_CNT 0xc8 +#define NAND_FLASH_FEATURES 0xf64 + +/* QSPI NAND config reg bits */ +#define LOAD_CLK_CNTR_INIT_EN BIT(28) +#define CLK_CNTR_INIT_VAL_VEC 0x924 +#define CLK_CNTR_INIT_VAL_VEC_MASK GENMASK(27, 16) +#define FEA_STATUS_DEV_ADDR 0xc0 +#define FEA_STATUS_DEV_ADDR_MASK GENMASK(15, 8) +#define SPI_CFG BIT(0) +#define SPI_NUM_ADDR 0xDA4DB +#define SPI_WAIT_CNT 0x10 +#define QPIC_QSPI_NUM_CS 1 +#define SPI_TRANSFER_MODE_x1 BIT(29) +#define SPI_TRANSFER_MODE_x4 (3 << 29) +#define SPI_WP BIT(28) +#define SPI_HOLD BIT(27) +#define QPIC_SET_FEATURE BIT(31) + +#define SPINAND_RESET 0xff +#define SPINAND_READID 0x9f +#define SPINAND_GET_FEATURE 0x0f +#define SPINAND_SET_FEATURE 0x1f +#define SPINAND_READ 0x13 +#define SPINAND_ERASE 0xd8 +#define SPINAND_WRITE_EN 0x06 +#define SPINAND_PROGRAM_EXECUTE 0x10 +#define SPINAND_PROGRAM_LOAD 0x84 + +#define ACC_FEATURE 0xe +#define BAD_BLOCK_MARKER_SIZE 0x2 +#define OOB_BUF_SIZE 128 +#define ecceng_to_qspi(eng) container_of(eng, struct qpic_spi_nand, ecc_eng) + +struct qpic_snand_op { + u32 cmd_reg; + u32 addr1_reg; + u32 addr2_reg; +}; + +struct snandc_read_status { + __le32 snandc_flash; + __le32 snandc_buffer; + __le32 snandc_erased_cw; +}; + +/* + * ECC state struct + * @corrected: ECC corrected + * @bitflips: Max bit flip + * @failed: ECC failed + */ +struct qcom_ecc_stats { + u32 corrected; + u32 bitflips; + u32 failed; +}; + +struct qpic_ecc { + struct device *dev; + int ecc_bytes_hw; + int spare_bytes; + int bbm_size; + int ecc_mode; + int bytes; + int steps; + int step_size; + int strength; + int cw_size; + int cw_data; + u32 cfg0; + u32 cfg1; + u32 cfg0_raw; + u32 cfg1_raw; + u32 ecc_buf_cfg; + u32 ecc_bch_cfg; + u32 clrflashstatus; + u32 clrreadstatus; + bool bch_enabled; +}; + +struct qpic_spi_nand { + struct qcom_nand_controller *snandc; + struct spi_controller *ctlr; + struct mtd_info *mtd; + struct clk *iomacro_clk; + struct qpic_ecc *ecc; + struct qcom_ecc_stats ecc_stats; + struct nand_ecc_engine ecc_eng; + u8 *data_buf; + u8 *oob_buf; + u32 wlen; + __le32 addr1; + __le32 addr2; + __le32 cmd; + u32 num_cw; + bool oob_rw; + bool page_rw; + bool raw_rw; +}; + +static void qcom_spi_set_read_loc_first(struct qcom_nand_controller *snandc, + int reg, int cw_offset, int read_size, + int is_last_read_loc) +{ + __le32 locreg_val; + u32 val = (((cw_offset) << READ_LOCATION_OFFSET) | + ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc) + << READ_LOCATION_LAST)); + + locreg_val = cpu_to_le32(val); + + if (reg == NAND_READ_LOCATION_0) + snandc->regs->read_location0 = locreg_val; + else if (reg == NAND_READ_LOCATION_1) + snandc->regs->read_location1 = locreg_val; + else if (reg == NAND_READ_LOCATION_2) + snandc->regs->read_location2 = locreg_val; + else if (reg == NAND_READ_LOCATION_3) + snandc->regs->read_location3 = locreg_val; +} + +static void 
qcom_spi_set_read_loc_last(struct qcom_nand_controller *snandc, + int reg, int cw_offset, int read_size, + int is_last_read_loc) +{ + __le32 locreg_val; + u32 val = (((cw_offset) << READ_LOCATION_OFFSET) | + ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc) + << READ_LOCATION_LAST)); + + locreg_val = cpu_to_le32(val); + + if (reg == NAND_READ_LOCATION_LAST_CW_0) + snandc->regs->read_location_last0 = locreg_val; + else if (reg == NAND_READ_LOCATION_LAST_CW_1) + snandc->regs->read_location_last1 = locreg_val; + else if (reg == NAND_READ_LOCATION_LAST_CW_2) + snandc->regs->read_location_last2 = locreg_val; + else if (reg == NAND_READ_LOCATION_LAST_CW_3) + snandc->regs->read_location_last3 = locreg_val; +} + +static struct qcom_nand_controller *nand_to_qcom_snand(struct nand_device *nand) +{ + struct nand_ecc_engine *eng = nand->ecc.engine; + struct qpic_spi_nand *qspi = ecceng_to_qspi(eng); + + return qspi->snandc; +} + +static int qcom_spi_init(struct qcom_nand_controller *snandc) +{ + u32 snand_cfg_val = 0x0; + int ret; + + snand_cfg_val = FIELD_PREP(CLK_CNTR_INIT_VAL_VEC_MASK, CLK_CNTR_INIT_VAL_VEC) | + FIELD_PREP(LOAD_CLK_CNTR_INIT_EN, 0) | + FIELD_PREP(FEA_STATUS_DEV_ADDR_MASK, FEA_STATUS_DEV_ADDR) | + FIELD_PREP(SPI_CFG, 0); + + snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val); + snandc->regs->num_addr_cycle = cpu_to_le32(SPI_NUM_ADDR); + snandc->regs->busy_wait_cnt = cpu_to_le32(SPI_WAIT_CNT); + + qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0); + + snand_cfg_val &= ~LOAD_CLK_CNTR_INIT_EN; + snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val); + + qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0); + + qcom_write_reg_dma(snandc, &snandc->regs->num_addr_cycle, NAND_NUM_ADDR_CYCLES, 1, 0); + qcom_write_reg_dma(snandc, &snandc->regs->busy_wait_cnt, NAND_BUSY_CHECK_WAIT_CNT, 1, + NAND_BAM_NEXT_SGL); + + ret = qcom_submit_descs(snandc); + if (ret) { + dev_err(snandc->dev, "failure in submitting spi init descriptor\n"); + return ret; + } + + return ret; +} + +static int qcom_spi_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_device *nand = mtd_to_nanddev(mtd); + struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); + struct qpic_ecc *qecc = snandc->qspi->ecc; + + if (section > 1) + return -ERANGE; + + oobregion->length = qecc->ecc_bytes_hw + qecc->spare_bytes; + oobregion->offset = mtd->oobsize - oobregion->length; + + return 0; +} + +static int qcom_spi_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_device *nand = mtd_to_nanddev(mtd); + struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); + struct qpic_ecc *qecc = snandc->qspi->ecc; + + if (section) + return -ERANGE; + + oobregion->length = qecc->steps * 4; + oobregion->offset = ((qecc->steps - 1) * qecc->bytes) + qecc->bbm_size; + + return 0; +} + +static const struct mtd_ooblayout_ops qcom_spi_ooblayout = { + .ecc = qcom_spi_ooblayout_ecc, + .free = qcom_spi_ooblayout_free, +}; + +static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand) +{ + struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); + struct nand_ecc_props *reqs = &nand->ecc.requirements; + struct nand_ecc_props *user = &nand->ecc.user_conf; + struct nand_ecc_props *conf = &nand->ecc.ctx.conf; + struct mtd_info *mtd = nanddev_to_mtd(nand); + int cwperpage, bad_block_byte, ret; + struct qpic_ecc *ecc_cfg; + + cwperpage = mtd->writesize / NANDC_STEP_SIZE; + 
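+	/*
+	 * One codeword per 512-byte (NANDC_STEP_SIZE) ECC step: a 2048-byte
+	 * page is handled as four codewords, for example.
+	 */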
snandc->qspi->num_cw = cwperpage; + + ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL); + if (!ecc_cfg) + return -ENOMEM; + + if (user->step_size && user->strength) { + ecc_cfg->step_size = user->step_size; + ecc_cfg->strength = user->strength; + } else if (reqs->step_size && reqs->strength) { + ecc_cfg->step_size = reqs->step_size; + ecc_cfg->strength = reqs->strength; + } else { + /* use defaults */ + ecc_cfg->step_size = NANDC_STEP_SIZE; + ecc_cfg->strength = 4; + } + + if (ecc_cfg->step_size != NANDC_STEP_SIZE) { + dev_err(snandc->dev, + "only %u bytes ECC step size is supported\n", + NANDC_STEP_SIZE); + ret = -EOPNOTSUPP; + goto err_free_ecc_cfg; + } + + if (ecc_cfg->strength != 4) { + dev_err(snandc->dev, + "only 4 bits ECC strength is supported\n"); + ret = -EOPNOTSUPP; + goto err_free_ecc_cfg; + } + + snandc->qspi->oob_buf = kmalloc(mtd->writesize + mtd->oobsize, + GFP_KERNEL); + if (!snandc->qspi->oob_buf) { + ret = -ENOMEM; + goto err_free_ecc_cfg; + } + + memset(snandc->qspi->oob_buf, 0xff, mtd->writesize + mtd->oobsize); + + nand->ecc.ctx.priv = ecc_cfg; + snandc->qspi->mtd = mtd; + + ecc_cfg->ecc_bytes_hw = 7; + ecc_cfg->spare_bytes = 4; + ecc_cfg->bbm_size = 1; + ecc_cfg->bch_enabled = true; + ecc_cfg->bytes = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes + ecc_cfg->bbm_size; + + ecc_cfg->steps = 4; + ecc_cfg->cw_data = 516; + ecc_cfg->cw_size = ecc_cfg->cw_data + ecc_cfg->bytes; + bad_block_byte = mtd->writesize - ecc_cfg->cw_size * (cwperpage - 1) + 1; + + mtd_set_ooblayout(mtd, &qcom_spi_ooblayout); + + /* + * Free the temporary BAM transaction allocated initially by + * qcom_nandc_alloc(), and allocate a new one based on the + * updated max_cwperpage value. + */ + qcom_free_bam_transaction(snandc); + + snandc->max_cwperpage = cwperpage; + + snandc->bam_txn = qcom_alloc_bam_transaction(snandc); + if (!snandc->bam_txn) { + dev_err(snandc->dev, "failed to allocate BAM transaction\n"); + ret = -ENOMEM; + goto err_free_ecc_cfg; + } + + ecc_cfg->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) | + FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_data) | + FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 1) | + FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) | + FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, ecc_cfg->ecc_bytes_hw) | + FIELD_PREP(STATUS_BFR_READ, 0) | + FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) | + FIELD_PREP(SPARE_SIZE_BYTES_MASK, ecc_cfg->spare_bytes); + + ecc_cfg->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) | + FIELD_PREP(CS_ACTIVE_BSY, 0) | + FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) | + FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) | + FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) | + FIELD_PREP(WIDE_FLASH, 0) | + FIELD_PREP(ENABLE_BCH_ECC, ecc_cfg->bch_enabled); + + ecc_cfg->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) | + FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) | + FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_size) | + FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0); + + ecc_cfg->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) | + FIELD_PREP(CS_ACTIVE_BSY, 0) | + FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) | + FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) | + FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) | + FIELD_PREP(WIDE_FLASH, 0) | + FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1); + + ecc_cfg->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !ecc_cfg->bch_enabled) | + FIELD_PREP(ECC_SW_RESET, 0) | + FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, ecc_cfg->cw_data) | + FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) | + FIELD_PREP(ECC_MODE_MASK, 0) | + FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, ecc_cfg->ecc_bytes_hw); + + ecc_cfg->ecc_buf_cfg = 0x203 << 
NUM_STEPS; + ecc_cfg->clrflashstatus = FS_READY_BSY_N; + ecc_cfg->clrreadstatus = 0xc0; + + conf->step_size = ecc_cfg->step_size; + conf->strength = ecc_cfg->strength; + + snandc->regs->erased_cw_detect_cfg_clr = cpu_to_le32(CLR_ERASED_PAGE_DET); + snandc->regs->erased_cw_detect_cfg_set = cpu_to_le32(SET_ERASED_PAGE_DET); + + dev_dbg(snandc->dev, "ECC strength: %u bits per %u bytes\n", + ecc_cfg->strength, ecc_cfg->step_size); + + return 0; + +err_free_ecc_cfg: + kfree(ecc_cfg); + return ret; +} + +static void qcom_spi_ecc_cleanup_ctx_pipelined(struct nand_device *nand) +{ + struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand); + + kfree(ecc_cfg); +} + +static int qcom_spi_ecc_prepare_io_req_pipelined(struct nand_device *nand, + struct nand_page_io_req *req) +{ + struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); + struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand); + + snandc->qspi->ecc = ecc_cfg; + snandc->qspi->raw_rw = false; + snandc->qspi->oob_rw = false; + snandc->qspi->page_rw = false; + + if (req->datalen) + snandc->qspi->page_rw = true; + + if (req->ooblen) + snandc->qspi->oob_rw = true; + + if (req->mode == MTD_OPS_RAW) + snandc->qspi->raw_rw = true; + + return 0; +} + +static int qcom_spi_ecc_finish_io_req_pipelined(struct nand_device *nand, + struct nand_page_io_req *req) +{ + struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); + struct mtd_info *mtd = nanddev_to_mtd(nand); + + if (req->mode == MTD_OPS_RAW || req->type != NAND_PAGE_READ) + return 0; + + if (snandc->qspi->ecc_stats.failed) + mtd->ecc_stats.failed += snandc->qspi->ecc_stats.failed; + else + mtd->ecc_stats.corrected += snandc->qspi->ecc_stats.corrected; + + if (snandc->qspi->ecc_stats.failed) + return -EBADMSG; + else + return snandc->qspi->ecc_stats.bitflips; +} + +static struct nand_ecc_engine_ops qcom_spi_ecc_engine_ops_pipelined = { + .init_ctx = qcom_spi_ecc_init_ctx_pipelined, + .cleanup_ctx = qcom_spi_ecc_cleanup_ctx_pipelined, + .prepare_io_req = qcom_spi_ecc_prepare_io_req_pipelined, + .finish_io_req = qcom_spi_ecc_finish_io_req_pipelined, +}; + +/* helper to configure location register values */ +static void qcom_spi_set_read_loc(struct qcom_nand_controller *snandc, int cw, int reg, + int cw_offset, int read_size, int is_last_read_loc) +{ + int reg_base = NAND_READ_LOCATION_0; + int num_cw = snandc->qspi->num_cw; + + if (cw == (num_cw - 1)) + reg_base = NAND_READ_LOCATION_LAST_CW_0; + + reg_base += reg * 4; + + if (cw == (num_cw - 1)) + return qcom_spi_set_read_loc_last(snandc, reg_base, cw_offset, + read_size, is_last_read_loc); + else + return qcom_spi_set_read_loc_first(snandc, reg_base, cw_offset, + read_size, is_last_read_loc); +} + +static void +qcom_spi_config_cw_read(struct qcom_nand_controller *snandc, bool use_ecc, int cw) +{ + __le32 *reg = &snandc->regs->read_location0; + int num_cw = snandc->qspi->num_cw; + + qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL); + if (cw == (num_cw - 1)) { + reg = &snandc->regs->read_location_last0; + qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4, + NAND_BAM_NEXT_SGL); + } + + qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); + qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + + qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 2, 0); + qcom_read_reg_dma(snandc, NAND_ERASED_CW_DETECT_STATUS, 1, + NAND_BAM_NEXT_SGL); +} + +static int qcom_spi_block_erase(struct qcom_nand_controller *snandc) +{ + struct qpic_ecc *ecc_cfg = 
snandc->qspi->ecc; + int ret; + + snandc->buf_count = 0; + snandc->buf_start = 0; + qcom_clear_read_regs(snandc); + qcom_clear_bam_transaction(snandc); + + snandc->regs->cmd = snandc->qspi->cmd; + snandc->regs->addr0 = snandc->qspi->addr1; + snandc->regs->addr1 = snandc->qspi->addr2; + snandc->regs->cfg0 = cpu_to_le32(ecc_cfg->cfg0_raw & ~(7 << CW_PER_PAGE)); + snandc->regs->cfg1 = cpu_to_le32(ecc_cfg->cfg1_raw); + snandc->regs->exec = cpu_to_le32(1); + + qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL); + qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL); + qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + + ret = qcom_submit_descs(snandc); + if (ret) { + dev_err(snandc->dev, "failure to erase block\n"); + return ret; + } + + return 0; +} + +static void qcom_spi_config_single_cw_page_read(struct qcom_nand_controller *snandc, + bool use_ecc, int cw) +{ + __le32 *reg = &snandc->regs->read_location0; + int num_cw = snandc->qspi->num_cw; + + qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0); + qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr, + NAND_ERASED_CW_DETECT_CFG, 1, 0); + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set, + NAND_ERASED_CW_DETECT_CFG, 1, + NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); + + if (cw == (num_cw - 1)) { + reg = &snandc->regs->read_location_last0; + qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4, NAND_BAM_NEXT_SGL); + } + qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); + qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + + qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, 0); +} + +static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; + struct mtd_info *mtd = snandc->qspi->mtd; + int size, ret = 0; + int col, bbpos; + u32 cfg0, cfg1, ecc_bch_cfg; + u32 num_cw = snandc->qspi->num_cw; + + qcom_clear_bam_transaction(snandc); + qcom_clear_read_regs(snandc); + + size = ecc_cfg->cw_size; + col = ecc_cfg->cw_size * (num_cw - 1); + + memset(snandc->data_buffer, 0xff, size); + snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col)); + snandc->regs->addr1 = snandc->qspi->addr2; + + cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) | + 0 << CW_PER_PAGE; + cfg1 = ecc_cfg->cfg1_raw; + ecc_bch_cfg = ECC_CFG_ECC_DISABLE; + + snandc->regs->cmd = snandc->qspi->cmd; + snandc->regs->cfg0 = cpu_to_le32(cfg0); + snandc->regs->cfg1 = cpu_to_le32(cfg1); + snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); + snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus); + snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus); + snandc->regs->exec = cpu_to_le32(1); + + qcom_spi_set_read_loc(snandc, num_cw - 1, 0, 0, ecc_cfg->cw_size, 1); + + qcom_spi_config_single_cw_page_read(snandc, false, num_cw - 1); + + qcom_read_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, size, 0); + + ret = qcom_submit_descs(snandc); + if (ret) { + dev_err(snandc->dev, "failed to read last cw\n"); + return ret; + } + + qcom_nandc_dev_to_mem(snandc, true); + u32 flash = le32_to_cpu(snandc->reg_read_buf[0]); + + if (flash & (FS_OP_ERR | FS_MPU_ERR)) + return -EIO; + + bbpos = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1); + + if (snandc->data_buffer[bbpos] == 
0xff) + snandc->data_buffer[bbpos + 1] = 0xff; + if (snandc->data_buffer[bbpos] != 0xff) + snandc->data_buffer[bbpos + 1] = snandc->data_buffer[bbpos]; + + memcpy(op->data.buf.in, snandc->data_buffer + bbpos, op->data.nbytes); + + return ret; +} + +static int qcom_spi_check_error(struct qcom_nand_controller *snandc, u8 *data_buf, u8 *oob_buf) +{ + struct snandc_read_status *buf; + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; + int i, num_cw = snandc->qspi->num_cw; + bool flash_op_err = false, erased; + unsigned int max_bitflips = 0; + unsigned int uncorrectable_cws = 0; + + snandc->qspi->ecc_stats.failed = 0; + snandc->qspi->ecc_stats.corrected = 0; + + qcom_nandc_dev_to_mem(snandc, true); + buf = (struct snandc_read_status *)snandc->reg_read_buf; + + for (i = 0; i < num_cw; i++, buf++) { + u32 flash, buffer, erased_cw; + int data_len, oob_len; + + if (i == (num_cw - 1)) { + data_len = NANDC_STEP_SIZE - ((num_cw - 1) << 2); + oob_len = num_cw << 2; + } else { + data_len = ecc_cfg->cw_data; + oob_len = 0; + } + + flash = le32_to_cpu(buf->snandc_flash); + buffer = le32_to_cpu(buf->snandc_buffer); + erased_cw = le32_to_cpu(buf->snandc_erased_cw); + + if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) { + if (ecc_cfg->bch_enabled) + erased = (erased_cw & ERASED_CW) == ERASED_CW; + else + erased = false; + + if (!erased) + uncorrectable_cws |= BIT(i); + + } else if (flash & (FS_OP_ERR | FS_MPU_ERR)) { + flash_op_err = true; + } else { + unsigned int stat; + + stat = buffer & BS_CORRECTABLE_ERR_MSK; + snandc->qspi->ecc_stats.corrected += stat; + max_bitflips = max(max_bitflips, stat); + } + + if (data_buf) + data_buf += data_len; + if (oob_buf) + oob_buf += oob_len + ecc_cfg->bytes; + } + + if (flash_op_err) + return -EIO; + + if (!uncorrectable_cws) + snandc->qspi->ecc_stats.bitflips = max_bitflips; + else + snandc->qspi->ecc_stats.failed++; + + return 0; +} + +static int qcom_spi_check_raw_flash_errors(struct qcom_nand_controller *snandc, int cw_cnt) +{ + int i; + + qcom_nandc_dev_to_mem(snandc, true); + + for (i = 0; i < cw_cnt; i++) { + u32 flash = le32_to_cpu(snandc->reg_read_buf[i]); + + if (flash & (FS_OP_ERR | FS_MPU_ERR)) + return -EIO; + } + + return 0; +} + +static int qcom_spi_read_cw_raw(struct qcom_nand_controller *snandc, u8 *data_buf, + u8 *oob_buf, int cw) +{ + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; + struct mtd_info *mtd = snandc->qspi->mtd; + int data_size1, data_size2, oob_size1, oob_size2; + int ret, reg_off = FLASH_BUF_ACC, read_loc = 0; + int raw_cw = cw; + u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw; + int col; + + snandc->buf_count = 0; + snandc->buf_start = 0; + qcom_clear_read_regs(snandc); + qcom_clear_bam_transaction(snandc); + raw_cw = num_cw - 1; + + cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) | + 0 << CW_PER_PAGE; + cfg1 = ecc_cfg->cfg1_raw; + ecc_bch_cfg = ECC_CFG_ECC_DISABLE; + + col = ecc_cfg->cw_size * cw; + + snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col)); + snandc->regs->addr1 = snandc->qspi->addr2; + snandc->regs->cmd = snandc->qspi->cmd; + snandc->regs->cfg0 = cpu_to_le32(cfg0); + snandc->regs->cfg1 = cpu_to_le32(cfg1); + snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); + snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus); + snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus); + snandc->regs->exec = cpu_to_le32(1); + + qcom_spi_set_read_loc(snandc, raw_cw, 0, 0, ecc_cfg->cw_size, 1); + + qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0); + 
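+	/*
+	 * Raw access: the cfg0/cfg1/ecc_bch_cfg values queued next were set
+	 * up above with BCH ECC disabled.
+	 */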
qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); + qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0); + + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr, + NAND_ERASED_CW_DETECT_CFG, 1, 0); + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set, + NAND_ERASED_CW_DETECT_CFG, 1, + NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); + + data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1); + oob_size1 = ecc_cfg->bbm_size; + + if (cw == (num_cw - 1)) { + data_size2 = NANDC_STEP_SIZE - data_size1 - + ((num_cw - 1) * 4); + oob_size2 = (num_cw * 4) + ecc_cfg->ecc_bytes_hw + + ecc_cfg->spare_bytes; + } else { + data_size2 = ecc_cfg->cw_data - data_size1; + oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes; + } + + qcom_spi_set_read_loc(snandc, cw, 0, read_loc, data_size1, 0); + read_loc += data_size1; + + qcom_spi_set_read_loc(snandc, cw, 1, read_loc, oob_size1, 0); + read_loc += oob_size1; + + qcom_spi_set_read_loc(snandc, cw, 2, read_loc, data_size2, 0); + read_loc += data_size2; + + qcom_spi_set_read_loc(snandc, cw, 3, read_loc, oob_size2, 1); + + qcom_spi_config_cw_read(snandc, false, raw_cw); + + qcom_read_data_dma(snandc, reg_off, data_buf, data_size1, 0); + reg_off += data_size1; + + qcom_read_data_dma(snandc, reg_off, oob_buf, oob_size1, 0); + reg_off += oob_size1; + + qcom_read_data_dma(snandc, reg_off, data_buf + data_size1, data_size2, 0); + reg_off += data_size2; + + qcom_read_data_dma(snandc, reg_off, oob_buf + oob_size1, oob_size2, 0); + + ret = qcom_submit_descs(snandc); + if (ret) { + dev_err(snandc->dev, "failure to read raw cw %d\n", cw); + return ret; + } + + return qcom_spi_check_raw_flash_errors(snandc, 1); +} + +static int qcom_spi_read_page_raw(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; + u8 *data_buf = NULL, *oob_buf = NULL; + int ret, cw; + u32 num_cw = snandc->qspi->num_cw; + + if (snandc->qspi->page_rw) + data_buf = op->data.buf.in; + + oob_buf = snandc->qspi->oob_buf; + memset(oob_buf, 0xff, OOB_BUF_SIZE); + + for (cw = 0; cw < num_cw; cw++) { + ret = qcom_spi_read_cw_raw(snandc, data_buf, oob_buf, cw); + if (ret) + return ret; + + if (data_buf) + data_buf += ecc_cfg->cw_data; + if (oob_buf) + oob_buf += ecc_cfg->bytes; + } + + return 0; +} + +static int qcom_spi_read_page_ecc(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; + u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start; + int ret, i; + u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw; + + data_buf = op->data.buf.in; + data_buf_start = data_buf; + + oob_buf = snandc->qspi->oob_buf; + oob_buf_start = oob_buf; + + snandc->buf_count = 0; + snandc->buf_start = 0; + qcom_clear_read_regs(snandc); + + cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) | + (num_cw - 1) << CW_PER_PAGE; + cfg1 = ecc_cfg->cfg1; + ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; + + snandc->regs->addr0 = snandc->qspi->addr1; + snandc->regs->addr1 = snandc->qspi->addr2; + snandc->regs->cmd = snandc->qspi->cmd; + snandc->regs->cfg0 = cpu_to_le32(cfg0); + snandc->regs->cfg1 = cpu_to_le32(cfg1); + snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); + snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus); + snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus); + snandc->regs->exec = cpu_to_le32(1); + + qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1); + + 
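+	/* reset the BAM transaction before queueing the descriptor chain */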
qcom_clear_bam_transaction(snandc); + + qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0); + qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr, + NAND_ERASED_CW_DETECT_CFG, 1, 0); + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set, + NAND_ERASED_CW_DETECT_CFG, 1, + NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); + + for (i = 0; i < num_cw; i++) { + int data_size, oob_size; + + if (i == (num_cw - 1)) { + data_size = 512 - ((num_cw - 1) << 2); + oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw + + ecc_cfg->spare_bytes; + } else { + data_size = ecc_cfg->cw_data; + oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes; + } + + if (data_buf && oob_buf) { + qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 0); + qcom_spi_set_read_loc(snandc, i, 1, data_size, oob_size, 1); + } else if (data_buf) { + qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 1); + } else { + qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1); + } + + qcom_spi_config_cw_read(snandc, true, i); + + if (data_buf) + qcom_read_data_dma(snandc, FLASH_BUF_ACC, data_buf, + data_size, 0); + if (oob_buf) { + int j; + + for (j = 0; j < ecc_cfg->bbm_size; j++) + *oob_buf++ = 0xff; + + qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size, + oob_buf, oob_size, 0); + } + + if (data_buf) + data_buf += data_size; + if (oob_buf) + oob_buf += oob_size; + } + + ret = qcom_submit_descs(snandc); + if (ret) { + dev_err(snandc->dev, "failure to read page\n"); + return ret; + } + + return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start); +} + +static int qcom_spi_read_page_oob(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; + u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start; + int ret, i; + u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw; + + oob_buf = op->data.buf.in; + oob_buf_start = oob_buf; + + data_buf_start = data_buf; + + snandc->buf_count = 0; + snandc->buf_start = 0; + qcom_clear_read_regs(snandc); + qcom_clear_bam_transaction(snandc); + + cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) | + (num_cw - 1) << CW_PER_PAGE; + cfg1 = ecc_cfg->cfg1; + ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; + + snandc->regs->addr0 = snandc->qspi->addr1; + snandc->regs->addr1 = snandc->qspi->addr2; + snandc->regs->cmd = snandc->qspi->cmd; + snandc->regs->cfg0 = cpu_to_le32(cfg0); + snandc->regs->cfg1 = cpu_to_le32(cfg1); + snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); + snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus); + snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus); + snandc->regs->exec = cpu_to_le32(1); + + qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1); + + qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0); + qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr, + NAND_ERASED_CW_DETECT_CFG, 1, 0); + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set, + NAND_ERASED_CW_DETECT_CFG, 1, + NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); + + for (i = 0; i < num_cw; i++) { + int data_size, oob_size; + + if (i == (num_cw - 1)) { + data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2); + oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw + + ecc_cfg->spare_bytes; + } else { + data_size = ecc_cfg->cw_data; + oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes; + } + + 
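+		/* OOB-only read: point the read location past the data bytes */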
qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1); + + qcom_spi_config_cw_read(snandc, true, i); + + if (oob_buf) { + int j; + + for (j = 0; j < ecc_cfg->bbm_size; j++) + *oob_buf++ = 0xff; + + qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size, + oob_buf, oob_size, 0); + } + + if (oob_buf) + oob_buf += oob_size; + } + + ret = qcom_submit_descs(snandc); + if (ret) { + dev_err(snandc->dev, "failure to read oob\n"); + return ret; + } + + return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start); +} + +static int qcom_spi_read_page(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + if (snandc->qspi->page_rw && snandc->qspi->raw_rw) + return qcom_spi_read_page_raw(snandc, op); + + if (snandc->qspi->page_rw) + return qcom_spi_read_page_ecc(snandc, op); + + if (snandc->qspi->oob_rw && snandc->qspi->raw_rw) + return qcom_spi_read_last_cw(snandc, op); + + if (snandc->qspi->oob_rw) + return qcom_spi_read_page_oob(snandc, op); + + return 0; +} + +static void qcom_spi_config_page_write(struct qcom_nand_controller *snandc) +{ + qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0); + qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); + qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, + 1, NAND_BAM_NEXT_SGL); +} + +static void qcom_spi_config_cw_write(struct qcom_nand_controller *snandc) +{ + qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); + qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); + + qcom_write_reg_dma(snandc, &snandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0); + qcom_write_reg_dma(snandc, &snandc->regs->clrreadstatus, NAND_READ_STATUS, 1, + NAND_BAM_NEXT_SGL); +} + +static int qcom_spi_program_raw(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; + struct mtd_info *mtd = snandc->qspi->mtd; + u8 *data_buf = NULL, *oob_buf = NULL; + int i, ret; + int num_cw = snandc->qspi->num_cw; + u32 cfg0, cfg1, ecc_bch_cfg; + + cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) | + (num_cw - 1) << CW_PER_PAGE; + cfg1 = ecc_cfg->cfg1_raw; + ecc_bch_cfg = ECC_CFG_ECC_DISABLE; + + data_buf = snandc->qspi->data_buf; + + oob_buf = snandc->qspi->oob_buf; + memset(oob_buf, 0xff, OOB_BUF_SIZE); + + snandc->buf_count = 0; + snandc->buf_start = 0; + qcom_clear_read_regs(snandc); + qcom_clear_bam_transaction(snandc); + + snandc->regs->addr0 = snandc->qspi->addr1; + snandc->regs->addr1 = snandc->qspi->addr2; + snandc->regs->cmd = snandc->qspi->cmd; + snandc->regs->cfg0 = cpu_to_le32(cfg0); + snandc->regs->cfg1 = cpu_to_le32(cfg1); + snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); + snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus); + snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus); + snandc->regs->exec = cpu_to_le32(1); + + qcom_spi_config_page_write(snandc); + + for (i = 0; i < num_cw; i++) { + int data_size1, data_size2, oob_size1, oob_size2; + int reg_off = FLASH_BUF_ACC; + + data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1); + oob_size1 = ecc_cfg->bbm_size; + + if (i == (num_cw - 1)) { + data_size2 = NANDC_STEP_SIZE - data_size1 - + ((num_cw - 1) << 2); + oob_size2 = (num_cw << 2) + ecc_cfg->ecc_bytes_hw + + ecc_cfg->spare_bytes; + } else { + data_size2 = ecc_cfg->cw_data - data_size1; + oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes; 
+ } + + qcom_write_data_dma(snandc, reg_off, data_buf, data_size1, + NAND_BAM_NO_EOT); + reg_off += data_size1; + data_buf += data_size1; + + qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size1, + NAND_BAM_NO_EOT); + oob_buf += oob_size1; + reg_off += oob_size1; + + qcom_write_data_dma(snandc, reg_off, data_buf, data_size2, + NAND_BAM_NO_EOT); + reg_off += data_size2; + data_buf += data_size2; + + qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size2, 0); + oob_buf += oob_size2; + + qcom_spi_config_cw_write(snandc); + } + + ret = qcom_submit_descs(snandc); + if (ret) { + dev_err(snandc->dev, "failure to write raw page\n"); + return ret; + } + + return 0; +} + +static int qcom_spi_program_ecc(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; + u8 *data_buf = NULL, *oob_buf = NULL; + int i, ret; + int num_cw = snandc->qspi->num_cw; + u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg; + + cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) | + (num_cw - 1) << CW_PER_PAGE; + cfg1 = ecc_cfg->cfg1; + ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; + ecc_buf_cfg = ecc_cfg->ecc_buf_cfg; + + if (snandc->qspi->data_buf) + data_buf = snandc->qspi->data_buf; + + oob_buf = snandc->qspi->oob_buf; + + snandc->buf_count = 0; + snandc->buf_start = 0; + qcom_clear_read_regs(snandc); + qcom_clear_bam_transaction(snandc); + + snandc->regs->addr0 = snandc->qspi->addr1; + snandc->regs->addr1 = snandc->qspi->addr2; + snandc->regs->cmd = snandc->qspi->cmd; + snandc->regs->cfg0 = cpu_to_le32(cfg0); + snandc->regs->cfg1 = cpu_to_le32(cfg1); + snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); + snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg); + snandc->regs->exec = cpu_to_le32(1); + + qcom_spi_config_page_write(snandc); + + for (i = 0; i < num_cw; i++) { + int data_size, oob_size; + + if (i == (num_cw - 1)) { + data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2); + oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw + + ecc_cfg->spare_bytes; + } else { + data_size = ecc_cfg->cw_data; + oob_size = ecc_cfg->bytes; + } + + if (data_buf) + qcom_write_data_dma(snandc, FLASH_BUF_ACC, data_buf, data_size, + i == (num_cw - 1) ? 
NAND_BAM_NO_EOT : 0); + + if (i == (num_cw - 1)) { + if (oob_buf) { + oob_buf += ecc_cfg->bbm_size; + qcom_write_data_dma(snandc, FLASH_BUF_ACC + data_size, + oob_buf, oob_size, 0); + } + } + + qcom_spi_config_cw_write(snandc); + + if (data_buf) + data_buf += data_size; + if (oob_buf) + oob_buf += oob_size; + } + + ret = qcom_submit_descs(snandc); + if (ret) { + dev_err(snandc->dev, "failure to write page\n"); + return ret; + } + + return 0; +} + +static int qcom_spi_program_oob(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; + u8 *oob_buf = NULL; + int ret, col, data_size, oob_size; + int num_cw = snandc->qspi->num_cw; + u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg; + + cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) | + (num_cw - 1) << CW_PER_PAGE; + cfg1 = ecc_cfg->cfg1; + ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; + ecc_buf_cfg = ecc_cfg->ecc_buf_cfg; + + col = ecc_cfg->cw_size * (num_cw - 1); + + oob_buf = snandc->qspi->data_buf; + + snandc->buf_count = 0; + snandc->buf_start = 0; + qcom_clear_read_regs(snandc); + qcom_clear_bam_transaction(snandc); + snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col)); + snandc->regs->addr1 = snandc->qspi->addr2; + snandc->regs->cmd = snandc->qspi->cmd; + snandc->regs->cfg0 = cpu_to_le32(cfg0); + snandc->regs->cfg1 = cpu_to_le32(cfg1); + snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); + snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg); + snandc->regs->exec = cpu_to_le32(1); + + /* calculate the data and oob size for the last codeword/step */ + data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2); + oob_size = snandc->qspi->mtd->oobavail; + + memset(snandc->data_buffer, 0xff, ecc_cfg->cw_data); + /* override new oob content to last codeword */ + mtd_ooblayout_get_databytes(snandc->qspi->mtd, snandc->data_buffer + data_size, + oob_buf, 0, snandc->qspi->mtd->oobavail); + qcom_spi_config_page_write(snandc); + qcom_write_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, data_size + oob_size, 0); + qcom_spi_config_cw_write(snandc); + + ret = qcom_submit_descs(snandc); + if (ret) { + dev_err(snandc->dev, "failure to write oob\n"); + return ret; + } + + return 0; +} + +static int qcom_spi_program_execute(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + if (snandc->qspi->page_rw && snandc->qspi->raw_rw) + return qcom_spi_program_raw(snandc, op); + + if (snandc->qspi->page_rw) + return qcom_spi_program_ecc(snandc, op); + + if (snandc->qspi->oob_rw) + return qcom_spi_program_oob(snandc, op); + + return 0; +} + +static int qcom_spi_cmd_mapping(struct qcom_nand_controller *snandc, u32 opcode, u32 *cmd) +{ + switch (opcode) { + case SPINAND_RESET: + *cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_RESET_DEVICE); + break; + case SPINAND_READID: + *cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_FETCH_ID); + break; + case SPINAND_GET_FEATURE: + *cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE); + break; + case SPINAND_SET_FEATURE: + *cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE | + QPIC_SET_FEATURE); + break; + case SPINAND_READ: + if (snandc->qspi->raw_rw) { + *cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 | + SPI_WP | SPI_HOLD | OP_PAGE_READ); + } else { + *cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 | + SPI_WP | SPI_HOLD | OP_PAGE_READ_WITH_ECC); + } + + break; + case SPINAND_ERASE: + *cmd = OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE | SPI_WP | + SPI_HOLD | SPI_TRANSFER_MODE_x1; + break; + case SPINAND_WRITE_EN: + 
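+		/*
+		 * No dedicated controller opcode here; qcom_spi_send_cmdaddr()
+		 * treats write-enable as a no-op, so the SPI opcode is passed
+		 * through unchanged.
+		 */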
*cmd = SPINAND_WRITE_EN; + break; + case SPINAND_PROGRAM_EXECUTE: + *cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 | + SPI_WP | SPI_HOLD | OP_PROGRAM_PAGE); + break; + case SPINAND_PROGRAM_LOAD: + *cmd = SPINAND_PROGRAM_LOAD; + break; + default: + dev_err(snandc->dev, "Opcode not supported: %u\n", opcode); + return -EOPNOTSUPP; + } + + return 0; +} + +static int qcom_spi_write_page(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + int ret; + u32 cmd; + + ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode, &cmd); + if (ret < 0) + return ret; + + if (op->cmd.opcode == SPINAND_PROGRAM_LOAD) + snandc->qspi->data_buf = (u8 *)op->data.buf.out; + + return 0; +} + +static int qcom_spi_send_cmdaddr(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + struct qpic_snand_op s_op = {}; + u32 cmd; + int ret, opcode; + + ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode, &cmd); + if (ret < 0) + return ret; + + s_op.cmd_reg = cmd; + s_op.addr1_reg = op->addr.val; + s_op.addr2_reg = 0; + + opcode = op->cmd.opcode; + + switch (opcode) { + case SPINAND_WRITE_EN: + return 0; + case SPINAND_PROGRAM_EXECUTE: + s_op.addr1_reg = op->addr.val << 16; + s_op.addr2_reg = op->addr.val >> 16 & 0xff; + snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg); + snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg); + snandc->qspi->cmd = cpu_to_le32(cmd); + return qcom_spi_program_execute(snandc, op); + case SPINAND_READ: + s_op.addr1_reg = (op->addr.val << 16); + s_op.addr2_reg = op->addr.val >> 16 & 0xff; + snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg); + snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg); + snandc->qspi->cmd = cpu_to_le32(cmd); + return 0; + case SPINAND_ERASE: + s_op.addr2_reg = (op->addr.val >> 16) & 0xffff; + s_op.addr1_reg = op->addr.val; + snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg << 16); + snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg); + snandc->qspi->cmd = cpu_to_le32(cmd); + return qcom_spi_block_erase(snandc); + default: + break; + } + + snandc->buf_count = 0; + snandc->buf_start = 0; + qcom_clear_read_regs(snandc); + qcom_clear_bam_transaction(snandc); + + snandc->regs->cmd = cpu_to_le32(s_op.cmd_reg); + snandc->regs->exec = cpu_to_le32(1); + snandc->regs->addr0 = cpu_to_le32(s_op.addr1_reg); + snandc->regs->addr1 = cpu_to_le32(s_op.addr2_reg); + + qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL); + qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + + ret = qcom_submit_descs(snandc); + if (ret) + dev_err(snandc->dev, "failure in submitting cmd descriptor\n"); + + return ret; +} + +static int qcom_spi_io_op(struct qcom_nand_controller *snandc, const struct spi_mem_op *op) +{ + int ret, val, opcode; + bool copy = false, copy_ftr = false; + + ret = qcom_spi_send_cmdaddr(snandc, op); + if (ret) + return ret; + + snandc->buf_count = 0; + snandc->buf_start = 0; + qcom_clear_read_regs(snandc); + qcom_clear_bam_transaction(snandc); + opcode = op->cmd.opcode; + + switch (opcode) { + case SPINAND_READID: + snandc->buf_count = 4; + qcom_read_reg_dma(snandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL); + copy = true; + break; + case SPINAND_GET_FEATURE: + snandc->buf_count = 4; + qcom_read_reg_dma(snandc, NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL); + copy_ftr = true; + break; + case SPINAND_SET_FEATURE: + snandc->regs->flash_feature = cpu_to_le32(*(u32 *)op->data.buf.out); + qcom_write_reg_dma(snandc, &snandc->regs->flash_feature, + NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL); + break; + 
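+	/* these opcodes were already fully handled by qcom_spi_send_cmdaddr() */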
case SPINAND_PROGRAM_EXECUTE: + case SPINAND_WRITE_EN: + case SPINAND_RESET: + case SPINAND_ERASE: + case SPINAND_READ: + return 0; + default: + return -EOPNOTSUPP; + } + + ret = qcom_submit_descs(snandc); + if (ret) + dev_err(snandc->dev, "failure in submitting descriptor for:%d\n", opcode); + + if (copy) { + qcom_nandc_dev_to_mem(snandc, true); + memcpy(op->data.buf.in, snandc->reg_read_buf, snandc->buf_count); + } + + if (copy_ftr) { + qcom_nandc_dev_to_mem(snandc, true); + val = le32_to_cpu(*(__le32 *)snandc->reg_read_buf); + val >>= 8; + memcpy(op->data.buf.in, &val, snandc->buf_count); + } + + return ret; +} + +static bool qcom_spi_is_page_op(const struct spi_mem_op *op) +{ + if (op->addr.buswidth != 1 && op->addr.buswidth != 2 && op->addr.buswidth != 4) + return false; + + if (op->data.dir == SPI_MEM_DATA_IN) { + if (op->addr.buswidth == 4 && op->data.buswidth == 4) + return true; + + if (op->addr.nbytes == 2 && op->addr.buswidth == 1) + return true; + + } else if (op->data.dir == SPI_MEM_DATA_OUT) { + if (op->data.buswidth == 4) + return true; + if (op->addr.nbytes == 2 && op->addr.buswidth == 1) + return true; + } + + return false; +} + +static bool qcom_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + if (!spi_mem_default_supports_op(mem, op)) + return false; + + if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1) + return false; + + if (qcom_spi_is_page_op(op)) + return true; + + return ((!op->addr.nbytes || op->addr.buswidth == 1) && + (!op->dummy.nbytes || op->dummy.buswidth == 1) && + (!op->data.nbytes || op->data.buswidth == 1)); +} + +static int qcom_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + struct qcom_nand_controller *snandc = spi_controller_get_devdata(mem->spi->controller); + + dev_dbg(snandc->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode, + op->addr.val, op->addr.buswidth, op->addr.nbytes, + op->data.buswidth, op->data.nbytes); + + if (qcom_spi_is_page_op(op)) { + if (op->data.dir == SPI_MEM_DATA_IN) + return qcom_spi_read_page(snandc, op); + if (op->data.dir == SPI_MEM_DATA_OUT) + return qcom_spi_write_page(snandc, op); + } else { + return qcom_spi_io_op(snandc, op); + } + + return 0; +} + +static const struct spi_controller_mem_ops qcom_spi_mem_ops = { + .supports_op = qcom_spi_supports_op, + .exec_op = qcom_spi_exec_op, +}; + +static const struct spi_controller_mem_caps qcom_spi_mem_caps = { + .ecc = true, +}; + +static int qcom_spi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct spi_controller *ctlr; + struct qcom_nand_controller *snandc; + struct qpic_spi_nand *qspi; + struct qpic_ecc *ecc; + struct resource *res; + const void *dev_data; + int ret; + + ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); + if (!ecc) + return -ENOMEM; + + qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL); + if (!qspi) + return -ENOMEM; + + ctlr = __devm_spi_alloc_controller(dev, sizeof(*snandc), false); + if (!ctlr) + return -ENOMEM; + + platform_set_drvdata(pdev, ctlr); + + snandc = spi_controller_get_devdata(ctlr); + qspi->snandc = snandc; + + snandc->dev = dev; + snandc->qspi = qspi; + snandc->qspi->ctlr = ctlr; + snandc->qspi->ecc = ecc; + + dev_data = of_device_get_match_data(dev); + if (!dev_data) { + dev_err(&pdev->dev, "failed to get device data\n"); + return -ENODEV; + } + + snandc->props = dev_data; + snandc->dev = &pdev->dev; + + snandc->core_clk = devm_clk_get(dev, "core"); + if (IS_ERR(snandc->core_clk)) + return PTR_ERR(snandc->core_clk); + + snandc->aon_clk = 
devm_clk_get(dev, "aon"); + if (IS_ERR(snandc->aon_clk)) + return PTR_ERR(snandc->aon_clk); + + snandc->qspi->iomacro_clk = devm_clk_get(dev, "iom"); + if (IS_ERR(snandc->qspi->iomacro_clk)) + return PTR_ERR(snandc->qspi->iomacro_clk); + + snandc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(snandc->base)) + return PTR_ERR(snandc->base); + + snandc->base_phys = res->start; + snandc->base_dma = dma_map_resource(dev, res->start, resource_size(res), + DMA_BIDIRECTIONAL, 0); + if (dma_mapping_error(dev, snandc->base_dma)) + return -ENXIO; + + ret = clk_prepare_enable(snandc->core_clk); + if (ret) + goto err_dis_core_clk; + + ret = clk_prepare_enable(snandc->aon_clk); + if (ret) + goto err_dis_aon_clk; + + ret = clk_prepare_enable(snandc->qspi->iomacro_clk); + if (ret) + goto err_dis_iom_clk; + + ret = qcom_nandc_alloc(snandc); + if (ret) + goto err_snand_alloc; + + ret = qcom_spi_init(snandc); + if (ret) + goto err_spi_init; + + /* setup ECC engine */ + snandc->qspi->ecc_eng.dev = &pdev->dev; + snandc->qspi->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED; + snandc->qspi->ecc_eng.ops = &qcom_spi_ecc_engine_ops_pipelined; + snandc->qspi->ecc_eng.priv = snandc; + + ret = nand_ecc_register_on_host_hw_engine(&snandc->qspi->ecc_eng); + if (ret) { + dev_err(&pdev->dev, "failed to register ecc engine:%d\n", ret); + goto err_spi_init; + } + + ctlr->num_chipselect = QPIC_QSPI_NUM_CS; + ctlr->mem_ops = &qcom_spi_mem_ops; + ctlr->mem_caps = &qcom_spi_mem_caps; + ctlr->dev.of_node = pdev->dev.of_node; + ctlr->mode_bits = SPI_TX_DUAL | SPI_RX_DUAL | + SPI_TX_QUAD | SPI_RX_QUAD; + + ret = spi_register_controller(ctlr); + if (ret) { + dev_err(&pdev->dev, "spi_register_controller failed.\n"); + goto err_spi_init; + } + + return 0; + +err_spi_init: + qcom_nandc_unalloc(snandc); +err_snand_alloc: + clk_disable_unprepare(snandc->qspi->iomacro_clk); +err_dis_iom_clk: + clk_disable_unprepare(snandc->aon_clk); +err_dis_aon_clk: + clk_disable_unprepare(snandc->core_clk); +err_dis_core_clk: + dma_unmap_resource(dev, res->start, resource_size(res), + DMA_BIDIRECTIONAL, 0); + return ret; +} + +static void qcom_spi_remove(struct platform_device *pdev) +{ + struct spi_controller *ctlr = platform_get_drvdata(pdev); + struct qcom_nand_controller *snandc = spi_controller_get_devdata(ctlr); + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + spi_unregister_controller(ctlr); + + qcom_nandc_unalloc(snandc); + + clk_disable_unprepare(snandc->aon_clk); + clk_disable_unprepare(snandc->core_clk); + clk_disable_unprepare(snandc->qspi->iomacro_clk); + + dma_unmap_resource(&pdev->dev, snandc->base_dma, resource_size(res), + DMA_BIDIRECTIONAL, 0); +} + +static const struct qcom_nandc_props ipq9574_snandc_props = { + .dev_cmd_reg_start = 0x7000, + .bam_offset = 0x30000, + .supports_bam = true, +}; + +static const struct of_device_id qcom_snandc_of_match[] = { + { + .compatible = "qcom,ipq9574-snand", + .data = &ipq9574_snandc_props, + }, + {} +}; +MODULE_DEVICE_TABLE(of, qcom_snandc_of_match); + +static struct platform_driver qcom_spi_driver = { + .driver = { + .name = "qcom_snand", + .of_match_table = qcom_snandc_of_match, + }, + .probe = qcom_spi_probe, + .remove = qcom_spi_remove, +}; +module_platform_driver(qcom_spi_driver); + +MODULE_DESCRIPTION("SPI driver for QPIC QSPI cores"); +MODULE_AUTHOR("Md Sadre Alam <quic_mdalam@quicinc.com>"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/spi/spi-realtek-rtl-snand.c b/drivers/spi/spi-realtek-rtl-snand.c index 
cd0484041147..741cf2af3e91 100644 --- a/drivers/spi/spi-realtek-rtl-snand.c +++ b/drivers/spi/spi-realtek-rtl-snand.c @@ -364,7 +364,6 @@ static int rtl_snand_probe(struct platform_device *pdev) .reg_bits = 32, .val_bits = 32, .reg_stride = 4, - .cache_type = REGCACHE_NONE, }; int irq, ret; diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c index 70bbb459caa4..f3fe10eddb6a 100644 --- a/drivers/spi/spi-rockchip-sfc.c +++ b/drivers/spi/spi-rockchip-sfc.c @@ -13,12 +13,14 @@ #include <linux/completion.h> #include <linux/dma-mapping.h> #include <linux/iopoll.h> +#include <linux/interrupt.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/slab.h> -#include <linux/interrupt.h> #include <linux/spi/spi-mem.h> /* System control */ @@ -110,6 +112,7 @@ #define SFC_VER_3 0x3 #define SFC_VER_4 0x4 #define SFC_VER_5 0x5 +#define SFC_VER_8 0x8 /* Delay line controller register */ #define SFC_DLL_CTRL0 0x3C @@ -150,16 +153,13 @@ /* Data */ #define SFC_DATA 0x108 -/* The controller and documentation reports that it supports up to 4 CS - * devices (0-3), however I have only been able to test a single CS (CS 0) - * due to the configuration of my device. - */ -#define SFC_MAX_CHIPSELECT_NUM 4 +#define SFC_CS1_REG_OFFSET 0x200 + +#define SFC_MAX_CHIPSELECT_NUM 2 -/* The SFC can transfer max 16KB - 1 at one time - * we set it to 15.5KB here for alignment. - */ #define SFC_MAX_IOSIZE_VER3 (512 * 31) +/* Although up to 4GB, 64KB is enough with less mem reserved */ +#define SFC_MAX_IOSIZE_VER4 (0x10000U) /* DMA is only enabled for large data transmission */ #define SFC_DMA_TRANS_THRETHOLD (0x40) @@ -169,12 +169,14 @@ */ #define SFC_MAX_SPEED (150 * 1000 * 1000) +#define ROCKCHIP_AUTOSUSPEND_DELAY 2000 + struct rockchip_sfc { struct device *dev; void __iomem *regbase; struct clk *hclk; struct clk *clk; - u32 frequency; + u32 speed[SFC_MAX_CHIPSELECT_NUM]; /* virtual mapped addr for dma_buffer */ void *buffer; dma_addr_t dma_buffer; @@ -216,6 +218,22 @@ static u32 rockchip_sfc_get_max_iosize(struct rockchip_sfc *sfc) return SFC_MAX_IOSIZE_VER3; } +static int rockchip_sfc_clk_set_rate(struct rockchip_sfc *sfc, unsigned long speed) +{ + if (sfc->version >= SFC_VER_8) + return clk_set_rate(sfc->clk, speed * 2); + else + return clk_set_rate(sfc->clk, speed); +} + +static unsigned long rockchip_sfc_clk_get_rate(struct rockchip_sfc *sfc) +{ + if (sfc->version >= SFC_VER_8) + return clk_get_rate(sfc->clk) / 2; + else + return clk_get_rate(sfc->clk); +} + static void rockchip_sfc_irq_unmask(struct rockchip_sfc *sfc, u32 mask) { u32 reg; @@ -302,6 +320,7 @@ static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc, u32 len) { u32 ctrl = 0, cmd = 0; + u8 cs = spi_get_chipselect(mem->spi, 0); /* set CMD */ cmd = op->cmd.opcode; @@ -315,7 +334,8 @@ static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc, cmd |= SFC_CMD_ADDR_24BITS << SFC_CMD_ADDR_SHIFT; } else { cmd |= SFC_CMD_ADDR_XBITS << SFC_CMD_ADDR_SHIFT; - writel(op->addr.nbytes * 8 - 1, sfc->regbase + SFC_ABIT); + writel(op->addr.nbytes * 8 - 1, + sfc->regbase + cs * SFC_CS1_REG_OFFSET + SFC_ABIT); } ctrl |= ((op->addr.buswidth >> 1) << SFC_CTRL_ADDR_BITS_SHIFT); @@ -347,7 +367,7 @@ static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc, /* set the Controller */ ctrl |= SFC_CTRL_PHASE_SEL_NEGETIVE; - cmd |= spi_get_chipselect(mem->spi, 0) << SFC_CMD_CS_SHIFT; + cmd |= cs << SFC_CMD_CS_SHIFT; 
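The CTRL and ABIT writes in the hunk above introduce per-chip-select register banking: chip select 1's registers mirror chip select 0's at a fixed SFC_CS1_REG_OFFSET (0x200) stride. A minimal sketch of the addressing rule, using a hypothetical helper name (the driver itself open-codes the arithmetic inline):

static inline void __iomem *
rockchip_sfc_cs_reg(struct rockchip_sfc *sfc, u8 cs, u32 reg)
{
	/* CS0 registers sit at the base; CS1 mirrors them 0x200 higher */
	return sfc->regbase + cs * SFC_CS1_REG_OFFSET + reg;
}

With such a helper, writel(ctrl, rockchip_sfc_cs_reg(sfc, cs, SFC_CTRL)) would be equivalent to the banked write above.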
dev_dbg(sfc->dev, "sfc addr.nbytes=%x(x%d) dummy.nbytes=%x(x%d)\n", op->addr.nbytes, op->addr.buswidth, @@ -355,7 +375,7 @@ static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc, dev_dbg(sfc->dev, "sfc ctrl=%x cmd=%x addr=%llx len=%x\n", ctrl, cmd, op->addr.val, len); - writel(ctrl, sfc->regbase + SFC_CTRL); + writel(ctrl, sfc->regbase + cs * SFC_CS1_REG_OFFSET + SFC_CTRL); writel(cmd, sfc->regbase + SFC_CMD); if (op->addr.nbytes) writel(op->addr.val, sfc->regbase + SFC_ADDR); @@ -453,8 +473,10 @@ static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc, dev_dbg(sfc->dev, "sfc xfer_dma len=%x\n", len); - if (op->data.dir == SPI_MEM_DATA_OUT) + if (op->data.dir == SPI_MEM_DATA_OUT) { memcpy(sfc->buffer, op->data.buf.out, len); + dma_sync_single_for_device(sfc->dev, sfc->dma_buffer, len, DMA_TO_DEVICE); + } ret = rockchip_sfc_fifo_transfer_dma(sfc, sfc->dma_buffer, len); if (!wait_for_completion_timeout(&sfc->cp, msecs_to_jiffies(2000))) { @@ -462,8 +484,11 @@ static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc, ret = -ETIMEDOUT; } rockchip_sfc_irq_mask(sfc, SFC_IMR_DMA); - if (op->data.dir == SPI_MEM_DATA_IN) + + if (op->data.dir == SPI_MEM_DATA_IN) { + dma_sync_single_for_cpu(sfc->dev, sfc->dma_buffer, len, DMA_FROM_DEVICE); memcpy(op->data.buf.in, sfc->buffer, len); + } return ret; } @@ -473,6 +498,16 @@ static int rockchip_sfc_xfer_done(struct rockchip_sfc *sfc, u32 timeout_us) int ret = 0; u32 status; + /* + * There is very little data left in fifo, and the controller will + * complete the transmission in a short period of time. + */ + ret = readl_poll_timeout(sfc->regbase + SFC_SR, status, + !(status & SFC_SR_IS_BUSY), + 0, 10); + if (!ret) + return 0; + ret = readl_poll_timeout(sfc->regbase + SFC_SR, status, !(status & SFC_SR_IS_BUSY), 20, timeout_us); @@ -491,14 +526,22 @@ static int rockchip_sfc_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op struct rockchip_sfc *sfc = spi_controller_get_devdata(mem->spi->controller); u32 len = op->data.nbytes; int ret; + u8 cs = spi_get_chipselect(mem->spi, 0); - if (unlikely(mem->spi->max_speed_hz != sfc->frequency)) { - ret = clk_set_rate(sfc->clk, mem->spi->max_speed_hz); + ret = pm_runtime_get_sync(sfc->dev); + if (ret < 0) { + pm_runtime_put_noidle(sfc->dev); + return ret; + } + + if (unlikely(op->max_freq != sfc->speed[cs]) && + !has_acpi_companion(sfc->dev)) { + ret = rockchip_sfc_clk_set_rate(sfc, op->max_freq); if (ret) - return ret; - sfc->frequency = mem->spi->max_speed_hz; + goto out; + sfc->speed[cs] = op->max_freq; dev_dbg(sfc->dev, "set_freq=%dHz real_freq=%ldHz\n", - sfc->frequency, clk_get_rate(sfc->clk)); + sfc->speed[cs], rockchip_sfc_clk_get_rate(sfc)); } rockchip_sfc_adjust_op_work((struct spi_mem_op *)op); @@ -515,11 +558,17 @@ static int rockchip_sfc_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op if (ret != len) { dev_err(sfc->dev, "xfer data failed ret %d dir %d\n", ret, op->data.dir); - return -EIO; + ret = -EIO; + goto out; } } - return rockchip_sfc_xfer_done(sfc, 100000); + ret = rockchip_sfc_xfer_done(sfc, 100000); +out: + pm_runtime_mark_last_busy(sfc->dev); + pm_runtime_put_autosuspend(sfc->dev); + + return ret; } static int rockchip_sfc_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) @@ -536,6 +585,10 @@ static const struct spi_controller_mem_ops rockchip_sfc_mem_ops = { .adjust_op_size = rockchip_sfc_adjust_op_size, }; +static const struct spi_controller_mem_caps rockchip_sfc_mem_caps = { + .per_op_freq = true, +}; + static irqreturn_t rockchip_sfc_irq_handler(int 
irq, void *dev_id) { struct rockchip_sfc *sfc = dev_id; @@ -561,6 +614,7 @@ static int rockchip_sfc_probe(struct platform_device *pdev) struct spi_controller *host; struct rockchip_sfc *sfc; int ret; + u32 i, val; host = devm_spi_alloc_host(&pdev->dev, sizeof(*sfc)); if (!host) @@ -568,6 +622,7 @@ static int rockchip_sfc_probe(struct platform_device *pdev) host->flags = SPI_CONTROLLER_HALF_DUPLEX; host->mem_ops = &rockchip_sfc_mem_ops; + host->mem_caps = &rockchip_sfc_mem_caps; host->dev.of_node = pdev->dev.of_node; host->mode_bits = SPI_TX_QUAD | SPI_TX_DUAL | SPI_RX_QUAD | SPI_RX_DUAL; host->max_speed_hz = SFC_MAX_SPEED; @@ -581,31 +636,29 @@ static int rockchip_sfc_probe(struct platform_device *pdev) if (IS_ERR(sfc->regbase)) return PTR_ERR(sfc->regbase); - sfc->clk = devm_clk_get(&pdev->dev, "clk_sfc"); + if (!has_acpi_companion(&pdev->dev)) + sfc->clk = devm_clk_get(&pdev->dev, "clk_sfc"); if (IS_ERR(sfc->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(sfc->clk), "Failed to get sfc interface clk\n"); - sfc->hclk = devm_clk_get(&pdev->dev, "hclk_sfc"); + if (!has_acpi_companion(&pdev->dev)) + sfc->hclk = devm_clk_get(&pdev->dev, "hclk_sfc"); if (IS_ERR(sfc->hclk)) return dev_err_probe(&pdev->dev, PTR_ERR(sfc->hclk), "Failed to get sfc ahb clk\n"); - sfc->use_dma = !of_property_read_bool(sfc->dev->of_node, "rockchip,sfc-no-dma"); - - if (sfc->use_dma) { - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); - if (ret) { - dev_warn(dev, "Unable to set dma mask\n"); - return ret; - } - - sfc->buffer = dmam_alloc_coherent(dev, SFC_MAX_IOSIZE_VER3, - &sfc->dma_buffer, GFP_KERNEL); - if (!sfc->buffer) - return -ENOMEM; + if (has_acpi_companion(&pdev->dev)) { + ret = device_property_read_u32(&pdev->dev, "clock-frequency", &val); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "Failed to find clock-frequency in ACPI\n"); + for (i = 0; i < SFC_MAX_CHIPSELECT_NUM; i++) + sfc->speed[i] = val; } + sfc->use_dma = !of_property_read_bool(sfc->dev->of_node, "rockchip,sfc-no-dma"); + ret = clk_prepare_enable(sfc->hclk); if (ret) { dev_err(&pdev->dev, "Failed to enable ahb clk\n"); @@ -630,19 +683,47 @@ static int rockchip_sfc_probe(struct platform_device *pdev) goto err_irq; } + platform_set_drvdata(pdev, sfc); + ret = rockchip_sfc_init(sfc); if (ret) goto err_irq; - sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc); sfc->version = rockchip_sfc_get_version(sfc); + sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc); + + pm_runtime_set_autosuspend_delay(dev, ROCKCHIP_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + pm_runtime_get_noresume(dev); + + if (sfc->use_dma) { + sfc->buffer = (u8 *)__get_free_pages(GFP_KERNEL | GFP_DMA32, + get_order(sfc->max_iosize)); + if (!sfc->buffer) { + ret = -ENOMEM; + goto err_dma; + } + sfc->dma_buffer = virt_to_phys(sfc->buffer); + } - ret = spi_register_controller(host); + ret = devm_spi_register_controller(dev, host); if (ret) - goto err_irq; + goto err_register; - return 0; + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + return 0; +err_register: + free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize)); +err_dma: + pm_runtime_get_sync(dev); + pm_runtime_put_noidle(dev); + pm_runtime_disable(dev); + pm_runtime_set_suspended(dev); + pm_runtime_dont_use_autosuspend(dev); err_irq: clk_disable_unprepare(sfc->clk); err_clk: @@ -657,11 +738,80 @@ static void rockchip_sfc_remove(struct platform_device *pdev) struct spi_controller *host = sfc->host; 
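Taken together, the exec_mem_op and probe hunks above give the controller the canonical runtime-PM autosuspend bracket. A condensed sketch of that life cycle around one memory operation, with the clock setup and transfer elided:

	ret = pm_runtime_get_sync(sfc->dev);	/* resume the controller */
	if (ret < 0) {
		pm_runtime_put_noidle(sfc->dev);	/* balance the failed get */
		return ret;
	}
	/* ... set the clock rate, run the spi-mem operation ... */
	pm_runtime_mark_last_busy(sfc->dev);	/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(sfc->dev);	/* idle after ROCKCHIP_AUTOSUSPEND_DELAY ms */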
spi_unregister_controller(host); + free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize)); clk_disable_unprepare(sfc->clk); clk_disable_unprepare(sfc->hclk); } +#ifdef CONFIG_PM +static int rockchip_sfc_runtime_suspend(struct device *dev) +{ + struct rockchip_sfc *sfc = dev_get_drvdata(dev); + + clk_disable_unprepare(sfc->clk); + clk_disable_unprepare(sfc->hclk); + + return 0; +} + +static int rockchip_sfc_runtime_resume(struct device *dev) +{ + struct rockchip_sfc *sfc = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(sfc->hclk); + if (ret < 0) + return ret; + + ret = clk_prepare_enable(sfc->clk); + if (ret < 0) + clk_disable_unprepare(sfc->hclk); + + return ret; +} +#endif /* CONFIG_PM */ + +#ifdef CONFIG_PM_SLEEP +static int rockchip_sfc_suspend(struct device *dev) +{ + pinctrl_pm_select_sleep_state(dev); + + return pm_runtime_force_suspend(dev); +} + +static int rockchip_sfc_resume(struct device *dev) +{ + struct rockchip_sfc *sfc = dev_get_drvdata(dev); + int ret; + + ret = pm_runtime_force_resume(dev); + if (ret < 0) + return ret; + + pinctrl_pm_select_default_state(dev); + + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + return ret; + } + + rockchip_sfc_init(sfc); + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops rockchip_sfc_pm_ops = { + SET_RUNTIME_PM_OPS(rockchip_sfc_runtime_suspend, + rockchip_sfc_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(rockchip_sfc_suspend, rockchip_sfc_resume) +}; + static const struct of_device_id rockchip_sfc_dt_ids[] = { { .compatible = "rockchip,sfc"}, { /* sentinel */ } @@ -672,6 +822,7 @@ static struct platform_driver rockchip_sfc_driver = { .driver = { .name = "rockchip-sfc", .of_match_table = rockchip_sfc_dt_ids, + .pm = &rockchip_sfc_pm_ops, }, .probe = rockchip_sfc_probe, .remove = rockchip_sfc_remove, diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index 1bc012fce7cb..1a6381de6f33 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -547,7 +547,7 @@ static int rockchip_spi_config(struct rockchip_spi *rs, cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET; if (spi->mode & SPI_LSB_FIRST) cr0 |= CR0_FBM_LSB << CR0_FBM_OFFSET; - if (spi->mode & SPI_CS_HIGH) + if ((spi->mode & SPI_CS_HIGH) && !(spi_get_csgpiod(spi, 0))) cr0 |= BIT(spi_get_chipselect(spi, 0)) << CR0_SOI_OFFSET; if (xfer->rx_buf && xfer->tx_buf) diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 389275dbc003..9c47f5741c5f 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -139,7 +139,9 @@ struct s3c64xx_spi_dma_data { * struct s3c64xx_spi_port_config - SPI Controller hardware info * @fifo_lvl_mask: [DEPRECATED] use @{rx, tx}_fifomask instead. * @rx_lvl_offset: [DEPRECATED] use @{rx,tx}_fifomask instead. - * @fifo_depth: depth of the FIFO. + * @fifo_depth: depth of the FIFOs. Used by compatibles where all the instances + * of the IP define the same FIFO depth. It has higher precedence + * than the FIFO depth specified via DT. * @rx_fifomask: SPI_STATUS.RX_FIFO_LVL mask. Shifted mask defining the field's * length and position. * @tx_fifomask: SPI_STATUS.TX_FIFO_LVL mask. 
Shifted mask defining the field's diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c index eecf9ea95ae3..1627aa66c965 100644 --- a/drivers/spi/spi-sc18is602.c +++ b/drivers/spi/spi-sc18is602.c @@ -7,13 +7,15 @@ #include <linux/kernel.h> #include <linux/err.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/spi/spi.h> #include <linux/i2c.h> #include <linux/delay.h> #include <linux/pm_runtime.h> -#include <linux/of.h> #include <linux/platform_data/sc18is602.h> +#include <linux/property.h> + #include <linux/gpio/consumer.h> enum chips { sc18is602, sc18is602b, sc18is603 }; @@ -236,9 +238,7 @@ static int sc18is602_setup(struct spi_device *spi) static int sc18is602_probe(struct i2c_client *client) { - const struct i2c_device_id *id = i2c_client_get_device_id(client); struct device *dev = &client->dev; - struct device_node *np = dev->of_node; struct sc18is602_platform_data *pdata = dev_get_platdata(dev); struct sc18is602 *hw; struct spi_controller *host; @@ -251,8 +251,9 @@ static int sc18is602_probe(struct i2c_client *client) if (!host) return -ENOMEM; + device_set_node(&host->dev, dev_fwnode(dev)); + hw = spi_controller_get_devdata(host); - i2c_set_clientdata(client, hw); /* assert reset and then release */ hw->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); @@ -265,11 +266,7 @@ static int sc18is602_probe(struct i2c_client *client) hw->dev = dev; hw->ctrl = 0xff; - if (client->dev.of_node) - hw->id = (uintptr_t)of_device_get_match_data(&client->dev); - else - hw->id = id->driver_data; - + hw->id = (uintptr_t)i2c_get_match_data(client); switch (hw->id) { case sc18is602: case sc18is602b: @@ -278,28 +275,21 @@ static int sc18is602_probe(struct i2c_client *client) break; case sc18is603: host->num_chipselect = 2; - if (pdata) { + if (pdata) hw->freq = pdata->clock_frequency; - } else { - const __be32 *val; - int len; - - val = of_get_property(np, "clock-frequency", &len); - if (val && len >= sizeof(__be32)) - hw->freq = be32_to_cpup(val); - } + else + device_property_read_u32(dev, "clock-frequency", &hw->freq); if (!hw->freq) hw->freq = SC18IS602_CLOCK; break; } - host->bus_num = np ? -1 : client->adapter->nr; + host->bus_num = dev_fwnode(dev) ? 
-1 : client->adapter->nr; host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST; host->bits_per_word_mask = SPI_BPW_MASK(8); host->setup = sc18is602_setup; host->transfer_one_message = sc18is602_transfer_one; host->max_transfer_size = sc18is602_max_transfer_size; host->max_message_size = sc18is602_max_transfer_size; - host->dev.of_node = np; host->min_speed_hz = hw->freq / 128; host->max_speed_hz = hw->freq / 4; @@ -314,7 +304,7 @@ static const struct i2c_device_id sc18is602_id[] = { }; MODULE_DEVICE_TABLE(i2c, sc18is602_id); -static const struct of_device_id sc18is602_of_match[] __maybe_unused = { +static const struct of_device_id sc18is602_of_match[] = { { .compatible = "nxp,sc18is602", .data = (void *)sc18is602 @@ -334,7 +324,7 @@ MODULE_DEVICE_TABLE(of, sc18is602_of_match); static struct i2c_driver sc18is602_driver = { .driver = { .name = "sc18is602", - .of_match_table = of_match_ptr(sc18is602_of_match), + .of_match_table = sc18is602_of_match, }, .probe = sc18is602_probe, .id_table = sc18is602_id, diff --git a/drivers/spi/spi-sg2044-nor.c b/drivers/spi/spi-sg2044-nor.c new file mode 100644 index 000000000000..a59aa3fc55d2 --- /dev/null +++ b/drivers/spi/spi-sg2044-nor.c @@ -0,0 +1,488 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * SG2044 SPI NOR controller driver + * + * Copyright (c) 2025 Longbin Li <looong.bin@gmail.com> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/spi/spi-mem.h> + +/* Hardware register definitions */ +#define SPIFMC_CTRL 0x00 +#define SPIFMC_CTRL_CPHA BIT(12) +#define SPIFMC_CTRL_CPOL BIT(13) +#define SPIFMC_CTRL_HOLD_OL BIT(14) +#define SPIFMC_CTRL_WP_OL BIT(15) +#define SPIFMC_CTRL_LSBF BIT(20) +#define SPIFMC_CTRL_SRST BIT(21) +#define SPIFMC_CTRL_SCK_DIV_SHIFT 0 +#define SPIFMC_CTRL_FRAME_LEN_SHIFT 16 +#define SPIFMC_CTRL_SCK_DIV_MASK 0x7FF + +#define SPIFMC_CE_CTRL 0x04 +#define SPIFMC_CE_CTRL_CEMANUAL BIT(0) +#define SPIFMC_CE_CTRL_CEMANUAL_EN BIT(1) + +#define SPIFMC_DLY_CTRL 0x08 +#define SPIFMC_CTRL_FM_INTVL_MASK 0x000f +#define SPIFMC_CTRL_FM_INTVL BIT(0) +#define SPIFMC_CTRL_CET_MASK 0x0f00 +#define SPIFMC_CTRL_CET BIT(8) + +#define SPIFMC_DMMR 0x0c + +#define SPIFMC_TRAN_CSR 0x10 +#define SPIFMC_TRAN_CSR_TRAN_MODE_MASK GENMASK(1, 0) +#define SPIFMC_TRAN_CSR_TRAN_MODE_RX BIT(0) +#define SPIFMC_TRAN_CSR_TRAN_MODE_TX BIT(1) +#define SPIFMC_TRAN_CSR_FAST_MODE BIT(3) +#define SPIFMC_TRAN_CSR_BUS_WIDTH_1_BIT (0x00 << 4) +#define SPIFMC_TRAN_CSR_BUS_WIDTH_2_BIT (0x01 << 4) +#define SPIFMC_TRAN_CSR_BUS_WIDTH_4_BIT (0x02 << 4) +#define SPIFMC_TRAN_CSR_DMA_EN BIT(6) +#define SPIFMC_TRAN_CSR_MISO_LEVEL BIT(7) +#define SPIFMC_TRAN_CSR_ADDR_BYTES_MASK GENMASK(10, 8) +#define SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT 8 +#define SPIFMC_TRAN_CSR_WITH_CMD BIT(11) +#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_MASK GENMASK(13, 12) +#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_1_BYTE (0x00 << 12) +#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_2_BYTE (0x01 << 12) +#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_4_BYTE (0x02 << 12) +#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_8_BYTE (0x03 << 12) +#define SPIFMC_TRAN_CSR_GO_BUSY BIT(15) +#define SPIFMC_TRAN_CSR_ADDR4B_SHIFT 20 +#define SPIFMC_TRAN_CSR_CMD4B_SHIFT 21 + +#define SPIFMC_TRAN_NUM 0x14 +#define SPIFMC_FIFO_PORT 0x18 +#define SPIFMC_FIFO_PT 0x20 + +#define SPIFMC_INT_STS 0x28 +#define SPIFMC_INT_TRAN_DONE BIT(0) +#define SPIFMC_INT_RD_FIFO BIT(2) +#define SPIFMC_INT_WR_FIFO BIT(3) +#define SPIFMC_INT_RX_FRAME BIT(4) 
+#define SPIFMC_INT_TX_FRAME BIT(5) + +#define SPIFMC_INT_EN 0x2c +#define SPIFMC_INT_TRAN_DONE_EN BIT(0) +#define SPIFMC_INT_RD_FIFO_EN BIT(2) +#define SPIFMC_INT_WR_FIFO_EN BIT(3) +#define SPIFMC_INT_RX_FRAME_EN BIT(4) +#define SPIFMC_INT_TX_FRAME_EN BIT(5) + +#define SPIFMC_OPT 0x030 +#define SPIFMC_OPT_DISABLE_FIFO_FLUSH BIT(1) + +#define SPIFMC_MAX_FIFO_DEPTH 8 + +#define SPIFMC_MAX_READ_SIZE 0x10000 + +struct sg2044_spifmc { + struct spi_controller *ctrl; + void __iomem *io_base; + struct device *dev; + struct mutex lock; + struct clk *clk; +}; + +static int sg2044_spifmc_wait_int(struct sg2044_spifmc *spifmc, u8 int_type) +{ + u32 stat; + + return readl_poll_timeout(spifmc->io_base + SPIFMC_INT_STS, stat, + (stat & int_type), 0, 1000000); +} + +static int sg2044_spifmc_wait_xfer_size(struct sg2044_spifmc *spifmc, + int xfer_size) +{ + u8 stat; + + return readl_poll_timeout(spifmc->io_base + SPIFMC_FIFO_PT, stat, + ((stat & 0xf) == xfer_size), 1, 1000000); +} + +static u32 sg2044_spifmc_init_reg(struct sg2044_spifmc *spifmc) +{ + u32 reg; + + reg = readl(spifmc->io_base + SPIFMC_TRAN_CSR); + reg &= ~(SPIFMC_TRAN_CSR_TRAN_MODE_MASK | + SPIFMC_TRAN_CSR_FAST_MODE | + SPIFMC_TRAN_CSR_BUS_WIDTH_2_BIT | + SPIFMC_TRAN_CSR_BUS_WIDTH_4_BIT | + SPIFMC_TRAN_CSR_DMA_EN | + SPIFMC_TRAN_CSR_ADDR_BYTES_MASK | + SPIFMC_TRAN_CSR_WITH_CMD | + SPIFMC_TRAN_CSR_FIFO_TRG_LVL_MASK); + + writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR); + + return reg; +} + +static ssize_t sg2044_spifmc_read_64k(struct sg2044_spifmc *spifmc, + const struct spi_mem_op *op, loff_t from, + size_t len, u_char *buf) +{ + int xfer_size, offset; + u32 reg; + int ret; + int i; + + reg = sg2044_spifmc_init_reg(spifmc); + reg |= (op->addr.nbytes + op->dummy.nbytes) << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT; + reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_8_BYTE; + reg |= SPIFMC_TRAN_CSR_WITH_CMD; + reg |= SPIFMC_TRAN_CSR_TRAN_MODE_RX; + + writel(0, spifmc->io_base + SPIFMC_FIFO_PT); + writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT); + + for (i = op->addr.nbytes - 1; i >= 0; i--) + writeb((from >> i * 8) & 0xff, spifmc->io_base + SPIFMC_FIFO_PORT); + + for (i = 0; i < op->dummy.nbytes; i++) + writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT); + + writel(len, spifmc->io_base + SPIFMC_TRAN_NUM); + writel(0, spifmc->io_base + SPIFMC_INT_STS); + reg |= SPIFMC_TRAN_CSR_GO_BUSY; + writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR); + + ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_RD_FIFO); + if (ret < 0) + return ret; + + offset = 0; + while (offset < len) { + xfer_size = min_t(size_t, SPIFMC_MAX_FIFO_DEPTH, len - offset); + + ret = sg2044_spifmc_wait_xfer_size(spifmc, xfer_size); + if (ret < 0) + return ret; + + for (i = 0; i < xfer_size; i++) + buf[i + offset] = readb(spifmc->io_base + SPIFMC_FIFO_PORT); + + offset += xfer_size; + } + + ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE); + if (ret < 0) + return ret; + + writel(0, spifmc->io_base + SPIFMC_FIFO_PT); + + return len; +} + +static ssize_t sg2044_spifmc_read(struct sg2044_spifmc *spifmc, + const struct spi_mem_op *op) +{ + size_t xfer_size; + size_t offset; + loff_t from = op->addr.val; + size_t len = op->data.nbytes; + int ret; + u8 *din = op->data.buf.in; + + offset = 0; + while (offset < len) { + xfer_size = min_t(size_t, SPIFMC_MAX_READ_SIZE, len - offset); + + ret = sg2044_spifmc_read_64k(spifmc, op, from, xfer_size, din); + if (ret < 0) + return ret; + + offset += xfer_size; + din += xfer_size; + from += xfer_size; + } + + return 0; +} + +static ssize_t 
sg2044_spifmc_write(struct sg2044_spifmc *spifmc, + const struct spi_mem_op *op) +{ + size_t xfer_size; + const u8 *dout = op->data.buf.out; + int i, offset; + int ret; + u32 reg; + + reg = sg2044_spifmc_init_reg(spifmc); + reg |= (op->addr.nbytes + op->dummy.nbytes) << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT; + reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_8_BYTE; + reg |= SPIFMC_TRAN_CSR_WITH_CMD; + reg |= SPIFMC_TRAN_CSR_TRAN_MODE_TX; + + writel(0, spifmc->io_base + SPIFMC_FIFO_PT); + writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT); + + for (i = op->addr.nbytes - 1; i >= 0; i--) + writeb((op->addr.val >> i * 8) & 0xff, spifmc->io_base + SPIFMC_FIFO_PORT); + + for (i = 0; i < op->dummy.nbytes; i++) + writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT); + + writel(0, spifmc->io_base + SPIFMC_INT_STS); + writel(op->data.nbytes, spifmc->io_base + SPIFMC_TRAN_NUM); + reg |= SPIFMC_TRAN_CSR_GO_BUSY; + writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR); + + ret = sg2044_spifmc_wait_xfer_size(spifmc, 0); + if (ret < 0) + return ret; + + writel(0, spifmc->io_base + SPIFMC_FIFO_PT); + + offset = 0; + while (offset < op->data.nbytes) { + xfer_size = min_t(size_t, SPIFMC_MAX_FIFO_DEPTH, op->data.nbytes - offset); + + ret = sg2044_spifmc_wait_xfer_size(spifmc, 0); + if (ret < 0) + return ret; + + for (i = 0; i < xfer_size; i++) + writeb(dout[i + offset], spifmc->io_base + SPIFMC_FIFO_PORT); + + offset += xfer_size; + } + + ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE); + if (ret < 0) + return ret; + + writel(0, spifmc->io_base + SPIFMC_FIFO_PT); + + return 0; +} + +static ssize_t sg2044_spifmc_tran_cmd(struct sg2044_spifmc *spifmc, + const struct spi_mem_op *op) +{ + int i, ret; + u32 reg; + + reg = sg2044_spifmc_init_reg(spifmc); + reg |= (op->addr.nbytes + op->dummy.nbytes) << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT; + reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_1_BYTE; + reg |= SPIFMC_TRAN_CSR_WITH_CMD; + + writel(0, spifmc->io_base + SPIFMC_FIFO_PT); + writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT); + + for (i = op->addr.nbytes - 1; i >= 0; i--) + writeb((op->addr.val >> i * 8) & 0xff, spifmc->io_base + SPIFMC_FIFO_PORT); + + for (i = 0; i < op->dummy.nbytes; i++) + writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT); + + writel(0, spifmc->io_base + SPIFMC_INT_STS); + reg |= SPIFMC_TRAN_CSR_GO_BUSY; + writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR); + + ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE); + if (ret < 0) + return ret; + + writel(0, spifmc->io_base + SPIFMC_FIFO_PT); + + return 0; +} + +static void sg2044_spifmc_trans(struct sg2044_spifmc *spifmc, + const struct spi_mem_op *op) +{ + if (op->data.dir == SPI_MEM_DATA_IN) + sg2044_spifmc_read(spifmc, op); + else if (op->data.dir == SPI_MEM_DATA_OUT) + sg2044_spifmc_write(spifmc, op); + else + sg2044_spifmc_tran_cmd(spifmc, op); +} + +static ssize_t sg2044_spifmc_trans_reg(struct sg2044_spifmc *spifmc, + const struct spi_mem_op *op) +{ + const u8 *dout = NULL; + u8 *din = NULL; + size_t len = op->data.nbytes; + int ret, i; + u32 reg; + + if (op->data.dir == SPI_MEM_DATA_IN) + din = op->data.buf.in; + else + dout = op->data.buf.out; + + reg = sg2044_spifmc_init_reg(spifmc); + reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_1_BYTE; + reg |= SPIFMC_TRAN_CSR_WITH_CMD; + + if (din) { + reg |= SPIFMC_TRAN_CSR_BUS_WIDTH_1_BIT; + reg |= SPIFMC_TRAN_CSR_TRAN_MODE_RX; + reg |= SPIFMC_TRAN_CSR_TRAN_MODE_TX; + + writel(SPIFMC_OPT_DISABLE_FIFO_FLUSH, spifmc->io_base + SPIFMC_OPT); + } else { + /* + * If write values to the Status Register, + * configure 
TRAN_CSR register as the same as + * sg2044_spifmc_read_reg. + */ + if (op->cmd.opcode == 0x01) { + reg |= SPIFMC_TRAN_CSR_TRAN_MODE_RX; + reg |= SPIFMC_TRAN_CSR_TRAN_MODE_TX; + writel(len, spifmc->io_base + SPIFMC_TRAN_NUM); + } + } + + writel(0, spifmc->io_base + SPIFMC_FIFO_PT); + writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT); + + for (i = 0; i < len; i++) { + if (din) + writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT); + else + writeb(dout[i], spifmc->io_base + SPIFMC_FIFO_PORT); + } + + writel(0, spifmc->io_base + SPIFMC_INT_STS); + writel(len, spifmc->io_base + SPIFMC_TRAN_NUM); + reg |= SPIFMC_TRAN_CSR_GO_BUSY; + writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR); + + ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE); + if (ret < 0) + return ret; + + if (din) { + while (len--) + *din++ = readb(spifmc->io_base + SPIFMC_FIFO_PORT); + } + + writel(0, spifmc->io_base + SPIFMC_FIFO_PT); + + return 0; +} + +static int sg2044_spifmc_exec_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + struct sg2044_spifmc *spifmc; + + spifmc = spi_controller_get_devdata(mem->spi->controller); + + mutex_lock(&spifmc->lock); + + if (op->addr.nbytes == 0) + sg2044_spifmc_trans_reg(spifmc, op); + else + sg2044_spifmc_trans(spifmc, op); + + mutex_unlock(&spifmc->lock); + + return 0; +} + +static const struct spi_controller_mem_ops sg2044_spifmc_mem_ops = { + .exec_op = sg2044_spifmc_exec_op, +}; + +static void sg2044_spifmc_init(struct sg2044_spifmc *spifmc) +{ + u32 tran_csr; + u32 reg; + + writel(0, spifmc->io_base + SPIFMC_DMMR); + + reg = readl(spifmc->io_base + SPIFMC_CTRL); + reg |= SPIFMC_CTRL_SRST; + reg &= ~(SPIFMC_CTRL_SCK_DIV_MASK); + reg |= 1; + writel(reg, spifmc->io_base + SPIFMC_CTRL); + + writel(0, spifmc->io_base + SPIFMC_CE_CTRL); + + tran_csr = readl(spifmc->io_base + SPIFMC_TRAN_CSR); + tran_csr |= (0 << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT); + tran_csr |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_4_BYTE; + tran_csr |= SPIFMC_TRAN_CSR_WITH_CMD; + writel(tran_csr, spifmc->io_base + SPIFMC_TRAN_CSR); +} + +static int sg2044_spifmc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct spi_controller *ctrl; + struct sg2044_spifmc *spifmc; + int ret; + + ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(*spifmc)); + if (!ctrl) + return -ENOMEM; + + spifmc = spi_controller_get_devdata(ctrl); + + spifmc->clk = devm_clk_get_enabled(&pdev->dev, NULL); + if (IS_ERR(spifmc->clk)) + return dev_err_probe(dev, PTR_ERR(spifmc->clk), "Cannot get and enable AHB clock\n"); + + spifmc->dev = &pdev->dev; + spifmc->ctrl = ctrl; + + spifmc->io_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(spifmc->io_base)) + return PTR_ERR(spifmc->io_base); + + ctrl->num_chipselect = 1; + ctrl->dev.of_node = pdev->dev.of_node; + ctrl->bits_per_word_mask = SPI_BPW_MASK(8); + ctrl->auto_runtime_pm = false; + ctrl->mem_ops = &sg2044_spifmc_mem_ops; + ctrl->mode_bits = SPI_RX_DUAL | SPI_TX_DUAL | SPI_RX_QUAD | SPI_TX_QUAD; + + ret = devm_mutex_init(dev, &spifmc->lock); + if (ret) + return ret; + + sg2044_spifmc_init(spifmc); + sg2044_spifmc_init_reg(spifmc); + + ret = devm_spi_register_controller(&pdev->dev, ctrl); + if (ret) + return dev_err_probe(dev, ret, "spi_register_controller failed\n"); + + return 0; +} + +static const struct of_device_id sg2044_spifmc_match[] = { + { .compatible = "sophgo,sg2044-spifmc-nor" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sg2044_spifmc_match); + +static struct platform_driver sg2044_nor_driver = { + .driver = { + .name = 
"sg2044,spifmc-nor", + .of_match_table = sg2044_spifmc_match, + }, + .probe = sg2044_spifmc_probe, +}; +module_platform_driver(sg2044_nor_driver); + +MODULE_DESCRIPTION("SG2044 SPI NOR controller driver"); +MODULE_AUTHOR("Longbin Li <looong.bin@gmail.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 8a98c313548e..7d8a7998f8ae 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -918,6 +918,7 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr, void *rx_buf = t->rx_buf; unsigned int len = t->len; unsigned int bits = t->bits_per_word; + unsigned int max_wdlen = 256; unsigned int bytes_per_word; unsigned int words; int n; @@ -931,17 +932,17 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr, if (!spi_controller_is_target(p->ctlr)) sh_msiof_spi_set_clk_regs(p, t); + if (tx_buf) + max_wdlen = min(max_wdlen, p->tx_fifo_size); + if (rx_buf) + max_wdlen = min(max_wdlen, p->rx_fifo_size); + while (ctlr->dma_tx && len > 15) { /* * DMA supports 32-bit words only, hence pack 8-bit and 16-bit * words, with byte resp. word swapping. */ - unsigned int l = 0; - - if (tx_buf) - l = min(round_down(len, 4), p->tx_fifo_size * 4); - if (rx_buf) - l = min(round_down(len, 4), p->rx_fifo_size * 4); + unsigned int l = min(round_down(len, 4), max_wdlen * 4); if (bits <= 8) { copy32 = copy_bswap32; diff --git a/drivers/spi/spi-sn-f-ospi.c b/drivers/spi/spi-sn-f-ospi.c index 56ef114effc9..c4969f66a0ba 100644 --- a/drivers/spi/spi-sn-f-ospi.c +++ b/drivers/spi/spi-sn-f-ospi.c @@ -338,7 +338,6 @@ static void f_ospi_config_indir_protocol(struct f_ospi *ospi, static int f_ospi_indir_prepare_op(struct f_ospi *ospi, struct spi_mem *mem, const struct spi_mem_op *op) { - struct spi_device *spi = mem->spi; u32 irq_stat_en; int ret; @@ -346,7 +345,7 @@ static int f_ospi_indir_prepare_op(struct f_ospi *ospi, struct spi_mem *mem, if (ret) return ret; - f_ospi_config_clk(ospi, spi->max_speed_hz); + f_ospi_config_clk(ospi, op->max_freq); f_ospi_config_indir_protocol(ospi, mem, op); @@ -580,6 +579,10 @@ static const struct spi_controller_mem_ops f_ospi_mem_ops = { .exec_op = f_ospi_exec_op, }; +static const struct spi_controller_mem_caps f_ospi_mem_caps = { + .per_op_freq = true, +}; + static int f_ospi_init(struct f_ospi *ospi) { int ret; @@ -617,6 +620,7 @@ static int f_ospi_probe(struct platform_device *pdev) | SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL | SPI_MODE_0 | SPI_MODE_1 | SPI_LSB_FIRST; ctlr->mem_ops = &f_ospi_mem_ops; + ctlr->mem_caps = &f_ospi_mem_caps; ctlr->bus_num = -1; of_property_read_u32(dev->of_node, "num-cs", &num_cs); if (num_cs > OSPI_NUM_CS) { diff --git a/drivers/spi/spi-stm32-ospi.c b/drivers/spi/spi-stm32-ospi.c new file mode 100644 index 000000000000..9ec9823409cc --- /dev/null +++ b/drivers/spi/spi-stm32-ospi.c @@ -0,0 +1,1067 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) STMicroelectronics 2025 - All Rights Reserved + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/gpio/consumer.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_reserved_mem.h> +#include <linux/pinctrl/consumer.h> +#include 
<linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> +#include <linux/sizes.h> +#include <linux/spi/spi-mem.h> +#include <linux/types.h> + +#define OSPI_CR 0x00 +#define CR_EN BIT(0) +#define CR_ABORT BIT(1) +#define CR_DMAEN BIT(2) +#define CR_FTHRES_SHIFT 8 +#define CR_TEIE BIT(16) +#define CR_TCIE BIT(17) +#define CR_SMIE BIT(19) +#define CR_APMS BIT(22) +#define CR_CSSEL BIT(24) +#define CR_FMODE_MASK GENMASK(29, 28) +#define CR_FMODE_INDW (0U) +#define CR_FMODE_INDR (1U) +#define CR_FMODE_APM (2U) +#define CR_FMODE_MM (3U) + +#define OSPI_DCR1 0x08 +#define DCR1_DLYBYP BIT(3) +#define DCR1_DEVSIZE_MASK GENMASK(20, 16) +#define DCR1_MTYP_MASK GENMASK(26, 24) +#define DCR1_MTYP_MX_MODE 1 +#define DCR1_MTYP_HP_MEMMODE 4 + +#define OSPI_DCR2 0x0c +#define DCR2_PRESC_MASK GENMASK(7, 0) + +#define OSPI_SR 0x20 +#define SR_TEF BIT(0) +#define SR_TCF BIT(1) +#define SR_FTF BIT(2) +#define SR_SMF BIT(3) +#define SR_BUSY BIT(5) + +#define OSPI_FCR 0x24 +#define FCR_CTEF BIT(0) +#define FCR_CTCF BIT(1) +#define FCR_CSMF BIT(3) + +#define OSPI_DLR 0x40 +#define OSPI_AR 0x48 +#define OSPI_DR 0x50 +#define OSPI_PSMKR 0x80 +#define OSPI_PSMAR 0x88 + +#define OSPI_CCR 0x100 +#define CCR_IMODE_MASK GENMASK(2, 0) +#define CCR_IDTR BIT(3) +#define CCR_ISIZE_MASK GENMASK(5, 4) +#define CCR_ADMODE_MASK GENMASK(10, 8) +#define CCR_ADMODE_8LINES 4 +#define CCR_ADDTR BIT(11) +#define CCR_ADSIZE_MASK GENMASK(13, 12) +#define CCR_ADSIZE_32BITS 3 +#define CCR_DMODE_MASK GENMASK(26, 24) +#define CCR_DMODE_8LINES 4 +#define CCR_DQSE BIT(29) +#define CCR_DDTR BIT(27) +#define CCR_BUSWIDTH_0 0x0 +#define CCR_BUSWIDTH_1 0x1 +#define CCR_BUSWIDTH_2 0x2 +#define CCR_BUSWIDTH_4 0x3 +#define CCR_BUSWIDTH_8 0x4 + +#define OSPI_TCR 0x108 +#define TCR_DCYC_MASK GENMASK(4, 0) +#define TCR_DHQC BIT(28) +#define TCR_SSHIFT BIT(30) + +#define OSPI_IR 0x110 + +#define STM32_OSPI_MAX_MMAP_SZ SZ_256M +#define STM32_OSPI_MAX_NORCHIP 2 + +#define STM32_FIFO_TIMEOUT_US 30000 +#define STM32_ABT_TIMEOUT_US 100000 +#define STM32_COMP_TIMEOUT_MS 5000 +#define STM32_BUSY_TIMEOUT_US 100000 + + +#define STM32_AUTOSUSPEND_DELAY -1 + +struct stm32_ospi { + struct device *dev; + struct spi_controller *ctrl; + struct clk *clk; + struct reset_control *rstc; + + struct completion data_completion; + struct completion match_completion; + + struct dma_chan *dma_chtx; + struct dma_chan *dma_chrx; + struct completion dma_completion; + + void __iomem *regs_base; + void __iomem *mm_base; + phys_addr_t regs_phys_base; + resource_size_t mm_size; + u32 clk_rate; + u32 fmode; + u32 cr_reg; + u32 dcr_reg; + u32 flash_presc[STM32_OSPI_MAX_NORCHIP]; + int irq; + unsigned long status_timeout; + + /* + * To protect device configuration, could be different between + * 2 flash access + */ + struct mutex lock; +}; + +static void stm32_ospi_read_fifo(u8 *val, void __iomem *addr) +{ + *val = readb_relaxed(addr); +} + +static void stm32_ospi_write_fifo(u8 *val, void __iomem *addr) +{ + writeb_relaxed(*val, addr); +} + +static int stm32_ospi_abort(struct stm32_ospi *ospi) +{ + void __iomem *regs_base = ospi->regs_base; + u32 cr; + int timeout; + + cr = readl_relaxed(regs_base + OSPI_CR) | CR_ABORT; + writel_relaxed(cr, regs_base + OSPI_CR); + + /* wait clear of abort bit by hw */ + timeout = readl_relaxed_poll_timeout_atomic(regs_base + OSPI_CR, + cr, !(cr & CR_ABORT), 1, + STM32_ABT_TIMEOUT_US); + + if (timeout) + dev_err(ospi->dev, "%s abort timeout:%d\n", __func__, timeout); + + return timeout; +} + +static int stm32_ospi_poll(struct 
stm32_ospi *ospi, u8 *buf, u32 len, bool read) +{ + void __iomem *regs_base = ospi->regs_base; + void (*fifo)(u8 *val, void __iomem *addr); + u32 sr; + int ret; + + if (read) + fifo = stm32_ospi_read_fifo; + else + fifo = stm32_ospi_write_fifo; + + while (len--) { + ret = readl_relaxed_poll_timeout_atomic(regs_base + OSPI_SR, + sr, sr & SR_FTF, 1, + STM32_FIFO_TIMEOUT_US); + if (ret) { + dev_err(ospi->dev, "fifo timeout (len:%d stat:%#x)\n", + len, sr); + return ret; + } + fifo(buf++, regs_base + OSPI_DR); + } + + return 0; +} + +static int stm32_ospi_wait_nobusy(struct stm32_ospi *ospi) +{ + u32 sr; + + return readl_relaxed_poll_timeout_atomic(ospi->regs_base + OSPI_SR, + sr, !(sr & SR_BUSY), 1, + STM32_BUSY_TIMEOUT_US); +} + +static int stm32_ospi_wait_cmd(struct stm32_ospi *ospi) +{ + void __iomem *regs_base = ospi->regs_base; + u32 cr, sr; + int err = 0; + + if ((readl_relaxed(regs_base + OSPI_SR) & SR_TCF) || + ospi->fmode == CR_FMODE_APM) + goto out; + + reinit_completion(&ospi->data_completion); + cr = readl_relaxed(regs_base + OSPI_CR); + writel_relaxed(cr | CR_TCIE | CR_TEIE, regs_base + OSPI_CR); + + if (!wait_for_completion_timeout(&ospi->data_completion, + msecs_to_jiffies(STM32_COMP_TIMEOUT_MS))) + err = -ETIMEDOUT; + + sr = readl_relaxed(regs_base + OSPI_SR); + if (sr & SR_TCF) + /* avoid false timeout */ + err = 0; + if (sr & SR_TEF) + err = -EIO; + +out: + /* clear flags */ + writel_relaxed(FCR_CTCF | FCR_CTEF, regs_base + OSPI_FCR); + + if (!err) + err = stm32_ospi_wait_nobusy(ospi); + + return err; +} + +static void stm32_ospi_dma_callback(void *arg) +{ + struct completion *dma_completion = arg; + + complete(dma_completion); +} + +static irqreturn_t stm32_ospi_irq(int irq, void *dev_id) +{ + struct stm32_ospi *ospi = (struct stm32_ospi *)dev_id; + void __iomem *regs_base = ospi->regs_base; + u32 cr, sr; + + cr = readl_relaxed(regs_base + OSPI_CR); + sr = readl_relaxed(regs_base + OSPI_SR); + + if (cr & CR_SMIE && sr & SR_SMF) { + /* disable irq */ + cr &= ~CR_SMIE; + writel_relaxed(cr, regs_base + OSPI_CR); + complete(&ospi->match_completion); + + return IRQ_HANDLED; + } + + if (sr & (SR_TEF | SR_TCF)) { + /* disable irq */ + cr &= ~CR_TCIE & ~CR_TEIE; + writel_relaxed(cr, regs_base + OSPI_CR); + complete(&ospi->data_completion); + } + + return IRQ_HANDLED; +} + +static void stm32_ospi_dma_setup(struct stm32_ospi *ospi, + struct dma_slave_config *dma_cfg) +{ + if (dma_cfg && ospi->dma_chrx) { + if (dmaengine_slave_config(ospi->dma_chrx, dma_cfg)) { + dev_err(ospi->dev, "dma rx config failed\n"); + dma_release_channel(ospi->dma_chrx); + ospi->dma_chrx = NULL; + } + } + + if (dma_cfg && ospi->dma_chtx) { + if (dmaengine_slave_config(ospi->dma_chtx, dma_cfg)) { + dev_err(ospi->dev, "dma tx config failed\n"); + dma_release_channel(ospi->dma_chtx); + ospi->dma_chtx = NULL; + } + } + + init_completion(&ospi->dma_completion); +} + +static int stm32_ospi_tx_mm(struct stm32_ospi *ospi, + const struct spi_mem_op *op) +{ + memcpy_fromio(op->data.buf.in, ospi->mm_base + op->addr.val, + op->data.nbytes); + return 0; +} + +static int stm32_ospi_tx_dma(struct stm32_ospi *ospi, + const struct spi_mem_op *op) +{ + struct dma_async_tx_descriptor *desc; + void __iomem *regs_base = ospi->regs_base; + enum dma_transfer_direction dma_dir; + struct dma_chan *dma_ch; + struct sg_table sgt; + dma_cookie_t cookie; + u32 cr, t_out; + int err; + + if (op->data.dir == SPI_MEM_DATA_IN) { + dma_dir = DMA_DEV_TO_MEM; + dma_ch = ospi->dma_chrx; + } else { + dma_dir = DMA_MEM_TO_DEV; + dma_ch = 
ospi->dma_chtx; + } + + /* + * Spi_map_buf return -EINVAL if the buffer is not DMA-able + * (DMA-able: in vmalloc | kmap | virt_addr_valid) + */ + err = spi_controller_dma_map_mem_op_data(ospi->ctrl, op, &sgt); + if (err) + return err; + + desc = dmaengine_prep_slave_sg(dma_ch, sgt.sgl, sgt.nents, + dma_dir, DMA_PREP_INTERRUPT); + if (!desc) { + err = -ENOMEM; + goto out_unmap; + } + + cr = readl_relaxed(regs_base + OSPI_CR); + + reinit_completion(&ospi->dma_completion); + desc->callback = stm32_ospi_dma_callback; + desc->callback_param = &ospi->dma_completion; + cookie = dmaengine_submit(desc); + err = dma_submit_error(cookie); + if (err) + goto out; + + dma_async_issue_pending(dma_ch); + + writel_relaxed(cr | CR_DMAEN, regs_base + OSPI_CR); + + t_out = sgt.nents * STM32_COMP_TIMEOUT_MS; + if (!wait_for_completion_timeout(&ospi->dma_completion, + msecs_to_jiffies(t_out))) + err = -ETIMEDOUT; + + if (err) + dmaengine_terminate_all(dma_ch); + +out: + writel_relaxed(cr & ~CR_DMAEN, regs_base + OSPI_CR); +out_unmap: + spi_controller_dma_unmap_mem_op_data(ospi->ctrl, op, &sgt); + + return err; +} + +static int stm32_ospi_xfer(struct stm32_ospi *ospi, const struct spi_mem_op *op) +{ + u8 *buf; + + if (!op->data.nbytes) + return 0; + + if (ospi->fmode == CR_FMODE_MM) + return stm32_ospi_tx_mm(ospi, op); + else if (((op->data.dir == SPI_MEM_DATA_IN && ospi->dma_chrx) || + (op->data.dir == SPI_MEM_DATA_OUT && ospi->dma_chtx)) && + op->data.nbytes > 8) + if (!stm32_ospi_tx_dma(ospi, op)) + return 0; + + if (op->data.dir == SPI_MEM_DATA_IN) + buf = op->data.buf.in; + else + buf = (u8 *)op->data.buf.out; + + return stm32_ospi_poll(ospi, buf, op->data.nbytes, + op->data.dir == SPI_MEM_DATA_IN); +} + +static int stm32_ospi_wait_poll_status(struct stm32_ospi *ospi, + const struct spi_mem_op *op) +{ + void __iomem *regs_base = ospi->regs_base; + u32 cr; + + reinit_completion(&ospi->match_completion); + cr = readl_relaxed(regs_base + OSPI_CR); + writel_relaxed(cr | CR_SMIE, regs_base + OSPI_CR); + + if (!wait_for_completion_timeout(&ospi->match_completion, + msecs_to_jiffies(ospi->status_timeout))) { + u32 sr = readl_relaxed(regs_base + OSPI_SR); + + /* Avoid false timeout */ + if (!(sr & SR_SMF)) + return -ETIMEDOUT; + } + + writel_relaxed(FCR_CSMF, regs_base + OSPI_FCR); + + return 0; +} + +static int stm32_ospi_get_mode(u8 buswidth) +{ + switch (buswidth) { + case 8: + return CCR_BUSWIDTH_8; + case 4: + return CCR_BUSWIDTH_4; + default: + return buswidth; + } +} + +static int stm32_ospi_send(struct spi_device *spi, const struct spi_mem_op *op) +{ + struct stm32_ospi *ospi = spi_controller_get_devdata(spi->controller); + void __iomem *regs_base = ospi->regs_base; + u32 ccr, cr, dcr2, tcr; + int timeout, err = 0, err_poll_status = 0; + u8 cs = spi->chip_select[ffs(spi->cs_index_mask) - 1]; + + dev_dbg(ospi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n", + op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth, + op->dummy.buswidth, op->data.buswidth, + op->addr.val, op->data.nbytes); + + cr = readl_relaxed(ospi->regs_base + OSPI_CR); + cr &= ~CR_CSSEL; + cr |= FIELD_PREP(CR_CSSEL, cs); + cr &= ~CR_FMODE_MASK; + cr |= FIELD_PREP(CR_FMODE_MASK, ospi->fmode); + writel_relaxed(cr, regs_base + OSPI_CR); + + if (op->data.nbytes) + writel_relaxed(op->data.nbytes - 1, regs_base + OSPI_DLR); + + /* set prescaler */ + dcr2 = readl_relaxed(regs_base + OSPI_DCR2); + dcr2 |= FIELD_PREP(DCR2_PRESC_MASK, ospi->flash_presc[cs]); + writel_relaxed(dcr2, regs_base + OSPI_DCR2); + + ccr = FIELD_PREP(CCR_IMODE_MASK, 
stm32_ospi_get_mode(op->cmd.buswidth)); + + if (op->addr.nbytes) { + ccr |= FIELD_PREP(CCR_ADMODE_MASK, + stm32_ospi_get_mode(op->addr.buswidth)); + ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1); + } + + tcr = TCR_SSHIFT; + if (op->dummy.buswidth && op->dummy.nbytes) { + tcr |= FIELD_PREP(TCR_DCYC_MASK, + op->dummy.nbytes * 8 / op->dummy.buswidth); + } + writel_relaxed(tcr, regs_base + OSPI_TCR); + + if (op->data.nbytes) { + ccr |= FIELD_PREP(CCR_DMODE_MASK, + stm32_ospi_get_mode(op->data.buswidth)); + } + + writel_relaxed(ccr, regs_base + OSPI_CCR); + + /* set instruction, must be set after ccr register update */ + writel_relaxed(op->cmd.opcode, regs_base + OSPI_IR); + + if (op->addr.nbytes && ospi->fmode != CR_FMODE_MM) + writel_relaxed(op->addr.val, regs_base + OSPI_AR); + + if (ospi->fmode == CR_FMODE_APM) + err_poll_status = stm32_ospi_wait_poll_status(ospi, op); + + err = stm32_ospi_xfer(ospi, op); + + /* + * Abort in: + * - the error case + * - memory-mapped read: prefetching must be stopped if we read the last + * byte of the device (device size - fifo size); since the device size + * is not known here, prefetching is always stopped. + */ + if (err || err_poll_status || ospi->fmode == CR_FMODE_MM) + goto abort; + + /* Wait for the end of the transfer in indirect mode */ + err = stm32_ospi_wait_cmd(ospi); + if (err) + goto abort; + + return 0; + +abort: + timeout = stm32_ospi_abort(ospi); + writel_relaxed(FCR_CTCF | FCR_CSMF, regs_base + OSPI_FCR); + + if (err || err_poll_status || timeout) + dev_err(ospi->dev, "%s err:%d err_poll_status:%d abort timeout:%d\n", + __func__, err, err_poll_status, timeout); + + return err; +} + +static int stm32_ospi_poll_status(struct spi_mem *mem, + const struct spi_mem_op *op, + u16 mask, u16 match, + unsigned long initial_delay_us, + unsigned long polling_rate_us, + unsigned long timeout_ms) +{ + struct stm32_ospi *ospi = spi_controller_get_devdata(mem->spi->controller); + void __iomem *regs_base = ospi->regs_base; + int ret; + + ret = pm_runtime_resume_and_get(ospi->dev); + if (ret < 0) + return ret; + + mutex_lock(&ospi->lock); + + writel_relaxed(mask, regs_base + OSPI_PSMKR); + writel_relaxed(match, regs_base + OSPI_PSMAR); + ospi->fmode = CR_FMODE_APM; + ospi->status_timeout = timeout_ms; + + ret = stm32_ospi_send(mem->spi, op); + mutex_unlock(&ospi->lock); + + pm_runtime_mark_last_busy(ospi->dev); + pm_runtime_put_autosuspend(ospi->dev); + + return ret; +} + +static int stm32_ospi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + struct stm32_ospi *ospi = spi_controller_get_devdata(mem->spi->controller); + int ret; + + ret = pm_runtime_resume_and_get(ospi->dev); + if (ret < 0) + return ret; + + mutex_lock(&ospi->lock); + if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes) + ospi->fmode = CR_FMODE_INDR; + else + ospi->fmode = CR_FMODE_INDW; + + ret = stm32_ospi_send(mem->spi, op); + mutex_unlock(&ospi->lock); + + pm_runtime_mark_last_busy(ospi->dev); + pm_runtime_put_autosuspend(ospi->dev); + + return ret; +} + +static int stm32_ospi_dirmap_create(struct spi_mem_dirmap_desc *desc) +{ + struct stm32_ospi *ospi = spi_controller_get_devdata(desc->mem->spi->controller); + + if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT) + return -EOPNOTSUPP; + + /* Should never happen, as mm_base == NULL is an error probe exit condition */ + if (!ospi->mm_base && desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN) + return -EOPNOTSUPP; + + if (!ospi->mm_size) + return -EOPNOTSUPP; + + return 0; +} + +static ssize_t stm32_ospi_dirmap_read(struct spi_mem_dirmap_desc *desc, +
u64 offs, size_t len, void *buf) +{ + struct stm32_ospi *ospi = spi_controller_get_devdata(desc->mem->spi->controller); + struct spi_mem_op op; + u32 addr_max; + int ret; + + ret = pm_runtime_resume_and_get(ospi->dev); + if (ret < 0) + return ret; + + mutex_lock(&ospi->lock); + /* + * Make a local copy of desc op_tmpl and complete dirmap rdesc + * spi_mem_op template with offs, len and *buf in order to get + * all needed transfer information into struct spi_mem_op + */ + memcpy(&op, &desc->info.op_tmpl, sizeof(struct spi_mem_op)); + dev_dbg(ospi->dev, "%s len = 0x%zx offs = 0x%llx buf = 0x%p\n", __func__, len, offs, buf); + + op.data.nbytes = len; + op.addr.val = desc->info.offset + offs; + op.data.buf.in = buf; + + addr_max = op.addr.val + op.data.nbytes + 1; + if (addr_max < ospi->mm_size && op.addr.buswidth) + ospi->fmode = CR_FMODE_MM; + else + ospi->fmode = CR_FMODE_INDR; + + ret = stm32_ospi_send(desc->mem->spi, &op); + mutex_unlock(&ospi->lock); + + pm_runtime_mark_last_busy(ospi->dev); + pm_runtime_put_autosuspend(ospi->dev); + + return ret ?: len; +} + +static int stm32_ospi_transfer_one_message(struct spi_controller *ctrl, + struct spi_message *msg) +{ + struct stm32_ospi *ospi = spi_controller_get_devdata(ctrl); + struct spi_transfer *transfer; + struct spi_device *spi = msg->spi; + struct spi_mem_op op; + struct gpio_desc *cs_gpiod = spi->cs_gpiod[ffs(spi->cs_index_mask) - 1]; + int ret = 0; + + if (!cs_gpiod) + return -EOPNOTSUPP; + + ret = pm_runtime_resume_and_get(ospi->dev); + if (ret < 0) + return ret; + + mutex_lock(&ospi->lock); + + gpiod_set_value_cansleep(cs_gpiod, true); + + list_for_each_entry(transfer, &msg->transfers, transfer_list) { + u8 dummy_bytes = 0; + + memset(&op, 0, sizeof(op)); + + dev_dbg(ospi->dev, "tx_buf:%p tx_nbits:%d rx_buf:%p rx_nbits:%d len:%d dummy_data:%d\n", + transfer->tx_buf, transfer->tx_nbits, + transfer->rx_buf, transfer->rx_nbits, + transfer->len, transfer->dummy_data); + + /* + * OSPI hardware supports dummy bytes transfer. 
+		 * If the current transfer is a dummy-byte transfer, merge it
+		 * with the next transfer to satisfy the OSPI block constraint
+		 */
+		if (transfer->dummy_data) {
+			op.dummy.buswidth = transfer->tx_nbits;
+			op.dummy.nbytes = transfer->len;
+			dummy_bytes = transfer->len;
+
+			/* If this happens, the message was not built correctly */
+			if (list_is_last(&transfer->transfer_list, &msg->transfers)) {
+				ret = -EINVAL;
+				goto end_of_transfer;
+			}
+
+			transfer = list_next_entry(transfer, transfer_list);
+		}
+
+		op.data.nbytes = transfer->len;
+
+		if (transfer->rx_buf) {
+			ospi->fmode = CR_FMODE_INDR;
+			op.data.buswidth = transfer->rx_nbits;
+			op.data.dir = SPI_MEM_DATA_IN;
+			op.data.buf.in = transfer->rx_buf;
+		} else {
+			ospi->fmode = CR_FMODE_INDW;
+			op.data.buswidth = transfer->tx_nbits;
+			op.data.dir = SPI_MEM_DATA_OUT;
+			op.data.buf.out = transfer->tx_buf;
+		}
+
+		ret = stm32_ospi_send(spi, &op);
+		if (ret)
+			goto end_of_transfer;
+
+		msg->actual_length += transfer->len + dummy_bytes;
+	}
+
+end_of_transfer:
+	gpiod_set_value_cansleep(cs_gpiod, false);
+
+	mutex_unlock(&ospi->lock);
+
+	msg->status = ret;
+	spi_finalize_current_message(ctrl);
+
+	pm_runtime_mark_last_busy(ospi->dev);
+	pm_runtime_put_autosuspend(ospi->dev);
+
+	return ret;
+}
+
+static int stm32_ospi_setup(struct spi_device *spi)
+{
+	struct spi_controller *ctrl = spi->controller;
+	struct stm32_ospi *ospi = spi_controller_get_devdata(ctrl);
+	void __iomem *regs_base = ospi->regs_base;
+	int ret;
+	u8 cs = spi->chip_select[ffs(spi->cs_index_mask) - 1];
+
+	if (ctrl->busy)
+		return -EBUSY;
+
+	if (!spi->max_speed_hz)
+		return -EINVAL;
+
+	ret = pm_runtime_resume_and_get(ospi->dev);
+	if (ret < 0)
+		return ret;
+
+	ospi->flash_presc[cs] = DIV_ROUND_UP(ospi->clk_rate, spi->max_speed_hz) - 1;
+
+	mutex_lock(&ospi->lock);
+
+	ospi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_EN;
+	writel_relaxed(ospi->cr_reg, regs_base + OSPI_CR);
+
+	/* Set DCR fsize to the maximum address */
+	ospi->dcr_reg = DCR1_DEVSIZE_MASK | DCR1_DLYBYP;
+	writel_relaxed(ospi->dcr_reg, regs_base + OSPI_DCR1);
+
+	mutex_unlock(&ospi->lock);
+
+	pm_runtime_mark_last_busy(ospi->dev);
+	pm_runtime_put_autosuspend(ospi->dev);
+
+	return 0;
+}
+
+/*
+ * No special host constraint, so use the default spi_mem_default_supports_op
+ * to check the supported mode.
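+ * Note that dirmap is read-only on this controller:
+ * stm32_ospi_dirmap_create() rejects SPI_MEM_DATA_OUT templates.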
+ */
+static const struct spi_controller_mem_ops stm32_ospi_mem_ops = {
+	.exec_op = stm32_ospi_exec_op,
+	.dirmap_create = stm32_ospi_dirmap_create,
+	.dirmap_read = stm32_ospi_dirmap_read,
+	.poll_status = stm32_ospi_poll_status,
+};
+
+static int stm32_ospi_get_resources(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct stm32_ospi *ospi = platform_get_drvdata(pdev);
+	struct resource *res;
+	struct reserved_mem *rmem = NULL;
+	struct device_node *node;
+	int ret;
+
+	ospi->regs_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+	if (IS_ERR(ospi->regs_base))
+		return PTR_ERR(ospi->regs_base);
+
+	ospi->regs_phys_base = res->start;
+
+	ospi->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(ospi->clk))
+		return dev_err_probe(dev, PTR_ERR(ospi->clk),
+				     "Can't get clock\n");
+
+	ospi->clk_rate = clk_get_rate(ospi->clk);
+	if (!ospi->clk_rate) {
+		dev_err(dev, "Invalid clock rate\n");
+		return -EINVAL;
+	}
+
+	ospi->irq = platform_get_irq(pdev, 0);
+	if (ospi->irq < 0)
+		return ospi->irq;
+
+	ret = devm_request_irq(dev, ospi->irq, stm32_ospi_irq, 0,
+			       dev_name(dev), ospi);
+	if (ret) {
+		dev_err(dev, "Failed to request irq\n");
+		return ret;
+	}
+
+	ospi->rstc = devm_reset_control_array_get_optional_exclusive(dev);
+	if (IS_ERR(ospi->rstc))
+		return dev_err_probe(dev, PTR_ERR(ospi->rstc),
+				     "Can't get reset\n");
+
+	ospi->dma_chrx = dma_request_chan(dev, "rx");
+	if (IS_ERR(ospi->dma_chrx)) {
+		ret = PTR_ERR(ospi->dma_chrx);
+		ospi->dma_chrx = NULL;
+		if (ret == -EPROBE_DEFER)
+			goto err_dma;
+	}
+
+	ospi->dma_chtx = dma_request_chan(dev, "tx");
+	if (IS_ERR(ospi->dma_chtx)) {
+		ret = PTR_ERR(ospi->dma_chtx);
+		ospi->dma_chtx = NULL;
+		if (ret == -EPROBE_DEFER)
+			goto err_dma;
+	}
+
+	node = of_parse_phandle(dev->of_node, "memory-region", 0);
+	if (node)
+		rmem = of_reserved_mem_lookup(node);
+	of_node_put(node);
+
+	if (rmem) {
+		ospi->mm_size = rmem->size;
+		ospi->mm_base = devm_ioremap(dev, rmem->base, rmem->size);
+		if (!ospi->mm_base) {
+			dev_err(dev, "unable to map memory region: %pa+%pa\n",
+				&rmem->base, &rmem->size);
+			ret = -ENOMEM;
+			goto err_dma;
+		}
+
+		if (ospi->mm_size > STM32_OSPI_MAX_MMAP_SZ) {
+			dev_err(dev, "Memory map size outside bounds\n");
+			ret = -EINVAL;
+			goto err_dma;
+		}
+	} else {
+		dev_info(dev, "No memory-map region found\n");
+	}
+
+	init_completion(&ospi->data_completion);
+	init_completion(&ospi->match_completion);
+
+	return 0;
+
+err_dma:
+	dev_info(dev, "Can't get all resources (%d)\n", ret);
+
+	if (ospi->dma_chtx)
+		dma_release_channel(ospi->dma_chtx);
+	if (ospi->dma_chrx)
+		dma_release_channel(ospi->dma_chrx);
+
+	return ret;
+}
+
+static int stm32_ospi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct spi_controller *ctrl;
+	struct stm32_ospi *ospi;
+	struct dma_slave_config dma_cfg;
+	struct device_node *child;
+	int ret;
+	u8 spi_flash_count = 0;
+
+	/*
+	 * Flash subnodes sanity check:
+	 * 1 or 2 spi-nand/spi-nor flashes => supported
+	 * All other flash node configuration => not supported
+	 */
+	for_each_available_child_of_node(dev->of_node, child) {
+		if (of_device_is_compatible(child, "jedec,spi-nor") ||
+		    of_device_is_compatible(child, "spi-nand"))
+			spi_flash_count++;
+	}
+
+	if (spi_flash_count == 0 || spi_flash_count > 2) {
+		dev_err(dev, "Incorrect DT flash node\n");
+		return -ENODEV;
+	}
+
+	ctrl = devm_spi_alloc_host(dev, sizeof(*ospi));
+	if (!ctrl)
+		return -ENOMEM;
+
+	ospi = spi_controller_get_devdata(ctrl);
+	ospi->ctrl = ctrl;
+
+	ospi->dev = &pdev->dev;
+
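+	/* stm32_ospi_get_resources() relies on the drvdata set just below */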
platform_set_drvdata(pdev, ospi); + + ret = stm32_ospi_get_resources(pdev); + if (ret) + return ret; + + memset(&dma_cfg, 0, sizeof(dma_cfg)); + dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + dma_cfg.src_addr = ospi->regs_phys_base + OSPI_DR; + dma_cfg.dst_addr = ospi->regs_phys_base + OSPI_DR; + dma_cfg.src_maxburst = 4; + dma_cfg.dst_maxburst = 4; + stm32_ospi_dma_setup(ospi, &dma_cfg); + + mutex_init(&ospi->lock); + + ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | + SPI_TX_DUAL | SPI_TX_QUAD | + SPI_TX_OCTAL | SPI_RX_OCTAL; + ctrl->flags = SPI_CONTROLLER_HALF_DUPLEX; + ctrl->setup = stm32_ospi_setup; + ctrl->bus_num = -1; + ctrl->mem_ops = &stm32_ospi_mem_ops; + ctrl->use_gpio_descriptors = true; + ctrl->transfer_one_message = stm32_ospi_transfer_one_message; + ctrl->num_chipselect = STM32_OSPI_MAX_NORCHIP; + ctrl->dev.of_node = dev->of_node; + + pm_runtime_enable(ospi->dev); + pm_runtime_set_autosuspend_delay(ospi->dev, STM32_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(ospi->dev); + + ret = pm_runtime_resume_and_get(ospi->dev); + if (ret < 0) + goto err_pm_enable; + + if (ospi->rstc) { + reset_control_assert(ospi->rstc); + udelay(2); + reset_control_deassert(ospi->rstc); + } + + ret = spi_register_controller(ctrl); + if (ret) { + /* Disable ospi */ + writel_relaxed(0, ospi->regs_base + OSPI_CR); + goto err_pm_resume; + } + + pm_runtime_mark_last_busy(ospi->dev); + pm_runtime_put_autosuspend(ospi->dev); + + return 0; + +err_pm_resume: + pm_runtime_put_sync_suspend(ospi->dev); + +err_pm_enable: + pm_runtime_force_suspend(ospi->dev); + mutex_destroy(&ospi->lock); + if (ospi->dma_chtx) + dma_release_channel(ospi->dma_chtx); + if (ospi->dma_chrx) + dma_release_channel(ospi->dma_chrx); + + return ret; +} + +static void stm32_ospi_remove(struct platform_device *pdev) +{ + struct stm32_ospi *ospi = platform_get_drvdata(pdev); + int ret; + + ret = pm_runtime_resume_and_get(ospi->dev); + if (ret < 0) + return; + + spi_unregister_controller(ospi->ctrl); + /* Disable ospi */ + writel_relaxed(0, ospi->regs_base + OSPI_CR); + mutex_destroy(&ospi->lock); + + if (ospi->dma_chtx) + dma_release_channel(ospi->dma_chtx); + if (ospi->dma_chrx) + dma_release_channel(ospi->dma_chrx); + + pm_runtime_put_sync_suspend(ospi->dev); + pm_runtime_force_suspend(ospi->dev); +} + +static int __maybe_unused stm32_ospi_suspend(struct device *dev) +{ + struct stm32_ospi *ospi = dev_get_drvdata(dev); + + pinctrl_pm_select_sleep_state(dev); + + return pm_runtime_force_suspend(ospi->dev); +} + +static int __maybe_unused stm32_ospi_resume(struct device *dev) +{ + struct stm32_ospi *ospi = dev_get_drvdata(dev); + void __iomem *regs_base = ospi->regs_base; + int ret; + + ret = pm_runtime_force_resume(ospi->dev); + if (ret < 0) + return ret; + + pinctrl_pm_select_default_state(dev); + + ret = pm_runtime_resume_and_get(ospi->dev); + if (ret < 0) + return ret; + + writel_relaxed(ospi->cr_reg, regs_base + OSPI_CR); + writel_relaxed(ospi->dcr_reg, regs_base + OSPI_DCR1); + pm_runtime_mark_last_busy(ospi->dev); + pm_runtime_put_autosuspend(ospi->dev); + + return 0; +} + +static int __maybe_unused stm32_ospi_runtime_suspend(struct device *dev) +{ + struct stm32_ospi *ospi = dev_get_drvdata(dev); + + clk_disable_unprepare(ospi->clk); + + return 0; +} + +static int __maybe_unused stm32_ospi_runtime_resume(struct device *dev) +{ + struct stm32_ospi *ospi = dev_get_drvdata(dev); + + return clk_prepare_enable(ospi->clk); +} + +static const struct dev_pm_ops stm32_ospi_pm_ops 
= { + SET_SYSTEM_SLEEP_PM_OPS(stm32_ospi_suspend, stm32_ospi_resume) + SET_RUNTIME_PM_OPS(stm32_ospi_runtime_suspend, + stm32_ospi_runtime_resume, NULL) +}; + +static const struct of_device_id stm32_ospi_of_match[] = { + { .compatible = "st,stm32mp25-ospi" }, + {}, +}; +MODULE_DEVICE_TABLE(of, stm32_ospi_of_match); + +static struct platform_driver stm32_ospi_driver = { + .probe = stm32_ospi_probe, + .remove = stm32_ospi_remove, + .driver = { + .name = "stm32-ospi", + .pm = &stm32_ospi_pm_ops, + .of_match_table = stm32_ospi_of_match, + }, +}; +module_platform_driver(stm32_ospi_driver); + +MODULE_DESCRIPTION("STMicroelectronics STM32 OCTO SPI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c index 540b6948b24d..9691197bbf5a 100644 --- a/drivers/spi/spi-stm32-qspi.c +++ b/drivers/spi/spi-stm32-qspi.c @@ -362,11 +362,6 @@ static int stm32_qspi_send(struct spi_device *spi, const struct spi_mem_op *op) u32 ccr, cr; int timeout, err = 0, err_poll_status = 0; - dev_dbg(qspi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n", - op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth, - op->dummy.buswidth, op->data.buswidth, - op->addr.val, op->data.nbytes); - cr = readl_relaxed(qspi->io_base + QSPI_CR); cr &= ~CR_PRESC_MASK & ~CR_FSEL; cr |= FIELD_PREP(CR_PRESC_MASK, flash->presc); diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c index fcbe864c9b7d..aa92fd5a35a9 100644 --- a/drivers/spi/spi-sun4i.c +++ b/drivers/spi/spi-sun4i.c @@ -264,6 +264,9 @@ static int sun4i_spi_transfer_one(struct spi_controller *host, else reg |= SUN4I_CTL_DHB; + /* Now that the settings are correct, enable the interface */ + reg |= SUN4I_CTL_ENABLE; + sun4i_spi_write(sspi, SUN4I_CTL_REG, reg); /* Ensure that we have a parent clock fast enough */ @@ -404,7 +407,7 @@ static int sun4i_spi_runtime_resume(struct device *dev) } sun4i_spi_write(sspi, SUN4I_CTL_REG, - SUN4I_CTL_ENABLE | SUN4I_CTL_MASTER | SUN4I_CTL_TP); + SUN4I_CTL_MASTER | SUN4I_CTL_TP); return 0; @@ -462,6 +465,7 @@ static int sun4i_spi_probe(struct platform_device *pdev) sspi->host = host; host->max_speed_hz = 100 * 1000 * 1000; host->min_speed_hz = 3 * 1000; + host->use_gpio_descriptors = true; host->set_cs = sun4i_spi_set_cs; host->transfer_one = sun4i_spi_transfer_one; host->num_chipselect = 4; diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index 3822d7c8d8ed..795a8482c2c7 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ -728,9 +728,9 @@ static int tegra_spi_set_hw_cs_timing(struct spi_device *spi) u32 inactive_cycles; u8 cs_state; - if (setup->unit != SPI_DELAY_UNIT_SCK || - hold->unit != SPI_DELAY_UNIT_SCK || - inactive->unit != SPI_DELAY_UNIT_SCK) { + if ((setup->value && setup->unit != SPI_DELAY_UNIT_SCK) || + (hold->value && hold->unit != SPI_DELAY_UNIT_SCK) || + (inactive->value && inactive->unit != SPI_DELAY_UNIT_SCK)) { dev_err(&spi->dev, "Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n", SPI_DELAY_UNIT_SCK); diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c index 08e49a876894..665c06e1473b 100644 --- a/drivers/spi/spi-tegra210-quad.c +++ b/drivers/spi/spi-tegra210-quad.c @@ -134,7 +134,7 @@ #define QSPI_COMMAND_VALUE_SET(X) (((x) & 0xFF) << 0) #define QSPI_CMB_SEQ_CMD_CFG 0x1a0 -#define QSPI_COMMAND_X1_X2_X4(x) (((x) & 0x3) << 13) +#define QSPI_COMMAND_X1_X2_X4(x) ((((x) >> 1) & 0x3) << 13) #define QSPI_COMMAND_X1_X2_X4_MASK (0x03 << 13) #define QSPI_COMMAND_SDR_DDR BIT(12) #define 
QSPI_COMMAND_SIZE_SET(x) (((x) & 0xFF) << 0) @@ -147,7 +147,7 @@ #define QSPI_ADDRESS_VALUE_SET(X) (((x) & 0xFFFF) << 0) #define QSPI_CMB_SEQ_ADDR_CFG 0x1ac -#define QSPI_ADDRESS_X1_X2_X4(x) (((x) & 0x3) << 13) +#define QSPI_ADDRESS_X1_X2_X4(x) ((((x) >> 1) & 0x3) << 13) #define QSPI_ADDRESS_X1_X2_X4_MASK (0x03 << 13) #define QSPI_ADDRESS_SDR_DDR BIT(12) #define QSPI_ADDRESS_SIZE_SET(x) (((x) & 0xFF) << 0) @@ -1036,10 +1036,6 @@ static u32 tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len) { u32 addr_config = 0; - /* Extract Address configuration and value */ - is_ddr = 0; //Only SDR mode supported - bus_width = 0; //X1 mode - if (is_ddr) addr_config |= QSPI_ADDRESS_SDR_DDR; else @@ -1079,13 +1075,13 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi, switch (transfer_phase) { case CMD_TRANSFER: /* X1 SDR mode */ - cmd_config = tegra_qspi_cmd_config(false, 0, + cmd_config = tegra_qspi_cmd_config(false, xfer->tx_nbits, xfer->len); cmd_value = *((const u8 *)(xfer->tx_buf)); break; case ADDR_TRANSFER: /* X1 SDR mode */ - addr_config = tegra_qspi_addr_config(false, 0, + addr_config = tegra_qspi_addr_config(false, xfer->tx_nbits, xfer->len); address_value = *((const u32 *)(xfer->tx_buf)); break; @@ -1117,9 +1113,9 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi, (&tqspi->xfer_completion, QSPI_DMA_TIMEOUT); - if (WARN_ON(ret == 0)) { - dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n", - ret); + if (WARN_ON_ONCE(ret == 0)) { + dev_err_ratelimited(tqspi->dev, + "QSPI Transfer failed with timeout\n"); if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX)) dmaengine_terminate_all @@ -1163,26 +1159,22 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi, ret = -EIO; goto exit; } - if (!xfer->cs_change) { - tegra_qspi_transfer_end(spi); - spi_transfer_delay_exec(xfer); - } break; default: ret = -EINVAL; goto exit; } msg->actual_length += xfer->len; + if (!xfer->cs_change && transfer_phase == DATA_TRANSFER) { + tegra_qspi_transfer_end(spi); + spi_transfer_delay_exec(xfer); + } transfer_phase++; } ret = 0; exit: msg->status = ret; - if (ret < 0) { - tegra_qspi_transfer_end(spi); - spi_transfer_delay_exec(xfer); - } return ret; } diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 9122350402b5..a284d2794586 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -623,7 +623,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem, mutex_lock(&qspi->list_lock); if (!qspi->mmap_enabled || qspi->current_cs != spi_get_chipselect(mem->spi, 0)) { - ti_qspi_setup_clk(qspi, mem->spi->max_speed_hz); + ti_qspi_setup_clk(qspi, op->max_freq); ti_qspi_enable_memory_map(mem->spi); } ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth, @@ -658,6 +658,10 @@ static const struct spi_controller_mem_ops ti_qspi_mem_ops = { .adjust_op_size = ti_qspi_adjust_op_size, }; +static const struct spi_controller_mem_caps ti_qspi_mem_caps = { + .per_op_freq = true, +}; + static int ti_qspi_start_transfer_one(struct spi_controller *host, struct spi_message *m) { @@ -777,6 +781,7 @@ static int ti_qspi_probe(struct platform_device *pdev) host->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8); host->mem_ops = &ti_qspi_mem_ops; + host->mem_caps = &ti_qspi_mem_caps; if (!of_property_read_u32(np, "num-cs", &num_cs)) host->num_chipselect = num_cs; @@ -826,20 +831,12 @@ static int ti_qspi_probe(struct platform_device *pdev) if (of_property_present(np, "syscon-chipselects")) { 
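+		/*
+		 * The phandle's single argument cell supplies the control
+		 * module register offset, read into ctrl_reg by the lookup
+		 * helper below; no separate index read is needed.
+		 */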
qspi->ctrl_base = - syscon_regmap_lookup_by_phandle(np, - "syscon-chipselects"); + syscon_regmap_lookup_by_phandle_args(np, "syscon-chipselects", + 1, &qspi->ctrl_reg); if (IS_ERR(qspi->ctrl_base)) { ret = PTR_ERR(qspi->ctrl_base); goto free_host; } - ret = of_property_read_u32_index(np, - "syscon-chipselects", - 1, &qspi->ctrl_reg); - if (ret) { - dev_err(&pdev->dev, - "couldn't get ctrl_mod reg index\n"); - goto free_host; - } } qspi->fclk = devm_clk_get(&pdev->dev, "fck"); diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c index 5caf0abf3763..5232483c4a3a 100644 --- a/drivers/spi/spi-zynq-qspi.c +++ b/drivers/spi/spi-zynq-qspi.c @@ -318,6 +318,7 @@ static void zynq_qspi_chipselect(struct spi_device *spi, bool assert) * zynq_qspi_config_op - Configure QSPI controller for specified transfer * @xqspi: Pointer to the zynq_qspi structure * @spi: Pointer to the spi_device structure + * @op: The memory operation to execute * * Sets the operational mode of QSPI controller for the next QSPI transfer and * sets the requested clock frequency. @@ -331,7 +332,8 @@ static void zynq_qspi_chipselect(struct spi_device *spi, bool assert) * controller the driver will set the highest or lowest frequency supported by * controller. */ -static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi) +static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi, + const struct spi_mem_op *op) { u32 config_reg, baud_rate_val = 0; @@ -346,7 +348,7 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi) */ while ((baud_rate_val < ZYNQ_QSPI_CONFIG_BAUD_DIV_MAX) && (clk_get_rate(xqspi->refclk) / (2 << baud_rate_val)) > - spi->max_speed_hz) + op->max_freq) baud_rate_val++; config_reg = zynq_qspi_read(xqspi, ZYNQ_QSPI_CONFIG_OFFSET); @@ -538,12 +540,8 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem, int err = 0, i; u8 *tmpbuf; - dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n", - op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth, - op->dummy.buswidth, op->data.buswidth); - zynq_qspi_chipselect(mem->spi, true); - zynq_qspi_config_op(xqspi, mem->spi); + zynq_qspi_config_op(xqspi, mem->spi, op); if (op->cmd.opcode) { reinit_completion(&xqspi->data_completion); @@ -629,6 +627,10 @@ static const struct spi_controller_mem_ops zynq_qspi_mem_ops = { .exec_op = zynq_qspi_exec_mem_op, }; +static const struct spi_controller_mem_caps zynq_qspi_mem_caps = { + .per_op_freq = true, +}; + /** * zynq_qspi_probe - Probe method for the QSPI driver * @pdev: Pointer to the platform_device structure @@ -715,6 +717,7 @@ static int zynq_qspi_probe(struct platform_device *pdev) ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD; ctlr->mem_ops = &zynq_qspi_mem_ops; + ctlr->mem_caps = &zynq_qspi_mem_caps; ctlr->setup = zynq_qspi_setup_op; ctlr->max_speed_hz = clk_get_rate(xqspi->refclk) / 2; ctlr->dev.of_node = np; diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c index 549a6e0c9654..595b6dc10845 100644 --- a/drivers/spi/spi-zynqmp-gqspi.c +++ b/drivers/spi/spi-zynqmp-gqspi.c @@ -82,7 +82,6 @@ #define GQSPI_GENFIFO_RX 0x00020000 #define GQSPI_GENFIFO_STRIPE 0x00040000 #define GQSPI_GENFIFO_POLL 0x00080000 -#define GQSPI_GENFIFO_EXP_START 0x00000100 #define GQSPI_FIFO_CTRL_RST_RX_FIFO_MASK 0x00000004 #define GQSPI_FIFO_CTRL_RST_TX_FIFO_MASK 0x00000002 #define GQSPI_FIFO_CTRL_RST_GEN_FIFO_MASK 0x00000001 @@ -535,7 +534,7 @@ static inline u32 zynqmp_qspi_selectspimode(struct zynqmp_qspi *xqspi, * 
zynqmp_qspi_config_op - Configure QSPI controller for specified * transfer * @xqspi: Pointer to the zynqmp_qspi structure - * @qspi: Pointer to the spi_device structure + * @op: The memory operation to execute * * Sets the operational mode of QSPI controller for the next QSPI transfer and * sets the requested clock frequency. @@ -553,12 +552,12 @@ static inline u32 zynqmp_qspi_selectspimode(struct zynqmp_qspi *xqspi, * frequency supported by controller. */ static int zynqmp_qspi_config_op(struct zynqmp_qspi *xqspi, - struct spi_device *qspi) + const struct spi_mem_op *op) { ulong clk_rate; u32 config_reg, req_speed_hz, baud_rate_val = 0; - req_speed_hz = qspi->max_speed_hz; + req_speed_hz = op->max_freq; if (xqspi->speed_hz != req_speed_hz) { xqspi->speed_hz = req_speed_hz; @@ -580,6 +579,8 @@ static int zynqmp_qspi_config_op(struct zynqmp_qspi *xqspi, zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg); zynqmp_qspi_set_tapdelay(xqspi, baud_rate_val); } + + dev_dbg(xqspi->dev, "config speed %u\n", req_speed_hz); return 0; } @@ -670,69 +671,77 @@ static void zynqmp_qspi_readrxfifo(struct zynqmp_qspi *xqspi, u32 size) static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits, u32 genfifoentry) { - u32 transfer_len = 0; + u32 transfer_len, tempcount, exponent; + u8 imm_data; - if (xqspi->txbuf) { - genfifoentry &= ~GQSPI_GENFIFO_RX; - genfifoentry |= GQSPI_GENFIFO_DATA_XFER; - genfifoentry |= GQSPI_GENFIFO_TX; - transfer_len = xqspi->bytes_to_transfer; - } else if (xqspi->rxbuf) { - genfifoentry &= ~GQSPI_GENFIFO_TX; - genfifoentry |= GQSPI_GENFIFO_DATA_XFER; + genfifoentry |= GQSPI_GENFIFO_DATA_XFER; + if (xqspi->rxbuf) { genfifoentry |= GQSPI_GENFIFO_RX; if (xqspi->mode == GQSPI_MODE_DMA) transfer_len = xqspi->dma_rx_bytes; else transfer_len = xqspi->bytes_to_receive; } else { - /* Sending dummy circles here */ - genfifoentry &= ~(GQSPI_GENFIFO_TX | GQSPI_GENFIFO_RX); - genfifoentry |= GQSPI_GENFIFO_DATA_XFER; transfer_len = xqspi->bytes_to_transfer; } + + if (xqspi->txbuf) + genfifoentry |= GQSPI_GENFIFO_TX; + genfifoentry |= zynqmp_qspi_selectspimode(xqspi, nbits); xqspi->genfifoentry = genfifoentry; - - if ((transfer_len) < GQSPI_GENFIFO_IMM_DATA_MASK) { - genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK; - genfifoentry |= transfer_len; - zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry); - } else { - int tempcount = transfer_len; - u32 exponent = 8; /* 2^8 = 256 */ - u8 imm_data = tempcount & 0xFF; - - tempcount &= ~(tempcount & 0xFF); - /* Immediate entry */ - if (tempcount != 0) { - /* Exponent entries */ - genfifoentry |= GQSPI_GENFIFO_EXP; - while (tempcount != 0) { - if (tempcount & GQSPI_GENFIFO_EXP_START) { - genfifoentry &= - ~GQSPI_GENFIFO_IMM_DATA_MASK; - genfifoentry |= exponent; - zynqmp_gqspi_write(xqspi, - GQSPI_GEN_FIFO_OFST, - genfifoentry); - } - tempcount = tempcount >> 1; - exponent++; - } - } - if (imm_data != 0) { - genfifoentry &= ~GQSPI_GENFIFO_EXP; - genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK; - genfifoentry |= (u8)(imm_data & 0xFF); + dev_dbg(xqspi->dev, "genfifo %05x transfer_len %u\n", + genfifoentry, transfer_len); + + /* Exponent entries */ + imm_data = transfer_len; + tempcount = transfer_len >> 8; + exponent = 8; + genfifoentry |= GQSPI_GENFIFO_EXP; + while (tempcount) { + if (tempcount & 1) zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, - genfifoentry); - } - } - if (xqspi->mode == GQSPI_MODE_IO && xqspi->rxbuf) { - /* Dummy generic FIFO entry */ - zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0); + genfifoentry | exponent); 
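+		/* Each set bit of the length emits one 2^exponent-byte entry */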
+ tempcount >>= 1; + exponent++; } + + /* Immediate entry */ + genfifoentry &= ~GQSPI_GENFIFO_EXP; + if (imm_data) + zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, + genfifoentry | imm_data); + + /* Dummy generic FIFO entry */ + if (xqspi->mode == GQSPI_MODE_IO && xqspi->rxbuf) + zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0); +} + +/** + * zynqmp_qspi_disable_dma() - Disable DMA mode + * @xqspi: GQSPI instance + */ +static void zynqmp_qspi_disable_dma(struct zynqmp_qspi *xqspi) +{ + u32 config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST); + + config_reg &= ~GQSPI_CFG_MODE_EN_MASK; + zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg); + xqspi->mode = GQSPI_MODE_IO; +} + +/** + * zynqmp_qspi_enable_dma() - Enable DMA mode + * @xqspi: GQSPI instance + */ +static void zynqmp_qspi_enable_dma(struct zynqmp_qspi *xqspi) +{ + u32 config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST); + + config_reg &= ~GQSPI_CFG_MODE_EN_MASK; + config_reg |= GQSPI_CFG_MODE_EN_DMA_MASK; + zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg); + xqspi->mode = GQSPI_MODE_DMA; } /** @@ -744,7 +753,7 @@ static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits, */ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi) { - u32 config_reg, genfifoentry; + u32 genfifoentry; dma_unmap_single(xqspi->dev, xqspi->dma_addr, xqspi->dma_rx_bytes, DMA_FROM_DEVICE); @@ -758,9 +767,7 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi) if (xqspi->bytes_to_receive > 0) { /* Switch to IO mode,for remaining bytes to receive */ - config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST); - config_reg &= ~GQSPI_CFG_MODE_EN_MASK; - zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg); + zynqmp_qspi_disable_dma(xqspi); /* Initiate the transfer of remaining bytes */ genfifoentry = xqspi->genfifoentry; @@ -799,7 +806,6 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi) static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id) { struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_id; - irqreturn_t ret = IRQ_NONE; u32 status, mask, dma_status = 0; status = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST); @@ -814,27 +820,24 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id) dma_status); } - if (mask & GQSPI_ISR_TXNOT_FULL_MASK) { + if (!mask && !dma_status) + return IRQ_NONE; + + if (mask & GQSPI_ISR_TXNOT_FULL_MASK) zynqmp_qspi_filltxfifo(xqspi, GQSPI_TX_FIFO_FILL); - ret = IRQ_HANDLED; - } - if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) { + if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) zynqmp_process_dma_irq(xqspi); - ret = IRQ_HANDLED; - } else if (!(mask & GQSPI_IER_RXEMPTY_MASK) && - (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) { + else if (!(mask & GQSPI_IER_RXEMPTY_MASK) && + (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL); - ret = IRQ_HANDLED; - } if (xqspi->bytes_to_receive == 0 && xqspi->bytes_to_transfer == 0 && ((status & GQSPI_IRQ_MASK) == GQSPI_IRQ_MASK)) { zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK); complete(&xqspi->data_completion); - ret = IRQ_HANDLED; } - return ret; + return IRQ_HANDLED; } /** @@ -845,17 +848,14 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id) */ static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi) { - u32 rx_bytes, rx_rem, config_reg; + u32 rx_bytes, rx_rem; dma_addr_t addr; u64 dma_align = (u64)(uintptr_t)xqspi->rxbuf; if (xqspi->bytes_to_receive < 8 || ((dma_align & GQSPI_DMA_UNALIGN) != 0x0)) { /* Setting to IO mode */ - config_reg = 
zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST); - config_reg &= ~GQSPI_CFG_MODE_EN_MASK; - zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg); - xqspi->mode = GQSPI_MODE_IO; + zynqmp_qspi_disable_dma(xqspi); xqspi->dma_rx_bytes = 0; return 0; } @@ -878,14 +878,7 @@ static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi) zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_ADDR_MSB_OFST, ((u32)addr) & 0xfff); - /* Enabling the DMA mode */ - config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST); - config_reg &= ~GQSPI_CFG_MODE_EN_MASK; - config_reg |= GQSPI_CFG_MODE_EN_DMA_MASK; - zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg); - - /* Switch to DMA mode */ - xqspi->mode = GQSPI_MODE_DMA; + zynqmp_qspi_enable_dma(xqspi); /* Write the number of bytes to transfer */ zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_SIZE_OFST, rx_bytes); @@ -905,18 +898,10 @@ static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi) static void zynqmp_qspi_write_op(struct zynqmp_qspi *xqspi, u8 tx_nbits, u32 genfifoentry) { - u32 config_reg; - zynqmp_qspi_fillgenfifo(xqspi, tx_nbits, genfifoentry); zynqmp_qspi_filltxfifo(xqspi, GQSPI_TXD_DEPTH); - if (xqspi->mode == GQSPI_MODE_DMA) { - config_reg = zynqmp_gqspi_read(xqspi, - GQSPI_CONFIG_OFST); - config_reg &= ~GQSPI_CFG_MODE_EN_MASK; - zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, - config_reg); - xqspi->mode = GQSPI_MODE_IO; - } + if (xqspi->mode == GQSPI_MODE_DMA) + zynqmp_qspi_disable_dma(xqspi); } /** @@ -1059,20 +1044,16 @@ static unsigned long zynqmp_qspi_timeout(struct zynqmp_qspi *xqspi, u8 bits, static int zynqmp_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) { - struct zynqmp_qspi *xqspi = spi_controller_get_devdata - (mem->spi->controller); + struct zynqmp_qspi *xqspi = + spi_controller_get_devdata(mem->spi->controller); unsigned long timeout; int err = 0, i; u32 genfifoentry = 0; u16 opcode = op->cmd.opcode; u64 opaddr; - dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n", - op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth, - op->dummy.buswidth, op->data.buswidth); - mutex_lock(&xqspi->op_lock); - zynqmp_qspi_config_op(xqspi, mem->spi); + zynqmp_qspi_config_op(xqspi, op); zynqmp_qspi_chipselect(mem->spi, false); genfifoentry |= xqspi->genfifocs; genfifoentry |= xqspi->genfifobus; @@ -1224,6 +1205,10 @@ static const struct spi_controller_mem_ops zynqmp_qspi_mem_ops = { .exec_op = zynqmp_qspi_exec_op, }; +static const struct spi_controller_mem_caps zynqmp_qspi_mem_caps = { + .per_op_freq = true, +}; + /** * zynqmp_qspi_probe - Probe method for the QSPI driver * @pdev: Pointer to the platform_device structure @@ -1333,6 +1318,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev) ctlr->bits_per_word_mask = SPI_BPW_MASK(8); ctlr->mem_ops = &zynqmp_qspi_mem_ops; + ctlr->mem_caps = &zynqmp_qspi_mem_caps; ctlr->setup = zynqmp_qspi_setup_op; ctlr->bits_per_word_mask = SPI_BPW_MASK(8); ctlr->dev.of_node = np; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index ff1add2ecb91..c6ac4b05bdb1 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -31,6 +31,7 @@ #include <linux/ptp_clock_kernel.h> #include <linux/sched/rt.h> #include <linux/slab.h> +#include <linux/spi/offload/types.h> #include <linux/spi/spi.h> #include <linux/spi/spi-mem.h> #include <uapi/linux/sched/types.h> @@ -42,7 +43,7 @@ EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop); #include "internals.h" -static DEFINE_IDR(spi_master_idr); +static DEFINE_IDR(spi_controller_idr); static void spidev_release(struct device *dev) { @@ -305,7 +306,7 @@ static const struct 
attribute_group spi_controller_statistics_group = { .attrs = spi_controller_statistics_attrs, }; -static const struct attribute_group *spi_master_groups[] = { +static const struct attribute_group *spi_controller_groups[] = { &spi_controller_statistics_group, NULL, }; @@ -410,29 +411,21 @@ static int spi_probe(struct device *dev) { const struct spi_driver *sdrv = to_spi_driver(dev->driver); struct spi_device *spi = to_spi_device(dev); + struct fwnode_handle *fwnode = dev_fwnode(dev); int ret; ret = of_clk_set_defaults(dev->of_node, false); if (ret) return ret; - if (dev->of_node) { + if (is_of_node(fwnode)) spi->irq = of_irq_get(dev->of_node, 0); - if (spi->irq == -EPROBE_DEFER) - return dev_err_probe(dev, -EPROBE_DEFER, "Failed to get irq\n"); - if (spi->irq < 0) - spi->irq = 0; - } - - if (has_acpi_companion(dev) && spi->irq < 0) { - struct acpi_device *adev = to_acpi_device_node(dev->fwnode); - - spi->irq = acpi_dev_gpio_irq_get(adev, 0); - if (spi->irq == -EPROBE_DEFER) - return -EPROBE_DEFER; - if (spi->irq < 0) - spi->irq = 0; - } + else if (is_acpi_device_node(fwnode) && spi->irq < 0) + spi->irq = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), 0); + if (spi->irq == -EPROBE_DEFER) + return dev_err_probe(dev, spi->irq, "Failed to get irq\n"); + if (spi->irq < 0) + spi->irq = 0; ret = dev_pm_domain_attach(dev, true); if (ret) @@ -874,15 +867,18 @@ EXPORT_SYMBOL_GPL(spi_new_device); */ void spi_unregister_device(struct spi_device *spi) { + struct fwnode_handle *fwnode; + if (!spi) return; - if (spi->dev.of_node) { - of_node_clear_flag(spi->dev.of_node, OF_POPULATED); - of_node_put(spi->dev.of_node); + fwnode = dev_fwnode(&spi->dev); + if (is_of_node(fwnode)) { + of_node_clear_flag(to_of_node(fwnode), OF_POPULATED); + of_node_put(to_of_node(fwnode)); + } else if (is_acpi_device_node(fwnode)) { + acpi_device_clear_enumerated(to_acpi_device_node(fwnode)); } - if (ACPI_COMPANION(&spi->dev)) - acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev)); device_remove_software_node(&spi->dev); device_del(&spi->dev); spi_cleanup(spi); @@ -1059,7 +1055,7 @@ static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool * ambiguity. That's why we use enable, that takes SPI_CS_HIGH * into account. 
*/ - if (has_acpi_companion(&spi->dev)) + if (is_acpi_device_node(dev_fwnode(&spi->dev))) gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable); else /* Polarity handled by GPIO library */ @@ -1111,7 +1107,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force) spi_toggle_csgpiod(spi, idx, enable, activate); } } - /* Some SPI masters need both GPIO CS & slave_select */ + /* Some SPI controllers need both GPIO CS & ->set_cs() */ if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) && spi->controller->set_cs) spi->controller->set_cs(spi, !enable); @@ -1500,10 +1496,7 @@ static void _spi_transfer_delay_ns(u32 ns) } else { u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC); - if (us <= 10) - udelay(us); - else - usleep_range(us, us + DIV_ROUND_UP(us, 10)); + fsleep(us); } } @@ -2060,7 +2053,7 @@ static int spi_init_queue(struct spi_controller *ctlr) ctlr->busy = false; ctlr->queue_empty = true; - ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev)); + ctlr->kworker = kthread_run_worker(0, dev_name(&ctlr->dev)); if (IS_ERR(ctlr->kworker)) { dev_err(&ctlr->dev, "failed to create message pump kworker\n"); return PTR_ERR(ctlr->kworker); @@ -2539,7 +2532,7 @@ err_out: * @ctlr: Pointer to spi_controller device * * Registers an spi_device for each child node of controller node which - * represents a valid SPI slave. + * represents a valid SPI target device. */ static void of_register_spi_devices(struct spi_controller *ctlr) { @@ -2824,7 +2817,7 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr, if (!lookup.max_speed_hz && ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) && device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) { - /* Apple does not use _CRS but nested devices for SPI slaves */ + /* Apple does not use _CRS but nested devices for SPI target devices */ acpi_spi_parse_apple_properties(adev, &lookup); } @@ -2916,7 +2909,7 @@ static void acpi_register_spi_devices(struct spi_controller *ctlr) SPI_ACPI_ENUMERATE_MAX_DEPTH, acpi_spi_add_device, NULL, ctlr, NULL); if (ACPI_FAILURE(status)) - dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n"); + dev_warn(&ctlr->dev, "failed to enumerate SPI target devices\n"); } #else static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {} @@ -2930,16 +2923,15 @@ static void spi_controller_release(struct device *dev) kfree(ctlr); } -static const struct class spi_master_class = { +static const struct class spi_controller_class = { .name = "spi_master", .dev_release = spi_controller_release, - .dev_groups = spi_master_groups, + .dev_groups = spi_controller_groups, }; #ifdef CONFIG_SPI_SLAVE /** - * spi_target_abort - abort the ongoing transfer request on an SPI slave - * controller + * spi_target_abort - abort the ongoing transfer request on an SPI target controller * @spi: device used for the current transfer */ int spi_target_abort(struct spi_device *spi) @@ -2959,9 +2951,13 @@ static ssize_t slave_show(struct device *dev, struct device_attribute *attr, struct spi_controller *ctlr = container_of(dev, struct spi_controller, dev); struct device *child; + int ret; child = device_find_any_child(&ctlr->dev); - return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL); + ret = sysfs_emit(buf, "%s\n", child ? 
to_spi_device(child)->modalias : NULL); + put_device(child); + + return ret; } static ssize_t slave_store(struct device *dev, struct device_attribute *attr, @@ -2980,13 +2976,13 @@ static ssize_t slave_store(struct device *dev, struct device_attribute *attr, child = device_find_any_child(&ctlr->dev); if (child) { - /* Remove registered slave */ + /* Remove registered target device */ device_unregister(child); put_device(child); } if (strcmp(name, "(null)")) { - /* Register new slave */ + /* Register new target device */ spi = spi_alloc_device(ctlr); if (!spi) return -ENOMEM; @@ -3005,40 +3001,40 @@ static ssize_t slave_store(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_RW(slave); -static struct attribute *spi_slave_attrs[] = { +static struct attribute *spi_target_attrs[] = { &dev_attr_slave.attr, NULL, }; -static const struct attribute_group spi_slave_group = { - .attrs = spi_slave_attrs, +static const struct attribute_group spi_target_group = { + .attrs = spi_target_attrs, }; -static const struct attribute_group *spi_slave_groups[] = { +static const struct attribute_group *spi_target_groups[] = { &spi_controller_statistics_group, - &spi_slave_group, + &spi_target_group, NULL, }; -static const struct class spi_slave_class = { +static const struct class spi_target_class = { .name = "spi_slave", .dev_release = spi_controller_release, - .dev_groups = spi_slave_groups, + .dev_groups = spi_target_groups, }; #else -extern struct class spi_slave_class; /* dummy */ +extern struct class spi_target_class; /* dummy */ #endif /** - * __spi_alloc_controller - allocate an SPI master or slave controller + * __spi_alloc_controller - allocate an SPI host or target controller * @dev: the controller, possibly using the platform_bus * @size: how much zeroed driver-private data to allocate; the pointer to this * memory is in the driver_data field of the returned device, accessible * with spi_controller_get_devdata(); the memory is cacheline aligned; * drivers granting DMA access to portions of their private data need to * round up @size using ALIGN(size, dma_get_cache_alignment()). - * @slave: flag indicating whether to allocate an SPI master (false) or SPI - * slave (true) controller + * @target: flag indicating whether to allocate an SPI host (false) or SPI target (true) + * controller * Context: can sleep * * This call is used only by SPI controller drivers, which are the @@ -3055,7 +3051,7 @@ extern struct class spi_slave_class; /* dummy */ * Return: the SPI controller structure on success, else NULL. 
*/ struct spi_controller *__spi_alloc_controller(struct device *dev, - unsigned int size, bool slave) + unsigned int size, bool target) { struct spi_controller *ctlr; size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment()); @@ -3076,11 +3072,11 @@ struct spi_controller *__spi_alloc_controller(struct device *dev, mutex_init(&ctlr->add_lock); ctlr->bus_num = -1; ctlr->num_chipselect = 1; - ctlr->slave = slave; - if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave) - ctlr->dev.class = &spi_slave_class; + ctlr->target = target; + if (IS_ENABLED(CONFIG_SPI_SLAVE) && target) + ctlr->dev.class = &spi_target_class; else - ctlr->dev.class = &spi_master_class; + ctlr->dev.class = &spi_controller_class; ctlr->dev.parent = dev; pm_suspend_ignore_children(&ctlr->dev, true); spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size); @@ -3098,7 +3094,7 @@ static void devm_spi_release_controller(struct device *dev, void *ctlr) * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller() * @dev: physical device of SPI controller * @size: how much zeroed driver-private data to allocate - * @slave: whether to allocate an SPI master (false) or SPI slave (true) + * @target: whether to allocate an SPI host (false) or SPI target (true) controller * Context: can sleep * * Allocate an SPI controller and automatically release a reference on it @@ -3111,7 +3107,7 @@ static void devm_spi_release_controller(struct device *dev, void *ctlr) */ struct spi_controller *__devm_spi_alloc_controller(struct device *dev, unsigned int size, - bool slave) + bool target) { struct spi_controller **ptr, *ctlr; @@ -3120,7 +3116,7 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev, if (!ptr) return NULL; - ctlr = __spi_alloc_controller(dev, size, slave); + ctlr = __spi_alloc_controller(dev, size, target); if (ctlr) { ctlr->devm_allocated = true; *ptr = ctlr; @@ -3134,8 +3130,8 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev, EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller); /** - * spi_get_gpio_descs() - grab chip select GPIOs for the master - * @ctlr: The SPI master to grab GPIO descriptors for + * spi_get_gpio_descs() - grab chip select GPIOs for the controller + * @ctlr: The SPI controller to grab GPIO descriptors for */ static int spi_get_gpio_descs(struct spi_controller *ctlr) { @@ -3233,7 +3229,7 @@ static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int e int id; mutex_lock(&board_lock); - id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL); + id = idr_alloc(&spi_controller_idr, ctlr, start, end, GFP_KERNEL); mutex_unlock(&board_lock); if (WARN(id < 0, "couldn't get idr")) return id == -ENOSPC ? 
-EBUSY : id; @@ -3382,7 +3378,7 @@ destroy_queue: spi_destroy_queue(ctlr); free_bus_id: mutex_lock(&board_lock); - idr_remove(&spi_master_idr, ctlr->bus_num); + idr_remove(&spi_controller_idr, ctlr->bus_num); mutex_unlock(&board_lock); return status; } @@ -3394,8 +3390,7 @@ static void devm_spi_unregister(struct device *dev, void *res) } /** - * devm_spi_register_controller - register managed SPI host or target - * controller + * devm_spi_register_controller - register managed SPI host or target controller * @dev: device managing SPI controller * @ctlr: initialized controller, originally from spi_alloc_host() or * spi_alloc_target() @@ -3435,7 +3430,7 @@ static int __unregister(struct device *dev, void *null) } /** - * spi_unregister_controller - unregister SPI master or slave controller + * spi_unregister_controller - unregister SPI host or target controller * @ctlr: the controller being unregistered * Context: can sleep * @@ -3459,7 +3454,7 @@ void spi_unregister_controller(struct spi_controller *ctlr) /* First make sure that this controller was ever added */ mutex_lock(&board_lock); - found = idr_find(&spi_master_idr, id); + found = idr_find(&spi_controller_idr, id); mutex_unlock(&board_lock); if (ctlr->queued) { if (spi_destroy_queue(ctlr)) @@ -3474,7 +3469,7 @@ void spi_unregister_controller(struct spi_controller *ctlr) /* Free bus id */ mutex_lock(&board_lock); if (found == ctlr) - idr_remove(&spi_master_idr, id); + idr_remove(&spi_controller_idr, id); mutex_unlock(&board_lock); if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) @@ -4138,10 +4133,13 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) xfer->tx_nbits != SPI_NBITS_OCTAL) return -EINVAL; if ((xfer->tx_nbits == SPI_NBITS_DUAL) && - !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) + !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) return -EINVAL; if ((xfer->tx_nbits == SPI_NBITS_QUAD) && - !(spi->mode & SPI_TX_QUAD)) + !(spi->mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) + return -EINVAL; + if ((xfer->tx_nbits == SPI_NBITS_OCTAL) && + !(spi->mode & SPI_TX_OCTAL)) return -EINVAL; } /* Check transfer rx_nbits */ @@ -4154,15 +4152,27 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) xfer->rx_nbits != SPI_NBITS_OCTAL) return -EINVAL; if ((xfer->rx_nbits == SPI_NBITS_DUAL) && - !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) + !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))) return -EINVAL; if ((xfer->rx_nbits == SPI_NBITS_QUAD) && - !(spi->mode & SPI_RX_QUAD)) + !(spi->mode & (SPI_RX_QUAD | SPI_RX_OCTAL))) + return -EINVAL; + if ((xfer->rx_nbits == SPI_NBITS_OCTAL) && + !(spi->mode & SPI_RX_OCTAL)) return -EINVAL; } if (_spi_xfer_word_delay_update(xfer, spi)) return -EINVAL; + + /* Make sure controller supports required offload features. */ + if (xfer->offload_flags) { + if (!message->offload) + return -EINVAL; + + if (xfer->offload_flags & ~message->offload->xfer_flags) + return -EINVAL; + } } message->status = -EINPROGRESS; @@ -4618,7 +4628,7 @@ EXPORT_SYMBOL_GPL(spi_sync_locked); /** * spi_bus_lock - obtain a lock for exclusive SPI bus usage - * @ctlr: SPI bus master that should be locked for exclusive bus access + * @ctlr: SPI bus controller that should be locked for exclusive bus access * Context: can sleep * * This call may only be used from a context that may sleep. 
The sleep @@ -4649,7 +4659,7 @@ EXPORT_SYMBOL_GPL(spi_bus_lock); /** * spi_bus_unlock - release the lock for exclusive SPI bus usage - * @ctlr: SPI bus master that was locked for exclusive bus access + * @ctlr: SPI bus controller that was locked for exclusive bus access * Context: can sleep * * This call may only be used from a context that may sleep. The sleep @@ -4766,9 +4776,9 @@ static struct spi_controller *of_find_spi_controller_by_node(struct device_node { struct device *dev; - dev = class_find_device_by_of_node(&spi_master_class, node); + dev = class_find_device_by_of_node(&spi_controller_class, node); if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) - dev = class_find_device_by_of_node(&spi_slave_class, node); + dev = class_find_device_by_of_node(&spi_target_class, node); if (!dev) return NULL; @@ -4841,17 +4851,17 @@ extern struct notifier_block spi_of_notifier; #if IS_ENABLED(CONFIG_ACPI) static int spi_acpi_controller_match(struct device *dev, const void *data) { - return ACPI_COMPANION(dev->parent) == data; + return device_match_acpi_dev(dev->parent, data); } struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev) { struct device *dev; - dev = class_find_device(&spi_master_class, NULL, adev, + dev = class_find_device(&spi_controller_class, NULL, adev, spi_acpi_controller_match); if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) - dev = class_find_device(&spi_slave_class, NULL, adev, + dev = class_find_device(&spi_target_class, NULL, adev, spi_acpi_controller_match); if (!dev) return NULL; @@ -4921,12 +4931,12 @@ static int __init spi_init(void) if (status < 0) goto err1; - status = class_register(&spi_master_class); + status = class_register(&spi_controller_class); if (status < 0) goto err2; if (IS_ENABLED(CONFIG_SPI_SLAVE)) { - status = class_register(&spi_slave_class); + status = class_register(&spi_target_class); if (status < 0) goto err3; } @@ -4939,7 +4949,7 @@ static int __init spi_init(void) return 0; err3: - class_unregister(&spi_master_class); + class_unregister(&spi_controller_class); err2: bus_unregister(&spi_bus_type); err1: diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 653f82984216..6108959c28d9 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -698,19 +698,25 @@ static const struct class spidev_class = { .name = "spidev", }; +/* + * The spi device ids are expected to match the device names of the + * spidev_dt_ids array below. Both arrays are kept in the same ordering. 
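+ * Entries are kept sorted by vendor prefix, as spelled out in the
+ * inline comments of each entry.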
+ */
 static const struct spi_device_id spidev_spi_ids[] = {
-	{ .name = "bh2228fv" },
-	{ .name = "dh2228fv" },
-	{ .name = "jg10309-01" },
-	{ .name = "ltc2488" },
-	{ .name = "sx1301" },
-	{ .name = "bk4" },
-	{ .name = "dhcom-board" },
-	{ .name = "m53cpld" },
-	{ .name = "spi-petra" },
-	{ .name = "spi-authenta" },
-	{ .name = "em3581" },
-	{ .name = "si3210" },
+	{ .name = /* cisco */ "spi-petra" },
+	{ .name = /* dh */ "dhcom-board" },
+	{ .name = /* elgin */ "jg10309-01" },
+	{ .name = /* gocontroll */ "moduline-module-slot"},
+	{ .name = /* lineartechnology */ "ltc2488" },
+	{ .name = /* lwn */ "bk4" },
+	{ .name = /* lwn */ "bk4-spi" },
+	{ .name = /* menlo */ "m53cpld" },
+	{ .name = /* micron */ "spi-authenta" },
+	{ .name = /* rohm */ "bh2228fv" },
+	{ .name = /* rohm */ "dh2228fv" },
+	{ .name = /* semtech */ "sx1301" },
+	{ .name = /* silabs */ "em3581" },
+	{ .name = /* silabs */ "si3210" },
 	{},
 };
 MODULE_DEVICE_TABLE(spi, spidev_spi_ids);
@@ -732,8 +738,10 @@ static const struct of_device_id spidev_dt_ids[] = {
 	{ .compatible = "cisco,spi-petra", .data = &spidev_of_check },
 	{ .compatible = "dh,dhcom-board", .data = &spidev_of_check },
 	{ .compatible = "elgin,jg10309-01", .data = &spidev_of_check },
+	{ .compatible = "gocontroll,moduline-module-slot", .data = &spidev_of_check},
 	{ .compatible = "lineartechnology,ltc2488", .data = &spidev_of_check },
 	{ .compatible = "lwn,bk4", .data = &spidev_of_check },
+	{ .compatible = "lwn,bk4-spi", .data = &spidev_of_check },
 	{ .compatible = "menlo,m53cpld", .data = &spidev_of_check },
 	{ .compatible = "micron,spi-authenta", .data = &spidev_of_check },
 	{ .compatible = "rohm,bh2228fv", .data = &spidev_of_check },