diff options
Diffstat (limited to 'drivers/spi')
55 files changed, 5695 insertions, 946 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index ea8a31032927..c51da3fc3604 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -55,6 +55,9 @@ config SPI_MEM This extension is meant to simplify interaction with SPI memories by providing a high-level interface to send memory-like commands. +config SPI_OFFLOAD + bool + comment "SPI Master Controller Drivers" config SPI_AIROHA_SNFI @@ -176,6 +179,7 @@ config SPI_AU1550 config SPI_AXI_SPI_ENGINE tristate "Analog Devices AXI SPI Engine controller" depends on HAS_IOMEM + select SPI_OFFLOAD help This enables support for the Analog Devices AXI SPI Engine SPI controller. It is part of the SPI Engine framework that is used in some Analog Devices @@ -932,6 +936,15 @@ config SPI_QCOM_QSPI help QSPI(Quad SPI) driver for Qualcomm QSPI controller. +config SPI_QPIC_SNAND + tristate "QPIC SNAND controller" + depends on ARCH_QCOM || COMPILE_TEST + depends on MTD + help + QPIC_SNAND (QPIC SPI NAND) driver for Qualcomm QPIC controller. + QPIC controller supports both parallel nand and serial nand. + This config will enable serial nand driver for QPIC controller. + config SPI_QUP tristate "Qualcomm SPI controller with QUP interface" depends on ARCH_QCOM || COMPILE_TEST @@ -1021,6 +1034,15 @@ config SPI_SN_F_OSPI for connecting an SPI Flash memory over up to 8-bit wide bus. It supports indirect access mode only. +config SPI_SG2044_NOR + tristate "SG2044 SPI NOR Controller" + depends on ARCH_SOPHGO || COMPILE_TEST + help + This enables support for the SG2044 SPI NOR controller, + which supports Dual/Quad read and write operations while + also supporting 3Byte address devices and 4Byte address + devices. + config SPI_SPRD tristate "Spreadtrum SPI controller" depends on ARCH_SPRD || COMPILE_TEST @@ -1045,6 +1067,16 @@ config SPI_STM32 is not available, the driver automatically falls back to PIO mode. +config SPI_STM32_OSPI + tristate "STMicroelectronics STM32 OCTO SPI controller" + depends on ARCH_STM32 || COMPILE_TEST + depends on OF + depends on SPI_MEM + help + This enables support for the Octo SPI controller in master mode. + This driver does not support generic SPI. The implementation only + supports spi-mem interface. + config SPI_STM32_QSPI tristate "STMicroelectronics STM32 QUAD SPI controller" depends on ARCH_STM32 || COMPILE_TEST @@ -1234,7 +1266,9 @@ config SPI_ZYNQMP_GQSPI config SPI_AMD tristate "AMD SPI controller" - depends on SPI_MASTER || COMPILE_TEST + depends on PCI + depends on SPI_MASTER || X86 || COMPILE_TEST + depends on SPI_MEM help Enables SPI controller driver for AMD SoC. @@ -1317,4 +1351,16 @@ endif # SPI_SLAVE config SPI_DYNAMIC def_bool ACPI || OF_DYNAMIC || SPI_SLAVE +if SPI_OFFLOAD + +comment "SPI Offload triggers" + +config SPI_OFFLOAD_TRIGGER_PWM + tristate "SPI offload trigger using PWM" + depends on PWM + help + Generic SPI offload trigger implemented using PWM output. 
+ +endif # SPI_OFFLOAD + endif # SPI diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 9db7554c1864..4ea89f6fc531 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -10,6 +10,7 @@ ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG obj-$(CONFIG_SPI_MASTER) += spi.o obj-$(CONFIG_SPI_MEM) += spi-mem.o obj-$(CONFIG_SPI_MUX) += spi-mux.o +obj-$(CONFIG_SPI_OFFLOAD) += spi-offload.o obj-$(CONFIG_SPI_SPIDEV) += spidev.o obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o @@ -116,6 +117,7 @@ obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o obj-$(CONFIG_SPI_QCOM_GENI) += spi-geni-qcom.o obj-$(CONFIG_SPI_QCOM_QSPI) += spi-qcom-qspi.o +obj-$(CONFIG_SPI_QPIC_SNAND) += spi-qpic-snand.o obj-$(CONFIG_SPI_QUP) += spi-qup.o obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o obj-$(CONFIG_SPI_ROCKCHIP_SFC) += spi-rockchip-sfc.o @@ -134,9 +136,11 @@ obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o obj-$(CONFIG_SPI_SIFIVE) += spi-sifive.o obj-$(CONFIG_SPI_SLAVE_MT27XX) += spi-slave-mt27xx.o obj-$(CONFIG_SPI_SN_F_OSPI) += spi-sn-f-ospi.o +obj-$(CONFIG_SPI_SG2044_NOR) += spi-sg2044-nor.o obj-$(CONFIG_SPI_SPRD) += spi-sprd.o obj-$(CONFIG_SPI_SPRD_ADI) += spi-sprd-adi.o obj-$(CONFIG_SPI_STM32) += spi-stm32.o +obj-$(CONFIG_SPI_STM32_OSPI) += spi-stm32-ospi.o obj-$(CONFIG_SPI_STM32_QSPI) += spi-stm32-qspi.o obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o @@ -158,8 +162,11 @@ obj-$(CONFIG_SPI_XLP) += spi-xlp.o obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o obj-$(CONFIG_SPI_ZYNQ_QSPI) += spi-zynq-qspi.o obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o -obj-$(CONFIG_SPI_AMD) += spi-amd.o +obj-$(CONFIG_SPI_AMD) += spi-amd.o spi-amd-pci.o # SPI slave protocol handlers obj-$(CONFIG_SPI_SLAVE_TIME) += spi-slave-time.o obj-$(CONFIG_SPI_SLAVE_SYSTEM_CONTROL) += spi-slave-system-control.o + +# SPI offload triggers +obj-$(CONFIG_SPI_OFFLOAD_TRIGGER_PWM) += spi-offload-trigger-pwm.o diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index 244ac0106862..2f6797324227 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -1287,9 +1287,9 @@ static int atmel_qspi_dma_init(struct spi_controller *ctrl) aq->rx_chan = dma_request_chan(&aq->pdev->dev, "rx"); if (IS_ERR(aq->rx_chan)) { - aq->rx_chan = NULL; - return dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->rx_chan), - "RX DMA channel is not available\n"); + ret = dev_err_probe(&aq->pdev->dev, PTR_ERR(aq->rx_chan), + "RX DMA channel is not available\n"); + goto null_rx_chan; } aq->tx_chan = dma_request_chan(&aq->pdev->dev, "tx"); @@ -1310,8 +1310,9 @@ static int atmel_qspi_dma_init(struct spi_controller *ctrl) release_rx_chan: dma_release_channel(aq->rx_chan); - aq->rx_chan = NULL; aq->tx_chan = NULL; +null_rx_chan: + aq->rx_chan = NULL; return ret; } @@ -1436,22 +1437,17 @@ static int atmel_qspi_probe(struct platform_device *pdev) pm_runtime_set_autosuspend_delay(&pdev->dev, 500); pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_set_active(&pdev->dev); - pm_runtime_enable(&pdev->dev); - pm_runtime_get_noresume(&pdev->dev); + devm_pm_runtime_set_active_enabled(&pdev->dev); + devm_pm_runtime_get_noresume(&pdev->dev); err = atmel_qspi_init(aq); if (err) goto dma_release; err = spi_register_controller(ctrl); - if (err) { - pm_runtime_put_noidle(&pdev->dev); - pm_runtime_disable(&pdev->dev); - pm_runtime_set_suspended(&pdev->dev); - pm_runtime_dont_use_autosuspend(&pdev->dev); + if (err) goto dma_release; - } + pm_runtime_mark_last_busy(&pdev->dev); 
pm_runtime_put_autosuspend(&pdev->dev); @@ -1530,10 +1526,6 @@ static void atmel_qspi_remove(struct platform_device *pdev) */ dev_warn(&pdev->dev, "Failed to resume device on remove\n"); } - - pm_runtime_disable(&pdev->dev); - pm_runtime_dont_use_autosuspend(&pdev->dev); - pm_runtime_put_noidle(&pdev->dev); } static int __maybe_unused atmel_qspi_suspend(struct device *dev) diff --git a/drivers/spi/spi-amd-pci.c b/drivers/spi/spi-amd-pci.c new file mode 100644 index 000000000000..e5faab414c17 --- /dev/null +++ b/drivers/spi/spi-amd-pci.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AMD SPI controller driver + * + * Copyright (c) 2025, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Authors: Krishnamoorthi M <krishnamoorthi.m@amd.com> + * Akshata MukundShetty <akshata.mukundshetty@amd.com> + */ + +#include <linux/init.h> +#include <linux/spi/spi.h> +#include <linux/pci.h> + +#include "spi-amd.h" + +#define AMD_PCI_DEVICE_ID_LPC_BRIDGE 0x1682 +#define AMD_PCI_LPC_SPI_BASE_ADDR_REG 0xA0 +#define AMD_SPI_BASE_ADDR_MASK ~0xFF +#define AMD_HID2_PCI_BAR_OFFSET 0x00002000 +#define AMD_HID2_MEM_SIZE 0x200 + +static struct pci_device_id pci_spi_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_PCI_DEVICE_ID_LPC_BRIDGE) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, pci_spi_ids); + +static int amd_spi_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct spi_controller *host; + struct amd_spi *amd_spi; + u32 io_base_addr; + + /* Allocate storage for host and driver private data */ + host = devm_spi_alloc_host(dev, sizeof(struct amd_spi)); + if (!host) + return dev_err_probe(dev, -ENOMEM, "Error allocating SPI host\n"); + + amd_spi = spi_controller_get_devdata(host); + + pci_read_config_dword(pdev, AMD_PCI_LPC_SPI_BASE_ADDR_REG, &io_base_addr); + io_base_addr = (io_base_addr & AMD_SPI_BASE_ADDR_MASK) + AMD_HID2_PCI_BAR_OFFSET; + amd_spi->io_remap_addr = devm_ioremap(dev, io_base_addr, AMD_HID2_MEM_SIZE); + + if (!amd_spi->io_remap_addr) + return dev_err_probe(dev, -ENOMEM, + "ioremap of SPI registers failed\n"); + + dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr); + + amd_spi->version = AMD_HID2_SPI; + host->bus_num = 2; + + return amd_spi_probe_common(dev, host); +} + +static struct pci_driver amd_spi_pci_driver = { + .name = "amd_spi_pci", + .id_table = pci_spi_ids, + .probe = amd_spi_pci_probe, +}; + +module_pci_driver(amd_spi_pci_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("AMD HID2 SPI Controller Driver"); diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c index 17fc0b17e756..02e7fe095a0b 100644 --- a/drivers/spi/spi-amd.c +++ b/drivers/spi/spi-amd.c @@ -17,6 +17,8 @@ #include <linux/spi/spi.h> #include <linux/spi/spi-mem.h> +#include "spi-amd.h" + #define AMD_SPI_CTRL0_REG 0x00 #define AMD_SPI_EXEC_CMD BIT(16) #define AMD_SPI_FIFO_CLEAR BIT(20) @@ -52,10 +54,13 @@ #define AMD_SPI_SPD7_MASK GENMASK(13, AMD_SPI_SPD7_SHIFT) #define AMD_SPI_HID2_INPUT_RING_BUF0 0X100 +#define AMD_SPI_HID2_OUTPUT_BUF0 0x140 #define AMD_SPI_HID2_CNTRL 0x150 #define AMD_SPI_HID2_INT_STATUS 0x154 #define AMD_SPI_HID2_CMD_START 0x156 #define AMD_SPI_HID2_INT_MASK 0x158 +#define AMD_SPI_HID2_WRITE_CNTRL0 0x160 +#define AMD_SPI_HID2_WRITE_CNTRL1 0x164 #define AMD_SPI_HID2_READ_CNTRL0 0x170 #define AMD_SPI_HID2_READ_CNTRL1 0x174 #define AMD_SPI_HID2_READ_CNTRL2 0x180 @@ -81,17 +86,9 @@ #define AMD_SPI_OP_READ_1_1_4_4B 0x6c /* Read data bytes (Quad Output SPI) */ #define AMD_SPI_OP_READ_1_4_4_4B 0xec 
/* Read data bytes (Quad I/O SPI) */ -/** - * enum amd_spi_versions - SPI controller versions - * @AMD_SPI_V1: AMDI0061 hardware version - * @AMD_SPI_V2: AMDI0062 hardware version - * @AMD_HID2_SPI: AMDI0063 hardware version - */ -enum amd_spi_versions { - AMD_SPI_V1 = 1, - AMD_SPI_V2, - AMD_HID2_SPI, -}; +/* SPINAND write command opcodes */ +#define AMD_SPI_OP_PP 0x02 /* Page program */ +#define AMD_SPI_OP_PP_RANDOM 0x84 /* Page program */ enum amd_spi_speed { F_66_66MHz, @@ -118,22 +115,6 @@ struct amd_spi_freq { u32 spd7_val; }; -/** - * struct amd_spi - SPI driver instance - * @io_remap_addr: Start address of the SPI controller registers - * @phy_dma_buf: Physical address of DMA buffer - * @dma_virt_addr: Virtual address of DMA buffer - * @version: SPI controller hardware version - * @speed_hz: Device frequency - */ -struct amd_spi { - void __iomem *io_remap_addr; - dma_addr_t phy_dma_buf; - void *dma_virt_addr; - enum amd_spi_versions version; - unsigned int speed_hz; -}; - static inline u8 amd_spi_readreg8(struct amd_spi *amd_spi, int idx) { return readb((u8 __iomem *)amd_spi->io_remap_addr + idx); @@ -445,6 +426,17 @@ static inline bool amd_is_spi_read_cmd(const u16 op) } } +static inline bool amd_is_spi_write_cmd(const u16 op) +{ + switch (op) { + case AMD_SPI_OP_PP: + case AMD_SPI_OP_PP_RANDOM: + return true; + default: + return false; + } +} + static bool amd_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) { @@ -455,7 +447,7 @@ static bool amd_spi_supports_op(struct spi_mem *mem, return false; /* AMD SPI controllers support quad mode only for read operations */ - if (amd_is_spi_read_cmd(op->cmd.opcode)) { + if (amd_is_spi_read_cmd(op->cmd.opcode) || amd_is_spi_write_cmd(op->cmd.opcode)) { if (op->data.buswidth > 4) return false; @@ -464,7 +456,8 @@ static bool amd_spi_supports_op(struct spi_mem *mem, * doesn't support 4-byte address commands. */ if (amd_spi->version == AMD_HID2_SPI) { - if (amd_is_spi_read_cmd_4b(op->cmd.opcode) || + if ((amd_is_spi_read_cmd_4b(op->cmd.opcode) || + amd_is_spi_write_cmd(op->cmd.opcode)) && op->data.nbytes > AMD_SPI_HID2_DMA_SIZE) return false; } else if (op->data.nbytes > AMD_SPI_MAX_DATA) { @@ -490,7 +483,8 @@ static int amd_spi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) * controller index mode supports maximum of 64 bytes in a single * transaction. */ - if (amd_spi->version == AMD_HID2_SPI && amd_is_spi_read_cmd(op->cmd.opcode)) + if (amd_spi->version == AMD_HID2_SPI && (amd_is_spi_read_cmd(op->cmd.opcode) || + amd_is_spi_write_cmd(op->cmd.opcode))) op->data.nbytes = clamp_val(op->data.nbytes, 0, AMD_SPI_HID2_DMA_SIZE); else op->data.nbytes = clamp_val(op->data.nbytes, 0, AMD_SPI_MAX_DATA); @@ -514,32 +508,96 @@ static void amd_spi_set_addr(struct amd_spi *amd_spi, } } +static void amd_spi_hiddma_write(struct amd_spi *amd_spi, const struct spi_mem_op *op) +{ + u16 hid_cmd_start, val; + u32 hid_regval; + + /* + * Program the HID2 output Buffer0. 4k aligned buf_memory_addr[31:12], + * buf_size[2:0]. 
+ */ + hid_regval = amd_spi->phy_dma_buf | BIT(0); + amd_spi_writereg32(amd_spi, AMD_SPI_HID2_OUTPUT_BUF0, hid_regval); + + /* Program max write length in hid2_write_control1 register */ + hid_regval = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_WRITE_CNTRL1); + hid_regval = (hid_regval & ~GENMASK(15, 0)) | ((op->data.nbytes) + 3); + amd_spi_writereg32(amd_spi, AMD_SPI_HID2_WRITE_CNTRL1, hid_regval); + + /* Set cmd start bit in hid2_cmd_start register to trigger HID basic write operation */ + hid_cmd_start = amd_spi_readreg16(amd_spi, AMD_SPI_HID2_CMD_START); + amd_spi_writereg16(amd_spi, AMD_SPI_HID2_CMD_START, (hid_cmd_start | BIT(2))); + + /* Check interrupt status of HIDDMA basic write operation in hid2_int_status register */ + readw_poll_timeout(amd_spi->io_remap_addr + AMD_SPI_HID2_INT_STATUS, val, + (val & BIT(2)), AMD_SPI_IO_SLEEP_US, AMD_SPI_IO_TIMEOUT_US); + + /* Clear the interrupts by writing to hid2_int_status register */ + val = amd_spi_readreg16(amd_spi, AMD_SPI_HID2_INT_STATUS); + amd_spi_writereg16(amd_spi, AMD_SPI_HID2_INT_STATUS, val); +} + static void amd_spi_mem_data_out(struct amd_spi *amd_spi, const struct spi_mem_op *op) { int base_addr = AMD_SPI_FIFO_BASE + op->addr.nbytes; u64 *buf_64 = (u64 *)op->data.buf.out; + u64 addr_val = op->addr.val; u32 nbytes = op->data.nbytes; u32 left_data = nbytes; u8 *buf; int i; - amd_spi_set_opcode(amd_spi, op->cmd.opcode); - amd_spi_set_addr(amd_spi, op); + /* + * Condition for using HID write mode. Only for writing complete page data, use HID write. + * Use index mode otherwise. + */ + if (amd_spi->version == AMD_HID2_SPI && amd_is_spi_write_cmd(op->cmd.opcode)) { + u64 *dma_buf64 = (u64 *)(amd_spi->dma_virt_addr + op->addr.nbytes + op->cmd.nbytes); + u8 *dma_buf = (u8 *)amd_spi->dma_virt_addr; - for (i = 0; left_data >= 8; i++, left_data -= 8) - amd_spi_writereg64(amd_spi, base_addr + op->dummy.nbytes + (i * 8), *buf_64++); + /* Copy opcode and address to DMA buffer */ + *dma_buf = op->cmd.opcode; - buf = (u8 *)buf_64; - for (i = 0; i < left_data; i++) { - amd_spi_writereg8(amd_spi, base_addr + op->dummy.nbytes + nbytes + i - left_data, - buf[i]); - } + dma_buf = (u8 *)dma_buf64; + for (i = 0; i < op->addr.nbytes; i++) { + *--dma_buf = addr_val & GENMASK(7, 0); + addr_val >>= 8; + } - amd_spi_set_tx_count(amd_spi, op->addr.nbytes + op->data.nbytes); - amd_spi_set_rx_count(amd_spi, 0); - amd_spi_clear_fifo_ptr(amd_spi); - amd_spi_execute_opcode(amd_spi); + /* Copy data to DMA buffer */ + while (left_data >= 8) { + *dma_buf64++ = *buf_64++; + left_data -= 8; + } + + buf = (u8 *)buf_64; + dma_buf = (u8 *)dma_buf64; + while (left_data--) + *dma_buf++ = *buf++; + + amd_spi_hiddma_write(amd_spi, op); + } else { + amd_spi_set_opcode(amd_spi, op->cmd.opcode); + amd_spi_set_addr(amd_spi, op); + + for (i = 0; left_data >= 8; i++, left_data -= 8) + amd_spi_writereg64(amd_spi, base_addr + op->dummy.nbytes + (i * 8), + *buf_64++); + + buf = (u8 *)buf_64; + for (i = 0; i < left_data; i++) { + amd_spi_writereg8(amd_spi, + base_addr + op->dummy.nbytes + nbytes + i - left_data, + buf[i]); + } + + amd_spi_set_tx_count(amd_spi, op->addr.nbytes + op->data.nbytes); + amd_spi_set_rx_count(amd_spi, 0); + amd_spi_clear_fifo_ptr(amd_spi); + amd_spi_execute_opcode(amd_spi); + } } static void amd_spi_hiddma_read(struct amd_spi *amd_spi, const struct spi_mem_op *op) @@ -616,15 +674,21 @@ static void amd_spi_mem_data_in(struct amd_spi *amd_spi, * Use index mode otherwise. 
*/ if (amd_spi->version == AMD_HID2_SPI && amd_is_spi_read_cmd(op->cmd.opcode)) { + u64 *dma_buf64 = (u64 *)amd_spi->dma_virt_addr; + u8 *dma_buf; + amd_spi_hiddma_read(amd_spi, op); - for (i = 0; left_data >= 8; i++, left_data -= 8) - *buf_64++ = readq((u8 __iomem *)amd_spi->dma_virt_addr + (i * 8)); + /* Copy data from DMA buffer */ + while (left_data >= 8) { + *buf_64++ = *dma_buf64++; + left_data -= 8; + } buf = (u8 *)buf_64; - for (i = 0; i < left_data; i++) - buf[i] = readb((u8 __iomem *)amd_spi->dma_virt_addr + - (nbytes - left_data + i)); + dma_buf = (u8 *)dma_buf64; + while (left_data--) + *buf++ = *dma_buf++; /* Reset HID RX memory logic */ data = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_CNTRL); @@ -728,51 +792,38 @@ static int amd_spi_setup_hiddma(struct amd_spi *amd_spi, struct device *dev) { u32 hid_regval; - /* Allocate DMA buffer to use for HID basic read operation */ - amd_spi->dma_virt_addr = dma_alloc_coherent(dev, AMD_SPI_HID2_DMA_SIZE, - &amd_spi->phy_dma_buf, GFP_KERNEL); + /* Allocate DMA buffer to use for HID basic read and write operations. For write + * operations, the DMA buffer should include the opcode, address bytes and dummy + * bytes(if any) in addition to the data bytes. Additionally, the hardware requires + * that the buffer address be 4K aligned. So, allocate DMA buffer of size + * 2 * AMD_SPI_HID2_DMA_SIZE. + */ + amd_spi->dma_virt_addr = dmam_alloc_coherent(dev, AMD_SPI_HID2_DMA_SIZE * 2, + &amd_spi->phy_dma_buf, GFP_KERNEL); if (!amd_spi->dma_virt_addr) return -ENOMEM; /* * Enable interrupts and set mask bits in hid2_int_mask register to generate interrupt - * properly for HIDDMA basic read operations. + * properly for HIDDMA basic read and write operations. */ hid_regval = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_INT_MASK); - hid_regval = (hid_regval & GENMASK(31, 8)) | BIT(19); + hid_regval = (hid_regval & GENMASK(31, 8)) | BIT(18) | BIT(19); amd_spi_writereg32(amd_spi, AMD_SPI_HID2_INT_MASK, hid_regval); - /* Configure buffer unit(4k) in hid2_control register */ + /* Configure buffer unit(4k) and write threshold in hid2_control register */ hid_regval = amd_spi_readreg32(amd_spi, AMD_SPI_HID2_CNTRL); - amd_spi_writereg32(amd_spi, AMD_SPI_HID2_CNTRL, hid_regval & ~BIT(3)); + amd_spi_writereg32(amd_spi, AMD_SPI_HID2_CNTRL, (hid_regval | GENMASK(13, 12)) & ~BIT(3)); return 0; } -static int amd_spi_probe(struct platform_device *pdev) +int amd_spi_probe_common(struct device *dev, struct spi_controller *host) { - struct device *dev = &pdev->dev; - struct spi_controller *host; - struct amd_spi *amd_spi; + struct amd_spi *amd_spi = spi_controller_get_devdata(host); int err; - /* Allocate storage for host and driver private data */ - host = devm_spi_alloc_host(dev, sizeof(struct amd_spi)); - if (!host) - return dev_err_probe(dev, -ENOMEM, "Error allocating SPI host\n"); - - amd_spi = spi_controller_get_devdata(host); - amd_spi->io_remap_addr = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(amd_spi->io_remap_addr)) - return dev_err_probe(dev, PTR_ERR(amd_spi->io_remap_addr), - "ioremap of SPI registers failed\n"); - - dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr); - - amd_spi->version = (uintptr_t) device_get_match_data(dev); - /* Initialize the spi_controller fields */ - host->bus_num = (amd_spi->version == AMD_HID2_SPI) ? 
2 : 0; host->num_chipselect = 4; host->mode_bits = SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD; host->flags = SPI_CONTROLLER_HALF_DUPLEX; @@ -795,6 +846,32 @@ static int amd_spi_probe(struct platform_device *pdev) return err; } +EXPORT_SYMBOL_GPL(amd_spi_probe_common); + +static int amd_spi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct spi_controller *host; + struct amd_spi *amd_spi; + + /* Allocate storage for host and driver private data */ + host = devm_spi_alloc_host(dev, sizeof(struct amd_spi)); + if (!host) + return dev_err_probe(dev, -ENOMEM, "Error allocating SPI host\n"); + + amd_spi = spi_controller_get_devdata(host); + amd_spi->io_remap_addr = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(amd_spi->io_remap_addr)) + return dev_err_probe(dev, PTR_ERR(amd_spi->io_remap_addr), + "ioremap of SPI registers failed\n"); + + dev_dbg(dev, "io_remap_address: %p\n", amd_spi->io_remap_addr); + + amd_spi->version = (uintptr_t)device_get_match_data(dev); + host->bus_num = 0; + + return amd_spi_probe_common(dev, host); +} #ifdef CONFIG_ACPI static const struct acpi_device_id spi_acpi_match[] = { diff --git a/drivers/spi/spi-amd.h b/drivers/spi/spi-amd.h new file mode 100644 index 000000000000..5f39ce7b5587 --- /dev/null +++ b/drivers/spi/spi-amd.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * AMD SPI controller driver common stuff + * + * Copyright (c) 2025, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Author: Krishnamoorthi M <krishnamoorthi.m@amd.com> + */ + +#ifndef SPI_AMD_H +#define SPI_AMD_H + +/** + * enum amd_spi_versions - SPI controller versions + * @AMD_SPI_V1: AMDI0061 hardware version + * @AMD_SPI_V2: AMDI0062 hardware version + * @AMD_HID2_SPI: AMDI0063 hardware version + */ +enum amd_spi_versions { + AMD_SPI_V1 = 1, + AMD_SPI_V2, + AMD_HID2_SPI, +}; + +/** + * struct amd_spi - SPI driver instance + * @io_remap_addr: Start address of the SPI controller registers + * @phy_dma_buf: Physical address of DMA buffer + * @dma_virt_addr: Virtual address of DMA buffer + * @version: SPI controller hardware version + * @speed_hz: Device frequency + */ +struct amd_spi { + void __iomem *io_remap_addr; + dma_addr_t phy_dma_buf; + void *dma_virt_addr; + enum amd_spi_versions version; + unsigned int speed_hz; +}; + +int amd_spi_probe_common(struct device *dev, struct spi_controller *host); + +#endif /* SPI_AMD_H */ diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c index e9beae95dded..62a11142bd63 100644 --- a/drivers/spi/spi-aspeed-smc.c +++ b/drivers/spi/spi-aspeed-smc.c @@ -303,13 +303,6 @@ static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o u32 ctl_val; int ret = 0; - dev_dbg(aspi->dev, - "CE%d %s OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x len:%#x", - chip->cs, op->data.dir == SPI_MEM_DATA_IN ? "read" : "write", - op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth, - op->dummy.buswidth, op->data.buswidth, - op->addr.nbytes, op->dummy.nbytes, op->data.nbytes); - addr_mode = readl(aspi->regs + CE_CTRL_REG); addr_mode_backup = addr_mode; diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c index 7c252126b33e..8cc19934b48b 100644 --- a/drivers/spi/spi-axi-spi-engine.c +++ b/drivers/spi/spi-axi-spi-engine.c @@ -2,21 +2,28 @@ /* * SPI-Engine SPI controller driver * Copyright 2015 Analog Devices Inc. 
+ * Copyright 2024 BayLibre, SAS * Author: Lars-Peter Clausen <lars@metafoo.de> */ +#include <linux/bitfield.h> +#include <linux/bitops.h> #include <linux/clk.h> #include <linux/completion.h> +#include <linux/dmaengine.h> #include <linux/fpga/adi-axi-common.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/of.h> #include <linux/module.h> #include <linux/overflow.h> #include <linux/platform_device.h> +#include <linux/spi/offload/provider.h> #include <linux/spi/spi.h> #include <trace/events/spi.h> +#define SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH 0x10 #define SPI_ENGINE_REG_RESET 0x40 #define SPI_ENGINE_REG_INT_ENABLE 0x80 @@ -24,6 +31,7 @@ #define SPI_ENGINE_REG_INT_SOURCE 0x88 #define SPI_ENGINE_REG_SYNC_ID 0xc0 +#define SPI_ENGINE_REG_OFFLOAD_SYNC_ID 0xc4 #define SPI_ENGINE_REG_CMD_FIFO_ROOM 0xd0 #define SPI_ENGINE_REG_SDO_FIFO_ROOM 0xd4 @@ -34,10 +42,24 @@ #define SPI_ENGINE_REG_SDI_DATA_FIFO 0xe8 #define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK 0xec +#define SPI_ENGINE_MAX_NUM_OFFLOADS 32 + +#define SPI_ENGINE_REG_OFFLOAD_CTRL(x) (0x100 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x)) +#define SPI_ENGINE_REG_OFFLOAD_STATUS(x) (0x104 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x)) +#define SPI_ENGINE_REG_OFFLOAD_RESET(x) (0x108 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x)) +#define SPI_ENGINE_REG_OFFLOAD_CMD_FIFO(x) (0x110 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x)) +#define SPI_ENGINE_REG_OFFLOAD_SDO_FIFO(x) (0x114 + SPI_ENGINE_MAX_NUM_OFFLOADS * (x)) + +#define SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_SDO GENMASK(15, 8) +#define SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_CMD GENMASK(7, 0) + #define SPI_ENGINE_INT_CMD_ALMOST_EMPTY BIT(0) #define SPI_ENGINE_INT_SDO_ALMOST_EMPTY BIT(1) #define SPI_ENGINE_INT_SDI_ALMOST_FULL BIT(2) #define SPI_ENGINE_INT_SYNC BIT(3) +#define SPI_ENGINE_INT_OFFLOAD_SYNC BIT(4) + +#define SPI_ENGINE_OFFLOAD_CTRL_ENABLE BIT(0) #define SPI_ENGINE_CONFIG_CPHA BIT(0) #define SPI_ENGINE_CONFIG_CPOL BIT(1) @@ -79,6 +101,10 @@ #define SPI_ENGINE_CMD_CS_INV(flags) \ SPI_ENGINE_CMD(SPI_ENGINE_INST_CS_INV, 0, (flags)) +/* default sizes - can be changed when SPI Engine firmware is compiled */ +#define SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE 16 +#define SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE 16 + struct spi_engine_program { unsigned int length; uint16_t instructions[] __counted_by(length); @@ -106,6 +132,19 @@ struct spi_engine_message_state { uint8_t *rx_buf; }; +enum { + SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED, + SPI_ENGINE_OFFLOAD_FLAG_PREPARED, +}; + +struct spi_engine_offload { + struct spi_engine *spi_engine; + unsigned long flags; + unsigned int offload_num; + unsigned int spi_mode_config; + u8 bits_per_word; +}; + struct spi_engine { struct clk *clk; struct clk *ref_clk; @@ -118,6 +157,12 @@ struct spi_engine { unsigned int int_enable; /* shadows hardware CS inversion flag state */ u8 cs_inv; + + unsigned int offload_ctrl_mem_size; + unsigned int offload_sdo_mem_size; + struct spi_offload *offload; + u32 offload_caps; + bool offload_requires_sync; }; static void spi_engine_program_add_cmd(struct spi_engine_program *p, @@ -163,9 +208,9 @@ static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry, unsigned int n = min(len, 256U); unsigned int flags = 0; - if (xfer->tx_buf) + if (xfer->tx_buf || (xfer->offload_flags & SPI_OFFLOAD_XFER_TX_STREAM)) flags |= SPI_ENGINE_TRANSFER_WRITE; - if (xfer->rx_buf) + if (xfer->rx_buf || (xfer->offload_flags & SPI_OFFLOAD_XFER_RX_STREAM)) flags |= SPI_ENGINE_TRANSFER_READ; spi_engine_program_add_cmd(p, dry, @@ -217,16 +262,44 @@ static void 
spi_engine_gen_cs(struct spi_engine_program *p, bool dry, * * NB: This is separate from spi_engine_compile_message() because the latter * is called twice and would otherwise result in double-evaluation. + * + * Returns 0 on success, -EINVAL on failure. */ -static void spi_engine_precompile_message(struct spi_message *msg) +static int spi_engine_precompile_message(struct spi_message *msg) { unsigned int clk_div, max_hz = msg->spi->controller->max_speed_hz; struct spi_transfer *xfer; + u8 min_bits_per_word = U8_MAX; + u8 max_bits_per_word = 0; list_for_each_entry(xfer, &msg->transfers, transfer_list) { + /* If we have an offload transfer, we can't rx to buffer */ + if (msg->offload && xfer->rx_buf) + return -EINVAL; + clk_div = DIV_ROUND_UP(max_hz, xfer->speed_hz); xfer->effective_speed_hz = max_hz / min(clk_div, 256U); + + if (xfer->len) { + min_bits_per_word = min(min_bits_per_word, xfer->bits_per_word); + max_bits_per_word = max(max_bits_per_word, xfer->bits_per_word); + } + } + + /* + * If all xfers in the message use the same bits_per_word, we can + * provide some optimization when using SPI offload. + */ + if (msg->offload) { + struct spi_engine_offload *priv = msg->offload->priv; + + if (min_bits_per_word == max_bits_per_word) + priv->bits_per_word = min_bits_per_word; + else + priv->bits_per_word = 0; } + + return 0; } static void spi_engine_compile_message(struct spi_message *msg, bool dry, @@ -234,6 +307,7 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry, { struct spi_device *spi = msg->spi; struct spi_controller *host = spi->controller; + struct spi_engine_offload *priv; struct spi_transfer *xfer; int clk_div, new_clk_div, inst_ns; bool keep_cs = false; @@ -247,9 +321,24 @@ static void spi_engine_compile_message(struct spi_message *msg, bool dry, clk_div = 1; - spi_engine_program_add_cmd(p, dry, - SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG, - spi_engine_get_config(spi))); + /* + * As an optimization, SPI offload sets once this when the offload is + * enabled instead of repeating the instruction in each message. + */ + if (msg->offload) { + priv = msg->offload->priv; + priv->spi_mode_config = spi_engine_get_config(spi); + + /* + * If all xfers use the same bits_per_word, it can be optimized + * in the same way. 
+ */ + bits_per_word = priv->bits_per_word; + } else { + spi_engine_program_add_cmd(p, dry, + SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG, + spi_engine_get_config(spi))); + } xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list); spi_engine_gen_cs(p, dry, spi, !xfer->cs_off); @@ -521,11 +610,107 @@ static irqreturn_t spi_engine_irq(int irq, void *devid) return IRQ_HANDLED; } +static int spi_engine_offload_prepare(struct spi_message *msg) +{ + struct spi_controller *host = msg->spi->controller; + struct spi_engine *spi_engine = spi_controller_get_devdata(host); + struct spi_engine_program *p = msg->opt_state; + struct spi_engine_offload *priv = msg->offload->priv; + struct spi_transfer *xfer; + void __iomem *cmd_addr; + void __iomem *sdo_addr; + size_t tx_word_count = 0; + unsigned int i; + + if (p->length > spi_engine->offload_ctrl_mem_size) + return -EINVAL; + + /* count total number of tx words in message */ + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + /* no support for reading to rx_buf */ + if (xfer->rx_buf) + return -EINVAL; + + if (!xfer->tx_buf) + continue; + + if (xfer->bits_per_word <= 8) + tx_word_count += xfer->len; + else if (xfer->bits_per_word <= 16) + tx_word_count += xfer->len / 2; + else + tx_word_count += xfer->len / 4; + } + + if (tx_word_count && !(spi_engine->offload_caps & SPI_OFFLOAD_CAP_TX_STATIC_DATA)) + return -EINVAL; + + if (tx_word_count > spi_engine->offload_sdo_mem_size) + return -EINVAL; + + /* + * This protects against calling spi_optimize_message() with an offload + * that has already been prepared with a different message. + */ + if (test_and_set_bit_lock(SPI_ENGINE_OFFLOAD_FLAG_PREPARED, &priv->flags)) + return -EBUSY; + + cmd_addr = spi_engine->base + + SPI_ENGINE_REG_OFFLOAD_CMD_FIFO(priv->offload_num); + sdo_addr = spi_engine->base + + SPI_ENGINE_REG_OFFLOAD_SDO_FIFO(priv->offload_num); + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if (!xfer->tx_buf) + continue; + + if (xfer->bits_per_word <= 8) { + const u8 *buf = xfer->tx_buf; + + for (i = 0; i < xfer->len; i++) + writel_relaxed(buf[i], sdo_addr); + } else if (xfer->bits_per_word <= 16) { + const u16 *buf = xfer->tx_buf; + + for (i = 0; i < xfer->len / 2; i++) + writel_relaxed(buf[i], sdo_addr); + } else { + const u32 *buf = xfer->tx_buf; + + for (i = 0; i < xfer->len / 4; i++) + writel_relaxed(buf[i], sdo_addr); + } + } + + for (i = 0; i < p->length; i++) + writel_relaxed(p->instructions[i], cmd_addr); + + return 0; +} + +static void spi_engine_offload_unprepare(struct spi_offload *offload) +{ + struct spi_engine_offload *priv = offload->priv; + struct spi_engine *spi_engine = priv->spi_engine; + + writel_relaxed(1, spi_engine->base + + SPI_ENGINE_REG_OFFLOAD_RESET(priv->offload_num)); + writel_relaxed(0, spi_engine->base + + SPI_ENGINE_REG_OFFLOAD_RESET(priv->offload_num)); + + clear_bit_unlock(SPI_ENGINE_OFFLOAD_FLAG_PREPARED, &priv->flags); +} + static int spi_engine_optimize_message(struct spi_message *msg) { + struct spi_controller *host = msg->spi->controller; + struct spi_engine *spi_engine = spi_controller_get_devdata(host); struct spi_engine_program p_dry, *p; + int ret; - spi_engine_precompile_message(msg); + ret = spi_engine_precompile_message(msg); + if (ret) + return ret; p_dry.length = 0; spi_engine_compile_message(msg, true, &p_dry); @@ -536,31 +721,81 @@ static int spi_engine_optimize_message(struct spi_message *msg) spi_engine_compile_message(msg, false, p); - spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC( 
- AXI_SPI_ENGINE_CUR_MSG_SYNC_ID)); + /* + * Non-offload needs SYNC for completion interrupt. Older versions of + * the IP core also need SYNC for offload to work properly. + */ + if (!msg->offload || spi_engine->offload_requires_sync) + spi_engine_program_add_cmd(p, false, SPI_ENGINE_CMD_SYNC( + msg->offload ? 0 : AXI_SPI_ENGINE_CUR_MSG_SYNC_ID)); msg->opt_state = p; + if (msg->offload) { + ret = spi_engine_offload_prepare(msg); + if (ret) { + msg->opt_state = NULL; + kfree(p); + return ret; + } + } + return 0; } static int spi_engine_unoptimize_message(struct spi_message *msg) { + if (msg->offload) + spi_engine_offload_unprepare(msg->offload); + kfree(msg->opt_state); return 0; } +static struct spi_offload +*spi_engine_get_offload(struct spi_device *spi, + const struct spi_offload_config *config) +{ + struct spi_controller *host = spi->controller; + struct spi_engine *spi_engine = spi_controller_get_devdata(host); + struct spi_engine_offload *priv; + + if (!spi_engine->offload) + return ERR_PTR(-ENODEV); + + if (config->capability_flags & ~spi_engine->offload_caps) + return ERR_PTR(-EINVAL); + + priv = spi_engine->offload->priv; + + if (test_and_set_bit_lock(SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED, &priv->flags)) + return ERR_PTR(-EBUSY); + + return spi_engine->offload; +} + +static void spi_engine_put_offload(struct spi_offload *offload) +{ + struct spi_engine_offload *priv = offload->priv; + + clear_bit_unlock(SPI_ENGINE_OFFLOAD_FLAG_ASSIGNED, &priv->flags); +} + static int spi_engine_setup(struct spi_device *device) { struct spi_controller *host = device->controller; struct spi_engine *spi_engine = spi_controller_get_devdata(host); + unsigned int reg; if (device->mode & SPI_CS_HIGH) spi_engine->cs_inv |= BIT(spi_get_chipselect(device, 0)); else spi_engine->cs_inv &= ~BIT(spi_get_chipselect(device, 0)); + writel_relaxed(SPI_ENGINE_CMD_SYNC(0), + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + writel_relaxed(SPI_ENGINE_CMD_CS_INV(spi_engine->cs_inv), spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); @@ -571,7 +806,11 @@ static int spi_engine_setup(struct spi_device *device) writel_relaxed(SPI_ENGINE_CMD_ASSERT(0, 0xff), spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); - return 0; + writel_relaxed(SPI_ENGINE_CMD_SYNC(1), + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + + return readl_relaxed_poll_timeout(spi_engine->base + SPI_ENGINE_REG_SYNC_ID, + reg, reg == 1, 1, 1000); } static int spi_engine_transfer_one_message(struct spi_controller *host, @@ -583,6 +822,12 @@ static int spi_engine_transfer_one_message(struct spi_controller *host, unsigned int int_enable = 0; unsigned long flags; + if (msg->offload) { + dev_err(&host->dev, "Single transfer offload not supported\n"); + msg->status = -EOPNOTSUPP; + goto out; + } + /* reinitialize message state for this transfer */ memset(st, 0, sizeof(*st)); st->cmd_buf = p->instructions; @@ -632,11 +877,89 @@ static int spi_engine_transfer_one_message(struct spi_controller *host, trace_spi_transfer_stop(msg, xfer); } +out: spi_finalize_current_message(host); return msg->status; } +static int spi_engine_trigger_enable(struct spi_offload *offload) +{ + struct spi_engine_offload *priv = offload->priv; + struct spi_engine *spi_engine = priv->spi_engine; + unsigned int reg; + int ret; + + writel_relaxed(SPI_ENGINE_CMD_SYNC(0), + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + + writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG, + priv->spi_mode_config), + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + + if (priv->bits_per_word) + 
writel_relaxed(SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS, + priv->bits_per_word), + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + + writel_relaxed(SPI_ENGINE_CMD_SYNC(1), + spi_engine->base + SPI_ENGINE_REG_CMD_FIFO); + + ret = readl_relaxed_poll_timeout(spi_engine->base + SPI_ENGINE_REG_SYNC_ID, + reg, reg == 1, 1, 1000); + if (ret) + return ret; + + reg = readl_relaxed(spi_engine->base + + SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num)); + reg |= SPI_ENGINE_OFFLOAD_CTRL_ENABLE; + writel_relaxed(reg, spi_engine->base + + SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num)); + return 0; +} + +static void spi_engine_trigger_disable(struct spi_offload *offload) +{ + struct spi_engine_offload *priv = offload->priv; + struct spi_engine *spi_engine = priv->spi_engine; + unsigned int reg; + + reg = readl_relaxed(spi_engine->base + + SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num)); + reg &= ~SPI_ENGINE_OFFLOAD_CTRL_ENABLE; + writel_relaxed(reg, spi_engine->base + + SPI_ENGINE_REG_OFFLOAD_CTRL(priv->offload_num)); +} + +static struct dma_chan +*spi_engine_tx_stream_request_dma_chan(struct spi_offload *offload) +{ + struct spi_engine_offload *priv = offload->priv; + char name[16]; + + snprintf(name, sizeof(name), "offload%u-tx", priv->offload_num); + + return dma_request_chan(offload->provider_dev, name); +} + +static struct dma_chan +*spi_engine_rx_stream_request_dma_chan(struct spi_offload *offload) +{ + struct spi_engine_offload *priv = offload->priv; + char name[16]; + + snprintf(name, sizeof(name), "offload%u-rx", priv->offload_num); + + return dma_request_chan(offload->provider_dev, name); +} + +static const struct spi_offload_ops spi_engine_offload_ops = { + .trigger_enable = spi_engine_trigger_enable, + .trigger_disable = spi_engine_trigger_disable, + .tx_stream_request_dma_chan = spi_engine_tx_stream_request_dma_chan, + .rx_stream_request_dma_chan = spi_engine_rx_stream_request_dma_chan, +}; + static void spi_engine_release_hw(void *p) { struct spi_engine *spi_engine = p; @@ -651,8 +974,7 @@ static int spi_engine_probe(struct platform_device *pdev) struct spi_engine *spi_engine; struct spi_controller *host; unsigned int version; - int irq; - int ret; + int irq, ret; irq = platform_get_irq(pdev, 0); if (irq < 0) @@ -667,6 +989,46 @@ static int spi_engine_probe(struct platform_device *pdev) spin_lock_init(&spi_engine->lock); init_completion(&spi_engine->msg_complete); + /* + * REVISIT: for now, all SPI Engines only have one offload. In the + * future, this should be read from a memory mapped register to + * determine the number of offloads enabled at HDL compile time. For + * now, we can tell if an offload is present if there is a trigger + * source wired up to it. 
+ */ + if (device_property_present(&pdev->dev, "trigger-sources")) { + struct spi_engine_offload *priv; + + spi_engine->offload = + devm_spi_offload_alloc(&pdev->dev, + sizeof(struct spi_engine_offload)); + if (IS_ERR(spi_engine->offload)) + return PTR_ERR(spi_engine->offload); + + priv = spi_engine->offload->priv; + priv->spi_engine = spi_engine; + priv->offload_num = 0; + + spi_engine->offload->ops = &spi_engine_offload_ops; + spi_engine->offload_caps = SPI_OFFLOAD_CAP_TRIGGER; + + if (device_property_match_string(&pdev->dev, "dma-names", "offload0-rx") >= 0) { + spi_engine->offload_caps |= SPI_OFFLOAD_CAP_RX_STREAM_DMA; + spi_engine->offload->xfer_flags |= SPI_OFFLOAD_XFER_RX_STREAM; + } + + if (device_property_match_string(&pdev->dev, "dma-names", "offload0-tx") >= 0) { + spi_engine->offload_caps |= SPI_OFFLOAD_CAP_TX_STREAM_DMA; + spi_engine->offload->xfer_flags |= SPI_OFFLOAD_XFER_TX_STREAM; + } else { + /* + * HDL compile option to enable TX DMA stream also disables + * the SDO memory, so can't do both at the same time. + */ + spi_engine->offload_caps |= SPI_OFFLOAD_CAP_TX_STATIC_DATA; + } + } + spi_engine->clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk"); if (IS_ERR(spi_engine->clk)) return PTR_ERR(spi_engine->clk); @@ -688,6 +1050,22 @@ static int spi_engine_probe(struct platform_device *pdev) return -ENODEV; } + if (ADI_AXI_PCORE_VER_MINOR(version) >= 1) { + unsigned int sizes = readl(spi_engine->base + + SPI_ENGINE_REG_OFFLOAD_MEM_ADDR_WIDTH); + + spi_engine->offload_ctrl_mem_size = 1 << + FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_CMD, sizes); + spi_engine->offload_sdo_mem_size = 1 << + FIELD_GET(SPI_ENGINE_SPI_OFFLOAD_MEM_WIDTH_SDO, sizes); + } else { + spi_engine->offload_ctrl_mem_size = SPI_ENGINE_OFFLOAD_CMD_FIFO_SIZE; + spi_engine->offload_sdo_mem_size = SPI_ENGINE_OFFLOAD_SDO_FIFO_SIZE; + } + + /* IP v1.5 dropped the requirement for SYNC in offload messages. */ + spi_engine->offload_requires_sync = ADI_AXI_PCORE_VER_MINOR(version) < 5; + writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET); writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING); writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE); @@ -709,6 +1087,8 @@ static int spi_engine_probe(struct platform_device *pdev) host->transfer_one_message = spi_engine_transfer_one_message; host->optimize_message = spi_engine_optimize_message; host->unoptimize_message = spi_engine_unoptimize_message; + host->get_offload = spi_engine_get_offload; + host->put_offload = spi_engine_put_offload; host->num_chipselect = 8; /* Some features depend of the IP core version. 
*/ diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c index 644b44d2aef2..18261cbd413b 100644 --- a/drivers/spi/spi-bcm63xx-hsspi.c +++ b/drivers/spi/spi-bcm63xx-hsspi.c @@ -745,7 +745,7 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev) if (IS_ERR(clk)) return PTR_ERR(clk); - reset = devm_reset_control_get_optional_exclusive(dev, NULL); + reset = devm_reset_control_get_optional_shared(dev, NULL); if (IS_ERR(reset)) return PTR_ERR(reset); diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index c8f64ec69344..b56210734caa 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c @@ -523,7 +523,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev) return PTR_ERR(clk); } - reset = devm_reset_control_get_optional_exclusive(dev, NULL); + reset = devm_reset_control_get_optional_shared(dev, NULL); if (IS_ERR(reset)) return PTR_ERR(reset); diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index c90462783b3f..d3c78f59b22c 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -1949,7 +1949,7 @@ static int cqspi_probe(struct platform_device *pdev) host->num_chipselect = cqspi->num_chipselect; - if (ddata->quirks & CQSPI_SUPPORT_DEVICE_RESET) + if (ddata && (ddata->quirks & CQSPI_SUPPORT_DEVICE_RESET)) cqspi_device_reset(cqspi); if (cqspi->use_direct_mode) { @@ -1958,12 +1958,7 @@ static int cqspi_probe(struct platform_device *pdev) goto probe_setup_failed; } - ret = devm_pm_runtime_enable(dev); - if (ret) { - if (cqspi->rx_chan) - dma_release_channel(cqspi->rx_chan); - goto probe_setup_failed; - } + pm_runtime_enable(dev); pm_runtime_set_autosuspend_delay(dev, CQSPI_AUTOSUSPEND_TIMEOUT); pm_runtime_use_autosuspend(dev); @@ -1981,6 +1976,7 @@ static int cqspi_probe(struct platform_device *pdev) return 0; probe_setup_failed: cqspi_controller_enable(cqspi, 0); + pm_runtime_disable(dev); probe_reset_failed: if (cqspi->is_jh7110) cqspi_jh7110_disable_clk(pdev, cqspi); @@ -1999,7 +1995,8 @@ static void cqspi_remove(struct platform_device *pdev) if (cqspi->rx_chan) dma_release_channel(cqspi->rx_chan); - clk_disable_unprepare(cqspi->clk); + if (pm_runtime_get_sync(&pdev->dev) >= 0) + clk_disable(cqspi->clk); if (cqspi->is_jh7110) cqspi_jh7110_disable_clk(pdev, cqspi); diff --git a/drivers/spi/spi-cavium-thunderx.c b/drivers/spi/spi-cavium-thunderx.c index 337aef12abcc..367ae7120bb3 100644 --- a/drivers/spi/spi-cavium-thunderx.c +++ b/drivers/spi/spi-cavium-thunderx.c @@ -34,7 +34,7 @@ static int thunderx_spi_probe(struct pci_dev *pdev, if (ret) goto error; - ret = pci_request_regions(pdev, DRV_NAME); + ret = pcim_request_all_regions(pdev, DRV_NAME); if (ret) goto error; @@ -78,7 +78,6 @@ static int thunderx_spi_probe(struct pci_dev *pdev, return 0; error: - pci_release_regions(pdev); spi_controller_put(host); return ret; } @@ -92,7 +91,6 @@ static void thunderx_spi_remove(struct pci_dev *pdev) if (!p) return; - pci_release_regions(pdev); /* Put everything in a known state. 
*/ writeq(0, p->register_base + OCTEON_SPI_CFG(p)); } diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c index ceefc253c549..14307dd800b7 100644 --- a/drivers/spi/spi-cs42l43.c +++ b/drivers/spi/spi-cs42l43.c @@ -237,7 +237,9 @@ static int cs42l43_get_speaker_id_gpios(struct cs42l43_spi *priv, int *result) int i, ret; descs = gpiod_get_array_optional(priv->dev, "spk-id", GPIOD_IN); - if (IS_ERR_OR_NULL(descs)) + if (!descs) + return 0; + else if (IS_ERR(descs)) return PTR_ERR(descs); spkid = 0; @@ -293,7 +295,7 @@ static struct spi_board_info *cs42l43_create_bridge_amp(struct cs42l43_spi *priv struct spi_board_info *info; if (spkid >= 0) { - props = devm_kmalloc(priv->dev, sizeof(*props), GFP_KERNEL); + props = devm_kcalloc(priv->dev, 2, sizeof(*props), GFP_KERNEL); if (!props) return NULL; diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c index 941ecc6f59f8..b3b883cb9541 100644 --- a/drivers/spi/spi-dw-core.c +++ b/drivers/spi/spi-dw-core.c @@ -423,7 +423,7 @@ static int dw_spi_transfer_one(struct spi_controller *host, int ret; dws->dma_mapped = 0; - dws->n_bytes = roundup_pow_of_two(BITS_TO_BYTES(transfer->bits_per_word)); + dws->n_bytes = spi_bpw_to_bytes(transfer->bits_per_word); dws->tx = (void *)transfer->tx_buf; dws->tx_len = transfer->len / dws->n_bytes; dws->rx = transfer->rx_buf; diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c index fc9e33be1e0e..e01c63d23b64 100644 --- a/drivers/spi/spi-fsi.c +++ b/drivers/spi/spi-fsi.c @@ -479,6 +479,19 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr, shift = SPI_FSI_SEQUENCE_SHIFT_IN(next->len); fsi_spi_sequence_add(&seq, shift); + } else if (next->tx_buf) { + if ((next->len + transfer->len) > (SPI_FSI_MAX_TX_SIZE + 8)) { + rc = -EINVAL; + goto error; + } + + len = next->len; + while (len > 8) { + fsi_spi_sequence_add(&seq, + SPI_FSI_SEQUENCE_SHIFT_OUT(8)); + len -= 8; + } + fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SHIFT_OUT(len)); } else { next = NULL; } diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 067c954cb6ea..0dcd49114095 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0+ // // Copyright 2013 Freescale Semiconductor, Inc. -// Copyright 2020 NXP +// Copyright 2020-2025 NXP // // Freescale DSPI driver // This file contains a driver for the Freescale DSPI @@ -62,6 +62,7 @@ #define SPI_SR_TFIWF BIT(18) #define SPI_SR_RFDF BIT(17) #define SPI_SR_CMDFFF BIT(16) +#define SPI_SR_TXRXS BIT(30) #define SPI_SR_CLEAR (SPI_SR_TCFQF | \ SPI_SR_TFUF | SPI_SR_TFFF | \ SPI_SR_CMDTCF | SPI_SR_SPEF | \ @@ -921,9 +922,20 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, struct spi_transfer *transfer; bool cs = false; int status = 0; + u32 val = 0; + bool cs_change = false; message->actual_length = 0; + /* Put DSPI in running mode if halted. 
*/ + regmap_read(dspi->regmap, SPI_MCR, &val); + if (val & SPI_MCR_HALT) { + regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, 0); + while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 && + !(val & SPI_SR_TXRXS)) + ; + } + list_for_each_entry(transfer, &message->transfers, transfer_list) { dspi->cur_transfer = transfer; dspi->cur_msg = message; @@ -953,6 +965,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, dspi->tx_cmd |= SPI_PUSHR_CMD_CONT; } + cs_change = transfer->cs_change; dspi->tx = transfer->tx_buf; dspi->rx = transfer->rx_buf; dspi->len = transfer->len; @@ -962,17 +975,28 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF); + regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); + spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer, dspi->progress, !dspi->irq); if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { status = dspi_dma_xfer(dspi); } else { + /* + * Reinitialize the completion before transferring data + * to avoid the case where it might remain in the done + * state due to a spurious interrupt from a previous + * transfer. This could falsely signal that the current + * transfer has completed. + */ + if (dspi->irq) + reinit_completion(&dspi->xfer_done); + dspi_fifo_write(dspi); if (dspi->irq) { wait_for_completion(&dspi->xfer_done); - reinit_completion(&dspi->xfer_done); } else { do { status = dspi_poll(dspi); @@ -988,6 +1012,15 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, dspi_deassert_cs(spi, &cs); } + if (status || !cs_change) { + /* Put DSPI in stop mode */ + regmap_update_bits(dspi->regmap, SPI_MCR, + SPI_MCR_HALT, SPI_MCR_HALT); + while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 && + val & SPI_SR_TXRXS) + ; + } + message->status = status; spi_finalize_current_message(ctlr); @@ -1167,6 +1200,20 @@ static int dspi_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume); +static const struct regmap_range dspi_yes_ranges[] = { + regmap_reg_range(SPI_MCR, SPI_MCR), + regmap_reg_range(SPI_TCR, SPI_CTAR(3)), + regmap_reg_range(SPI_SR, SPI_TXFR3), + regmap_reg_range(SPI_RXFR0, SPI_RXFR3), + regmap_reg_range(SPI_CTARE(0), SPI_CTARE(3)), + regmap_reg_range(SPI_SREX, SPI_SREX), +}; + +static const struct regmap_access_table dspi_access_table = { + .yes_ranges = dspi_yes_ranges, + .n_yes_ranges = ARRAY_SIZE(dspi_yes_ranges), +}; + static const struct regmap_range dspi_volatile_ranges[] = { regmap_reg_range(SPI_MCR, SPI_TCR), regmap_reg_range(SPI_SR, SPI_SR), @@ -1184,6 +1231,8 @@ static const struct regmap_config dspi_regmap_config = { .reg_stride = 4, .max_register = 0x88, .volatile_table = &dspi_volatile_table, + .rd_table = &dspi_access_table, + .wr_table = &dspi_access_table, }; static const struct regmap_range dspi_xspi_volatile_ranges[] = { @@ -1205,6 +1254,8 @@ static const struct regmap_config dspi_xspi_regmap_config[] = { .reg_stride = 4, .max_register = 0x13c, .volatile_table = &dspi_xspi_volatile_table, + .rd_table = &dspi_access_table, + .wr_table = &dspi_access_table, }, { .name = "pushr", @@ -1227,6 +1278,8 @@ static int dspi_init(struct fsl_dspi *dspi) if (!spi_controller_is_target(dspi->ctlr)) mcr |= SPI_MCR_HOST; + mcr |= SPI_MCR_HALT; + regmap_write(dspi->regmap, SPI_MCR, mcr); regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 40f5c8fdba76..5e3818445234 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ 
b/drivers/spi/spi-fsl-lpspi.c @@ -572,7 +572,7 @@ static int fsl_lpspi_calculate_timeout(struct fsl_lpspi_data *fsl_lpspi, timeout += 1; /* Double calculated timeout */ - return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC); + return secs_to_jiffies(2 * timeout); } static int fsl_lpspi_dma_transfer(struct spi_controller *controller, diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c index 2f54dc09d11b..c887abb028d7 100644 --- a/drivers/spi/spi-fsl-qspi.c +++ b/drivers/spi/spi-fsl-qspi.c @@ -264,14 +264,14 @@ static const struct fsl_qspi_devtype_data ls2080a_data = { struct fsl_qspi { void __iomem *iobase; void __iomem *ahb_addr; - u32 memmap_phy; - struct clk *clk, *clk_en; - struct device *dev; - struct completion c; const struct fsl_qspi_devtype_data *devtype_data; struct mutex lock; + struct completion c; + struct clk *clk, *clk_en; struct pm_qos_request pm_qos_req; + struct device *dev; int selected; + u32 memmap_phy; }; static inline int needs_swap_endian(struct fsl_qspi *q) @@ -844,13 +844,18 @@ static const struct spi_controller_mem_caps fsl_qspi_mem_caps = { .per_op_freq = true, }; -static void fsl_qspi_cleanup(void *data) +static void fsl_qspi_disable(void *data) { struct fsl_qspi *q = data; /* disable the hardware */ qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER); +} + +static void fsl_qspi_cleanup(void *data) +{ + struct fsl_qspi *q = data; fsl_qspi_clk_disable_unprep(q); @@ -866,7 +871,7 @@ static int fsl_qspi_probe(struct platform_device *pdev) struct fsl_qspi *q; int ret; - ctlr = spi_alloc_host(&pdev->dev, sizeof(*q)); + ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*q)); if (!ctlr) return -ENOMEM; @@ -876,68 +881,60 @@ static int fsl_qspi_probe(struct platform_device *pdev) q = spi_controller_get_devdata(ctlr); q->dev = dev; q->devtype_data = of_device_get_match_data(dev); - if (!q->devtype_data) { - ret = -ENODEV; - goto err_put_ctrl; - } + if (!q->devtype_data) + return -ENODEV; platform_set_drvdata(pdev, q); /* find the resources */ q->iobase = devm_platform_ioremap_resource_byname(pdev, "QuadSPI"); - if (IS_ERR(q->iobase)) { - ret = PTR_ERR(q->iobase); - goto err_put_ctrl; - } + if (IS_ERR(q->iobase)) + return PTR_ERR(q->iobase); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI-memory"); - if (!res) { - ret = -EINVAL; - goto err_put_ctrl; - } + if (!res) + return -EINVAL; q->memmap_phy = res->start; /* Since there are 4 cs, map size required is 4 times ahb_buf_size */ q->ahb_addr = devm_ioremap(dev, q->memmap_phy, (q->devtype_data->ahb_buf_size * 4)); - if (!q->ahb_addr) { - ret = -ENOMEM; - goto err_put_ctrl; - } + if (!q->ahb_addr) + return -ENOMEM; /* find the clocks */ q->clk_en = devm_clk_get(dev, "qspi_en"); - if (IS_ERR(q->clk_en)) { - ret = PTR_ERR(q->clk_en); - goto err_put_ctrl; - } + if (IS_ERR(q->clk_en)) + return PTR_ERR(q->clk_en); q->clk = devm_clk_get(dev, "qspi"); - if (IS_ERR(q->clk)) { - ret = PTR_ERR(q->clk); - goto err_put_ctrl; - } + if (IS_ERR(q->clk)) + return PTR_ERR(q->clk); + + mutex_init(&q->lock); ret = fsl_qspi_clk_prep_enable(q); if (ret) { dev_err(dev, "can not enable the clock\n"); - goto err_put_ctrl; + return ret; } + ret = devm_add_action_or_reset(dev, fsl_qspi_cleanup, q); + if (ret) + return ret; + /* find the irq */ ret = platform_get_irq(pdev, 0); if (ret < 0) - goto err_disable_clk; + return ret; ret = devm_request_irq(dev, ret, fsl_qspi_irq_handler, 0, pdev->name, q); if (ret) { dev_err(dev, "failed to request irq: %d\n", ret); - 
goto err_disable_clk; + return ret; } - mutex_init(&q->lock); - ctlr->bus_num = -1; ctlr->num_chipselect = 4; ctlr->mem_ops = &fsl_qspi_mem_ops; @@ -947,24 +944,15 @@ static int fsl_qspi_probe(struct platform_device *pdev) ctlr->dev.of_node = np; - ret = devm_add_action_or_reset(dev, fsl_qspi_cleanup, q); + ret = devm_add_action_or_reset(dev, fsl_qspi_disable, q); if (ret) - goto err_put_ctrl; + return ret; ret = devm_spi_register_controller(dev, ctlr); if (ret) - goto err_put_ctrl; + return ret; return 0; - -err_disable_clk: - fsl_qspi_clk_disable_unprep(q); - -err_put_ctrl: - spi_controller_put(ctlr); - - dev_err(dev, "Freescale QuadSPI probe failed\n"); - return ret; } static int fsl_qspi_suspend(struct device *dev) diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index 4f192e013cd6..ea5f1b10b79e 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c @@ -39,42 +39,14 @@ struct spi_gpio { /*----------------------------------------------------------------------*/ -/* - * Because the overhead of going through four GPIO procedure calls - * per transferred bit can make performance a problem, this code - * is set up so that you can use it in either of two ways: - * - * - The slow generic way: set up platform_data to hold the GPIO - * numbers used for MISO/MOSI/SCK, and issue procedure calls for - * each of them. This driver can handle several such busses. - * - * - The quicker inlined way: only helps with platform GPIO code - * that inlines operations for constant GPIOs. This can give - * you tight (fast!) inner loops, but each such bus needs a - * new driver. You'll define a new C file, with Makefile and - * Kconfig support; the C code can be a total of six lines: - * - * #define DRIVER_NAME "myboard_spi2" - * #define SPI_MISO_GPIO 119 - * #define SPI_MOSI_GPIO 120 - * #define SPI_SCK_GPIO 121 - * #define SPI_N_CHIPSEL 4 - * #include "spi-gpio.c" - */ - -#ifndef DRIVER_NAME #define DRIVER_NAME "spi_gpio" -#define GENERIC_BITBANG /* vs tight inlines */ - -#endif - /*----------------------------------------------------------------------*/ static inline struct spi_gpio *__pure spi_to_spi_gpio(const struct spi_device *spi) { - const struct spi_bitbang *bang; + struct spi_bitbang *bang; struct spi_gpio *spi_gpio; bang = spi_controller_get_devdata(spi->controller); @@ -341,16 +313,14 @@ static int spi_gpio_probe_pdata(struct platform_device *pdev, struct spi_gpio *spi_gpio = spi_controller_get_devdata(host); int i; -#ifdef GENERIC_BITBANG - if (!pdata || !pdata->num_chipselect) + if (!pdata) return -ENODEV; -#endif - /* - * The host needs to think there is a chipselect even if not - * connected - */ - host->num_chipselect = pdata->num_chipselect ?: 1; + /* It's just one always-selected device, fine to continue */ + if (!pdata->num_chipselect) + return 0; + + host->num_chipselect = pdata->num_chipselect; spi_gpio->cs_gpios = devm_kcalloc(dev, host->num_chipselect, sizeof(*spi_gpio->cs_gpios), GFP_KERNEL); @@ -445,8 +415,6 @@ static int spi_gpio_probe(struct platform_device *pdev) return devm_spi_register_controller(&pdev->dev, host); } -MODULE_ALIAS("platform:" DRIVER_NAME); - static const struct of_device_id spi_gpio_dt_ids[] = { { .compatible = "spi-gpio" }, {} @@ -465,3 +433,4 @@ module_platform_driver(spi_gpio_driver); MODULE_DESCRIPTION("SPI host driver using generic bitbanged GPIO "); MODULE_AUTHOR("David Brownell"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index eeb7d082c247..c93d80a4d734 
100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -1449,7 +1449,7 @@ static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size) timeout += 1; /* Double calculated timeout */ - return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC); + return secs_to_jiffies(2 * timeout); } static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, @@ -1695,9 +1695,12 @@ static int spi_imx_transfer_one(struct spi_controller *controller, struct spi_device *spi, struct spi_transfer *transfer) { + int ret; struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller); - spi_imx_setupxfer(spi, transfer); + ret = spi_imx_setupxfer(spi, transfer); + if (ret < 0) + return ret; transfer->effective_speed_hz = spi_imx->spi_bus_clk; /* flush rxfifo before transfer */ diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c index 4d9ffec900bb..4b63cb98df9c 100644 --- a/drivers/spi/spi-intel-pci.c +++ b/drivers/spi/spi-intel-pci.c @@ -44,6 +44,7 @@ static int intel_spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct intel_spi_boardinfo *info; + void __iomem *base; int ret; ret = pcim_enable_device(pdev); @@ -56,7 +57,12 @@ static int intel_spi_pci_probe(struct pci_dev *pdev, return -ENOMEM; info->data = pdev; - return intel_spi_probe(&pdev->dev, &pdev->resource[0], info); + + base = pcim_iomap_region(pdev, 0, KBUILD_MODNAME); + if (IS_ERR(base)) + return PTR_ERR(base); + + return intel_spi_probe(&pdev->dev, base, info); } static const struct pci_device_id intel_spi_pci_ids[] = { diff --git a/drivers/spi/spi-intel-platform.c b/drivers/spi/spi-intel-platform.c index 0974cca83a5d..6cbed0b2e063 100644 --- a/drivers/spi/spi-intel-platform.c +++ b/drivers/spi/spi-intel-platform.c @@ -14,14 +14,17 @@ static int intel_spi_platform_probe(struct platform_device *pdev) { struct intel_spi_boardinfo *info; - struct resource *mem; + void __iomem *base; info = dev_get_platdata(&pdev->dev); if (!info) return -EINVAL; - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - return intel_spi_probe(&pdev->dev, mem, info); + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + return intel_spi_probe(&pdev->dev, base, info); } static struct platform_driver intel_spi_platform_driver = { diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c index b0dcdb6fb8fa..5d5a546c62ea 100644 --- a/drivers/spi/spi-intel.c +++ b/drivers/spi/spi-intel.c @@ -1467,13 +1467,13 @@ EXPORT_SYMBOL_GPL(intel_spi_groups); /** * intel_spi_probe() - Probe the Intel SPI flash controller * @dev: Pointer to the parent device - * @mem: MMIO resource + * @base: iomapped MMIO resource * @info: Platform specific information * * Probes Intel SPI flash controller and creates the flash chip device. * Returns %0 on success and negative errno in case of failure. 
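The spi-intel hunks in this commit move MMIO mapping out of the shared core and into each bus binding, which then hands the cooked void __iomem pointer down. A condensed sketch of the pattern the two callers converge on; the *_sketch function names are illustrative, the boardinfo argument is assumed to come from PCI/platform data as in the full hunks, and unrelated probe scaffolding is trimmed:

#include <linux/pci.h>
#include <linux/platform_device.h>

static int intel_spi_pci_probe_sketch(struct pci_dev *pdev,
				      struct intel_spi_boardinfo *info)
{
	/* PCI path: managed BAR 0 mapping, released automatically */
	void __iomem *base = pcim_iomap_region(pdev, 0, KBUILD_MODNAME);

	if (IS_ERR(base))
		return PTR_ERR(base);

	return intel_spi_probe(&pdev->dev, base, info);
}

static int intel_spi_platform_probe_sketch(struct platform_device *pdev,
					   struct intel_spi_boardinfo *info)
{
	/* platform path: managed ioremap of the first MEM resource */
	void __iomem *base = devm_platform_ioremap_resource(pdev, 0);

	if (IS_ERR(base))
		return PTR_ERR(base);

	return intel_spi_probe(&pdev->dev, base, info);
}

Either way the core never sees a struct resource any more, which is why the forward declaration can also disappear from spi-intel.h below.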
*/ -int intel_spi_probe(struct device *dev, struct resource *mem, +int intel_spi_probe(struct device *dev, void __iomem *base, const struct intel_spi_boardinfo *info) { struct spi_controller *host; @@ -1488,10 +1488,7 @@ int intel_spi_probe(struct device *dev, struct resource *mem, ispi = spi_controller_get_devdata(host); - ispi->base = devm_ioremap_resource(dev, mem); - if (IS_ERR(ispi->base)) - return PTR_ERR(ispi->base); - + ispi->base = base; ispi->dev = dev; ispi->host = host; ispi->info = info; diff --git a/drivers/spi/spi-intel.h b/drivers/spi/spi-intel.h index c5f35060dd63..53b292c6ea0c 100644 --- a/drivers/spi/spi-intel.h +++ b/drivers/spi/spi-intel.h @@ -11,11 +11,9 @@ #include <linux/platform_data/x86/spi-intel.h> -struct resource; - extern const struct attribute_group *intel_spi_groups[]; -int intel_spi_probe(struct device *dev, struct resource *mem, +int intel_spi_probe(struct device *dev, void __iomem *base, const struct intel_spi_boardinfo *info); #endif /* SPI_INTEL_H */ diff --git a/drivers/spi/spi-loongson-core.c b/drivers/spi/spi-loongson-core.c index 4fec226456d1..b46f072a0387 100644 --- a/drivers/spi/spi-loongson-core.c +++ b/drivers/spi/spi-loongson-core.c @@ -5,6 +5,7 @@ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> +#include <linux/export.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c index 31a878d9458d..7dd92deffe3f 100644 --- a/drivers/spi/spi-loopback-test.c +++ b/drivers/spi/spi-loopback-test.c @@ -420,7 +420,7 @@ MODULE_LICENSE("GPL"); static void spi_test_print_hex_dump(char *pre, const void *ptr, size_t len) { /* limit the hex_dump */ - if (len < 1024) { + if (len <= 1024) { print_hex_dump(KERN_INFO, pre, DUMP_PREFIX_OFFSET, 16, 1, ptr, len, 0); @@ -494,8 +494,8 @@ struct rx_ranges { static int rx_ranges_cmp(void *priv, const struct list_head *a, const struct list_head *b) { - struct rx_ranges *rx_a = list_entry(a, struct rx_ranges, list); - struct rx_ranges *rx_b = list_entry(b, struct rx_ranges, list); + const struct rx_ranges *rx_a = list_entry(a, struct rx_ranges, list); + const struct rx_ranges *rx_b = list_entry(b, struct rx_ranges, list); if (rx_a->start > rx_b->start) return 1; @@ -635,8 +635,8 @@ static int spi_test_check_loopback_result(struct spi_device *spi, } else { /* first byte received */ txb = ((u8 *)xfer->rx_buf)[0]; - /* first byte may be 0 or xff */ - if (!((txb == 0) || (txb == 0xff))) { + /* first byte may be 0 or 0xff */ + if (txb != 0 && txb != 0xff) { dev_err(&spi->dev, "loopback strangeness - we expect 0x00 or 0xff, but not 0x%02x\n", txb); diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index a9f0f47f4759..5db0639d3b01 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -377,6 +377,17 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) /* Make sure the operation frequency is correct before going further */ spi_mem_adjust_op_freq(mem, (struct spi_mem_op *)op); + dev_vdbg(&mem->spi->dev, "[cmd: 0x%02x][%dB addr: %#8llx][%2dB dummy][%4dB data %s] %d%c-%d%c-%d%c-%d%c @ %uHz\n", + op->cmd.opcode, + op->addr.nbytes, (op->addr.nbytes ? op->addr.val : 0), + op->dummy.nbytes, + op->data.nbytes, (op->data.nbytes ? (op->data.dir == SPI_MEM_DATA_IN ? " read" : "write") : " "), + op->cmd.buswidth, op->cmd.dtr ? 'D' : 'S', + op->addr.buswidth, op->addr.dtr ? 'D' : 'S', + op->dummy.buswidth, op->dummy.dtr ? 'D' : 'S', + op->data.buswidth, op->data.dtr ? 
'D' : 'S', + op->max_freq ? op->max_freq : mem->spi->max_speed_hz); + ret = spi_mem_check_op(op); if (ret) return ret; @@ -585,7 +596,11 @@ u64 spi_mem_calc_op_duration(struct spi_mem_op *op) ns_per_cycles = 1000000000 / op->max_freq; ncycles += ((op->cmd.nbytes * 8) / op->cmd.buswidth) / (op->cmd.dtr ? 2 : 1); ncycles += ((op->addr.nbytes * 8) / op->addr.buswidth) / (op->addr.dtr ? 2 : 1); - ncycles += ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1); + + /* Dummy bytes are optional for some SPI flash memory operations */ + if (op->dummy.nbytes) + ncycles += ((op->dummy.nbytes * 8) / op->dummy.buswidth) / (op->dummy.dtr ? 2 : 1); + ncycles += ((op->data.nbytes * 8) / op->data.buswidth) / (op->data.dtr ? 2 : 1); return ncycles * ns_per_cycles; diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c index df74ad5060f8..6b9137307533 100644 --- a/drivers/spi/spi-meson-spicc.c +++ b/drivers/spi/spi-meson-spicc.c @@ -21,18 +21,26 @@ #include <linux/interrupt.h> #include <linux/reset.h> #include <linux/pinctrl/consumer.h> +#include <linux/dma-mapping.h> /* - * The Meson SPICC controller could support DMA based transfers, but is not - * implemented by the vendor code, and while having the registers documentation - * it has never worked on the GXL Hardware. - * The PIO mode is the only mode implemented, and due to badly designed HW : - * - all transfers are cutted in 16 words burst because the FIFO hangs on - * TX underflow, and there is no TX "Half-Empty" interrupt, so we go by - * FIFO max size chunk only - * - CS management is dumb, and goes UP between every burst, so is really a - * "Data Valid" signal than a Chip Select, GPIO link should be used instead - * to have a CS go down over the full transfer + * There are two modes for data transmission: PIO and DMA. + * When bits_per_word is 8, 16, 24, or 32, data is transferred using PIO mode. + * When bits_per_word is 64, DMA mode is used by default. + * + * DMA achieves a transfer with one or more SPI bursts, each SPI burst is made + * up of one or more DMA bursts. The DMA burst implementation mechanism is, + * For TX, when the number of words in TXFIFO is less than the preset + * reading threshold, SPICC starts a reading DMA burst, which reads the preset + * number of words from TX buffer, then writes them into TXFIFO. + * For RX, when the number of words in RXFIFO is greater than the preset + * writing threshold, SPICC starts a writing request burst, which reads the + * preset number of words from RXFIFO, then write them into RX buffer. 
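The dummy-phase guard added to spi_mem_calc_op_duration() above is worth a worked number: each phase contributes nbytes * 8 / buswidth clock cycles, and an op with no dummy phase can legitimately carry op->dummy.buswidth == 0, so skipping the phase when nbytes is zero gives both the correct duration and a safe computation. Illustrative figures for a plain SDR 1-1-4 fast read at op->max_freq = 50 MHz (hypothetical op):

/*
 * Worked example, all phases SDR:
 *   1-byte opcode on 1 line:    (1 * 8) / 1   =   8 cycles
 *   3-byte address on 1 line:   (3 * 8) / 1   =  24 cycles
 *   1 dummy byte on 4 lines:    (1 * 8) / 4   =   2 cycles (nbytes != 0)
 *   256 data bytes on 4 lines:  (256 * 8) / 4 = 512 cycles
 *
 *   ns_per_cycles = 1000000000 / 50000000 = 20 ns
 *   total = 546 cycles * 20 ns = 10920 ns, i.e. about 10.9 us
 */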
+ * DMA works if the transfer meets the following conditions, + * - 64 bits per word + * - The transfer length in word must be multiples of the dma_burst_len, and + * the dma_burst_len should be one of 8,7...2, otherwise, it will be split + * into several SPI bursts by this driver */ #define SPICC_MAX_BURST 128 @@ -128,6 +136,23 @@ #define SPICC_DWADDR 0x24 /* Write Address of DMA */ +#define SPICC_LD_CNTL0 0x28 +#define VSYNC_IRQ_SRC_SELECT BIT(0) +#define DMA_EN_SET_BY_VSYNC BIT(2) +#define XCH_EN_SET_BY_VSYNC BIT(3) +#define DMA_READ_COUNTER_EN BIT(4) +#define DMA_WRITE_COUNTER_EN BIT(5) +#define DMA_RADDR_LOAD_BY_VSYNC BIT(6) +#define DMA_WADDR_LOAD_BY_VSYNC BIT(7) +#define DMA_ADDR_LOAD_FROM_LD_ADDR BIT(8) + +#define SPICC_LD_CNTL1 0x2c +#define DMA_READ_COUNTER GENMASK(15, 0) +#define DMA_WRITE_COUNTER GENMASK(31, 16) +#define DMA_BURST_LEN_DEFAULT 8 +#define DMA_BURST_COUNT_MAX 0xffff +#define SPI_BURST_LEN_MAX (DMA_BURST_LEN_DEFAULT * DMA_BURST_COUNT_MAX) + #define SPICC_ENH_CTL0 0x38 /* Enhanced Feature */ #define SPICC_ENH_CLK_CS_DELAY_MASK GENMASK(15, 0) #define SPICC_ENH_DATARATE_MASK GENMASK(23, 16) @@ -171,6 +196,9 @@ struct meson_spicc_device { struct pinctrl *pinctrl; struct pinctrl_state *pins_idle_high; struct pinctrl_state *pins_idle_low; + dma_addr_t tx_dma; + dma_addr_t rx_dma; + bool using_dma; }; #define pow2_clk_to_spicc(_div) container_of(_div, struct meson_spicc_device, pow2_div) @@ -202,6 +230,148 @@ static void meson_spicc_oen_enable(struct meson_spicc_device *spicc) writel_relaxed(conf, spicc->base + SPICC_ENH_CTL0); } +static int meson_spicc_dma_map(struct meson_spicc_device *spicc, + struct spi_transfer *t) +{ + struct device *dev = spicc->host->dev.parent; + + if (!(t->tx_buf && t->rx_buf)) + return -EINVAL; + + t->tx_dma = dma_map_single(dev, (void *)t->tx_buf, t->len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, t->tx_dma)) + return -ENOMEM; + + t->rx_dma = dma_map_single(dev, t->rx_buf, t->len, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, t->rx_dma)) + return -ENOMEM; + + spicc->tx_dma = t->tx_dma; + spicc->rx_dma = t->rx_dma; + + return 0; +} + +static void meson_spicc_dma_unmap(struct meson_spicc_device *spicc, + struct spi_transfer *t) +{ + struct device *dev = spicc->host->dev.parent; + + if (t->tx_dma) + dma_unmap_single(dev, t->tx_dma, t->len, DMA_TO_DEVICE); + if (t->rx_dma) + dma_unmap_single(dev, t->rx_dma, t->len, DMA_FROM_DEVICE); +} + +/* + * According to the remain words length, calculate a suitable spi burst length + * and a dma burst length for current spi burst + */ +static u32 meson_spicc_calc_dma_len(struct meson_spicc_device *spicc, + u32 len, u32 *dma_burst_len) +{ + u32 i; + + if (len <= spicc->data->fifo_size) { + *dma_burst_len = len; + return len; + } + + *dma_burst_len = DMA_BURST_LEN_DEFAULT; + + if (len == (SPI_BURST_LEN_MAX + 1)) + return SPI_BURST_LEN_MAX - DMA_BURST_LEN_DEFAULT; + + if (len >= SPI_BURST_LEN_MAX) + return SPI_BURST_LEN_MAX; + + for (i = DMA_BURST_LEN_DEFAULT; i > 1; i--) + if ((len % i) == 0) { + *dma_burst_len = i; + return len; + } + + i = len % DMA_BURST_LEN_DEFAULT; + len -= i; + + if (i == 1) + len -= DMA_BURST_LEN_DEFAULT; + + return len; +} + +static void meson_spicc_setup_dma(struct meson_spicc_device *spicc) +{ + unsigned int len; + unsigned int dma_burst_len, dma_burst_count; + unsigned int count_en = 0; + unsigned int txfifo_thres = 0; + unsigned int read_req = 0; + unsigned int rxfifo_thres = 31; + unsigned int write_req = 0; + unsigned int ld_ctr1 = 0; + + writel_relaxed(spicc->tx_dma, 
spicc->base + SPICC_DRADDR); + writel_relaxed(spicc->rx_dma, spicc->base + SPICC_DWADDR); + + /* Set the max burst length to support a transmission with length of + * no more than 1024 bytes(128 words), which must use the CS management + * because of some strict timing requirements + */ + writel_bits_relaxed(SPICC_BURSTLENGTH_MASK, SPICC_BURSTLENGTH_MASK, + spicc->base + SPICC_CONREG); + + len = meson_spicc_calc_dma_len(spicc, spicc->xfer_remain, + &dma_burst_len); + spicc->xfer_remain -= len; + dma_burst_count = DIV_ROUND_UP(len, dma_burst_len); + dma_burst_len--; + + if (spicc->tx_dma) { + spicc->tx_dma += len; + count_en |= DMA_READ_COUNTER_EN; + txfifo_thres = spicc->data->fifo_size - dma_burst_len; + read_req = dma_burst_len; + ld_ctr1 |= FIELD_PREP(DMA_READ_COUNTER, dma_burst_count); + } + + if (spicc->rx_dma) { + spicc->rx_dma += len; + count_en |= DMA_WRITE_COUNTER_EN; + rxfifo_thres = dma_burst_len; + write_req = dma_burst_len; + ld_ctr1 |= FIELD_PREP(DMA_WRITE_COUNTER, dma_burst_count); + } + + writel_relaxed(count_en, spicc->base + SPICC_LD_CNTL0); + writel_relaxed(ld_ctr1, spicc->base + SPICC_LD_CNTL1); + writel_relaxed(SPICC_DMA_ENABLE + | SPICC_DMA_URGENT + | FIELD_PREP(SPICC_TXFIFO_THRESHOLD_MASK, txfifo_thres) + | FIELD_PREP(SPICC_READ_BURST_MASK, read_req) + | FIELD_PREP(SPICC_RXFIFO_THRESHOLD_MASK, rxfifo_thres) + | FIELD_PREP(SPICC_WRITE_BURST_MASK, write_req), + spicc->base + SPICC_DMAREG); +} + +static irqreturn_t meson_spicc_dma_irq(struct meson_spicc_device *spicc) +{ + if (readl_relaxed(spicc->base + SPICC_DMAREG) & SPICC_DMA_ENABLE) + return IRQ_HANDLED; + + if (spicc->xfer_remain) { + meson_spicc_setup_dma(spicc); + } else { + writel_bits_relaxed(SPICC_SMC, 0, spicc->base + SPICC_CONREG); + writel_relaxed(0, spicc->base + SPICC_INTREG); + writel_relaxed(0, spicc->base + SPICC_DMAREG); + meson_spicc_dma_unmap(spicc, spicc->xfer); + complete(&spicc->done); + } + + return IRQ_HANDLED; +} + static inline bool meson_spicc_txfull(struct meson_spicc_device *spicc) { return !!FIELD_GET(SPICC_TF, @@ -293,6 +463,9 @@ static irqreturn_t meson_spicc_irq(int irq, void *data) writel_bits_relaxed(SPICC_TC, SPICC_TC, spicc->base + SPICC_STATREG); + if (spicc->using_dma) + return meson_spicc_dma_irq(spicc); + /* Empty RX FIFO */ meson_spicc_rx(spicc); @@ -426,9 +599,6 @@ static int meson_spicc_transfer_one(struct spi_controller *host, meson_spicc_reset_fifo(spicc); - /* Setup burst */ - meson_spicc_setup_burst(spicc); - /* Setup wait for completion */ reinit_completion(&spicc->done); @@ -442,11 +612,36 @@ static int meson_spicc_transfer_one(struct spi_controller *host, /* Increase it twice and add 200 ms tolerance */ timeout += timeout + 200; - /* Start burst */ - writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG); + if (xfer->bits_per_word == 64) { + int ret; + + /* dma_burst_len 1 can't trigger a dma burst */ + if (xfer->len < 16) + return -EINVAL; + + ret = meson_spicc_dma_map(spicc, xfer); + if (ret) { + meson_spicc_dma_unmap(spicc, xfer); + dev_err(host->dev.parent, "dma map failed\n"); + return ret; + } + + spicc->using_dma = true; + spicc->xfer_remain = DIV_ROUND_UP(xfer->len, spicc->bytes_per_word); + meson_spicc_setup_dma(spicc); + writel_relaxed(SPICC_TE_EN, spicc->base + SPICC_INTREG); + writel_bits_relaxed(SPICC_SMC, SPICC_SMC, spicc->base + SPICC_CONREG); + } else { + spicc->using_dma = false; + /* Setup burst */ + meson_spicc_setup_burst(spicc); - /* Enable interrupts */ - writel_relaxed(SPICC_TC_EN, spicc->base + SPICC_INTREG); + /* Start burst */ 
+ writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG); + + /* Enable interrupts */ + writel_relaxed(SPICC_TC_EN, spicc->base + SPICC_INTREG); + } if (!wait_for_completion_timeout(&spicc->done, msecs_to_jiffies(timeout))) return -ETIMEDOUT; @@ -545,6 +740,14 @@ static int meson_spicc_setup(struct spi_device *spi) if (!spi->controller_state) spi->controller_state = spi_controller_get_devdata(spi->controller); + /* DMA works at 64 bits, the rest works on PIO */ + if (spi->bits_per_word != 8 && + spi->bits_per_word != 16 && + spi->bits_per_word != 24 && + spi->bits_per_word != 32 && + spi->bits_per_word != 64) + return -EINVAL; + return 0; } @@ -853,10 +1056,6 @@ static int meson_spicc_probe(struct platform_device *pdev) host->num_chipselect = 4; host->dev.of_node = pdev->dev.of_node; host->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LOOP; - host->bits_per_word_mask = SPI_BPW_MASK(32) | - SPI_BPW_MASK(24) | - SPI_BPW_MASK(16) | - SPI_BPW_MASK(8); host->flags = (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX); host->min_speed_hz = spicc->data->min_speed_hz; host->max_speed_hz = spicc->data->max_speed_hz; diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 197bf2dbe5de..4b0a1c0db041 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -20,6 +20,7 @@ #include <linux/spi/spi.h> #include <linux/spi/spi-mem.h> #include <linux/dma-mapping.h> +#include <linux/pm_qos.h> #define SPI_CFG0_REG 0x0000 #define SPI_CFG1_REG 0x0004 @@ -147,6 +148,7 @@ struct mtk_spi_compatible { * @tx_sgl_len: Size of TX DMA transfer * @rx_sgl_len: Size of RX DMA transfer * @dev_comp: Device data structure + * @qos_request: QoS request * @spi_clk_hz: Current SPI clock in Hz * @spimem_done: SPI-MEM operation completion * @use_spimem: Enables SPI-MEM @@ -166,6 +168,7 @@ struct mtk_spi { struct scatterlist *tx_sgl, *rx_sgl; u32 tx_sgl_len, rx_sgl_len; const struct mtk_spi_compatible *dev_comp; + struct pm_qos_request qos_request; u32 spi_clk_hz; struct completion spimem_done; bool use_spimem; @@ -356,6 +359,7 @@ static int mtk_spi_hw_init(struct spi_controller *host, struct mtk_chip_config *chip_config = spi->controller_data; struct mtk_spi *mdata = spi_controller_get_devdata(host); + cpu_latency_qos_update_request(&mdata->qos_request, 500); cpha = spi->mode & SPI_CPHA ? 1 : 0; cpol = spi->mode & SPI_CPOL ? 
1 : 0; @@ -459,6 +463,15 @@ static int mtk_spi_prepare_message(struct spi_controller *host, return mtk_spi_hw_init(host, msg->spi); } +static int mtk_spi_unprepare_message(struct spi_controller *host, + struct spi_message *message) +{ + struct mtk_spi *mdata = spi_controller_get_devdata(host); + + cpu_latency_qos_update_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE); + return 0; +} + static void mtk_spi_set_cs(struct spi_device *spi, bool enable) { u32 reg_val; @@ -1143,6 +1156,7 @@ static int mtk_spi_probe(struct platform_device *pdev) host->set_cs = mtk_spi_set_cs; host->prepare_message = mtk_spi_prepare_message; + host->unprepare_message = mtk_spi_unprepare_message; host->transfer_one = mtk_spi_transfer_one; host->can_dma = mtk_spi_can_dma; host->setup = mtk_spi_setup; @@ -1249,6 +1263,8 @@ static int mtk_spi_probe(struct platform_device *pdev) clk_disable_unprepare(mdata->spi_hclk); } + cpu_latency_qos_add_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE); + if (mdata->dev_comp->need_pad_sel) { if (mdata->pad_num != host->num_chipselect) return dev_err_probe(dev, -EINVAL, @@ -1292,6 +1308,7 @@ static void mtk_spi_remove(struct platform_device *pdev) struct mtk_spi *mdata = spi_controller_get_devdata(host); int ret; + cpu_latency_qos_remove_request(&mdata->qos_request); if (mdata->use_spimem && !completion_done(&mdata->spimem_done)) complete(&mdata->spimem_done); diff --git a/drivers/spi/spi-mtk-snfi.c b/drivers/spi/spi-mtk-snfi.c index fdbea9dffb62..e82ee6dcf498 100644 --- a/drivers/spi/spi-mtk-snfi.c +++ b/drivers/spi/spi-mtk-snfi.c @@ -1284,9 +1284,6 @@ static int mtk_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) { struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->controller); - dev_dbg(ms->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode, - op->addr.val, op->addr.buswidth, op->addr.nbytes, - op->data.buswidth, op->data.nbytes); if (mtk_snand_is_page_ops(op)) { if (op->data.dir == SPI_MEM_DATA_IN) return mtk_snand_read_page_cache(ms, op); diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c index c02c4204442f..0eb35c4e3987 100644 --- a/drivers/spi/spi-mux.c +++ b/drivers/spi/spi-mux.c @@ -68,9 +68,7 @@ static int spi_mux_select(struct spi_device *spi) priv->current_cs = spi_get_chipselect(spi, 0); - spi_setup(priv->spi); - - return 0; + return spi_setup(priv->spi); } static int spi_mux_setup(struct spi_device *spi) diff --git a/drivers/spi/spi-npcm-fiu.c b/drivers/spi/spi-npcm-fiu.c index 958bab27a081..67cc1d86de42 100644 --- a/drivers/spi/spi-npcm-fiu.c +++ b/drivers/spi/spi-npcm-fiu.c @@ -550,11 +550,6 @@ static int npcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) int ret = 0; u8 *buf; - dev_dbg(fiu->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n", - op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth, - op->dummy.buswidth, op->data.buswidth, op->addr.val, - op->data.nbytes); - if (fiu->spix_mode || op->addr.nbytes > 4) return -EOPNOTSUPP; diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c index bad6b30bab0e..f3d576505413 100644 --- a/drivers/spi/spi-nxp-fspi.c +++ b/drivers/spi/spi-nxp-fspi.c @@ -48,6 +48,8 @@ #include <linux/mutex.h> #include <linux/of.h> #include <linux/platform_device.h> +#include <linux/pinctrl/consumer.h> +#include <linux/pm_runtime.h> #include <linux/pm_qos.h> #include <linux/regmap.h> #include <linux/sizes.h> @@ -57,6 +59,9 @@ #include <linux/spi/spi.h> #include <linux/spi/spi-mem.h> +/* runtime pm timeout */ +#define FSPI_RPM_TIMEOUT 50 /* 50ms */ + /* Registers used 
by the driver */ #define FSPI_MCR0 0x00 #define FSPI_MCR0_AHB_TIMEOUT(x) ((x) << 24) @@ -394,6 +399,8 @@ struct nxp_fspi { struct mutex lock; struct pm_qos_request pm_qos_req; int selected; +#define FSPI_NEED_INIT (1 << 0) + int flags; }; static inline int needs_ip_only(struct nxp_fspi *f) @@ -627,15 +634,15 @@ static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f) return 0; } -static int nxp_fspi_clk_disable_unprep(struct nxp_fspi *f) +static void nxp_fspi_clk_disable_unprep(struct nxp_fspi *f) { if (is_acpi_node(dev_fwnode(f->dev))) - return 0; + return; clk_disable_unprepare(f->clk); clk_disable_unprepare(f->clk_en); - return 0; + return; } static void nxp_fspi_dll_calibration(struct nxp_fspi *f) @@ -925,7 +932,13 @@ static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->controller); int err = 0; - mutex_lock(&f->lock); + guard(mutex)(&f->lock); + + err = pm_runtime_get_sync(f->dev); + if (err < 0) { + dev_err(f->dev, "Failed to enable clock %d\n", __LINE__); + return err; + } /* Wait for controller being ready. */ err = fspi_readl_poll_tout(f, f->iobase + FSPI_STS0, @@ -955,7 +968,8 @@ static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) /* Invalidate the data in the AHB buffer. */ nxp_fspi_invalid(f); - mutex_unlock(&f->lock); + pm_runtime_mark_last_busy(f->dev); + pm_runtime_put_autosuspend(f->dev); return err; } @@ -1154,6 +1168,24 @@ static const struct spi_controller_mem_caps nxp_fspi_mem_caps = { .per_op_freq = true, }; +static void nxp_fspi_cleanup(void *data) +{ + struct nxp_fspi *f = data; + + /* enable clock first since there is register access */ + pm_runtime_get_sync(f->dev); + + /* disable the hardware */ + fspi_writel(f, FSPI_MCR0_MDIS, f->iobase + FSPI_MCR0); + + pm_runtime_disable(f->dev); + pm_runtime_put_noidle(f->dev); + nxp_fspi_clk_disable_unprep(f); + + if (f->ahb_addr) + iounmap(f->ahb_addr); +} + static int nxp_fspi_probe(struct platform_device *pdev) { struct spi_controller *ctlr; @@ -1161,10 +1193,10 @@ static int nxp_fspi_probe(struct platform_device *pdev) struct device_node *np = dev->of_node; struct resource *res; struct nxp_fspi *f; - int ret; + int ret, irq; u32 reg; - ctlr = spi_alloc_host(&pdev->dev, sizeof(*f)); + ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*f)); if (!ctlr) return -ENOMEM; @@ -1174,10 +1206,8 @@ static int nxp_fspi_probe(struct platform_device *pdev) f = spi_controller_get_devdata(ctlr); f->dev = dev; f->devtype_data = (struct nxp_fspi_devtype_data *)device_get_match_data(dev); - if (!f->devtype_data) { - ret = -ENODEV; - goto err_put_ctrl; - } + if (!f->devtype_data) + return -ENODEV; platform_set_drvdata(pdev, f); @@ -1186,11 +1216,8 @@ static int nxp_fspi_probe(struct platform_device *pdev) f->iobase = devm_platform_ioremap_resource(pdev, 0); else f->iobase = devm_platform_ioremap_resource_byname(pdev, "fspi_base"); - - if (IS_ERR(f->iobase)) { - ret = PTR_ERR(f->iobase); - goto err_put_ctrl; - } + if (IS_ERR(f->iobase)) + return PTR_ERR(f->iobase); /* find the resources - controller memory mapped space */ if (is_acpi_node(dev_fwnode(f->dev))) @@ -1198,11 +1225,8 @@ static int nxp_fspi_probe(struct platform_device *pdev) else res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_mmap"); - - if (!res) { - ret = -ENODEV; - goto err_put_ctrl; - } + if (!res) + return -ENODEV; /* assign memory mapped starting address and mapped size. 
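The nxp_fspi_exec_op() hunk above brackets all register work with a runtime-PM reference, so the controller clocks are only guaranteed on between the get and the put. A minimal sketch of that bracket, where do_mmio_work() is a placeholder for the real register I/O:

#include <linux/pm_runtime.h>

int do_mmio_work(struct device *dev);	/* placeholder for the register accesses */

static int fspi_pm_guarded_io(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* clocks on before any MMIO */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* the reference is taken even on failure */
		return ret;
	}

	ret = do_mmio_work(dev);

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* clocks gate FSPI_RPM_TIMEOUT ms later */

	return ret;
}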
*/ f->memmap_phy = res->start; @@ -1211,100 +1235,111 @@ static int nxp_fspi_probe(struct platform_device *pdev) /* find the clocks */ if (dev_of_node(&pdev->dev)) { f->clk_en = devm_clk_get(dev, "fspi_en"); - if (IS_ERR(f->clk_en)) { - ret = PTR_ERR(f->clk_en); - goto err_put_ctrl; - } + if (IS_ERR(f->clk_en)) + return PTR_ERR(f->clk_en); f->clk = devm_clk_get(dev, "fspi"); - if (IS_ERR(f->clk)) { - ret = PTR_ERR(f->clk); - goto err_put_ctrl; - } - - ret = nxp_fspi_clk_prep_enable(f); - if (ret) { - dev_err(dev, "can not enable the clock\n"); - goto err_put_ctrl; - } + if (IS_ERR(f->clk)) + return PTR_ERR(f->clk); } + /* find the irq */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return dev_err_probe(dev, irq, "Failed to get irq source"); + + pm_runtime_enable(dev); + pm_runtime_set_autosuspend_delay(dev, FSPI_RPM_TIMEOUT); + pm_runtime_use_autosuspend(dev); + + /* enable clock */ + ret = pm_runtime_get_sync(f->dev); + if (ret < 0) + return dev_err_probe(dev, ret, "Failed to enable clock"); + /* Clear potential interrupts */ reg = fspi_readl(f, f->iobase + FSPI_INTR); if (reg) fspi_writel(f, reg, f->iobase + FSPI_INTR); - /* find the irq */ - ret = platform_get_irq(pdev, 0); + nxp_fspi_default_setup(f); + + ret = pm_runtime_put_sync(dev); if (ret < 0) - goto err_disable_clk; + return dev_err_probe(dev, ret, "Failed to disable clock"); - ret = devm_request_irq(dev, ret, + ret = devm_request_irq(dev, irq, nxp_fspi_irq_handler, 0, pdev->name, f); - if (ret) { - dev_err(dev, "failed to request irq: %d\n", ret); - goto err_disable_clk; - } + if (ret) + return dev_err_probe(dev, ret, "Failed to request irq\n"); - mutex_init(&f->lock); + ret = devm_mutex_init(dev, &f->lock); + if (ret) + return dev_err_probe(dev, ret, "Failed to initialize lock\n"); ctlr->bus_num = -1; ctlr->num_chipselect = NXP_FSPI_MAX_CHIPSELECT; ctlr->mem_ops = &nxp_fspi_mem_ops; ctlr->mem_caps = &nxp_fspi_mem_caps; - - nxp_fspi_default_setup(f); - ctlr->dev.of_node = np; - ret = devm_spi_register_controller(&pdev->dev, ctlr); + ret = devm_add_action_or_reset(dev, nxp_fspi_cleanup, f); if (ret) - goto err_destroy_mutex; + return dev_err_probe(dev, ret, "Failed to register nxp_fspi_cleanup\n"); - return 0; + return devm_spi_register_controller(&pdev->dev, ctlr); +} -err_destroy_mutex: - mutex_destroy(&f->lock); +static int nxp_fspi_runtime_suspend(struct device *dev) +{ + struct nxp_fspi *f = dev_get_drvdata(dev); -err_disable_clk: nxp_fspi_clk_disable_unprep(f); -err_put_ctrl: - spi_controller_put(ctlr); - - dev_err(dev, "NXP FSPI probe failed\n"); - return ret; + return 0; } -static void nxp_fspi_remove(struct platform_device *pdev) +static int nxp_fspi_runtime_resume(struct device *dev) { - struct nxp_fspi *f = platform_get_drvdata(pdev); - - /* disable the hardware */ - fspi_writel(f, FSPI_MCR0_MDIS, f->iobase + FSPI_MCR0); + struct nxp_fspi *f = dev_get_drvdata(dev); + int ret; - nxp_fspi_clk_disable_unprep(f); + ret = nxp_fspi_clk_prep_enable(f); + if (ret) + return ret; - mutex_destroy(&f->lock); + if (f->flags & FSPI_NEED_INIT) { + nxp_fspi_default_setup(f); + ret = pinctrl_pm_select_default_state(dev); + if (ret) + dev_err(dev, "select flexspi default pinctrl failed!\n"); + f->flags &= ~FSPI_NEED_INIT; + } - if (f->ahb_addr) - iounmap(f->ahb_addr); + return ret; } static int nxp_fspi_suspend(struct device *dev) { - return 0; -} - -static int nxp_fspi_resume(struct device *dev) -{ struct nxp_fspi *f = dev_get_drvdata(dev); + int ret; - nxp_fspi_default_setup(f); + ret = pinctrl_pm_select_sleep_state(dev); + if 
(ret) { + dev_err(dev, "select flexspi sleep pinctrl failed!\n"); + return ret; + } - return 0; + f->flags |= FSPI_NEED_INIT; + + return pm_runtime_force_suspend(dev); } +static const struct dev_pm_ops nxp_fspi_pm_ops = { + RUNTIME_PM_OPS(nxp_fspi_runtime_suspend, nxp_fspi_runtime_resume, NULL) + SYSTEM_SLEEP_PM_OPS(nxp_fspi_suspend, pm_runtime_force_resume) +}; + static const struct of_device_id nxp_fspi_dt_ids[] = { { .compatible = "nxp,lx2160a-fspi", .data = (void *)&lx2160a_data, }, { .compatible = "nxp,imx8mm-fspi", .data = (void *)&imx8mm_data, }, @@ -1324,20 +1359,14 @@ static const struct acpi_device_id nxp_fspi_acpi_ids[] = { MODULE_DEVICE_TABLE(acpi, nxp_fspi_acpi_ids); #endif -static const struct dev_pm_ops nxp_fspi_pm_ops = { - .suspend = nxp_fspi_suspend, - .resume = nxp_fspi_resume, -}; - static struct platform_driver nxp_fspi_driver = { .driver = { .name = "nxp-fspi", .of_match_table = nxp_fspi_dt_ids, .acpi_match_table = ACPI_PTR(nxp_fspi_acpi_ids), - .pm = &nxp_fspi_pm_ops, + .pm = pm_ptr(&nxp_fspi_pm_ops), }, .probe = nxp_fspi_probe, - .remove = nxp_fspi_remove, }; module_platform_driver(nxp_fspi_driver); diff --git a/drivers/spi/spi-offload-trigger-pwm.c b/drivers/spi/spi-offload-trigger-pwm.c new file mode 100644 index 000000000000..805ed41560df --- /dev/null +++ b/drivers/spi/spi-offload-trigger-pwm.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 Analog Devices Inc. + * Copyright (C) 2024 BayLibre, SAS + * + * Generic PWM trigger for SPI offload. + */ + +#include <linux/device.h> +#include <linux/err.h> +#include <linux/math.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/pwm.h> +#include <linux/spi/offload/provider.h> +#include <linux/spi/offload/types.h> +#include <linux/time.h> +#include <linux/types.h> + +struct spi_offload_trigger_pwm_state { + struct device *dev; + struct pwm_device *pwm; +}; + +static bool spi_offload_trigger_pwm_match(struct spi_offload_trigger *trigger, + enum spi_offload_trigger_type type, + u64 *args, u32 nargs) +{ + if (nargs) + return false; + + return type == SPI_OFFLOAD_TRIGGER_PERIODIC; +} + +static int spi_offload_trigger_pwm_validate(struct spi_offload_trigger *trigger, + struct spi_offload_trigger_config *config) +{ + struct spi_offload_trigger_pwm_state *st = spi_offload_trigger_get_priv(trigger); + struct spi_offload_trigger_periodic *periodic = &config->periodic; + struct pwm_waveform wf = { }; + int ret; + + if (config->type != SPI_OFFLOAD_TRIGGER_PERIODIC) + return -EINVAL; + + if (!periodic->frequency_hz) + return -EINVAL; + + wf.period_length_ns = DIV_ROUND_UP_ULL(NSEC_PER_SEC, periodic->frequency_hz); + /* REVISIT: 50% duty-cycle for now - may add config parameter later */ + wf.duty_length_ns = wf.period_length_ns / 2; + + ret = pwm_round_waveform_might_sleep(st->pwm, &wf); + if (ret < 0) + return ret; + + periodic->frequency_hz = DIV_ROUND_UP_ULL(NSEC_PER_SEC, wf.period_length_ns); + + return 0; +} + +static int spi_offload_trigger_pwm_enable(struct spi_offload_trigger *trigger, + struct spi_offload_trigger_config *config) +{ + struct spi_offload_trigger_pwm_state *st = spi_offload_trigger_get_priv(trigger); + struct spi_offload_trigger_periodic *periodic = &config->periodic; + struct pwm_waveform wf = { }; + + if (config->type != SPI_OFFLOAD_TRIGGER_PERIODIC) + return -EINVAL; + + if (!periodic->frequency_hz) + return -EINVAL; + + wf.period_length_ns = DIV_ROUND_UP_ULL(NSEC_PER_SEC, 
periodic->frequency_hz); + /* REVISIT: 50% duty-cycle for now - may add config parameter later */ + wf.duty_length_ns = wf.period_length_ns / 2; + + return pwm_set_waveform_might_sleep(st->pwm, &wf, false); +} + +static void spi_offload_trigger_pwm_disable(struct spi_offload_trigger *trigger) +{ + struct spi_offload_trigger_pwm_state *st = spi_offload_trigger_get_priv(trigger); + struct pwm_waveform wf; + int ret; + + ret = pwm_get_waveform_might_sleep(st->pwm, &wf); + if (ret < 0) { + dev_err(st->dev, "failed to get waveform: %d\n", ret); + return; + } + + wf.duty_length_ns = 0; + + ret = pwm_set_waveform_might_sleep(st->pwm, &wf, false); + if (ret < 0) + dev_err(st->dev, "failed to disable PWM: %d\n", ret); +} + +static const struct spi_offload_trigger_ops spi_offload_trigger_pwm_ops = { + .match = spi_offload_trigger_pwm_match, + .validate = spi_offload_trigger_pwm_validate, + .enable = spi_offload_trigger_pwm_enable, + .disable = spi_offload_trigger_pwm_disable, +}; + +static void spi_offload_trigger_pwm_release(void *data) +{ + pwm_disable(data); +} + +static int spi_offload_trigger_pwm_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct spi_offload_trigger_info info = { + .fwnode = dev_fwnode(dev), + .ops = &spi_offload_trigger_pwm_ops, + }; + struct spi_offload_trigger_pwm_state *st; + struct pwm_state state; + int ret; + + st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + info.priv = st; + st->dev = dev; + + st->pwm = devm_pwm_get(dev, NULL); + if (IS_ERR(st->pwm)) + return dev_err_probe(dev, PTR_ERR(st->pwm), "failed to get PWM\n"); + + /* init with duty_cycle = 0, output enabled to ensure trigger off */ + pwm_init_state(st->pwm, &state); + state.enabled = true; + + ret = pwm_apply_might_sleep(st->pwm, &state); + if (ret < 0) + return dev_err_probe(dev, ret, "failed to apply PWM state\n"); + + ret = devm_add_action_or_reset(dev, spi_offload_trigger_pwm_release, st->pwm); + if (ret) + return ret; + + return devm_spi_offload_trigger_register(dev, &info); +} + +static const struct of_device_id spi_offload_trigger_pwm_of_match_table[] = { + { .compatible = "pwm-trigger" }, + { } +}; +MODULE_DEVICE_TABLE(of, spi_offload_trigger_pwm_of_match_table); + +static struct platform_driver spi_offload_trigger_pwm_driver = { + .driver = { + .name = "pwm-trigger", + .of_match_table = spi_offload_trigger_pwm_of_match_table, + }, + .probe = spi_offload_trigger_pwm_probe, +}; +module_platform_driver(spi_offload_trigger_pwm_driver); + +MODULE_AUTHOR("David Lechner <dlechner@baylibre.com>"); +MODULE_DESCRIPTION("Generic PWM trigger"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-offload.c b/drivers/spi/spi-offload.c new file mode 100644 index 000000000000..d336f4d228d5 --- /dev/null +++ b/drivers/spi/spi-offload.c @@ -0,0 +1,465 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024 Analog Devices Inc. + * Copyright (C) 2024 BayLibre, SAS + */ + +/* + * SPI Offloading support. + * + * Some SPI controllers support offloading of SPI transfers. Essentially, this + * is the ability for a SPI controller to perform SPI transfers with minimal + * or even no CPU intervention, e.g. via a specialized SPI controller with a + * hardware trigger or via a conventional SPI controller using a non-Linux MCU + * processor core to offload the work. 
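Before the implementation, a consumer-side sketch of how a peripheral driver is expected to use this API: acquire the offload, its trigger, and optionally a stream DMA channel, all device-managed. The driver name is made up, the capability flag constants live in the offload types header rather than in this diff, and error handling is reduced to pass-through:

#include <linux/spi/offload/consumer.h>
#include <linux/spi/offload/types.h>

static int adc_offload_probe_sketch(struct device *dev, struct spi_device *spi)
{
	static const struct spi_offload_config config = {
		.capability_flags = SPI_OFFLOAD_CAP_TRIGGER |
				    SPI_OFFLOAD_CAP_RX_STREAM_DMA,
	};
	struct spi_offload_trigger *trigger;
	struct spi_offload *offload;
	struct dma_chan *rx_dma;

	offload = devm_spi_offload_get(dev, spi, &config);
	if (IS_ERR(offload))
		return PTR_ERR(offload);

	trigger = devm_spi_offload_trigger_get(dev, offload,
					       SPI_OFFLOAD_TRIGGER_PERIODIC);
	if (IS_ERR(trigger))
		return PTR_ERR(trigger);	/* commonly -EPROBE_DEFER */

	rx_dma = devm_spi_offload_rx_stream_request_dma_chan(dev, offload);
	if (IS_ERR(rx_dma))
		return PTR_ERR(rx_dma);

	return 0;
}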
+ */ + +#define DEFAULT_SYMBOL_NAMESPACE "SPI_OFFLOAD" + +#include <linux/cleanup.h> +#include <linux/device.h> +#include <linux/dmaengine.h> +#include <linux/export.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/property.h> +#include <linux/spi/offload/consumer.h> +#include <linux/spi/offload/provider.h> +#include <linux/spi/offload/types.h> +#include <linux/spi/spi.h> +#include <linux/types.h> + +struct spi_controller_and_offload { + struct spi_controller *controller; + struct spi_offload *offload; +}; + +struct spi_offload_trigger { + struct list_head list; + struct kref ref; + struct fwnode_handle *fwnode; + /* synchronizes calling ops and driver registration */ + struct mutex lock; + /* + * If the provider goes away while the consumer still has a reference, + * ops and priv will be set to NULL and all calls will fail with -ENODEV. + */ + const struct spi_offload_trigger_ops *ops; + void *priv; +}; + +static LIST_HEAD(spi_offload_triggers); +static DEFINE_MUTEX(spi_offload_triggers_lock); + +/** + * devm_spi_offload_alloc() - Allocate offload instance + * @dev: Device for devm purposes and assigned to &struct spi_offload.provider_dev + * @priv_size: Size of private data to allocate + * + * Offload providers should use this to allocate offload instances. + * + * Return: Pointer to new offload instance or error on failure. + */ +struct spi_offload *devm_spi_offload_alloc(struct device *dev, + size_t priv_size) +{ + struct spi_offload *offload; + void *priv; + + offload = devm_kzalloc(dev, sizeof(*offload), GFP_KERNEL); + if (!offload) + return ERR_PTR(-ENOMEM); + + priv = devm_kzalloc(dev, priv_size, GFP_KERNEL); + if (!priv) + return ERR_PTR(-ENOMEM); + + offload->provider_dev = dev; + offload->priv = priv; + + return offload; +} +EXPORT_SYMBOL_GPL(devm_spi_offload_alloc); + +static void spi_offload_put(void *data) +{ + struct spi_controller_and_offload *resource = data; + + resource->controller->put_offload(resource->offload); + kfree(resource); +} + +/** + * devm_spi_offload_get() - Get an offload instance + * @dev: Device for devm purposes + * @spi: SPI device to use for the transfers + * @config: Offload configuration + * + * Peripheral drivers call this function to get an offload instance that meets + * the requirements specified in @config. If no suitable offload instance is + * available, -ENODEV is returned. + * + * Return: Offload instance or error on failure. 
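devm_spi_offload_alloc() above is the provider half of the story: the controller sizes a private area at allocation time and fills it in before wiring up its ops. A sketch under the assumption that struct my_offload_priv and the register pointer are purely illustrative:

struct my_offload_priv {
	void __iomem *regs;	/* whatever state the provider needs */
};

static int controller_alloc_offload_sketch(struct device *dev,
					   void __iomem *regs)
{
	struct spi_offload *offload;
	struct my_offload_priv *priv;

	offload = devm_spi_offload_alloc(dev, sizeof(*priv));
	if (IS_ERR(offload))
		return PTR_ERR(offload);

	priv = offload->priv;
	priv->regs = regs;

	/* the controller would also set offload->ops here */

	return 0;
}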
+ */ +struct spi_offload *devm_spi_offload_get(struct device *dev, + struct spi_device *spi, + const struct spi_offload_config *config) +{ + struct spi_controller_and_offload *resource; + struct spi_offload *offload; + int ret; + + if (!spi || !config) + return ERR_PTR(-EINVAL); + + if (!spi->controller->get_offload) + return ERR_PTR(-ENODEV); + + resource = kzalloc(sizeof(*resource), GFP_KERNEL); + if (!resource) + return ERR_PTR(-ENOMEM); + + offload = spi->controller->get_offload(spi, config); + if (IS_ERR(offload)) { + kfree(resource); + return offload; + } + + resource->controller = spi->controller; + resource->offload = offload; + + ret = devm_add_action_or_reset(dev, spi_offload_put, resource); + if (ret) + return ERR_PTR(ret); + + return offload; +} +EXPORT_SYMBOL_GPL(devm_spi_offload_get); + +static void spi_offload_trigger_free(struct kref *ref) +{ + struct spi_offload_trigger *trigger = + container_of(ref, struct spi_offload_trigger, ref); + + mutex_destroy(&trigger->lock); + fwnode_handle_put(trigger->fwnode); + kfree(trigger); +} + +static void spi_offload_trigger_put(void *data) +{ + struct spi_offload_trigger *trigger = data; + + scoped_guard(mutex, &trigger->lock) + if (trigger->ops && trigger->ops->release) + trigger->ops->release(trigger); + + kref_put(&trigger->ref, spi_offload_trigger_free); +} + +static struct spi_offload_trigger +*spi_offload_trigger_get(enum spi_offload_trigger_type type, + struct fwnode_reference_args *args) +{ + struct spi_offload_trigger *trigger; + bool match = false; + int ret; + + guard(mutex)(&spi_offload_triggers_lock); + + list_for_each_entry(trigger, &spi_offload_triggers, list) { + if (trigger->fwnode != args->fwnode) + continue; + + match = trigger->ops->match(trigger, type, args->args, args->nargs); + if (match) + break; + } + + if (!match) + return ERR_PTR(-EPROBE_DEFER); + + guard(mutex)(&trigger->lock); + + if (trigger->ops->request) { + ret = trigger->ops->request(trigger, type, args->args, args->nargs); + if (ret) + return ERR_PTR(ret); + } + + kref_get(&trigger->ref); + + return trigger; +} + +/** + * devm_spi_offload_trigger_get() - Get an offload trigger instance + * @dev: Device for devm purposes. + * @offload: Offload instance connected to a trigger. + * @type: Trigger type to get. + * + * Return: Offload trigger instance or error on failure. + */ +struct spi_offload_trigger +*devm_spi_offload_trigger_get(struct device *dev, + struct spi_offload *offload, + enum spi_offload_trigger_type type) +{ + struct spi_offload_trigger *trigger; + struct fwnode_reference_args args; + int ret; + + ret = fwnode_property_get_reference_args(dev_fwnode(offload->provider_dev), + "trigger-sources", + "#trigger-source-cells", 0, 0, + &args); + if (ret) + return ERR_PTR(ret); + + trigger = spi_offload_trigger_get(type, &args); + fwnode_handle_put(args.fwnode); + if (IS_ERR(trigger)) + return trigger; + + ret = devm_add_action_or_reset(dev, spi_offload_trigger_put, trigger); + if (ret) + return ERR_PTR(ret); + + return trigger; +} +EXPORT_SYMBOL_GPL(devm_spi_offload_trigger_get); + +/** + * spi_offload_trigger_validate - Validate the requested trigger + * @trigger: Offload trigger instance + * @config: Trigger config to validate + * + * On success, @config may be modified to reflect what the hardware can do. + * For example, the frequency of a periodic trigger may be adjusted to the + * nearest supported value. + * + * Callers will likely need to do additional validation of the modified trigger + * parameters. 
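From the consumer side this adjust-then-recheck contract reads as below; the 1 MHz request and the tolerance check are made-up values:

static int check_sample_rate_sketch(struct spi_offload_trigger *trigger)
{
	struct spi_offload_trigger_config config = {
		.type = SPI_OFFLOAD_TRIGGER_PERIODIC,
		.periodic = { .frequency_hz = 1000000 },
	};
	int ret;

	ret = spi_offload_trigger_validate(trigger, &config);
	if (ret)
		return ret;

	/* the provider may have rounded frequency_hz to what the hw can do */
	if (config.periodic.frequency_hz < 990000)
		return -EINVAL;

	return 0;
}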
+ * + * Return: 0 on success, negative error code on failure. + */ +int spi_offload_trigger_validate(struct spi_offload_trigger *trigger, + struct spi_offload_trigger_config *config) +{ + guard(mutex)(&trigger->lock); + + if (!trigger->ops) + return -ENODEV; + + if (!trigger->ops->validate) + return -EOPNOTSUPP; + + return trigger->ops->validate(trigger, config); +} +EXPORT_SYMBOL_GPL(spi_offload_trigger_validate); + +/** + * spi_offload_trigger_enable - enables trigger for offload + * @offload: Offload instance + * @trigger: Offload trigger instance + * @config: Trigger config to validate + * + * There must be a prepared offload instance with the specified ID (i.e. + * spi_optimize_message() was called with the same offload assigned to the + * message). This will also reserve the bus for exclusive use by the offload + * instance until the trigger is disabled. Any other attempts to send a + * transfer or lock the bus will fail with -EBUSY during this time. + * + * Calls must be balanced with spi_offload_trigger_disable(). + * + * Context: can sleep + * Return: 0 on success, else a negative error code. + */ +int spi_offload_trigger_enable(struct spi_offload *offload, + struct spi_offload_trigger *trigger, + struct spi_offload_trigger_config *config) +{ + int ret; + + guard(mutex)(&trigger->lock); + + if (!trigger->ops) + return -ENODEV; + + if (offload->ops && offload->ops->trigger_enable) { + ret = offload->ops->trigger_enable(offload); + if (ret) + return ret; + } + + if (trigger->ops->enable) { + ret = trigger->ops->enable(trigger, config); + if (ret) { + if (offload->ops && offload->ops->trigger_disable) + offload->ops->trigger_disable(offload); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(spi_offload_trigger_enable); + +/** + * spi_offload_trigger_disable - disables hardware trigger for offload + * @offload: Offload instance + * @trigger: Offload trigger instance + * + * Disables the hardware trigger for the offload instance with the specified ID + * and releases the bus for use by other clients. + * + * Context: can sleep + */ +void spi_offload_trigger_disable(struct spi_offload *offload, + struct spi_offload_trigger *trigger) +{ + if (offload->ops && offload->ops->trigger_disable) + offload->ops->trigger_disable(offload); + + guard(mutex)(&trigger->lock); + + if (!trigger->ops) + return; + + if (trigger->ops->disable) + trigger->ops->disable(trigger); +} +EXPORT_SYMBOL_GPL(spi_offload_trigger_disable); + +static void spi_offload_release_dma_chan(void *chan) +{ + dma_release_channel(chan); +} + +/** + * devm_spi_offload_tx_stream_request_dma_chan - Get the DMA channel info for the TX stream + * @dev: Device for devm purposes. + * @offload: Offload instance + * + * This is the DMA channel that will provide data to transfers that use the + * %SPI_OFFLOAD_XFER_TX_STREAM offload flag. 
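A sketch of the balanced pair the enable/disable kerneldoc above requires, with the middle comment standing in for whatever the peripheral driver does while transfers run autonomously:

static int run_capture_sketch(struct spi_offload *offload,
			      struct spi_offload_trigger *trigger,
			      struct spi_offload_trigger_config *config)
{
	int ret;

	/* reserves the bus: other clients now see -EBUSY */
	ret = spi_offload_trigger_enable(offload, trigger, config);
	if (ret)
		return ret;

	/* ... transfers run on each trigger event, no CPU involvement ... */

	/* must balance the enable; releases the bus again */
	spi_offload_trigger_disable(offload, trigger);

	return 0;
}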
+ * + * Return: Pointer to DMA channel info, or negative error code + */ +struct dma_chan +*devm_spi_offload_tx_stream_request_dma_chan(struct device *dev, + struct spi_offload *offload) +{ + struct dma_chan *chan; + int ret; + + if (!offload->ops || !offload->ops->tx_stream_request_dma_chan) + return ERR_PTR(-EOPNOTSUPP); + + chan = offload->ops->tx_stream_request_dma_chan(offload); + if (IS_ERR(chan)) + return chan; + + ret = devm_add_action_or_reset(dev, spi_offload_release_dma_chan, chan); + if (ret) + return ERR_PTR(ret); + + return chan; +} +EXPORT_SYMBOL_GPL(devm_spi_offload_tx_stream_request_dma_chan); + +/** + * devm_spi_offload_rx_stream_request_dma_chan - Get the DMA channel info for the RX stream + * @dev: Device for devm purposes. + * @offload: Offload instance + * + * This is the DMA channel that will receive data from transfers that use the + * %SPI_OFFLOAD_XFER_RX_STREAM offload flag. + * + * Return: Pointer to DMA channel info, or negative error code + */ +struct dma_chan +*devm_spi_offload_rx_stream_request_dma_chan(struct device *dev, + struct spi_offload *offload) +{ + struct dma_chan *chan; + int ret; + + if (!offload->ops || !offload->ops->rx_stream_request_dma_chan) + return ERR_PTR(-EOPNOTSUPP); + + chan = offload->ops->rx_stream_request_dma_chan(offload); + if (IS_ERR(chan)) + return chan; + + ret = devm_add_action_or_reset(dev, spi_offload_release_dma_chan, chan); + if (ret) + return ERR_PTR(ret); + + return chan; +} +EXPORT_SYMBOL_GPL(devm_spi_offload_rx_stream_request_dma_chan); + +/* Triggers providers */ + +static void spi_offload_trigger_unregister(void *data) +{ + struct spi_offload_trigger *trigger = data; + + scoped_guard(mutex, &spi_offload_triggers_lock) + list_del(&trigger->list); + + scoped_guard(mutex, &trigger->lock) { + trigger->priv = NULL; + trigger->ops = NULL; + } + + kref_put(&trigger->ref, spi_offload_trigger_free); +} + +/** + * devm_spi_offload_trigger_register() - Allocate and register an offload trigger + * @dev: Device for devm purposes. + * @info: Provider-specific trigger info. + * + * Return: 0 on success, else a negative error code. + */ +int devm_spi_offload_trigger_register(struct device *dev, + struct spi_offload_trigger_info *info) +{ + struct spi_offload_trigger *trigger; + + if (!info->fwnode || !info->ops || !info->ops->match) + return -EINVAL; + + trigger = kzalloc(sizeof(*trigger), GFP_KERNEL); + if (!trigger) + return -ENOMEM; + + kref_init(&trigger->ref); + mutex_init(&trigger->lock); + trigger->fwnode = fwnode_handle_get(info->fwnode); + trigger->ops = info->ops; + trigger->priv = info->priv; + + scoped_guard(mutex, &spi_offload_triggers_lock) + list_add_tail(&trigger->list, &spi_offload_triggers); + + return devm_add_action_or_reset(dev, spi_offload_trigger_unregister, trigger); +} +EXPORT_SYMBOL_GPL(devm_spi_offload_trigger_register); + +/** + * spi_offload_trigger_get_priv() - Get the private data for the trigger + * + * @trigger: Offload trigger instance. + * + * Return: Private data for the trigger. 
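Condensing the registration contract enforced above (fwnode, ops and ops->match are mandatory; every other callback is optional) into a minimal provider skeleton; the my_* names are illustrative:

#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/spi/offload/provider.h>

static bool my_trigger_match(struct spi_offload_trigger *trigger,
			     enum spi_offload_trigger_type type,
			     u64 *args, u32 nargs)
{
	return type == SPI_OFFLOAD_TRIGGER_PERIODIC && !nargs;
}

static const struct spi_offload_trigger_ops my_trigger_ops = {
	.match = my_trigger_match,	/* the only mandatory callback */
};

static int my_trigger_probe(struct platform_device *pdev)
{
	struct spi_offload_trigger_info info = {
		.fwnode = dev_fwnode(&pdev->dev),
		.ops = &my_trigger_ops,
		/* .priv, if set, comes back via spi_offload_trigger_get_priv() */
	};

	return devm_spi_offload_trigger_register(&pdev->dev, &info);
}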
+ */ +void *spi_offload_trigger_get_priv(struct spi_offload_trigger *trigger) +{ + return trigger->priv; +} +EXPORT_SYMBOL_GPL(spi_offload_trigger_get_priv); diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 29c616e2c408..70bb74b3bd9c 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -134,6 +134,7 @@ struct omap2_mcspi { size_t max_xfer_len; u32 ref_clk_hz; bool use_multi_mode; + bool last_msg_kept_cs; }; struct omap2_mcspi_cs { @@ -1269,6 +1270,10 @@ static int omap2_mcspi_prepare_message(struct spi_controller *ctlr, * multi-mode is applicable. */ mcspi->use_multi_mode = true; + + if (mcspi->last_msg_kept_cs) + mcspi->use_multi_mode = false; + list_for_each_entry(tr, &msg->transfers, transfer_list) { if (!tr->bits_per_word) bits_per_word = msg->spi->bits_per_word; @@ -1287,18 +1292,19 @@ static int omap2_mcspi_prepare_message(struct spi_controller *ctlr, mcspi->use_multi_mode = false; } - /* Check if transfer asks to change the CS status after the transfer */ - if (!tr->cs_change) - mcspi->use_multi_mode = false; - - /* - * If at least one message is not compatible, switch back to single mode - * - * The bits_per_word of certain transfer can be different, but it will have no - * impact on the signal itself. - */ - if (!mcspi->use_multi_mode) - break; + if (list_is_last(&tr->transfer_list, &msg->transfers)) { + /* Check if transfer asks to keep the CS status after the whole message */ + if (tr->cs_change) { + mcspi->use_multi_mode = false; + mcspi->last_msg_kept_cs = true; + } else { + mcspi->last_msg_kept_cs = false; + } + } else { + /* Check if transfer asks to change the CS status after the transfer */ + if (!tr->cs_change) + mcspi->use_multi_mode = false; + } } omap2_mcspi_set_mode(ctlr); diff --git a/drivers/spi/spi-pci1xxxx.c b/drivers/spi/spi-pci1xxxx.c index fc98979eba48..e27642c4dea4 100644 --- a/drivers/spi/spi-pci1xxxx.c +++ b/drivers/spi/spi-pci1xxxx.c @@ -685,6 +685,17 @@ static irqreturn_t pci1xxxx_spi_isr(int irq, void *dev) return pci1xxxx_spi_isr_io(irq, dev); } +static irqreturn_t pci1xxxx_spi_shared_isr(int irq, void *dev) +{ + struct pci1xxxx_spi *par = dev; + u8 i = 0; + + for (i = 0; i < par->total_hw_instances; i++) + pci1xxxx_spi_isr(irq, par->spi_int[i]); + + return IRQ_HANDLED; +} + static bool pci1xxxx_spi_can_dma(struct spi_controller *host, struct spi_device *spi, struct spi_transfer *xfer) @@ -702,6 +713,7 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id * struct device *dev = &pdev->dev; struct pci1xxxx_spi *spi_bus; struct spi_controller *spi_host; + int num_vector = 0; u32 regval; int ret; @@ -741,21 +753,19 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id * if (ret) return -ENOMEM; - ret = pci_request_regions(pdev, DRV_NAME); + ret = pcim_request_all_regions(pdev, DRV_NAME); if (ret) return -ENOMEM; spi_bus->reg_base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0)); - if (!spi_bus->reg_base) { - ret = -EINVAL; - goto error; - } + if (!spi_bus->reg_base) + return -EINVAL; - ret = pci_alloc_irq_vectors(pdev, hw_inst_cnt, hw_inst_cnt, - PCI_IRQ_ALL_TYPES); - if (ret < 0) { + num_vector = pci_alloc_irq_vectors(pdev, 1, hw_inst_cnt, + PCI_IRQ_INTX | PCI_IRQ_MSI); + if (num_vector < 0) { dev_err(&pdev->dev, "Error allocating MSI vectors\n"); - goto error; + return num_vector; } init_completion(&spi_sub_ptr->spi_xfer_done); @@ -767,19 +777,24 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id * 
SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst)); spi_sub_ptr->irq = pci_irq_vector(pdev, 0); - ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq, - pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS, - pci_name(pdev), spi_sub_ptr); + if (num_vector >= hw_inst_cnt) + ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq, + pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS, + pci_name(pdev), spi_sub_ptr); + else + ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq, + pci1xxxx_spi_shared_isr, + PCI1XXXX_IRQ_FLAGS | IRQF_SHARED, + pci_name(pdev), spi_bus); if (ret < 0) { dev_err(&pdev->dev, "Unable to request irq : %d", spi_sub_ptr->irq); - ret = -ENODEV; - goto error; + return -ENODEV; } ret = pci1xxxx_spi_dma_init(spi_bus, spi_sub_ptr->irq); if (ret && ret != -EOPNOTSUPP) - goto error; + return ret; /* This register is only applicable for 1st instance */ regval = readl(spi_bus->reg_base + SPI_PCI_CTRL_REG_OFFSET(0)); @@ -801,15 +816,16 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id * regval &= ~SPI_INTR; writel(regval, spi_bus->reg_base + SPI_MST_EVENT_MASK_REG_OFFSET(spi_sub_ptr->hw_inst)); - spi_sub_ptr->irq = pci_irq_vector(pdev, iter); - ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq, - pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS, - pci_name(pdev), spi_sub_ptr); - if (ret < 0) { - dev_err(&pdev->dev, "Unable to request irq : %d", - spi_sub_ptr->irq); - ret = -ENODEV; - goto error; + if (num_vector >= hw_inst_cnt) { + spi_sub_ptr->irq = pci_irq_vector(pdev, iter); + ret = devm_request_irq(&pdev->dev, spi_sub_ptr->irq, + pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS, + pci_name(pdev), spi_sub_ptr); + if (ret < 0) { + dev_err(&pdev->dev, "Unable to request irq : %d", + spi_sub_ptr->irq); + return -ENODEV; + } } } @@ -828,15 +844,11 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id * spi_controller_set_devdata(spi_host, spi_sub_ptr); ret = devm_spi_register_controller(dev, spi_host); if (ret) - goto error; + return ret; } pci_set_drvdata(pdev, spi_bus); return 0; - -error: - pci_release_regions(pdev); - return ret; } static void store_restore_config(struct pci1xxxx_spi *spi_ptr, diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c new file mode 100644 index 000000000000..3b757e3d00c0 --- /dev/null +++ b/drivers/spi/spi-qpic-snand.c @@ -0,0 +1,1675 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
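Stepping back to the pci1xxxx hunks just above the new spi-qpic-snand file: probe now accepts anywhere between one vector and one per SPI instance, and when the platform grants fewer it falls back to a single shared line whose handler fans out to every instance. A condensed sketch of that decision, reusing the driver's handler names, flags and per-instance cookies; surrounding setup is trimmed:

static int pci1xxxx_request_irqs_sketch(struct pci_dev *pdev,
					struct pci1xxxx_spi *spi_bus,
					int hw_inst_cnt)
{
	int num_vector, i, ret;

	/* take what the platform offers: between 1 and hw_inst_cnt vectors */
	num_vector = pci_alloc_irq_vectors(pdev, 1, hw_inst_cnt,
					   PCI_IRQ_INTX | PCI_IRQ_MSI);
	if (num_vector < 0)
		return num_vector;

	if (num_vector < hw_inst_cnt)
		/* one shared line; the handler walks all instances */
		return devm_request_irq(&pdev->dev, pci_irq_vector(pdev, 0),
					pci1xxxx_spi_shared_isr,
					PCI1XXXX_IRQ_FLAGS | IRQF_SHARED,
					pci_name(pdev), spi_bus);

	for (i = 0; i < hw_inst_cnt; i++) {
		/* enough vectors: one dedicated handler per instance */
		ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
				       pci1xxxx_spi_isr, PCI1XXXX_IRQ_FLAGS,
				       pci_name(pdev), spi_bus->spi_int[i]);
		if (ret)
			return ret;
	}

	return 0;
}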
+ * + * Authors: + * Md Sadre Alam <quic_mdalam@quicinc.com> + * Sricharan R <quic_srichara@quicinc.com> + * Varadarajan Narayanan <quic_varada@quicinc.com> + */ +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/dma/qcom_adm.h> +#include <linux/dma/qcom_bam_dma.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/mtd/nand-qpic-common.h> +#include <linux/mtd/spinand.h> +#include <linux/bitfield.h> + +#define NAND_FLASH_SPI_CFG 0xc0 +#define NAND_NUM_ADDR_CYCLES 0xc4 +#define NAND_BUSY_CHECK_WAIT_CNT 0xc8 +#define NAND_FLASH_FEATURES 0xf64 + +/* QSPI NAND config reg bits */ +#define LOAD_CLK_CNTR_INIT_EN BIT(28) +#define CLK_CNTR_INIT_VAL_VEC 0x924 +#define CLK_CNTR_INIT_VAL_VEC_MASK GENMASK(27, 16) +#define FEA_STATUS_DEV_ADDR 0xc0 +#define FEA_STATUS_DEV_ADDR_MASK GENMASK(15, 8) +#define SPI_CFG BIT(0) +#define SPI_NUM_ADDR 0xDA4DB +#define SPI_WAIT_CNT 0x10 +#define QPIC_QSPI_NUM_CS 1 +#define SPI_TRANSFER_MODE_x1 BIT(29) +#define SPI_TRANSFER_MODE_x4 (3 << 29) +#define SPI_WP BIT(28) +#define SPI_HOLD BIT(27) +#define QPIC_SET_FEATURE BIT(31) + +#define SPINAND_RESET 0xff +#define SPINAND_READID 0x9f +#define SPINAND_GET_FEATURE 0x0f +#define SPINAND_SET_FEATURE 0x1f +#define SPINAND_READ 0x13 +#define SPINAND_ERASE 0xd8 +#define SPINAND_WRITE_EN 0x06 +#define SPINAND_PROGRAM_EXECUTE 0x10 +#define SPINAND_PROGRAM_LOAD 0x84 + +#define ACC_FEATURE 0xe +#define BAD_BLOCK_MARKER_SIZE 0x2 +#define OOB_BUF_SIZE 128 +#define ecceng_to_qspi(eng) container_of(eng, struct qpic_spi_nand, ecc_eng) + +struct qpic_snand_op { + u32 cmd_reg; + u32 addr1_reg; + u32 addr2_reg; +}; + +struct snandc_read_status { + __le32 snandc_flash; + __le32 snandc_buffer; + __le32 snandc_erased_cw; +}; + +/* + * ECC state struct + * @corrected: ECC corrected + * @bitflips: Max bit flip + * @failed: ECC failed + */ +struct qcom_ecc_stats { + u32 corrected; + u32 bitflips; + u32 failed; +}; + +struct qpic_ecc { + struct device *dev; + int ecc_bytes_hw; + int spare_bytes; + int bbm_size; + int ecc_mode; + int bytes; + int steps; + int step_size; + int strength; + int cw_size; + int cw_data; + u32 cfg0; + u32 cfg1; + u32 cfg0_raw; + u32 cfg1_raw; + u32 ecc_buf_cfg; + u32 ecc_bch_cfg; + u32 clrflashstatus; + u32 clrreadstatus; + bool bch_enabled; +}; + +struct qpic_spi_nand { + struct qcom_nand_controller *snandc; + struct spi_controller *ctlr; + struct mtd_info *mtd; + struct clk *iomacro_clk; + struct qpic_ecc *ecc; + struct qcom_ecc_stats ecc_stats; + struct nand_ecc_engine ecc_eng; + u8 *data_buf; + u8 *oob_buf; + __le32 addr1; + __le32 addr2; + __le32 cmd; + u32 num_cw; + bool oob_rw; + bool page_rw; + bool raw_rw; +}; + +static void qcom_spi_set_read_loc_first(struct qcom_nand_controller *snandc, + int reg, int cw_offset, int read_size, + int is_last_read_loc) +{ + __le32 locreg_val; + u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) | + FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) | + FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc); + + locreg_val = cpu_to_le32(val); + + if (reg == NAND_READ_LOCATION_0) + snandc->regs->read_location0 = locreg_val; + else if (reg == NAND_READ_LOCATION_1) + snandc->regs->read_location1 = locreg_val; + else if (reg == NAND_READ_LOCATION_2) + snandc->regs->read_location2 = locreg_val; + else if (reg == NAND_READ_LOCATION_3) + snandc->regs->read_location3 = locreg_val; +} + +static void 
qcom_spi_set_read_loc_last(struct qcom_nand_controller *snandc, + int reg, int cw_offset, int read_size, + int is_last_read_loc) +{ + __le32 locreg_val; + u32 val = FIELD_PREP(READ_LOCATION_OFFSET_MASK, cw_offset) | + FIELD_PREP(READ_LOCATION_SIZE_MASK, read_size) | + FIELD_PREP(READ_LOCATION_LAST_MASK, is_last_read_loc); + + locreg_val = cpu_to_le32(val); + + if (reg == NAND_READ_LOCATION_LAST_CW_0) + snandc->regs->read_location_last0 = locreg_val; + else if (reg == NAND_READ_LOCATION_LAST_CW_1) + snandc->regs->read_location_last1 = locreg_val; + else if (reg == NAND_READ_LOCATION_LAST_CW_2) + snandc->regs->read_location_last2 = locreg_val; + else if (reg == NAND_READ_LOCATION_LAST_CW_3) + snandc->regs->read_location_last3 = locreg_val; +} + +static struct qcom_nand_controller *nand_to_qcom_snand(struct nand_device *nand) +{ + struct nand_ecc_engine *eng = nand->ecc.engine; + struct qpic_spi_nand *qspi = ecceng_to_qspi(eng); + + return qspi->snandc; +} + +static int qcom_spi_init(struct qcom_nand_controller *snandc) +{ + u32 snand_cfg_val = 0x0; + int ret; + + snand_cfg_val = FIELD_PREP(CLK_CNTR_INIT_VAL_VEC_MASK, CLK_CNTR_INIT_VAL_VEC) | + FIELD_PREP(LOAD_CLK_CNTR_INIT_EN, 0) | + FIELD_PREP(FEA_STATUS_DEV_ADDR_MASK, FEA_STATUS_DEV_ADDR) | + FIELD_PREP(SPI_CFG, 0); + + snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val); + snandc->regs->num_addr_cycle = cpu_to_le32(SPI_NUM_ADDR); + snandc->regs->busy_wait_cnt = cpu_to_le32(SPI_WAIT_CNT); + + qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0); + + snand_cfg_val &= ~LOAD_CLK_CNTR_INIT_EN; + snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val); + + qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0); + + qcom_write_reg_dma(snandc, &snandc->regs->num_addr_cycle, NAND_NUM_ADDR_CYCLES, 1, 0); + qcom_write_reg_dma(snandc, &snandc->regs->busy_wait_cnt, NAND_BUSY_CHECK_WAIT_CNT, 1, + NAND_BAM_NEXT_SGL); + + ret = qcom_submit_descs(snandc); + if (ret) { + dev_err(snandc->dev, "failure in submitting spi init descriptor\n"); + return ret; + } + + return ret; +} + +static int qcom_spi_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_device *nand = mtd_to_nanddev(mtd); + struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); + struct qpic_ecc *qecc = snandc->qspi->ecc; + + if (section > 1) + return -ERANGE; + + oobregion->length = qecc->ecc_bytes_hw + qecc->spare_bytes; + oobregion->offset = mtd->oobsize - oobregion->length; + + return 0; +} + +static int qcom_spi_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_device *nand = mtd_to_nanddev(mtd); + struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); + struct qpic_ecc *qecc = snandc->qspi->ecc; + + if (section) + return -ERANGE; + + oobregion->length = qecc->steps * 4; + oobregion->offset = ((qecc->steps - 1) * qecc->bytes) + qecc->bbm_size; + + return 0; +} + +static const struct mtd_ooblayout_ops qcom_spi_ooblayout = { + .ecc = qcom_spi_ooblayout_ecc, + .free = qcom_spi_ooblayout_free, +}; + +static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand) +{ + struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); + struct nand_ecc_props *reqs = &nand->ecc.requirements; + struct nand_ecc_props *user = &nand->ecc.user_conf; + struct nand_ecc_props *conf = &nand->ecc.ctx.conf; + struct mtd_info *mtd = nanddev_to_mtd(nand); + int cwperpage, bad_block_byte, ret; + struct qpic_ecc *ecc_cfg; + + cwperpage = 
mtd->writesize / NANDC_STEP_SIZE; + snandc->qspi->num_cw = cwperpage; + + ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL); + if (!ecc_cfg) + return -ENOMEM; + + if (user->step_size && user->strength) { + ecc_cfg->step_size = user->step_size; + ecc_cfg->strength = user->strength; + } else if (reqs->step_size && reqs->strength) { + ecc_cfg->step_size = reqs->step_size; + ecc_cfg->strength = reqs->strength; + } else { + /* use defaults */ + ecc_cfg->step_size = NANDC_STEP_SIZE; + ecc_cfg->strength = 4; + } + + if (ecc_cfg->step_size != NANDC_STEP_SIZE) { + dev_err(snandc->dev, + "only %u bytes ECC step size is supported\n", + NANDC_STEP_SIZE); + ret = -EOPNOTSUPP; + goto err_free_ecc_cfg; + } + + if (ecc_cfg->strength != 4) { + dev_err(snandc->dev, + "only 4 bits ECC strength is supported\n"); + ret = -EOPNOTSUPP; + goto err_free_ecc_cfg; + } + + snandc->qspi->oob_buf = kmalloc(mtd->writesize + mtd->oobsize, + GFP_KERNEL); + if (!snandc->qspi->oob_buf) { + ret = -ENOMEM; + goto err_free_ecc_cfg; + } + + memset(snandc->qspi->oob_buf, 0xff, mtd->writesize + mtd->oobsize); + + nand->ecc.ctx.priv = ecc_cfg; + snandc->qspi->mtd = mtd; + + ecc_cfg->ecc_bytes_hw = 7; + ecc_cfg->spare_bytes = 4; + ecc_cfg->bbm_size = 1; + ecc_cfg->bch_enabled = true; + ecc_cfg->bytes = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes + ecc_cfg->bbm_size; + + ecc_cfg->steps = cwperpage; + ecc_cfg->cw_data = 516; + ecc_cfg->cw_size = ecc_cfg->cw_data + ecc_cfg->bytes; + bad_block_byte = mtd->writesize - ecc_cfg->cw_size * (cwperpage - 1) + 1; + + mtd_set_ooblayout(mtd, &qcom_spi_ooblayout); + + /* + * Free the temporary BAM transaction allocated initially by + * qcom_nandc_alloc(), and allocate a new one based on the + * updated max_cwperpage value. + */ + qcom_free_bam_transaction(snandc); + + snandc->max_cwperpage = cwperpage; + + snandc->bam_txn = qcom_alloc_bam_transaction(snandc); + if (!snandc->bam_txn) { + dev_err(snandc->dev, "failed to allocate BAM transaction\n"); + ret = -ENOMEM; + goto err_free_ecc_cfg; + } + + ecc_cfg->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) | + FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_data) | + FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 1) | + FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) | + FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, ecc_cfg->ecc_bytes_hw) | + FIELD_PREP(STATUS_BFR_READ, 0) | + FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) | + FIELD_PREP(SPARE_SIZE_BYTES_MASK, ecc_cfg->spare_bytes); + + ecc_cfg->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) | + FIELD_PREP(CS_ACTIVE_BSY, 0) | + FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) | + FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) | + FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) | + FIELD_PREP(WIDE_FLASH, 0) | + FIELD_PREP(ENABLE_BCH_ECC, ecc_cfg->bch_enabled); + + ecc_cfg->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) | + FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) | + FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_size) | + FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0); + + ecc_cfg->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) | + FIELD_PREP(CS_ACTIVE_BSY, 0) | + FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) | + FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) | + FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) | + FIELD_PREP(WIDE_FLASH, 0) | + FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1); + + ecc_cfg->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !ecc_cfg->bch_enabled) | + FIELD_PREP(ECC_SW_RESET, 0) | + FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, ecc_cfg->cw_data) | + FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) | + FIELD_PREP(ECC_MODE_MASK, 0) | + FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, 
ecc_cfg->ecc_bytes_hw); + + ecc_cfg->ecc_buf_cfg = FIELD_PREP(NUM_STEPS_MASK, 0x203); + ecc_cfg->clrflashstatus = FS_READY_BSY_N; + ecc_cfg->clrreadstatus = 0xc0; + + conf->step_size = ecc_cfg->step_size; + conf->strength = ecc_cfg->strength; + + snandc->regs->erased_cw_detect_cfg_clr = cpu_to_le32(CLR_ERASED_PAGE_DET); + snandc->regs->erased_cw_detect_cfg_set = cpu_to_le32(SET_ERASED_PAGE_DET); + + dev_dbg(snandc->dev, "ECC strength: %u bits per %u bytes\n", + ecc_cfg->strength, ecc_cfg->step_size); + + return 0; + +err_free_ecc_cfg: + kfree(ecc_cfg); + return ret; +} + +static void qcom_spi_ecc_cleanup_ctx_pipelined(struct nand_device *nand) +{ + struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand); + + kfree(ecc_cfg); +} + +static int qcom_spi_ecc_prepare_io_req_pipelined(struct nand_device *nand, + struct nand_page_io_req *req) +{ + struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); + struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand); + + snandc->qspi->ecc = ecc_cfg; + snandc->qspi->raw_rw = false; + snandc->qspi->oob_rw = false; + snandc->qspi->page_rw = false; + + if (req->datalen) + snandc->qspi->page_rw = true; + + if (req->ooblen) + snandc->qspi->oob_rw = true; + + if (req->mode == MTD_OPS_RAW) + snandc->qspi->raw_rw = true; + + return 0; +} + +static int qcom_spi_ecc_finish_io_req_pipelined(struct nand_device *nand, + struct nand_page_io_req *req) +{ + struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); + struct mtd_info *mtd = nanddev_to_mtd(nand); + + if (req->mode == MTD_OPS_RAW || req->type != NAND_PAGE_READ) + return 0; + + if (snandc->qspi->ecc_stats.failed) + mtd->ecc_stats.failed += snandc->qspi->ecc_stats.failed; + else + mtd->ecc_stats.corrected += snandc->qspi->ecc_stats.corrected; + + if (snandc->qspi->ecc_stats.failed) + return -EBADMSG; + else + return snandc->qspi->ecc_stats.bitflips; +} + +static struct nand_ecc_engine_ops qcom_spi_ecc_engine_ops_pipelined = { + .init_ctx = qcom_spi_ecc_init_ctx_pipelined, + .cleanup_ctx = qcom_spi_ecc_cleanup_ctx_pipelined, + .prepare_io_req = qcom_spi_ecc_prepare_io_req_pipelined, + .finish_io_req = qcom_spi_ecc_finish_io_req_pipelined, +}; + +/* helper to configure location register values */ +static void qcom_spi_set_read_loc(struct qcom_nand_controller *snandc, int cw, int reg, + int cw_offset, int read_size, int is_last_read_loc) +{ + int reg_base = NAND_READ_LOCATION_0; + int num_cw = snandc->qspi->num_cw; + + if (cw == (num_cw - 1)) + reg_base = NAND_READ_LOCATION_LAST_CW_0; + + reg_base += reg * 4; + + if (cw == (num_cw - 1)) + return qcom_spi_set_read_loc_last(snandc, reg_base, cw_offset, + read_size, is_last_read_loc); + else + return qcom_spi_set_read_loc_first(snandc, reg_base, cw_offset, + read_size, is_last_read_loc); +} + +static void +qcom_spi_config_cw_read(struct qcom_nand_controller *snandc, bool use_ecc, int cw) +{ + __le32 *reg = &snandc->regs->read_location0; + int num_cw = snandc->qspi->num_cw; + + qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL); + if (cw == (num_cw - 1)) { + reg = &snandc->regs->read_location_last0; + qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4, + NAND_BAM_NEXT_SGL); + } + + qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); + qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + + qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 2, 0); + qcom_read_reg_dma(snandc, NAND_ERASED_CW_DETECT_STATUS, 1, + NAND_BAM_NEXT_SGL); +} + +static int 
qcom_spi_block_erase(struct qcom_nand_controller *snandc) +{ + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; + int ret; + + snandc->buf_count = 0; + snandc->buf_start = 0; + qcom_clear_read_regs(snandc); + qcom_clear_bam_transaction(snandc); + + snandc->regs->cmd = snandc->qspi->cmd; + snandc->regs->addr0 = snandc->qspi->addr1; + snandc->regs->addr1 = snandc->qspi->addr2; + snandc->regs->cfg0 = cpu_to_le32((ecc_cfg->cfg0_raw & ~CW_PER_PAGE_MASK) | + FIELD_PREP(CW_PER_PAGE_MASK, 0)); + snandc->regs->cfg1 = cpu_to_le32(ecc_cfg->cfg1_raw); + snandc->regs->exec = cpu_to_le32(1); + + qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL); + qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL); + qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + + ret = qcom_submit_descs(snandc); + if (ret) { + dev_err(snandc->dev, "failure to erase block\n"); + return ret; + } + + return 0; +} + +static void qcom_spi_config_single_cw_page_read(struct qcom_nand_controller *snandc, + bool use_ecc, int cw) +{ + __le32 *reg = &snandc->regs->read_location0; + int num_cw = snandc->qspi->num_cw; + + qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0); + qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr, + NAND_ERASED_CW_DETECT_CFG, 1, 0); + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set, + NAND_ERASED_CW_DETECT_CFG, 1, + NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); + + if (cw == (num_cw - 1)) { + reg = &snandc->regs->read_location_last0; + qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4, NAND_BAM_NEXT_SGL); + } + qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); + qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + + qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, 0); +} + +static int qcom_spi_check_raw_flash_errors(struct qcom_nand_controller *snandc, int cw_cnt) +{ + int i; + + qcom_nandc_dev_to_mem(snandc, true); + + for (i = 0; i < cw_cnt; i++) { + u32 flash = le32_to_cpu(snandc->reg_read_buf[i]); + + if (flash & (FS_OP_ERR | FS_MPU_ERR)) + return -EIO; + } + + return 0; +} + +static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; + struct mtd_info *mtd = snandc->qspi->mtd; + int size, ret = 0; + int col, bbpos; + u32 cfg0, cfg1, ecc_bch_cfg; + u32 num_cw = snandc->qspi->num_cw; + + qcom_clear_bam_transaction(snandc); + qcom_clear_read_regs(snandc); + + size = ecc_cfg->cw_size; + col = ecc_cfg->cw_size * (num_cw - 1); + + memset(snandc->data_buffer, 0xff, size); + snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col)); + snandc->regs->addr1 = snandc->qspi->addr2; + + cfg0 = (ecc_cfg->cfg0_raw & ~CW_PER_PAGE_MASK) | + FIELD_PREP(CW_PER_PAGE_MASK, 0); + cfg1 = ecc_cfg->cfg1_raw; + ecc_bch_cfg = ECC_CFG_ECC_DISABLE; + + snandc->regs->cmd = snandc->qspi->cmd; + snandc->regs->cfg0 = cpu_to_le32(cfg0); + snandc->regs->cfg1 = cpu_to_le32(cfg1); + snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); + snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus); + snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus); + snandc->regs->exec = cpu_to_le32(1); + + qcom_spi_set_read_loc(snandc, num_cw - 1, 0, 0, ecc_cfg->cw_size, 1); + + qcom_spi_config_single_cw_page_read(snandc, false, num_cw - 1); 
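+ + /* + * The last codeword is read raw (with ECC bypassed), so the factory + * bad-block marker can be located at its physical offset in the + * spare area and normalized into the two bytes returned to the + * caller below. + */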
+ + qcom_read_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, size, 0); + + ret = qcom_submit_descs(snandc); + if (ret) { + dev_err(snandc->dev, "failed to read last cw\n"); + return ret; + } + + ret = qcom_spi_check_raw_flash_errors(snandc, 1); + if (ret) + return ret; + + bbpos = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1); + + if (snandc->data_buffer[bbpos] == 0xff) + snandc->data_buffer[bbpos + 1] = 0xff; + if (snandc->data_buffer[bbpos] != 0xff) + snandc->data_buffer[bbpos + 1] = snandc->data_buffer[bbpos]; + + memcpy(op->data.buf.in, snandc->data_buffer + bbpos, op->data.nbytes); + + return ret; +} + +static int qcom_spi_check_error(struct qcom_nand_controller *snandc) +{ + struct snandc_read_status *buf; + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; + int i, num_cw = snandc->qspi->num_cw; + bool flash_op_err = false, erased; + unsigned int max_bitflips = 0; + unsigned int uncorrectable_cws = 0; + + snandc->qspi->ecc_stats.failed = 0; + snandc->qspi->ecc_stats.corrected = 0; + + qcom_nandc_dev_to_mem(snandc, true); + buf = (struct snandc_read_status *)snandc->reg_read_buf; + + for (i = 0; i < num_cw; i++, buf++) { + u32 flash, buffer, erased_cw; + + flash = le32_to_cpu(buf->snandc_flash); + buffer = le32_to_cpu(buf->snandc_buffer); + erased_cw = le32_to_cpu(buf->snandc_erased_cw); + + if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) { + if (ecc_cfg->bch_enabled) + erased = (erased_cw & ERASED_CW) == ERASED_CW; + else + erased = false; + + if (!erased) + uncorrectable_cws |= BIT(i); + + } else if (flash & (FS_OP_ERR | FS_MPU_ERR)) { + flash_op_err = true; + } else { + unsigned int stat; + + stat = buffer & BS_CORRECTABLE_ERR_MSK; + + /* + * The exact number of the corrected bits is + * unknown because the hardware only reports the + * number of the corrected bytes. + * + * Since we have no better solution at the moment, + * report that value as the number of bit errors + * despite that it is inaccurate in most cases. 
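+ * (Each corrected byte contains at least one flipped bit, so the + * value reported here never overstates the real bit-flip count.)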
+ */ + if (stat && stat != ecc_cfg->strength) + dev_warn_once(snandc->dev, + "Warning: due to hw limitation, the reported number of the corrected bits may be inaccurate\n"); + + snandc->qspi->ecc_stats.corrected += stat; + max_bitflips = max(max_bitflips, stat); + } + } + + if (flash_op_err) + return -EIO; + + if (!uncorrectable_cws) + snandc->qspi->ecc_stats.bitflips = max_bitflips; + else + snandc->qspi->ecc_stats.failed++; + + return 0; +} + +static int qcom_spi_read_cw_raw(struct qcom_nand_controller *snandc, u8 *data_buf, + u8 *oob_buf, int cw) +{ + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; + struct mtd_info *mtd = snandc->qspi->mtd; + int data_size1, data_size2, oob_size1, oob_size2; + int ret, reg_off = FLASH_BUF_ACC, read_loc = 0; + int raw_cw = cw; + u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw; + int col; + + snandc->buf_count = 0; + snandc->buf_start = 0; + qcom_clear_read_regs(snandc); + qcom_clear_bam_transaction(snandc); + raw_cw = num_cw - 1; + + cfg0 = (ecc_cfg->cfg0_raw & ~CW_PER_PAGE_MASK) | + FIELD_PREP(CW_PER_PAGE_MASK, 0); + cfg1 = ecc_cfg->cfg1_raw; + ecc_bch_cfg = ECC_CFG_ECC_DISABLE; + + col = ecc_cfg->cw_size * cw; + + snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col)); + snandc->regs->addr1 = snandc->qspi->addr2; + snandc->regs->cmd = snandc->qspi->cmd; + snandc->regs->cfg0 = cpu_to_le32(cfg0); + snandc->regs->cfg1 = cpu_to_le32(cfg1); + snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); + snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus); + snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus); + snandc->regs->exec = cpu_to_le32(1); + + qcom_spi_set_read_loc(snandc, raw_cw, 0, 0, ecc_cfg->cw_size, 1); + + qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0); + qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); + qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0); + + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr, + NAND_ERASED_CW_DETECT_CFG, 1, 0); + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set, + NAND_ERASED_CW_DETECT_CFG, 1, + NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); + + data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1); + oob_size1 = ecc_cfg->bbm_size; + + if (cw == (num_cw - 1)) { + data_size2 = NANDC_STEP_SIZE - data_size1 - + ((num_cw - 1) * 4); + oob_size2 = (num_cw * 4) + ecc_cfg->ecc_bytes_hw + + ecc_cfg->spare_bytes; + } else { + data_size2 = ecc_cfg->cw_data - data_size1; + oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes; + } + + qcom_spi_set_read_loc(snandc, cw, 0, read_loc, data_size1, 0); + read_loc += data_size1; + + qcom_spi_set_read_loc(snandc, cw, 1, read_loc, oob_size1, 0); + read_loc += oob_size1; + + qcom_spi_set_read_loc(snandc, cw, 2, read_loc, data_size2, 0); + read_loc += data_size2; + + qcom_spi_set_read_loc(snandc, cw, 3, read_loc, oob_size2, 1); + + qcom_spi_config_cw_read(snandc, false, raw_cw); + + qcom_read_data_dma(snandc, reg_off, data_buf, data_size1, 0); + reg_off += data_size1; + + qcom_read_data_dma(snandc, reg_off, oob_buf, oob_size1, 0); + reg_off += oob_size1; + + qcom_read_data_dma(snandc, reg_off, data_buf + data_size1, data_size2, 0); + reg_off += data_size2; + + qcom_read_data_dma(snandc, reg_off, oob_buf + oob_size1, oob_size2, 0); + + ret = qcom_submit_descs(snandc); + if (ret) { + dev_err(snandc->dev, "failure to read raw cw %d\n", cw); + return ret; + } + + return qcom_spi_check_raw_flash_errors(snandc, 1); +} + +static int 
qcom_spi_read_page_raw(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; + u8 *data_buf = NULL, *oob_buf = NULL; + int ret, cw; + u32 num_cw = snandc->qspi->num_cw; + + if (snandc->qspi->page_rw) + data_buf = op->data.buf.in; + + oob_buf = snandc->qspi->oob_buf; + memset(oob_buf, 0xff, OOB_BUF_SIZE); + + for (cw = 0; cw < num_cw; cw++) { + ret = qcom_spi_read_cw_raw(snandc, data_buf, oob_buf, cw); + if (ret) + return ret; + + if (data_buf) + data_buf += ecc_cfg->cw_data; + if (oob_buf) + oob_buf += ecc_cfg->bytes; + } + + return 0; +} + +static int qcom_spi_read_page_ecc(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; + u8 *data_buf = NULL, *oob_buf = NULL; + int ret, i; + u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw; + + data_buf = op->data.buf.in; + oob_buf = snandc->qspi->oob_buf; + + snandc->buf_count = 0; + snandc->buf_start = 0; + qcom_clear_read_regs(snandc); + + cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) | + FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1); + cfg1 = ecc_cfg->cfg1; + ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; + + snandc->regs->addr0 = snandc->qspi->addr1; + snandc->regs->addr1 = snandc->qspi->addr2; + snandc->regs->cmd = snandc->qspi->cmd; + snandc->regs->cfg0 = cpu_to_le32(cfg0); + snandc->regs->cfg1 = cpu_to_le32(cfg1); + snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); + snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus); + snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus); + snandc->regs->exec = cpu_to_le32(1); + + qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1); + + qcom_clear_bam_transaction(snandc); + + qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0); + qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr, + NAND_ERASED_CW_DETECT_CFG, 1, 0); + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set, + NAND_ERASED_CW_DETECT_CFG, 1, + NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); + + for (i = 0; i < num_cw; i++) { + int data_size, oob_size; + + if (i == (num_cw - 1)) { + data_size = 512 - ((num_cw - 1) << 2); + oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw + + ecc_cfg->spare_bytes; + } else { + data_size = ecc_cfg->cw_data; + oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes; + } + + if (data_buf && oob_buf) { + qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 0); + qcom_spi_set_read_loc(snandc, i, 1, data_size, oob_size, 1); + } else if (data_buf) { + qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 1); + } else { + qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1); + } + + qcom_spi_config_cw_read(snandc, true, i); + + if (data_buf) + qcom_read_data_dma(snandc, FLASH_BUF_ACC, data_buf, + data_size, 0); + if (oob_buf) { + int j; + + for (j = 0; j < ecc_cfg->bbm_size; j++) + *oob_buf++ = 0xff; + + qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size, + oob_buf, oob_size, 0); + } + + if (data_buf) + data_buf += data_size; + if (oob_buf) + oob_buf += oob_size; + } + + ret = qcom_submit_descs(snandc); + if (ret) { + dev_err(snandc->dev, "failure to read page\n"); + return ret; + } + + return qcom_spi_check_error(snandc); +} + +static int qcom_spi_read_page_oob(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; + u8 *oob_buf = NULL; + int ret, i; + u32 cfg0, cfg1, ecc_bch_cfg, num_cw 
= snandc->qspi->num_cw; + + oob_buf = op->data.buf.in; + + snandc->buf_count = 0; + snandc->buf_start = 0; + qcom_clear_read_regs(snandc); + qcom_clear_bam_transaction(snandc); + + cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) | + FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1); + cfg1 = ecc_cfg->cfg1; + ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; + + snandc->regs->addr0 = snandc->qspi->addr1; + snandc->regs->addr1 = snandc->qspi->addr2; + snandc->regs->cmd = snandc->qspi->cmd; + snandc->regs->cfg0 = cpu_to_le32(cfg0); + snandc->regs->cfg1 = cpu_to_le32(cfg1); + snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); + snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus); + snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus); + snandc->regs->exec = cpu_to_le32(1); + + qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1); + + qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0); + qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr, + NAND_ERASED_CW_DETECT_CFG, 1, 0); + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set, + NAND_ERASED_CW_DETECT_CFG, 1, + NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); + + for (i = 0; i < num_cw; i++) { + int data_size, oob_size; + + if (i == (num_cw - 1)) { + data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2); + oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw + + ecc_cfg->spare_bytes; + } else { + data_size = ecc_cfg->cw_data; + oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes; + } + + qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1); + + qcom_spi_config_cw_read(snandc, true, i); + + if (oob_buf) { + int j; + + for (j = 0; j < ecc_cfg->bbm_size; j++) + *oob_buf++ = 0xff; + + qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size, + oob_buf, oob_size, 0); + } + + if (oob_buf) + oob_buf += oob_size; + } + + ret = qcom_submit_descs(snandc); + if (ret) { + dev_err(snandc->dev, "failure to read oob\n"); + return ret; + } + + return qcom_spi_check_error(snandc); +} + +static int qcom_spi_read_page(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + if (snandc->qspi->page_rw && snandc->qspi->raw_rw) + return qcom_spi_read_page_raw(snandc, op); + + if (snandc->qspi->page_rw) + return qcom_spi_read_page_ecc(snandc, op); + + if (snandc->qspi->oob_rw && snandc->qspi->raw_rw) + return qcom_spi_read_last_cw(snandc, op); + + if (snandc->qspi->oob_rw) + return qcom_spi_read_page_oob(snandc, op); + + return 0; +} + +static void qcom_spi_config_page_write(struct qcom_nand_controller *snandc) +{ + qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0); + qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); + qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, + 1, NAND_BAM_NEXT_SGL); +} + +static void qcom_spi_config_cw_write(struct qcom_nand_controller *snandc) +{ + qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); + qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); + + qcom_write_reg_dma(snandc, &snandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0); + qcom_write_reg_dma(snandc, &snandc->regs->clrreadstatus, NAND_READ_STATUS, 1, + NAND_BAM_NEXT_SGL); +} + +static int qcom_spi_program_raw(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; + struct 
mtd_info *mtd = snandc->qspi->mtd; + u8 *data_buf = NULL, *oob_buf = NULL; + int i, ret; + int num_cw = snandc->qspi->num_cw; + u32 cfg0, cfg1, ecc_bch_cfg; + + cfg0 = (ecc_cfg->cfg0_raw & ~CW_PER_PAGE_MASK) | + FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1); + cfg1 = ecc_cfg->cfg1_raw; + ecc_bch_cfg = ECC_CFG_ECC_DISABLE; + + data_buf = snandc->qspi->data_buf; + + oob_buf = snandc->qspi->oob_buf; + memset(oob_buf, 0xff, OOB_BUF_SIZE); + + snandc->buf_count = 0; + snandc->buf_start = 0; + qcom_clear_read_regs(snandc); + qcom_clear_bam_transaction(snandc); + + snandc->regs->addr0 = snandc->qspi->addr1; + snandc->regs->addr1 = snandc->qspi->addr2; + snandc->regs->cmd = snandc->qspi->cmd; + snandc->regs->cfg0 = cpu_to_le32(cfg0); + snandc->regs->cfg1 = cpu_to_le32(cfg1); + snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); + snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus); + snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus); + snandc->regs->exec = cpu_to_le32(1); + + qcom_spi_config_page_write(snandc); + + for (i = 0; i < num_cw; i++) { + int data_size1, data_size2, oob_size1, oob_size2; + int reg_off = FLASH_BUF_ACC; + + data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1); + oob_size1 = ecc_cfg->bbm_size; + + if (i == (num_cw - 1)) { + data_size2 = NANDC_STEP_SIZE - data_size1 - + ((num_cw - 1) << 2); + oob_size2 = (num_cw << 2) + ecc_cfg->ecc_bytes_hw + + ecc_cfg->spare_bytes; + } else { + data_size2 = ecc_cfg->cw_data - data_size1; + oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes; + } + + qcom_write_data_dma(snandc, reg_off, data_buf, data_size1, + NAND_BAM_NO_EOT); + reg_off += data_size1; + data_buf += data_size1; + + qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size1, + NAND_BAM_NO_EOT); + oob_buf += oob_size1; + reg_off += oob_size1; + + qcom_write_data_dma(snandc, reg_off, data_buf, data_size2, + NAND_BAM_NO_EOT); + reg_off += data_size2; + data_buf += data_size2; + + qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size2, 0); + oob_buf += oob_size2; + + qcom_spi_config_cw_write(snandc); + } + + ret = qcom_submit_descs(snandc); + if (ret) { + dev_err(snandc->dev, "failure to write raw page\n"); + return ret; + } + + return 0; +} + +static int qcom_spi_program_ecc(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; + u8 *data_buf = NULL, *oob_buf = NULL; + int i, ret; + int num_cw = snandc->qspi->num_cw; + u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg; + + cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) | + FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1); + cfg1 = ecc_cfg->cfg1; + ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; + ecc_buf_cfg = ecc_cfg->ecc_buf_cfg; + + if (snandc->qspi->data_buf) + data_buf = snandc->qspi->data_buf; + + oob_buf = snandc->qspi->oob_buf; + + snandc->buf_count = 0; + snandc->buf_start = 0; + qcom_clear_read_regs(snandc); + qcom_clear_bam_transaction(snandc); + + snandc->regs->addr0 = snandc->qspi->addr1; + snandc->regs->addr1 = snandc->qspi->addr2; + snandc->regs->cmd = snandc->qspi->cmd; + snandc->regs->cfg0 = cpu_to_le32(cfg0); + snandc->regs->cfg1 = cpu_to_le32(cfg1); + snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); + snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg); + snandc->regs->exec = cpu_to_le32(1); + + qcom_spi_config_page_write(snandc); + + for (i = 0; i < num_cw; i++) { + int data_size, oob_size; + + if (i == (num_cw - 1)) { + data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2); + oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw + + 
ecc_cfg->spare_bytes; + } else { + data_size = ecc_cfg->cw_data; + oob_size = ecc_cfg->bytes; + } + + if (data_buf) + qcom_write_data_dma(snandc, FLASH_BUF_ACC, data_buf, data_size, + i == (num_cw - 1) ? NAND_BAM_NO_EOT : 0); + + if (i == (num_cw - 1)) { + if (oob_buf) { + oob_buf += ecc_cfg->bbm_size; + qcom_write_data_dma(snandc, FLASH_BUF_ACC + data_size, + oob_buf, oob_size, 0); + } + } + + qcom_spi_config_cw_write(snandc); + + if (data_buf) + data_buf += data_size; + if (oob_buf) + oob_buf += oob_size; + } + + ret = qcom_submit_descs(snandc); + if (ret) { + dev_err(snandc->dev, "failure to write page\n"); + return ret; + } + + return 0; +} + +static int qcom_spi_program_oob(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; + u8 *oob_buf = NULL; + int ret, col, data_size, oob_size; + int num_cw = snandc->qspi->num_cw; + u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg; + + cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) | + FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1); + cfg1 = ecc_cfg->cfg1; + ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; + ecc_buf_cfg = ecc_cfg->ecc_buf_cfg; + + col = ecc_cfg->cw_size * (num_cw - 1); + + oob_buf = snandc->qspi->data_buf; + + snandc->buf_count = 0; + snandc->buf_start = 0; + qcom_clear_read_regs(snandc); + qcom_clear_bam_transaction(snandc); + snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col)); + snandc->regs->addr1 = snandc->qspi->addr2; + snandc->regs->cmd = snandc->qspi->cmd; + snandc->regs->cfg0 = cpu_to_le32(cfg0); + snandc->regs->cfg1 = cpu_to_le32(cfg1); + snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); + snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg); + snandc->regs->exec = cpu_to_le32(1); + + /* calculate the data and oob size for the last codeword/step */ + data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2); + oob_size = snandc->qspi->mtd->oobavail; + + memset(snandc->data_buffer, 0xff, ecc_cfg->cw_data); + /* override new oob content to last codeword */ + mtd_ooblayout_get_databytes(snandc->qspi->mtd, snandc->data_buffer + data_size, + oob_buf, 0, snandc->qspi->mtd->oobavail); + qcom_spi_config_page_write(snandc); + qcom_write_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, data_size + oob_size, 0); + qcom_spi_config_cw_write(snandc); + + ret = qcom_submit_descs(snandc); + if (ret) { + dev_err(snandc->dev, "failure to write oob\n"); + return ret; + } + + return 0; +} + +static int qcom_spi_program_execute(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + if (snandc->qspi->page_rw && snandc->qspi->raw_rw) + return qcom_spi_program_raw(snandc, op); + + if (snandc->qspi->page_rw) + return qcom_spi_program_ecc(snandc, op); + + if (snandc->qspi->oob_rw) + return qcom_spi_program_oob(snandc, op); + + return 0; +} + +static int qcom_spi_cmd_mapping(struct qcom_nand_controller *snandc, u32 opcode, u32 *cmd) +{ + switch (opcode) { + case SPINAND_RESET: + *cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_RESET_DEVICE); + break; + case SPINAND_READID: + *cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_FETCH_ID); + break; + case SPINAND_GET_FEATURE: + *cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE); + break; + case SPINAND_SET_FEATURE: + *cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE | + QPIC_SET_FEATURE); + break; + case SPINAND_READ: + if (snandc->qspi->raw_rw) { + *cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 | + SPI_WP | SPI_HOLD | OP_PAGE_READ); + } else { + *cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 
| + SPI_WP | SPI_HOLD | OP_PAGE_READ_WITH_ECC); + } + + break; + case SPINAND_ERASE: + *cmd = OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE | SPI_WP | + SPI_HOLD | SPI_TRANSFER_MODE_x1; + break; + case SPINAND_WRITE_EN: + *cmd = SPINAND_WRITE_EN; + break; + case SPINAND_PROGRAM_EXECUTE: + *cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 | + SPI_WP | SPI_HOLD | OP_PROGRAM_PAGE); + break; + case SPINAND_PROGRAM_LOAD: + *cmd = SPINAND_PROGRAM_LOAD; + break; + default: + dev_err(snandc->dev, "Opcode not supported: %u\n", opcode); + return -EOPNOTSUPP; + } + + return 0; +} + +static int qcom_spi_write_page(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + int ret; + u32 cmd; + + ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode, &cmd); + if (ret < 0) + return ret; + + if (op->cmd.opcode == SPINAND_PROGRAM_LOAD) + snandc->qspi->data_buf = (u8 *)op->data.buf.out; + + return 0; +} + +static int qcom_spi_send_cmdaddr(struct qcom_nand_controller *snandc, + const struct spi_mem_op *op) +{ + struct qpic_snand_op s_op = {}; + u32 cmd; + int ret, opcode; + + ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode, &cmd); + if (ret < 0) + return ret; + + s_op.cmd_reg = cmd; + s_op.addr1_reg = op->addr.val; + s_op.addr2_reg = 0; + + opcode = op->cmd.opcode; + + switch (opcode) { + case SPINAND_WRITE_EN: + return 0; + case SPINAND_PROGRAM_EXECUTE: + s_op.addr1_reg = op->addr.val << 16; + s_op.addr2_reg = op->addr.val >> 16 & 0xff; + snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg); + snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg); + snandc->qspi->cmd = cpu_to_le32(cmd); + return qcom_spi_program_execute(snandc, op); + case SPINAND_READ: + s_op.addr1_reg = (op->addr.val << 16); + s_op.addr2_reg = op->addr.val >> 16 & 0xff; + snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg); + snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg); + snandc->qspi->cmd = cpu_to_le32(cmd); + return 0; + case SPINAND_ERASE: + s_op.addr2_reg = (op->addr.val >> 16) & 0xffff; + s_op.addr1_reg = op->addr.val; + snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg << 16); + snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg); + snandc->qspi->cmd = cpu_to_le32(cmd); + return qcom_spi_block_erase(snandc); + default: + break; + } + + snandc->buf_count = 0; + snandc->buf_start = 0; + qcom_clear_read_regs(snandc); + qcom_clear_bam_transaction(snandc); + + snandc->regs->cmd = cpu_to_le32(s_op.cmd_reg); + snandc->regs->exec = cpu_to_le32(1); + snandc->regs->addr0 = cpu_to_le32(s_op.addr1_reg); + snandc->regs->addr1 = cpu_to_le32(s_op.addr2_reg); + + qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL); + qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); + + ret = qcom_submit_descs(snandc); + if (ret) + dev_err(snandc->dev, "failure in submitting cmd descriptor\n"); + + return ret; +} + +static int qcom_spi_io_op(struct qcom_nand_controller *snandc, const struct spi_mem_op *op) +{ + int ret, val, opcode; + bool copy = false, copy_ftr = false; + + ret = qcom_spi_send_cmdaddr(snandc, op); + if (ret) + return ret; + + snandc->buf_count = 0; + snandc->buf_start = 0; + qcom_clear_read_regs(snandc); + qcom_clear_bam_transaction(snandc); + opcode = op->cmd.opcode; + + switch (opcode) { + case SPINAND_READID: + snandc->buf_count = 4; + qcom_read_reg_dma(snandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL); + copy = true; + break; + case SPINAND_GET_FEATURE: + snandc->buf_count = 4; + qcom_read_reg_dma(snandc, NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL); + copy_ftr = true; + break; + 
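/* + * SET_FEATURE carries a payload from the host: the value is staged + * in the controller's FLASH_FEATURES register so it goes out to the + * device together with the command. + */ + 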
case SPINAND_SET_FEATURE: + snandc->regs->flash_feature = cpu_to_le32(*(u32 *)op->data.buf.out); + qcom_write_reg_dma(snandc, &snandc->regs->flash_feature, + NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL); + break; + case SPINAND_PROGRAM_EXECUTE: + case SPINAND_WRITE_EN: + case SPINAND_RESET: + case SPINAND_ERASE: + case SPINAND_READ: + return 0; + default: + return -EOPNOTSUPP; + } + + ret = qcom_submit_descs(snandc); + if (ret) { + dev_err(snandc->dev, "failure in submitting descriptor for:%d\n", opcode); + return ret; + } + + if (copy) { + qcom_nandc_dev_to_mem(snandc, true); + memcpy(op->data.buf.in, snandc->reg_read_buf, snandc->buf_count); + } + + if (copy_ftr) { + qcom_nandc_dev_to_mem(snandc, true); + val = le32_to_cpu(*(__le32 *)snandc->reg_read_buf); + val >>= 8; + memcpy(op->data.buf.in, &val, snandc->buf_count); + } + + return 0; +} + +static bool qcom_spi_is_page_op(const struct spi_mem_op *op) +{ + if (op->addr.buswidth != 1 && op->addr.buswidth != 2 && op->addr.buswidth != 4) + return false; + + if (op->data.dir == SPI_MEM_DATA_IN) { + if (op->addr.buswidth == 4 && op->data.buswidth == 4) + return true; + + if (op->addr.nbytes == 2 && op->addr.buswidth == 1) + return true; + + } else if (op->data.dir == SPI_MEM_DATA_OUT) { + if (op->data.buswidth == 4) + return true; + if (op->addr.nbytes == 2 && op->addr.buswidth == 1) + return true; + } + + return false; +} + +static bool qcom_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + if (!spi_mem_default_supports_op(mem, op)) + return false; + + if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1) + return false; + + if (qcom_spi_is_page_op(op)) + return true; + + return ((!op->addr.nbytes || op->addr.buswidth == 1) && + (!op->dummy.nbytes || op->dummy.buswidth == 1) && + (!op->data.nbytes || op->data.buswidth == 1)); +} + +static int qcom_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + struct qcom_nand_controller *snandc = spi_controller_get_devdata(mem->spi->controller); + + dev_dbg(snandc->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode, + op->addr.val, op->addr.buswidth, op->addr.nbytes, + op->data.buswidth, op->data.nbytes); + + if (qcom_spi_is_page_op(op)) { + if (op->data.dir == SPI_MEM_DATA_IN) + return qcom_spi_read_page(snandc, op); + if (op->data.dir == SPI_MEM_DATA_OUT) + return qcom_spi_write_page(snandc, op); + } else { + return qcom_spi_io_op(snandc, op); + } + + return 0; +} + +static const struct spi_controller_mem_ops qcom_spi_mem_ops = { + .supports_op = qcom_spi_supports_op, + .exec_op = qcom_spi_exec_op, +}; + +static const struct spi_controller_mem_caps qcom_spi_mem_caps = { + .ecc = true, +}; + +static int qcom_spi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct spi_controller *ctlr; + struct qcom_nand_controller *snandc; + struct qpic_spi_nand *qspi; + struct qpic_ecc *ecc; + struct resource *res; + const void *dev_data; + int ret; + + ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); + if (!ecc) + return -ENOMEM; + + qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL); + if (!qspi) + return -ENOMEM; + + ctlr = __devm_spi_alloc_controller(dev, sizeof(*snandc), false); + if (!ctlr) + return -ENOMEM; + + platform_set_drvdata(pdev, ctlr); + + snandc = spi_controller_get_devdata(ctlr); + qspi->snandc = snandc; + + snandc->dev = dev; + snandc->qspi = qspi; + snandc->qspi->ctlr = ctlr; + snandc->qspi->ecc = ecc; + + dev_data = of_device_get_match_data(dev); + if (!dev_data) { + dev_err(&pdev->dev, "failed to get device data\n"); + 
return -ENODEV; + } + + snandc->props = dev_data; + snandc->dev = &pdev->dev; + + snandc->core_clk = devm_clk_get(dev, "core"); + if (IS_ERR(snandc->core_clk)) + return PTR_ERR(snandc->core_clk); + + snandc->aon_clk = devm_clk_get(dev, "aon"); + if (IS_ERR(snandc->aon_clk)) + return PTR_ERR(snandc->aon_clk); + + snandc->qspi->iomacro_clk = devm_clk_get(dev, "iom"); + if (IS_ERR(snandc->qspi->iomacro_clk)) + return PTR_ERR(snandc->qspi->iomacro_clk); + + snandc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(snandc->base)) + return PTR_ERR(snandc->base); + + snandc->base_phys = res->start; + snandc->base_dma = dma_map_resource(dev, res->start, resource_size(res), + DMA_BIDIRECTIONAL, 0); + if (dma_mapping_error(dev, snandc->base_dma)) + return -ENXIO; + + ret = clk_prepare_enable(snandc->core_clk); + if (ret) + goto err_dis_core_clk; + + ret = clk_prepare_enable(snandc->aon_clk); + if (ret) + goto err_dis_aon_clk; + + ret = clk_prepare_enable(snandc->qspi->iomacro_clk); + if (ret) + goto err_dis_iom_clk; + + ret = qcom_nandc_alloc(snandc); + if (ret) + goto err_snand_alloc; + + ret = qcom_spi_init(snandc); + if (ret) + goto err_spi_init; + + /* setup ECC engine */ + snandc->qspi->ecc_eng.dev = &pdev->dev; + snandc->qspi->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED; + snandc->qspi->ecc_eng.ops = &qcom_spi_ecc_engine_ops_pipelined; + snandc->qspi->ecc_eng.priv = snandc; + + ret = nand_ecc_register_on_host_hw_engine(&snandc->qspi->ecc_eng); + if (ret) { + dev_err(&pdev->dev, "failed to register ecc engine:%d\n", ret); + goto err_spi_init; + } + + ctlr->num_chipselect = QPIC_QSPI_NUM_CS; + ctlr->mem_ops = &qcom_spi_mem_ops; + ctlr->mem_caps = &qcom_spi_mem_caps; + ctlr->dev.of_node = pdev->dev.of_node; + ctlr->mode_bits = SPI_TX_DUAL | SPI_RX_DUAL | + SPI_TX_QUAD | SPI_RX_QUAD; + + ret = spi_register_controller(ctlr); + if (ret) { + dev_err(&pdev->dev, "spi_register_controller failed.\n"); + goto err_spi_init; + } + + return 0; + +err_spi_init: + qcom_nandc_unalloc(snandc); +err_snand_alloc: + clk_disable_unprepare(snandc->qspi->iomacro_clk); +err_dis_iom_clk: + clk_disable_unprepare(snandc->aon_clk); +err_dis_aon_clk: + clk_disable_unprepare(snandc->core_clk); +err_dis_core_clk: + dma_unmap_resource(dev, res->start, resource_size(res), + DMA_BIDIRECTIONAL, 0); + return ret; +} + +static void qcom_spi_remove(struct platform_device *pdev) +{ + struct spi_controller *ctlr = platform_get_drvdata(pdev); + struct qcom_nand_controller *snandc = spi_controller_get_devdata(ctlr); + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + spi_unregister_controller(ctlr); + + qcom_nandc_unalloc(snandc); + + clk_disable_unprepare(snandc->aon_clk); + clk_disable_unprepare(snandc->core_clk); + clk_disable_unprepare(snandc->qspi->iomacro_clk); + + dma_unmap_resource(&pdev->dev, snandc->base_dma, resource_size(res), + DMA_BIDIRECTIONAL, 0); +} + +static const struct qcom_nandc_props ipq9574_snandc_props = { + .dev_cmd_reg_start = 0x7000, + .bam_offset = 0x30000, + .supports_bam = true, +}; + +static const struct of_device_id qcom_snandc_of_match[] = { + { + .compatible = "qcom,ipq9574-snand", + .data = &ipq9574_snandc_props, + }, + {} +}; +MODULE_DEVICE_TABLE(of, qcom_snandc_of_match); + +static struct platform_driver qcom_spi_driver = { + .driver = { + .name = "qcom_snand", + .of_match_table = qcom_snandc_of_match, + }, + .probe = qcom_spi_probe, + .remove = qcom_spi_remove, +}; +module_platform_driver(qcom_spi_driver); + +MODULE_DESCRIPTION("SPI 
driver for QPIC QSPI cores"); +MODULE_AUTHOR("Md Sadre Alam <quic_mdalam@quicinc.com>"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/spi/spi-realtek-rtl-snand.c b/drivers/spi/spi-realtek-rtl-snand.c index cd0484041147..741cf2af3e91 100644 --- a/drivers/spi/spi-realtek-rtl-snand.c +++ b/drivers/spi/spi-realtek-rtl-snand.c @@ -364,7 +364,6 @@ static int rtl_snand_probe(struct platform_device *pdev) .reg_bits = 32, .val_bits = 32, .reg_stride = 4, - .cache_type = REGCACHE_NONE, }; int irq, ret; diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index 1bc012fce7cb..1a6381de6f33 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -547,7 +547,7 @@ static int rockchip_spi_config(struct rockchip_spi *rs, cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET; if (spi->mode & SPI_LSB_FIRST) cr0 |= CR0_FBM_LSB << CR0_FBM_OFFSET; - if (spi->mode & SPI_CS_HIGH) + if ((spi->mode & SPI_CS_HIGH) && !(spi_get_csgpiod(spi, 0))) cr0 |= BIT(spi_get_chipselect(spi, 0)) << CR0_SOI_OFFSET; if (xfer->rx_buf && xfer->tx_buf) diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c index e0c66a24a3cb..627cffea5d5c 100644 --- a/drivers/spi/spi-rpc-if.c +++ b/drivers/spi/spi-rpc-if.c @@ -75,6 +75,19 @@ static bool rpcif_spi_mem_supports_op(struct spi_mem *mem, return true; } +static ssize_t xspi_spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc, + u64 offs, size_t len, const void *buf) +{ + struct rpcif *rpc = spi_controller_get_devdata(desc->mem->spi->controller); + + if (offs + desc->info.offset + len > U32_MAX) + return -EINVAL; + + rpcif_spi_mem_prepare(desc->mem->spi, &desc->info.op_tmpl, &offs, &len); + + return xspi_dirmap_write(rpc->dev, offs, len, buf); +} + static ssize_t rpcif_spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc, u64 offs, size_t len, void *buf) { @@ -103,7 +116,7 @@ static int rpcif_spi_mem_dirmap_create(struct spi_mem_dirmap_desc *desc) if (!rpc->dirmap) return -EOPNOTSUPP; - if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN) + if (!rpc->xspi && desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN) return -EOPNOTSUPP; return 0; @@ -125,6 +138,7 @@ static const struct spi_controller_mem_ops rpcif_spi_mem_ops = { .exec_op = rpcif_spi_mem_exec_op, .dirmap_create = rpcif_spi_mem_dirmap_create, .dirmap_read = rpcif_spi_mem_dirmap_read, + .dirmap_write = xspi_spi_mem_dirmap_write, }; static int rpcif_spi_probe(struct platform_device *pdev) diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 389275dbc003..9c47f5741c5f 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -139,7 +139,9 @@ struct s3c64xx_spi_dma_data { * struct s3c64xx_spi_port_config - SPI Controller hardware info * @fifo_lvl_mask: [DEPRECATED] use @{rx, tx}_fifomask instead. * @rx_lvl_offset: [DEPRECATED] use @{rx,tx}_fifomask instead. - * @fifo_depth: depth of the FIFO. + * @fifo_depth: depth of the FIFOs. Used by compatibles where all the instances + * of the IP define the same FIFO depth. It has higher precedence + * than the FIFO depth specified via DT. * @rx_fifomask: SPI_STATUS.RX_FIFO_LVL mask. Shifted mask defining the field's * length and position. * @tx_fifomask: SPI_STATUS.TX_FIFO_LVL mask. 
Shifted mask defining the field's diff --git a/drivers/spi/spi-sg2044-nor.c b/drivers/spi/spi-sg2044-nor.c new file mode 100644 index 000000000000..a59aa3fc55d2 --- /dev/null +++ b/drivers/spi/spi-sg2044-nor.c @@ -0,0 +1,488 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * SG2044 SPI NOR controller driver + * + * Copyright (c) 2025 Longbin Li <looong.bin@gmail.com> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/spi/spi-mem.h> + +/* Hardware register definitions */ +#define SPIFMC_CTRL 0x00 +#define SPIFMC_CTRL_CPHA BIT(12) +#define SPIFMC_CTRL_CPOL BIT(13) +#define SPIFMC_CTRL_HOLD_OL BIT(14) +#define SPIFMC_CTRL_WP_OL BIT(15) +#define SPIFMC_CTRL_LSBF BIT(20) +#define SPIFMC_CTRL_SRST BIT(21) +#define SPIFMC_CTRL_SCK_DIV_SHIFT 0 +#define SPIFMC_CTRL_FRAME_LEN_SHIFT 16 +#define SPIFMC_CTRL_SCK_DIV_MASK 0x7FF + +#define SPIFMC_CE_CTRL 0x04 +#define SPIFMC_CE_CTRL_CEMANUAL BIT(0) +#define SPIFMC_CE_CTRL_CEMANUAL_EN BIT(1) + +#define SPIFMC_DLY_CTRL 0x08 +#define SPIFMC_CTRL_FM_INTVL_MASK 0x000f +#define SPIFMC_CTRL_FM_INTVL BIT(0) +#define SPIFMC_CTRL_CET_MASK 0x0f00 +#define SPIFMC_CTRL_CET BIT(8) + +#define SPIFMC_DMMR 0x0c + +#define SPIFMC_TRAN_CSR 0x10 +#define SPIFMC_TRAN_CSR_TRAN_MODE_MASK GENMASK(1, 0) +#define SPIFMC_TRAN_CSR_TRAN_MODE_RX BIT(0) +#define SPIFMC_TRAN_CSR_TRAN_MODE_TX BIT(1) +#define SPIFMC_TRAN_CSR_FAST_MODE BIT(3) +#define SPIFMC_TRAN_CSR_BUS_WIDTH_1_BIT (0x00 << 4) +#define SPIFMC_TRAN_CSR_BUS_WIDTH_2_BIT (0x01 << 4) +#define SPIFMC_TRAN_CSR_BUS_WIDTH_4_BIT (0x02 << 4) +#define SPIFMC_TRAN_CSR_DMA_EN BIT(6) +#define SPIFMC_TRAN_CSR_MISO_LEVEL BIT(7) +#define SPIFMC_TRAN_CSR_ADDR_BYTES_MASK GENMASK(10, 8) +#define SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT 8 +#define SPIFMC_TRAN_CSR_WITH_CMD BIT(11) +#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_MASK GENMASK(13, 12) +#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_1_BYTE (0x00 << 12) +#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_2_BYTE (0x01 << 12) +#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_4_BYTE (0x02 << 12) +#define SPIFMC_TRAN_CSR_FIFO_TRG_LVL_8_BYTE (0x03 << 12) +#define SPIFMC_TRAN_CSR_GO_BUSY BIT(15) +#define SPIFMC_TRAN_CSR_ADDR4B_SHIFT 20 +#define SPIFMC_TRAN_CSR_CMD4B_SHIFT 21 + +#define SPIFMC_TRAN_NUM 0x14 +#define SPIFMC_FIFO_PORT 0x18 +#define SPIFMC_FIFO_PT 0x20 + +#define SPIFMC_INT_STS 0x28 +#define SPIFMC_INT_TRAN_DONE BIT(0) +#define SPIFMC_INT_RD_FIFO BIT(2) +#define SPIFMC_INT_WR_FIFO BIT(3) +#define SPIFMC_INT_RX_FRAME BIT(4) +#define SPIFMC_INT_TX_FRAME BIT(5) + +#define SPIFMC_INT_EN 0x2c +#define SPIFMC_INT_TRAN_DONE_EN BIT(0) +#define SPIFMC_INT_RD_FIFO_EN BIT(2) +#define SPIFMC_INT_WR_FIFO_EN BIT(3) +#define SPIFMC_INT_RX_FRAME_EN BIT(4) +#define SPIFMC_INT_TX_FRAME_EN BIT(5) + +#define SPIFMC_OPT 0x030 +#define SPIFMC_OPT_DISABLE_FIFO_FLUSH BIT(1) + +#define SPIFMC_MAX_FIFO_DEPTH 8 + +#define SPIFMC_MAX_READ_SIZE 0x10000 + +struct sg2044_spifmc { + struct spi_controller *ctrl; + void __iomem *io_base; + struct device *dev; + struct mutex lock; + struct clk *clk; +}; + +static int sg2044_spifmc_wait_int(struct sg2044_spifmc *spifmc, u8 int_type) +{ + u32 stat; + + return readl_poll_timeout(spifmc->io_base + SPIFMC_INT_STS, stat, + (stat & int_type), 0, 1000000); +} + +static int sg2044_spifmc_wait_xfer_size(struct sg2044_spifmc *spifmc, + int xfer_size) +{ + u8 stat; + + return readl_poll_timeout(spifmc->io_base + SPIFMC_FIFO_PT, stat, + ((stat & 0xf) == xfer_size), 1, 1000000); +} + 
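+/* + * Both helpers above busy-poll with readl_poll_timeout() and give up + * after one second. sg2044_spifmc_wait_int() waits for an event bit + * to appear in SPIFMC_INT_STS; sg2044_spifmc_wait_xfer_size() waits + * for the FIFO occupancy field (the low nibble of SPIFMC_FIFO_PT) to + * reach the requested level, 0 meaning the TX FIFO has drained. The + * transfer paths below pair them the same way: kick off GO_BUSY, feed + * or drain the FIFO in bursts of at most SPIFMC_MAX_FIFO_DEPTH bytes, + * then wait for SPIFMC_INT_TRAN_DONE. + */ + 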
+static u32 sg2044_spifmc_init_reg(struct sg2044_spifmc *spifmc) +{ + u32 reg; + + reg = readl(spifmc->io_base + SPIFMC_TRAN_CSR); + reg &= ~(SPIFMC_TRAN_CSR_TRAN_MODE_MASK | + SPIFMC_TRAN_CSR_FAST_MODE | + SPIFMC_TRAN_CSR_BUS_WIDTH_2_BIT | + SPIFMC_TRAN_CSR_BUS_WIDTH_4_BIT | + SPIFMC_TRAN_CSR_DMA_EN | + SPIFMC_TRAN_CSR_ADDR_BYTES_MASK | + SPIFMC_TRAN_CSR_WITH_CMD | + SPIFMC_TRAN_CSR_FIFO_TRG_LVL_MASK); + + writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR); + + return reg; +} + +static ssize_t sg2044_spifmc_read_64k(struct sg2044_spifmc *spifmc, + const struct spi_mem_op *op, loff_t from, + size_t len, u_char *buf) +{ + int xfer_size, offset; + u32 reg; + int ret; + int i; + + reg = sg2044_spifmc_init_reg(spifmc); + reg |= (op->addr.nbytes + op->dummy.nbytes) << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT; + reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_8_BYTE; + reg |= SPIFMC_TRAN_CSR_WITH_CMD; + reg |= SPIFMC_TRAN_CSR_TRAN_MODE_RX; + + writel(0, spifmc->io_base + SPIFMC_FIFO_PT); + writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT); + + for (i = op->addr.nbytes - 1; i >= 0; i--) + writeb((from >> i * 8) & 0xff, spifmc->io_base + SPIFMC_FIFO_PORT); + + for (i = 0; i < op->dummy.nbytes; i++) + writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT); + + writel(len, spifmc->io_base + SPIFMC_TRAN_NUM); + writel(0, spifmc->io_base + SPIFMC_INT_STS); + reg |= SPIFMC_TRAN_CSR_GO_BUSY; + writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR); + + ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_RD_FIFO); + if (ret < 0) + return ret; + + offset = 0; + while (offset < len) { + xfer_size = min_t(size_t, SPIFMC_MAX_FIFO_DEPTH, len - offset); + + ret = sg2044_spifmc_wait_xfer_size(spifmc, xfer_size); + if (ret < 0) + return ret; + + for (i = 0; i < xfer_size; i++) + buf[i + offset] = readb(spifmc->io_base + SPIFMC_FIFO_PORT); + + offset += xfer_size; + } + + ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE); + if (ret < 0) + return ret; + + writel(0, spifmc->io_base + SPIFMC_FIFO_PT); + + return len; +} + +static ssize_t sg2044_spifmc_read(struct sg2044_spifmc *spifmc, + const struct spi_mem_op *op) +{ + size_t xfer_size; + size_t offset; + loff_t from = op->addr.val; + size_t len = op->data.nbytes; + int ret; + u8 *din = op->data.buf.in; + + offset = 0; + while (offset < len) { + xfer_size = min_t(size_t, SPIFMC_MAX_READ_SIZE, len - offset); + + ret = sg2044_spifmc_read_64k(spifmc, op, from, xfer_size, din); + if (ret < 0) + return ret; + + offset += xfer_size; + din += xfer_size; + from += xfer_size; + } + + return 0; +} + +static ssize_t sg2044_spifmc_write(struct sg2044_spifmc *spifmc, + const struct spi_mem_op *op) +{ + size_t xfer_size; + const u8 *dout = op->data.buf.out; + int i, offset; + int ret; + u32 reg; + + reg = sg2044_spifmc_init_reg(spifmc); + reg |= (op->addr.nbytes + op->dummy.nbytes) << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT; + reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_8_BYTE; + reg |= SPIFMC_TRAN_CSR_WITH_CMD; + reg |= SPIFMC_TRAN_CSR_TRAN_MODE_TX; + + writel(0, spifmc->io_base + SPIFMC_FIFO_PT); + writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT); + + for (i = op->addr.nbytes - 1; i >= 0; i--) + writeb((op->addr.val >> i * 8) & 0xff, spifmc->io_base + SPIFMC_FIFO_PORT); + + for (i = 0; i < op->dummy.nbytes; i++) + writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT); + + writel(0, spifmc->io_base + SPIFMC_INT_STS); + writel(op->data.nbytes, spifmc->io_base + SPIFMC_TRAN_NUM); + reg |= SPIFMC_TRAN_CSR_GO_BUSY; + writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR); + + ret = sg2044_spifmc_wait_xfer_size(spifmc, 0); 
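+ /* data is queued below only after the TX FIFO has fully drained */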
+ if (ret < 0) + return ret; + + writel(0, spifmc->io_base + SPIFMC_FIFO_PT); + + offset = 0; + while (offset < op->data.nbytes) { + xfer_size = min_t(size_t, SPIFMC_MAX_FIFO_DEPTH, op->data.nbytes - offset); + + ret = sg2044_spifmc_wait_xfer_size(spifmc, 0); + if (ret < 0) + return ret; + + for (i = 0; i < xfer_size; i++) + writeb(dout[i + offset], spifmc->io_base + SPIFMC_FIFO_PORT); + + offset += xfer_size; + } + + ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE); + if (ret < 0) + return ret; + + writel(0, spifmc->io_base + SPIFMC_FIFO_PT); + + return 0; +} + +static ssize_t sg2044_spifmc_tran_cmd(struct sg2044_spifmc *spifmc, + const struct spi_mem_op *op) +{ + int i, ret; + u32 reg; + + reg = sg2044_spifmc_init_reg(spifmc); + reg |= (op->addr.nbytes + op->dummy.nbytes) << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT; + reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_1_BYTE; + reg |= SPIFMC_TRAN_CSR_WITH_CMD; + + writel(0, spifmc->io_base + SPIFMC_FIFO_PT); + writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT); + + for (i = op->addr.nbytes - 1; i >= 0; i--) + writeb((op->addr.val >> i * 8) & 0xff, spifmc->io_base + SPIFMC_FIFO_PORT); + + for (i = 0; i < op->dummy.nbytes; i++) + writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT); + + writel(0, spifmc->io_base + SPIFMC_INT_STS); + reg |= SPIFMC_TRAN_CSR_GO_BUSY; + writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR); + + ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE); + if (ret < 0) + return ret; + + writel(0, spifmc->io_base + SPIFMC_FIFO_PT); + + return 0; +} + +static void sg2044_spifmc_trans(struct sg2044_spifmc *spifmc, + const struct spi_mem_op *op) +{ + if (op->data.dir == SPI_MEM_DATA_IN) + sg2044_spifmc_read(spifmc, op); + else if (op->data.dir == SPI_MEM_DATA_OUT) + sg2044_spifmc_write(spifmc, op); + else + sg2044_spifmc_tran_cmd(spifmc, op); +} + +static ssize_t sg2044_spifmc_trans_reg(struct sg2044_spifmc *spifmc, + const struct spi_mem_op *op) +{ + const u8 *dout = NULL; + u8 *din = NULL; + size_t len = op->data.nbytes; + int ret, i; + u32 reg; + + if (op->data.dir == SPI_MEM_DATA_IN) + din = op->data.buf.in; + else + dout = op->data.buf.out; + + reg = sg2044_spifmc_init_reg(spifmc); + reg |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_1_BYTE; + reg |= SPIFMC_TRAN_CSR_WITH_CMD; + + if (din) { + reg |= SPIFMC_TRAN_CSR_BUS_WIDTH_1_BIT; + reg |= SPIFMC_TRAN_CSR_TRAN_MODE_RX; + reg |= SPIFMC_TRAN_CSR_TRAN_MODE_TX; + + writel(SPIFMC_OPT_DISABLE_FIFO_FLUSH, spifmc->io_base + SPIFMC_OPT); + } else { + /* + * If writing values to the Status Register, + * configure the TRAN_CSR register the same way as + * for a register read
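+ * (i.e. with both the RX and TX transfer modes enabled and + * TRAN_NUM programmed with the transfer length).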
+ */ + if (op->cmd.opcode == 0x01) { + reg |= SPIFMC_TRAN_CSR_TRAN_MODE_RX; + reg |= SPIFMC_TRAN_CSR_TRAN_MODE_TX; + writel(len, spifmc->io_base + SPIFMC_TRAN_NUM); + } + } + + writel(0, spifmc->io_base + SPIFMC_FIFO_PT); + writeb(op->cmd.opcode, spifmc->io_base + SPIFMC_FIFO_PORT); + + for (i = 0; i < len; i++) { + if (din) + writeb(0xff, spifmc->io_base + SPIFMC_FIFO_PORT); + else + writeb(dout[i], spifmc->io_base + SPIFMC_FIFO_PORT); + } + + writel(0, spifmc->io_base + SPIFMC_INT_STS); + writel(len, spifmc->io_base + SPIFMC_TRAN_NUM); + reg |= SPIFMC_TRAN_CSR_GO_BUSY; + writel(reg, spifmc->io_base + SPIFMC_TRAN_CSR); + + ret = sg2044_spifmc_wait_int(spifmc, SPIFMC_INT_TRAN_DONE); + if (ret < 0) + return ret; + + if (din) { + while (len--) + *din++ = readb(spifmc->io_base + SPIFMC_FIFO_PORT); + } + + writel(0, spifmc->io_base + SPIFMC_FIFO_PT); + + return 0; +} + +static int sg2044_spifmc_exec_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + struct sg2044_spifmc *spifmc; + + spifmc = spi_controller_get_devdata(mem->spi->controller); + + mutex_lock(&spifmc->lock); + + if (op->addr.nbytes == 0) + sg2044_spifmc_trans_reg(spifmc, op); + else + sg2044_spifmc_trans(spifmc, op); + + mutex_unlock(&spifmc->lock); + + return 0; +} + +static const struct spi_controller_mem_ops sg2044_spifmc_mem_ops = { + .exec_op = sg2044_spifmc_exec_op, +}; + +static void sg2044_spifmc_init(struct sg2044_spifmc *spifmc) +{ + u32 tran_csr; + u32 reg; + + writel(0, spifmc->io_base + SPIFMC_DMMR); + + reg = readl(spifmc->io_base + SPIFMC_CTRL); + reg |= SPIFMC_CTRL_SRST; + reg &= ~(SPIFMC_CTRL_SCK_DIV_MASK); + reg |= 1; + writel(reg, spifmc->io_base + SPIFMC_CTRL); + + writel(0, spifmc->io_base + SPIFMC_CE_CTRL); + + tran_csr = readl(spifmc->io_base + SPIFMC_TRAN_CSR); + tran_csr |= (0 << SPIFMC_TRAN_CSR_ADDR_BYTES_SHIFT); + tran_csr |= SPIFMC_TRAN_CSR_FIFO_TRG_LVL_4_BYTE; + tran_csr |= SPIFMC_TRAN_CSR_WITH_CMD; + writel(tran_csr, spifmc->io_base + SPIFMC_TRAN_CSR); +} + +static int sg2044_spifmc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct spi_controller *ctrl; + struct sg2044_spifmc *spifmc; + int ret; + + ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(*spifmc)); + if (!ctrl) + return -ENOMEM; + + spifmc = spi_controller_get_devdata(ctrl); + + spifmc->clk = devm_clk_get_enabled(&pdev->dev, NULL); + if (IS_ERR(spifmc->clk)) + return dev_err_probe(dev, PTR_ERR(spifmc->clk), "Cannot get and enable AHB clock\n"); + + spifmc->dev = &pdev->dev; + spifmc->ctrl = ctrl; + + spifmc->io_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(spifmc->io_base)) + return PTR_ERR(spifmc->io_base); + + ctrl->num_chipselect = 1; + ctrl->dev.of_node = pdev->dev.of_node; + ctrl->bits_per_word_mask = SPI_BPW_MASK(8); + ctrl->auto_runtime_pm = false; + ctrl->mem_ops = &sg2044_spifmc_mem_ops; + ctrl->mode_bits = SPI_RX_DUAL | SPI_TX_DUAL | SPI_RX_QUAD | SPI_TX_QUAD; + + ret = devm_mutex_init(dev, &spifmc->lock); + if (ret) + return ret; + + sg2044_spifmc_init(spifmc); + sg2044_spifmc_init_reg(spifmc); + + ret = devm_spi_register_controller(&pdev->dev, ctrl); + if (ret) + return dev_err_probe(dev, ret, "spi_register_controller failed\n"); + + return 0; +} + +static const struct of_device_id sg2044_spifmc_match[] = { + { .compatible = "sophgo,sg2044-spifmc-nor" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sg2044_spifmc_match); + +static struct platform_driver sg2044_nor_driver = { + .driver = { + .name = "sg2044,spifmc-nor", + .of_match_table = sg2044_spifmc_match, + }, + .probe 
= sg2044_spifmc_probe, +}; +module_platform_driver(sg2044_nor_driver); + +MODULE_DESCRIPTION("SG2044 SPI NOR controller driver"); +MODULE_AUTHOR("Longbin Li <looong.bin@gmail.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 8a98c313548e..94a867967e02 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -20,6 +20,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/sh_dma.h> @@ -62,135 +63,6 @@ struct sh_msiof_spi_priv { #define MAX_SS 3 /* Maximum number of native chip selects */ -#define SITMDR1 0x00 /* Transmit Mode Register 1 */ -#define SITMDR2 0x04 /* Transmit Mode Register 2 */ -#define SITMDR3 0x08 /* Transmit Mode Register 3 */ -#define SIRMDR1 0x10 /* Receive Mode Register 1 */ -#define SIRMDR2 0x14 /* Receive Mode Register 2 */ -#define SIRMDR3 0x18 /* Receive Mode Register 3 */ -#define SITSCR 0x20 /* Transmit Clock Select Register */ -#define SIRSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */ -#define SICTR 0x28 /* Control Register */ -#define SIFCTR 0x30 /* FIFO Control Register */ -#define SISTR 0x40 /* Status Register */ -#define SIIER 0x44 /* Interrupt Enable Register */ -#define SITDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */ -#define SITDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */ -#define SITFDR 0x50 /* Transmit FIFO Data Register */ -#define SIRDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */ -#define SIRDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */ -#define SIRFDR 0x60 /* Receive FIFO Data Register */ - -/* SITMDR1 and SIRMDR1 */ -#define SIMDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */ -#define SIMDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */ -#define SIMDR1_SYNCMD_SPI (2 << 28) /* Level mode/SPI */ -#define SIMDR1_SYNCMD_LR (3 << 28) /* L/R mode */ -#define SIMDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ -#define SIMDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ -#define SIMDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */ -#define SIMDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */ -#define SIMDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */ -#define SIMDR1_FLD_SHIFT 2 -#define SIMDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */ -/* SITMDR1 */ -#define SITMDR1_PCON BIT(30) /* Transfer Signal Connection */ -#define SITMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */ -#define SITMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */ - -/* SITMDR2 and SIRMDR2 */ -#define SIMDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */ -#define SIMDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */ -#define SIMDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */ - -/* SITSCR and SIRSCR */ -#define SISCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */ -#define SISCR_BRPS(i) (((i) - 1) << 8) -#define SISCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */ -#define SISCR_BRDV_DIV_2 0 -#define SISCR_BRDV_DIV_4 1 -#define SISCR_BRDV_DIV_8 2 -#define SISCR_BRDV_DIV_16 3 -#define SISCR_BRDV_DIV_32 4 -#define SISCR_BRDV_DIV_1 7 - -/* SICTR */ -#define SICTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */ -#define SICTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */ -#define SICTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */ -#define 
SICTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */ -#define SICTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */ -#define SICTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */ -#define SICTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */ -#define SICTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */ -#define SICTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */ -#define SICTR_TXDIZ_LOW (0 << 22) /* 0 */ -#define SICTR_TXDIZ_HIGH (1 << 22) /* 1 */ -#define SICTR_TXDIZ_HIZ (2 << 22) /* High-impedance */ -#define SICTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */ -#define SICTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */ -#define SICTR_TXE BIT(9) /* Transmit Enable */ -#define SICTR_RXE BIT(8) /* Receive Enable */ -#define SICTR_TXRST BIT(1) /* Transmit Reset */ -#define SICTR_RXRST BIT(0) /* Receive Reset */ - -/* SIFCTR */ -#define SIFCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */ -#define SIFCTR_TFWM_64 (0UL << 29) /* Transfer Request when 64 empty stages */ -#define SIFCTR_TFWM_32 (1UL << 29) /* Transfer Request when 32 empty stages */ -#define SIFCTR_TFWM_24 (2UL << 29) /* Transfer Request when 24 empty stages */ -#define SIFCTR_TFWM_16 (3UL << 29) /* Transfer Request when 16 empty stages */ -#define SIFCTR_TFWM_12 (4UL << 29) /* Transfer Request when 12 empty stages */ -#define SIFCTR_TFWM_8 (5UL << 29) /* Transfer Request when 8 empty stages */ -#define SIFCTR_TFWM_4 (6UL << 29) /* Transfer Request when 4 empty stages */ -#define SIFCTR_TFWM_1 (7UL << 29) /* Transfer Request when 1 empty stage */ -#define SIFCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */ -#define SIFCTR_TFUA_SHIFT 20 -#define SIFCTR_TFUA(i) ((i) << SIFCTR_TFUA_SHIFT) -#define SIFCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */ -#define SIFCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stages */ -#define SIFCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */ -#define SIFCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */ -#define SIFCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */ -#define SIFCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */ -#define SIFCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */ -#define SIFCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */ -#define SIFCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */ -#define SIFCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */ -#define SIFCTR_RFUA_SHIFT 4 -#define SIFCTR_RFUA(i) ((i) << SIFCTR_RFUA_SHIFT) - -/* SISTR */ -#define SISTR_TFEMP BIT(29) /* Transmit FIFO Empty */ -#define SISTR_TDREQ BIT(28) /* Transmit Data Transfer Request */ -#define SISTR_TEOF BIT(23) /* Frame Transmission End */ -#define SISTR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */ -#define SISTR_TFOVF BIT(20) /* Transmit FIFO Overflow */ -#define SISTR_TFUDF BIT(19) /* Transmit FIFO Underflow */ -#define SISTR_RFFUL BIT(13) /* Receive FIFO Full */ -#define SISTR_RDREQ BIT(12) /* Receive Data Transfer Request */ -#define SISTR_REOF BIT(7) /* Frame Reception End */ -#define SISTR_RFSERR BIT(5) /* Receive Frame Synchronization Error */ -#define SISTR_RFUDF BIT(4) /* Receive FIFO Underflow */ -#define SISTR_RFOVF BIT(3) /* Receive FIFO Overflow */ - -/* SIIER */ -#define SIIER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. 
Enable */ -#define SIIER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */ -#define SIIER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */ -#define SIIER_TEOFE BIT(23) /* Frame Transmission End Enable */ -#define SIIER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */ -#define SIIER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */ -#define SIIER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */ -#define SIIER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */ -#define SIIER_RFFULE BIT(13) /* Receive FIFO Full Enable */ -#define SIIER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */ -#define SIIER_REOFE BIT(7) /* Frame Reception End Enable */ -#define SIIER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */ -#define SIIER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */ -#define SIIER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */ - - static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs) { switch (reg_offs) { @@ -255,11 +127,6 @@ static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p) 100); } -static const u32 sh_msiof_spi_div_array[] = { - SISCR_BRDV_DIV_1, SISCR_BRDV_DIV_2, SISCR_BRDV_DIV_4, - SISCR_BRDV_DIV_8, SISCR_BRDV_DIV_16, SISCR_BRDV_DIV_32, -}; - static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, struct spi_transfer *t) { @@ -298,7 +165,9 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, t->effective_speed_hz = parent_rate / (brps << div_pow); - scr = sh_msiof_spi_div_array[div_pow] | SISCR_BRPS(brps); + /* div_pow == 0 maps to SISCR_BRDV_DIV_1 == all ones */ + scr = FIELD_PREP(SISCR_BRDV, div_pow - 1) | + FIELD_PREP(SISCR_BRPS, brps - 1); sh_msiof_write(p, SITSCR, scr); if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX)) sh_msiof_write(p, SIRSCR, scr); @@ -340,18 +209,19 @@ static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p) return 0; } - val = sh_msiof_get_delay_bit(p->info->dtdl) << SIMDR1_DTDL_SHIFT; - val |= sh_msiof_get_delay_bit(p->info->syncdl) << SIMDR1_SYNCDL_SHIFT; + val = FIELD_PREP(SIMDR1_DTDL, sh_msiof_get_delay_bit(p->info->dtdl)) | + FIELD_PREP(SIMDR1_SYNCDL, + sh_msiof_get_delay_bit(p->info->syncdl)); return val; } static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss, - u32 cpol, u32 cpha, - u32 tx_hi_z, u32 lsb_first, u32 cs_high) + bool cpol, bool cpha, bool tx_hi_z, + bool lsb_first, bool cs_high) { + bool edge; u32 tmp; - int edge; /* * CPOL CPHA TSCKIZ RSCKIZ TEDG REDG @@ -360,16 +230,18 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss, * 1 0 11 11 0 0 * 1 1 11 11 1 1 */ - tmp = SIMDR1_SYNCMD_SPI | 1 << SIMDR1_FLD_SHIFT | SIMDR1_XXSTP; - tmp |= !cs_high << SIMDR1_SYNCAC_SHIFT; - tmp |= lsb_first << SIMDR1_BITLSB_SHIFT; + tmp = FIELD_PREP(SIMDR1_SYNCMD, SIMDR1_SYNCMD_SPI) | + FIELD_PREP(SIMDR1_FLD, 1) | SIMDR1_XXSTP | + FIELD_PREP(SIMDR1_SYNCAC, !cs_high) | + FIELD_PREP(SIMDR1_BITLSB, lsb_first); tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p); if (spi_controller_is_target(p->ctlr)) { sh_msiof_write(p, SITMDR1, tmp | SITMDR1_PCON); } else { sh_msiof_write(p, SITMDR1, tmp | SIMDR1_TRMD | SITMDR1_PCON | - (ss < MAX_SS ? ss : 0) << SITMDR1_SYNCCH_SHIFT); + FIELD_PREP(SITMDR1_SYNCCH, + ss < MAX_SS ? 
ss : 0)); } if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) { /* These bits are reserved if RX needs TX */ @@ -378,30 +250,42 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss, sh_msiof_write(p, SIRMDR1, tmp); tmp = 0; - tmp |= SICTR_TSCKIZ_SCK | cpol << SICTR_TSCKIZ_POL_SHIFT; - tmp |= SICTR_RSCKIZ_SCK | cpol << SICTR_RSCKIZ_POL_SHIFT; + tmp |= SICTR_TSCKIZ_SCK | FIELD_PREP(SICTR_TSCKIZ_POL, cpol); + tmp |= SICTR_RSCKIZ_SCK | FIELD_PREP(SICTR_RSCKIZ_POL, cpol); edge = cpol ^ !cpha; - tmp |= edge << SICTR_TEDG_SHIFT; - tmp |= edge << SICTR_REDG_SHIFT; - tmp |= tx_hi_z ? SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW; + tmp |= FIELD_PREP(SICTR_TEDG, edge); + tmp |= FIELD_PREP(SICTR_REDG, edge); + tmp |= FIELD_PREP(SICTR_TXDIZ, + tx_hi_z ? SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW); sh_msiof_write(p, SICTR, tmp); } static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p, const void *tx_buf, void *rx_buf, - u32 bits, u32 words) + u32 bits, u32 words1, u32 words2) { - u32 dr2 = SIMDR2_BITLEN1(bits) | SIMDR2_WDLEN1(words); + u32 dr2 = FIELD_PREP(SIMDR2_GRP, words2 ? 1 : 0) | + FIELD_PREP(SIMDR2_BITLEN1, bits - 1) | + FIELD_PREP(SIMDR2_WDLEN1, words1 - 1); if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX)) sh_msiof_write(p, SITMDR2, dr2); else - sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK1); + sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK); if (rx_buf) sh_msiof_write(p, SIRMDR2, dr2); + + if (words2) { + u32 dr3 = FIELD_PREP(SIMDR3_BITLEN2, bits - 1) | + FIELD_PREP(SIMDR3_WDLEN2, words2 - 1); + + sh_msiof_write(p, SITMDR3, dr3); + if (rx_buf) + sh_msiof_write(p, SIRMDR3, dr3); + } } static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p) @@ -411,140 +295,154 @@ static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p) } static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, - const void *tx_buf, int words, int fs) + const void *tx_buf, unsigned int words, + unsigned int fs) { const u8 *buf_8 = tx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) sh_msiof_write(p, SITFDR, buf_8[k] << fs); } static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p, - const void *tx_buf, int words, int fs) + const void *tx_buf, unsigned int words, + unsigned int fs) { const u16 *buf_16 = tx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) sh_msiof_write(p, SITFDR, buf_16[k] << fs); } static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p, - const void *tx_buf, int words, int fs) + const void *tx_buf, unsigned int words, + unsigned int fs) { const u16 *buf_16 = tx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) sh_msiof_write(p, SITFDR, get_unaligned(&buf_16[k]) << fs); } static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p, - const void *tx_buf, int words, int fs) + const void *tx_buf, unsigned int words, + unsigned int fs) { const u32 *buf_32 = tx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) sh_msiof_write(p, SITFDR, buf_32[k] << fs); } static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p, - const void *tx_buf, int words, int fs) + const void *tx_buf, unsigned int words, + unsigned int fs) { const u32 *buf_32 = tx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) sh_msiof_write(p, SITFDR, get_unaligned(&buf_32[k]) << fs); } static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p, - const void *tx_buf, int words, int fs) + const void *tx_buf, unsigned int words, + unsigned int fs) { const u32 *buf_32 = tx_buf; - int k; + unsigned int k; for (k = 0; k < 
words; k++) sh_msiof_write(p, SITFDR, swab32(buf_32[k] << fs)); } static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p, - const void *tx_buf, int words, int fs) + const void *tx_buf, + unsigned int words, unsigned int fs) { const u32 *buf_32 = tx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) sh_msiof_write(p, SITFDR, swab32(get_unaligned(&buf_32[k]) << fs)); } static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p, - void *rx_buf, int words, int fs) + void *rx_buf, unsigned int words, + unsigned int fs) { u8 *buf_8 = rx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) buf_8[k] = sh_msiof_read(p, SIRFDR) >> fs; } static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p, - void *rx_buf, int words, int fs) + void *rx_buf, unsigned int words, + unsigned int fs) { u16 *buf_16 = rx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) buf_16[k] = sh_msiof_read(p, SIRFDR) >> fs; } static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p, - void *rx_buf, int words, int fs) + void *rx_buf, unsigned int words, + unsigned int fs) { u16 *buf_16 = rx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_16[k]); } static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p, - void *rx_buf, int words, int fs) + void *rx_buf, unsigned int words, + unsigned int fs) { u32 *buf_32 = rx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) buf_32[k] = sh_msiof_read(p, SIRFDR) >> fs; } static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p, - void *rx_buf, int words, int fs) + void *rx_buf, unsigned int words, + unsigned int fs) { u32 *buf_32 = rx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_32[k]); } static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p, - void *rx_buf, int words, int fs) + void *rx_buf, unsigned int words, + unsigned int fs) { u32 *buf_32 = rx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) buf_32[k] = swab32(sh_msiof_read(p, SIRFDR) >> fs); } static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p, - void *rx_buf, int words, int fs) + void *rx_buf, unsigned int words, + unsigned int fs) { u32 *buf_32 = rx_buf; - int k; + unsigned int k; for (k = 0; k < words; k++) put_unaligned(swab32(sh_msiof_read(p, SIRFDR) >> fs), &buf_32[k]); @@ -564,12 +462,12 @@ static int sh_msiof_spi_setup(struct spi_device *spi) return 0; /* Configure native chip select mode/polarity early */ - clr = SIMDR1_SYNCMD_MASK; - set = SIMDR1_SYNCMD_SPI; + clr = SIMDR1_SYNCMD; + set = FIELD_PREP(SIMDR1_SYNCMD, SIMDR1_SYNCMD_SPI); if (spi->mode & SPI_CS_HIGH) - clr |= BIT(SIMDR1_SYNCAC_SHIFT); + clr |= SIMDR1_SYNCAC; else - set |= BIT(SIMDR1_SYNCAC_SHIFT); + set |= SIMDR1_SYNCAC; pm_runtime_get_sync(&p->pdev->dev); tmp = sh_msiof_read(p, SITMDR1) & ~clr; sh_msiof_write(p, SITMDR1, tmp | set | SIMDR1_TRMD | SITMDR1_PCON); @@ -586,7 +484,8 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr, { struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr); const struct spi_device *spi = msg->spi; - u32 ss, cs_high; + bool cs_high; + u32 ss; /* Configure pins before asserting CS */ if (spi_get_csgpiod(spi, 0)) { @@ -594,12 +493,11 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr, cs_high = p->native_cs_high; } else { ss = spi_get_chipselect(spi, 0); - cs_high = !!(spi->mode & SPI_CS_HIGH); + cs_high = spi->mode & SPI_CS_HIGH; } - 
sh_msiof_spi_set_pin_regs(p, ss, !!(spi->mode & SPI_CPOL), - !!(spi->mode & SPI_CPHA), - !!(spi->mode & SPI_3WIRE), - !!(spi->mode & SPI_LSB_FIRST), cs_high); + sh_msiof_spi_set_pin_regs(p, ss, spi->mode & SPI_CPOL, + spi->mode & SPI_CPHA, spi->mode & SPI_3WIRE, + spi->mode & SPI_LSB_FIRST, cs_high); return 0; } @@ -672,20 +570,22 @@ static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p, static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p, void (*tx_fifo)(struct sh_msiof_spi_priv *, - const void *, int, int), + const void *, unsigned int, + unsigned int), void (*rx_fifo)(struct sh_msiof_spi_priv *, - void *, int, int), + void *, unsigned int, + unsigned int), const void *tx_buf, void *rx_buf, - int words, int bits) + unsigned int words, unsigned int bits) { - int fifo_shift; + unsigned int fifo_shift; int ret; /* limit maximum word transfer to rx/tx fifo size */ if (tx_buf) - words = min_t(int, words, p->tx_fifo_size); + words = min(words, p->tx_fifo_size); if (rx_buf) - words = min_t(int, words, p->rx_fifo_size); + words = min(words, p->rx_fifo_size); /* the fifo contents need shifting */ fifo_shift = 32 - bits; @@ -694,7 +594,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p, sh_msiof_write(p, SIFCTR, 0); /* setup msiof transfer mode registers */ - sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words); + sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words, 0); sh_msiof_write(p, SIIER, SIIER_TEOFE | SIIER_REOFE); /* write tx fifo */ @@ -744,10 +644,12 @@ static void sh_msiof_dma_complete(void *arg) } static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, - void *rx, unsigned int len) + void *rx, unsigned int len, + unsigned int max_wdlen) { u32 ier_bits = 0; struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL; + unsigned int words1, words2; dma_cookie_t cookie; int ret; @@ -789,10 +691,14 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, } /* 1 stage FIFO watermarks for DMA */ - sh_msiof_write(p, SIFCTR, SIFCTR_TFWM_1 | SIFCTR_RFWM_1); + sh_msiof_write(p, SIFCTR, + FIELD_PREP(SIFCTR_TFWM, SIFCTR_TFWM_1) | + FIELD_PREP(SIFCTR_RFWM, SIFCTR_RFWM_1)); /* setup msiof transfer mode registers (32-bit words) */ - sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4); + words1 = min(len / 4, max_wdlen); + words2 = len / 4 - words1; + sh_msiof_spi_set_mode_regs(p, tx, rx, 32, words1, words2); sh_msiof_write(p, SIIER, ier_bits); @@ -911,9 +817,12 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr, struct spi_transfer *t) { struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr); + unsigned int max_wdlen = FIELD_MAX(SIMDR2_WDLEN1) + 1; void (*copy32)(u32 *, const u32 *, unsigned int); - void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int); - void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int); + void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, unsigned int, + unsigned int); + void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, unsigned int, + unsigned int); const void *tx_buf = t->tx_buf; void *rx_buf = t->rx_buf; unsigned int len = t->len; @@ -931,17 +840,17 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr, if (!spi_controller_is_target(p->ctlr)) sh_msiof_spi_set_clk_regs(p, t); + if (tx_buf) + max_wdlen = min(max_wdlen, p->tx_fifo_size); + if (rx_buf) + max_wdlen = min(max_wdlen, p->rx_fifo_size); + while (ctlr->dma_tx && len > 15) { /* * DMA supports 32-bit words only, hence pack 8-bit and 16-bit * words, with byte resp. 
word swapping. */ - unsigned int l = 0; - - if (tx_buf) - l = min(round_down(len, 4), p->tx_fifo_size * 4); - if (rx_buf) - l = min(round_down(len, 4), p->rx_fifo_size * 4); + unsigned int l = min(round_down(len, 4), 2 * max_wdlen * 4); if (bits <= 8) { copy32 = copy_bswap32; @@ -954,7 +863,7 @@ static int sh_msiof_transfer_one(struct spi_controller *ctlr, if (tx_buf) copy32(p->tx_dma_page, tx_buf, l / 4); - ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l); + ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l, max_wdlen); if (ret == -EAGAIN) { dev_warn_once(&p->pdev->dev, "DMA not available, falling back to PIO\n"); @@ -1061,7 +970,7 @@ static const struct sh_msiof_chipdata rcar_gen2_data = { .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) | SPI_BPW_MASK(24) | SPI_BPW_MASK(32), .tx_fifo_size = 64, - .rx_fifo_size = 64, + .rx_fifo_size = 128, .ctlr_flags = SPI_CONTROLLER_MUST_TX, .min_div_pow = 0, }; @@ -1070,7 +979,16 @@ static const struct sh_msiof_chipdata rcar_gen3_data = { .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) | SPI_BPW_MASK(24) | SPI_BPW_MASK(32), .tx_fifo_size = 64, - .rx_fifo_size = 64, + .rx_fifo_size = 256, + .ctlr_flags = SPI_CONTROLLER_MUST_TX, + .min_div_pow = 1, +}; + +static const struct sh_msiof_chipdata rcar_gen4_data = { + .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) | + SPI_BPW_MASK(24) | SPI_BPW_MASK(32), + .tx_fifo_size = 256, + .rx_fifo_size = 256, .ctlr_flags = SPI_CONTROLLER_MUST_TX, .min_div_pow = 1, }; @@ -1079,7 +997,7 @@ static const struct sh_msiof_chipdata rcar_r8a7795_data = { .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) | SPI_BPW_MASK(24) | SPI_BPW_MASK(32), .tx_fifo_size = 64, - .rx_fifo_size = 64, + .rx_fifo_size = 256, .ctlr_flags = SPI_CONTROLLER_MUST_TX, .min_div_pow = 1, .flags = SH_MSIOF_FLAG_FIXED_DTDL_200, @@ -1087,20 +1005,14 @@ static const struct sh_msiof_chipdata rcar_r8a7795_data = { static const struct of_device_id sh_msiof_match[] __maybe_unused = { { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data }, - { .compatible = "renesas,msiof-r8a7743", .data = &rcar_gen2_data }, - { .compatible = "renesas,msiof-r8a7745", .data = &rcar_gen2_data }, - { .compatible = "renesas,msiof-r8a7790", .data = &rcar_gen2_data }, - { .compatible = "renesas,msiof-r8a7791", .data = &rcar_gen2_data }, - { .compatible = "renesas,msiof-r8a7792", .data = &rcar_gen2_data }, - { .compatible = "renesas,msiof-r8a7793", .data = &rcar_gen2_data }, - { .compatible = "renesas,msiof-r8a7794", .data = &rcar_gen2_data }, { .compatible = "renesas,rcar-gen2-msiof", .data = &rcar_gen2_data }, { .compatible = "renesas,msiof-r8a7795", .data = &rcar_r8a7795_data }, - { .compatible = "renesas,msiof-r8a7796", .data = &rcar_gen3_data }, { .compatible = "renesas,rcar-gen3-msiof", .data = &rcar_gen3_data }, - { .compatible = "renesas,rcar-gen4-msiof", .data = &rcar_gen3_data }, + { .compatible = "renesas,msiof-r8a779a0", .data = &rcar_gen3_data }, + { .compatible = "renesas,msiof-r8a779f0", .data = &rcar_gen3_data }, + { .compatible = "renesas,rcar-gen4-msiof", .data = &rcar_gen4_data }, { .compatible = "renesas,sh-msiof", .data = &sh_data }, /* Deprecated */ - {}, + { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sh_msiof_match); @@ -1276,20 +1188,26 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) const struct sh_msiof_chipdata *chipdata; struct sh_msiof_spi_info *info; struct sh_msiof_spi_priv *p; + struct device *dev = &pdev->dev; unsigned long clksrc; int i; int ret; - chipdata = of_device_get_match_data(&pdev->dev); + /* 
Check whether MSIOF is used as I2S mode or SPI mode by checking "port" node */ + struct device_node *port __free(device_node) = of_graph_get_next_port(dev->of_node, NULL); + if (port) /* It was MSIOF-I2S */ + return -ENODEV; + + chipdata = of_device_get_match_data(dev); if (chipdata) { - info = sh_msiof_spi_parse_dt(&pdev->dev); + info = sh_msiof_spi_parse_dt(dev); } else { chipdata = (const void *)pdev->id_entry->driver_data; - info = dev_get_platdata(&pdev->dev); + info = dev_get_platdata(dev); } if (!info) { - dev_err(&pdev->dev, "failed to obtain device info\n"); + dev_err(dev, "failed to obtain device info\n"); return -ENXIO; } @@ -1297,11 +1215,9 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) info->dtdl = 200; if (info->mode == MSIOF_SPI_TARGET) - ctlr = spi_alloc_target(&pdev->dev, - sizeof(struct sh_msiof_spi_priv)); + ctlr = spi_alloc_target(dev, sizeof(struct sh_msiof_spi_priv)); else - ctlr = spi_alloc_host(&pdev->dev, - sizeof(struct sh_msiof_spi_priv)); + ctlr = spi_alloc_host(dev, sizeof(struct sh_msiof_spi_priv)); if (ctlr == NULL) return -ENOMEM; @@ -1315,9 +1231,9 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) init_completion(&p->done); init_completion(&p->done_txdma); - p->clk = devm_clk_get(&pdev->dev, NULL); + p->clk = devm_clk_get(dev, NULL); if (IS_ERR(p->clk)) { - dev_err(&pdev->dev, "cannot get clock\n"); + dev_err(dev, "cannot get clock\n"); ret = PTR_ERR(p->clk); goto err1; } @@ -1334,15 +1250,14 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) goto err1; } - ret = devm_request_irq(&pdev->dev, i, sh_msiof_spi_irq, 0, - dev_name(&pdev->dev), p); + ret = devm_request_irq(dev, i, sh_msiof_spi_irq, 0, dev_name(dev), p); if (ret) { - dev_err(&pdev->dev, "unable to request irq\n"); + dev_err(dev, "unable to request irq\n"); goto err1; } p->pdev = pdev; - pm_runtime_enable(&pdev->dev); + pm_runtime_enable(dev); /* Platform data may override FIFO sizes */ p->tx_fifo_size = chipdata->tx_fifo_size; @@ -1361,7 +1276,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) ctlr->flags = chipdata->ctlr_flags; ctlr->bus_num = pdev->id; ctlr->num_chipselect = p->info->num_chipselect; - ctlr->dev.of_node = pdev->dev.of_node; + ctlr->dev.of_node = dev->of_node; ctlr->setup = sh_msiof_spi_setup; ctlr->prepare_message = sh_msiof_prepare_message; ctlr->target_abort = sh_msiof_target_abort; @@ -1373,11 +1288,11 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) ret = sh_msiof_request_dma(p); if (ret < 0) - dev_warn(&pdev->dev, "DMA not available, using PIO\n"); + dev_warn(dev, "DMA not available, using PIO\n"); - ret = devm_spi_register_controller(&pdev->dev, ctlr); + ret = devm_spi_register_controller(dev, ctlr); if (ret < 0) { - dev_err(&pdev->dev, "devm_spi_register_controller error.\n"); + dev_err(dev, "devm_spi_register_controller error.\n"); goto err2; } @@ -1385,7 +1300,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) err2: sh_msiof_release_dma(p); - pm_runtime_disable(&pdev->dev); + pm_runtime_disable(dev); err1: spi_controller_put(ctlr); return ret; diff --git a/drivers/spi/spi-stm32-ospi.c b/drivers/spi/spi-stm32-ospi.c new file mode 100644 index 000000000000..4ab7e86f4bd5 --- /dev/null +++ b/drivers/spi/spi-stm32-ospi.c @@ -0,0 +1,1081 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) STMicroelectronics 2025 - All Rights Reserved + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> 
+#include <linux/err.h> +#include <linux/errno.h> +#include <linux/gpio/consumer.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_reserved_mem.h> +#include <linux/pinctrl/consumer.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> +#include <linux/sizes.h> +#include <linux/spi/spi-mem.h> +#include <linux/types.h> + +#define OSPI_CR 0x00 +#define CR_EN BIT(0) +#define CR_ABORT BIT(1) +#define CR_DMAEN BIT(2) +#define CR_FTHRES_SHIFT 8 +#define CR_TEIE BIT(16) +#define CR_TCIE BIT(17) +#define CR_SMIE BIT(19) +#define CR_APMS BIT(22) +#define CR_CSSEL BIT(24) +#define CR_FMODE_MASK GENMASK(29, 28) +#define CR_FMODE_INDW (0U) +#define CR_FMODE_INDR (1U) +#define CR_FMODE_APM (2U) +#define CR_FMODE_MM (3U) + +#define OSPI_DCR1 0x08 +#define DCR1_DLYBYP BIT(3) +#define DCR1_DEVSIZE_MASK GENMASK(20, 16) +#define DCR1_MTYP_MASK GENMASK(26, 24) +#define DCR1_MTYP_MX_MODE 1 +#define DCR1_MTYP_HP_MEMMODE 4 + +#define OSPI_DCR2 0x0c +#define DCR2_PRESC_MASK GENMASK(7, 0) + +#define OSPI_SR 0x20 +#define SR_TEF BIT(0) +#define SR_TCF BIT(1) +#define SR_FTF BIT(2) +#define SR_SMF BIT(3) +#define SR_BUSY BIT(5) + +#define OSPI_FCR 0x24 +#define FCR_CTEF BIT(0) +#define FCR_CTCF BIT(1) +#define FCR_CSMF BIT(3) + +#define OSPI_DLR 0x40 +#define OSPI_AR 0x48 +#define OSPI_DR 0x50 +#define OSPI_PSMKR 0x80 +#define OSPI_PSMAR 0x88 + +#define OSPI_CCR 0x100 +#define CCR_IMODE_MASK GENMASK(2, 0) +#define CCR_IDTR BIT(3) +#define CCR_ISIZE_MASK GENMASK(5, 4) +#define CCR_ADMODE_MASK GENMASK(10, 8) +#define CCR_ADMODE_8LINES 4 +#define CCR_ADDTR BIT(11) +#define CCR_ADSIZE_MASK GENMASK(13, 12) +#define CCR_ADSIZE_32BITS 3 +#define CCR_DMODE_MASK GENMASK(26, 24) +#define CCR_DMODE_8LINES 4 +#define CCR_DQSE BIT(29) +#define CCR_DDTR BIT(27) +#define CCR_BUSWIDTH_0 0x0 +#define CCR_BUSWIDTH_1 0x1 +#define CCR_BUSWIDTH_2 0x2 +#define CCR_BUSWIDTH_4 0x3 +#define CCR_BUSWIDTH_8 0x4 + +#define OSPI_TCR 0x108 +#define TCR_DCYC_MASK GENMASK(4, 0) +#define TCR_DHQC BIT(28) +#define TCR_SSHIFT BIT(30) + +#define OSPI_IR 0x110 + +#define STM32_OSPI_MAX_MMAP_SZ SZ_256M +#define STM32_OSPI_MAX_NORCHIP 2 + +#define STM32_FIFO_TIMEOUT_US 30000 +#define STM32_ABT_TIMEOUT_US 100000 +#define STM32_COMP_TIMEOUT_MS 5000 +#define STM32_BUSY_TIMEOUT_US 100000 + + +#define STM32_AUTOSUSPEND_DELAY -1 + +struct stm32_ospi { + struct device *dev; + struct spi_controller *ctrl; + struct clk *clk; + struct reset_control *rstc; + + struct completion data_completion; + struct completion match_completion; + + struct dma_chan *dma_chtx; + struct dma_chan *dma_chrx; + struct completion dma_completion; + + void __iomem *regs_base; + void __iomem *mm_base; + phys_addr_t regs_phys_base; + resource_size_t mm_size; + u32 clk_rate; + u32 fmode; + u32 cr_reg; + u32 dcr_reg; + u32 flash_presc[STM32_OSPI_MAX_NORCHIP]; + int irq; + unsigned long status_timeout; + + /* + * To protect device configuration, could be different between + * 2 flash access + */ + struct mutex lock; +}; + +static void stm32_ospi_read_fifo(u8 *val, void __iomem *addr) +{ + *val = readb_relaxed(addr); +} + +static void stm32_ospi_write_fifo(u8 *val, void __iomem *addr) +{ + writeb_relaxed(*val, addr); +} + +static int stm32_ospi_abort(struct stm32_ospi *ospi) +{ + void __iomem *regs_base = ospi->regs_base; + u32 cr; + 
int timeout; + + cr = readl_relaxed(regs_base + OSPI_CR) | CR_ABORT; + writel_relaxed(cr, regs_base + OSPI_CR); + + /* wait clear of abort bit by hw */ + timeout = readl_relaxed_poll_timeout_atomic(regs_base + OSPI_CR, + cr, !(cr & CR_ABORT), 1, + STM32_ABT_TIMEOUT_US); + + if (timeout) + dev_err(ospi->dev, "%s abort timeout:%d\n", __func__, timeout); + + return timeout; +} + +static int stm32_ospi_poll(struct stm32_ospi *ospi, u8 *buf, u32 len, bool read) +{ + void __iomem *regs_base = ospi->regs_base; + void (*fifo)(u8 *val, void __iomem *addr); + u32 sr; + int ret; + + if (read) + fifo = stm32_ospi_read_fifo; + else + fifo = stm32_ospi_write_fifo; + + while (len--) { + ret = readl_relaxed_poll_timeout_atomic(regs_base + OSPI_SR, + sr, sr & SR_FTF, 1, + STM32_FIFO_TIMEOUT_US); + if (ret) { + dev_err(ospi->dev, "fifo timeout (len:%d stat:%#x)\n", + len, sr); + return ret; + } + fifo(buf++, regs_base + OSPI_DR); + } + + return 0; +} + +static int stm32_ospi_wait_nobusy(struct stm32_ospi *ospi) +{ + u32 sr; + + return readl_relaxed_poll_timeout_atomic(ospi->regs_base + OSPI_SR, + sr, !(sr & SR_BUSY), 1, + STM32_BUSY_TIMEOUT_US); +} + +static int stm32_ospi_wait_cmd(struct stm32_ospi *ospi) +{ + void __iomem *regs_base = ospi->regs_base; + u32 cr, sr; + int err = 0; + + if ((readl_relaxed(regs_base + OSPI_SR) & SR_TCF) || + ospi->fmode == CR_FMODE_APM) + goto out; + + reinit_completion(&ospi->data_completion); + cr = readl_relaxed(regs_base + OSPI_CR); + writel_relaxed(cr | CR_TCIE | CR_TEIE, regs_base + OSPI_CR); + + if (!wait_for_completion_timeout(&ospi->data_completion, + msecs_to_jiffies(STM32_COMP_TIMEOUT_MS))) + err = -ETIMEDOUT; + + sr = readl_relaxed(regs_base + OSPI_SR); + if (sr & SR_TCF) + /* avoid false timeout */ + err = 0; + if (sr & SR_TEF) + err = -EIO; + +out: + /* clear flags */ + writel_relaxed(FCR_CTCF | FCR_CTEF, regs_base + OSPI_FCR); + + if (!err) + err = stm32_ospi_wait_nobusy(ospi); + + return err; +} + +static void stm32_ospi_dma_callback(void *arg) +{ + struct completion *dma_completion = arg; + + complete(dma_completion); +} + +static irqreturn_t stm32_ospi_irq(int irq, void *dev_id) +{ + struct stm32_ospi *ospi = (struct stm32_ospi *)dev_id; + void __iomem *regs_base = ospi->regs_base; + u32 cr, sr; + + cr = readl_relaxed(regs_base + OSPI_CR); + sr = readl_relaxed(regs_base + OSPI_SR); + + if (cr & CR_SMIE && sr & SR_SMF) { + /* disable irq */ + cr &= ~CR_SMIE; + writel_relaxed(cr, regs_base + OSPI_CR); + complete(&ospi->match_completion); + + return IRQ_HANDLED; + } + + if (sr & (SR_TEF | SR_TCF)) { + /* disable irq */ + cr &= ~CR_TCIE & ~CR_TEIE; + writel_relaxed(cr, regs_base + OSPI_CR); + complete(&ospi->data_completion); + } + + return IRQ_HANDLED; +} + +static void stm32_ospi_dma_setup(struct stm32_ospi *ospi, + struct dma_slave_config *dma_cfg) +{ + if (dma_cfg && ospi->dma_chrx) { + if (dmaengine_slave_config(ospi->dma_chrx, dma_cfg)) { + dev_err(ospi->dev, "dma rx config failed\n"); + dma_release_channel(ospi->dma_chrx); + ospi->dma_chrx = NULL; + } + } + + if (dma_cfg && ospi->dma_chtx) { + if (dmaengine_slave_config(ospi->dma_chtx, dma_cfg)) { + dev_err(ospi->dev, "dma tx config failed\n"); + dma_release_channel(ospi->dma_chtx); + ospi->dma_chtx = NULL; + } + } + + init_completion(&ospi->dma_completion); +} + +static int stm32_ospi_tx_mm(struct stm32_ospi *ospi, + const struct spi_mem_op *op) +{ + memcpy_fromio(op->data.buf.in, ospi->mm_base + op->addr.val, + op->data.nbytes); + return 0; +} + +static int stm32_ospi_tx_dma(struct stm32_ospi *ospi, + 
const struct spi_mem_op *op)
+{
+	struct dma_async_tx_descriptor *desc;
+	void __iomem *regs_base = ospi->regs_base;
+	enum dma_transfer_direction dma_dir;
+	struct dma_chan *dma_ch;
+	struct sg_table sgt;
+	dma_cookie_t cookie;
+	u32 cr, t_out;
+	int err;
+
+	if (op->data.dir == SPI_MEM_DATA_IN) {
+		dma_dir = DMA_DEV_TO_MEM;
+		dma_ch = ospi->dma_chrx;
+	} else {
+		dma_dir = DMA_MEM_TO_DEV;
+		dma_ch = ospi->dma_chtx;
+	}
+
+	/*
+	 * spi_map_buf() returns -EINVAL if the buffer is not DMA-able
+	 * (DMA-able: in vmalloc | kmap | virt_addr_valid)
+	 */
+	err = spi_controller_dma_map_mem_op_data(ospi->ctrl, op, &sgt);
+	if (err)
+		return err;
+
+	desc = dmaengine_prep_slave_sg(dma_ch, sgt.sgl, sgt.nents,
+				       dma_dir, DMA_PREP_INTERRUPT);
+	if (!desc) {
+		err = -ENOMEM;
+		goto out_unmap;
+	}
+
+	cr = readl_relaxed(regs_base + OSPI_CR);
+
+	reinit_completion(&ospi->dma_completion);
+	desc->callback = stm32_ospi_dma_callback;
+	desc->callback_param = &ospi->dma_completion;
+	cookie = dmaengine_submit(desc);
+	err = dma_submit_error(cookie);
+	if (err)
+		goto out;
+
+	dma_async_issue_pending(dma_ch);
+
+	writel_relaxed(cr | CR_DMAEN, regs_base + OSPI_CR);
+
+	t_out = sgt.nents * STM32_COMP_TIMEOUT_MS;
+	if (!wait_for_completion_timeout(&ospi->dma_completion,
+					 msecs_to_jiffies(t_out)))
+		err = -ETIMEDOUT;
+
+	if (err)
+		dmaengine_terminate_all(dma_ch);
+
+out:
+	writel_relaxed(cr & ~CR_DMAEN, regs_base + OSPI_CR);
+out_unmap:
+	spi_controller_dma_unmap_mem_op_data(ospi->ctrl, op, &sgt);
+
+	return err;
+}
+
+static int stm32_ospi_xfer(struct stm32_ospi *ospi, const struct spi_mem_op *op)
+{
+	u8 *buf;
+
+	if (!op->data.nbytes)
+		return 0;
+
+	if (ospi->fmode == CR_FMODE_MM)
+		return stm32_ospi_tx_mm(ospi, op);
+	else if (((op->data.dir == SPI_MEM_DATA_IN && ospi->dma_chrx) ||
+		  (op->data.dir == SPI_MEM_DATA_OUT && ospi->dma_chtx)) &&
+		 op->data.nbytes > 8)
+		if (!stm32_ospi_tx_dma(ospi, op))
+			return 0;
+
+	if (op->data.dir == SPI_MEM_DATA_IN)
+		buf = op->data.buf.in;
+	else
+		buf = (u8 *)op->data.buf.out;
+
+	return stm32_ospi_poll(ospi, buf, op->data.nbytes,
+			       op->data.dir == SPI_MEM_DATA_IN);
+}
+
+static int stm32_ospi_wait_poll_status(struct stm32_ospi *ospi,
+				       const struct spi_mem_op *op)
+{
+	void __iomem *regs_base = ospi->regs_base;
+	u32 cr;
+
+	reinit_completion(&ospi->match_completion);
+	cr = readl_relaxed(regs_base + OSPI_CR);
+	writel_relaxed(cr | CR_SMIE, regs_base + OSPI_CR);
+
+	if (!wait_for_completion_timeout(&ospi->match_completion,
+					 msecs_to_jiffies(ospi->status_timeout))) {
+		u32 sr = readl_relaxed(regs_base + OSPI_SR);
+
+		/* Avoid false timeout */
+		if (!(sr & SR_SMF))
+			return -ETIMEDOUT;
+	}
+
+	writel_relaxed(FCR_CSMF, regs_base + OSPI_FCR);
+
+	return 0;
+}
+
+static int stm32_ospi_get_mode(u8 buswidth)
+{
+	switch (buswidth) {
+	case 8:
+		return CCR_BUSWIDTH_8;
+	case 4:
+		return CCR_BUSWIDTH_4;
+	default:
+		return buswidth;
+	}
+}
+
+static int stm32_ospi_send(struct spi_device *spi, const struct spi_mem_op *op)
+{
+	struct stm32_ospi *ospi = spi_controller_get_devdata(spi->controller);
+	void __iomem *regs_base = ospi->regs_base;
+	u32 ccr, cr, dcr2, tcr;
+	int timeout, err = 0, err_poll_status = 0;
+	u8 cs = spi->chip_select[ffs(spi->cs_index_mask) - 1];
+
+	dev_dbg(ospi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
+		op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
+		op->dummy.buswidth, op->data.buswidth,
+		op->addr.val, op->data.nbytes);
+
+	cr = readl_relaxed(ospi->regs_base + OSPI_CR);
+	cr &= ~CR_CSSEL;
+	cr |= FIELD_PREP(CR_CSSEL, cs);
+	cr &= 
~CR_FMODE_MASK;
+	cr |= FIELD_PREP(CR_FMODE_MASK, ospi->fmode);
+	writel_relaxed(cr, regs_base + OSPI_CR);
+
+	if (op->data.nbytes)
+		writel_relaxed(op->data.nbytes - 1, regs_base + OSPI_DLR);
+
+	/* set prescaler */
+	dcr2 = readl_relaxed(regs_base + OSPI_DCR2);
+	dcr2 |= FIELD_PREP(DCR2_PRESC_MASK, ospi->flash_presc[cs]);
+	writel_relaxed(dcr2, regs_base + OSPI_DCR2);
+
+	ccr = FIELD_PREP(CCR_IMODE_MASK, stm32_ospi_get_mode(op->cmd.buswidth));
+
+	if (op->addr.nbytes) {
+		ccr |= FIELD_PREP(CCR_ADMODE_MASK,
+				  stm32_ospi_get_mode(op->addr.buswidth));
+		ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1);
+	}
+
+	tcr = TCR_SSHIFT;
+	if (op->dummy.buswidth && op->dummy.nbytes) {
+		tcr |= FIELD_PREP(TCR_DCYC_MASK,
+				  op->dummy.nbytes * 8 / op->dummy.buswidth);
+	}
+	writel_relaxed(tcr, regs_base + OSPI_TCR);
+
+	if (op->data.nbytes) {
+		ccr |= FIELD_PREP(CCR_DMODE_MASK,
+				  stm32_ospi_get_mode(op->data.buswidth));
+	}
+
+	writel_relaxed(ccr, regs_base + OSPI_CCR);
+
+	/* set instruction, must be set after ccr register update */
+	writel_relaxed(op->cmd.opcode, regs_base + OSPI_IR);
+
+	if (op->addr.nbytes && ospi->fmode != CR_FMODE_MM)
+		writel_relaxed(op->addr.val, regs_base + OSPI_AR);
+
+	if (ospi->fmode == CR_FMODE_APM)
+		err_poll_status = stm32_ospi_wait_poll_status(ospi, op);
+
+	err = stm32_ospi_xfer(ospi, op);
+
+	/*
+	 * Abort in:
+	 * - the error case
+	 * - memory-mapped read: prefetching must be stopped if we read the
+	 *   last byte of the device (device size - fifo size). As the device
+	 *   size is not known here, prefetching is always stopped.
+	 */
+	if (err || err_poll_status || ospi->fmode == CR_FMODE_MM)
+		goto abort;
+
+	/* Wait end of tx in indirect mode */
+	err = stm32_ospi_wait_cmd(ospi);
+	if (err)
+		goto abort;
+
+	return 0;
+
+abort:
+	timeout = stm32_ospi_abort(ospi);
+	writel_relaxed(FCR_CTCF | FCR_CSMF, regs_base + OSPI_FCR);
+
+	if (err || err_poll_status || timeout)
+		dev_err(ospi->dev, "%s err:%d err_poll_status:%d abort timeout:%d\n",
+			__func__, err, err_poll_status, timeout);
+
+	return err;
+}
+
+static int stm32_ospi_poll_status(struct spi_mem *mem,
+				  const struct spi_mem_op *op,
+				  u16 mask, u16 match,
+				  unsigned long initial_delay_us,
+				  unsigned long polling_rate_us,
+				  unsigned long timeout_ms)
+{
+	struct stm32_ospi *ospi = spi_controller_get_devdata(mem->spi->controller);
+	void __iomem *regs_base = ospi->regs_base;
+	int ret;
+
+	ret = pm_runtime_resume_and_get(ospi->dev);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&ospi->lock);
+
+	writel_relaxed(mask, regs_base + OSPI_PSMKR);
+	writel_relaxed(match, regs_base + OSPI_PSMAR);
+	ospi->fmode = CR_FMODE_APM;
+	ospi->status_timeout = timeout_ms;
+
+	ret = stm32_ospi_send(mem->spi, op);
+	mutex_unlock(&ospi->lock);
+
+	pm_runtime_mark_last_busy(ospi->dev);
+	pm_runtime_put_autosuspend(ospi->dev);
+
+	return ret;
+}
+
+static int stm32_ospi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+	struct stm32_ospi *ospi = spi_controller_get_devdata(mem->spi->controller);
+	int ret;
+
+	ret = pm_runtime_resume_and_get(ospi->dev);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&ospi->lock);
+	if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes)
+		ospi->fmode = CR_FMODE_INDR;
+	else
+		ospi->fmode = CR_FMODE_INDW;
+
+	ret = stm32_ospi_send(mem->spi, op);
+	mutex_unlock(&ospi->lock);
+
+	pm_runtime_mark_last_busy(ospi->dev);
+	pm_runtime_put_autosuspend(ospi->dev);
+
+	return ret;
+}
+
+static int stm32_ospi_dirmap_create(struct spi_mem_dirmap_desc *desc)
+{
+	struct stm32_ospi *ospi = 
spi_controller_get_devdata(desc->mem->spi->controller); + + if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT) + return -EOPNOTSUPP; + + /* Should never happen, as mm_base == null is an error probe exit condition */ + if (!ospi->mm_base && desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN) + return -EOPNOTSUPP; + + if (!ospi->mm_size) + return -EOPNOTSUPP; + + return 0; +} + +static ssize_t stm32_ospi_dirmap_read(struct spi_mem_dirmap_desc *desc, + u64 offs, size_t len, void *buf) +{ + struct stm32_ospi *ospi = spi_controller_get_devdata(desc->mem->spi->controller); + struct spi_mem_op op; + u32 addr_max; + int ret; + + ret = pm_runtime_resume_and_get(ospi->dev); + if (ret < 0) + return ret; + + mutex_lock(&ospi->lock); + /* + * Make a local copy of desc op_tmpl and complete dirmap rdesc + * spi_mem_op template with offs, len and *buf in order to get + * all needed transfer information into struct spi_mem_op + */ + memcpy(&op, &desc->info.op_tmpl, sizeof(struct spi_mem_op)); + dev_dbg(ospi->dev, "%s len = 0x%zx offs = 0x%llx buf = 0x%p\n", __func__, len, offs, buf); + + op.data.nbytes = len; + op.addr.val = desc->info.offset + offs; + op.data.buf.in = buf; + + addr_max = op.addr.val + op.data.nbytes + 1; + if (addr_max < ospi->mm_size && op.addr.buswidth) + ospi->fmode = CR_FMODE_MM; + else + ospi->fmode = CR_FMODE_INDR; + + ret = stm32_ospi_send(desc->mem->spi, &op); + mutex_unlock(&ospi->lock); + + pm_runtime_mark_last_busy(ospi->dev); + pm_runtime_put_autosuspend(ospi->dev); + + return ret ?: len; +} + +static int stm32_ospi_transfer_one_message(struct spi_controller *ctrl, + struct spi_message *msg) +{ + struct stm32_ospi *ospi = spi_controller_get_devdata(ctrl); + struct spi_transfer *transfer; + struct spi_device *spi = msg->spi; + struct spi_mem_op op; + struct gpio_desc *cs_gpiod = spi->cs_gpiod[ffs(spi->cs_index_mask) - 1]; + int ret = 0; + + if (!cs_gpiod) + return -EOPNOTSUPP; + + ret = pm_runtime_resume_and_get(ospi->dev); + if (ret < 0) + return ret; + + mutex_lock(&ospi->lock); + + gpiod_set_value_cansleep(cs_gpiod, true); + + list_for_each_entry(transfer, &msg->transfers, transfer_list) { + u8 dummy_bytes = 0; + + memset(&op, 0, sizeof(op)); + + dev_dbg(ospi->dev, "tx_buf:%p tx_nbits:%d rx_buf:%p rx_nbits:%d len:%d dummy_data:%d\n", + transfer->tx_buf, transfer->tx_nbits, + transfer->rx_buf, transfer->rx_nbits, + transfer->len, transfer->dummy_data); + + /* + * OSPI hardware supports dummy bytes transfer. 
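+		 * (For example, a spi-mem style read typically arrives here
+		 * as three transfers: opcode/address bytes (tx), a dummy-byte
+		 * transfer with ->dummy_data set, then the data transfer.)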
+		 * If the current transfer carries dummy bytes, merge it with
+		 * the next transfer so the OSPI block constraint is taken
+		 * into account
+		 */
+		if (transfer->dummy_data) {
+			op.dummy.buswidth = transfer->tx_nbits;
+			op.dummy.nbytes = transfer->len;
+			dummy_bytes = transfer->len;
+
+			/* If this happens, the message was not built correctly */
+			if (list_is_last(&transfer->transfer_list, &msg->transfers)) {
+				ret = -EINVAL;
+				goto end_of_transfer;
+			}
+
+			transfer = list_next_entry(transfer, transfer_list);
+		}
+
+		op.data.nbytes = transfer->len;
+
+		if (transfer->rx_buf) {
+			ospi->fmode = CR_FMODE_INDR;
+			op.data.buswidth = transfer->rx_nbits;
+			op.data.dir = SPI_MEM_DATA_IN;
+			op.data.buf.in = transfer->rx_buf;
+		} else {
+			ospi->fmode = CR_FMODE_INDW;
+			op.data.buswidth = transfer->tx_nbits;
+			op.data.dir = SPI_MEM_DATA_OUT;
+			op.data.buf.out = transfer->tx_buf;
+		}
+
+		ret = stm32_ospi_send(spi, &op);
+		if (ret)
+			goto end_of_transfer;
+
+		msg->actual_length += transfer->len + dummy_bytes;
+	}
+
+end_of_transfer:
+	gpiod_set_value_cansleep(cs_gpiod, false);
+
+	mutex_unlock(&ospi->lock);
+
+	msg->status = ret;
+	spi_finalize_current_message(ctrl);
+
+	pm_runtime_mark_last_busy(ospi->dev);
+	pm_runtime_put_autosuspend(ospi->dev);
+
+	return ret;
+}
+
+static int stm32_ospi_setup(struct spi_device *spi)
+{
+	struct spi_controller *ctrl = spi->controller;
+	struct stm32_ospi *ospi = spi_controller_get_devdata(ctrl);
+	void __iomem *regs_base = ospi->regs_base;
+	int ret;
+	u8 cs = spi->chip_select[ffs(spi->cs_index_mask) - 1];
+
+	if (ctrl->busy)
+		return -EBUSY;
+
+	if (!spi->max_speed_hz)
+		return -EINVAL;
+
+	ret = pm_runtime_resume_and_get(ospi->dev);
+	if (ret < 0)
+		return ret;
+
+	ospi->flash_presc[cs] = DIV_ROUND_UP(ospi->clk_rate, spi->max_speed_hz) - 1;
+
+	mutex_lock(&ospi->lock);
+
+	ospi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_EN;
+	writel_relaxed(ospi->cr_reg, regs_base + OSPI_CR);
+
+	/* set dcr fsize to max address */
+	ospi->dcr_reg = DCR1_DEVSIZE_MASK | DCR1_DLYBYP;
+	writel_relaxed(ospi->dcr_reg, regs_base + OSPI_DCR1);
+
+	mutex_unlock(&ospi->lock);
+
+	pm_runtime_mark_last_busy(ospi->dev);
+	pm_runtime_put_autosuspend(ospi->dev);
+
+	return 0;
+}
+
+/*
+ * No special host constraint, so use the default spi_mem_default_supports_op
+ * to check the supported modes.
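+ * (spi_mem_default_supports_op() essentially validates each op phase's
+ * buswidth against the controller's declared mode bits, e.g. SPI_RX_OCTAL
+ * for 8-line reads.)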
+ */
+static const struct spi_controller_mem_ops stm32_ospi_mem_ops = {
+	.exec_op = stm32_ospi_exec_op,
+	.dirmap_create = stm32_ospi_dirmap_create,
+	.dirmap_read = stm32_ospi_dirmap_read,
+	.poll_status = stm32_ospi_poll_status,
+};
+
+static int stm32_ospi_get_resources(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct stm32_ospi *ospi = platform_get_drvdata(pdev);
+	struct resource *res;
+	struct reserved_mem *rmem = NULL;
+	struct device_node *node;
+	int ret;
+
+	ospi->regs_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+	if (IS_ERR(ospi->regs_base))
+		return PTR_ERR(ospi->regs_base);
+
+	ospi->regs_phys_base = res->start;
+
+	ospi->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(ospi->clk))
+		return dev_err_probe(dev, PTR_ERR(ospi->clk),
+				     "Can't get clock\n");
+
+	ospi->clk_rate = clk_get_rate(ospi->clk);
+	if (!ospi->clk_rate) {
+		dev_err(dev, "Invalid clock rate\n");
+		return -EINVAL;
+	}
+
+	ospi->irq = platform_get_irq(pdev, 0);
+	if (ospi->irq < 0)
+		return ospi->irq;
+
+	ret = devm_request_irq(dev, ospi->irq, stm32_ospi_irq, 0,
+			       dev_name(dev), ospi);
+	if (ret) {
+		dev_err(dev, "Failed to request irq\n");
+		return ret;
+	}
+
+	ospi->rstc = devm_reset_control_array_get_exclusive_released(dev);
+	if (IS_ERR(ospi->rstc))
+		return dev_err_probe(dev, PTR_ERR(ospi->rstc),
+				     "Can't get reset\n");
+
+	ospi->dma_chrx = dma_request_chan(dev, "rx");
+	if (IS_ERR(ospi->dma_chrx)) {
+		ret = PTR_ERR(ospi->dma_chrx);
+		ospi->dma_chrx = NULL;
+		if (ret == -EPROBE_DEFER)
+			goto err_dma;
+	}
+
+	ospi->dma_chtx = dma_request_chan(dev, "tx");
+	if (IS_ERR(ospi->dma_chtx)) {
+		ret = PTR_ERR(ospi->dma_chtx);
+		ospi->dma_chtx = NULL;
+		if (ret == -EPROBE_DEFER)
+			goto err_dma;
+	}
+
+	node = of_parse_phandle(dev->of_node, "memory-region", 0);
+	if (node)
+		rmem = of_reserved_mem_lookup(node);
+	of_node_put(node);
+
+	if (rmem) {
+		ospi->mm_size = rmem->size;
+		ospi->mm_base = devm_ioremap(dev, rmem->base, rmem->size);
+		if (!ospi->mm_base) {
+			dev_err(dev, "unable to map memory region: %pa+%pa\n",
+				&rmem->base, &rmem->size);
+			ret = -ENOMEM;
+			goto err_dma;
+		}
+
+		if (ospi->mm_size > STM32_OSPI_MAX_MMAP_SZ) {
+			dev_err(dev, "Memory map size outside bounds\n");
+			ret = -EINVAL;
+			goto err_dma;
+		}
+	} else {
+		dev_info(dev, "No memory-map region found\n");
+	}
+
+	init_completion(&ospi->data_completion);
+	init_completion(&ospi->match_completion);
+
+	return 0;
+
+err_dma:
+	dev_info(dev, "Can't get all resources (%d)\n", ret);
+
+	if (ospi->dma_chtx)
+		dma_release_channel(ospi->dma_chtx);
+	if (ospi->dma_chrx)
+		dma_release_channel(ospi->dma_chrx);
+
+	return ret;
+}
+
+static int stm32_ospi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct spi_controller *ctrl;
+	struct stm32_ospi *ospi;
+	struct dma_slave_config dma_cfg;
+	struct device_node *child;
+	int ret;
+	u8 spi_flash_count = 0;
+
+	/*
+	 * Flash subnodes sanity check:
+	 * 1 or 2 spi-nand/spi-nor flashes => supported
+	 * All other flash node configuration => not supported
+	 */
+	for_each_available_child_of_node(dev->of_node, child) {
+		if (of_device_is_compatible(child, "jedec,spi-nor") ||
+		    of_device_is_compatible(child, "spi-nand"))
+			spi_flash_count++;
+	}
+
+	if (spi_flash_count == 0 || spi_flash_count > 2) {
+		dev_err(dev, "Incorrect DT flash node\n");
+		return -ENODEV;
+	}
+
+	ctrl = devm_spi_alloc_host(dev, sizeof(*ospi));
+	if (!ctrl)
+		return -ENOMEM;
+
+	ospi = spi_controller_get_devdata(ctrl);
+	ospi->ctrl = ctrl;
+
+	ospi->dev = &pdev->dev;
+	
platform_set_drvdata(pdev, ospi); + + ret = stm32_ospi_get_resources(pdev); + if (ret) + return ret; + + memset(&dma_cfg, 0, sizeof(dma_cfg)); + dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + dma_cfg.src_addr = ospi->regs_phys_base + OSPI_DR; + dma_cfg.dst_addr = ospi->regs_phys_base + OSPI_DR; + dma_cfg.src_maxburst = 4; + dma_cfg.dst_maxburst = 4; + stm32_ospi_dma_setup(ospi, &dma_cfg); + + mutex_init(&ospi->lock); + + ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | + SPI_TX_DUAL | SPI_TX_QUAD | + SPI_TX_OCTAL | SPI_RX_OCTAL; + ctrl->flags = SPI_CONTROLLER_HALF_DUPLEX; + ctrl->setup = stm32_ospi_setup; + ctrl->bus_num = -1; + ctrl->mem_ops = &stm32_ospi_mem_ops; + ctrl->use_gpio_descriptors = true; + ctrl->transfer_one_message = stm32_ospi_transfer_one_message; + ctrl->num_chipselect = STM32_OSPI_MAX_NORCHIP; + ctrl->dev.of_node = dev->of_node; + + pm_runtime_enable(ospi->dev); + pm_runtime_set_autosuspend_delay(ospi->dev, STM32_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(ospi->dev); + + ret = pm_runtime_resume_and_get(ospi->dev); + if (ret < 0) + goto err_pm_enable; + + ret = reset_control_acquire(ospi->rstc); + if (ret) { + dev_err_probe(dev, ret, "Can not acquire reset %d\n", ret); + goto err_pm_resume; + } + + reset_control_assert(ospi->rstc); + udelay(2); + reset_control_deassert(ospi->rstc); + + ret = spi_register_controller(ctrl); + if (ret) { + /* Disable ospi */ + writel_relaxed(0, ospi->regs_base + OSPI_CR); + goto err_pm_resume; + } + + pm_runtime_mark_last_busy(ospi->dev); + pm_runtime_put_autosuspend(ospi->dev); + + return 0; + +err_pm_resume: + pm_runtime_put_sync_suspend(ospi->dev); + +err_pm_enable: + pm_runtime_force_suspend(ospi->dev); + mutex_destroy(&ospi->lock); + if (ospi->dma_chtx) + dma_release_channel(ospi->dma_chtx); + if (ospi->dma_chrx) + dma_release_channel(ospi->dma_chrx); + + return ret; +} + +static void stm32_ospi_remove(struct platform_device *pdev) +{ + struct stm32_ospi *ospi = platform_get_drvdata(pdev); + int ret; + + ret = pm_runtime_resume_and_get(ospi->dev); + if (ret < 0) + return; + + spi_unregister_controller(ospi->ctrl); + /* Disable ospi */ + writel_relaxed(0, ospi->regs_base + OSPI_CR); + mutex_destroy(&ospi->lock); + + if (ospi->dma_chtx) + dma_release_channel(ospi->dma_chtx); + if (ospi->dma_chrx) + dma_release_channel(ospi->dma_chrx); + + reset_control_release(ospi->rstc); + + pm_runtime_put_sync_suspend(ospi->dev); + pm_runtime_force_suspend(ospi->dev); +} + +static int __maybe_unused stm32_ospi_suspend(struct device *dev) +{ + struct stm32_ospi *ospi = dev_get_drvdata(dev); + + pinctrl_pm_select_sleep_state(dev); + + reset_control_release(ospi->rstc); + + return pm_runtime_force_suspend(ospi->dev); +} + +static int __maybe_unused stm32_ospi_resume(struct device *dev) +{ + struct stm32_ospi *ospi = dev_get_drvdata(dev); + void __iomem *regs_base = ospi->regs_base; + int ret; + + ret = pm_runtime_force_resume(ospi->dev); + if (ret < 0) + return ret; + + pinctrl_pm_select_default_state(dev); + + ret = pm_runtime_resume_and_get(ospi->dev); + if (ret < 0) + return ret; + + ret = reset_control_acquire(ospi->rstc); + if (ret) { + dev_err(dev, "Can not acquire reset\n"); + return ret; + } + + writel_relaxed(ospi->cr_reg, regs_base + OSPI_CR); + writel_relaxed(ospi->dcr_reg, regs_base + OSPI_DCR1); + pm_runtime_mark_last_busy(ospi->dev); + pm_runtime_put_autosuspend(ospi->dev); + + return 0; +} + +static int __maybe_unused stm32_ospi_runtime_suspend(struct device *dev) +{ + struct 
stm32_ospi *ospi = dev_get_drvdata(dev); + + clk_disable_unprepare(ospi->clk); + + return 0; +} + +static int __maybe_unused stm32_ospi_runtime_resume(struct device *dev) +{ + struct stm32_ospi *ospi = dev_get_drvdata(dev); + + return clk_prepare_enable(ospi->clk); +} + +static const struct dev_pm_ops stm32_ospi_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(stm32_ospi_suspend, stm32_ospi_resume) + SET_RUNTIME_PM_OPS(stm32_ospi_runtime_suspend, + stm32_ospi_runtime_resume, NULL) +}; + +static const struct of_device_id stm32_ospi_of_match[] = { + { .compatible = "st,stm32mp25-ospi" }, + {}, +}; +MODULE_DEVICE_TABLE(of, stm32_ospi_of_match); + +static struct platform_driver stm32_ospi_driver = { + .probe = stm32_ospi_probe, + .remove = stm32_ospi_remove, + .driver = { + .name = "stm32-ospi", + .pm = &stm32_ospi_pm_ops, + .of_match_table = stm32_ospi_of_match, + }, +}; +module_platform_driver(stm32_ospi_driver); + +MODULE_DESCRIPTION("STMicroelectronics STM32 OCTO SPI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c index 540b6948b24d..9691197bbf5a 100644 --- a/drivers/spi/spi-stm32-qspi.c +++ b/drivers/spi/spi-stm32-qspi.c @@ -362,11 +362,6 @@ static int stm32_qspi_send(struct spi_device *spi, const struct spi_mem_op *op) u32 ccr, cr; int timeout, err = 0, err_poll_status = 0; - dev_dbg(qspi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n", - op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth, - op->dummy.buswidth, op->data.buswidth, - op->addr.val, op->data.nbytes); - cr = readl_relaxed(qspi->io_base + QSPI_CR); cr &= ~CR_PRESC_MASK & ~CR_FSEL; cr |= FIELD_PREP(CR_PRESC_MASK, flash->presc); diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index da3517d7102d..dc22b98bdbcc 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -2069,9 +2069,15 @@ static int stm32_spi_probe(struct platform_device *pdev) struct resource *res; struct reset_control *rst; struct device_node *np = pdev->dev.of_node; + const struct stm32_spi_cfg *cfg; bool device_mode; int ret; - const struct stm32_spi_cfg *cfg = of_device_get_match_data(&pdev->dev); + + cfg = of_device_get_match_data(&pdev->dev); + if (!cfg) { + dev_err(&pdev->dev, "Failed to get match data for platform\n"); + return -ENODEV; + } device_mode = of_property_read_bool(np, "spi-slave"); if (!cfg->has_device_mode && device_mode) { diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c index fcbe864c9b7d..aa92fd5a35a9 100644 --- a/drivers/spi/spi-sun4i.c +++ b/drivers/spi/spi-sun4i.c @@ -264,6 +264,9 @@ static int sun4i_spi_transfer_one(struct spi_controller *host, else reg |= SUN4I_CTL_DHB; + /* Now that the settings are correct, enable the interface */ + reg |= SUN4I_CTL_ENABLE; + sun4i_spi_write(sspi, SUN4I_CTL_REG, reg); /* Ensure that we have a parent clock fast enough */ @@ -404,7 +407,7 @@ static int sun4i_spi_runtime_resume(struct device *dev) } sun4i_spi_write(sspi, SUN4I_CTL_REG, - SUN4I_CTL_ENABLE | SUN4I_CTL_MASTER | SUN4I_CTL_TP); + SUN4I_CTL_MASTER | SUN4I_CTL_TP); return 0; @@ -462,6 +465,7 @@ static int sun4i_spi_probe(struct platform_device *pdev) sspi->host = host; host->max_speed_hz = 100 * 1000 * 1000; host->min_speed_hz = 3 * 1000; + host->use_gpio_descriptors = true; host->set_cs = sun4i_spi_set_cs; host->transfer_one = sun4i_spi_transfer_one; host->num_chipselect = 4; diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index 3822d7c8d8ed..795a8482c2c7 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ 
-728,9 +728,9 @@ static int tegra_spi_set_hw_cs_timing(struct spi_device *spi) u32 inactive_cycles; u8 cs_state; - if (setup->unit != SPI_DELAY_UNIT_SCK || - hold->unit != SPI_DELAY_UNIT_SCK || - inactive->unit != SPI_DELAY_UNIT_SCK) { + if ((setup->value && setup->unit != SPI_DELAY_UNIT_SCK) || + (hold->value && hold->unit != SPI_DELAY_UNIT_SCK) || + (inactive->value && inactive->unit != SPI_DELAY_UNIT_SCK)) { dev_err(&spi->dev, "Invalid delay unit %d, should be SPI_DELAY_UNIT_SCK\n", SPI_DELAY_UNIT_SCK); diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c index 08e49a876894..3be7499db21e 100644 --- a/drivers/spi/spi-tegra210-quad.c +++ b/drivers/spi/spi-tegra210-quad.c @@ -22,6 +22,7 @@ #include <linux/spi/spi.h> #include <linux/acpi.h> #include <linux/property.h> +#include <linux/sizes.h> #define QSPI_COMMAND1 0x000 #define QSPI_BIT_LENGTH(x) (((x) & 0x1f) << 0) @@ -110,6 +111,9 @@ #define QSPI_DMA_BLK 0x024 #define QSPI_DMA_BLK_SET(x) (((x) & 0xffff) << 0) +#define QSPI_DMA_MEM_ADDRESS 0x028 +#define QSPI_DMA_HI_ADDRESS 0x02c + #define QSPI_TX_FIFO 0x108 #define QSPI_RX_FIFO 0x188 @@ -134,7 +138,7 @@ #define QSPI_COMMAND_VALUE_SET(X) (((x) & 0xFF) << 0) #define QSPI_CMB_SEQ_CMD_CFG 0x1a0 -#define QSPI_COMMAND_X1_X2_X4(x) (((x) & 0x3) << 13) +#define QSPI_COMMAND_X1_X2_X4(x) ((((x) >> 1) & 0x3) << 13) #define QSPI_COMMAND_X1_X2_X4_MASK (0x03 << 13) #define QSPI_COMMAND_SDR_DDR BIT(12) #define QSPI_COMMAND_SIZE_SET(x) (((x) & 0xFF) << 0) @@ -147,7 +151,7 @@ #define QSPI_ADDRESS_VALUE_SET(X) (((x) & 0xFFFF) << 0) #define QSPI_CMB_SEQ_ADDR_CFG 0x1ac -#define QSPI_ADDRESS_X1_X2_X4(x) (((x) & 0x3) << 13) +#define QSPI_ADDRESS_X1_X2_X4(x) ((((x) >> 1) & 0x3) << 13) #define QSPI_ADDRESS_X1_X2_X4_MASK (0x03 << 13) #define QSPI_ADDRESS_SDR_DDR BIT(12) #define QSPI_ADDRESS_SIZE_SET(x) (((x) & 0xFF) << 0) @@ -156,15 +160,19 @@ #define DATA_DIR_RX BIT(1) #define QSPI_DMA_TIMEOUT (msecs_to_jiffies(1000)) -#define DEFAULT_QSPI_DMA_BUF_LEN (64 * 1024) -#define CMD_TRANSFER 0 -#define ADDR_TRANSFER 1 -#define DATA_TRANSFER 2 +#define DEFAULT_QSPI_DMA_BUF_LEN SZ_64K + +enum tegra_qspi_transfer_type { + CMD_TRANSFER = 0, + ADDR_TRANSFER = 1, + DUMMY_TRANSFER = 2, + DATA_TRANSFER = 3 +}; struct tegra_qspi_soc_data { - bool has_dma; bool cmb_xfer_capable; bool supports_tpm; + bool has_ext_dma; unsigned int cs_count; }; @@ -399,9 +407,6 @@ tegra_qspi_read_rx_fifo_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_tra static void tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_transfer *t) { - dma_sync_single_for_cpu(tqspi->dev, tqspi->tx_dma_phys, - tqspi->dma_buf_size, DMA_TO_DEVICE); - /* * In packed mode, each word in FIFO may contain multiple packets * based on bits per word. So all bytes in each FIFO word are valid. 
@@ -434,17 +439,11 @@ tegra_qspi_copy_client_txbuf_to_qspi_txbuf(struct tegra_qspi *tqspi, struct spi_ tqspi->cur_tx_pos += write_bytes; } - - dma_sync_single_for_device(tqspi->dev, tqspi->tx_dma_phys, - tqspi->dma_buf_size, DMA_TO_DEVICE); } static void tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_transfer *t) { - dma_sync_single_for_cpu(tqspi->dev, tqspi->rx_dma_phys, - tqspi->dma_buf_size, DMA_FROM_DEVICE); - if (tqspi->is_packed) { tqspi->cur_rx_pos += tqspi->curr_dma_words * tqspi->bytes_per_word; } else { @@ -470,9 +469,6 @@ tegra_qspi_copy_qspi_rxbuf_to_client_rxbuf(struct tegra_qspi *tqspi, struct spi_ tqspi->cur_rx_pos += read_bytes; } - - dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys, - tqspi->dma_buf_size, DMA_FROM_DEVICE); } static void tegra_qspi_dma_complete(void *args) @@ -600,13 +596,16 @@ static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_trans len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4; - dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE); - dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE); + if (t->tx_buf) + dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE); + if (t->rx_buf) + dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE); } static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t) { struct dma_slave_config dma_sconfig = { 0 }; + dma_addr_t rx_dma_phys, tx_dma_phys; unsigned int len; u8 dma_burst; int ret = 0; @@ -629,60 +628,84 @@ static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct len = tqspi->curr_dma_words * 4; /* set attention level based on length of transfer */ - val = 0; - if (len & 0xf) { - val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1; - dma_burst = 1; - } else if (((len) >> 4) & 0x1) { - val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4; - dma_burst = 4; - } else { - val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8; - dma_burst = 8; + if (tqspi->soc_data->has_ext_dma) { + val = 0; + if (len & 0xf) { + val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1; + dma_burst = 1; + } else if (((len) >> 4) & 0x1) { + val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4; + dma_burst = 4; + } else { + val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8; + dma_burst = 8; + } + + tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL); } - tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL); tqspi->dma_control_reg = val; dma_sconfig.device_fc = true; + if (tqspi->cur_direction & DATA_DIR_TX) { - dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO; - dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - dma_sconfig.dst_maxburst = dma_burst; - ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig); - if (ret < 0) { - dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret); - return ret; - } + if (tqspi->tx_dma_chan) { + dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO; + dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + dma_sconfig.dst_maxburst = dma_burst; + ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig); + if (ret < 0) { + dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret); + return ret; + } - tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t); - ret = tegra_qspi_start_tx_dma(tqspi, t, len); - if (ret < 0) { - dev_err(tqspi->dev, "failed to starting TX DMA: %d\n", ret); - return ret; + tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t); + ret = tegra_qspi_start_tx_dma(tqspi, t, len); + if (ret < 0) { + dev_err(tqspi->dev, "failed to start TX DMA: %d\n", ret); + return ret; + } + } else { + if
(tqspi->is_packed) + tx_dma_phys = t->tx_dma; + else + tx_dma_phys = tqspi->tx_dma_phys; + tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t); + tegra_qspi_writel(tqspi, lower_32_bits(tx_dma_phys), + QSPI_DMA_MEM_ADDRESS); + tegra_qspi_writel(tqspi, (upper_32_bits(tx_dma_phys) & 0xff), + QSPI_DMA_HI_ADDRESS); } } if (tqspi->cur_direction & DATA_DIR_RX) { - dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO; - dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - dma_sconfig.src_maxburst = dma_burst; - ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig); - if (ret < 0) { - dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret); - return ret; - } - - dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys, - tqspi->dma_buf_size, - DMA_FROM_DEVICE); + if (tqspi->rx_dma_chan) { + dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO; + dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + dma_sconfig.src_maxburst = dma_burst; + ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig); + if (ret < 0) { + dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret); + return ret; + } - ret = tegra_qspi_start_rx_dma(tqspi, t, len); - if (ret < 0) { - dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret); - if (tqspi->cur_direction & DATA_DIR_TX) - dmaengine_terminate_all(tqspi->tx_dma_chan); - return ret; + ret = tegra_qspi_start_rx_dma(tqspi, t, len); + if (ret < 0) { + dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret); + if (tqspi->cur_direction & DATA_DIR_TX) + dmaengine_terminate_all(tqspi->tx_dma_chan); + return ret; + } + } else { + if (tqspi->is_packed) + rx_dma_phys = t->rx_dma; + else + rx_dma_phys = tqspi->rx_dma_phys; + + tegra_qspi_writel(tqspi, lower_32_bits(rx_dma_phys), + QSPI_DMA_MEM_ADDRESS); + tegra_qspi_writel(tqspi, (upper_32_bits(rx_dma_phys) & 0xff), + QSPI_DMA_HI_ADDRESS); } } @@ -721,9 +744,6 @@ static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct s static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi) { - if (!tqspi->soc_data->has_dma) - return; - if (tqspi->tx_dma_buf) { dma_free_coherent(tqspi->dev, tqspi->dma_buf_size, tqspi->tx_dma_buf, tqspi->tx_dma_phys); @@ -754,16 +774,29 @@ static int tegra_qspi_init_dma(struct tegra_qspi *tqspi) u32 *dma_buf; int err; - if (!tqspi->soc_data->has_dma) - return 0; + if (tqspi->soc_data->has_ext_dma) { + dma_chan = dma_request_chan(tqspi->dev, "rx"); + if (IS_ERR(dma_chan)) { + err = PTR_ERR(dma_chan); + goto err_out; + } - dma_chan = dma_request_chan(tqspi->dev, "rx"); - if (IS_ERR(dma_chan)) { - err = PTR_ERR(dma_chan); - goto err_out; - } + tqspi->rx_dma_chan = dma_chan; - tqspi->rx_dma_chan = dma_chan; + dma_chan = dma_request_chan(tqspi->dev, "tx"); + if (IS_ERR(dma_chan)) { + err = PTR_ERR(dma_chan); + goto err_out; + } + + tqspi->tx_dma_chan = dma_chan; + } else { + if (!device_iommu_mapped(tqspi->dev)) { + dev_warn(tqspi->dev, + "IOMMU not enabled in device-tree, falling back to PIO mode\n"); + return 0; + } + } dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL); if (!dma_buf) { @@ -774,14 +807,6 @@ static int tegra_qspi_init_dma(struct tegra_qspi *tqspi) tqspi->rx_dma_buf = dma_buf; tqspi->rx_dma_phys = dma_phys; - dma_chan = dma_request_chan(tqspi->dev, "tx"); - if (IS_ERR(dma_chan)) { - err = PTR_ERR(dma_chan); - goto err_out; - } - - tqspi->tx_dma_chan = dma_chan; - dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL); if (!dma_buf) { err = -ENOMEM; @@ -1036,10 +1061,6 @@ static u32 
tegra_qspi_addr_config(bool is_ddr, u8 bus_width, u8 len) { u32 addr_config = 0; - /* Extract Address configuration and value */ - is_ddr = 0; //Only SDR mode supported - bus_width = 0; //X1 mode - if (is_ddr) addr_config |= QSPI_ADDRESS_SDR_DDR; else @@ -1079,16 +1100,23 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi, switch (transfer_phase) { case CMD_TRANSFER: /* X1 SDR mode */ - cmd_config = tegra_qspi_cmd_config(false, 0, + cmd_config = tegra_qspi_cmd_config(false, xfer->tx_nbits, xfer->len); cmd_value = *((const u8 *)(xfer->tx_buf)); break; case ADDR_TRANSFER: /* X1 SDR mode */ - addr_config = tegra_qspi_addr_config(false, 0, + addr_config = tegra_qspi_addr_config(false, xfer->tx_nbits, xfer->len); address_value = *((const u32 *)(xfer->tx_buf)); break; + case DUMMY_TRANSFER: + if (xfer->dummy_data) { + tqspi->dummy_cycles = xfer->len * 8 / xfer->tx_nbits; + break; + } + transfer_phase++; + fallthrough; case DATA_TRANSFER: /* Program Command, Address value in register */ tegra_qspi_writel(tqspi, cmd_value, QSPI_CMB_SEQ_CMD); @@ -1117,18 +1145,17 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi, (&tqspi->xfer_completion, QSPI_DMA_TIMEOUT); - if (WARN_ON(ret == 0)) { - dev_err(tqspi->dev, "QSPI Transfer failed with timeout: %d\n", - ret); - if (tqspi->is_curr_dma_xfer && - (tqspi->cur_direction & DATA_DIR_TX)) - dmaengine_terminate_all - (tqspi->tx_dma_chan); - - if (tqspi->is_curr_dma_xfer && - (tqspi->cur_direction & DATA_DIR_RX)) - dmaengine_terminate_all - (tqspi->rx_dma_chan); + if (WARN_ON_ONCE(ret == 0)) { + dev_err_ratelimited(tqspi->dev, + "QSPI Transfer failed with timeout\n"); + if (tqspi->is_curr_dma_xfer) { + if ((tqspi->cur_direction & DATA_DIR_TX) && + tqspi->tx_dma_chan) + dmaengine_terminate_all(tqspi->tx_dma_chan); + if ((tqspi->cur_direction & DATA_DIR_RX) && + tqspi->rx_dma_chan) + dmaengine_terminate_all(tqspi->rx_dma_chan); + } /* Abort transfer by resetting pio/dma bit */ if (!tqspi->is_curr_dma_xfer) { @@ -1163,26 +1190,22 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi, ret = -EIO; goto exit; } - if (!xfer->cs_change) { - tegra_qspi_transfer_end(spi); - spi_transfer_delay_exec(xfer); - } break; default: ret = -EINVAL; goto exit; } msg->actual_length += xfer->len; + if (!xfer->cs_change && transfer_phase == DATA_TRANSFER) { + tegra_qspi_transfer_end(spi); + spi_transfer_delay_exec(xfer); + } transfer_phase++; } ret = 0; exit: msg->status = ret; - if (ret < 0) { - tegra_qspi_transfer_end(spi); - spi_transfer_delay_exec(xfer); - } return ret; } @@ -1247,10 +1270,12 @@ static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi, QSPI_DMA_TIMEOUT); if (WARN_ON(ret == 0)) { dev_err(tqspi->dev, "transfer timeout\n"); - if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX)) - dmaengine_terminate_all(tqspi->tx_dma_chan); - if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX)) - dmaengine_terminate_all(tqspi->rx_dma_chan); + if (tqspi->is_curr_dma_xfer) { + if ((tqspi->cur_direction & DATA_DIR_TX) && tqspi->tx_dma_chan) + dmaengine_terminate_all(tqspi->tx_dma_chan); + if ((tqspi->cur_direction & DATA_DIR_RX) && tqspi->rx_dma_chan) + dmaengine_terminate_all(tqspi->rx_dma_chan); + } tegra_qspi_handle_error(tqspi); ret = -EIO; goto complete_xfer; @@ -1300,7 +1325,9 @@ static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi, list_for_each_entry(xfer, &msg->transfers, transfer_list) { transfer_count++; } - if (!tqspi->soc_data->cmb_xfer_capable || transfer_count != 3) + 
if (!tqspi->soc_data->cmb_xfer_capable) + return false; + if (transfer_count > 4 || transfer_count < 3) return false; xfer = list_first_entry(&msg->transfers, typeof(*xfer), transfer_list); @@ -1310,7 +1337,14 @@ static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi, if (xfer->len > 4 || xfer->len < 3) return false; xfer = list_next_entry(xfer, transfer_list); - if (!tqspi->soc_data->has_dma && xfer->len > (QSPI_FIFO_DEPTH << 2)) + if (transfer_count == 4) { + if (xfer->dummy_data != 1) + return false; + if ((xfer->len * 8 / xfer->tx_nbits) > QSPI_DUMMY_CYCLES_MAX) + return false; + xfer = list_next_entry(xfer, transfer_list); + } + if (!tqspi->soc_data->has_ext_dma && xfer->len > (QSPI_FIFO_DEPTH << 2)) return false; return true; @@ -1371,41 +1405,43 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi) unsigned int total_fifo_words; unsigned long flags; long wait_status; - int err = 0; + int num_errors = 0; if (tqspi->cur_direction & DATA_DIR_TX) { if (tqspi->tx_status) { - dmaengine_terminate_all(tqspi->tx_dma_chan); - err += 1; - } else { + if (tqspi->tx_dma_chan) + dmaengine_terminate_all(tqspi->tx_dma_chan); + num_errors++; + } else if (tqspi->tx_dma_chan) { wait_status = wait_for_completion_interruptible_timeout( &tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT); if (wait_status <= 0) { dmaengine_terminate_all(tqspi->tx_dma_chan); dev_err(tqspi->dev, "failed TX DMA transfer\n"); - err += 1; + num_errors++; } } } if (tqspi->cur_direction & DATA_DIR_RX) { if (tqspi->rx_status) { - dmaengine_terminate_all(tqspi->rx_dma_chan); - err += 2; - } else { + if (tqspi->rx_dma_chan) + dmaengine_terminate_all(tqspi->rx_dma_chan); + num_errors++; + } else if (tqspi->rx_dma_chan) { wait_status = wait_for_completion_interruptible_timeout( &tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT); if (wait_status <= 0) { dmaengine_terminate_all(tqspi->rx_dma_chan); dev_err(tqspi->dev, "failed RX DMA transfer\n"); - err += 2; + num_errors++; } } } spin_lock_irqsave(&tqspi->lock, flags); - if (err) { + if (num_errors) { tegra_qspi_dma_unmap_xfer(tqspi, t); tegra_qspi_handle_error(tqspi); complete(&tqspi->xfer_completion); @@ -1431,9 +1467,9 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi) /* continue transfer in current message */ total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t); if (total_fifo_words > QSPI_FIFO_DEPTH) - err = tegra_qspi_start_dma_based_transfer(tqspi, t); + num_errors = tegra_qspi_start_dma_based_transfer(tqspi, t); else - err = tegra_qspi_start_cpu_based_transfer(tqspi, t); + num_errors = tegra_qspi_start_cpu_based_transfer(tqspi, t); exit: spin_unlock_irqrestore(&tqspi->lock, flags); @@ -1461,28 +1497,28 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data) } static struct tegra_qspi_soc_data tegra210_qspi_soc_data = { - .has_dma = true, + .has_ext_dma = true, .cmb_xfer_capable = false, .supports_tpm = false, .cs_count = 1, }; static struct tegra_qspi_soc_data tegra186_qspi_soc_data = { - .has_dma = true, + .has_ext_dma = true, .cmb_xfer_capable = true, .supports_tpm = false, .cs_count = 1, }; static struct tegra_qspi_soc_data tegra234_qspi_soc_data = { - .has_dma = false, + .has_ext_dma = false, .cmb_xfer_capable = true, .supports_tpm = true, .cs_count = 1, }; static struct tegra_qspi_soc_data tegra241_qspi_soc_data = { - .has_dma = false, + .has_ext_dma = true, .cmb_xfer_capable = true, .supports_tpm = true, .cs_count = 4, diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c index 3bd0149d8f4e..1a40c4866ce1 
100644 --- a/drivers/spi/spi-xcomm.c +++ b/drivers/spi/spi-xcomm.c @@ -44,8 +44,8 @@ struct spi_xcomm { u8 buf[63]; }; -static void spi_xcomm_gpio_set_value(struct gpio_chip *chip, - unsigned int offset, int val) +static int spi_xcomm_gpio_set_value(struct gpio_chip *chip, + unsigned int offset, int val) { struct spi_xcomm *spi_xcomm = gpiochip_get_data(chip); unsigned char buf[2]; @@ -53,7 +53,7 @@ static void spi_xcomm_gpio_set_value(struct gpio_chip *chip, buf[0] = SPI_XCOMM_CMD_GPIO_SET; buf[1] = !!val; - i2c_master_send(spi_xcomm->i2c, buf, 2); + return i2c_master_send(spi_xcomm->i2c, buf, 2); } static int spi_xcomm_gpio_get_direction(struct gpio_chip *chip, @@ -70,7 +70,7 @@ static int spi_xcomm_gpio_add(struct spi_xcomm *spi_xcomm) return 0; spi_xcomm->gc.get_direction = spi_xcomm_gpio_get_direction; - spi_xcomm->gc.set = spi_xcomm_gpio_set_value; + spi_xcomm->gc.set_rv = spi_xcomm_gpio_set_value; spi_xcomm->gc.can_sleep = 1; spi_xcomm->gc.base = -1; spi_xcomm->gc.ngpio = 1; diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c index 2bd25c75f881..5232483c4a3a 100644 --- a/drivers/spi/spi-zynq-qspi.c +++ b/drivers/spi/spi-zynq-qspi.c @@ -540,10 +540,6 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem, int err = 0, i; u8 *tmpbuf; - dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n", - op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth, - op->dummy.buswidth, op->data.buswidth); - zynq_qspi_chipselect(mem->spi, true); zynq_qspi_config_op(xqspi, mem->spi, op); diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c index d800d79f62a7..595b6dc10845 100644 --- a/drivers/spi/spi-zynqmp-gqspi.c +++ b/drivers/spi/spi-zynqmp-gqspi.c @@ -82,7 +82,6 @@ #define GQSPI_GENFIFO_RX 0x00020000 #define GQSPI_GENFIFO_STRIPE 0x00040000 #define GQSPI_GENFIFO_POLL 0x00080000 -#define GQSPI_GENFIFO_EXP_START 0x00000100 #define GQSPI_FIFO_CTRL_RST_RX_FIFO_MASK 0x00000004 #define GQSPI_FIFO_CTRL_RST_TX_FIFO_MASK 0x00000002 #define GQSPI_FIFO_CTRL_RST_GEN_FIFO_MASK 0x00000001 @@ -580,6 +579,8 @@ static int zynqmp_qspi_config_op(struct zynqmp_qspi *xqspi, zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg); zynqmp_qspi_set_tapdelay(xqspi, baud_rate_val); } + + dev_dbg(xqspi->dev, "config speed %u\n", req_speed_hz); return 0; } @@ -670,69 +671,77 @@ static void zynqmp_qspi_readrxfifo(struct zynqmp_qspi *xqspi, u32 size) static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits, u32 genfifoentry) { - u32 transfer_len = 0; + u32 transfer_len, tempcount, exponent; + u8 imm_data; - if (xqspi->txbuf) { - genfifoentry &= ~GQSPI_GENFIFO_RX; - genfifoentry |= GQSPI_GENFIFO_DATA_XFER; - genfifoentry |= GQSPI_GENFIFO_TX; - transfer_len = xqspi->bytes_to_transfer; - } else if (xqspi->rxbuf) { - genfifoentry &= ~GQSPI_GENFIFO_TX; - genfifoentry |= GQSPI_GENFIFO_DATA_XFER; + genfifoentry |= GQSPI_GENFIFO_DATA_XFER; + if (xqspi->rxbuf) { genfifoentry |= GQSPI_GENFIFO_RX; if (xqspi->mode == GQSPI_MODE_DMA) transfer_len = xqspi->dma_rx_bytes; else transfer_len = xqspi->bytes_to_receive; } else { - /* Sending dummy circles here */ - genfifoentry &= ~(GQSPI_GENFIFO_TX | GQSPI_GENFIFO_RX); - genfifoentry |= GQSPI_GENFIFO_DATA_XFER; transfer_len = xqspi->bytes_to_transfer; } + + if (xqspi->txbuf) + genfifoentry |= GQSPI_GENFIFO_TX; + genfifoentry |= zynqmp_qspi_selectspimode(xqspi, nbits); xqspi->genfifoentry = genfifoentry; - - if ((transfer_len) < GQSPI_GENFIFO_IMM_DATA_MASK) { - genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK; - genfifoentry |= transfer_len; - 
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry); - } else { - int tempcount = transfer_len; - u32 exponent = 8; /* 2^8 = 256 */ - u8 imm_data = tempcount & 0xFF; - - tempcount &= ~(tempcount & 0xFF); - /* Immediate entry */ - if (tempcount != 0) { - /* Exponent entries */ - genfifoentry |= GQSPI_GENFIFO_EXP; - while (tempcount != 0) { - if (tempcount & GQSPI_GENFIFO_EXP_START) { - genfifoentry &= - ~GQSPI_GENFIFO_IMM_DATA_MASK; - genfifoentry |= exponent; - zynqmp_gqspi_write(xqspi, - GQSPI_GEN_FIFO_OFST, - genfifoentry); - } - tempcount = tempcount >> 1; - exponent++; - } - } - if (imm_data != 0) { - genfifoentry &= ~GQSPI_GENFIFO_EXP; - genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK; - genfifoentry |= (u8)(imm_data & 0xFF); + dev_dbg(xqspi->dev, "genfifo %05x transfer_len %u\n", + genfifoentry, transfer_len); + + /* Exponent entries */ + imm_data = transfer_len; + tempcount = transfer_len >> 8; + exponent = 8; + genfifoentry |= GQSPI_GENFIFO_EXP; + while (tempcount) { + if (tempcount & 1) zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, - genfifoentry); - } - } - if (xqspi->mode == GQSPI_MODE_IO && xqspi->rxbuf) { - /* Dummy generic FIFO entry */ - zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0); + genfifoentry | exponent); + tempcount >>= 1; + exponent++; } + + /* Immediate entry */ + genfifoentry &= ~GQSPI_GENFIFO_EXP; + if (imm_data) + zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, + genfifoentry | imm_data); + + /* Dummy generic FIFO entry */ + if (xqspi->mode == GQSPI_MODE_IO && xqspi->rxbuf) + zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0); +} + +/** + * zynqmp_qspi_disable_dma() - Disable DMA mode + * @xqspi: GQSPI instance + */ +static void zynqmp_qspi_disable_dma(struct zynqmp_qspi *xqspi) +{ + u32 config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST); + + config_reg &= ~GQSPI_CFG_MODE_EN_MASK; + zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg); + xqspi->mode = GQSPI_MODE_IO; +} + +/** + * zynqmp_qspi_enable_dma() - Enable DMA mode + * @xqspi: GQSPI instance + */ +static void zynqmp_qspi_enable_dma(struct zynqmp_qspi *xqspi) +{ + u32 config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST); + + config_reg &= ~GQSPI_CFG_MODE_EN_MASK; + config_reg |= GQSPI_CFG_MODE_EN_DMA_MASK; + zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg); + xqspi->mode = GQSPI_MODE_DMA; } /** @@ -744,7 +753,7 @@ static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits, */ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi) { - u32 config_reg, genfifoentry; + u32 genfifoentry; dma_unmap_single(xqspi->dev, xqspi->dma_addr, xqspi->dma_rx_bytes, DMA_FROM_DEVICE); @@ -758,9 +767,7 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi) if (xqspi->bytes_to_receive > 0) { /* Switch to IO mode,for remaining bytes to receive */ - config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST); - config_reg &= ~GQSPI_CFG_MODE_EN_MASK; - zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg); + zynqmp_qspi_disable_dma(xqspi); /* Initiate the transfer of remaining bytes */ genfifoentry = xqspi->genfifoentry; @@ -799,7 +806,6 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi) static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id) { struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_id; - irqreturn_t ret = IRQ_NONE; u32 status, mask, dma_status = 0; status = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST); @@ -814,27 +820,24 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id) dma_status); } - if (mask & GQSPI_ISR_TXNOT_FULL_MASK) { + if 
(!mask && !dma_status) + return IRQ_NONE; + + if (mask & GQSPI_ISR_TXNOT_FULL_MASK) zynqmp_qspi_filltxfifo(xqspi, GQSPI_TX_FIFO_FILL); - ret = IRQ_HANDLED; - } - if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) { + if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) zynqmp_process_dma_irq(xqspi); - ret = IRQ_HANDLED; - } else if (!(mask & GQSPI_IER_RXEMPTY_MASK) && - (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) { + else if (!(mask & GQSPI_IER_RXEMPTY_MASK) && + (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL); - ret = IRQ_HANDLED; - } if (xqspi->bytes_to_receive == 0 && xqspi->bytes_to_transfer == 0 && ((status & GQSPI_IRQ_MASK) == GQSPI_IRQ_MASK)) { zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK); complete(&xqspi->data_completion); - ret = IRQ_HANDLED; } - return ret; + return IRQ_HANDLED; } /** @@ -845,17 +848,14 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id) */ static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi) { - u32 rx_bytes, rx_rem, config_reg; + u32 rx_bytes, rx_rem; dma_addr_t addr; u64 dma_align = (u64)(uintptr_t)xqspi->rxbuf; if (xqspi->bytes_to_receive < 8 || ((dma_align & GQSPI_DMA_UNALIGN) != 0x0)) { /* Setting to IO mode */ - config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST); - config_reg &= ~GQSPI_CFG_MODE_EN_MASK; - zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg); - xqspi->mode = GQSPI_MODE_IO; + zynqmp_qspi_disable_dma(xqspi); xqspi->dma_rx_bytes = 0; return 0; } @@ -878,14 +878,7 @@ static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi) zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_ADDR_MSB_OFST, ((u32)addr) & 0xfff); - /* Enabling the DMA mode */ - config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST); - config_reg &= ~GQSPI_CFG_MODE_EN_MASK; - config_reg |= GQSPI_CFG_MODE_EN_DMA_MASK; - zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg); - - /* Switch to DMA mode */ - xqspi->mode = GQSPI_MODE_DMA; + zynqmp_qspi_enable_dma(xqspi); /* Write the number of bytes to transfer */ zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_SIZE_OFST, rx_bytes); @@ -905,18 +898,10 @@ static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi) static void zynqmp_qspi_write_op(struct zynqmp_qspi *xqspi, u8 tx_nbits, u32 genfifoentry) { - u32 config_reg; - zynqmp_qspi_fillgenfifo(xqspi, tx_nbits, genfifoentry); zynqmp_qspi_filltxfifo(xqspi, GQSPI_TXD_DEPTH); - if (xqspi->mode == GQSPI_MODE_DMA) { - config_reg = zynqmp_gqspi_read(xqspi, - GQSPI_CONFIG_OFST); - config_reg &= ~GQSPI_CFG_MODE_EN_MASK; - zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, - config_reg); - xqspi->mode = GQSPI_MODE_IO; - } + if (xqspi->mode == GQSPI_MODE_DMA) + zynqmp_qspi_disable_dma(xqspi); } /** @@ -1059,18 +1044,14 @@ static unsigned long zynqmp_qspi_timeout(struct zynqmp_qspi *xqspi, u8 bits, static int zynqmp_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) { - struct zynqmp_qspi *xqspi = spi_controller_get_devdata - (mem->spi->controller); + struct zynqmp_qspi *xqspi = + spi_controller_get_devdata(mem->spi->controller); unsigned long timeout; int err = 0, i; u32 genfifoentry = 0; u16 opcode = op->cmd.opcode; u64 opaddr; - dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n", - op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth, - op->dummy.buswidth, op->data.buswidth); - mutex_lock(&xqspi->op_lock); zynqmp_qspi_config_op(xqspi, op); zynqmp_qspi_chipselect(mem->spi, false); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index ff07c87dbadc..0ffa3f9f2870 100644 --- a/drivers/spi/spi.c +++ 
b/drivers/spi/spi.c @@ -31,6 +31,7 @@ #include <linux/ptp_clock_kernel.h> #include <linux/sched/rt.h> #include <linux/slab.h> +#include <linux/spi/offload/types.h> #include <linux/spi/spi.h> #include <linux/spi/spi-mem.h> #include <uapi/linux/sched/types.h> @@ -42,7 +43,7 @@ EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop); #include "internals.h" -static DEFINE_IDR(spi_master_idr); +static DEFINE_IDR(spi_controller_idr); static void spidev_release(struct device *dev) { @@ -305,7 +306,7 @@ static const struct attribute_group spi_controller_statistics_group = { .attrs = spi_controller_statistics_attrs, }; -static const struct attribute_group *spi_master_groups[] = { +static const struct attribute_group *spi_controller_groups[] = { &spi_controller_statistics_group, NULL, }; @@ -1075,10 +1076,8 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force) * Avoid calling into the driver (or doing delays) if the chip select * isn't actually changing from the last time this was called. */ - if (!force && ((enable && spi->controller->last_cs_index_mask == spi->cs_index_mask && - spi_is_last_cs(spi)) || - (!enable && spi->controller->last_cs_index_mask == spi->cs_index_mask && - !spi_is_last_cs(spi))) && + if (!force && (enable == spi_is_last_cs(spi)) && + (spi->controller->last_cs_index_mask == spi->cs_index_mask) && (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH))) return; @@ -1087,9 +1086,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force) spi->controller->last_cs_index_mask = spi->cs_index_mask; for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : SPI_INVALID_CS; - spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH; - if (spi->mode & SPI_CS_HIGH) + spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH; + if (spi->controller->last_cs_mode_high) enable = !enable; /* @@ -1106,7 +1105,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force) spi_toggle_csgpiod(spi, idx, enable, activate); } } - /* Some SPI masters need both GPIO CS & slave_select */ + /* Some SPI controllers need both GPIO CS & ->set_cs() */ if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) && spi->controller->set_cs) spi->controller->set_cs(spi, !enable); @@ -1495,10 +1494,7 @@ static void _spi_transfer_delay_ns(u32 ns) } else { u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC); - if (us <= 10) - udelay(us); - else - usleep_range(us, us + DIV_ROUND_UP(us, 10)); + fsleep(us); } } @@ -2534,7 +2530,7 @@ err_out: * @ctlr: Pointer to spi_controller device * * Registers an spi_device for each child node of controller node which - * represents a valid SPI slave. + * represents a valid SPI target device. 
*/ static void of_register_spi_devices(struct spi_controller *ctlr) { @@ -2819,7 +2815,7 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr, if (!lookup.max_speed_hz && ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) && device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) { - /* Apple does not use _CRS but nested devices for SPI slaves */ + /* Apple does not use _CRS but nested devices for SPI target devices */ acpi_spi_parse_apple_properties(adev, &lookup); } @@ -2911,7 +2907,7 @@ static void acpi_register_spi_devices(struct spi_controller *ctlr) SPI_ACPI_ENUMERATE_MAX_DEPTH, acpi_spi_add_device, NULL, ctlr, NULL); if (ACPI_FAILURE(status)) - dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n"); + dev_warn(&ctlr->dev, "failed to enumerate SPI target devices\n"); } #else static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {} @@ -2925,16 +2921,15 @@ static void spi_controller_release(struct device *dev) kfree(ctlr); } -static const struct class spi_master_class = { +static const struct class spi_controller_class = { .name = "spi_master", .dev_release = spi_controller_release, - .dev_groups = spi_master_groups, + .dev_groups = spi_controller_groups, }; #ifdef CONFIG_SPI_SLAVE /** - * spi_target_abort - abort the ongoing transfer request on an SPI slave - * controller + * spi_target_abort - abort the ongoing transfer request on an SPI target controller * @spi: device used for the current transfer */ int spi_target_abort(struct spi_device *spi) @@ -2979,13 +2974,13 @@ static ssize_t slave_store(struct device *dev, struct device_attribute *attr, child = device_find_any_child(&ctlr->dev); if (child) { - /* Remove registered slave */ + /* Remove registered target device */ device_unregister(child); put_device(child); } if (strcmp(name, "(null)")) { - /* Register new slave */ + /* Register new target device */ spi = spi_alloc_device(ctlr); if (!spi) return -ENOMEM; @@ -3004,40 +2999,40 @@ static ssize_t slave_store(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_RW(slave); -static struct attribute *spi_slave_attrs[] = { +static struct attribute *spi_target_attrs[] = { &dev_attr_slave.attr, NULL, }; -static const struct attribute_group spi_slave_group = { - .attrs = spi_slave_attrs, +static const struct attribute_group spi_target_group = { + .attrs = spi_target_attrs, }; -static const struct attribute_group *spi_slave_groups[] = { +static const struct attribute_group *spi_target_groups[] = { &spi_controller_statistics_group, - &spi_slave_group, + &spi_target_group, NULL, }; -static const struct class spi_slave_class = { +static const struct class spi_target_class = { .name = "spi_slave", .dev_release = spi_controller_release, - .dev_groups = spi_slave_groups, + .dev_groups = spi_target_groups, }; #else -extern struct class spi_slave_class; /* dummy */ +extern struct class spi_target_class; /* dummy */ #endif /** - * __spi_alloc_controller - allocate an SPI master or slave controller + * __spi_alloc_controller - allocate an SPI host or target controller * @dev: the controller, possibly using the platform_bus * @size: how much zeroed driver-private data to allocate; the pointer to this * memory is in the driver_data field of the returned device, accessible * with spi_controller_get_devdata(); the memory is cacheline aligned; * drivers granting DMA access to portions of their private data need to * round up @size using ALIGN(size, dma_get_cache_alignment()). 
- * @slave: flag indicating whether to allocate an SPI master (false) or SPI - * slave (true) controller + * @target: flag indicating whether to allocate an SPI host (false) or SPI target (true) + * controller * Context: can sleep * * This call is used only by SPI controller drivers, which are the @@ -3054,7 +3049,7 @@ extern struct class spi_slave_class; /* dummy */ * Return: the SPI controller structure on success, else NULL. */ struct spi_controller *__spi_alloc_controller(struct device *dev, - unsigned int size, bool slave) + unsigned int size, bool target) { struct spi_controller *ctlr; size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment()); @@ -3075,11 +3070,11 @@ struct spi_controller *__spi_alloc_controller(struct device *dev, mutex_init(&ctlr->add_lock); ctlr->bus_num = -1; ctlr->num_chipselect = 1; - ctlr->slave = slave; - if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave) - ctlr->dev.class = &spi_slave_class; + ctlr->target = target; + if (IS_ENABLED(CONFIG_SPI_SLAVE) && target) + ctlr->dev.class = &spi_target_class; else - ctlr->dev.class = &spi_master_class; + ctlr->dev.class = &spi_controller_class; ctlr->dev.parent = dev; pm_suspend_ignore_children(&ctlr->dev, true); spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size); @@ -3097,7 +3092,7 @@ static void devm_spi_release_controller(struct device *dev, void *ctlr) * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller() * @dev: physical device of SPI controller * @size: how much zeroed driver-private data to allocate - * @slave: whether to allocate an SPI master (false) or SPI slave (true) + * @target: whether to allocate an SPI host (false) or SPI target (true) controller * Context: can sleep * * Allocate an SPI controller and automatically release a reference on it @@ -3110,7 +3105,7 @@ static void devm_spi_release_controller(struct device *dev, void *ctlr) */ struct spi_controller *__devm_spi_alloc_controller(struct device *dev, unsigned int size, - bool slave) + bool target) { struct spi_controller **ptr, *ctlr; @@ -3119,7 +3114,7 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev, if (!ptr) return NULL; - ctlr = __spi_alloc_controller(dev, size, slave); + ctlr = __spi_alloc_controller(dev, size, target); if (ctlr) { ctlr->devm_allocated = true; *ptr = ctlr; @@ -3133,8 +3128,8 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev, EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller); /** - * spi_get_gpio_descs() - grab chip select GPIOs for the master - * @ctlr: The SPI master to grab GPIO descriptors for + * spi_get_gpio_descs() - grab chip select GPIOs for the controller + * @ctlr: The SPI controller to grab GPIO descriptors for */ static int spi_get_gpio_descs(struct spi_controller *ctlr) { @@ -3232,7 +3227,7 @@ static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int e int id; mutex_lock(&board_lock); - id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL); + id = idr_alloc(&spi_controller_idr, ctlr, start, end, GFP_KERNEL); mutex_unlock(&board_lock); if (WARN(id < 0, "couldn't get idr")) return id == -ENOSPC ? 
-EBUSY : id; @@ -3381,7 +3376,7 @@ destroy_queue: spi_destroy_queue(ctlr); free_bus_id: mutex_lock(&board_lock); - idr_remove(&spi_master_idr, ctlr->bus_num); + idr_remove(&spi_controller_idr, ctlr->bus_num); mutex_unlock(&board_lock); return status; } @@ -3393,8 +3388,7 @@ static void devm_spi_unregister(struct device *dev, void *res) } /** - * devm_spi_register_controller - register managed SPI host or target - * controller + * devm_spi_register_controller - register managed SPI host or target controller * @dev: device managing SPI controller * @ctlr: initialized controller, originally from spi_alloc_host() or * spi_alloc_target() @@ -3434,7 +3428,7 @@ static int __unregister(struct device *dev, void *null) } /** - * spi_unregister_controller - unregister SPI host or target controller + * spi_unregister_controller - unregister SPI host or target controller * @ctlr: the controller being unregistered * Context: can sleep * @@ -3458,7 +3452,7 @@ void spi_unregister_controller(struct spi_controller *ctlr) /* First make sure that this controller was ever added */ mutex_lock(&board_lock); - found = idr_find(&spi_master_idr, id); + found = idr_find(&spi_controller_idr, id); mutex_unlock(&board_lock); if (ctlr->queued) { if (spi_destroy_queue(ctlr)) @@ -3473,7 +3467,7 @@ void spi_unregister_controller(struct spi_controller *ctlr) /* Free bus id */ mutex_lock(&board_lock); if (found == ctlr) - idr_remove(&spi_master_idr, id); + idr_remove(&spi_controller_idr, id); mutex_unlock(&board_lock); if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) @@ -3806,7 +3800,7 @@ int spi_split_transfers_maxwords(struct spi_controller *ctlr, size_t maxsize; int ret; - maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word)); + maxsize = maxwords * spi_bpw_to_bytes(xfer->bits_per_word); if (xfer->len > maxsize) { ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer, maxsize); @@ -4100,6 +4094,13 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word)) return -EINVAL; + /* DTR (DDR) mode is supported only if the controller has dtr_caps set. * Otherwise SDR mode is assumed. Note: this applies only to QSPI controllers. */ + if (xfer->dtr_mode && !ctlr->dtr_caps) + return -EINVAL; + /* * SPI transfer length should be multiple of SPI word size * where SPI word size should be power-of-two multiple. 
@@ -4137,10 +4138,13 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) xfer->tx_nbits != SPI_NBITS_OCTAL) return -EINVAL; if ((xfer->tx_nbits == SPI_NBITS_DUAL) && - !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) + !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) return -EINVAL; if ((xfer->tx_nbits == SPI_NBITS_QUAD) && - !(spi->mode & SPI_TX_QUAD)) + !(spi->mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) + return -EINVAL; + if ((xfer->tx_nbits == SPI_NBITS_OCTAL) && + !(spi->mode & SPI_TX_OCTAL)) return -EINVAL; } /* Check transfer rx_nbits */ @@ -4153,15 +4157,27 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) xfer->rx_nbits != SPI_NBITS_OCTAL) return -EINVAL; if ((xfer->rx_nbits == SPI_NBITS_DUAL) && - !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) + !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))) return -EINVAL; if ((xfer->rx_nbits == SPI_NBITS_QUAD) && - !(spi->mode & SPI_RX_QUAD)) + !(spi->mode & (SPI_RX_QUAD | SPI_RX_OCTAL))) + return -EINVAL; + if ((xfer->rx_nbits == SPI_NBITS_OCTAL) && + !(spi->mode & SPI_RX_OCTAL)) return -EINVAL; } if (_spi_xfer_word_delay_update(xfer, spi)) return -EINVAL; + + /* Make sure controller supports required offload features. */ + if (xfer->offload_flags) { + if (!message->offload) + return -EINVAL; + + if (xfer->offload_flags & ~message->offload->xfer_flags) + return -EINVAL; + } } message->status = -EINPROGRESS; @@ -4617,7 +4633,7 @@ EXPORT_SYMBOL_GPL(spi_sync_locked); /** * spi_bus_lock - obtain a lock for exclusive SPI bus usage - * @ctlr: SPI bus master that should be locked for exclusive bus access + * @ctlr: SPI bus controller that should be locked for exclusive bus access * Context: can sleep * * This call may only be used from a context that may sleep. The sleep @@ -4648,7 +4664,7 @@ EXPORT_SYMBOL_GPL(spi_bus_lock); /** * spi_bus_unlock - release the lock for exclusive SPI bus usage - * @ctlr: SPI bus master that was locked for exclusive bus access + * @ctlr: SPI bus controller that was locked for exclusive bus access * Context: can sleep * * This call may only be used from a context that may sleep. 
The sleep @@ -4765,9 +4781,9 @@ static struct spi_controller *of_find_spi_controller_by_node(struct device_node { struct device *dev; - dev = class_find_device_by_of_node(&spi_master_class, node); + dev = class_find_device_by_of_node(&spi_controller_class, node); if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) - dev = class_find_device_by_of_node(&spi_slave_class, node); + dev = class_find_device_by_of_node(&spi_target_class, node); if (!dev) return NULL; @@ -4847,10 +4863,10 @@ struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev { struct device *dev; - dev = class_find_device(&spi_master_class, NULL, adev, + dev = class_find_device(&spi_controller_class, NULL, adev, spi_acpi_controller_match); if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) - dev = class_find_device(&spi_slave_class, NULL, adev, + dev = class_find_device(&spi_target_class, NULL, adev, spi_acpi_controller_match); if (!dev) return NULL; @@ -4920,12 +4936,12 @@ static int __init spi_init(void) if (status < 0) goto err1; - status = class_register(&spi_master_class); + status = class_register(&spi_controller_class); if (status < 0) goto err2; if (IS_ENABLED(CONFIG_SPI_SLAVE)) { - status = class_register(&spi_slave_class); + status = class_register(&spi_target_class); if (status < 0) goto err3; } @@ -4938,7 +4954,7 @@ static int __init spi_init(void) return 0; err3: - class_unregister(&spi_master_class); + class_unregister(&spi_controller_class); err2: bus_unregister(&spi_bus_type); err1: diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 58ae4304fdab..6108959c28d9 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -706,6 +706,7 @@ static const struct spi_device_id spidev_spi_ids[] = { { .name = /* cisco */ "spi-petra" }, { .name = /* dh */ "dhcom-board" }, { .name = /* elgin */ "jg10309-01" }, + { .name = /* gocontroll */ "moduline-module-slot"}, { .name = /* lineartechnology */ "ltc2488" }, { .name = /* lwn */ "bk4" }, { .name = /* lwn */ "bk4-spi" }, @@ -737,6 +738,7 @@ static const struct of_device_id spidev_dt_ids[] = { { .compatible = "cisco,spi-petra", .data = &spidev_of_check }, { .compatible = "dh,dhcom-board", .data = &spidev_of_check }, { .compatible = "elgin,jg10309-01", .data = &spidev_of_check }, + { .compatible = "gocontroll,moduline-module-slot", .data = &spidev_of_check}, { .compatible = "lineartechnology,ltc2488", .data = &spidev_of_check }, { .compatible = "lwn,bk4", .data = &spidev_of_check }, { .compatible = "lwn,bk4-spi", .data = &spidev_of_check },
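
The reworked zynqmp_qspi_fillgenfifo() above encodes a transfer length as a series of power-of-two "exponent" GEN FIFO entries followed by a single 8-bit "immediate" entry. Below is a minimal userspace sketch of that encoding so it can be inspected in isolation; emit_entry(), GENFIFO_EXP and GENFIFO_IMM_MASK are hypothetical stand-ins for zynqmp_gqspi_write() and the driver's entry fields, not the kernel API.

#include <stdint.h>
#include <stdio.h>

#define GENFIFO_EXP      0x00000200u /* hypothetical: length field holds an exponent (2^n bytes) */
#define GENFIFO_IMM_MASK 0x000000ffu /* hypothetical: 8-bit immediate length field */

/* Stand-in for zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, entry). */
static void emit_entry(uint32_t entry)
{
	printf("gen fifo entry: 0x%05x\n", entry);
}

/*
 * Mirror of the loop in zynqmp_qspi_fillgenfifo(): every set bit above
 * the low 8 bits becomes one exponent entry carrying log2 of its
 * contribution; the low 8 bits go out as one immediate entry.
 */
static void encode_len(uint32_t base, uint32_t len)
{
	uint8_t imm = len & GENFIFO_IMM_MASK;
	uint32_t rest = len >> 8;
	uint32_t exponent = 8;

	while (rest) {
		if (rest & 1)
			emit_entry(base | GENFIFO_EXP | exponent);
		rest >>= 1;
		exponent++;
	}
	if (imm)
		emit_entry(base | imm);
}

int main(void)
{
	encode_len(0x00020000u, 0x1234); /* e.g. a 0x1234-byte RX transfer */
	return 0;
}

For 0x1234 bytes this emits exponent entries 12 and 9 (4096 + 512 bytes) and an immediate entry of 0x34, which sum back to 0x1234 - the same sequence of writes the driver issues to the GEN FIFO register.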