Diffstat (limited to 'drivers/spi')
-rw-r--r--  drivers/spi/Kconfig              |  37
-rw-r--r--  drivers/spi/Makefile             |   4
-rw-r--r--  drivers/spi/atmel-quadspi.c      | 130
-rw-r--r--  drivers/spi/spi-ar934x.c         | 235
-rw-r--r--  drivers/spi/spi-bcm63xx-hsspi.c  |   1
-rw-r--r--  drivers/spi/spi-efm32.c          |  44
-rw-r--r--  drivers/spi/spi-fsi.c            | 558
-rw-r--r--  drivers/spi/spi-fsl-dspi.c       | 732
-rw-r--r--  drivers/spi/spi-fsl-lpspi.c      |   9
-rw-r--r--  drivers/spi/spi-fsl-qspi.c       |   4
-rw-r--r--  drivers/spi/spi-geni-qcom.c      |  26
-rw-r--r--  drivers/spi/spi-hisi-sfc-v3xx.c  |  99
-rw-r--r--  drivers/spi/spi-mem.c            |   7
-rw-r--r--  drivers/spi/spi-meson-spicc.c    | 496
-rw-r--r--  drivers/spi/spi-mtk-nor.c        | 689
-rw-r--r--  drivers/spi/spi-mux.c            | 187
-rw-r--r--  drivers/spi/spi-mxs.c            |   3
-rw-r--r--  drivers/spi/spi-nxp-fspi.c       |  63
-rw-r--r--  drivers/spi/spi-omap2-mcspi.c    | 103
-rw-r--r--  drivers/spi/spi-pxa2xx.c         |  56
-rw-r--r--  drivers/spi/spi-qup.c            |  11
-rw-r--r--  drivers/spi/spi-rockchip.c       |   5
-rw-r--r--  drivers/spi/spi-rspi.c           |  44
-rw-r--r--  drivers/spi/spi-s3c24xx.c        |   2
-rw-r--r--  drivers/spi/spi-stm32-qspi.c     |  31
-rw-r--r--  drivers/spi/spi-stm32.c          |  62
-rw-r--r--  drivers/spi/spi-zynqmp-gqspi.c   |   3
-rw-r--r--  drivers/spi/spi.c                |  66
-rw-r--r--  drivers/spi/spidev.c             |  28
29 files changed, 3095 insertions, 640 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index d6ed0c355954..efce98e9844e 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -62,6 +62,13 @@ config SPI_ALTERA
help
This is the driver for the Altera SPI Controller.
+config SPI_AR934X
+ tristate "Qualcomm Atheros AR934X/QCA95XX SPI controller driver"
+ depends on ATH79 || COMPILE_TEST
+ help
+ This enables support for the SPI controller present on the
+ Qualcomm Atheros AR934X/QCA95XX SoCs.
+
config SPI_ATH79
tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver"
depends on ATH79 || COMPILE_TEST
@@ -264,6 +271,13 @@ config SPI_FALCON
has only been tested with m25p80 type chips. The hardware has no
support for other types of SPI peripherals.
+config SPI_FSI
+ tristate "FSI SPI driver"
+ depends on FSI
+ help
+ This enables support for the driver for FSI bus attached SPI
+ controllers.
+
config SPI_FSL_LPSPI
tristate "Freescale i.MX LPSPI controller"
depends on ARCH_MXC || COMPILE_TEST
@@ -285,7 +299,6 @@ config SPI_HISI_SFC_V3XX
tristate "HiSilicon SPI-NOR Flash Controller for Hi16XX chipsets"
depends on (ARM64 && ACPI) || COMPILE_TEST
depends on HAS_IOMEM
- select CONFIG_MTD_SPI_NOR
help
This enables support for HiSilicon v3xx SPI-NOR flash controller
found in hi16xx chipsets.
@@ -415,6 +428,7 @@ config SPI_FSL_ESPI
config SPI_MESON_SPICC
tristate "Amlogic Meson SPICC controller"
+ depends on COMMON_CLK
depends on ARCH_MESON || COMPILE_TEST
help
This enables master mode support for the SPICC (SPI communication
@@ -443,6 +457,16 @@ config SPI_MT7621
help
This selects a driver for the MediaTek MT7621 SPI Controller.
+config SPI_MTK_NOR
+ tristate "MediaTek SPI NOR controller"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ This enables support for SPI NOR controller found on MediaTek
+ ARM SoCs. This is a controller specifically for SPI-NOR flash.
+ It can perform generic SPI transfers up to 6 bytes via generic
+ SPI interface as well as several SPI-NOR specific instructions
+ via SPI MEM interface.
+
config SPI_NPCM_FIU
tristate "Nuvoton NPCM FLASH Interface Unit"
depends on ARCH_NPCM || COMPILE_TEST
@@ -890,6 +914,17 @@ config SPI_ZYNQMP_GQSPI
# Add new SPI master controllers in alphabetical order above this line
#
+comment "SPI Multiplexer support"
+
+config SPI_MUX
+ tristate "SPI multiplexer support"
+ select MULTIPLEXER
+ help
+ This adds support for SPI multiplexers. Each SPI mux will be
+ accessible as a SPI controller, the devices behind the mux will appear
+ to be chip selects on this controller. It is still necessary to
+ select one or more specific mux-controller drivers.
+
#
# There are lots of SPI device types, with sensors and memory
# being probably the most widely used ones.
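
The SPI_MTK_NOR help text above notes that, besides short generic transfers, SPI-NOR specific instructions go through the SPI MEM interface. As a rough illustration of what such an instruction looks like to a spi-mem controller (a sketch with example values; the opcode, address width and helper name are not taken from this patch), a 1-1-1 fast read could be described as:

/* Illustrative sketch: a fast read (opcode 0x0b) with a 3-byte address and
 * one dummy byte, built with the spi-mem op macros and submitted through
 * spi_mem_exec_op(); a controller such as spi-mtk-nor services it from its
 * exec_op() hook.
 */
#include <linux/spi/spi-mem.h>

static int example_nor_fast_read(struct spi_mem *mem, u32 addr,
                                 void *buf, size_t len)
{
        struct spi_mem_op op =
                SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1),
                           SPI_MEM_OP_ADDR(3, addr, 1),
                           SPI_MEM_OP_DUMMY(1, 1),
                           SPI_MEM_OP_DATA_IN(len, buf, 1));

        return spi_mem_exec_op(mem, &op);
}
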
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 9b65ec5afc5e..28f601327f8c 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -9,11 +9,13 @@ ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG
# config declarations into driver model code
obj-$(CONFIG_SPI_MASTER) += spi.o
obj-$(CONFIG_SPI_MEM) += spi-mem.o
+obj-$(CONFIG_SPI_MUX) += spi-mux.o
obj-$(CONFIG_SPI_SPIDEV) += spidev.o
obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o
# SPI master controller drivers (bus)
obj-$(CONFIG_SPI_ALTERA) += spi-altera.o
+obj-$(CONFIG_SPI_AR934X) += spi-ar934x.o
obj-$(CONFIG_SPI_ARMADA_3700) += spi-armada-3700.o
obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o
@@ -40,6 +42,7 @@ spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o
obj-$(CONFIG_SPI_EFM32) += spi-efm32.o
obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o
obj-$(CONFIG_SPI_FALCON) += spi-falcon.o
+obj-$(CONFIG_SPI_FSI) += spi-fsi.o
obj-$(CONFIG_SPI_FSL_CPM) += spi-fsl-cpm.o
obj-$(CONFIG_SPI_FSL_DSPI) += spi-fsl-dspi.o
obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o
@@ -62,6 +65,7 @@ obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o
obj-$(CONFIG_SPI_MT7621) += spi-mt7621.o
+obj-$(CONFIG_SPI_MTK_NOR) += spi-mtk-nor.o
obj-$(CONFIG_SPI_MXIC) += spi-mxic.o
obj-$(CONFIG_SPI_MXS) += spi-mxs.o
obj-$(CONFIG_SPI_NPCM_FIU) += spi-npcm-fiu.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index fd8007ebb145..cb44d1e169aa 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -149,6 +149,7 @@ struct atmel_qspi {
struct clk *qspick;
struct platform_device *pdev;
const struct atmel_qspi_caps *caps;
+ resource_size_t mmap_size;
u32 pending;
u32 mr;
u32 scr;
@@ -172,6 +173,81 @@ static const struct atmel_qspi_mode atmel_qspi_modes[] = {
{ 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
};
+#ifdef VERBOSE_DEBUG
+static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz)
+{
+ switch (offset) {
+ case QSPI_CR:
+ return "CR";
+ case QSPI_MR:
+ return "MR";
+ case QSPI_RD:
+ return "MR";
+ case QSPI_TD:
+ return "TD";
+ case QSPI_SR:
+ return "SR";
+ case QSPI_IER:
+ return "IER";
+ case QSPI_IDR:
+ return "IDR";
+ case QSPI_IMR:
+ return "IMR";
+ case QSPI_SCR:
+ return "SCR";
+ case QSPI_IAR:
+ return "IAR";
+ case QSPI_ICR:
+ return "ICR/WICR";
+ case QSPI_IFR:
+ return "IFR";
+ case QSPI_RICR:
+ return "RICR";
+ case QSPI_SMR:
+ return "SMR";
+ case QSPI_SKR:
+ return "SKR";
+ case QSPI_WPMR:
+ return "WPMR";
+ case QSPI_WPSR:
+ return "WPSR";
+ case QSPI_VERSION:
+ return "VERSION";
+ default:
+ snprintf(tmp, sz, "0x%02x", offset);
+ break;
+ }
+
+ return tmp;
+}
+#endif /* VERBOSE_DEBUG */
+
+static u32 atmel_qspi_read(struct atmel_qspi *aq, u32 offset)
+{
+ u32 value = readl_relaxed(aq->regs + offset);
+
+#ifdef VERBOSE_DEBUG
+ char tmp[8];
+
+ dev_vdbg(&aq->pdev->dev, "read 0x%08x from %s\n", value,
+ atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
+#endif /* VERBOSE_DEBUG */
+
+ return value;
+}
+
+static void atmel_qspi_write(u32 value, struct atmel_qspi *aq, u32 offset)
+{
+#ifdef VERBOSE_DEBUG
+ char tmp[8];
+
+ dev_vdbg(&aq->pdev->dev, "write 0x%08x into %s\n", value,
+ atmel_qspi_reg_name(offset, tmp, sizeof(tmp)));
+#endif /* VERBOSE_DEBUG */
+
+ writel_relaxed(value, aq->regs + offset);
+}
+
static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op,
const struct atmel_qspi_mode *mode)
{
@@ -292,32 +368,32 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
* Serial Memory Mode (SMM).
*/
if (aq->mr != QSPI_MR_SMM) {
- writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR);
+ atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR);
aq->mr = QSPI_MR_SMM;
}
/* Clear pending interrupts */
- (void)readl_relaxed(aq->regs + QSPI_SR);
+ (void)atmel_qspi_read(aq, QSPI_SR);
if (aq->caps->has_ricr) {
if (!op->addr.nbytes && op->data.dir == SPI_MEM_DATA_IN)
ifr |= QSPI_IFR_APBTFRTYP_READ;
/* Set QSPI Instruction Frame registers */
- writel_relaxed(iar, aq->regs + QSPI_IAR);
+ atmel_qspi_write(iar, aq, QSPI_IAR);
if (op->data.dir == SPI_MEM_DATA_IN)
- writel_relaxed(icr, aq->regs + QSPI_RICR);
+ atmel_qspi_write(icr, aq, QSPI_RICR);
else
- writel_relaxed(icr, aq->regs + QSPI_WICR);
- writel_relaxed(ifr, aq->regs + QSPI_IFR);
+ atmel_qspi_write(icr, aq, QSPI_WICR);
+ atmel_qspi_write(ifr, aq, QSPI_IFR);
} else {
if (op->data.dir == SPI_MEM_DATA_OUT)
ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR;
/* Set QSPI Instruction Frame registers */
- writel_relaxed(iar, aq->regs + QSPI_IAR);
- writel_relaxed(icr, aq->regs + QSPI_ICR);
- writel_relaxed(ifr, aq->regs + QSPI_IFR);
+ atmel_qspi_write(iar, aq, QSPI_IAR);
+ atmel_qspi_write(icr, aq, QSPI_ICR);
+ atmel_qspi_write(ifr, aq, QSPI_IFR);
}
return 0;
@@ -329,6 +405,14 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
u32 sr, offset;
int err;
+ /*
+ * Check if the address exceeds the MMIO window size. An improvement
+ * would be to add support for regular SPI mode and fall back to it
+ * when the flash memories overrun the controller's memory space.
+ */
+ if (op->addr.val + op->data.nbytes > aq->mmap_size)
+ return -ENOTSUPP;
+
err = atmel_qspi_set_cfg(aq, op, &offset);
if (err)
return err;
@@ -336,7 +420,7 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
/* Skip to the final steps if there is no data */
if (op->data.nbytes) {
/* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
- (void)readl_relaxed(aq->regs + QSPI_IFR);
+ (void)atmel_qspi_read(aq, QSPI_IFR);
/* Send/Receive data */
if (op->data.dir == SPI_MEM_DATA_IN)
@@ -347,22 +431,22 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
op->data.nbytes);
/* Release the chip-select */
- writel_relaxed(QSPI_CR_LASTXFER, aq->regs + QSPI_CR);
+ atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR);
}
/* Poll INSTRuction End status */
- sr = readl_relaxed(aq->regs + QSPI_SR);
+ sr = atmel_qspi_read(aq, QSPI_SR);
if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
return err;
/* Wait for INSTRuction End interrupt */
reinit_completion(&aq->cmd_completion);
aq->pending = sr & QSPI_SR_CMD_COMPLETED;
- writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IER);
+ atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IER);
if (!wait_for_completion_timeout(&aq->cmd_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
- writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IDR);
+ atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IDR);
return err;
}
@@ -401,7 +485,7 @@ static int atmel_qspi_setup(struct spi_device *spi)
scbr--;
aq->scr = QSPI_SCR_SCBR(scbr);
- writel_relaxed(aq->scr, aq->regs + QSPI_SCR);
+ atmel_qspi_write(aq->scr, aq, QSPI_SCR);
return 0;
}
@@ -409,14 +493,14 @@ static int atmel_qspi_setup(struct spi_device *spi)
static void atmel_qspi_init(struct atmel_qspi *aq)
{
/* Reset the QSPI controller */
- writel_relaxed(QSPI_CR_SWRST, aq->regs + QSPI_CR);
+ atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR);
/* Set the QSPI controller by default in Serial Memory Mode */
- writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR);
+ atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR);
aq->mr = QSPI_MR_SMM;
/* Enable the QSPI controller */
- writel_relaxed(QSPI_CR_QSPIEN, aq->regs + QSPI_CR);
+ atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR);
}
static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
@@ -424,8 +508,8 @@ static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
struct atmel_qspi *aq = dev_id;
u32 status, mask, pending;
- status = readl_relaxed(aq->regs + QSPI_SR);
- mask = readl_relaxed(aq->regs + QSPI_IMR);
+ status = atmel_qspi_read(aq, QSPI_SR);
+ mask = atmel_qspi_read(aq, QSPI_IMR);
pending = status & mask;
if (!pending)
@@ -480,6 +564,8 @@ static int atmel_qspi_probe(struct platform_device *pdev)
goto exit;
}
+ aq->mmap_size = resource_size(res);
+
/* Get the peripheral clock */
aq->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(aq->pclk))
@@ -558,7 +644,7 @@ static int atmel_qspi_remove(struct platform_device *pdev)
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
spi_unregister_controller(ctrl);
- writel_relaxed(QSPI_CR_QSPIDIS, aq->regs + QSPI_CR);
+ atmel_qspi_write(QSPI_CR_QSPIDIS, aq, QSPI_CR);
clk_disable_unprepare(aq->qspick);
clk_disable_unprepare(aq->pclk);
return 0;
@@ -585,7 +671,7 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev)
atmel_qspi_init(aq);
- writel_relaxed(aq->scr, aq->regs + QSPI_SCR);
+ atmel_qspi_write(aq->scr, aq, QSPI_SCR);
return 0;
}
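
The comment added to atmel_qspi_exec_op() above explains why operations reaching past the memory-mapped window are refused rather than retried in regular SPI mode. A minimal stand-alone sketch of that bounds check, under the assumption of a 16 MiB window (the helper name and numbers are illustrative, not part of the driver):

#include <linux/errno.h>
#include <linux/types.h>

/* Illustrative only: mirrors the op->addr.val + op->data.nbytes test.
 * With mmap_size = 16 MiB (0x1000000), a 0x20-byte read at 0xfffff0 would
 * spill past the window and be refused with -ENOTSUPP, letting the spi-mem
 * core treat the operation as unsupported.
 */
static int example_check_window(resource_size_t mmap_size, u64 addr,
                                unsigned int nbytes)
{
        if (addr + nbytes > mmap_size)
                return -ENOTSUPP;
        return 0;
}
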
diff --git a/drivers/spi/spi-ar934x.c b/drivers/spi/spi-ar934x.c
new file mode 100644
index 000000000000..d08dec09d423
--- /dev/null
+++ b/drivers/spi/spi-ar934x.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// SPI controller driver for Qualcomm Atheros AR934x/QCA95xx SoCs
+//
+// Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com>
+//
+// Based on spi-mt7621.c:
+// Copyright (C) 2011 Sergiy <piratfm@gmail.com>
+// Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
+// Copyright (C) 2014-2015 Felix Fietkau <nbd@nbd.name>
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+
+#define DRIVER_NAME "spi-ar934x"
+
+#define AR934X_SPI_REG_FS 0x00
+#define AR934X_SPI_ENABLE BIT(0)
+
+#define AR934X_SPI_REG_IOC 0x08
+#define AR934X_SPI_IOC_INITVAL 0x70000
+
+#define AR934X_SPI_REG_CTRL 0x04
+#define AR934X_SPI_CLK_MASK GENMASK(5, 0)
+
+#define AR934X_SPI_DATAOUT 0x10
+
+#define AR934X_SPI_REG_SHIFT_CTRL 0x14
+#define AR934X_SPI_SHIFT_EN BIT(31)
+#define AR934X_SPI_SHIFT_CS(n) BIT(28 + (n))
+#define AR934X_SPI_SHIFT_TERM 26
+#define AR934X_SPI_SHIFT_VAL(cs, term, count) \
+ (AR934X_SPI_SHIFT_EN | AR934X_SPI_SHIFT_CS(cs) | \
+ (term) << AR934X_SPI_SHIFT_TERM | (count))
+
+#define AR934X_SPI_DATAIN 0x18
+
+struct ar934x_spi {
+ struct spi_controller *ctlr;
+ void __iomem *base;
+ struct clk *clk;
+ unsigned int clk_freq;
+};
+
+static inline int ar934x_spi_clk_div(struct ar934x_spi *sp, unsigned int freq)
+{
+ int div = DIV_ROUND_UP(sp->clk_freq, freq * 2) - 1;
+
+ if (div < 0)
+ return 0;
+ else if (div > AR934X_SPI_CLK_MASK)
+ return -EINVAL;
+ else
+ return div;
+}
+
+static int ar934x_spi_setup(struct spi_device *spi)
+{
+ struct ar934x_spi *sp = spi_controller_get_devdata(spi->master);
+
+ if ((spi->max_speed_hz == 0) ||
+ (spi->max_speed_hz > (sp->clk_freq / 2))) {
+ spi->max_speed_hz = sp->clk_freq / 2;
+ } else if (spi->max_speed_hz < (sp->clk_freq / 128)) {
+ dev_err(&spi->dev, "spi clock is too low\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ar934x_spi_transfer_one_message(struct spi_controller *master,
+ struct spi_message *m)
+{
+ struct ar934x_spi *sp = spi_controller_get_devdata(master);
+ struct spi_transfer *t = NULL;
+ struct spi_device *spi = m->spi;
+ unsigned long trx_done, trx_cur;
+ int stat = 0;
+ u8 term = 0;
+ int div, i;
+ u32 reg;
+ const u8 *tx_buf;
+ u8 *buf;
+
+ m->actual_length = 0;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->speed_hz)
+ div = ar934x_spi_clk_div(sp, t->speed_hz);
+ else
+ div = ar934x_spi_clk_div(sp, spi->max_speed_hz);
+ if (div < 0) {
+ stat = -EIO;
+ goto msg_done;
+ }
+
+ reg = ioread32(sp->base + AR934X_SPI_REG_CTRL);
+ reg &= ~AR934X_SPI_CLK_MASK;
+ reg |= div;
+ iowrite32(reg, sp->base + AR934X_SPI_REG_CTRL);
+ iowrite32(0, sp->base + AR934X_SPI_DATAOUT);
+
+ for (trx_done = 0; trx_done < t->len; trx_done += 4) {
+ trx_cur = t->len - trx_done;
+ if (trx_cur > 4)
+ trx_cur = 4;
+ else if (list_is_last(&t->transfer_list, &m->transfers))
+ term = 1;
+
+ if (t->tx_buf) {
+ tx_buf = t->tx_buf + trx_done;
+ reg = tx_buf[0];
+ for (i = 1; i < trx_cur; i++)
+ reg = reg << 8 | tx_buf[i];
+ iowrite32(reg, sp->base + AR934X_SPI_DATAOUT);
+ }
+
+ reg = AR934X_SPI_SHIFT_VAL(spi->chip_select, term,
+ trx_cur * 8);
+ iowrite32(reg, sp->base + AR934X_SPI_REG_SHIFT_CTRL);
+ stat = readl_poll_timeout(
+ sp->base + AR934X_SPI_REG_SHIFT_CTRL, reg,
+ !(reg & AR934X_SPI_SHIFT_EN), 0, 5);
+ if (stat < 0)
+ goto msg_done;
+
+ if (t->rx_buf) {
+ reg = ioread32(sp->base + AR934X_SPI_DATAIN);
+ buf = t->rx_buf + trx_done;
+ for (i = 0; i < trx_cur; i++) {
+ buf[trx_cur - i - 1] = reg & 0xff;
+ reg >>= 8;
+ }
+ }
+ }
+ m->actual_length += t->len;
+ }
+
+msg_done:
+ m->status = stat;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+static const struct of_device_id ar934x_spi_match[] = {
+ { .compatible = "qca,ar934x-spi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ar934x_spi_match);
+
+static int ar934x_spi_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct ar934x_spi *sp;
+ void __iomem *base;
+ struct clk *clk;
+ int ret;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*sp));
+ if (!ctlr) {
+ dev_info(&pdev->dev, "failed to allocate spi controller\n");
+ return -ENOMEM;
+ }
+
+ /* disable flash mapping and expose spi controller registers */
+ iowrite32(AR934X_SPI_ENABLE, base + AR934X_SPI_REG_FS);
+ /* restore pins to default state: CSn=1 DO=CLK=0 */
+ iowrite32(AR934X_SPI_IOC_INITVAL, base + AR934X_SPI_REG_IOC);
+
+ ctlr->mode_bits = SPI_LSB_FIRST;
+ ctlr->setup = ar934x_spi_setup;
+ ctlr->transfer_one_message = ar934x_spi_transfer_one_message;
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->num_chipselect = 3;
+
+ dev_set_drvdata(&pdev->dev, ctlr);
+
+ sp = spi_controller_get_devdata(ctlr);
+ sp->base = base;
+ sp->clk = clk;
+ sp->clk_freq = clk_get_rate(clk);
+ sp->ctlr = ctlr;
+
+ return devm_spi_register_controller(&pdev->dev, ctlr);
+}
+
+static int ar934x_spi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct ar934x_spi *sp;
+
+ ctlr = dev_get_drvdata(&pdev->dev);
+ sp = spi_controller_get_devdata(ctlr);
+
+ clk_disable_unprepare(sp->clk);
+
+ return 0;
+}
+
+static struct platform_driver ar934x_spi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = ar934x_spi_match,
+ },
+ .probe = ar934x_spi_probe,
+ .remove = ar934x_spi_remove,
+};
+
+module_platform_driver(ar934x_spi_driver);
+
+MODULE_DESCRIPTION("SPI controller driver for Qualcomm Atheros AR934x/QCA95xx");
+MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
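
The PIO loop in ar934x_spi_transfer_one_message() above shifts up to four TX bytes into a single DATAOUT word, most significant byte first, and unpacks DATAIN in the mirror order on receive. A stand-alone sketch of that packing (hypothetical helper, not part of the driver):

#include <linux/types.h>

/* Illustrative only: packs up to 4 bytes MSB-first, as the driver does
 * before writing AR934X_SPI_DATAOUT.  {0xaa, 0xbb, 0xcc, 0xdd} becomes
 * 0xaabbccdd; a short 2-byte tail {0xaa, 0xbb} becomes 0x0000aabb, with
 * the shift count programmed as trx_cur * 8 bits.
 */
static u32 example_pack_tx(const u8 *tx_buf, unsigned int trx_cur)
{
        unsigned int i;
        u32 reg = tx_buf[0];

        for (i = 1; i < trx_cur; i++)
                reg = reg << 8 | tx_buf[i];
        return reg;
}
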
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index 7327309ea3d5..6c235306c0e4 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -366,7 +366,6 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
goto out_disable_clk;
rate = clk_get_rate(pll_clk);
- clk_disable_unprepare(pll_clk);
if (!rate) {
ret = -EINVAL;
goto out_disable_pll_clk;
diff --git a/drivers/spi/spi-efm32.c b/drivers/spi/spi-efm32.c
index 64d4c441b641..ea6e4a7b3feb 100644
--- a/drivers/spi/spi-efm32.c
+++ b/drivers/spi/spi-efm32.c
@@ -6,14 +6,13 @@
#include <linux/io.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
#include <linux/platform_data/efm32-spi.h>
+#include <linux/of.h>
#define DRIVER_NAME "efm32-spi"
@@ -82,9 +81,6 @@ struct efm32_spi_ddata {
const u8 *tx_buf;
u8 *rx_buf;
unsigned tx_len, rx_len;
-
- /* chip selects */
- unsigned csgpio[];
};
#define ddata_to_dev(ddata) (&(ddata->bitbang.master->dev))
@@ -102,14 +98,6 @@ static u32 efm32_spi_read32(struct efm32_spi_ddata *ddata, unsigned offset)
return readl_relaxed(ddata->base + offset);
}
-static void efm32_spi_chipselect(struct spi_device *spi, int is_on)
-{
- struct efm32_spi_ddata *ddata = spi_master_get_devdata(spi->master);
- int value = !(spi->mode & SPI_CS_HIGH) == !(is_on == BITBANG_CS_ACTIVE);
-
- gpio_set_value(ddata->csgpio[spi->chip_select], value);
-}
-
static int efm32_spi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
@@ -320,17 +308,11 @@ static int efm32_spi_probe(struct platform_device *pdev)
int ret;
struct spi_master *master;
struct device_node *np = pdev->dev.of_node;
- int num_cs, i;
if (!np)
return -EINVAL;
- num_cs = of_gpio_named_count(np, "cs-gpios");
- if (num_cs < 0)
- return num_cs;
-
- master = spi_alloc_master(&pdev->dev,
- sizeof(*ddata) + num_cs * sizeof(unsigned));
+ master = spi_alloc_master(&pdev->dev, sizeof(*ddata));
if (!master) {
dev_dbg(&pdev->dev,
"failed to allocate spi master controller\n");
@@ -340,14 +322,13 @@ static int efm32_spi_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node;
- master->num_chipselect = num_cs;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+ master->use_gpio_descriptors = true;
ddata = spi_master_get_devdata(master);
ddata->bitbang.master = master;
- ddata->bitbang.chipselect = efm32_spi_chipselect;
ddata->bitbang.setup_transfer = efm32_spi_setup_transfer;
ddata->bitbang.txrx_bufs = efm32_spi_txrx_bufs;
@@ -361,25 +342,6 @@ static int efm32_spi_probe(struct platform_device *pdev)
goto err;
}
- for (i = 0; i < num_cs; ++i) {
- ret = of_get_named_gpio(np, "cs-gpios", i);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to get csgpio#%u (%d)\n",
- i, ret);
- goto err;
- }
- ddata->csgpio[i] = ret;
- dev_dbg(&pdev->dev, "csgpio#%u = %u\n", i, ddata->csgpio[i]);
- ret = devm_gpio_request_one(&pdev->dev, ddata->csgpio[i],
- GPIOF_OUT_INIT_LOW, DRIVER_NAME);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "failed to configure csgpio#%u (%d)\n",
- i, ret);
- goto err;
- }
- }
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -ENODEV;
diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
new file mode 100644
index 000000000000..37a3e0f8e752
--- /dev/null
+++ b/drivers/spi/spi-fsi.c
@@ -0,0 +1,558 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright (C) IBM Corporation 2020
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/fsi.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+
+#define FSI_ENGID_SPI 0x23
+#define FSI_MBOX_ROOT_CTRL_8 0x2860
+
+#define FSI2SPI_DATA0 0x00
+#define FSI2SPI_DATA1 0x04
+#define FSI2SPI_CMD 0x08
+#define FSI2SPI_CMD_WRITE BIT(31)
+#define FSI2SPI_RESET 0x18
+#define FSI2SPI_STATUS 0x1c
+#define FSI2SPI_STATUS_ANY_ERROR BIT(31)
+#define FSI2SPI_IRQ 0x20
+
+#define SPI_FSI_BASE 0x70000
+#define SPI_FSI_INIT_TIMEOUT_MS 1000
+#define SPI_FSI_MAX_TRANSFER_SIZE 2048
+
+#define SPI_FSI_ERROR 0x0
+#define SPI_FSI_COUNTER_CFG 0x1
+#define SPI_FSI_COUNTER_CFG_LOOPS(x) (((u64)(x) & 0xffULL) << 32)
+#define SPI_FSI_CFG1 0x2
+#define SPI_FSI_CLOCK_CFG 0x3
+#define SPI_FSI_CLOCK_CFG_MM_ENABLE BIT_ULL(32)
+#define SPI_FSI_CLOCK_CFG_ECC_DISABLE (BIT_ULL(35) | BIT_ULL(33))
+#define SPI_FSI_CLOCK_CFG_RESET1 (BIT_ULL(36) | BIT_ULL(38))
+#define SPI_FSI_CLOCK_CFG_RESET2 (BIT_ULL(37) | BIT_ULL(39))
+#define SPI_FSI_CLOCK_CFG_MODE (BIT_ULL(41) | BIT_ULL(42))
+#define SPI_FSI_CLOCK_CFG_SCK_RECV_DEL GENMASK_ULL(51, 44)
+#define SPI_FSI_CLOCK_CFG_SCK_NO_DEL BIT_ULL(51)
+#define SPI_FSI_CLOCK_CFG_SCK_DIV GENMASK_ULL(63, 52)
+#define SPI_FSI_MMAP 0x4
+#define SPI_FSI_DATA_TX 0x5
+#define SPI_FSI_DATA_RX 0x6
+#define SPI_FSI_SEQUENCE 0x7
+#define SPI_FSI_SEQUENCE_STOP 0x00
+#define SPI_FSI_SEQUENCE_SEL_SLAVE(x) (0x10 | ((x) & 0xf))
+#define SPI_FSI_SEQUENCE_SHIFT_OUT(x) (0x30 | ((x) & 0xf))
+#define SPI_FSI_SEQUENCE_SHIFT_IN(x) (0x40 | ((x) & 0xf))
+#define SPI_FSI_SEQUENCE_COPY_DATA_TX 0xc0
+#define SPI_FSI_SEQUENCE_BRANCH(x) (0xe0 | ((x) & 0xf))
+#define SPI_FSI_STATUS 0x8
+#define SPI_FSI_STATUS_ERROR \
+ (GENMASK_ULL(31, 21) | GENMASK_ULL(15, 12))
+#define SPI_FSI_STATUS_SEQ_STATE GENMASK_ULL(55, 48)
+#define SPI_FSI_STATUS_SEQ_STATE_IDLE BIT_ULL(48)
+#define SPI_FSI_STATUS_TDR_UNDERRUN BIT_ULL(57)
+#define SPI_FSI_STATUS_TDR_OVERRUN BIT_ULL(58)
+#define SPI_FSI_STATUS_TDR_FULL BIT_ULL(59)
+#define SPI_FSI_STATUS_RDR_UNDERRUN BIT_ULL(61)
+#define SPI_FSI_STATUS_RDR_OVERRUN BIT_ULL(62)
+#define SPI_FSI_STATUS_RDR_FULL BIT_ULL(63)
+#define SPI_FSI_STATUS_ANY_ERROR \
+ (SPI_FSI_STATUS_ERROR | SPI_FSI_STATUS_TDR_UNDERRUN | \
+ SPI_FSI_STATUS_TDR_OVERRUN | SPI_FSI_STATUS_RDR_UNDERRUN | \
+ SPI_FSI_STATUS_RDR_OVERRUN)
+#define SPI_FSI_PORT_CTRL 0x9
+
+struct fsi_spi {
+ struct device *dev; /* SPI controller device */
+ struct fsi_device *fsi; /* FSI2SPI CFAM engine device */
+ u32 base;
+};
+
+struct fsi_spi_sequence {
+ int bit;
+ u64 data;
+};
+
+static int fsi_spi_check_status(struct fsi_spi *ctx)
+{
+ int rc;
+ u32 sts;
+ __be32 sts_be;
+
+ rc = fsi_device_read(ctx->fsi, FSI2SPI_STATUS, &sts_be,
+ sizeof(sts_be));
+ if (rc)
+ return rc;
+
+ sts = be32_to_cpu(sts_be);
+ if (sts & FSI2SPI_STATUS_ANY_ERROR) {
+ dev_err(ctx->dev, "Error with FSI2SPI interface: %08x.\n", sts);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int fsi_spi_read_reg(struct fsi_spi *ctx, u32 offset, u64 *value)
+{
+ int rc;
+ __be32 cmd_be;
+ __be32 data_be;
+ u32 cmd = offset + ctx->base;
+
+ *value = 0ULL;
+
+ if (cmd & FSI2SPI_CMD_WRITE)
+ return -EINVAL;
+
+ cmd_be = cpu_to_be32(cmd);
+ rc = fsi_device_write(ctx->fsi, FSI2SPI_CMD, &cmd_be, sizeof(cmd_be));
+ if (rc)
+ return rc;
+
+ rc = fsi_spi_check_status(ctx);
+ if (rc)
+ return rc;
+
+ rc = fsi_device_read(ctx->fsi, FSI2SPI_DATA0, &data_be,
+ sizeof(data_be));
+ if (rc)
+ return rc;
+
+ *value |= (u64)be32_to_cpu(data_be) << 32;
+
+ rc = fsi_device_read(ctx->fsi, FSI2SPI_DATA1, &data_be,
+ sizeof(data_be));
+ if (rc)
+ return rc;
+
+ *value |= (u64)be32_to_cpu(data_be);
+ dev_dbg(ctx->dev, "Read %02x[%016llx].\n", offset, *value);
+
+ return 0;
+}
+
+static int fsi_spi_write_reg(struct fsi_spi *ctx, u32 offset, u64 value)
+{
+ int rc;
+ __be32 cmd_be;
+ __be32 data_be;
+ u32 cmd = offset + ctx->base;
+
+ if (cmd & FSI2SPI_CMD_WRITE)
+ return -EINVAL;
+
+ dev_dbg(ctx->dev, "Write %02x[%016llx].\n", offset, value);
+
+ data_be = cpu_to_be32(upper_32_bits(value));
+ rc = fsi_device_write(ctx->fsi, FSI2SPI_DATA0, &data_be,
+ sizeof(data_be));
+ if (rc)
+ return rc;
+
+ data_be = cpu_to_be32(lower_32_bits(value));
+ rc = fsi_device_write(ctx->fsi, FSI2SPI_DATA1, &data_be,
+ sizeof(data_be));
+ if (rc)
+ return rc;
+
+ cmd_be = cpu_to_be32(cmd | FSI2SPI_CMD_WRITE);
+ rc = fsi_device_write(ctx->fsi, FSI2SPI_CMD, &cmd_be, sizeof(cmd_be));
+ if (rc)
+ return rc;
+
+ return fsi_spi_check_status(ctx);
+}
+
+static int fsi_spi_data_in(u64 in, u8 *rx, int len)
+{
+ int i;
+ int num_bytes = min(len, 8);
+
+ for (i = 0; i < num_bytes; ++i)
+ rx[i] = (u8)(in >> (8 * ((num_bytes - 1) - i)));
+
+ return num_bytes;
+}
+
+static int fsi_spi_data_out(u64 *out, const u8 *tx, int len)
+{
+ int i;
+ int num_bytes = min(len, 8);
+ u8 *out_bytes = (u8 *)out;
+
+ /* Unused bytes of the tx data should be 0. */
+ *out = 0ULL;
+
+ for (i = 0; i < num_bytes; ++i)
+ out_bytes[8 - (i + 1)] = tx[i];
+
+ return num_bytes;
+}
+
+static int fsi_spi_reset(struct fsi_spi *ctx)
+{
+ int rc;
+
+ dev_dbg(ctx->dev, "Resetting SPI controller.\n");
+
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
+ SPI_FSI_CLOCK_CFG_RESET1);
+ if (rc)
+ return rc;
+
+ return fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
+ SPI_FSI_CLOCK_CFG_RESET2);
+}
+
+static int fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
+{
+ /*
+ * Add the next byte of instruction to the 8-byte sequence register.
+ * Then decrement the counter so that the next instruction will go in
+ * the right place. Return the number of "slots" left in the sequence
+ * register.
+ */
+ seq->data |= (u64)val << seq->bit;
+ seq->bit -= 8;
+
+ return ((64 - seq->bit) / 8) - 2;
+}
+
+static void fsi_spi_sequence_init(struct fsi_spi_sequence *seq)
+{
+ seq->bit = 56;
+ seq->data = 0ULL;
+}
+
+static int fsi_spi_sequence_transfer(struct fsi_spi *ctx,
+ struct fsi_spi_sequence *seq,
+ struct spi_transfer *transfer)
+{
+ int loops;
+ int idx;
+ int rc;
+ u8 len = min(transfer->len, 8U);
+ u8 rem = transfer->len % len;
+
+ loops = transfer->len / len;
+
+ if (transfer->tx_buf) {
+ idx = fsi_spi_sequence_add(seq,
+ SPI_FSI_SEQUENCE_SHIFT_OUT(len));
+ if (rem)
+ rem = SPI_FSI_SEQUENCE_SHIFT_OUT(rem);
+ } else if (transfer->rx_buf) {
+ idx = fsi_spi_sequence_add(seq,
+ SPI_FSI_SEQUENCE_SHIFT_IN(len));
+ if (rem)
+ rem = SPI_FSI_SEQUENCE_SHIFT_IN(rem);
+ } else {
+ return -EINVAL;
+ }
+
+ if (loops > 1) {
+ fsi_spi_sequence_add(seq, SPI_FSI_SEQUENCE_BRANCH(idx));
+
+ if (rem)
+ fsi_spi_sequence_add(seq, rem);
+
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_COUNTER_CFG,
+ SPI_FSI_COUNTER_CFG_LOOPS(loops - 1));
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int fsi_spi_transfer_data(struct fsi_spi *ctx,
+ struct spi_transfer *transfer)
+{
+ int rc = 0;
+ u64 status = 0ULL;
+
+ if (transfer->tx_buf) {
+ int nb;
+ int sent = 0;
+ u64 out = 0ULL;
+ const u8 *tx = transfer->tx_buf;
+
+ while (transfer->len > sent) {
+ nb = fsi_spi_data_out(&out, &tx[sent],
+ (int)transfer->len - sent);
+
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_DATA_TX, out);
+ if (rc)
+ return rc;
+
+ do {
+ rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
+ &status);
+ if (rc)
+ return rc;
+
+ if (status & SPI_FSI_STATUS_ANY_ERROR) {
+ rc = fsi_spi_reset(ctx);
+ if (rc)
+ return rc;
+
+ return -EREMOTEIO;
+ }
+ } while (status & SPI_FSI_STATUS_TDR_FULL);
+
+ sent += nb;
+ }
+ } else if (transfer->rx_buf) {
+ int recv = 0;
+ u64 in = 0ULL;
+ u8 *rx = transfer->rx_buf;
+
+ while (transfer->len > recv) {
+ do {
+ rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
+ &status);
+ if (rc)
+ return rc;
+
+ if (status & SPI_FSI_STATUS_ANY_ERROR) {
+ rc = fsi_spi_reset(ctx);
+ if (rc)
+ return rc;
+
+ return -EREMOTEIO;
+ }
+ } while (!(status & SPI_FSI_STATUS_RDR_FULL));
+
+ rc = fsi_spi_read_reg(ctx, SPI_FSI_DATA_RX, &in);
+ if (rc)
+ return rc;
+
+ recv += fsi_spi_data_in(in, &rx[recv],
+ (int)transfer->len - recv);
+ }
+ }
+
+ return 0;
+}
+
+static int fsi_spi_transfer_init(struct fsi_spi *ctx)
+{
+ int rc;
+ bool reset = false;
+ unsigned long end;
+ u64 seq_state;
+ u64 clock_cfg = 0ULL;
+ u64 status = 0ULL;
+ u64 wanted_clock_cfg = SPI_FSI_CLOCK_CFG_ECC_DISABLE |
+ SPI_FSI_CLOCK_CFG_SCK_NO_DEL |
+ FIELD_PREP(SPI_FSI_CLOCK_CFG_SCK_DIV, 4);
+
+ end = jiffies + msecs_to_jiffies(SPI_FSI_INIT_TIMEOUT_MS);
+ do {
+ if (time_after(jiffies, end))
+ return -ETIMEDOUT;
+
+ rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, &status);
+ if (rc)
+ return rc;
+
+ seq_state = status & SPI_FSI_STATUS_SEQ_STATE;
+
+ if (status & (SPI_FSI_STATUS_ANY_ERROR |
+ SPI_FSI_STATUS_TDR_FULL |
+ SPI_FSI_STATUS_RDR_FULL)) {
+ if (reset)
+ return -EIO;
+
+ rc = fsi_spi_reset(ctx);
+ if (rc)
+ return rc;
+
+ reset = true;
+ continue;
+ }
+ } while (seq_state && (seq_state != SPI_FSI_STATUS_SEQ_STATE_IDLE));
+
+ rc = fsi_spi_read_reg(ctx, SPI_FSI_CLOCK_CFG, &clock_cfg);
+ if (rc)
+ return rc;
+
+ if ((clock_cfg & (SPI_FSI_CLOCK_CFG_MM_ENABLE |
+ SPI_FSI_CLOCK_CFG_ECC_DISABLE |
+ SPI_FSI_CLOCK_CFG_MODE |
+ SPI_FSI_CLOCK_CFG_SCK_RECV_DEL |
+ SPI_FSI_CLOCK_CFG_SCK_DIV)) != wanted_clock_cfg)
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_CLOCK_CFG,
+ wanted_clock_cfg);
+
+ return rc;
+}
+
+static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
+ struct spi_message *mesg)
+{
+ int rc = 0;
+ u8 seq_slave = SPI_FSI_SEQUENCE_SEL_SLAVE(mesg->spi->chip_select + 1);
+ struct spi_transfer *transfer;
+ struct fsi_spi *ctx = spi_controller_get_devdata(ctlr);
+
+ list_for_each_entry(transfer, &mesg->transfers, transfer_list) {
+ struct fsi_spi_sequence seq;
+ struct spi_transfer *next = NULL;
+
+ /* Sequencer must do shift out (tx) first. */
+ if (!transfer->tx_buf ||
+ transfer->len > SPI_FSI_MAX_TRANSFER_SIZE) {
+ rc = -EINVAL;
+ goto error;
+ }
+
+ dev_dbg(ctx->dev, "Start tx of %d bytes.\n", transfer->len);
+
+ rc = fsi_spi_transfer_init(ctx);
+ if (rc < 0)
+ goto error;
+
+ fsi_spi_sequence_init(&seq);
+ fsi_spi_sequence_add(&seq, seq_slave);
+
+ rc = fsi_spi_sequence_transfer(ctx, &seq, transfer);
+ if (rc)
+ goto error;
+
+ if (!list_is_last(&transfer->transfer_list,
+ &mesg->transfers)) {
+ next = list_next_entry(transfer, transfer_list);
+
+ /* Sequencer can only do shift in (rx) after tx. */
+ if (next->rx_buf) {
+ if (next->len > SPI_FSI_MAX_TRANSFER_SIZE) {
+ rc = -EINVAL;
+ goto error;
+ }
+
+ dev_dbg(ctx->dev, "Sequence rx of %d bytes.\n",
+ next->len);
+
+ rc = fsi_spi_sequence_transfer(ctx, &seq,
+ next);
+ if (rc)
+ goto error;
+ } else {
+ next = NULL;
+ }
+ }
+
+ fsi_spi_sequence_add(&seq, SPI_FSI_SEQUENCE_SEL_SLAVE(0));
+
+ rc = fsi_spi_write_reg(ctx, SPI_FSI_SEQUENCE, seq.data);
+ if (rc)
+ goto error;
+
+ rc = fsi_spi_transfer_data(ctx, transfer);
+ if (rc)
+ goto error;
+
+ if (next) {
+ rc = fsi_spi_transfer_data(ctx, next);
+ if (rc)
+ goto error;
+
+ transfer = next;
+ }
+ }
+
+error:
+ mesg->status = rc;
+ spi_finalize_current_message(ctlr);
+
+ return rc;
+}
+
+static size_t fsi_spi_max_transfer_size(struct spi_device *spi)
+{
+ return SPI_FSI_MAX_TRANSFER_SIZE;
+}
+
+static int fsi_spi_probe(struct device *dev)
+{
+ int rc;
+ u32 root_ctrl_8;
+ struct device_node *np;
+ int num_controllers_registered = 0;
+ struct fsi_device *fsi = to_fsi_dev(dev);
+
+ /*
+ * Check the SPI mux before attempting to probe. If the mux isn't set
+ * then the SPI controllers can't access their slave devices.
+ */
+ rc = fsi_slave_read(fsi->slave, FSI_MBOX_ROOT_CTRL_8, &root_ctrl_8,
+ sizeof(root_ctrl_8));
+ if (rc)
+ return rc;
+
+ if (!root_ctrl_8) {
+ dev_dbg(dev, "SPI mux not set, aborting probe.\n");
+ return -ENODEV;
+ }
+
+ for_each_available_child_of_node(dev->of_node, np) {
+ u32 base;
+ struct fsi_spi *ctx;
+ struct spi_controller *ctlr;
+
+ if (of_property_read_u32(np, "reg", &base))
+ continue;
+
+ ctlr = spi_alloc_master(dev, sizeof(*ctx));
+ if (!ctlr)
+ break;
+
+ ctlr->dev.of_node = np;
+ ctlr->num_chipselect = of_get_available_child_count(np) ?: 1;
+ ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
+ ctlr->max_transfer_size = fsi_spi_max_transfer_size;
+ ctlr->transfer_one_message = fsi_spi_transfer_one_message;
+
+ ctx = spi_controller_get_devdata(ctlr);
+ ctx->dev = &ctlr->dev;
+ ctx->fsi = fsi;
+ ctx->base = base + SPI_FSI_BASE;
+
+ rc = devm_spi_register_controller(dev, ctlr);
+ if (rc)
+ spi_controller_put(ctlr);
+ else
+ num_controllers_registered++;
+ }
+
+ if (!num_controllers_registered)
+ return -ENODEV;
+
+ return 0;
+}
+
+static const struct fsi_device_id fsi_spi_ids[] = {
+ { FSI_ENGID_SPI, FSI_VERSION_ANY },
+ { }
+};
+MODULE_DEVICE_TABLE(fsi, fsi_spi_ids);
+
+static struct fsi_driver fsi_spi_driver = {
+ .id_table = fsi_spi_ids,
+ .drv = {
+ .name = "spi-fsi",
+ .bus = &fsi_bus_type,
+ .probe = fsi_spi_probe,
+ },
+};
+module_fsi_driver(fsi_spi_driver);
+
+MODULE_AUTHOR("Eddie James <eajames@linux.ibm.com>");
+MODULE_DESCRIPTION("FSI attached SPI controller");
+MODULE_LICENSE("GPL");
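
fsi_spi_sequence_add() above packs one 8-bit sequencer opcode per byte of the 64-bit SPI_FSI_SEQUENCE register, starting at bit 56 and working down. As a worked example (hypothetical values derived from the opcode definitions above, not from a captured trace), the sequence fsi_spi_transfer_one_message() builds for a 4-byte tx followed by a 2-byte rx on chip select 0 is:

#include <linux/types.h>

/*
 * Illustrative only:
 *   SEL_SLAVE(1)  = 0x11   bits 63..56   (chip_select 0, plus 1)
 *   SHIFT_OUT(4)  = 0x34   bits 55..48
 *   SHIFT_IN(2)   = 0x42   bits 47..40
 *   SEL_SLAVE(0)  = 0x10   bits 39..32   (deselect)
 * The remaining slots stay SPI_FSI_SEQUENCE_STOP (0x00), so the value
 * written via fsi_spi_write_reg(ctx, SPI_FSI_SEQUENCE, ...) would be:
 */
static const u64 example_fsi_sequence = 0x1134421000000000ULL;
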
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 6ec2dcb8c57a..50e41f66a2d7 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -20,16 +20,9 @@
#define DRIVER_NAME "fsl-dspi"
-#ifdef CONFIG_M5441x
-#define DSPI_FIFO_SIZE 16
-#else
-#define DSPI_FIFO_SIZE 4
-#endif
-#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
-
#define SPI_MCR 0x00
#define SPI_MCR_MASTER BIT(31)
-#define SPI_MCR_PCSIS (0x3F << 16)
+#define SPI_MCR_PCSIS(x) ((x) << 16)
#define SPI_MCR_CLR_TXF BIT(11)
#define SPI_MCR_CLR_RXF BIT(10)
#define SPI_MCR_XSPI BIT(3)
@@ -79,6 +72,7 @@
#define SPI_RSER 0x30
#define SPI_RSER_TCFQE BIT(31)
#define SPI_RSER_EOQFE BIT(28)
+#define SPI_RSER_CMDTCFE BIT(23)
#define SPI_PUSHR 0x34
#define SPI_PUSHR_CMD_CONT BIT(15)
@@ -109,57 +103,95 @@
#define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1)
#define SPI_FRAME_EBITS(bits) SPI_CTARE_FMSZE(((bits) - 1) >> 4)
-/* Register offsets for regmap_pushr */
-#define PUSHR_CMD 0x0
-#define PUSHR_TX 0x2
-
#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
struct chip_data {
u32 ctar_val;
- u16 void_write_data;
};
enum dspi_trans_mode {
DSPI_EOQ_MODE = 0,
- DSPI_TCFQ_MODE,
+ DSPI_XSPI_MODE,
DSPI_DMA_MODE,
};
struct fsl_dspi_devtype_data {
enum dspi_trans_mode trans_mode;
u8 max_clock_factor;
- bool ptp_sts_supported;
- bool xspi_mode;
-};
-
-static const struct fsl_dspi_devtype_data vf610_data = {
- .trans_mode = DSPI_DMA_MODE,
- .max_clock_factor = 2,
-};
-
-static const struct fsl_dspi_devtype_data ls1021a_v1_data = {
- .trans_mode = DSPI_TCFQ_MODE,
- .max_clock_factor = 8,
- .ptp_sts_supported = true,
- .xspi_mode = true,
+ int fifo_size;
};
-static const struct fsl_dspi_devtype_data ls2085a_data = {
- .trans_mode = DSPI_TCFQ_MODE,
- .max_clock_factor = 8,
- .ptp_sts_supported = true,
+enum {
+ LS1021A,
+ LS1012A,
+ LS1028A,
+ LS1043A,
+ LS1046A,
+ LS2080A,
+ LS2085A,
+ LX2160A,
+ MCF5441X,
+ VF610,
};
-static const struct fsl_dspi_devtype_data coldfire_data = {
- .trans_mode = DSPI_EOQ_MODE,
- .max_clock_factor = 8,
+static const struct fsl_dspi_devtype_data devtype_data[] = {
+ [VF610] = {
+ .trans_mode = DSPI_DMA_MODE,
+ .max_clock_factor = 2,
+ .fifo_size = 4,
+ },
+ [LS1021A] = {
+ /* Has A-011218 DMA erratum */
+ .trans_mode = DSPI_XSPI_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 4,
+ },
+ [LS1012A] = {
+ /* Has A-011218 DMA erratum */
+ .trans_mode = DSPI_XSPI_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 16,
+ },
+ [LS1028A] = {
+ .trans_mode = DSPI_XSPI_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 4,
+ },
+ [LS1043A] = {
+ /* Has A-011218 DMA erratum */
+ .trans_mode = DSPI_XSPI_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 16,
+ },
+ [LS1046A] = {
+ /* Has A-011218 DMA erratum */
+ .trans_mode = DSPI_XSPI_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 16,
+ },
+ [LS2080A] = {
+ .trans_mode = DSPI_DMA_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 4,
+ },
+ [LS2085A] = {
+ .trans_mode = DSPI_DMA_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 4,
+ },
+ [LX2160A] = {
+ .trans_mode = DSPI_DMA_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 4,
+ },
+ [MCF5441X] = {
+ .trans_mode = DSPI_EOQ_MODE,
+ .max_clock_factor = 8,
+ .fifo_size = 16,
+ },
};
struct fsl_dspi_dma {
- /* Length of transfer in words of DSPI_FIFO_SIZE */
- u32 curr_xfer_len;
-
u32 *tx_dma_buf;
struct dma_chan *chan_tx;
dma_addr_t tx_dma_phys;
@@ -189,36 +221,99 @@ struct fsl_dspi {
size_t len;
const void *tx;
void *rx;
- void *rx_end;
- u16 void_write_data;
u16 tx_cmd;
- u8 bits_per_word;
- u8 bytes_per_word;
const struct fsl_dspi_devtype_data *devtype_data;
- wait_queue_head_t waitq;
- u32 waitflags;
+ struct completion xfer_done;
struct fsl_dspi_dma *dma;
+
+ int oper_word_size;
+ int oper_bits_per_word;
+
+ int words_in_flight;
+
+ /*
+ * Offsets for CMD and TXDATA within SPI_PUSHR when accessed
+ * individually (in XSPI mode)
+ */
+ int pushr_cmd;
+ int pushr_tx;
+
+ void (*host_to_dev)(struct fsl_dspi *dspi, u32 *txdata);
+ void (*dev_to_host)(struct fsl_dspi *dspi, u32 rxdata);
};
+static void dspi_native_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
+{
+ memcpy(txdata, dspi->tx, dspi->oper_word_size);
+ dspi->tx += dspi->oper_word_size;
+}
+
+static void dspi_native_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
+{
+ memcpy(dspi->rx, &rxdata, dspi->oper_word_size);
+ dspi->rx += dspi->oper_word_size;
+}
+
+static void dspi_8on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
+{
+ *txdata = cpu_to_be32(*(u32 *)dspi->tx);
+ dspi->tx += sizeof(u32);
+}
+
+static void dspi_8on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
+{
+ *(u32 *)dspi->rx = be32_to_cpu(rxdata);
+ dspi->rx += sizeof(u32);
+}
+
+static void dspi_8on16_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
+{
+ *txdata = cpu_to_be16(*(u16 *)dspi->tx);
+ dspi->tx += sizeof(u16);
+}
+
+static void dspi_8on16_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
+{
+ *(u16 *)dspi->rx = be16_to_cpu(rxdata);
+ dspi->rx += sizeof(u16);
+}
+
+static void dspi_16on32_host_to_dev(struct fsl_dspi *dspi, u32 *txdata)
+{
+ u16 hi = *(u16 *)dspi->tx;
+ u16 lo = *(u16 *)(dspi->tx + 2);
+
+ *txdata = (u32)hi << 16 | lo;
+ dspi->tx += sizeof(u32);
+}
+
+static void dspi_16on32_dev_to_host(struct fsl_dspi *dspi, u32 rxdata)
+{
+ u16 hi = rxdata & 0xffff;
+ u16 lo = rxdata >> 16;
+
+ *(u16 *)dspi->rx = lo;
+ *(u16 *)(dspi->rx + 2) = hi;
+ dspi->rx += sizeof(u32);
+}
+
+/*
+ * Pop one word from the TX buffer for pushing into the
+ * PUSHR register (TX FIFO)
+ */
static u32 dspi_pop_tx(struct fsl_dspi *dspi)
{
u32 txdata = 0;
- if (dspi->tx) {
- if (dspi->bytes_per_word == 1)
- txdata = *(u8 *)dspi->tx;
- else if (dspi->bytes_per_word == 2)
- txdata = *(u16 *)dspi->tx;
- else /* dspi->bytes_per_word == 4 */
- txdata = *(u32 *)dspi->tx;
- dspi->tx += dspi->bytes_per_word;
- }
- dspi->len -= dspi->bytes_per_word;
+ if (dspi->tx)
+ dspi->host_to_dev(dspi, &txdata);
+ dspi->len -= dspi->oper_word_size;
return txdata;
}
+/* Prepare one TX FIFO entry (txdata plus cmd) */
static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
{
u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);
@@ -231,21 +326,12 @@ static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
return cmd << 16 | data;
}
+/* Push one word to the RX buffer from the POPR register (RX FIFO) */
static void dspi_push_rx(struct fsl_dspi *dspi, u32 rxdata)
{
if (!dspi->rx)
return;
-
- /* Mask off undefined bits */
- rxdata &= (1 << dspi->bits_per_word) - 1;
-
- if (dspi->bytes_per_word == 1)
- *(u8 *)dspi->rx = rxdata;
- else if (dspi->bytes_per_word == 2)
- *(u16 *)dspi->rx = rxdata;
- else /* dspi->bytes_per_word == 4 */
- *(u32 *)dspi->rx = rxdata;
- dspi->rx += dspi->bytes_per_word;
+ dspi->dev_to_host(dspi, rxdata);
}
static void dspi_tx_dma_callback(void *arg)
@@ -263,7 +349,7 @@ static void dspi_rx_dma_callback(void *arg)
int i;
if (dspi->rx) {
- for (i = 0; i < dma->curr_xfer_len; i++)
+ for (i = 0; i < dspi->words_in_flight; i++)
dspi_push_rx(dspi, dspi->dma->rx_dma_buf[i]);
}
@@ -277,12 +363,12 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
int time_left;
int i;
- for (i = 0; i < dma->curr_xfer_len; i++)
+ for (i = 0; i < dspi->words_in_flight; i++)
dspi->dma->tx_dma_buf[i] = dspi_pop_tx_pushr(dspi);
dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
dma->tx_dma_phys,
- dma->curr_xfer_len *
+ dspi->words_in_flight *
DMA_SLAVE_BUSWIDTH_4_BYTES,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -300,7 +386,7 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
dma->rx_dma_phys,
- dma->curr_xfer_len *
+ dspi->words_in_flight *
DMA_SLAVE_BUSWIDTH_4_BYTES,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -348,45 +434,42 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
return 0;
}
+static void dspi_setup_accel(struct fsl_dspi *dspi);
+
static int dspi_dma_xfer(struct fsl_dspi *dspi)
{
struct spi_message *message = dspi->cur_msg;
struct device *dev = &dspi->pdev->dev;
- struct fsl_dspi_dma *dma = dspi->dma;
- int curr_remaining_bytes;
- int bytes_per_buffer;
int ret = 0;
- curr_remaining_bytes = dspi->len;
- bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
- while (curr_remaining_bytes) {
- /* Check if current transfer fits the DMA buffer */
- dma->curr_xfer_len = curr_remaining_bytes
- / dspi->bytes_per_word;
- if (dma->curr_xfer_len > bytes_per_buffer)
- dma->curr_xfer_len = bytes_per_buffer;
+ /*
+ * dspi->len gets decremented by dspi_pop_tx_pushr in
+ * dspi_next_xfer_dma_submit
+ */
+ while (dspi->len) {
+ /* Figure out operational bits-per-word for this chunk */
+ dspi_setup_accel(dspi);
+
+ dspi->words_in_flight = dspi->len / dspi->oper_word_size;
+ if (dspi->words_in_flight > dspi->devtype_data->fifo_size)
+ dspi->words_in_flight = dspi->devtype_data->fifo_size;
+
+ message->actual_length += dspi->words_in_flight *
+ dspi->oper_word_size;
ret = dspi_next_xfer_dma_submit(dspi);
if (ret) {
dev_err(dev, "DMA transfer failed\n");
- goto exit;
-
- } else {
- const int len =
- dma->curr_xfer_len * dspi->bytes_per_word;
- curr_remaining_bytes -= len;
- message->actual_length += len;
- if (curr_remaining_bytes < 0)
- curr_remaining_bytes = 0;
+ break;
}
}
-exit:
return ret;
}
static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
{
+ int dma_bufsize = dspi->devtype_data->fifo_size * 2;
struct device *dev = &dspi->pdev->dev;
struct dma_slave_config cfg;
struct fsl_dspi_dma *dma;
@@ -410,15 +493,17 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
goto err_tx_channel;
}
- dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
- &dma->tx_dma_phys, GFP_KERNEL);
+ dma->tx_dma_buf = dma_alloc_coherent(dma->chan_tx->device->dev,
+ dma_bufsize, &dma->tx_dma_phys,
+ GFP_KERNEL);
if (!dma->tx_dma_buf) {
ret = -ENOMEM;
goto err_tx_dma_buf;
}
- dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
- &dma->rx_dma_phys, GFP_KERNEL);
+ dma->rx_dma_buf = dma_alloc_coherent(dma->chan_rx->device->dev,
+ dma_bufsize, &dma->rx_dma_phys,
+ GFP_KERNEL);
if (!dma->rx_dma_buf) {
ret = -ENOMEM;
goto err_rx_dma_buf;
@@ -454,11 +539,11 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
return 0;
err_slave_config:
- dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
- dma->rx_dma_buf, dma->rx_dma_phys);
+ dma_free_coherent(dma->chan_rx->device->dev,
+ dma_bufsize, dma->rx_dma_buf, dma->rx_dma_phys);
err_rx_dma_buf:
- dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
- dma->tx_dma_buf, dma->tx_dma_phys);
+ dma_free_coherent(dma->chan_tx->device->dev,
+ dma_bufsize, dma->tx_dma_buf, dma->tx_dma_phys);
err_tx_dma_buf:
dma_release_channel(dma->chan_tx);
err_tx_channel:
@@ -472,21 +557,21 @@ err_tx_channel:
static void dspi_release_dma(struct fsl_dspi *dspi)
{
+ int dma_bufsize = dspi->devtype_data->fifo_size * 2;
struct fsl_dspi_dma *dma = dspi->dma;
- struct device *dev = &dspi->pdev->dev;
if (!dma)
return;
if (dma->chan_tx) {
- dma_unmap_single(dev, dma->tx_dma_phys,
- DSPI_DMA_BUFSIZE, DMA_TO_DEVICE);
+ dma_unmap_single(dma->chan_tx->device->dev, dma->tx_dma_phys,
+ dma_bufsize, DMA_TO_DEVICE);
dma_release_channel(dma->chan_tx);
}
if (dma->chan_rx) {
- dma_unmap_single(dev, dma->rx_dma_phys,
- DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE);
+ dma_unmap_single(dma->chan_rx->device->dev, dma->rx_dma_phys,
+ dma_bufsize, DMA_FROM_DEVICE);
dma_release_channel(dma->chan_rx);
}
}
@@ -562,124 +647,220 @@ static void ns_delay_scale(char *psc, char *sc, int delay_ns,
}
}
-static void fifo_write(struct fsl_dspi *dspi)
+static void dspi_pushr_write(struct fsl_dspi *dspi)
{
regmap_write(dspi->regmap, SPI_PUSHR, dspi_pop_tx_pushr(dspi));
}
-static void cmd_fifo_write(struct fsl_dspi *dspi)
+static void dspi_pushr_cmd_write(struct fsl_dspi *dspi, u16 cmd)
{
- u16 cmd = dspi->tx_cmd;
-
- if (dspi->len > 0)
+ /*
+ * The only time when the PCS doesn't need continuation after this word
+ * is when it's last. We need to look ahead, because we actually call
+ * dspi_pop_tx (the function that decrements dspi->len) _after_
+ * dspi_pushr_cmd_write with XSPI mode. As for how much in advance? One
+ * word is enough. If there's more to transmit than that,
+ * dspi_xspi_write will know to split the FIFO writes in 2, and
+ * generate a new PUSHR command with the final word that will have PCS
+ * deasserted (not continued) here.
+ */
+ if (dspi->len > dspi->oper_word_size)
cmd |= SPI_PUSHR_CMD_CONT;
- regmap_write(dspi->regmap_pushr, PUSHR_CMD, cmd);
+ regmap_write(dspi->regmap_pushr, dspi->pushr_cmd, cmd);
}
-static void tx_fifo_write(struct fsl_dspi *dspi, u16 txdata)
+static void dspi_pushr_txdata_write(struct fsl_dspi *dspi, u16 txdata)
{
- regmap_write(dspi->regmap_pushr, PUSHR_TX, txdata);
+ regmap_write(dspi->regmap_pushr, dspi->pushr_tx, txdata);
}
-static void dspi_tcfq_write(struct fsl_dspi *dspi)
+static void dspi_xspi_fifo_write(struct fsl_dspi *dspi, int num_words)
{
- /* Clear transfer count */
- dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
+ int num_bytes = num_words * dspi->oper_word_size;
+ u16 tx_cmd = dspi->tx_cmd;
- if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) {
- /* Write the CMD FIFO entry first, and then the two
- * corresponding TX FIFO entries.
- */
- u32 data = dspi_pop_tx(dspi);
+ /*
+ * If the PCS needs to de-assert (i.e. we're at the end of the buffer
+ * and cs_change does not want the PCS to stay on), then we need a new
+ * PUSHR command, since this one (for the body of the buffer)
+ * necessarily has the CONT bit set.
+ * So send one word less during this go, to force a split and a command
+ * with a single word next time, when CONT will be unset.
+ */
+ if (!(dspi->tx_cmd & SPI_PUSHR_CMD_CONT) && num_bytes == dspi->len)
+ tx_cmd |= SPI_PUSHR_CMD_EOQ;
- cmd_fifo_write(dspi);
- tx_fifo_write(dspi, data & 0xFFFF);
- tx_fifo_write(dspi, data >> 16);
- } else {
- /* Write one entry to both TX FIFO and CMD FIFO
- * simultaneously.
- */
- fifo_write(dspi);
- }
-}
+ /* Update CTARE */
+ regmap_write(dspi->regmap, SPI_CTARE(0),
+ SPI_FRAME_EBITS(dspi->oper_bits_per_word) |
+ SPI_CTARE_DTCP(num_words));
-static u32 fifo_read(struct fsl_dspi *dspi)
-{
- u32 rxdata = 0;
+ /*
+ * Write the CMD FIFO entry first, and then the two
+ * corresponding TX FIFO entries (or one...).
+ */
+ dspi_pushr_cmd_write(dspi, tx_cmd);
- regmap_read(dspi->regmap, SPI_POPR, &rxdata);
- return rxdata;
-}
+ /* Fill TX FIFO with as many transfers as possible */
+ while (num_words--) {
+ u32 data = dspi_pop_tx(dspi);
-static void dspi_tcfq_read(struct fsl_dspi *dspi)
-{
- dspi_push_rx(dspi, fifo_read(dspi));
+ dspi_pushr_txdata_write(dspi, data & 0xFFFF);
+ if (dspi->oper_bits_per_word > 16)
+ dspi_pushr_txdata_write(dspi, data >> 16);
+ }
}
-static void dspi_eoq_write(struct fsl_dspi *dspi)
+static void dspi_eoq_fifo_write(struct fsl_dspi *dspi, int num_words)
{
- int fifo_size = DSPI_FIFO_SIZE;
u16 xfer_cmd = dspi->tx_cmd;
/* Fill TX FIFO with as many transfers as possible */
- while (dspi->len && fifo_size--) {
+ while (num_words--) {
dspi->tx_cmd = xfer_cmd;
/* Request EOQF for last transfer in FIFO */
- if (dspi->len == dspi->bytes_per_word || fifo_size == 0)
+ if (num_words == 0)
dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
- /* Clear transfer count for first transfer in FIFO */
- if (fifo_size == (DSPI_FIFO_SIZE - 1))
- dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
/* Write combined TX FIFO and CMD FIFO entry */
- fifo_write(dspi);
+ dspi_pushr_write(dspi);
}
}
-static void dspi_eoq_read(struct fsl_dspi *dspi)
+static u32 dspi_popr_read(struct fsl_dspi *dspi)
{
- int fifo_size = DSPI_FIFO_SIZE;
+ u32 rxdata = 0;
+
+ regmap_read(dspi->regmap, SPI_POPR, &rxdata);
+ return rxdata;
+}
+
+static void dspi_fifo_read(struct fsl_dspi *dspi)
+{
+ int num_fifo_entries = dspi->words_in_flight;
/* Read one FIFO entry and push to rx buffer */
- while ((dspi->rx < dspi->rx_end) && fifo_size--)
- dspi_push_rx(dspi, fifo_read(dspi));
+ while (num_fifo_entries--)
+ dspi_push_rx(dspi, dspi_popr_read(dspi));
}
-static int dspi_rxtx(struct fsl_dspi *dspi)
+static void dspi_setup_accel(struct fsl_dspi *dspi)
{
+ struct spi_transfer *xfer = dspi->cur_transfer;
+ bool odd = !!(dspi->len & 1);
+
+ /* No accel for frames not multiple of 8 bits at the moment */
+ if (xfer->bits_per_word % 8)
+ goto no_accel;
+
+ if (!odd && dspi->len <= dspi->devtype_data->fifo_size * 2) {
+ dspi->oper_bits_per_word = 16;
+ } else if (odd && dspi->len <= dspi->devtype_data->fifo_size) {
+ dspi->oper_bits_per_word = 8;
+ } else {
+ /* Start off with maximum supported by hardware */
+ if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
+ dspi->oper_bits_per_word = 32;
+ else
+ dspi->oper_bits_per_word = 16;
+
+ /*
+ * And go down only if the buffer can't be sent with
+ * words this big
+ */
+ do {
+ if (dspi->len >= DIV_ROUND_UP(dspi->oper_bits_per_word, 8))
+ break;
+
+ dspi->oper_bits_per_word /= 2;
+ } while (dspi->oper_bits_per_word > 8);
+ }
+
+ if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 32) {
+ dspi->dev_to_host = dspi_8on32_dev_to_host;
+ dspi->host_to_dev = dspi_8on32_host_to_dev;
+ } else if (xfer->bits_per_word == 8 && dspi->oper_bits_per_word == 16) {
+ dspi->dev_to_host = dspi_8on16_dev_to_host;
+ dspi->host_to_dev = dspi_8on16_host_to_dev;
+ } else if (xfer->bits_per_word == 16 && dspi->oper_bits_per_word == 32) {
+ dspi->dev_to_host = dspi_16on32_dev_to_host;
+ dspi->host_to_dev = dspi_16on32_host_to_dev;
+ } else {
+no_accel:
+ dspi->dev_to_host = dspi_native_dev_to_host;
+ dspi->host_to_dev = dspi_native_host_to_dev;
+ dspi->oper_bits_per_word = xfer->bits_per_word;
+ }
+
+ dspi->oper_word_size = DIV_ROUND_UP(dspi->oper_bits_per_word, 8);
+
+ /*
+ * Update CTAR here (code is common for EOQ, XSPI and DMA modes).
+ * We will update CTARE in the portion specific to XSPI, when we
+ * also know the preload value (DTCP).
+ */
+ regmap_write(dspi->regmap, SPI_CTAR(0),
+ dspi->cur_chip->ctar_val |
+ SPI_FRAME_BITS(dspi->oper_bits_per_word));
+}
+
+static void dspi_fifo_write(struct fsl_dspi *dspi)
+{
+ int num_fifo_entries = dspi->devtype_data->fifo_size;
+ struct spi_transfer *xfer = dspi->cur_transfer;
struct spi_message *msg = dspi->cur_msg;
- enum dspi_trans_mode trans_mode;
- u16 spi_tcnt;
- u32 spi_tcr;
+ int num_words, num_bytes;
- spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
- dspi->progress, !dspi->irq);
+ dspi_setup_accel(dspi);
+
+ /* In XSPI mode each 32-bit word occupies 2 TX FIFO entries */
+ if (dspi->oper_word_size == 4)
+ num_fifo_entries /= 2;
- /* Get transfer counter (in number of SPI transfers). It was
- * reset to 0 when transfer(s) were started.
+ /*
+ * Integer division intentionally trims off odd (or non-multiple of 4)
+ * numbers of bytes at the end of the buffer, which will be sent next
+ * time using a smaller oper_word_size.
*/
- regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
- spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
+ num_words = dspi->len / dspi->oper_word_size;
+ if (num_words > num_fifo_entries)
+ num_words = num_fifo_entries;
+
/* Update total number of bytes that were transferred */
- msg->actual_length += spi_tcnt * dspi->bytes_per_word;
- dspi->progress += spi_tcnt;
+ num_bytes = num_words * dspi->oper_word_size;
+ msg->actual_length += num_bytes;
+ dspi->progress += num_bytes / DIV_ROUND_UP(xfer->bits_per_word, 8);
- trans_mode = dspi->devtype_data->trans_mode;
- if (trans_mode == DSPI_EOQ_MODE)
- dspi_eoq_read(dspi);
- else if (trans_mode == DSPI_TCFQ_MODE)
- dspi_tcfq_read(dspi);
+ /*
+ * Update shared variable for use in the next interrupt (both in
+ * dspi_fifo_read and in dspi_fifo_write).
+ */
+ dspi->words_in_flight = num_words;
+
+ spi_take_timestamp_pre(dspi->ctlr, xfer, dspi->progress, !dspi->irq);
+
+ if (dspi->devtype_data->trans_mode == DSPI_EOQ_MODE)
+ dspi_eoq_fifo_write(dspi, num_words);
+ else
+ dspi_xspi_fifo_write(dspi, num_words);
+ /*
+ * Everything after this point is in a potential race with the next
+ * interrupt, so we must never use dspi->words_in_flight again since it
+ * might already be modified by the next dspi_fifo_write.
+ */
+
+ spi_take_timestamp_post(dspi->ctlr, dspi->cur_transfer,
+ dspi->progress, !dspi->irq);
+}
+
+static int dspi_rxtx(struct fsl_dspi *dspi)
+{
+ dspi_fifo_read(dspi);
if (!dspi->len)
/* Success! */
return 0;
- spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
- dspi->progress, !dspi->irq);
-
- if (trans_mode == DSPI_EOQ_MODE)
- dspi_eoq_write(dspi);
- else if (trans_mode == DSPI_TCFQ_MODE)
- dspi_tcfq_write(dspi);
+ dspi_fifo_write(dspi);
return -EINPROGRESS;
}
@@ -693,7 +874,7 @@ static int dspi_poll(struct fsl_dspi *dspi)
regmap_read(dspi->regmap, SPI_SR, &spi_sr);
regmap_write(dspi->regmap, SPI_SR, spi_sr);
- if (spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF))
+ if (spi_sr & (SPI_SR_EOQF | SPI_SR_CMDTCF))
break;
} while (--tries);
@@ -711,13 +892,11 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
regmap_read(dspi->regmap, SPI_SR, &spi_sr);
regmap_write(dspi->regmap, SPI_SR, spi_sr);
- if (!(spi_sr & SPI_SR_EOQF))
+ if (!(spi_sr & (SPI_SR_EOQF | SPI_SR_CMDTCF)))
return IRQ_NONE;
- if (dspi_rxtx(dspi) == 0) {
- dspi->waitflags = 1;
- wake_up_interruptible(&dspi->waitq);
- }
+ if (dspi_rxtx(dspi) == 0)
+ complete(&dspi->xfer_done);
return IRQ_HANDLED;
}
@@ -727,7 +906,6 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
{
struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
struct spi_device *spi = message->spi;
- enum dspi_trans_mode trans_mode;
struct spi_transfer *transfer;
int status = 0;
@@ -757,76 +935,38 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr,
dspi->tx_cmd |= SPI_PUSHR_CMD_CONT;
}
- dspi->void_write_data = dspi->cur_chip->void_write_data;
-
dspi->tx = transfer->tx_buf;
dspi->rx = transfer->rx_buf;
- dspi->rx_end = dspi->rx + transfer->len;
dspi->len = transfer->len;
dspi->progress = 0;
- /* Validated transfer specific frame size (defaults applied) */
- dspi->bits_per_word = transfer->bits_per_word;
- if (transfer->bits_per_word <= 8)
- dspi->bytes_per_word = 1;
- else if (transfer->bits_per_word <= 16)
- dspi->bytes_per_word = 2;
- else
- dspi->bytes_per_word = 4;
regmap_update_bits(dspi->regmap, SPI_MCR,
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
- regmap_write(dspi->regmap, SPI_CTAR(0),
- dspi->cur_chip->ctar_val |
- SPI_FRAME_BITS(transfer->bits_per_word));
- if (dspi->devtype_data->xspi_mode)
- regmap_write(dspi->regmap, SPI_CTARE(0),
- SPI_FRAME_EBITS(transfer->bits_per_word) |
- SPI_CTARE_DTCP(1));
spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer,
dspi->progress, !dspi->irq);
- trans_mode = dspi->devtype_data->trans_mode;
- switch (trans_mode) {
- case DSPI_EOQ_MODE:
- regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
- dspi_eoq_write(dspi);
- break;
- case DSPI_TCFQ_MODE:
- regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
- dspi_tcfq_write(dspi);
- break;
- case DSPI_DMA_MODE:
- regmap_write(dspi->regmap, SPI_RSER,
- SPI_RSER_TFFFE | SPI_RSER_TFFFD |
- SPI_RSER_RFDFE | SPI_RSER_RFDFD);
+ if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
status = dspi_dma_xfer(dspi);
- break;
- default:
- dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
- trans_mode);
- status = -EINVAL;
- goto out;
- }
-
- if (!dspi->irq) {
- do {
- status = dspi_poll(dspi);
- } while (status == -EINPROGRESS);
- } else if (trans_mode != DSPI_DMA_MODE) {
- status = wait_event_interruptible(dspi->waitq,
- dspi->waitflags);
- dspi->waitflags = 0;
+ } else {
+ dspi_fifo_write(dspi);
+
+ if (dspi->irq) {
+ wait_for_completion(&dspi->xfer_done);
+ reinit_completion(&dspi->xfer_done);
+ } else {
+ do {
+ status = dspi_poll(dspi);
+ } while (status == -EINPROGRESS);
+ }
}
if (status)
- dev_err(&dspi->pdev->dev,
- "Waiting for transfer to complete failed!\n");
+ break;
spi_transfer_delay_exec(transfer);
}
-out:
message->status = status;
spi_finalize_current_message(ctlr);
@@ -864,8 +1004,6 @@ static int dspi_setup(struct spi_device *spi)
sck_cs_delay = pdata->sck_cs_delay;
}
- chip->void_write_data = 0;
-
clkrate = clk_get_rate(dspi->clk);
hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);
@@ -909,9 +1047,34 @@ static void dspi_cleanup(struct spi_device *spi)
}
static const struct of_device_id fsl_dspi_dt_ids[] = {
- { .compatible = "fsl,vf610-dspi", .data = &vf610_data, },
- { .compatible = "fsl,ls1021a-v1.0-dspi", .data = &ls1021a_v1_data, },
- { .compatible = "fsl,ls2085a-dspi", .data = &ls2085a_data, },
+ {
+ .compatible = "fsl,vf610-dspi",
+ .data = &devtype_data[VF610],
+ }, {
+ .compatible = "fsl,ls1021a-v1.0-dspi",
+ .data = &devtype_data[LS1021A],
+ }, {
+ .compatible = "fsl,ls1012a-dspi",
+ .data = &devtype_data[LS1012A],
+ }, {
+ .compatible = "fsl,ls1028a-dspi",
+ .data = &devtype_data[LS1028A],
+ }, {
+ .compatible = "fsl,ls1043a-dspi",
+ .data = &devtype_data[LS1043A],
+ }, {
+ .compatible = "fsl,ls1046a-dspi",
+ .data = &devtype_data[LS1046A],
+ }, {
+ .compatible = "fsl,ls2080a-dspi",
+ .data = &devtype_data[LS2080A],
+ }, {
+ .compatible = "fsl,ls2085a-dspi",
+ .data = &devtype_data[LS2085A],
+ }, {
+ .compatible = "fsl,lx2160a-dspi",
+ .data = &devtype_data[LX2160A],
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
@@ -997,20 +1160,40 @@ static const struct regmap_config dspi_xspi_regmap_config[] = {
},
};
-static void dspi_init(struct fsl_dspi *dspi)
+static int dspi_init(struct fsl_dspi *dspi)
{
- unsigned int mcr = SPI_MCR_PCSIS;
+ unsigned int mcr;
+
+ /* Set idle states for all chip select signals to high */
+ mcr = SPI_MCR_PCSIS(GENMASK(dspi->ctlr->num_chipselect - 1, 0));
- if (dspi->devtype_data->xspi_mode)
+ if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
mcr |= SPI_MCR_XSPI;
if (!spi_controller_is_slave(dspi->ctlr))
mcr |= SPI_MCR_MASTER;
regmap_write(dspi->regmap, SPI_MCR, mcr);
regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
- if (dspi->devtype_data->xspi_mode)
- regmap_write(dspi->regmap, SPI_CTARE(0),
- SPI_CTARE_FMSZE(0) | SPI_CTARE_DTCP(1));
+
+ switch (dspi->devtype_data->trans_mode) {
+ case DSPI_EOQ_MODE:
+ regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
+ break;
+ case DSPI_XSPI_MODE:
+ regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_CMDTCFE);
+ break;
+ case DSPI_DMA_MODE:
+ regmap_write(dspi->regmap, SPI_RSER,
+ SPI_RSER_TFFFE | SPI_RSER_TFFFD |
+ SPI_RSER_RFDFE | SPI_RSER_RFDFD);
+ break;
+ default:
+ dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
+ dspi->devtype_data->trans_mode);
+ return -EINVAL;
+ }
+
+ return 0;
}
static int dspi_slave_abort(struct spi_master *master)
@@ -1021,8 +1204,10 @@ static int dspi_slave_abort(struct spi_master *master)
* Terminate all pending DMA transactions for the SPI working
* in SLAVE mode.
*/
- dmaengine_terminate_sync(dspi->dma->chan_rx);
- dmaengine_terminate_sync(dspi->dma->chan_tx);
+ if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
+ dmaengine_terminate_sync(dspi->dma->chan_rx);
+ dmaengine_terminate_sync(dspi->dma->chan_tx);
+ }
/* Clear the internal DSPI RX and TX FIFO buffers */
regmap_update_bits(dspi->regmap, SPI_MCR,
@@ -1032,16 +1217,33 @@ static int dspi_slave_abort(struct spi_master *master)
return 0;
}
+/*
+ * EOQ mode will inevitably deassert its PCS signal on the last word in a
+ * queue (hardware limitation), so we need to inform the spi_device that
+ * buffers larger than the FIFO size will have the chip select toggling at
+ * arbitrary points, giving it a chance to adapt its message sizes.
+ */
+static size_t dspi_max_message_size(struct spi_device *spi)
+{
+ struct fsl_dspi *dspi = spi_controller_get_devdata(spi->controller);
+
+ if (dspi->devtype_data->trans_mode == DSPI_EOQ_MODE)
+ return dspi->devtype_data->fifo_size;
+
+ return SIZE_MAX;
+}
+
static int dspi_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
const struct regmap_config *regmap_config;
struct fsl_dspi_platform_data *pdata;
struct spi_controller *ctlr;
- int ret, cs_num, bus_num;
+ int ret, cs_num, bus_num = -1;
struct fsl_dspi *dspi;
struct resource *res;
void __iomem *base;
+ bool big_endian;
ctlr = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
if (!ctlr)
@@ -1053,6 +1255,7 @@ static int dspi_probe(struct platform_device *pdev)
ctlr->setup = dspi_setup;
ctlr->transfer_one_message = dspi_transfer_one_message;
+ ctlr->max_message_size = dspi_max_message_size;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->cleanup = dspi_cleanup;
@@ -1064,7 +1267,9 @@ static int dspi_probe(struct platform_device *pdev)
ctlr->num_chipselect = pdata->cs_num;
ctlr->bus_num = pdata->bus_num;
- dspi->devtype_data = &coldfire_data;
+ /* Only Coldfire uses platform data */
+ dspi->devtype_data = &devtype_data[MCF5441X];
+ big_endian = true;
} else {
ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
@@ -1074,11 +1279,7 @@ static int dspi_probe(struct platform_device *pdev)
}
ctlr->num_chipselect = cs_num;
- ret = of_property_read_u32(np, "bus-num", &bus_num);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't get bus-num\n");
- goto out_ctlr_put;
- }
+ of_property_read_u32(np, "bus-num", &bus_num);
ctlr->bus_num = bus_num;
if (of_property_read_bool(np, "spi-slave"))
@@ -1090,9 +1291,18 @@ static int dspi_probe(struct platform_device *pdev)
ret = -EFAULT;
goto out_ctlr_put;
}
+
+ big_endian = of_device_is_big_endian(np);
+ }
+ if (big_endian) {
+ dspi->pushr_cmd = 0;
+ dspi->pushr_tx = 2;
+ } else {
+ dspi->pushr_cmd = 2;
+ dspi->pushr_tx = 0;
}
- if (dspi->devtype_data->xspi_mode)
+ if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
else
ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
@@ -1104,7 +1314,7 @@ static int dspi_probe(struct platform_device *pdev)
goto out_ctlr_put;
}
- if (dspi->devtype_data->xspi_mode)
+ if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE)
regmap_config = &dspi_xspi_regmap_config[0];
else
regmap_config = &dspi_regmap_config;
@@ -1116,7 +1326,7 @@ static int dspi_probe(struct platform_device *pdev)
goto out_ctlr_put;
}
- if (dspi->devtype_data->xspi_mode) {
+ if (dspi->devtype_data->trans_mode == DSPI_XSPI_MODE) {
dspi->regmap_pushr = devm_regmap_init_mmio(
&pdev->dev, base + SPI_PUSHR,
&dspi_xspi_regmap_config[1]);
@@ -1139,10 +1349,9 @@ static int dspi_probe(struct platform_device *pdev)
if (ret)
goto out_ctlr_put;
- dspi_init(dspi);
-
- if (dspi->devtype_data->trans_mode == DSPI_TCFQ_MODE)
- goto poll_mode;
+ ret = dspi_init(dspi);
+ if (ret)
+ goto out_clk_put;
dspi->irq = platform_get_irq(pdev, 0);
if (dspi->irq <= 0) {
@@ -1159,7 +1368,7 @@ static int dspi_probe(struct platform_device *pdev)
goto out_clk_put;
}
- init_waitqueue_head(&dspi->waitq);
+ init_completion(&dspi->xfer_done);
poll_mode:
@@ -1174,7 +1383,8 @@ poll_mode:
ctlr->max_speed_hz =
clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
- ctlr->ptp_sts_supported = dspi->devtype_data->ptp_sts_supported;
+ if (dspi->devtype_data->trans_mode != DSPI_DMA_MODE)
+ ctlr->ptp_sts_supported = true;
platform_set_drvdata(pdev, ctlr);
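
As a usage note rather than part of the patch: a client that wants the chip select held for a whole transaction can honor the FIFO-sized limit reported through the new dspi_max_message_size() hook by chunking its writes. A hedged kernel-context sketch; example_write_chunked() is a hypothetical helper, not a core or driver API:

#include <linux/kernel.h>
#include <linux/spi/spi.h>

/* split a write into messages no larger than the controller's limit */
static int example_write_chunked(struct spi_device *spi, const u8 *buf,
				 size_t len)
{
	size_t max = spi_max_message_size(spi);
	int ret;

	while (len) {
		size_t chunk = min(len, max);

		ret = spi_write(spi, buf, chunk);
		if (ret)
			return ret;
		buf += chunk;
		len -= chunk;
	}
	return 0;
}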
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index d0b8cc741a24..8b41b70f6f5c 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -86,8 +86,6 @@
#define TCR_RXMSK BIT(19)
#define TCR_TXMSK BIT(18)
-static int clkdivs[] = {1, 2, 4, 8, 16, 32, 64, 128};
-
struct lpspi_config {
u8 bpw;
u8 chip_select;
@@ -125,7 +123,7 @@ struct fsl_lpspi_data {
struct completion dma_rx_completion;
struct completion dma_tx_completion;
- int chipselect[0];
+ int chipselect[];
};
static const struct of_device_id fsl_lpspi_dt_ids[] = {
@@ -331,15 +329,14 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
}
for (prescale = 0; prescale < 8; prescale++) {
- scldiv = perclk_rate /
- (clkdivs[prescale] * config.speed_hz) - 2;
+ scldiv = perclk_rate / config.speed_hz / (1 << prescale) - 2;
if (scldiv < 256) {
fsl_lpspi->config.prescale = prescale;
break;
}
}
- if (prescale == 8 && scldiv >= 256)
+ if (scldiv >= 256)
return -EINVAL;
writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16),
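
To see what the simplified loop computes, a small standalone sketch of the prescaler search; the peripheral clock and requested speed are assumed example values:

#include <stdio.h>

int main(void)
{
	unsigned int perclk_rate = 80000000;	/* assumed peripheral clock */
	unsigned int speed_hz = 10000;		/* assumed requested speed */
	unsigned int prescale, scldiv = 256;

	/* 1 << prescale replaces the old clkdivs[] lookup table */
	for (prescale = 0; prescale < 8; prescale++) {
		scldiv = perclk_rate / speed_hz / (1 << prescale) - 2;
		if (scldiv < 256)
			break;
	}

	if (scldiv >= 256)
		printf("no divider fits the requested speed\n");
	else
		printf("prescale=%u scldiv=%u -> %u Hz\n", prescale, scldiv,
		       perclk_rate / ((1 << prescale) * (scldiv + 2)));
	return 0;
}

For these values the search settles on prescale 5 and scldiv 248, i.e. exactly 10 kHz.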
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index e8a499cd1f13..02e5cba0a5bb 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -484,7 +484,7 @@ static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
}
if (needs_wakeup_wait_mode(q))
- pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0);
+ cpu_latency_qos_add_request(&q->pm_qos_req, 0);
return 0;
}
@@ -492,7 +492,7 @@ static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
{
if (needs_wakeup_wait_mode(q))
- pm_qos_remove_request(&q->pm_qos_req);
+ cpu_latency_qos_remove_request(&q->pm_qos_req);
clk_disable_unprepare(q->clk);
clk_disable_unprepare(q->clk_en);
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 6f3d64a1a2b3..c3972424af71 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -6,7 +6,6 @@
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
@@ -536,6 +535,7 @@ static int spi_geni_probe(struct platform_device *pdev)
struct spi_geni_master *mas;
void __iomem *base;
struct clk *clk;
+ struct device *dev = &pdev->dev;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -545,28 +545,25 @@ static int spi_geni_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- clk = devm_clk_get(&pdev->dev, "se");
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "Err getting SE Core clk %ld\n",
- PTR_ERR(clk));
+ clk = devm_clk_get(dev, "se");
+ if (IS_ERR(clk))
return PTR_ERR(clk);
- }
- spi = spi_alloc_master(&pdev->dev, sizeof(*mas));
+ spi = spi_alloc_master(dev, sizeof(*mas));
if (!spi)
return -ENOMEM;
platform_set_drvdata(pdev, spi);
mas = spi_master_get_devdata(spi);
mas->irq = irq;
- mas->dev = &pdev->dev;
- mas->se.dev = &pdev->dev;
- mas->se.wrapper = dev_get_drvdata(pdev->dev.parent);
+ mas->dev = dev;
+ mas->se.dev = dev;
+ mas->se.wrapper = dev_get_drvdata(dev->parent);
mas->se.base = base;
mas->se.clk = clk;
spi->bus_num = -1;
- spi->dev.of_node = pdev->dev.of_node;
+ spi->dev.of_node = dev->of_node;
spi->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
spi->num_chipselect = 4;
@@ -579,14 +576,13 @@ static int spi_geni_probe(struct platform_device *pdev)
init_completion(&mas->xfer_done);
spin_lock_init(&mas->lock);
- pm_runtime_enable(&pdev->dev);
+ pm_runtime_enable(dev);
ret = spi_geni_init(mas);
if (ret)
goto spi_geni_probe_runtime_disable;
- ret = request_irq(mas->irq, geni_spi_isr,
- IRQF_TRIGGER_HIGH, "spi_geni", spi);
+ ret = request_irq(mas->irq, geni_spi_isr, 0, dev_name(dev), spi);
if (ret)
goto spi_geni_probe_runtime_disable;
@@ -598,7 +594,7 @@ static int spi_geni_probe(struct platform_device *pdev)
spi_geni_probe_free_irq:
free_irq(mas->irq, spi);
spi_geni_probe_runtime_disable:
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
spi_master_put(spi);
return ret;
}
diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c
index 4cf8fc80a7b7..e3b57252d075 100644
--- a/drivers/spi/spi-hisi-sfc-v3xx.c
+++ b/drivers/spi/spi-hisi-sfc-v3xx.c
@@ -7,6 +7,7 @@
#include <linux/acpi.h>
#include <linux/bitops.h>
+#include <linux/dmi.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -17,6 +18,12 @@
#define HISI_SFC_V3XX_VERSION (0x1f8)
#define HISI_SFC_V3XX_CMD_CFG (0x300)
+#define HISI_SFC_V3XX_CMD_CFG_DUAL_IN_DUAL_OUT (1 << 17)
+#define HISI_SFC_V3XX_CMD_CFG_DUAL_IO (2 << 17)
+#define HISI_SFC_V3XX_CMD_CFG_FULL_DIO (3 << 17)
+#define HISI_SFC_V3XX_CMD_CFG_QUAD_IN_QUAD_OUT (5 << 17)
+#define HISI_SFC_V3XX_CMD_CFG_QUAD_IO (6 << 17)
+#define HISI_SFC_V3XX_CMD_CFG_FULL_QIO (7 << 17)
#define HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF 9
#define HISI_SFC_V3XX_CMD_CFG_RW_MSK BIT(8)
#define HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK BIT(7)
@@ -161,6 +168,43 @@ static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host,
if (op->addr.nbytes)
config |= HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK;
+ switch (op->data.buswidth) {
+ case 0 ... 1:
+ break;
+ case 2:
+ if (op->addr.buswidth <= 1) {
+ config |= HISI_SFC_V3XX_CMD_CFG_DUAL_IN_DUAL_OUT;
+ } else if (op->addr.buswidth == 2) {
+ if (op->cmd.buswidth <= 1) {
+ config |= HISI_SFC_V3XX_CMD_CFG_DUAL_IO;
+ } else if (op->cmd.buswidth == 2) {
+ config |= HISI_SFC_V3XX_CMD_CFG_FULL_DIO;
+ } else {
+ return -EIO;
+ }
+ } else {
+ return -EIO;
+ }
+ break;
+ case 4:
+ if (op->addr.buswidth <= 1) {
+ config |= HISI_SFC_V3XX_CMD_CFG_QUAD_IN_QUAD_OUT;
+ } else if (op->addr.buswidth == 4) {
+ if (op->cmd.buswidth <= 1) {
+ config |= HISI_SFC_V3XX_CMD_CFG_QUAD_IO;
+ } else if (op->cmd.buswidth == 4) {
+ config |= HISI_SFC_V3XX_CMD_CFG_FULL_QIO;
+ } else {
+ return -EIO;
+ }
+ } else {
+ return -EIO;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
if (op->data.dir != SPI_MEM_NO_DATA) {
config |= (len - 1) << HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF;
config |= HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK;
@@ -207,6 +251,44 @@ static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = {
.exec_op = hisi_sfc_v3xx_exec_op,
};
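
The switch added above maps a (cmd, addr, data) buswidth triple onto the new CMD_CFG I/O mode values. A standalone mirror of that mapping for illustration; the short macro names are local stand-ins for the HISI_SFC_V3XX_CMD_CFG_* definitions:

#include <stdio.h>

/* local stand-ins for the CMD_CFG I/O mode field values */
#define DUAL_IN_DUAL_OUT	(1 << 17)
#define DUAL_IO			(2 << 17)
#define FULL_DIO		(3 << 17)
#define QUAD_IN_QUAD_OUT	(5 << 17)
#define QUAD_IO			(6 << 17)
#define FULL_QIO		(7 << 17)

/* returns the mode bits for a cmd-addr-data buswidth triple, or -1 */
static int io_mode(int cmd, int addr, int data)
{
	if (data <= 1)
		return 0;			/* plain single-wire data */
	if (data == 2) {
		if (addr <= 1)
			return DUAL_IN_DUAL_OUT;
		if (addr == 2)
			return cmd <= 1 ? DUAL_IO : (cmd == 2 ? FULL_DIO : -1);
		return -1;
	}
	if (data == 4) {
		if (addr <= 1)
			return QUAD_IN_QUAD_OUT;
		if (addr == 4)
			return cmd <= 1 ? QUAD_IO : (cmd == 4 ? FULL_QIO : -1);
		return -1;
	}
	return -1;				/* unsupported width */
}

int main(void)
{
	printf("1-1-4 -> %#x\n", io_mode(1, 1, 4));	/* QUAD_IN_QUAD_OUT */
	printf("1-4-4 -> %#x\n", io_mode(1, 4, 4));	/* QUAD_IO */
	printf("2-2-2 -> %#x\n", io_mode(2, 2, 2));	/* FULL_DIO */
	return 0;
}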
+static int hisi_sfc_v3xx_buswidth_override_bits;
+
+/*
+ * The ACPI firmware does not currently allow us to set the device buswidth,
+ * so quirk it depending on the board.
+ */
+static int __init hisi_sfc_v3xx_dmi_quirk(const struct dmi_system_id *d)
+{
+ hisi_sfc_v3xx_buswidth_override_bits = SPI_RX_QUAD | SPI_TX_QUAD;
+
+ return 0;
+}
+
+static const struct dmi_system_id hisi_sfc_v3xx_dmi_quirk_table[] = {
+ {
+ .callback = hisi_sfc_v3xx_dmi_quirk,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Huawei"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "D06"),
+ },
+ },
+ {
+ .callback = hisi_sfc_v3xx_dmi_quirk,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Huawei"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TaiShan 2280 V2"),
+ },
+ },
+ {
+ .callback = hisi_sfc_v3xx_dmi_quirk,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Huawei"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TaiShan 200 (Model 2280)"),
+ },
+ },
+ {}
+};
+
static int hisi_sfc_v3xx_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -222,6 +304,8 @@ static int hisi_sfc_v3xx_probe(struct platform_device *pdev)
ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
+ ctlr->buswidth_override_bits = hisi_sfc_v3xx_buswidth_override_bits;
+
host = spi_controller_get_devdata(ctlr);
host->dev = dev;
@@ -277,7 +361,20 @@ static struct platform_driver hisi_sfc_v3xx_spi_driver = {
.probe = hisi_sfc_v3xx_probe,
};
-module_platform_driver(hisi_sfc_v3xx_spi_driver);
+static int __init hisi_sfc_v3xx_spi_init(void)
+{
+ dmi_check_system(hisi_sfc_v3xx_dmi_quirk_table);
+
+ return platform_driver_register(&hisi_sfc_v3xx_spi_driver);
+}
+
+static void __exit hisi_sfc_v3xx_spi_exit(void)
+{
+ platform_driver_unregister(&hisi_sfc_v3xx_spi_driver);
+}
+
+module_init(hisi_sfc_v3xx_spi_init);
+module_exit(hisi_sfc_v3xx_spi_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index e5a46f0eb93b..adaa0c49f966 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -418,12 +418,13 @@ int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
struct spi_controller *ctlr = mem->spi->controller;
size_t len;
- len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
-
if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
return ctlr->mem_ops->adjust_op_size(mem, op);
if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
+ len = sizeof(op->cmd.opcode) + op->addr.nbytes +
+ op->dummy.nbytes;
+
if (len > spi_max_transfer_size(mem->spi))
return -EINVAL;
@@ -487,7 +488,7 @@ static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
* This function is creating a direct mapping descriptor which can then be used
* to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
* If the SPI controller driver does not support direct mapping, this function
- * fallback to an implementation using spi_mem_exec_op(), so that the caller
+ * falls back to an implementation using spi_mem_exec_op(), so that the caller
* doesn't have to bother implementing a fallback on his own.
*
* Return: a valid pointer in case of success, and ERR_PTR() otherwise.
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index 7f5680fe2568..77f7d0e0e46a 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -9,11 +9,13 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
@@ -33,7 +35,6 @@
* to have a CS go down over the full transfer
*/
-#define SPICC_MAX_FREQ 30000000
#define SPICC_MAX_BURST 128
/* Register Map */
@@ -105,7 +106,21 @@
#define SPICC_SWAP_RO BIT(14) /* RX FIFO Data Swap Read-Only */
#define SPICC_SWAP_W1 BIT(15) /* RX FIFO Data Swap Write-Only */
#define SPICC_DLYCTL_RO_MASK GENMASK(20, 15) /* Delay Control Read-Only */
-#define SPICC_DLYCTL_W1_MASK GENMASK(21, 16) /* Delay Control Write-Only */
+#define SPICC_MO_DELAY_MASK GENMASK(17, 16) /* Master Output Delay */
+#define SPICC_MO_NO_DELAY 0
+#define SPICC_MO_DELAY_1_CYCLE 1
+#define SPICC_MO_DELAY_2_CYCLE 2
+#define SPICC_MO_DELAY_3_CYCLE 3
+#define SPICC_MI_DELAY_MASK GENMASK(19, 18) /* Master Input Delay */
+#define SPICC_MI_NO_DELAY 0
+#define SPICC_MI_DELAY_1_CYCLE 1
+#define SPICC_MI_DELAY_2_CYCLE 2
+#define SPICC_MI_DELAY_3_CYCLE 3
+#define SPICC_MI_CAP_DELAY_MASK GENMASK(21, 20) /* Master Capture Delay */
+#define SPICC_CAP_AHEAD_2_CYCLE 0
+#define SPICC_CAP_AHEAD_1_CYCLE 1
+#define SPICC_CAP_NO_DELAY 2
+#define SPICC_CAP_DELAY_1_CYCLE 3
#define SPICC_FIFORST_RO_MASK GENMASK(22, 21) /* FIFO Softreset Read-Only */
#define SPICC_FIFORST_W1_MASK GENMASK(23, 22) /* FIFO Softreset Write-Only */
@@ -113,31 +128,59 @@
#define SPICC_DWADDR 0x24 /* Write Address of DMA */
+#define SPICC_ENH_CTL0 0x38 /* Enhanced Feature */
+#define SPICC_ENH_CLK_CS_DELAY_MASK GENMASK(15, 0)
+#define SPICC_ENH_DATARATE_MASK GENMASK(23, 16)
+#define SPICC_ENH_DATARATE_EN BIT(24)
+#define SPICC_ENH_MOSI_OEN BIT(25)
+#define SPICC_ENH_CLK_OEN BIT(26)
+#define SPICC_ENH_CS_OEN BIT(27)
+#define SPICC_ENH_CLK_CS_DELAY_EN BIT(28)
+#define SPICC_ENH_MAIN_CLK_AO BIT(29)
+
#define writel_bits_relaxed(mask, val, addr) \
writel_relaxed((readl_relaxed(addr) & ~(mask)) | (val), addr)
-#define SPICC_BURST_MAX 16
-#define SPICC_FIFO_HALF 10
+struct meson_spicc_data {
+ unsigned int max_speed_hz;
+ unsigned int min_speed_hz;
+ unsigned int fifo_size;
+ bool has_oen;
+ bool has_enhance_clk_div;
+ bool has_pclk;
+};
struct meson_spicc_device {
struct spi_master *master;
struct platform_device *pdev;
void __iomem *base;
struct clk *core;
+ struct clk *pclk;
+ struct clk *clk;
struct spi_message *message;
struct spi_transfer *xfer;
+ const struct meson_spicc_data *data;
u8 *tx_buf;
u8 *rx_buf;
unsigned int bytes_per_word;
unsigned long tx_remain;
- unsigned long txb_remain;
unsigned long rx_remain;
- unsigned long rxb_remain;
unsigned long xfer_remain;
- bool is_burst_end;
- bool is_last_burst;
};
+static void meson_spicc_oen_enable(struct meson_spicc_device *spicc)
+{
+ u32 conf;
+
+ if (!spicc->data->has_oen)
+ return;
+
+ conf = readl_relaxed(spicc->base + SPICC_ENH_CTL0) |
+ SPICC_ENH_MOSI_OEN | SPICC_ENH_CLK_OEN | SPICC_ENH_CS_OEN;
+
+ writel_relaxed(conf, spicc->base + SPICC_ENH_CTL0);
+}
+
static inline bool meson_spicc_txfull(struct meson_spicc_device *spicc)
{
return !!FIELD_GET(SPICC_TF,
@@ -146,7 +189,7 @@ static inline bool meson_spicc_txfull(struct meson_spicc_device *spicc)
static inline bool meson_spicc_rxready(struct meson_spicc_device *spicc)
{
- return FIELD_GET(SPICC_RH | SPICC_RR | SPICC_RF_EN,
+ return FIELD_GET(SPICC_RH | SPICC_RR | SPICC_RF,
readl_relaxed(spicc->base + SPICC_STATREG));
}
@@ -201,34 +244,22 @@ static inline void meson_spicc_tx(struct meson_spicc_device *spicc)
spicc->base + SPICC_TXDATA);
}
-static inline u32 meson_spicc_setup_rx_irq(struct meson_spicc_device *spicc,
- u32 irq_ctrl)
+static inline void meson_spicc_setup_burst(struct meson_spicc_device *spicc)
{
- if (spicc->rx_remain > SPICC_FIFO_HALF)
- irq_ctrl |= SPICC_RH_EN;
- else
- irq_ctrl |= SPICC_RR_EN;
-
- return irq_ctrl;
-}
-static inline void meson_spicc_setup_burst(struct meson_spicc_device *spicc,
- unsigned int burst_len)
-{
+ unsigned int burst_len = min_t(unsigned int,
+ spicc->xfer_remain /
+ spicc->bytes_per_word,
+ spicc->data->fifo_size);
/* Setup Xfer variables */
spicc->tx_remain = burst_len;
spicc->rx_remain = burst_len;
spicc->xfer_remain -= burst_len * spicc->bytes_per_word;
- spicc->is_burst_end = false;
- if (burst_len < SPICC_BURST_MAX || !spicc->xfer_remain)
- spicc->is_last_burst = true;
- else
- spicc->is_last_burst = false;
/* Setup burst length */
writel_bits_relaxed(SPICC_BURSTLENGTH_MASK,
FIELD_PREP(SPICC_BURSTLENGTH_MASK,
- burst_len),
+ burst_len - 1),
spicc->base + SPICC_CONREG);
/* Fill TX FIFO */
@@ -238,97 +269,71 @@ static inline void meson_spicc_setup_burst(struct meson_spicc_device *spicc,
static irqreturn_t meson_spicc_irq(int irq, void *data)
{
struct meson_spicc_device *spicc = (void *) data;
- u32 ctrl = readl_relaxed(spicc->base + SPICC_INTREG);
- u32 stat = readl_relaxed(spicc->base + SPICC_STATREG) & ctrl;
- ctrl &= ~(SPICC_RH_EN | SPICC_RR_EN);
+ writel_bits_relaxed(SPICC_TC, SPICC_TC, spicc->base + SPICC_STATREG);
/* Empty RX FIFO */
meson_spicc_rx(spicc);
- /* Enable TC interrupt since we transferred everything */
- if (!spicc->tx_remain && !spicc->rx_remain) {
- spicc->is_burst_end = true;
-
- /* Enable TC interrupt */
- ctrl |= SPICC_TC_EN;
+ if (!spicc->xfer_remain) {
+ /* Disable all IRQs */
+ writel(0, spicc->base + SPICC_INTREG);
- /* Reload IRQ status */
- stat = readl_relaxed(spicc->base + SPICC_STATREG) & ctrl;
- }
-
- /* Check transfer complete */
- if ((stat & SPICC_TC) && spicc->is_burst_end) {
- unsigned int burst_len;
-
- /* Clear TC bit */
- writel_relaxed(SPICC_TC, spicc->base + SPICC_STATREG);
-
- /* Disable TC interrupt */
- ctrl &= ~SPICC_TC_EN;
-
- if (spicc->is_last_burst) {
- /* Disable all IRQs */
- writel(0, spicc->base + SPICC_INTREG);
-
- spi_finalize_current_transfer(spicc->master);
-
- return IRQ_HANDLED;
- }
-
- burst_len = min_t(unsigned int,
- spicc->xfer_remain / spicc->bytes_per_word,
- SPICC_BURST_MAX);
+ spi_finalize_current_transfer(spicc->master);
- /* Setup burst */
- meson_spicc_setup_burst(spicc, burst_len);
-
- /* Restart burst */
- writel_bits_relaxed(SPICC_XCH, SPICC_XCH,
- spicc->base + SPICC_CONREG);
+ return IRQ_HANDLED;
}
- /* Setup RX interrupt trigger */
- ctrl = meson_spicc_setup_rx_irq(spicc, ctrl);
+ /* Setup burst */
+ meson_spicc_setup_burst(spicc);
- /* Reconfigure interrupts */
- writel(ctrl, spicc->base + SPICC_INTREG);
+ /* Start burst */
+ writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG);
return IRQ_HANDLED;
}
-static u32 meson_spicc_setup_speed(struct meson_spicc_device *spicc, u32 conf,
- u32 speed)
+static void meson_spicc_auto_io_delay(struct meson_spicc_device *spicc)
{
- unsigned long parent, value;
- unsigned int i, div;
-
- parent = clk_get_rate(spicc->core);
-
- /* Find closest inferior/equal possible speed */
- for (i = 0 ; i < 7 ; ++i) {
- /* 2^(data_rate+2) */
- value = parent >> (i + 2);
-
- if (value <= speed)
- break;
+ u32 div, hz;
+ u32 mi_delay, cap_delay;
+ u32 conf;
+
+ if (spicc->data->has_enhance_clk_div) {
+ div = FIELD_GET(SPICC_ENH_DATARATE_MASK,
+ readl_relaxed(spicc->base + SPICC_ENH_CTL0));
+ div++;
+ div <<= 1;
+ } else {
+ div = FIELD_GET(SPICC_DATARATE_MASK,
+ readl_relaxed(spicc->base + SPICC_CONREG));
+ div += 2;
+ div = 1 << div;
}
- /* If provided speed it lower than max divider, use max divider */
- if (i > 7) {
- div = 7;
- dev_warn_once(&spicc->pdev->dev, "unable to get close to speed %u\n",
- speed);
- } else
- div = i;
-
- dev_dbg(&spicc->pdev->dev, "parent %lu, speed %u -> %lu (%u)\n",
- parent, speed, value, div);
-
- conf &= ~SPICC_DATARATE_MASK;
- conf |= FIELD_PREP(SPICC_DATARATE_MASK, div);
-
- return conf;
+ mi_delay = SPICC_MI_NO_DELAY;
+ cap_delay = SPICC_CAP_AHEAD_2_CYCLE;
+ hz = clk_get_rate(spicc->clk);
+
+ if (hz >= 100000000)
+ cap_delay = SPICC_CAP_DELAY_1_CYCLE;
+ else if (hz >= 80000000)
+ cap_delay = SPICC_CAP_NO_DELAY;
+ else if (hz >= 40000000)
+ cap_delay = SPICC_CAP_AHEAD_1_CYCLE;
+ else if (div >= 16)
+ mi_delay = SPICC_MI_DELAY_3_CYCLE;
+ else if (div >= 8)
+ mi_delay = SPICC_MI_DELAY_2_CYCLE;
+ else if (div >= 6)
+ mi_delay = SPICC_MI_DELAY_1_CYCLE;
+
+ conf = readl_relaxed(spicc->base + SPICC_TESTREG);
+ conf &= ~(SPICC_MO_DELAY_MASK | SPICC_MI_DELAY_MASK
+ | SPICC_MI_CAP_DELAY_MASK);
+ conf |= FIELD_PREP(SPICC_MI_DELAY_MASK, mi_delay);
+ conf |= FIELD_PREP(SPICC_MI_CAP_DELAY_MASK, cap_delay);
+ writel_relaxed(conf, spicc->base + SPICC_TESTREG);
}
static void meson_spicc_setup_xfer(struct meson_spicc_device *spicc,
@@ -339,9 +344,6 @@ static void meson_spicc_setup_xfer(struct meson_spicc_device *spicc,
/* Read original configuration */
conf = conf_orig = readl_relaxed(spicc->base + SPICC_CONREG);
- /* Select closest divider */
- conf = meson_spicc_setup_speed(spicc, conf, xfer->speed_hz);
-
/* Setup word width */
conf &= ~SPICC_BITLENGTH_MASK;
conf |= FIELD_PREP(SPICC_BITLENGTH_MASK,
@@ -350,6 +352,32 @@ static void meson_spicc_setup_xfer(struct meson_spicc_device *spicc,
/* Ignore if unchanged */
if (conf != conf_orig)
writel_relaxed(conf, spicc->base + SPICC_CONREG);
+
+ clk_set_rate(spicc->clk, xfer->speed_hz);
+
+ meson_spicc_auto_io_delay(spicc);
+
+ writel_relaxed(0, spicc->base + SPICC_DMAREG);
+}
+
+static void meson_spicc_reset_fifo(struct meson_spicc_device *spicc)
+{
+ u32 data;
+
+ if (spicc->data->has_oen)
+ writel_bits_relaxed(SPICC_ENH_MAIN_CLK_AO,
+ SPICC_ENH_MAIN_CLK_AO,
+ spicc->base + SPICC_ENH_CTL0);
+
+ writel_bits_relaxed(SPICC_FIFORST_W1_MASK, SPICC_FIFORST_W1_MASK,
+ spicc->base + SPICC_TESTREG);
+
+ while (meson_spicc_rxready(spicc))
+ data = readl_relaxed(spicc->base + SPICC_RXDATA);
+
+ if (spicc->data->has_oen)
+ writel_bits_relaxed(SPICC_ENH_MAIN_CLK_AO, 0,
+ spicc->base + SPICC_ENH_CTL0);
}
static int meson_spicc_transfer_one(struct spi_master *master,
@@ -357,8 +385,6 @@ static int meson_spicc_transfer_one(struct spi_master *master,
struct spi_transfer *xfer)
{
struct meson_spicc_device *spicc = spi_master_get_devdata(master);
- unsigned int burst_len;
- u32 irq = 0;
/* Store current transfer */
spicc->xfer = xfer;
@@ -372,22 +398,22 @@ static int meson_spicc_transfer_one(struct spi_master *master,
spicc->bytes_per_word =
DIV_ROUND_UP(spicc->xfer->bits_per_word, 8);
+ if (xfer->len % spicc->bytes_per_word)
+ return -EINVAL;
+
/* Setup transfer parameters */
meson_spicc_setup_xfer(spicc, xfer);
- burst_len = min_t(unsigned int,
- spicc->xfer_remain / spicc->bytes_per_word,
- SPICC_BURST_MAX);
+ meson_spicc_reset_fifo(spicc);
- meson_spicc_setup_burst(spicc, burst_len);
-
- irq = meson_spicc_setup_rx_irq(spicc, irq);
+ /* Setup burst */
+ meson_spicc_setup_burst(spicc);
/* Start burst */
writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG);
/* Enable interrupts */
- writel_relaxed(irq, spicc->base + SPICC_INTREG);
+ writel_relaxed(SPICC_TC_EN, spicc->base + SPICC_INTREG);
return 1;
}
@@ -444,7 +470,7 @@ static int meson_spicc_prepare_message(struct spi_master *master,
/* Setup no wait cycles by default */
writel_relaxed(0, spicc->base + SPICC_PERIODREG);
- writel_bits_relaxed(BIT(24), BIT(24), spicc->base + SPICC_TESTREG);
+ writel_bits_relaxed(SPICC_LBC_W1, 0, spicc->base + SPICC_TESTREG);
return 0;
}
@@ -456,9 +482,6 @@ static int meson_spicc_unprepare_transfer(struct spi_master *master)
/* Disable all IRQs */
writel(0, spicc->base + SPICC_INTREG);
- /* Disable controller */
- writel_bits_relaxed(SPICC_ENABLE, 0, spicc->base + SPICC_CONREG);
-
device_reset_optional(&spicc->pdev->dev);
return 0;
@@ -477,11 +500,167 @@ static void meson_spicc_cleanup(struct spi_device *spi)
spi->controller_state = NULL;
}
+/*
+ * The Clock Mux
+ *            x-----------------x   x------------x    x------\
+ *        |---| pow2 fixed div  |---| pow2 div   |----|      |
+ *        |   x-----------------x   x------------x    |      |
+ * src ---|                                           | mux  |-- out
+ *        |   x-----------------x   x------------x    |      |
+ *        |---| enh fixed div   |---| enh div    |0---|      |
+ *            x-----------------x   x------------x    x------/
+ *
+ * Clk path for GX series:
+ * src -> pow2 fixed div -> pow2 div -> out
+ *
+ * Clk path for AXG series:
+ * src -> pow2 fixed div -> pow2 div -> mux -> out
+ * src -> enh fixed div -> enh div -> mux -> out
+ *
+ * Clk path for G12A series:
+ * pclk -> pow2 fixed div -> pow2 div -> mux -> out
+ * pclk -> enh fixed div -> enh div -> mux -> out
+ */
+
+static int meson_spicc_clk_init(struct meson_spicc_device *spicc)
+{
+ struct device *dev = &spicc->pdev->dev;
+ struct clk_fixed_factor *pow2_fixed_div, *enh_fixed_div;
+ struct clk_divider *pow2_div, *enh_div;
+ struct clk_mux *mux;
+ struct clk_init_data init;
+ struct clk *clk;
+ struct clk_parent_data parent_data[2];
+ char name[64];
+
+ memset(&init, 0, sizeof(init));
+ memset(&parent_data, 0, sizeof(parent_data));
+
+ init.parent_data = parent_data;
+
+ /* algorithm for pow2 div: rate = freq / 4 / (2 ^ N) */
+
+ pow2_fixed_div = devm_kzalloc(dev, sizeof(*pow2_fixed_div), GFP_KERNEL);
+ if (!pow2_fixed_div)
+ return -ENOMEM;
+
+ snprintf(name, sizeof(name), "%s#pow2_fixed_div", dev_name(dev));
+ init.name = name;
+ init.ops = &clk_fixed_factor_ops;
+ init.flags = 0;
+ if (spicc->data->has_pclk)
+ parent_data[0].hw = __clk_get_hw(spicc->pclk);
+ else
+ parent_data[0].hw = __clk_get_hw(spicc->core);
+ init.num_parents = 1;
+
+ pow2_fixed_div->mult = 1,
+ pow2_fixed_div->div = 4,
+ pow2_fixed_div->hw.init = &init;
+
+ clk = devm_clk_register(dev, &pow2_fixed_div->hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return PTR_ERR(clk);
+
+ pow2_div = devm_kzalloc(dev, sizeof(*pow2_div), GFP_KERNEL);
+ if (!pow2_div)
+ return -ENOMEM;
+
+ snprintf(name, sizeof(name), "%s#pow2_div", dev_name(dev));
+ init.name = name;
+ init.ops = &clk_divider_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ parent_data[0].hw = &pow2_fixed_div->hw;
+ init.num_parents = 1;
+
+ pow2_div->shift = 16,
+ pow2_div->width = 3,
+ pow2_div->flags = CLK_DIVIDER_POWER_OF_TWO,
+ pow2_div->reg = spicc->base + SPICC_CONREG;
+ pow2_div->hw.init = &init;
+
+ clk = devm_clk_register(dev, &pow2_div->hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return PTR_ERR(clk);
+
+ if (!spicc->data->has_enhance_clk_div) {
+ spicc->clk = clk;
+ return 0;
+ }
+
+ /* algorithm for enh div: rate = freq / 2 / (N + 1) */
+
+ enh_fixed_div = devm_kzalloc(dev, sizeof(*enh_fixed_div), GFP_KERNEL);
+ if (!enh_fixed_div)
+ return -ENOMEM;
+
+ snprintf(name, sizeof(name), "%s#enh_fixed_div", dev_name(dev));
+ init.name = name;
+ init.ops = &clk_fixed_factor_ops;
+ init.flags = 0;
+ if (spicc->data->has_pclk)
+ parent_data[0].hw = __clk_get_hw(spicc->pclk);
+ else
+ parent_data[0].hw = __clk_get_hw(spicc->core);
+ init.num_parents = 1;
+
+ enh_fixed_div->mult = 1,
+ enh_fixed_div->div = 2,
+ enh_fixed_div->hw.init = &init;
+
+ clk = devm_clk_register(dev, &enh_fixed_div->hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return PTR_ERR(clk);
+
+ enh_div = devm_kzalloc(dev, sizeof(*enh_div), GFP_KERNEL);
+ if (!enh_div)
+ return -ENOMEM;
+
+ snprintf(name, sizeof(name), "%s#enh_div", dev_name(dev));
+ init.name = name;
+ init.ops = &clk_divider_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ parent_data[0].hw = &enh_fixed_div->hw;
+ init.num_parents = 1;
+
+ enh_div->shift = 16,
+ enh_div->width = 8,
+ enh_div->reg = spicc->base + SPICC_ENH_CTL0;
+ enh_div->hw.init = &init;
+
+ clk = devm_clk_register(dev, &enh_div->hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return PTR_ERR(clk);
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ snprintf(name, sizeof(name), "%s#sel", dev_name(dev));
+ init.name = name;
+ init.ops = &clk_mux_ops;
+ parent_data[0].hw = &pow2_div->hw;
+ parent_data[1].hw = &enh_div->hw;
+ init.num_parents = 2;
+ init.flags = CLK_SET_RATE_PARENT;
+
+ mux->mask = 0x1,
+ mux->shift = 24,
+ mux->reg = spicc->base + SPICC_ENH_CTL0;
+ mux->hw.init = &init;
+
+ spicc->clk = devm_clk_register(dev, &mux->hw);
+ if (WARN_ON(IS_ERR(spicc->clk)))
+ return PTR_ERR(spicc->clk);
+
+ return 0;
+}
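
To make the two divider paths in the comment block above concrete, a standalone sketch of the rates they can generate; the parent clock value is an assumed example:

#include <stdio.h>

int main(void)
{
	unsigned long parent = 166666666;	/* assumed composite input clock */
	unsigned int n;

	/* pow2 path: rate = parent / 4 / 2^N, 3-bit N in SPICC_CONREG */
	for (n = 0; n < 8; n++)
		printf("pow2 N=%u -> %lu Hz\n", n, parent / 4 / (1UL << n));

	/* enhanced path: rate = parent / 2 / (N + 1), 8-bit N in SPICC_ENH_CTL0 */
	for (n = 0; n < 4; n++)
		printf("enh  N=%u -> %lu Hz\n", n, parent / 2 / (n + 1));

	return 0;
}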
+
static int meson_spicc_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct meson_spicc_device *spicc;
- int ret, irq, rate;
+ int ret, irq;
master = spi_alloc_master(&pdev->dev, sizeof(*spicc));
if (!master) {
@@ -491,6 +670,13 @@ static int meson_spicc_probe(struct platform_device *pdev)
spicc = spi_master_get_devdata(master);
spicc->master = master;
+ spicc->data = of_device_get_match_data(&pdev->dev);
+ if (!spicc->data) {
+ dev_err(&pdev->dev, "failed to get match data\n");
+ ret = -EINVAL;
+ goto out_master;
+ }
+
spicc->pdev = pdev;
platform_set_drvdata(pdev, spicc);
@@ -501,6 +687,10 @@ static int meson_spicc_probe(struct platform_device *pdev)
goto out_master;
}
+ /* Set master mode and enable controller */
+ writel_relaxed(SPICC_ENABLE | SPICC_MODE_MASTER,
+ spicc->base + SPICC_CONREG);
+
/* Disable all IRQs */
writel_relaxed(0, spicc->base + SPICC_INTREG);
@@ -519,12 +709,26 @@ static int meson_spicc_probe(struct platform_device *pdev)
goto out_master;
}
+ if (spicc->data->has_pclk) {
+ spicc->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(spicc->pclk)) {
+ dev_err(&pdev->dev, "pclk clock request failed\n");
+ ret = PTR_ERR(spicc->pclk);
+ goto out_master;
+ }
+ }
+
ret = clk_prepare_enable(spicc->core);
if (ret) {
dev_err(&pdev->dev, "core clock enable failed\n");
goto out_master;
}
- rate = clk_get_rate(spicc->core);
+
+ ret = clk_prepare_enable(spicc->pclk);
+ if (ret) {
+ dev_err(&pdev->dev, "pclk clock enable failed\n");
+ goto out_master;
+ }
device_reset_optional(&pdev->dev);
@@ -536,7 +740,8 @@ static int meson_spicc_probe(struct platform_device *pdev)
SPI_BPW_MASK(16) |
SPI_BPW_MASK(8);
master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX);
- master->min_speed_hz = rate >> 9;
+ master->min_speed_hz = spicc->data->min_speed_hz;
+ master->max_speed_hz = spicc->data->max_speed_hz;
master->setup = meson_spicc_setup;
master->cleanup = meson_spicc_cleanup;
master->prepare_message = meson_spicc_prepare_message;
@@ -544,11 +749,13 @@ static int meson_spicc_probe(struct platform_device *pdev)
master->transfer_one = meson_spicc_transfer_one;
master->use_gpio_descriptors = true;
- /* Setup max rate according to the Meson GX datasheet */
- if ((rate >> 2) > SPICC_MAX_FREQ)
- master->max_speed_hz = SPICC_MAX_FREQ;
- else
- master->max_speed_hz = rate >> 2;
+ meson_spicc_oen_enable(spicc);
+
+ ret = meson_spicc_clk_init(spicc);
+ if (ret) {
+ dev_err(&pdev->dev, "clock registration failed\n");
+ goto out_master;
+ }
ret = devm_spi_register_master(&pdev->dev, master);
if (ret) {
@@ -560,6 +767,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
out_clk:
clk_disable_unprepare(spicc->core);
+ clk_disable_unprepare(spicc->pclk);
out_master:
spi_master_put(master);
@@ -575,13 +783,47 @@ static int meson_spicc_remove(struct platform_device *pdev)
writel(0, spicc->base + SPICC_CONREG);
clk_disable_unprepare(spicc->core);
+ clk_disable_unprepare(spicc->pclk);
return 0;
}
+static const struct meson_spicc_data meson_spicc_gx_data = {
+ .max_speed_hz = 30000000,
+ .min_speed_hz = 325000,
+ .fifo_size = 16,
+};
+
+static const struct meson_spicc_data meson_spicc_axg_data = {
+ .max_speed_hz = 80000000,
+ .min_speed_hz = 325000,
+ .fifo_size = 16,
+ .has_oen = true,
+ .has_enhance_clk_div = true,
+};
+
+static const struct meson_spicc_data meson_spicc_g12a_data = {
+ .max_speed_hz = 166666666,
+ .min_speed_hz = 50000,
+ .fifo_size = 15,
+ .has_oen = true,
+ .has_enhance_clk_div = true,
+ .has_pclk = true,
+};
+
static const struct of_device_id meson_spicc_of_match[] = {
- { .compatible = "amlogic,meson-gx-spicc", },
- { .compatible = "amlogic,meson-axg-spicc", },
+ {
+ .compatible = "amlogic,meson-gx-spicc",
+ .data = &meson_spicc_gx_data,
+ },
+ {
+ .compatible = "amlogic,meson-axg-spicc",
+ .data = &meson_spicc_axg_data,
+ },
+ {
+ .compatible = "amlogic,meson-g12a-spicc",
+ .data = &meson_spicc_g12a_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, meson_spicc_of_match);
diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
new file mode 100644
index 000000000000..c15a9910549f
--- /dev/null
+++ b/drivers/spi/spi-mtk-nor.c
@@ -0,0 +1,689 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Mediatek SPI NOR controller driver
+//
+// Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com>
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/string.h>
+
+#define DRIVER_NAME "mtk-spi-nor"
+
+#define MTK_NOR_REG_CMD 0x00
+#define MTK_NOR_CMD_WRITE BIT(4)
+#define MTK_NOR_CMD_PROGRAM BIT(2)
+#define MTK_NOR_CMD_READ BIT(0)
+#define MTK_NOR_CMD_MASK GENMASK(5, 0)
+
+#define MTK_NOR_REG_PRG_CNT 0x04
+#define MTK_NOR_REG_RDATA 0x0c
+
+#define MTK_NOR_REG_RADR0 0x10
+#define MTK_NOR_REG_RADR(n) (MTK_NOR_REG_RADR0 + 4 * (n))
+#define MTK_NOR_REG_RADR3 0xc8
+
+#define MTK_NOR_REG_WDATA 0x1c
+
+#define MTK_NOR_REG_PRGDATA0 0x20
+#define MTK_NOR_REG_PRGDATA(n) (MTK_NOR_REG_PRGDATA0 + 4 * (n))
+#define MTK_NOR_REG_PRGDATA_MAX 5
+
+#define MTK_NOR_REG_SHIFT0 0x38
+#define MTK_NOR_REG_SHIFT(n) (MTK_NOR_REG_SHIFT0 + 4 * (n))
+#define MTK_NOR_REG_SHIFT_MAX 9
+
+#define MTK_NOR_REG_CFG1 0x60
+#define MTK_NOR_FAST_READ BIT(0)
+
+#define MTK_NOR_REG_CFG2 0x64
+#define MTK_NOR_WR_CUSTOM_OP_EN BIT(4)
+#define MTK_NOR_WR_BUF_EN BIT(0)
+
+#define MTK_NOR_REG_PP_DATA 0x98
+
+#define MTK_NOR_REG_IRQ_STAT 0xa8
+#define MTK_NOR_REG_IRQ_EN 0xac
+#define MTK_NOR_IRQ_DMA BIT(7)
+#define MTK_NOR_IRQ_MASK GENMASK(7, 0)
+
+#define MTK_NOR_REG_CFG3 0xb4
+#define MTK_NOR_DISABLE_WREN BIT(7)
+#define MTK_NOR_DISABLE_SR_POLL BIT(5)
+
+#define MTK_NOR_REG_WP 0xc4
+#define MTK_NOR_ENABLE_SF_CMD 0x30
+
+#define MTK_NOR_REG_BUSCFG 0xcc
+#define MTK_NOR_4B_ADDR BIT(4)
+#define MTK_NOR_QUAD_ADDR BIT(3)
+#define MTK_NOR_QUAD_READ BIT(2)
+#define MTK_NOR_DUAL_ADDR BIT(1)
+#define MTK_NOR_DUAL_READ BIT(0)
+#define MTK_NOR_BUS_MODE_MASK GENMASK(4, 0)
+
+#define MTK_NOR_REG_DMA_CTL 0x718
+#define MTK_NOR_DMA_START BIT(0)
+
+#define MTK_NOR_REG_DMA_FADR 0x71c
+#define MTK_NOR_REG_DMA_DADR 0x720
+#define MTK_NOR_REG_DMA_END_DADR 0x724
+
+#define MTK_NOR_PRG_MAX_SIZE 6
+// DMA source/destination addresses for reads have to be 16-byte aligned
+#define MTK_NOR_DMA_ALIGN 16
+#define MTK_NOR_DMA_ALIGN_MASK (MTK_NOR_DMA_ALIGN - 1)
+// and we allocate a bounce buffer if the destination address isn't aligned.
+#define MTK_NOR_BOUNCE_BUF_SIZE PAGE_SIZE
+
+// Buffered page program can do one 128-byte transfer
+#define MTK_NOR_PP_SIZE 128
+
+#define CLK_TO_US(sp, clkcnt) ((clkcnt) * 1000000 / sp->spi_freq)
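
A tiny standalone example of the CLK_TO_US() conversion that the poll and completion timeouts below are derived from; both numbers are assumed:

#include <stdio.h>

int main(void)
{
	unsigned int spi_freq = 26000000;	/* assumed SPI clock rate */
	unsigned int clkcnt = (128 + 5) * 8;	/* clocks for a 128-byte buffered program */

	/* CLK_TO_US: clock count -> microseconds at the current SPI rate */
	printf("%u clocks ~ %u us\n", clkcnt, clkcnt * 1000000 / spi_freq);
	return 0;
}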
+
+struct mtk_nor {
+ struct spi_controller *ctlr;
+ struct device *dev;
+ void __iomem *base;
+ u8 *buffer;
+ struct clk *spi_clk;
+ struct clk *ctlr_clk;
+ unsigned int spi_freq;
+ bool wbuf_en;
+ bool has_irq;
+ struct completion op_done;
+};
+
+static inline void mtk_nor_rmw(struct mtk_nor *sp, u32 reg, u32 set, u32 clr)
+{
+ u32 val = readl(sp->base + reg);
+
+ val &= ~clr;
+ val |= set;
+ writel(val, sp->base + reg);
+}
+
+static inline int mtk_nor_cmd_exec(struct mtk_nor *sp, u32 cmd, ulong clk)
+{
+ ulong delay = CLK_TO_US(sp, clk);
+ u32 reg;
+ int ret;
+
+ writel(cmd, sp->base + MTK_NOR_REG_CMD);
+ ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CMD, reg, !(reg & cmd),
+ delay / 3, (delay + 1) * 200);
+ if (ret < 0)
+ dev_err(sp->dev, "command %u timeout.\n", cmd);
+ return ret;
+}
+
+static void mtk_nor_set_addr(struct mtk_nor *sp, const struct spi_mem_op *op)
+{
+ u32 addr = op->addr.val;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR(i));
+ addr >>= 8;
+ }
+ if (op->addr.nbytes == 4) {
+ writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR3);
+ mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0);
+ } else {
+ mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR);
+ }
+}
+
+static bool mtk_nor_match_read(const struct spi_mem_op *op)
+{
+ int dummy = 0;
+
+ if (op->dummy.buswidth)
+ dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth;
+
+ if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) {
+ if (op->addr.buswidth == 1)
+ return dummy == 8;
+ else if (op->addr.buswidth == 2)
+ return dummy == 4;
+ else if (op->addr.buswidth == 4)
+ return dummy == 6;
+ } else if ((op->addr.buswidth == 1) && (op->data.buswidth == 1)) {
+ if (op->cmd.opcode == 0x03)
+ return dummy == 0;
+ else if (op->cmd.opcode == 0x0b)
+ return dummy == 8;
+ }
+ return false;
+}
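
mtk_nor_match_read() accepts only the dummy-cycle counts the controller can generate. A standalone sketch of that calculation with assumed example widths:

#include <stdio.h>

/* dummy clock cycles = dummy bytes * 8 / dummy-phase bus width (0 if none) */
static int dummy_cycles(int nbytes, int buswidth)
{
	return buswidth ? nbytes * 8 / buswidth : 0;
}

int main(void)
{
	/* 3 dummy bytes on 4 lines: the 6 cycles required when the address also uses 4 lines */
	printf("%d\n", dummy_cycles(3, 4));
	/* a plain 0x03 read has no dummy phase at all */
	printf("%d\n", dummy_cycles(0, 0));
	return 0;
}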
+
+static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ size_t len;
+
+ if (!op->data.nbytes)
+ return 0;
+
+ if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
+ if ((op->data.dir == SPI_MEM_DATA_IN) &&
+ mtk_nor_match_read(op)) {
+ if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) ||
+ (op->data.nbytes < MTK_NOR_DMA_ALIGN))
+ op->data.nbytes = 1;
+ else if (!((ulong)(op->data.buf.in) &
+ MTK_NOR_DMA_ALIGN_MASK))
+ op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;
+ else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE)
+ op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE;
+ return 0;
+ } else if (op->data.dir == SPI_MEM_DATA_OUT) {
+ if (op->data.nbytes >= MTK_NOR_PP_SIZE)
+ op->data.nbytes = MTK_NOR_PP_SIZE;
+ else
+ op->data.nbytes = 1;
+ return 0;
+ }
+ }
+
+ len = MTK_NOR_PRG_MAX_SIZE - sizeof(op->cmd.opcode) - op->addr.nbytes -
+ op->dummy.nbytes;
+ if (op->data.nbytes > len)
+ op->data.nbytes = len;
+
+ return 0;
+}
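
A standalone mirror of the read-size trimming above, showing how a request is shrunk depending on alignment; the addresses, buffer pointer and length are assumed example values:

#include <stdio.h>

#define DMA_ALIGN	16
#define ALIGN_MASK	(DMA_ALIGN - 1)
#define BOUNCE_SIZE	4096			/* assumed PAGE_SIZE */

/* mirror of the DMA-read trimming rules for illustration */
static unsigned int adjust_read(unsigned long flash_addr, unsigned long buf,
				unsigned int nbytes)
{
	if ((flash_addr & ALIGN_MASK) || nbytes < DMA_ALIGN)
		return 1;			/* fall back to 1-byte PIO */
	if (!(buf & ALIGN_MASK))
		return nbytes & ~ALIGN_MASK;	/* direct DMA, whole 16-byte chunks */
	if (nbytes > BOUNCE_SIZE)
		return BOUNCE_SIZE;		/* bounce buffer caps the length */
	return nbytes;				/* unaligned buffer, read via bounce */
}

int main(void)
{
	printf("%u\n", adjust_read(0x1000, 0x2000, 100));	/* 96: aligned DMA */
	printf("%u\n", adjust_read(0x1000, 0x2003, 100));	/* 100: via bounce */
	printf("%u\n", adjust_read(0x1001, 0x2000, 100));	/* 1: PIO */
	return 0;
}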
+
+static bool mtk_nor_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ size_t len;
+
+ if (op->cmd.buswidth != 1)
+ return false;
+
+ if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
+ if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op))
+ return true;
+ else if (op->data.dir == SPI_MEM_DATA_OUT)
+ return (op->addr.buswidth == 1) &&
+ (op->dummy.buswidth == 0) &&
+ (op->data.buswidth == 1);
+ }
+ len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
+ if ((len > MTK_NOR_PRG_MAX_SIZE) ||
+ ((op->data.nbytes) && (len == MTK_NOR_PRG_MAX_SIZE)))
+ return false;
+ return true;
+}
+
+static void mtk_nor_setup_bus(struct mtk_nor *sp, const struct spi_mem_op *op)
+{
+ u32 reg = 0;
+
+ if (op->addr.nbytes == 4)
+ reg |= MTK_NOR_4B_ADDR;
+
+ if (op->data.buswidth == 4) {
+ reg |= MTK_NOR_QUAD_READ;
+ writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(4));
+ if (op->addr.buswidth == 4)
+ reg |= MTK_NOR_QUAD_ADDR;
+ } else if (op->data.buswidth == 2) {
+ reg |= MTK_NOR_DUAL_READ;
+ writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(3));
+ if (op->addr.buswidth == 2)
+ reg |= MTK_NOR_DUAL_ADDR;
+ } else {
+ if (op->cmd.opcode == 0x0b)
+ mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ, 0);
+ else
+ mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, 0, MTK_NOR_FAST_READ);
+ }
+ mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK);
+}
+
+static int mtk_nor_read_dma(struct mtk_nor *sp, u32 from, unsigned int length,
+ u8 *buffer)
+{
+ int ret = 0;
+ ulong delay;
+ u32 reg;
+ dma_addr_t dma_addr;
+
+ dma_addr = dma_map_single(sp->dev, buffer, length, DMA_FROM_DEVICE);
+ if (dma_mapping_error(sp->dev, dma_addr)) {
+ dev_err(sp->dev, "failed to map dma buffer.\n");
+ return -EINVAL;
+ }
+
+ writel(from, sp->base + MTK_NOR_REG_DMA_FADR);
+ writel(dma_addr, sp->base + MTK_NOR_REG_DMA_DADR);
+ writel(dma_addr + length, sp->base + MTK_NOR_REG_DMA_END_DADR);
+
+ if (sp->has_irq) {
+ reinit_completion(&sp->op_done);
+ mtk_nor_rmw(sp, MTK_NOR_REG_IRQ_EN, MTK_NOR_IRQ_DMA, 0);
+ }
+
+ mtk_nor_rmw(sp, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0);
+
+ delay = CLK_TO_US(sp, (length + 5) * BITS_PER_BYTE);
+
+ if (sp->has_irq) {
+ if (!wait_for_completion_timeout(&sp->op_done,
+ (delay + 1) * 100))
+ ret = -ETIMEDOUT;
+ } else {
+ ret = readl_poll_timeout(sp->base + MTK_NOR_REG_DMA_CTL, reg,
+ !(reg & MTK_NOR_DMA_START), delay / 3,
+ (delay + 1) * 100);
+ }
+
+ dma_unmap_single(sp->dev, dma_addr, length, DMA_FROM_DEVICE);
+ if (ret < 0)
+ dev_err(sp->dev, "dma read timeout.\n");
+
+ return ret;
+}
+
+static int mtk_nor_read_bounce(struct mtk_nor *sp, u32 from,
+ unsigned int length, u8 *buffer)
+{
+ unsigned int rdlen;
+ int ret;
+
+ if (length & MTK_NOR_DMA_ALIGN_MASK)
+ rdlen = (length + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK;
+ else
+ rdlen = length;
+
+ ret = mtk_nor_read_dma(sp, from, rdlen, sp->buffer);
+ if (ret)
+ return ret;
+
+ memcpy(buffer, sp->buffer, length);
+ return 0;
+}
+
+static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op)
+{
+ u8 *buf = op->data.buf.in;
+ int ret;
+
+ ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE);
+ if (!ret)
+ buf[0] = readb(sp->base + MTK_NOR_REG_RDATA);
+ return ret;
+}
+
+static int mtk_nor_write_buffer_enable(struct mtk_nor *sp)
+{
+ int ret;
+ u32 val;
+
+ if (sp->wbuf_en)
+ return 0;
+
+ val = readl(sp->base + MTK_NOR_REG_CFG2);
+ writel(val | MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
+ ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
+ val & MTK_NOR_WR_BUF_EN, 0, 10000);
+ if (!ret)
+ sp->wbuf_en = true;
+ return ret;
+}
+
+static int mtk_nor_write_buffer_disable(struct mtk_nor *sp)
+{
+ int ret;
+ u32 val;
+
+ if (!sp->wbuf_en)
+ return 0;
+ val = readl(sp->base + MTK_NOR_REG_CFG2);
+ writel(val & ~MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
+ ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
+ !(val & MTK_NOR_WR_BUF_EN), 0, 10000);
+ if (!ret)
+ sp->wbuf_en = false;
+ return ret;
+}
+
+static int mtk_nor_pp_buffered(struct mtk_nor *sp, const struct spi_mem_op *op)
+{
+ const u8 *buf = op->data.buf.out;
+ u32 val;
+ int ret, i;
+
+ ret = mtk_nor_write_buffer_enable(sp);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < op->data.nbytes; i += 4) {
+ val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 |
+ buf[i];
+ writel(val, sp->base + MTK_NOR_REG_PP_DATA);
+ }
+ return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE,
+ (op->data.nbytes + 5) * BITS_PER_BYTE);
+}
+
+static int mtk_nor_pp_unbuffered(struct mtk_nor *sp,
+ const struct spi_mem_op *op)
+{
+ const u8 *buf = op->data.buf.out;
+ int ret;
+
+ ret = mtk_nor_write_buffer_disable(sp);
+ if (ret < 0)
+ return ret;
+ writeb(buf[0], sp->base + MTK_NOR_REG_WDATA);
+ return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE);
+}
+
+int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+ struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);
+ int ret;
+
+ if ((op->data.nbytes == 0) ||
+ ((op->addr.nbytes != 3) && (op->addr.nbytes != 4)))
+ return -ENOTSUPP;
+
+ if (op->data.dir == SPI_MEM_DATA_OUT) {
+ mtk_nor_set_addr(sp, op);
+ writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA0);
+ if (op->data.nbytes == MTK_NOR_PP_SIZE)
+ return mtk_nor_pp_buffered(sp, op);
+ return mtk_nor_pp_unbuffered(sp, op);
+ }
+
+ if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) {
+ ret = mtk_nor_write_buffer_disable(sp);
+ if (ret < 0)
+ return ret;
+ mtk_nor_setup_bus(sp, op);
+ if (op->data.nbytes == 1) {
+ mtk_nor_set_addr(sp, op);
+ return mtk_nor_read_pio(sp, op);
+ } else if (((ulong)(op->data.buf.in) &
+ MTK_NOR_DMA_ALIGN_MASK)) {
+ return mtk_nor_read_bounce(sp, op->addr.val,
+ op->data.nbytes,
+ op->data.buf.in);
+ } else {
+ return mtk_nor_read_dma(sp, op->addr.val,
+ op->data.nbytes,
+ op->data.buf.in);
+ }
+ }
+
+ return -ENOTSUPP;
+}
+
+static int mtk_nor_setup(struct spi_device *spi)
+{
+ struct mtk_nor *sp = spi_controller_get_devdata(spi->master);
+
+ if (spi->max_speed_hz && (spi->max_speed_hz < sp->spi_freq)) {
+ dev_err(&spi->dev, "spi clock should be %u Hz.\n",
+ sp->spi_freq);
+ return -EINVAL;
+ }
+ spi->max_speed_hz = sp->spi_freq;
+
+ return 0;
+}
+
+static int mtk_nor_transfer_one_message(struct spi_controller *master,
+ struct spi_message *m)
+{
+ struct mtk_nor *sp = spi_controller_get_devdata(master);
+ struct spi_transfer *t = NULL;
+ unsigned long trx_len = 0;
+ int stat = 0;
+ int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
+ void __iomem *reg;
+ const u8 *txbuf;
+ u8 *rxbuf;
+ int i;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ txbuf = t->tx_buf;
+ for (i = 0; i < t->len; i++, reg_offset--) {
+ reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
+ if (txbuf)
+ writeb(txbuf[i], reg);
+ else
+ writeb(0, reg);
+ }
+ trx_len += t->len;
+ }
+
+ writel(trx_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);
+
+ stat = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
+ trx_len * BITS_PER_BYTE);
+ if (stat < 0)
+ goto msg_done;
+
+ reg_offset = trx_len - 1;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ rxbuf = t->rx_buf;
+ for (i = 0; i < t->len; i++, reg_offset--) {
+ reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
+ if (rxbuf)
+ rxbuf[i] = readb(reg);
+ }
+ }
+
+ m->actual_length = trx_len;
+msg_done:
+ m->status = stat;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+static void mtk_nor_disable_clk(struct mtk_nor *sp)
+{
+ clk_disable_unprepare(sp->spi_clk);
+ clk_disable_unprepare(sp->ctlr_clk);
+}
+
+static int mtk_nor_enable_clk(struct mtk_nor *sp)
+{
+ int ret;
+
+ ret = clk_prepare_enable(sp->spi_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(sp->ctlr_clk);
+ if (ret) {
+ clk_disable_unprepare(sp->spi_clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mtk_nor_init(struct mtk_nor *sp)
+{
+ int ret;
+
+ ret = mtk_nor_enable_clk(sp);
+ if (ret)
+ return ret;
+
+ sp->spi_freq = clk_get_rate(sp->spi_clk);
+
+ writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
+ mtk_nor_rmw(sp, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0);
+ mtk_nor_rmw(sp, MTK_NOR_REG_CFG3,
+ MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0);
+
+ return ret;
+}
+
+static irqreturn_t mtk_nor_irq_handler(int irq, void *data)
+{
+ struct mtk_nor *sp = data;
+ u32 irq_status, irq_enabled;
+
+ irq_status = readl(sp->base + MTK_NOR_REG_IRQ_STAT);
+ irq_enabled = readl(sp->base + MTK_NOR_REG_IRQ_EN);
+ // write status back to clear interrupt
+ writel(irq_status, sp->base + MTK_NOR_REG_IRQ_STAT);
+
+ if (!(irq_status & irq_enabled))
+ return IRQ_NONE;
+
+ if (irq_status & MTK_NOR_IRQ_DMA) {
+ complete(&sp->op_done);
+ writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static size_t mtk_max_msg_size(struct spi_device *spi)
+{
+ return MTK_NOR_PRG_MAX_SIZE;
+}
+
+static const struct spi_controller_mem_ops mtk_nor_mem_ops = {
+ .adjust_op_size = mtk_nor_adjust_op_size,
+ .supports_op = mtk_nor_supports_op,
+ .exec_op = mtk_nor_exec_op
+};
+
+static const struct of_device_id mtk_nor_match[] = {
+ { .compatible = "mediatek,mt8173-nor" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mtk_nor_match);
+
+static int mtk_nor_probe(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct mtk_nor *sp;
+ void __iomem *base;
+ u8 *buffer;
+ struct clk *spi_clk, *ctlr_clk;
+ int ret, irq;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ spi_clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(spi_clk))
+ return PTR_ERR(spi_clk);
+
+ ctlr_clk = devm_clk_get(&pdev->dev, "sf");
+ if (IS_ERR(ctlr_clk))
+ return PTR_ERR(ctlr_clk);
+
+ buffer = devm_kmalloc(&pdev->dev,
+ MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
+ GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ if ((ulong)buffer & MTK_NOR_DMA_ALIGN_MASK)
+ buffer = (u8 *)(((ulong)buffer + MTK_NOR_DMA_ALIGN) &
+ ~MTK_NOR_DMA_ALIGN_MASK);
+
+ ctlr = spi_alloc_master(&pdev->dev, sizeof(*sp));
+ if (!ctlr) {
+ dev_err(&pdev->dev, "failed to allocate spi controller\n");
+ return -ENOMEM;
+ }
+
+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+ ctlr->dev.of_node = pdev->dev.of_node;
+ ctlr->max_message_size = mtk_max_msg_size;
+ ctlr->mem_ops = &mtk_nor_mem_ops;
+ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
+ ctlr->num_chipselect = 1;
+ ctlr->setup = mtk_nor_setup;
+ ctlr->transfer_one_message = mtk_nor_transfer_one_message;
+
+ dev_set_drvdata(&pdev->dev, ctlr);
+
+ sp = spi_controller_get_devdata(ctlr);
+ sp->base = base;
+ sp->buffer = buffer;
+ sp->has_irq = false;
+ sp->wbuf_en = false;
+ sp->ctlr = ctlr;
+ sp->dev = &pdev->dev;
+ sp->spi_clk = spi_clk;
+ sp->ctlr_clk = ctlr_clk;
+
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq < 0) {
+ dev_warn(sp->dev, "IRQ not available.");
+ } else {
+ writel(MTK_NOR_IRQ_MASK, base + MTK_NOR_REG_IRQ_STAT);
+ writel(0, base + MTK_NOR_REG_IRQ_EN);
+ ret = devm_request_irq(sp->dev, irq, mtk_nor_irq_handler, 0,
+ pdev->name, sp);
+ if (ret < 0) {
+ dev_warn(sp->dev, "failed to request IRQ.");
+ } else {
+ init_completion(&sp->op_done);
+ sp->has_irq = true;
+ }
+ }
+
+ ret = mtk_nor_init(sp);
+ if (ret < 0) {
+ kfree(ctlr);
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "spi frequency: %d Hz\n", sp->spi_freq);
+
+ return devm_spi_register_controller(&pdev->dev, ctlr);
+}
+
+static int mtk_nor_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr;
+ struct mtk_nor *sp;
+
+ ctlr = dev_get_drvdata(&pdev->dev);
+ sp = spi_controller_get_devdata(ctlr);
+
+ mtk_nor_disable_clk(sp);
+
+ return 0;
+}
+
+static struct platform_driver mtk_nor_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = mtk_nor_match,
+ },
+ .probe = mtk_nor_probe,
+ .remove = mtk_nor_remove,
+};
+
+module_platform_driver(mtk_nor_driver);
+
+MODULE_DESCRIPTION("Mediatek SPI NOR controller driver");
+MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
new file mode 100644
index 000000000000..4f94c9127fc1
--- /dev/null
+++ b/drivers/spi/spi-mux.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// General Purpose SPI multiplexer
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mux/consumer.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+#define SPI_MUX_NO_CS ((unsigned int)-1)
+
+/**
+ * DOC: Driver description
+ *
+ * This driver supports a MUX on an SPI bus. This can be useful when you need
+ * more chip selects than the SPI controller provides, or than are available
+ * in a particular board setup.
+ *
+ * The driver will create an additional SPI controller. Devices added under the
+ * mux will be handled as 'chip selects' on this controller.
+ */
+
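+/*
+ * Illustrative device tree fragment only: the node names, the GPIO-based mux
+ * and the flash children are examples of how such a setup might look, not
+ * requirements of this driver.
+ *
+ *	mux: mux-controller {
+ *		compatible = "gpio-mux";
+ *		#mux-control-cells = <0>;
+ *		mux-gpios = <&gpio0 3 0>;
+ *	};
+ *
+ *	&spi0 {
+ *		spi@0 {
+ *			compatible = "spi-mux";
+ *			reg = <0>;
+ *			#address-cells = <1>;
+ *			#size-cells = <0>;
+ *			mux-controls = <&mux>;
+ *
+ *			flash@0 {
+ *				compatible = "jedec,spi-nor";
+ *				reg = <0>;
+ *			};
+ *
+ *			flash@1 {
+ *				compatible = "jedec,spi-nor";
+ *				reg = <1>;
+ *			};
+ *		};
+ *	};
+ */
+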
+/**
+ * struct spi_mux_priv - the basic spi_mux structure
+ * @spi: pointer to the device struct attached to the parent
+ * spi controller
+ * @current_cs: The current chip select set in the mux
+ * @child_msg_complete: The mux replaces the complete callback in the child's
+ *			message with its own callback; this field is used by the
+ * driver to store the child's callback during a transfer
+ * @child_msg_context: Used to store the child's context to the callback
+ * @child_msg_dev: Used to store the spi_device pointer to the child
+ * @mux: mux_control structure used to provide chip selects for
+ * downstream spi devices
+ */
+struct spi_mux_priv {
+ struct spi_device *spi;
+ unsigned int current_cs;
+
+ void (*child_msg_complete)(void *context);
+ void *child_msg_context;
+ struct spi_device *child_msg_dev;
+ struct mux_control *mux;
+};
+
+/* should not get called when the parent controller is doing a transfer */
+static int spi_mux_select(struct spi_device *spi)
+{
+ struct spi_mux_priv *priv = spi_controller_get_devdata(spi->controller);
+ int ret;
+
+ if (priv->current_cs == spi->chip_select)
+ return 0;
+
+ dev_dbg(&priv->spi->dev, "setting up the mux for cs %d\n",
+ spi->chip_select);
+
+ /* copy the child device's settings except for the cs */
+ priv->spi->max_speed_hz = spi->max_speed_hz;
+ priv->spi->mode = spi->mode;
+ priv->spi->bits_per_word = spi->bits_per_word;
+
+ ret = mux_control_select(priv->mux, spi->chip_select);
+ if (ret)
+ return ret;
+
+ priv->current_cs = spi->chip_select;
+
+ return 0;
+}
+
+static int spi_mux_setup(struct spi_device *spi)
+{
+ struct spi_mux_priv *priv = spi_controller_get_devdata(spi->controller);
+
+ /*
+	 * This may be called multiple times. We don't apply the child's
+	 * settings here; they are copied over at transfer time, because we
+	 * can't know which child device the next transfer will be for.
+ */
+ return spi_setup(priv->spi);
+}
+
+static void spi_mux_complete_cb(void *context)
+{
+ struct spi_mux_priv *priv = (struct spi_mux_priv *)context;
+ struct spi_controller *ctlr = spi_get_drvdata(priv->spi);
+ struct spi_message *m = ctlr->cur_msg;
+
+ m->complete = priv->child_msg_complete;
+ m->context = priv->child_msg_context;
+ m->spi = priv->child_msg_dev;
+ spi_finalize_current_message(ctlr);
+ mux_control_deselect(priv->mux);
+}
+
+static int spi_mux_transfer_one_message(struct spi_controller *ctlr,
+ struct spi_message *m)
+{
+ struct spi_mux_priv *priv = spi_controller_get_devdata(ctlr);
+ struct spi_device *spi = m->spi;
+ int ret;
+
+ ret = spi_mux_select(spi);
+ if (ret)
+ return ret;
+
+ /*
+	 * Save the child's complete callback, context and spi_device, then
+	 * replace them with our own pointers for the parent transfer.
+ */
+ priv->child_msg_complete = m->complete;
+ priv->child_msg_context = m->context;
+ priv->child_msg_dev = m->spi;
+
+ m->complete = spi_mux_complete_cb;
+ m->context = priv;
+ m->spi = priv->spi;
+
+ /* do the transfer */
+ return spi_async(priv->spi, m);
+}
+
+static int spi_mux_probe(struct spi_device *spi)
+{
+ struct spi_controller *ctlr;
+ struct spi_mux_priv *priv;
+ int ret;
+
+ ctlr = spi_alloc_master(&spi->dev, sizeof(*priv));
+ if (!ctlr)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, ctlr);
+ priv = spi_controller_get_devdata(ctlr);
+ priv->spi = spi;
+
+ priv->mux = devm_mux_control_get(&spi->dev, NULL);
+ if (IS_ERR(priv->mux)) {
+ ret = PTR_ERR(priv->mux);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&spi->dev, "failed to get control-mux\n");
+ goto err_put_ctlr;
+ }
+
+ priv->current_cs = SPI_MUX_NO_CS;
+
+ /* supported modes are the same as our parent's */
+ ctlr->mode_bits = spi->controller->mode_bits;
+ ctlr->flags = spi->controller->flags;
+ ctlr->transfer_one_message = spi_mux_transfer_one_message;
+ ctlr->setup = spi_mux_setup;
+ ctlr->num_chipselect = mux_control_states(priv->mux);
+ ctlr->bus_num = -1;
+ ctlr->dev.of_node = spi->dev.of_node;
+
+ ret = devm_spi_register_controller(&spi->dev, ctlr);
+ if (ret)
+ goto err_put_ctlr;
+
+ return 0;
+
+err_put_ctlr:
+ spi_controller_put(ctlr);
+
+ return ret;
+}
+
+static const struct of_device_id spi_mux_of_match[] = {
+ { .compatible = "spi-mux" },
+ { }
+};
+
+static struct spi_driver spi_mux_driver = {
+ .probe = spi_mux_probe,
+ .driver = {
+ .name = "spi-mux",
+ .of_match_table = spi_mux_of_match,
+ },
+};
+
+module_spi_driver(spi_mux_driver);
+
+MODULE_DESCRIPTION("SPI multiplexer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index dce85ee07cd0..918918a9e049 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -22,7 +22,6 @@
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -32,7 +31,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/completion.h>
-#include <linux/gpio.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index 8c5084a3a617..1ccda82da206 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -307,6 +307,7 @@
#define POLL_TOUT 5000
#define NXP_FSPI_MAX_CHIPSELECT 4
+#define NXP_FSPI_MIN_IOMAP SZ_4M
struct nxp_fspi_devtype_data {
unsigned int rxfifo;
@@ -324,11 +325,29 @@ static const struct nxp_fspi_devtype_data lx2160a_data = {
.little_endian = true, /* little-endian */
};
+static const struct nxp_fspi_devtype_data imx8mm_data = {
+ .rxfifo = SZ_512, /* (64 * 64 bits) */
+ .txfifo = SZ_1K, /* (128 * 64 bits) */
+ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
+ .quirks = 0,
+ .little_endian = true, /* little-endian */
+};
+
+static const struct nxp_fspi_devtype_data imx8qxp_data = {
+ .rxfifo = SZ_512, /* (64 * 64 bits) */
+ .txfifo = SZ_1K, /* (128 * 64 bits) */
+ .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
+ .quirks = 0,
+ .little_endian = true, /* little-endian */
+};
+
struct nxp_fspi {
void __iomem *iobase;
void __iomem *ahb_addr;
u32 memmap_phy;
u32 memmap_phy_size;
+ u32 memmap_start;
+ u32 memmap_len;
struct clk *clk, *clk_en;
struct device *dev;
struct completion c;
@@ -641,12 +660,35 @@ static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi)
f->selected = spi->chip_select;
}
-static void nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op)
+static int nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op)
{
+ u32 start = op->addr.val;
u32 len = op->data.nbytes;
+	/* (Re)map the AHB window if the requested range is not already mapped */
+ if ((!f->ahb_addr) || start < f->memmap_start ||
+ start + len > f->memmap_start + f->memmap_len) {
+ if (f->ahb_addr)
+ iounmap(f->ahb_addr);
+
+ f->memmap_start = start;
+ f->memmap_len = len > NXP_FSPI_MIN_IOMAP ?
+ len : NXP_FSPI_MIN_IOMAP;
+
+ f->ahb_addr = ioremap_wc(f->memmap_phy + f->memmap_start,
+ f->memmap_len);
+
+ if (!f->ahb_addr) {
+			dev_err(f->dev, "failed to map AHB memory\n");
+ return -ENOMEM;
+ }
+ }
+
/* Read out the data directly from the AHB buffer. */
- memcpy_fromio(op->data.buf.in, (f->ahb_addr + op->addr.val), len);
+ memcpy_fromio(op->data.buf.in,
+ f->ahb_addr + start - f->memmap_start, len);
+
+ return 0;
}
static void nxp_fspi_fill_txfifo(struct nxp_fspi *f,
@@ -806,7 +848,7 @@ static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
*/
if (op->data.nbytes > (f->devtype_data->rxfifo - 4) &&
op->data.dir == SPI_MEM_DATA_IN) {
- nxp_fspi_read_ahb(f, op);
+ err = nxp_fspi_read_ahb(f, op);
} else {
if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
nxp_fspi_fill_txfifo(f, op);
@@ -871,8 +913,9 @@ static int nxp_fspi_default_setup(struct nxp_fspi *f)
fspi_writel(f, FSPI_DLLBCR_OVRDEN, base + FSPI_DLLBCR);
/* enable module */
- fspi_writel(f, FSPI_MCR0_AHB_TIMEOUT(0xFF) | FSPI_MCR0_IP_TIMEOUT(0xFF),
- base + FSPI_MCR0);
+ fspi_writel(f, FSPI_MCR0_AHB_TIMEOUT(0xFF) |
+ FSPI_MCR0_IP_TIMEOUT(0xFF) | (u32) FSPI_MCR0_OCTCOMB_EN,
+ base + FSPI_MCR0);
/*
* Disable same device enable bit and configure all slave devices
@@ -976,9 +1019,8 @@ static int nxp_fspi_probe(struct platform_device *pdev)
/* find the resources - controller memory mapped space */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_mmap");
- f->ahb_addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(f->ahb_addr)) {
- ret = PTR_ERR(f->ahb_addr);
+ if (!res) {
+ ret = -ENODEV;
goto err_put_ctrl;
}
@@ -1057,6 +1099,9 @@ static int nxp_fspi_remove(struct platform_device *pdev)
mutex_destroy(&f->lock);
+ if (f->ahb_addr)
+ iounmap(f->ahb_addr);
+
return 0;
}
@@ -1076,6 +1121,8 @@ static int nxp_fspi_resume(struct device *dev)
static const struct of_device_id nxp_fspi_dt_ids[] = {
{ .compatible = "nxp,lx2160a-fspi", .data = (void *)&lx2160a_data, },
+ { .compatible = "nxp,imx8mm-fspi", .data = (void *)&imx8mm_data, },
+ { .compatible = "nxp,imx8qxp-fspi", .data = (void *)&imx8qxp_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids);
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 7e2292c11d12..e9e256718ef4 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -130,6 +130,7 @@ struct omap2_mcspi {
int fifo_depth;
bool slave_aborted;
unsigned int pin_dir:1;
+ size_t max_xfer_len;
};
struct omap2_mcspi_cs {
@@ -974,20 +975,12 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
* Note that we currently allow DMA only if we get a channel
* for both rx and tx. Otherwise we'll do PIO for both rx and tx.
*/
-static int omap2_mcspi_request_dma(struct spi_device *spi)
+static int omap2_mcspi_request_dma(struct omap2_mcspi *mcspi,
+ struct omap2_mcspi_dma *mcspi_dma)
{
- struct spi_master *master = spi->master;
- struct omap2_mcspi *mcspi;
- struct omap2_mcspi_dma *mcspi_dma;
int ret = 0;
- mcspi = spi_master_get_devdata(master);
- mcspi_dma = mcspi->dma_channels + spi->chip_select;
-
- init_completion(&mcspi_dma->dma_rx_completion);
- init_completion(&mcspi_dma->dma_tx_completion);
-
- mcspi_dma->dma_rx = dma_request_chan(&master->dev,
+ mcspi_dma->dma_rx = dma_request_chan(mcspi->dev,
mcspi_dma->dma_rx_ch_name);
if (IS_ERR(mcspi_dma->dma_rx)) {
ret = PTR_ERR(mcspi_dma->dma_rx);
@@ -995,7 +988,7 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
goto no_dma;
}
- mcspi_dma->dma_tx = dma_request_chan(&master->dev,
+ mcspi_dma->dma_tx = dma_request_chan(mcspi->dev,
mcspi_dma->dma_tx_ch_name);
if (IS_ERR(mcspi_dma->dma_tx)) {
ret = PTR_ERR(mcspi_dma->dma_tx);
@@ -1004,20 +997,40 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
mcspi_dma->dma_rx = NULL;
}
+ init_completion(&mcspi_dma->dma_rx_completion);
+ init_completion(&mcspi_dma->dma_tx_completion);
+
no_dma:
return ret;
}
+static void omap2_mcspi_release_dma(struct spi_master *master)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ struct omap2_mcspi_dma *mcspi_dma;
+ int i;
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ mcspi_dma = &mcspi->dma_channels[i];
+
+ if (mcspi_dma->dma_rx) {
+ dma_release_channel(mcspi_dma->dma_rx);
+ mcspi_dma->dma_rx = NULL;
+ }
+ if (mcspi_dma->dma_tx) {
+ dma_release_channel(mcspi_dma->dma_tx);
+ mcspi_dma->dma_tx = NULL;
+ }
+ }
+}
+
static int omap2_mcspi_setup(struct spi_device *spi)
{
int ret;
struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
struct omap2_mcspi_regs *ctx = &mcspi->ctx;
- struct omap2_mcspi_dma *mcspi_dma;
struct omap2_mcspi_cs *cs = spi->controller_state;
- mcspi_dma = &mcspi->dma_channels[spi->chip_select];
-
if (!cs) {
cs = kzalloc(sizeof *cs, GFP_KERNEL);
if (!cs)
@@ -1042,13 +1055,6 @@ static int omap2_mcspi_setup(struct spi_device *spi)
}
}
- if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
- ret = omap2_mcspi_request_dma(spi);
- if (ret)
- dev_warn(&spi->dev, "not using DMA for McSPI (%d)\n",
- ret);
- }
-
ret = pm_runtime_get_sync(mcspi->dev);
if (ret < 0) {
pm_runtime_put_noidle(mcspi->dev);
@@ -1065,12 +1071,8 @@ static int omap2_mcspi_setup(struct spi_device *spi)
static void omap2_mcspi_cleanup(struct spi_device *spi)
{
- struct omap2_mcspi *mcspi;
- struct omap2_mcspi_dma *mcspi_dma;
struct omap2_mcspi_cs *cs;
- mcspi = spi_master_get_devdata(spi->master);
-
if (spi->controller_state) {
/* Unlink controller state from context save list */
cs = spi->controller_state;
@@ -1079,19 +1081,6 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
kfree(cs);
}
- if (spi->chip_select < spi->master->num_chipselect) {
- mcspi_dma = &mcspi->dma_channels[spi->chip_select];
-
- if (mcspi_dma->dma_rx) {
- dma_release_channel(mcspi_dma->dma_rx);
- mcspi_dma->dma_rx = NULL;
- }
- if (mcspi_dma->dma_tx) {
- dma_release_channel(mcspi_dma->dma_tx);
- mcspi_dma->dma_tx = NULL;
- }
- }
-
if (gpio_is_valid(spi->cs_gpio))
gpio_free(spi->cs_gpio);
}
@@ -1302,9 +1291,24 @@ static bool omap2_mcspi_can_dma(struct spi_master *master,
if (spi_controller_is_slave(master))
return true;
+ master->dma_rx = mcspi_dma->dma_rx;
+ master->dma_tx = mcspi_dma->dma_tx;
+
return (xfer->len >= DMA_MIN_BYTES);
}
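+/*
+ * Limit the transfer size only when the SoC data sets a DMA length limit
+ * (e.g. AM654) and an RX DMA channel is actually in use; PIO transfers
+ * have no such restriction.
+ */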
+static size_t omap2_mcspi_max_xfer_size(struct spi_device *spi)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+ struct omap2_mcspi_dma *mcspi_dma =
+ &mcspi->dma_channels[spi->chip_select];
+
+ if (mcspi->max_xfer_len && mcspi_dma->dma_rx)
+ return mcspi->max_xfer_len;
+
+ return SIZE_MAX;
+}
+
static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi)
{
struct spi_master *master = mcspi->master;
@@ -1373,6 +1377,11 @@ static struct omap2_mcspi_platform_config omap4_pdata = {
.regs_offset = OMAP4_MCSPI_REG_OFFSET,
};
+static struct omap2_mcspi_platform_config am654_pdata = {
+ .regs_offset = OMAP4_MCSPI_REG_OFFSET,
+ .max_xfer_len = SZ_4K - 1,
+};
+
static const struct of_device_id omap_mcspi_of_match[] = {
{
.compatible = "ti,omap2-mcspi",
@@ -1382,6 +1391,10 @@ static const struct of_device_id omap_mcspi_of_match[] = {
.compatible = "ti,omap4-mcspi",
.data = &omap4_pdata,
},
+ {
+ .compatible = "ti,am654-mcspi",
+ .data = &am654_pdata,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
@@ -1439,6 +1452,10 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
mcspi->pin_dir = pdata->pin_dir;
}
regs_offset = pdata->regs_offset;
+ if (pdata->max_xfer_len) {
+ mcspi->max_xfer_len = pdata->max_xfer_len;
+ master->max_transfer_size = omap2_mcspi_max_xfer_size;
+ }
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mcspi->base = devm_ioremap_resource(&pdev->dev, r);
@@ -1464,6 +1481,11 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
for (i = 0; i < master->num_chipselect; i++) {
sprintf(mcspi->dma_channels[i].dma_rx_ch_name, "rx%d", i);
sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i);
+
+ status = omap2_mcspi_request_dma(mcspi,
+ &mcspi->dma_channels[i]);
+ if (status == -EPROBE_DEFER)
+ goto free_master;
}
status = platform_get_irq(pdev, 0);
@@ -1501,6 +1523,7 @@ disable_pm:
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
free_master:
+ omap2_mcspi_release_dma(master);
spi_master_put(master);
return status;
}
@@ -1510,6 +1533,8 @@ static int omap2_mcspi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ omap2_mcspi_release_dma(master);
+
pm_runtime_dont_use_autosuspend(mcspi->dev);
pm_runtime_put_sync(mcspi->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 4c7a71f0fb3e..73d2a65d0b6e 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -70,6 +70,10 @@ MODULE_ALIAS("platform:pxa2xx-spi");
#define LPSS_CAPS_CS_EN_SHIFT 9
#define LPSS_CAPS_CS_EN_MASK (0xf << LPSS_CAPS_CS_EN_SHIFT)
+#define LPSS_PRIV_CLOCK_GATE 0x38
+#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK 0x3
+#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON 0x3
+
struct lpss_config {
/* LPSS offset from drv_data->ioaddr */
unsigned offset;
@@ -86,6 +90,8 @@ struct lpss_config {
unsigned cs_sel_shift;
unsigned cs_sel_mask;
unsigned cs_num;
+ /* Quirks */
+ unsigned cs_clk_stays_gated : 1;
};
/* Keep these sorted with enum pxa_ssp_type */
@@ -156,6 +162,7 @@ static const struct lpss_config lpss_platforms[] = {
.tx_threshold_hi = 56,
.cs_sel_shift = 8,
.cs_sel_mask = 3 << 8,
+ .cs_clk_stays_gated = true,
},
};
@@ -185,6 +192,11 @@ static bool is_quark_x1000_ssp(const struct driver_data *drv_data)
return drv_data->ssp_type == QUARK_X1000_SSP;
}
+static bool is_mmp2_ssp(const struct driver_data *drv_data)
+{
+ return drv_data->ssp_type == MMP2_SSP;
+}
+
static u32 pxa2xx_spi_get_ssrc1_change_mask(const struct driver_data *drv_data)
{
switch (drv_data->ssp_type) {
@@ -383,6 +395,22 @@ static void lpss_ssp_cs_control(struct spi_device *spi, bool enable)
else
value |= LPSS_CS_CONTROL_CS_HIGH;
__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
+ if (config->cs_clk_stays_gated) {
+ u32 clkgate;
+
+ /*
+		 * With dynamic clock gating enabled, changing the CS bit alone
+		 * does not actually move the CS line right away, which breaks
+		 * transfers that specify delays or carry no data. Briefly
+		 * force the clock on so that the CS change takes effect.
+ */
+ clkgate = __lpss_ssp_read_priv(drv_data, LPSS_PRIV_CLOCK_GATE);
+ value = (clkgate & ~LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK) |
+ LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON;
+
+ __lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, value);
+ __lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, clkgate);
+ }
}
static void cs_assert(struct spi_device *spi)
@@ -463,8 +491,8 @@ int pxa2xx_spi_flush(struct driver_data *drv_data)
static void pxa2xx_spi_off(struct driver_data *drv_data)
{
- /* On MMP, disabling SSE seems to corrupt the rx fifo */
- if (drv_data->ssp_type == MMP2_SSP)
+ /* On MMP, disabling SSE seems to corrupt the Rx FIFO */
+ if (is_mmp2_ssp(drv_data))
return;
pxa2xx_spi_write(drv_data, SSCR0,
@@ -1070,7 +1098,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
|| (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
!= (cr1 & change_mask)) {
/* stop the SSP, and update the other bits */
- if (drv_data->ssp_type != MMP2_SSP)
+ if (!is_mmp2_ssp(drv_data))
pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
@@ -1084,7 +1112,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
}
- if (drv_data->ssp_type == MMP2_SSP) {
+ if (is_mmp2_ssp(drv_data)) {
u8 tx_level = (pxa2xx_spi_read(drv_data, SSSR)
& SSSR_TFL_MASK) >> 8;
@@ -1548,18 +1576,18 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
else if (pcidev_id)
type = (enum pxa_ssp_type)pcidev_id->driver_data;
else
- return NULL;
+ return ERR_PTR(-EINVAL);
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ssp = &pdata->ssp;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ssp->mmio_base))
- return NULL;
+ return ERR_CAST(ssp->mmio_base);
ssp->phys_base = res->start;
@@ -1573,11 +1601,11 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
ssp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(ssp->clk))
- return NULL;
+ return ERR_CAST(ssp->clk);
ssp->irq = platform_get_irq(pdev, 0);
if (ssp->irq < 0)
- return NULL;
+ return ERR_PTR(ssp->irq);
ssp->type = type;
ssp->dev = &pdev->dev;
@@ -1634,9 +1662,9 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
platform_info = dev_get_platdata(dev);
if (!platform_info) {
platform_info = pxa2xx_spi_init_pdata(pdev);
- if (!platform_info) {
+ if (IS_ERR(platform_info)) {
dev_err(&pdev->dev, "missing platform data\n");
- return -ENODEV;
+ return PTR_ERR(platform_info);
}
}
@@ -1884,11 +1912,7 @@ out_error_controller_alloc:
static int pxa2xx_spi_remove(struct platform_device *pdev)
{
struct driver_data *drv_data = platform_get_drvdata(pdev);
- struct ssp_device *ssp;
-
- if (!drv_data)
- return 0;
- ssp = drv_data->ssp;
+ struct ssp_device *ssp = drv_data->ssp;
pm_runtime_get_sync(&pdev->dev);
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index dd3434a407ea..a364b99497e2 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -1217,6 +1217,11 @@ static int spi_qup_suspend(struct device *device)
struct spi_qup *controller = spi_master_get_devdata(master);
int ret;
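+	/*
+	 * Bring the controller out of runtime suspend first; the clocks are
+	 * unconditionally disabled again further down.
+	 */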
+ if (pm_runtime_suspended(device)) {
+ ret = spi_qup_pm_resume_runtime(device);
+ if (ret)
+ return ret;
+ }
ret = spi_master_suspend(master);
if (ret)
return ret;
@@ -1225,10 +1230,8 @@ static int spi_qup_suspend(struct device *device)
if (ret)
return ret;
- if (!pm_runtime_suspended(device)) {
- clk_disable_unprepare(controller->cclk);
- clk_disable_unprepare(controller->iclk);
- }
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
return 0;
}
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 2cc6d9951b52..70ef63e0b6b8 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -843,14 +843,17 @@ static const struct dev_pm_ops rockchip_spi_pm = {
};
static const struct of_device_id rockchip_spi_dt_match[] = {
- { .compatible = "rockchip,rv1108-spi", },
+ { .compatible = "rockchip,px30-spi", },
{ .compatible = "rockchip,rk3036-spi", },
{ .compatible = "rockchip,rk3066-spi", },
{ .compatible = "rockchip,rk3188-spi", },
{ .compatible = "rockchip,rk3228-spi", },
{ .compatible = "rockchip,rk3288-spi", },
+ { .compatible = "rockchip,rk3308-spi", },
+ { .compatible = "rockchip,rk3328-spi", },
{ .compatible = "rockchip,rk3368-spi", },
{ .compatible = "rockchip,rk3399-spi", },
+ { .compatible = "rockchip,rv1108-spi", },
{ },
};
MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 85575d45901c..06192c9ea813 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -24,6 +24,7 @@
#include <linux/sh_dma.h>
#include <linux/spi/spi.h>
#include <linux/spi/rspi.h>
+#include <linux/spinlock.h>
#define RSPI_SPCR 0x00 /* Control Register */
#define RSPI_SSLP 0x01 /* Slave Select Polarity Register */
@@ -79,8 +80,7 @@
#define SPCR_BSWAP 0x01 /* Byte Swap of read-data for DMAC */
/* SSLP - Slave Select Polarity Register */
-#define SSLP_SSL1P 0x02 /* SSL1 Signal Polarity Setting */
-#define SSLP_SSL0P 0x01 /* SSL0 Signal Polarity Setting */
+#define SSLP_SSLP(i) BIT(i) /* SSLi Signal Polarity Setting */
/* SPPCR - Pin Control Register */
#define SPPCR_MOIFE 0x20 /* MOSI Idle Value Fixing Enable */
@@ -181,7 +181,9 @@ struct rspi_data {
void __iomem *addr;
u32 max_speed_hz;
struct spi_controller *ctlr;
+ struct platform_device *pdev;
wait_queue_head_t wait;
+ spinlock_t lock; /* Protects RMW-access to RSPI_SSLP */
struct clk *clk;
u16 spcmd;
u8 spsr;
@@ -239,7 +241,7 @@ struct spi_ops {
int (*set_config_register)(struct rspi_data *rspi, int access_size);
int (*transfer_one)(struct spi_controller *ctlr,
struct spi_device *spi, struct spi_transfer *xfer);
- u16 mode_bits;
+ u16 extra_mode_bits;
u16 flags;
u16 fifo_size;
u8 num_hw_ss;
@@ -919,6 +921,29 @@ static int qspi_setup_sequencer(struct rspi_data *rspi,
return 0;
}
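+/*
+ * Set the native chip select polarity from the SPI mode. Chip selects
+ * driven through GPIO descriptors are handled by the SPI core, so they
+ * are left alone here.
+ */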
+static int rspi_setup(struct spi_device *spi)
+{
+ struct rspi_data *rspi = spi_controller_get_devdata(spi->controller);
+ u8 sslp;
+
+ if (spi->cs_gpiod)
+ return 0;
+
+ pm_runtime_get_sync(&rspi->pdev->dev);
+ spin_lock_irq(&rspi->lock);
+
+ sslp = rspi_read8(rspi, RSPI_SSLP);
+ if (spi->mode & SPI_CS_HIGH)
+ sslp |= SSLP_SSLP(spi->chip_select);
+ else
+ sslp &= ~SSLP_SSLP(spi->chip_select);
+ rspi_write8(rspi, sslp, RSPI_SSLP);
+
+ spin_unlock_irq(&rspi->lock);
+ pm_runtime_put(&rspi->pdev->dev);
+ return 0;
+}
+
static int rspi_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
@@ -933,6 +958,8 @@ static int rspi_prepare_message(struct spi_controller *ctlr,
rspi->spcmd |= SPCMD_CPOL;
if (spi->mode & SPI_CPHA)
rspi->spcmd |= SPCMD_CPHA;
+ if (spi->mode & SPI_LSB_FIRST)
+ rspi->spcmd |= SPCMD_LSBF;
/* Configure slave signal to assert */
rspi->spcmd |= SPCMD_SSLA(spi->cs_gpiod ? rspi->ctlr->unused_native_cs
@@ -1122,7 +1149,6 @@ static int rspi_remove(struct platform_device *pdev)
static const struct spi_ops rspi_ops = {
.set_config_register = rspi_set_config_register,
.transfer_one = rspi_transfer_one,
- .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
.flags = SPI_CONTROLLER_MUST_TX,
.fifo_size = 8,
.num_hw_ss = 2,
@@ -1131,7 +1157,6 @@ static const struct spi_ops rspi_ops = {
static const struct spi_ops rspi_rz_ops = {
.set_config_register = rspi_rz_set_config_register,
.transfer_one = rspi_rz_transfer_one,
- .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 8, /* 8 for TX, 32 for RX */
.num_hw_ss = 1,
@@ -1140,8 +1165,7 @@ static const struct spi_ops rspi_rz_ops = {
static const struct spi_ops qspi_ops = {
.set_config_register = qspi_set_config_register,
.transfer_one = qspi_transfer_one,
- .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP |
- SPI_TX_DUAL | SPI_TX_QUAD |
+ .extra_mode_bits = SPI_TX_DUAL | SPI_TX_QUAD |
SPI_RX_DUAL | SPI_RX_QUAD,
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 32,
@@ -1249,16 +1273,20 @@ static int rspi_probe(struct platform_device *pdev)
goto error1;
}
+ rspi->pdev = pdev;
pm_runtime_enable(&pdev->dev);
init_waitqueue_head(&rspi->wait);
+ spin_lock_init(&rspi->lock);
ctlr->bus_num = pdev->id;
+ ctlr->setup = rspi_setup;
ctlr->auto_runtime_pm = true;
ctlr->transfer_one = ops->transfer_one;
ctlr->prepare_message = rspi_prepare_message;
ctlr->unprepare_message = rspi_unprepare_message;
- ctlr->mode_bits = ops->mode_bits;
+ ctlr->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST |
+ SPI_LOOP | ops->extra_mode_bits;
ctlr->flags = ops->flags;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->use_gpio_descriptors = true;
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index 2d6e37f25e2d..2cb3b611c294 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -227,7 +227,7 @@ static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count)
struct spi_fiq_code {
u32 length;
u32 ack_offset;
- u8 data[0];
+ u8 data[];
};
extern struct spi_fiq_code s3c24xx_spi_fiq_txrx;
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 4ef569b47aa6..d066f5144c3e 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -565,7 +565,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
qspi->io_base = devm_ioremap_resource(dev, res);
if (IS_ERR(qspi->io_base)) {
ret = PTR_ERR(qspi->io_base);
- goto err;
+ goto err_master_put;
}
qspi->phys_base = res->start;
@@ -574,24 +574,26 @@ static int stm32_qspi_probe(struct platform_device *pdev)
qspi->mm_base = devm_ioremap_resource(dev, res);
if (IS_ERR(qspi->mm_base)) {
ret = PTR_ERR(qspi->mm_base);
- goto err;
+ goto err_master_put;
}
qspi->mm_size = resource_size(res);
if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ) {
ret = -EINVAL;
- goto err;
+ goto err_master_put;
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
+ if (irq < 0) {
+ ret = irq;
+ goto err_master_put;
+ }
ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
dev_name(dev), qspi);
if (ret) {
dev_err(dev, "failed to request irq\n");
- goto err;
+ goto err_master_put;
}
init_completion(&qspi->data_completion);
@@ -599,23 +601,27 @@ static int stm32_qspi_probe(struct platform_device *pdev)
qspi->clk = devm_clk_get(dev, NULL);
if (IS_ERR(qspi->clk)) {
ret = PTR_ERR(qspi->clk);
- goto err;
+ goto err_master_put;
}
qspi->clk_rate = clk_get_rate(qspi->clk);
if (!qspi->clk_rate) {
ret = -EINVAL;
- goto err;
+ goto err_master_put;
}
ret = clk_prepare_enable(qspi->clk);
if (ret) {
dev_err(dev, "can not enable the clock\n");
- goto err;
+ goto err_master_put;
}
rstc = devm_reset_control_get_exclusive(dev, NULL);
- if (!IS_ERR(rstc)) {
+ if (IS_ERR(rstc)) {
+ ret = PTR_ERR(rstc);
+ if (ret == -EPROBE_DEFER)
+ goto err_qspi_release;
+ } else {
reset_control_assert(rstc);
udelay(2);
reset_control_deassert(rstc);
@@ -625,7 +631,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qspi);
ret = stm32_qspi_dma_setup(qspi);
if (ret)
- goto err;
+ goto err_qspi_release;
mutex_init(&qspi->lock);
@@ -641,8 +647,9 @@ static int stm32_qspi_probe(struct platform_device *pdev)
if (!ret)
return 0;
-err:
+err_qspi_release:
stm32_qspi_release(qspi);
+err_master_put:
spi_master_put(qspi->ctrl);
return ret;
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index e041f9c4ec47..44ac6eb3298d 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -175,7 +175,7 @@
#define SPI_DMA_MIN_BYTES 16
/**
- * stm32_spi_reg - stm32 SPI register & bitfield desc
+ * struct stm32_spi_reg - stm32 SPI register & bitfield desc
* @reg: register offset
* @mask: bitfield mask
* @shift: left shift
@@ -187,16 +187,16 @@ struct stm32_spi_reg {
};
/**
- * stm32_spi_regspec - stm32 registers definition, compatible dependent data
- * en: enable register and SPI enable bit
- * dma_rx_en: SPI DMA RX enable register end SPI DMA RX enable bit
- * dma_tx_en: SPI DMA TX enable register end SPI DMA TX enable bit
- * cpol: clock polarity register and polarity bit
- * cpha: clock phase register and phase bit
- * lsb_first: LSB transmitted first register and bit
- * br: baud rate register and bitfields
- * rx: SPI RX data register
- * tx: SPI TX data register
+ * struct stm32_spi_regspec - stm32 registers definition, compatible dependent data
+ * @en: enable register and SPI enable bit
+ * @dma_rx_en: SPI DMA RX enable register and SPI DMA RX enable bit
+ * @dma_tx_en: SPI DMA TX enable register and SPI DMA TX enable bit
+ * @cpol: clock polarity register and polarity bit
+ * @cpha: clock phase register and phase bit
+ * @lsb_first: LSB transmitted first register and bit
+ * @br: baud rate register and bitfields
+ * @rx: SPI RX data register
+ * @tx: SPI TX data register
*/
struct stm32_spi_regspec {
const struct stm32_spi_reg en;
@@ -213,7 +213,7 @@ struct stm32_spi_regspec {
struct stm32_spi;
/**
- * stm32_spi_cfg - stm32 compatible configuration data
+ * struct stm32_spi_cfg - stm32 compatible configuration data
* @regs: registers descriptions
* @get_fifo_size: routine to get fifo size
* @get_bpw_mask: routine to get bits per word mask
@@ -223,13 +223,13 @@ struct stm32_spi;
* @set_mode: routine to configure registers to desired mode
* @set_data_idleness: optional routine to configure registers to desired idle
* time between frames (if driver has this functionality)
- * set_number_of_data: optional routine to configure registers to desired
+ * @set_number_of_data: optional routine to configure registers to desired
* number of data (if driver has this functionality)
* @can_dma: routine to determine if the transfer is eligible for DMA use
* @transfer_one_dma_start: routine to start transfer a single spi_transfer
* using DMA
- * @dma_rx cb: routine to call after DMA RX channel operation is complete
- * @dma_tx cb: routine to call after DMA TX channel operation is complete
+ * @dma_rx_cb: routine to call after DMA RX channel operation is complete
+ * @dma_tx_cb: routine to call after DMA TX channel operation is complete
* @transfer_one_irq: routine to configure interrupts for driver
* @irq_handler_event: Interrupt handler for SPI controller events
* @irq_handler_thread: thread of interrupt handler for SPI controller
@@ -587,6 +587,7 @@ static void stm32f4_spi_read_rx(struct stm32_spi *spi)
/**
* stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register
* @spi: pointer to the spi controller data structure
+ * @flush: boolean indicating that FIFO should be flushed
*
 * Writes to rx_buf depend on the number of remaining bytes, to avoid
 * writing beyond the end of rx_buf.
@@ -756,6 +757,9 @@ static void stm32h7_spi_disable(struct stm32_spi *spi)
/**
* stm32_spi_can_dma - Determine if the transfer is eligible for DMA use
+ * @master: controller master interface
+ * @spi_dev: pointer to the spi device
+ * @transfer: pointer to spi transfer
*
* If driver has fifo and the current transfer size is greater than fifo size,
* use DMA. Otherwise use DMA for transfer longer than defined DMA min bytes.
@@ -974,6 +978,8 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
/**
* stm32_spi_prepare_msg - set up the controller to transfer a single message
+ * @master: controller master interface
+ * @msg: pointer to spi message
*/
static int stm32_spi_prepare_msg(struct spi_master *master,
struct spi_message *msg)
@@ -1026,6 +1032,7 @@ static int stm32_spi_prepare_msg(struct spi_master *master,
/**
* stm32f4_spi_dma_tx_cb - dma callback
+ * @data: pointer to the spi controller data structure
*
* DMA callback is called when the transfer is complete for DMA TX channel.
*/
@@ -1041,6 +1048,7 @@ static void stm32f4_spi_dma_tx_cb(void *data)
/**
* stm32f4_spi_dma_rx_cb - dma callback
+ * @data: pointer to the spi controller data structure
*
* DMA callback is called when the transfer is complete for DMA RX channel.
*/
@@ -1054,6 +1062,7 @@ static void stm32f4_spi_dma_rx_cb(void *data)
/**
* stm32h7_spi_dma_cb - dma callback
+ * @data: pointer to the spi controller data structure
*
* DMA callback is called when the transfer is complete or when an error
* occurs. If the transfer is complete, EOT flag is raised.
@@ -1079,6 +1088,9 @@ static void stm32h7_spi_dma_cb(void *data)
/**
* stm32_spi_dma_config - configure dma slave channel depending on current
* transfer bits_per_word.
+ * @spi: pointer to the spi controller data structure
+ * @dma_conf: pointer to the dma_slave_config structure
+ * @dir: direction of the dma transfer
*/
static void stm32_spi_dma_config(struct stm32_spi *spi,
struct dma_slave_config *dma_conf,
@@ -1126,6 +1138,7 @@ static void stm32_spi_dma_config(struct stm32_spi *spi,
/**
* stm32f4_spi_transfer_one_irq - transfer a single spi_transfer using
* interrupts
+ * @spi: pointer to the spi controller data structure
*
 * It must return 0 if the transfer is finished or 1 if the transfer is still
* in progress.
@@ -1166,6 +1179,7 @@ static int stm32f4_spi_transfer_one_irq(struct stm32_spi *spi)
/**
* stm32h7_spi_transfer_one_irq - transfer a single spi_transfer using
* interrupts
+ * @spi: pointer to the spi controller data structure
*
 * It must return 0 if the transfer is finished or 1 if the transfer is still
* in progress.
@@ -1207,6 +1221,7 @@ static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi)
/**
* stm32f4_spi_transfer_one_dma_start - Set SPI driver registers to start
* transfer using DMA
+ * @spi: pointer to the spi controller data structure
*/
static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi)
{
@@ -1227,6 +1242,7 @@ static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi)
/**
* stm32h7_spi_transfer_one_dma_start - Set SPI driver registers to start
* transfer using DMA
+ * @spi: pointer to the spi controller data structure
*/
static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
{
@@ -1243,6 +1259,8 @@ static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
/**
* stm32_spi_transfer_one_dma - transfer a single spi_transfer using DMA
+ * @spi: pointer to the spi controller data structure
+ * @xfer: pointer to the spi_transfer structure
*
 * It must return 0 if the transfer is finished or 1 if the transfer is still
* in progress.
@@ -1405,7 +1423,7 @@ static void stm32_spi_set_mbr(struct stm32_spi *spi, u32 mbrdiv)
/**
* stm32_spi_communication_type - return transfer communication type
* @spi_dev: pointer to the spi device
- * transfer: pointer to spi transfer
+ * @transfer: pointer to spi transfer
*/
static unsigned int stm32_spi_communication_type(struct spi_device *spi_dev,
struct spi_transfer *transfer)
@@ -1522,7 +1540,7 @@ static void stm32h7_spi_data_idleness(struct stm32_spi *spi, u32 len)
/**
* stm32h7_spi_number_of_data - configure number of data at current transfer
* @spi: pointer to the spi controller data structure
- * @len: transfer length
+ * @nb_words: transfer length (in words)
*/
static int stm32h7_spi_number_of_data(struct stm32_spi *spi, u32 nb_words)
{
@@ -1546,6 +1564,9 @@ static int stm32h7_spi_number_of_data(struct stm32_spi *spi, u32 nb_words)
* stm32_spi_transfer_one_setup - common setup to transfer a single
* spi_transfer either using DMA or
* interrupts.
+ * @spi: pointer to the spi controller data structure
+ * @spi_dev: pointer to the spi device
+ * @transfer: pointer to spi transfer
*/
static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
struct spi_device *spi_dev,
@@ -1625,6 +1646,9 @@ out:
/**
* stm32_spi_transfer_one - transfer a single spi_transfer
+ * @master: controller master interface
+ * @spi_dev: pointer to the spi device
+ * @transfer: pointer to spi transfer
*
* It must return 0 if the transfer is finished or 1 if the transfer is still
* in progress.
@@ -1658,6 +1682,8 @@ static int stm32_spi_transfer_one(struct spi_master *master,
/**
* stm32_spi_unprepare_msg - relax the hardware
+ * @master: controller master interface
+ * @msg: pointer to the spi message
*/
static int stm32_spi_unprepare_msg(struct spi_master *master,
struct spi_message *msg)
@@ -1671,6 +1697,7 @@ static int stm32_spi_unprepare_msg(struct spi_master *master,
/**
* stm32f4_spi_config - Configure SPI controller as SPI master
+ * @spi: pointer to the spi controller data structure
*/
static int stm32f4_spi_config(struct stm32_spi *spi)
{
@@ -1701,6 +1728,7 @@ static int stm32f4_spi_config(struct stm32_spi *spi)
/**
* stm32h7_spi_config - Configure SPI controller as SPI master
+ * @spi: pointer to the spi controller data structure
*/
static int stm32h7_spi_config(struct stm32_spi *spi)
{
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 60c4de4e4485..7412a3042a8d 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -401,9 +401,6 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
- /* Dummy generic FIFO entry */
- zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);
-
/* Manually start the generic FIFO command */
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 38b4c78df506..c92c89467e7e 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -510,6 +510,7 @@ struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
spi->cs_gpio = -ENOENT;
+ spi->mode = ctlr->buswidth_override_bits;
spin_lock_init(&spi->statistics.lock);
@@ -1514,17 +1515,15 @@ void spi_take_timestamp_pre(struct spi_controller *ctlr,
if (!xfer->ptp_sts)
return;
- if (xfer->timestamped_pre)
+ if (xfer->timestamped)
return;
- if (progress < xfer->ptp_sts_word_pre)
+ if (progress > xfer->ptp_sts_word_pre)
return;
/* Capture the resolution of the timestamp */
xfer->ptp_sts_word_pre = progress;
- xfer->timestamped_pre = true;
-
if (irqs_off) {
local_irq_save(ctlr->irq_flags);
preempt_disable();
@@ -1553,7 +1552,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr,
if (!xfer->ptp_sts)
return;
- if (xfer->timestamped_post)
+ if (xfer->timestamped)
return;
if (progress < xfer->ptp_sts_word_post)
@@ -1569,7 +1568,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr,
/* Capture the resolution of the timestamp */
xfer->ptp_sts_word_post = progress;
- xfer->timestamped_post = true;
+ xfer->timestamped = true;
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
@@ -1674,12 +1673,9 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
}
}
- if (unlikely(ctlr->ptp_sts_supported)) {
- list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
- WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_pre);
- WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_post);
- }
- }
+ if (unlikely(ctlr->ptp_sts_supported))
+ list_for_each_entry(xfer, &mesg->transfers, transfer_list)
+ WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
spi_unmap_msg(ctlr, mesg);
@@ -1955,13 +1951,8 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
spi->mode |= SPI_CS_HIGH;
/* Device speed */
- rc = of_property_read_u32(nc, "spi-max-frequency", &value);
- if (rc) {
- dev_err(&ctlr->dev,
- "%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc);
- return rc;
- }
- spi->max_speed_hz = value;
+ if (!of_property_read_u32(nc, "spi-max-frequency", &value))
+ spi->max_speed_hz = value;
return 0;
}
@@ -2181,9 +2172,10 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
return AE_NO_MEMORY;
}
+
ACPI_COMPANION_SET(&spi->dev, adev);
spi->max_speed_hz = lookup.max_speed_hz;
- spi->mode = lookup.mode;
+ spi->mode |= lookup.mode;
spi->irq = lookup.irq;
spi->bits_per_word = lookup.bits_per_word;
spi->chip_select = lookup.chip_select;
@@ -2639,7 +2631,7 @@ int spi_register_controller(struct spi_controller *ctlr)
if (ctlr->use_gpio_descriptors) {
status = spi_get_gpio_descs(ctlr);
if (status)
- return status;
+ goto free_bus_id;
/*
* A controller using GPIO descriptors always
* supports SPI_CS_HIGH if need be.
@@ -2649,7 +2641,7 @@ int spi_register_controller(struct spi_controller *ctlr)
/* Legacy code path for GPIOs from DT */
status = of_spi_get_gpio_numbers(ctlr);
if (status)
- return status;
+ goto free_bus_id;
}
}
@@ -2657,17 +2649,14 @@ int spi_register_controller(struct spi_controller *ctlr)
* Even if it's just one always-selected device, there must
* be at least one chipselect.
*/
- if (!ctlr->num_chipselect)
- return -EINVAL;
+ if (!ctlr->num_chipselect) {
+ status = -EINVAL;
+ goto free_bus_id;
+ }
status = device_add(&ctlr->dev);
- if (status < 0) {
- /* free bus id */
- mutex_lock(&board_lock);
- idr_remove(&spi_master_idr, ctlr->bus_num);
- mutex_unlock(&board_lock);
- goto done;
- }
+ if (status < 0)
+ goto free_bus_id;
dev_dbg(dev, "registered %s %s\n",
spi_controller_is_slave(ctlr) ? "slave" : "master",
dev_name(&ctlr->dev));
@@ -2683,11 +2672,7 @@ int spi_register_controller(struct spi_controller *ctlr)
status = spi_controller_initialize_queue(ctlr);
if (status) {
device_del(&ctlr->dev);
- /* free bus id */
- mutex_lock(&board_lock);
- idr_remove(&spi_master_idr, ctlr->bus_num);
- mutex_unlock(&board_lock);
- goto done;
+ goto free_bus_id;
}
}
/* add statistics */
@@ -2702,7 +2687,12 @@ int spi_register_controller(struct spi_controller *ctlr)
/* Register devices from the device tree and ACPI */
of_register_spi_devices(ctlr);
acpi_register_spi_devices(ctlr);
-done:
+ return status;
+
+free_bus_id:
+ mutex_lock(&board_lock);
+ idr_remove(&spi_master_idr, ctlr->bus_num);
+ mutex_unlock(&board_lock);
return status;
}
EXPORT_SYMBOL_GPL(spi_register_controller);
@@ -4036,7 +4026,7 @@ static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
struct device *dev;
dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
- return dev ? to_spi_device(dev) : NULL;
+ return to_spi_device(dev);
}
static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 1e217e3e9486..80dd1025b953 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -275,14 +275,14 @@ static int spidev_message(struct spidev_data *spidev,
#ifdef VERBOSE
dev_dbg(&spidev->spi->dev,
" xfer len %u %s%s%s%dbits %u usec %u usec %uHz\n",
- u_tmp->len,
- u_tmp->rx_buf ? "rx " : "",
- u_tmp->tx_buf ? "tx " : "",
- u_tmp->cs_change ? "cs " : "",
- u_tmp->bits_per_word ? : spidev->spi->bits_per_word,
- u_tmp->delay_usecs,
- u_tmp->word_delay_usecs,
- u_tmp->speed_hz ? : spidev->spi->max_speed_hz);
+ k_tmp->len,
+ k_tmp->rx_buf ? "rx " : "",
+ k_tmp->tx_buf ? "tx " : "",
+ k_tmp->cs_change ? "cs " : "",
+ k_tmp->bits_per_word ? : spidev->spi->bits_per_word,
+ k_tmp->delay.value,
+ k_tmp->word_delay.value,
+ k_tmp->speed_hz ? : spidev->spi->max_speed_hz);
#endif
spi_message_add_tail(k_tmp, &msg);
}
@@ -396,6 +396,7 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
else
retval = get_user(tmp, (u32 __user *)arg);
if (retval == 0) {
+ struct spi_controller *ctlr = spi->controller;
u32 save = spi->mode;
if (tmp & ~SPI_MODE_MASK) {
@@ -403,6 +404,10 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
break;
}
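+			/*
+			 * With a GPIO descriptor chip select the core keeps
+			 * SPI_CS_HIGH set in the mode (the polarity is handled
+			 * by gpiolib), so don't let userspace clear it here.
+			 */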
+ if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
+ ctlr->cs_gpiods[spi->chip_select])
+ tmp |= SPI_CS_HIGH;
+
tmp |= spi->mode & ~SPI_MODE_MASK;
spi->mode = (u16)tmp;
retval = spi_setup(spi);
@@ -449,10 +454,11 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
spi->max_speed_hz = tmp;
retval = spi_setup(spi);
- if (retval >= 0)
+ if (retval == 0) {
spidev->speed_hz = tmp;
- else
- dev_dbg(&spi->dev, "%d Hz (max)\n", tmp);
+ dev_dbg(&spi->dev, "%d Hz (max)\n",
+ spidev->speed_hz);
+ }
spi->max_speed_hz = save;
}
break;