Diffstat (limited to 'drivers/spi/spi-microchip-core-qspi.c')
-rw-r--r--	drivers/spi/spi-microchip-core-qspi.c | 226
1 file changed, 204 insertions(+), 22 deletions(-)
diff --git a/drivers/spi/spi-microchip-core-qspi.c b/drivers/spi/spi-microchip-core-qspi.c
index fa828fcaaef2..d13a9b755c7f 100644
--- a/drivers/spi/spi-microchip-core-qspi.c
+++ b/drivers/spi/spi-microchip-core-qspi.c
@@ -194,7 +194,7 @@ static inline void mchp_coreqspi_read_op(struct mchp_coreqspi *qspi)
 	}
 }
 
-static inline void mchp_coreqspi_write_op(struct mchp_coreqspi *qspi, bool word)
+static inline void mchp_coreqspi_write_op(struct mchp_coreqspi *qspi)
 {
 	u32 control, data;
 
@@ -222,6 +222,87 @@ static inline void mchp_coreqspi_write_op(struct mchp_coreqspi *qspi, bool word)
 	}
 }
 
+static inline void mchp_coreqspi_write_read_op(struct mchp_coreqspi *qspi)
+{
+	u32 control, data;
+
+	qspi->rx_len = qspi->tx_len;
+
+	control = readl_relaxed(qspi->regs + REG_CONTROL);
+	control |= CONTROL_FLAGSX4;
+	writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+	while (qspi->tx_len >= 4) {
+		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
+			;
+
+		data = qspi->txbuf ? *((u32 *)qspi->txbuf) : 0xaa;
+		if (qspi->txbuf)
+			qspi->txbuf += 4;
+		qspi->tx_len -= 4;
+		writel_relaxed(data, qspi->regs + REG_X4_TX_DATA);
+
+		/*
+		 * The rx FIFO is twice the size of the tx FIFO, so there is
+		 * no requirement to block transmission if receive data is not
+		 * ready, and it is fine to let the tx FIFO completely fill
+		 * without reading anything from the rx FIFO. Once the tx FIFO
+		 * has been filled and becomes non-full due to a transmission
+		 * occurring there will always be something to receive.
+		 * IOW, this is safe as TX_FIFO_SIZE + 4 < 2 * TX_FIFO_SIZE
+		 */
+		if (qspi->rx_len >= 4) {
+			if (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXAVAILABLE) {
+				data = readl_relaxed(qspi->regs + REG_X4_RX_DATA);
+				*(u32 *)qspi->rxbuf = data;
+				qspi->rxbuf += 4;
+				qspi->rx_len -= 4;
+			}
+		}
+	}
+
+	/*
+	 * Since transmission is not being blocked by clearing the rx FIFO,
+	 * loop here until all received data "leaked" by the loop above has
+	 * been dealt with.
+	 */
+	while (qspi->rx_len >= 4) {
+		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
+			;
+		data = readl_relaxed(qspi->regs + REG_X4_RX_DATA);
+		*(u32 *)qspi->rxbuf = data;
+		qspi->rxbuf += 4;
+		qspi->rx_len -= 4;
+	}
+
+	/*
+	 * Since rx_len and tx_len must be < 4 bytes at this point, there's no
+	 * concern about overflowing the rx or tx FIFOs any longer. It's
+	 * therefore safe to loop over the remainder of the transmit data before
+	 * handling the remaining receive data.
+	 */
+	if (!qspi->tx_len)
+		return;
+
+	control &= ~CONTROL_FLAGSX4;
+	writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+	while (qspi->tx_len--) {
+		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_TXFIFOFULL)
+			;
+		data = qspi->txbuf ? *qspi->txbuf : 0xaa;
+		qspi->txbuf++;
+		writel_relaxed(data, qspi->regs + REG_TX_DATA);
+	}
+
+	while (qspi->rx_len--) {
+		while (readl_relaxed(qspi->regs + REG_STATUS) & STATUS_RXFIFOEMPTY)
+			;
+		data = readl_relaxed(qspi->regs + REG_RX_DATA);
+		*qspi->rxbuf++ = (data & 0xFF);
+	}
+}
+
 static void mchp_coreqspi_enable_ints(struct mchp_coreqspi *qspi)
 {
 	u32 mask = IEN_TXDONE |
@@ -266,7 +347,7 @@ static irqreturn_t mchp_coreqspi_isr(int irq, void *dev_id)
 }
 
 static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_device *spi,
-				     const struct spi_mem_op *op)
+				     u32 max_freq)
 {
 	unsigned long clk_hz;
 	u32 control, baud_rate_val = 0;
@@ -275,11 +356,11 @@ static int mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_devi
 	if (!clk_hz)
 		return -EINVAL;
 
-	baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * op->max_freq);
+	baud_rate_val = DIV_ROUND_UP(clk_hz, 2 * max_freq);
 	if (baud_rate_val > MAX_DIVIDER || baud_rate_val < MIN_DIVIDER) {
 		dev_err(&spi->dev,
 			"could not configure the clock for spi clock %d Hz & system clock %ld Hz\n",
-			op->max_freq, clk_hz);
+			max_freq, clk_hz);
 		return -EINVAL;
 	}
 
@@ -367,23 +448,13 @@ static inline void mchp_coreqspi_config_op(struct mchp_coreqspi *qspi, const str
 	writel_relaxed(frames, qspi->regs + REG_FRAMES);
 }
 
-static int mchp_qspi_wait_for_ready(struct spi_mem *mem)
+static int mchp_coreqspi_wait_for_ready(struct mchp_coreqspi *qspi)
 {
-	struct mchp_coreqspi *qspi = spi_controller_get_devdata
-				    (mem->spi->controller);
 	u32 status;
-	int ret;
 
-	ret = readl_poll_timeout(qspi->regs + REG_STATUS, status,
+	return readl_poll_timeout(qspi->regs + REG_STATUS, status,
 				 (status & STATUS_READY), 0,
 				 TIMEOUT_MS);
-	if (ret) {
-		dev_err(&mem->spi->dev,
-			"Timeout waiting on QSPI ready.\n");
-		return -ETIMEDOUT;
-	}
-
-	return ret;
 }
 
 static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
@@ -396,11 +467,13 @@ static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o
 	int err, i;
 
 	mutex_lock(&qspi->op_lock);
-	err = mchp_qspi_wait_for_ready(mem);
-	if (err)
+	err = mchp_coreqspi_wait_for_ready(qspi);
+	if (err) {
+		dev_err(&mem->spi->dev, "Timeout waiting on QSPI ready.\n");
 		goto error;
+	}
 
-	err = mchp_coreqspi_setup_clock(qspi, mem->spi, op);
+	err = mchp_coreqspi_setup_clock(qspi, mem->spi, op->max_freq);
 	if (err)
 		goto error;
 
@@ -415,7 +488,7 @@ static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o
 		qspi->rxbuf = NULL;
 		qspi->tx_len = op->cmd.nbytes;
 		qspi->rx_len = 0;
-		mchp_coreqspi_write_op(qspi, false);
+		mchp_coreqspi_write_op(qspi);
 	}
 
 	qspi->txbuf = &opaddr[0];
@@ -426,7 +499,7 @@ static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o
 		qspi->rxbuf = NULL;
 		qspi->tx_len = op->addr.nbytes;
 		qspi->rx_len = 0;
-		mchp_coreqspi_write_op(qspi, false);
+		mchp_coreqspi_write_op(qspi);
 	}
 
 	if (op->data.nbytes) {
@@ -435,7 +508,7 @@ static int mchp_coreqspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o
 			qspi->rxbuf = NULL;
 			qspi->rx_len = 0;
 			qspi->tx_len = op->data.nbytes;
-			mchp_coreqspi_write_op(qspi, true);
+			mchp_coreqspi_write_op(qspi);
 		} else {
 			qspi->txbuf = NULL;
 			qspi->rxbuf = (u8 *)op->data.buf.in;
@@ -515,6 +588,109 @@ static const struct spi_controller_mem_caps mchp_coreqspi_mem_caps = {
 	.per_op_freq = true,
 };
 
+static int mchp_coreqspi_unprepare_message(struct spi_controller *ctlr, struct spi_message *m)
+{
+	struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr);
+
+	/*
+	 * This delay is required for the driver to function correctly,
+	 * but no explanation has been determined for why it is required.
+	 */
+	udelay(750);
+
+	mutex_unlock(&qspi->op_lock);
+
+	return 0;
+}
+
+static int mchp_coreqspi_prepare_message(struct spi_controller *ctlr, struct spi_message *m)
+{
+	struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr);
+	struct spi_transfer *t = NULL;
+	u32 control, frames;
+	u32 total_bytes = 0, cmd_bytes = 0, idle_cycles = 0;
+	int ret;
+	bool quad = false, dual = false;
+
+	mutex_lock(&qspi->op_lock);
+	ret = mchp_coreqspi_wait_for_ready(qspi);
+	if (ret) {
+		mutex_unlock(&qspi->op_lock);
+		dev_err(&ctlr->dev, "Timeout waiting on QSPI ready.\n");
+		return ret;
+	}
+
+	ret = mchp_coreqspi_setup_clock(qspi, m->spi, m->spi->max_speed_hz);
+	if (ret) {
+		mutex_unlock(&qspi->op_lock);
+		return ret;
+	}
+
+	control = readl_relaxed(qspi->regs + REG_CONTROL);
+	control &= ~(CONTROL_MODE12_MASK | CONTROL_MODE0);
+	writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+	reinit_completion(&qspi->data_completion);
+
+	list_for_each_entry(t, &m->transfers, transfer_list) {
+		total_bytes += t->len;
+		if (!cmd_bytes && !(t->tx_buf && t->rx_buf))
+			cmd_bytes = t->len;
+		if (!t->rx_buf)
+			cmd_bytes = total_bytes;
+		if (t->tx_nbits == SPI_NBITS_QUAD || t->rx_nbits == SPI_NBITS_QUAD)
+			quad = true;
+		else if (t->tx_nbits == SPI_NBITS_DUAL || t->rx_nbits == SPI_NBITS_DUAL)
+			dual = true;
+	}
+
+	control = readl_relaxed(qspi->regs + REG_CONTROL);
+	if (quad) {
+		control |= (CONTROL_MODE0 | CONTROL_MODE12_EX_RW);
+	} else if (dual) {
+		control &= ~CONTROL_MODE0;
+		control |= CONTROL_MODE12_FULL;
+	} else {
+		control &= ~(CONTROL_MODE12_MASK | CONTROL_MODE0);
+	}
+	writel_relaxed(control, qspi->regs + REG_CONTROL);
+
+	frames = total_bytes & BYTESUPPER_MASK;
+	writel_relaxed(frames, qspi->regs + REG_FRAMESUP);
+	frames = total_bytes & BYTESLOWER_MASK;
+	frames |= cmd_bytes << FRAMES_CMDBYTES_SHIFT;
+	frames |= idle_cycles << FRAMES_IDLE_SHIFT;
+	control = readl_relaxed(qspi->regs + REG_CONTROL);
+	if (control & CONTROL_MODE12_MASK)
+		frames |= (1 << FRAMES_SHIFT);
+
+	frames |= FRAMES_FLAGWORD;
+	writel_relaxed(frames, qspi->regs + REG_FRAMES);
+
+	return 0;
+};
+
+static int mchp_coreqspi_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
+				      struct spi_transfer *t)
+{
+	struct mchp_coreqspi *qspi = spi_controller_get_devdata(ctlr);
+
+	qspi->tx_len = t->len;
+
+	if (t->tx_buf)
+		qspi->txbuf = (u8 *)t->tx_buf;
+
+	if (!t->rx_buf) {
+		mchp_coreqspi_write_op(qspi);
+	} else {
+		qspi->rxbuf = (u8 *)t->rx_buf;
+		qspi->rx_len = t->len;
+		mchp_coreqspi_write_read_op(qspi);
+	}
+
+	return 0;
+}
+
 static int mchp_coreqspi_probe(struct platform_device *pdev)
 {
 	struct spi_controller *ctlr;
@@ -562,6 +738,12 @@ static int mchp_coreqspi_probe(struct platform_device *pdev)
 	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
 			  SPI_TX_DUAL | SPI_TX_QUAD;
 	ctlr->dev.of_node = np;
+	ctlr->min_speed_hz = clk_get_rate(qspi->clk) / 30;
+	ctlr->prepare_message = mchp_coreqspi_prepare_message;
+	ctlr->unprepare_message = mchp_coreqspi_unprepare_message;
+	ctlr->transfer_one = mchp_coreqspi_transfer_one;
+	ctlr->num_chipselect = 2;
+	ctlr->use_gpio_descriptors = true;
 
 	ret = devm_spi_register_controller(&pdev->dev, ctlr);
 	if (ret)
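
Usage note (not part of the commit above): with prepare_message(), transfer_one() and unprepare_message() wired up, ordinary SPI clients can now reach this controller in addition to the existing spi-mem path. The following is a minimal client-side sketch of a full-duplex transfer that the SPI core would route through those callbacks and that mchp_coreqspi_write_read_op() would service; the helper name example_full_duplex_xfer, the buffer sizes and the 0x9f opcode are illustrative assumptions only, not part of the driver.

#include <linux/spi/spi.h>

/* Hypothetical client helper, for illustration only. */
static int example_full_duplex_xfer(struct spi_device *spi)
{
	/*
	 * Stack buffers keep the sketch short; real callers should use
	 * DMA-safe memory even though this controller transfers by PIO.
	 */
	u8 tx[8] = { 0x9f };	/* arbitrary opcode plus dummy bytes */
	u8 rx[8] = { 0 };
	struct spi_transfer xfer = {
		.tx_buf = tx,
		.rx_buf = rx,
		.len = sizeof(tx),
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	/*
	 * spi_sync() has the core call the controller's prepare_message(),
	 * then transfer_one() for each transfer, then unprepare_message().
	 */
	return spi_sync(spi, &msg);
}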
