Diffstat (limited to 'drivers/spi/spi.c')
-rw-r--r-- | drivers/spi/spi.c | 75
1 files changed, 53 insertions, 22 deletions
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 8158e281f354..6626587e77b4 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -778,6 +778,17 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
 {
 	bool enable1 = enable;
 
+	/*
+	 * Avoid calling into the driver (or doing delays) if the chip select
+	 * isn't actually changing from the last time this was called.
+	 */
+	if ((spi->controller->last_cs_enable == enable) &&
+	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
+		return;
+
+	spi->controller->last_cs_enable = enable;
+	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
+
 	if (!spi->controller->set_cs_timing) {
 		if (enable1)
 			spi_delay_exec(&spi->controller->cs_setup, NULL);
@@ -982,6 +993,8 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
 		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
 	}
 
+	ctlr->cur_msg_mapped = false;
+
 	return 0;
 }
 #else /* !CONFIG_HAS_DMA */
@@ -1234,8 +1247,17 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
 		if (xfer->tx_buf || xfer->rx_buf) {
 			reinit_completion(&ctlr->xfer_completion);
 
+fallback_pio:
 			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
 			if (ret < 0) {
+				if (ctlr->cur_msg_mapped &&
+				   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
+					__spi_unmap_msg(ctlr, msg);
+					ctlr->fallback = true;
+					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
+					goto fallback_pio;
+				}
+
 				SPI_STATISTICS_INCREMENT_FIELD(statm,
 							       errors);
 				SPI_STATISTICS_INCREMENT_FIELD(stats,
@@ -1314,6 +1336,14 @@ void spi_finalize_current_transfer(struct spi_controller *ctlr)
 }
 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
 
+static void spi_idle_runtime_pm(struct spi_controller *ctlr)
+{
+	if (ctlr->auto_runtime_pm) {
+		pm_runtime_mark_last_busy(ctlr->dev.parent);
+		pm_runtime_put_autosuspend(ctlr->dev.parent);
+	}
+}
+
 /**
  * __spi_pump_messages - function which processes spi message queue
  * @ctlr: controller to process queue for
@@ -1346,7 +1376,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 
 	/* If another context is idling the device then defer */
 	if (ctlr->idling) {
-		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 		return;
 	}
@@ -1358,10 +1388,17 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 			return;
 		}
 
-		/* Only do teardown in the thread */
+		/* Defer any non-atomic teardown to the thread */
 		if (!in_kthread) {
-			kthread_queue_work(&ctlr->kworker,
-					   &ctlr->pump_messages);
+			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
+			    !ctlr->unprepare_transfer_hardware) {
+				spi_idle_runtime_pm(ctlr);
+				ctlr->busy = false;
+				trace_spi_controller_idle(ctlr);
+			} else {
+				kthread_queue_work(ctlr->kworker,
+						   &ctlr->pump_messages);
+			}
 			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 			return;
 		}
@@ -1378,10 +1415,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 		    ctlr->unprepare_transfer_hardware(ctlr))
 			dev_err(&ctlr->dev,
 				"failed to unprepare transfer hardware\n");
-		if (ctlr->auto_runtime_pm) {
-			pm_runtime_mark_last_busy(ctlr->dev.parent);
-			pm_runtime_put_autosuspend(ctlr->dev.parent);
-		}
+		spi_idle_runtime_pm(ctlr);
 		trace_spi_controller_idle(ctlr);
 
 		spin_lock_irqsave(&ctlr->queue_lock, flags);
@@ -1592,11 +1626,9 @@ EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
  */
 static void spi_set_thread_rt(struct spi_controller *ctlr)
 {
-	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
-
 	dev_info(&ctlr->dev,
 		"will run message pump with realtime priority\n");
-	sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
+	sched_set_fifo(ctlr->kworker->task);
 }
 
 static int spi_init_queue(struct spi_controller *ctlr)
@@ -1604,13 +1636,12 @@ static int spi_init_queue(struct spi_controller *ctlr)
 	ctlr->running = false;
 	ctlr->busy = false;
 
-	kthread_init_worker(&ctlr->kworker);
-	ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
-					 "%s", dev_name(&ctlr->dev));
-	if (IS_ERR(ctlr->kworker_task)) {
-		dev_err(&ctlr->dev, "failed to create message pump task\n");
-		return PTR_ERR(ctlr->kworker_task);
+	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
+	if (IS_ERR(ctlr->kworker)) {
+		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
+		return PTR_ERR(ctlr->kworker);
 	}
+
 	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
 
 	/*
@@ -1693,7 +1724,8 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
 	spin_lock_irqsave(&ctlr->queue_lock, flags);
 	ctlr->cur_msg = NULL;
 	ctlr->cur_msg_prepared = false;
-	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+	ctlr->fallback = false;
+	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
 	trace_spi_message_done(mesg);
@@ -1719,7 +1751,7 @@ static int spi_start_queue(struct spi_controller *ctlr)
 	ctlr->cur_msg = NULL;
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
-	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
 
 	return 0;
 }
@@ -1775,8 +1807,7 @@ static int spi_destroy_queue(struct spi_controller *ctlr)
 		return ret;
 	}
 
-	kthread_flush_worker(&ctlr->kworker);
-	kthread_stop(ctlr->kworker_task);
+	kthread_destroy_worker(ctlr->kworker);
 
 	return 0;
 }
@@ -1799,7 +1830,7 @@ static int __spi_queued_transfer(struct spi_device *spi,
 
 	list_add_tail(&msg->queue, &ctlr->queue);
 	if (!ctlr->busy && need_pump)
-		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
 
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 	return 0;
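
The fallback_pio path added to spi_transfer_one_message() above gives controller drivers a way to retry in PIO mode when a DMA-mapped transfer could not even be started: the driver sets SPI_TRANS_FAIL_NO_START in xfer->error and returns an error from transfer_one(), and the core then unmaps the message, sets ctlr->fallback and jumps back to retry the same transfer. The sketch below is only an illustration of that driver-side contract, not code from the patch or from any real driver; my_start_dma(), my_pio_transfer() and MY_DMA_MIN_LEN are hypothetical stand-ins.

#include <linux/spi/spi.h>

#define MY_DMA_MIN_LEN	32	/* hypothetical DMA threshold */

/* Hypothetical stand-ins for a real driver's DMA/PIO engine code. */
static int my_start_dma(struct spi_transfer *xfer) { return -EINVAL; }
static int my_pio_transfer(struct spi_transfer *xfer) { return 0; }

static bool my_can_dma(struct spi_controller *ctlr, struct spi_device *spi,
		       struct spi_transfer *xfer)
{
	/* once the core has fallen back, stop asking for DMA mapping */
	if (ctlr->fallback)
		return false;

	return xfer->len >= MY_DMA_MIN_LEN;
}

static int my_transfer_one(struct spi_controller *ctlr, struct spi_device *spi,
			   struct spi_transfer *xfer)
{
	if (ctlr->cur_msg_mapped && my_can_dma(ctlr, spi, xfer)) {
		int ret = my_start_dma(xfer);

		if (ret) {
			/*
			 * Nothing was started on the wire: flag it so the
			 * core unmaps the message, sets ctlr->fallback and
			 * retries this transfer in PIO mode.
			 */
			xfer->error |= SPI_TRANS_FAIL_NO_START;
			return ret;
		}
		return 1;	/* in flight; completion finalizes it */
	}

	return my_pio_transfer(xfer);
}

Note that can_dma() has to check ctlr->fallback; otherwise the retried transfer would be mapped for DMA again and the driver could fail the same way in a loop.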
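
The remaining hunks convert the message pump from an open-coded kthread_init_worker()/kthread_run() pair to the dedicated kthread_worker API: kthread_create_worker() returns a ready-to-use worker, every kthread_queue_work() caller takes the worker pointer instead of its address, and teardown collapses into a single kthread_destroy_worker(). A minimal sketch of the same pattern in isolation follows; it is not part of the patch, and the my_* names are purely illustrative.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/kthread.h>

struct my_ctlr {
	struct kthread_worker *kworker;
	struct kthread_work pump_messages;
};

static void my_pump_messages(struct kthread_work *work)
{
	struct my_ctlr *c = container_of(work, struct my_ctlr, pump_messages);

	pr_debug("pumping messages for controller %p\n", c);
}

static int my_init_queue(struct my_ctlr *c, const char *name)
{
	/* one call creates and starts the dedicated worker thread */
	c->kworker = kthread_create_worker(0, "%s", name);
	if (IS_ERR(c->kworker))
		return PTR_ERR(c->kworker);

	kthread_init_work(&c->pump_messages, my_pump_messages);
	kthread_queue_work(c->kworker, &c->pump_messages);
	return 0;
}

static void my_destroy_queue(struct my_ctlr *c)
{
	/* flushes pending work and stops the thread in one step */
	kthread_destroy_worker(c->kworker);
}

Owning the worker as a single pointer also means there is no separate task_struct to track, which is why sched_set_fifo() now reaches the pump thread through ctlr->kworker->task.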