Diffstat (limited to 'drivers/i2c/busses/i2c-tegra.c')
-rw-r--r-- | drivers/i2c/busses/i2c-tegra.c | 178
1 file changed, 116 insertions, 62 deletions
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 61339c665ebd..cbc2ad49043e 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -16,7 +16,9 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
+#include <linux/irq.h>
 #include <linux/kernel.h>
+#include <linux/ktime.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/pinctrl/consumer.h>
@@ -129,11 +131,12 @@
 #define I2C_PACKET_HEADER_SIZE			12
 
 /*
- * Upto I2C_PIO_MODE_MAX_LEN bytes, controller will use PIO mode,
- * above this, controller will use DMA to fill FIFO.
- * MAX PIO len is 20 bytes excluding packet header.
+ * I2C Controller will use PIO mode for transfers up to 32 bytes in order to
+ * avoid DMA overhead, otherwise external APB DMA controller will be used.
+ * Note that the actual MAX PIO length is 20 bytes because 32 bytes include
+ * I2C_PACKET_HEADER_SIZE.
  */
-#define I2C_PIO_MODE_MAX_LEN			32
+#define I2C_PIO_MODE_PREFERRED_LEN		32
 
 /*
  * msg_end_type: The bus control which need to be send at end of transfer.
@@ -230,7 +233,6 @@ struct tegra_i2c_hw_feature {
  * @base_phys: physical base address of the I2C controller
  * @cont_id: I2C controller ID, used for packet header
  * @irq: IRQ number of transfer complete interrupt
- * @irq_disabled: used to track whether or not the interrupt is enabled
  * @is_dvc: identifies the DVC I2C controller, has a different register layout
  * @msg_complete: transfer completion notifier
  * @msg_err: error code for completed message
@@ -240,7 +242,6 @@ struct tegra_i2c_hw_feature {
  * @bus_clk_rate: current I2C bus clock rate
  * @clk_divisor_non_hs_mode: clock divider for non-high-speed modes
  * @is_multimaster_mode: track if I2C controller is in multi-master mode
- * @xfer_lock: lock to serialize transfer submission and processing
  * @tx_dma_chan: DMA transmit channel
  * @rx_dma_chan: DMA receive channel
  * @dma_phys: handle to DMA resources
@@ -248,6 +249,7 @@ struct tegra_i2c_hw_feature {
  * @dma_buf_size: DMA buffer size
  * @is_curr_dma_xfer: indicates active DMA transfer
  * @dma_complete: DMA completion notifier
+ * @is_curr_atomic_xfer: indicates active atomic transfer
  */
 struct tegra_i2c_dev {
 	struct device *dev;
@@ -260,7 +262,6 @@ struct tegra_i2c_dev {
 	phys_addr_t base_phys;
 	int cont_id;
 	int irq;
-	bool irq_disabled;
 	int is_dvc;
 	struct completion msg_complete;
 	int msg_err;
@@ -270,8 +271,6 @@ struct tegra_i2c_dev {
 	u32 bus_clk_rate;
 	u16 clk_divisor_non_hs_mode;
 	bool is_multimaster_mode;
-	/* xfer_lock: lock to serialize transfer submission and processing */
-	spinlock_t xfer_lock;
 	struct dma_chan *tx_dma_chan;
 	struct dma_chan *rx_dma_chan;
 	dma_addr_t dma_phys;
@@ -279,17 +278,18 @@ struct tegra_i2c_dev {
 	unsigned int dma_buf_size;
 	bool is_curr_dma_xfer;
 	struct completion dma_complete;
+	bool is_curr_atomic_xfer;
 };
 
 static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val,
 		       unsigned long reg)
 {
-	writel(val, i2c_dev->base + reg);
+	writel_relaxed(val, i2c_dev->base + reg);
 }
 
 static u32 dvc_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg)
 {
-	return readl(i2c_dev->base + reg);
+	return readl_relaxed(i2c_dev->base + reg);
 }
 
 /*
@@ -307,16 +307,16 @@ static unsigned long tegra_i2c_reg_addr(struct tegra_i2c_dev *i2c_dev,
 static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val,
 		       unsigned long reg)
 {
-	writel(val, i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+	writel_relaxed(val, i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
 
 	/* Read back register to make sure that register writes completed */
 	if (reg != I2C_TX_FIFO)
-		readl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+		readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
 }
 
 static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg)
 {
-	return readl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+	return readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
 }
 
 static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data,
@@ -687,13 +687,15 @@ static int tegra_i2c_wait_for_config_load(struct tegra_i2c_dev *i2c_dev)
 	reg_offset = tegra_i2c_reg_addr(i2c_dev, I2C_CONFIG_LOAD);
 	addr = i2c_dev->base + reg_offset;
 	i2c_writel(i2c_dev, I2C_MSTR_CONFIG_LOAD, I2C_CONFIG_LOAD);
-	if (in_interrupt())
-		err = readl_poll_timeout_atomic(addr, val, val == 0,
-						1000,
-						I2C_CONFIG_LOAD_TIMEOUT);
+
+	if (i2c_dev->is_curr_atomic_xfer)
+		err = readl_relaxed_poll_timeout_atomic(
+						addr, val, val == 0, 1000,
+						I2C_CONFIG_LOAD_TIMEOUT);
 	else
-		err = readl_poll_timeout(addr, val, val == 0, 1000,
-					 I2C_CONFIG_LOAD_TIMEOUT);
+		err = readl_relaxed_poll_timeout(
+						addr, val, val == 0, 1000,
+						I2C_CONFIG_LOAD_TIMEOUT);
 
 	if (err) {
 		dev_warn(i2c_dev->dev,
@@ -790,11 +792,6 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit)
 	if (err)
 		return err;
 
-	if (i2c_dev->irq_disabled) {
-		i2c_dev->irq_disabled = false;
-		enable_irq(i2c_dev->irq);
-	}
-
 	return 0;
 }
 
@@ -825,18 +822,12 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
 
 	status = i2c_readl(i2c_dev, I2C_INT_STATUS);
 
-	spin_lock(&i2c_dev->xfer_lock);
 	if (status == 0) {
 		dev_warn(i2c_dev->dev, "irq status 0 %08x %08x %08x\n",
 			 i2c_readl(i2c_dev, I2C_PACKET_TRANSFER_STATUS),
 			 i2c_readl(i2c_dev, I2C_STATUS),
 			 i2c_readl(i2c_dev, I2C_CNFG));
 		i2c_dev->msg_err |= I2C_ERR_UNKNOWN_INTERRUPT;
-
-		if (!i2c_dev->irq_disabled) {
-			disable_irq_nosync(i2c_dev->irq);
-			i2c_dev->irq_disabled = true;
-		}
 		goto err;
 	}
 
@@ -925,7 +916,6 @@ err:
 
 	complete(&i2c_dev->msg_complete);
 done:
-	spin_unlock(&i2c_dev->xfer_lock);
 	return IRQ_HANDLED;
 }
 
@@ -999,6 +989,64 @@ out:
 	i2c_writel(i2c_dev, val, reg);
 }
 
+static unsigned long
+tegra_i2c_poll_completion_timeout(struct tegra_i2c_dev *i2c_dev,
+				  struct completion *complete,
+				  unsigned int timeout_ms)
+{
+	ktime_t ktime = ktime_get();
+	ktime_t ktimeout = ktime_add_ms(ktime, timeout_ms);
+
+	do {
+		u32 status = i2c_readl(i2c_dev, I2C_INT_STATUS);
+
+		if (status) {
+			tegra_i2c_isr(i2c_dev->irq, i2c_dev);
+
+			if (completion_done(complete)) {
+				s64 delta = ktime_ms_delta(ktimeout, ktime);
+
+				return msecs_to_jiffies(delta) ?: 1;
+			}
+		}
+
+		ktime = ktime_get();
+
+	} while (ktime_before(ktime, ktimeout));
+
+	return 0;
+}
+
+static unsigned long
+tegra_i2c_wait_completion_timeout(struct tegra_i2c_dev *i2c_dev,
+				  struct completion *complete,
+				  unsigned int timeout_ms)
+{
+	unsigned long ret;
+
+	if (i2c_dev->is_curr_atomic_xfer) {
+		ret = tegra_i2c_poll_completion_timeout(i2c_dev, complete,
+							timeout_ms);
+	} else {
+		enable_irq(i2c_dev->irq);
+		ret = wait_for_completion_timeout(complete,
+						  msecs_to_jiffies(timeout_ms));
+		disable_irq(i2c_dev->irq);
+
+		/*
+		 * There is a chance that completion may happen after IRQ
+		 * synchronization, which is done by disable_irq().
+		 */
+		if (ret == 0 && completion_done(complete)) {
+			dev_warn(i2c_dev->dev,
+				 "completion done after timeout\n");
+			ret = 1;
+		}
+	}
+
+	return ret;
+}
+
 static int tegra_i2c_issue_bus_clear(struct i2c_adapter *adap)
 {
 	struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
@@ -1020,8 +1068,8 @@ static int tegra_i2c_issue_bus_clear(struct i2c_adapter *adap)
 	i2c_writel(i2c_dev, reg, I2C_BUS_CLEAR_CNFG);
 	tegra_i2c_unmask_irq(i2c_dev, I2C_INT_BUS_CLR_DONE);
 
-	time_left = wait_for_completion_timeout(&i2c_dev->msg_complete,
-						msecs_to_jiffies(50));
+	time_left = tegra_i2c_wait_completion_timeout(
+			i2c_dev, &i2c_dev->msg_complete, 50);
 	if (time_left == 0) {
 		dev_err(i2c_dev->dev, "timed out for bus clear\n");
 		return -ETIMEDOUT;
@@ -1044,7 +1092,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
 	u32 packet_header;
 	u32 int_mask;
 	unsigned long time_left;
-	unsigned long flags;
 	size_t xfer_size;
 	u32 *buffer = NULL;
 	int err = 0;
@@ -1065,8 +1112,9 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
 
 	xfer_size = msg->len + I2C_PACKET_HEADER_SIZE;
 	xfer_size = ALIGN(xfer_size, BYTES_PER_FIFO_WORD);
-	i2c_dev->is_curr_dma_xfer = (xfer_size > I2C_PIO_MODE_MAX_LEN) &&
-				    i2c_dev->dma_buf;
+	i2c_dev->is_curr_dma_xfer = (xfer_size > I2C_PIO_MODE_PREFERRED_LEN) &&
+				    i2c_dev->dma_buf &&
+				    !i2c_dev->is_curr_atomic_xfer;
 	tegra_i2c_config_fifo_trig(i2c_dev, xfer_size);
 	dma = i2c_dev->is_curr_dma_xfer;
 	/*
@@ -1075,7 +1123,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
 	 */
 	xfer_time += DIV_ROUND_CLOSEST(((xfer_size * 9) + 2) * MSEC_PER_SEC,
 				       i2c_dev->bus_clk_rate);
 
-	spin_lock_irqsave(&i2c_dev->xfer_lock, flags);
 	int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
 	tegra_i2c_unmask_irq(i2c_dev, int_mask);
@@ -1090,7 +1137,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
 				dev_err(i2c_dev->dev,
 					"starting RX DMA failed, err %d\n",
 					err);
-				goto unlock;
+				return err;
 			}
 
 		} else {
@@ -1149,7 +1196,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
 				dev_err(i2c_dev->dev,
 					"starting TX DMA failed, err %d\n",
 					err);
-				goto unlock;
+				return err;
 			}
 		} else {
 			tegra_i2c_fill_tx_fifo(i2c_dev);
@@ -1169,20 +1216,16 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
 	dev_dbg(i2c_dev->dev, "unmasked irq: %02x\n",
 		i2c_readl(i2c_dev, I2C_INT_MASK));
 
-unlock:
-	spin_unlock_irqrestore(&i2c_dev->xfer_lock, flags);
-
 	if (dma) {
-		if (err)
-			return err;
+		time_left = tegra_i2c_wait_completion_timeout(
+				i2c_dev, &i2c_dev->dma_complete, xfer_time);
+
+		dmaengine_terminate_sync(i2c_dev->msg_read ?
+					 i2c_dev->rx_dma_chan :
+					 i2c_dev->tx_dma_chan);
 
-		time_left = wait_for_completion_timeout(&i2c_dev->dma_complete,
-							msecs_to_jiffies(xfer_time));
-		if (time_left == 0) {
+		if (!time_left && !completion_done(&i2c_dev->dma_complete)) {
 			dev_err(i2c_dev->dev, "DMA transfer timeout\n");
-			dmaengine_terminate_sync(i2c_dev->msg_read ?
-						 i2c_dev->rx_dma_chan :
-						 i2c_dev->tx_dma_chan);
 			tegra_i2c_init(i2c_dev, true);
 			return -ETIMEDOUT;
 		}
@@ -1195,20 +1238,15 @@ unlock:
 			memcpy(i2c_dev->msg_buf, i2c_dev->dma_buf,
 			       msg->len);
 		}
-
-		if (i2c_dev->msg_err != I2C_ERR_NONE)
-			dmaengine_synchronize(i2c_dev->msg_read ?
-					      i2c_dev->rx_dma_chan :
-					      i2c_dev->tx_dma_chan);
 	}
 
-	time_left = wait_for_completion_timeout(&i2c_dev->msg_complete,
-						msecs_to_jiffies(xfer_time));
+	time_left = tegra_i2c_wait_completion_timeout(
+			i2c_dev, &i2c_dev->msg_complete, xfer_time);
+
 	tegra_i2c_mask_irq(i2c_dev, int_mask);
 
 	if (time_left == 0) {
 		dev_err(i2c_dev->dev, "i2c transfer timed out\n");
-
 		tegra_i2c_init(i2c_dev, true);
 		return -ETIMEDOUT;
 	}
@@ -1270,6 +1308,19 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 	return ret ?: i;
 }
 
+static int tegra_i2c_xfer_atomic(struct i2c_adapter *adap,
+				 struct i2c_msg msgs[], int num)
+{
+	struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
+	int ret;
+
+	i2c_dev->is_curr_atomic_xfer = true;
+	ret = tegra_i2c_xfer(adap, msgs, num);
+	i2c_dev->is_curr_atomic_xfer = false;
+
+	return ret;
+}
+
 static u32 tegra_i2c_func(struct i2c_adapter *adap)
 {
 	struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
@@ -1297,8 +1348,9 @@ static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev)
 }
 
 static const struct i2c_algorithm tegra_i2c_algo = {
-	.master_xfer = tegra_i2c_xfer,
-	.functionality = tegra_i2c_func,
+	.master_xfer		= tegra_i2c_xfer,
+	.master_xfer_atomic	= tegra_i2c_xfer_atomic,
+	.functionality		= tegra_i2c_func,
 };
 
 /* payload size is only 12 bit */
@@ -1568,7 +1620,6 @@ static int tegra_i2c_probe(struct platform_device *pdev)
 					  I2C_PACKET_HEADER_SIZE;
 	init_completion(&i2c_dev->msg_complete);
 	init_completion(&i2c_dev->dma_complete);
-	spin_lock_init(&i2c_dev->xfer_lock);
 
 	if (!i2c_dev->hw->has_single_clk_source) {
 		fast_clk = devm_clk_get(&pdev->dev, "fast-clk");
@@ -1607,6 +1658,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
 		goto unprepare_fast_clk;
 	}
 
+	pm_runtime_irq_safe(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
 	if (!pm_runtime_enabled(&pdev->dev)) {
 		ret = tegra_i2c_runtime_resume(&pdev->dev);
@@ -1644,6 +1696,8 @@ static int tegra_i2c_probe(struct platform_device *pdev)
 		goto release_dma;
 	}
 
+	irq_set_status_flags(i2c_dev->irq, IRQ_NOAUTOEN);
+
 	ret = devm_request_irq(&pdev->dev, i2c_dev->irq, tegra_i2c_isr, 0,
 			       dev_name(&pdev->dev), i2c_dev);
 	if (ret) {
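
For context on how the new callback gets exercised: the I2C core dispatches a transfer to an adapter's .master_xfer_atomic callback instead of .master_xfer when the transfer is requested in an atomic context, typically very late in the shutdown or poweroff path with interrupts disabled. The sketch below is not part of this patch; the PMIC device, register and value are hypothetical placeholders. It only illustrates how a client-side write issued from a poweroff handler would end up in tegra_i2c_xfer_atomic() on a Tegra board:

#include <linux/i2c.h>

/* Hypothetical PMIC "power off" command; register and value are made up. */
#define EXAMPLE_PMIC_REG_DEV_CTRL	0x3f
#define EXAMPLE_PMIC_POWER_OFF		0x80

/* Saved by the (hypothetical) PMIC driver at probe time. */
static struct i2c_client *example_pmic;

/* Installed as pm_power_off(); runs with interrupts disabled. */
static void example_pmic_power_off(void)
{
	u8 buf[2] = { EXAMPLE_PMIC_REG_DEV_CTRL, EXAMPLE_PMIC_POWER_OFF };
	struct i2c_msg msg = {
		.addr	= example_pmic->addr,
		.flags	= 0,		/* plain write */
		.len	= sizeof(buf),
		.buf	= buf,
	};

	/*
	 * The system is past SYSTEM_RUNNING and IRQs are off, so the I2C
	 * core routes this through the adapter's .master_xfer_atomic
	 * callback rather than .master_xfer.
	 */
	if (i2c_transfer(example_pmic->adapter, &msg, 1) != 1)
		pr_err("example-pmic: power-off command failed\n");
}

Since no completion interrupt can be serviced in that context, the patch keeps the controller IRQ disabled by default (IRQ_NOAUTOEN), enables it only around ordinary waits in tegra_i2c_wait_completion_timeout(), and in the atomic case polls I2C_INT_STATUS and invokes the ISR by hand via tegra_i2c_poll_completion_timeout().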