Diffstat (limited to 'drivers/net/ipa')
34 files changed, 1211 insertions, 990 deletions
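The theme of this series is replacing the driver's hand-rolled "IPA clock" reference counting (ipa_clock_get()/ipa_clock_put()) with runtime power management, renaming the module from ipa_clock to ipa_power along the way. The idiom the converted code uses throughout -- visible below in the reworked ipa_isr_thread() -- is the standard runtime-PM get/put-autosuspend pattern. A minimal sketch of that pattern, assuming only a device with runtime PM enabled (ipa_do_work() is a hypothetical stand-in for code that touches IPA registers):

#include <linux/pm_runtime.h>

static int ipa_do_work(struct device *dev)
{
	int ret;

	/* Take a power reference; resumes the hardware if suspended */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the failed get */
		return ret;
	}

	/* ... access IPA registers here ... */

	/* Drop the reference; the PM core suspends the hardware only
	 * after the autosuspend delay expires with no new references.
	 */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
}

Unlike the removed ipa_clock_get(), which could block while enabling the core clock and interconnects under a private mutex, the reference count here is owned by the PM core, and the final put only schedules a suspend rather than performing one synchronously.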
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile index 506f8d5cd4ee..bdfb2430ab2c 100644 --- a/drivers/net/ipa/Makefile +++ b/drivers/net/ipa/Makefile @@ -1,9 +1,6 @@ -# Un-comment the next line if you want to validate configuration data -#ccflags-y += -DIPA_VALIDATE - obj-$(CONFIG_QCOM_IPA) += ipa.o -ipa-y := ipa_main.o ipa_clock.o ipa_reg.o ipa_mem.o \ +ipa-y := ipa_main.o ipa_power.o ipa_reg.o ipa_mem.o \ ipa_table.o ipa_interrupt.o gsi.o gsi_trans.o \ ipa_gsi.o ipa_smp2p.o ipa_uc.o \ ipa_endpoint.o ipa_cmd.o ipa_modem.o \ diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index 427c68b2ad8f..a2fcdb1abdb9 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -198,77 +198,6 @@ static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id) gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id)); } -/* Turn off all GSI interrupts initially; there is no gsi_irq_teardown() */ -static void gsi_irq_setup(struct gsi *gsi) -{ - /* Disable all interrupt types */ - gsi_irq_type_update(gsi, 0); - - /* Clear all type-specific interrupt masks */ - iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); - iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); - iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); - iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); - - /* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */ - if (gsi->version > IPA_VERSION_3_1) { - u32 offset; - - /* These registers are in the non-adjusted address range */ - offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET; - iowrite32(0, gsi->virt_raw + offset); - offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET; - iowrite32(0, gsi->virt_raw + offset); - } - - iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET); -} - -/* Get # supported channel and event rings; there is no gsi_ring_teardown() */ -static int gsi_ring_setup(struct gsi *gsi) -{ - struct device *dev = gsi->dev; - u32 count; - u32 val; - - if (gsi->version < IPA_VERSION_3_5_1) { - /* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */ - gsi->channel_count = GSI_CHANNEL_COUNT_MAX; - gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX; - - return 0; - } - - val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET); - - count = u32_get_bits(val, NUM_CH_PER_EE_FMASK); - if (!count) { - dev_err(dev, "GSI reports zero channels supported\n"); - return -EINVAL; - } - if (count > GSI_CHANNEL_COUNT_MAX) { - dev_warn(dev, "limiting to %u channels; hardware supports %u\n", - GSI_CHANNEL_COUNT_MAX, count); - count = GSI_CHANNEL_COUNT_MAX; - } - gsi->channel_count = count; - - count = u32_get_bits(val, NUM_EV_PER_EE_FMASK); - if (!count) { - dev_err(dev, "GSI reports zero event rings supported\n"); - return -EINVAL; - } - if (count > GSI_EVT_RING_COUNT_MAX) { - dev_warn(dev, - "limiting to %u event rings; hardware supports %u\n", - GSI_EVT_RING_COUNT_MAX, count); - count = GSI_EVT_RING_COUNT_MAX; - } - gsi->evt_ring_count = count; - - return 0; -} - /* Event ring commands are performed one at a time. Their completion * is signaled by the event ring control GSI interrupt type, which is * only enabled when we issue an event ring command. Only the event @@ -920,12 +849,13 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell) /* All done! 
*/ } -static int __gsi_channel_start(struct gsi_channel *channel, bool start) +static int __gsi_channel_start(struct gsi_channel *channel, bool resume) { struct gsi *gsi = channel->gsi; int ret; - if (!start) + /* Prior to IPA v4.0 suspend/resume is not implemented by GSI */ + if (resume && gsi->version < IPA_VERSION_4_0) return 0; mutex_lock(&gsi->mutex); @@ -947,7 +877,7 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id) napi_enable(&channel->napi); gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id); - ret = __gsi_channel_start(channel, true); + ret = __gsi_channel_start(channel, false); if (ret) { gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id); napi_disable(&channel->napi); @@ -971,7 +901,7 @@ static int gsi_channel_stop_retry(struct gsi_channel *channel) return ret; } -static int __gsi_channel_stop(struct gsi_channel *channel, bool stop) +static int __gsi_channel_stop(struct gsi_channel *channel, bool suspend) { struct gsi *gsi = channel->gsi; int ret; @@ -979,7 +909,8 @@ static int __gsi_channel_stop(struct gsi_channel *channel, bool stop) /* Wait for any underway transactions to complete before stopping. */ gsi_channel_trans_quiesce(channel); - if (!stop) + /* Prior to IPA v4.0 suspend/resume is not implemented by GSI */ + if (suspend && gsi->version < IPA_VERSION_4_0) return 0; mutex_lock(&gsi->mutex); @@ -997,7 +928,7 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id) struct gsi_channel *channel = &gsi->channel[channel_id]; int ret; - ret = __gsi_channel_stop(channel, true); + ret = __gsi_channel_stop(channel, false); if (ret) return ret; @@ -1026,13 +957,13 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell) mutex_unlock(&gsi->mutex); } -/* Stop a STARTED channel for suspend (using stop if requested) */ -int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop) +/* Stop a started channel for suspend */ +int gsi_channel_suspend(struct gsi *gsi, u32 channel_id) { struct gsi_channel *channel = &gsi->channel[channel_id]; int ret; - ret = __gsi_channel_stop(channel, stop); + ret = __gsi_channel_stop(channel, true); if (ret) return ret; @@ -1042,12 +973,24 @@ int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop) return 0; } -/* Resume a suspended channel (starting will be requested if STOPPED) */ -int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start) +/* Resume a suspended channel (starting if stopped) */ +int gsi_channel_resume(struct gsi *gsi, u32 channel_id) { struct gsi_channel *channel = &gsi->channel[channel_id]; - return __gsi_channel_start(channel, start); + return __gsi_channel_start(channel, true); +} + +/* Prevent all GSI interrupts while suspended */ +void gsi_suspend(struct gsi *gsi) +{ + disable_irq(gsi->irq); +} + +/* Allow all GSI interrupts again when resuming */ +void gsi_resume(struct gsi *gsi) +{ + enable_irq(gsi->irq); } /** @@ -1372,33 +1315,20 @@ static irqreturn_t gsi_isr(int irq, void *dev_id) return IRQ_HANDLED; } +/* Init function for GSI IRQ lookup; there is no gsi_irq_exit() */ static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev) { - struct device *dev = &pdev->dev; - unsigned int irq; int ret; ret = platform_get_irq_byname(pdev, "gsi"); if (ret <= 0) return ret ? 
: -EINVAL; - irq = ret; - - ret = request_irq(irq, gsi_isr, 0, "gsi", gsi); - if (ret) { - dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret); - return ret; - } - gsi->irq = irq; + gsi->irq = ret; return 0; } -static void gsi_irq_exit(struct gsi *gsi) -{ - free_irq(gsi->irq, gsi); -} - /* Return the transaction associated with a transfer completion event */ static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel, struct gsi_event *event) @@ -1876,6 +1806,93 @@ static void gsi_channel_teardown(struct gsi *gsi) gsi_irq_disable(gsi); } +/* Turn off all GSI interrupts initially */ +static int gsi_irq_setup(struct gsi *gsi) +{ + int ret; + + /* Writing 1 indicates IRQ interrupts; 0 would be MSI */ + iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET); + + /* Disable all interrupt types */ + gsi_irq_type_update(gsi, 0); + + /* Clear all type-specific interrupt masks */ + iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET); + iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET); + iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET); + iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET); + + /* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */ + if (gsi->version > IPA_VERSION_3_1) { + u32 offset; + + /* These registers are in the non-adjusted address range */ + offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET; + iowrite32(0, gsi->virt_raw + offset); + offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET; + iowrite32(0, gsi->virt_raw + offset); + } + + iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET); + + ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi); + if (ret) + dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret); + + return ret; +} + +static void gsi_irq_teardown(struct gsi *gsi) +{ + free_irq(gsi->irq, gsi); +} + +/* Get # supported channel and event rings; there is no gsi_ring_teardown() */ +static int gsi_ring_setup(struct gsi *gsi) +{ + struct device *dev = gsi->dev; + u32 count; + u32 val; + + if (gsi->version < IPA_VERSION_3_5_1) { + /* No HW_PARAM_2 register prior to IPA v3.5.1, assume the max */ + gsi->channel_count = GSI_CHANNEL_COUNT_MAX; + gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX; + + return 0; + } + + val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET); + + count = u32_get_bits(val, NUM_CH_PER_EE_FMASK); + if (!count) { + dev_err(dev, "GSI reports zero channels supported\n"); + return -EINVAL; + } + if (count > GSI_CHANNEL_COUNT_MAX) { + dev_warn(dev, "limiting to %u channels; hardware supports %u\n", + GSI_CHANNEL_COUNT_MAX, count); + count = GSI_CHANNEL_COUNT_MAX; + } + gsi->channel_count = count; + + count = u32_get_bits(val, NUM_EV_PER_EE_FMASK); + if (!count) { + dev_err(dev, "GSI reports zero event rings supported\n"); + return -EINVAL; + } + if (count > GSI_EVT_RING_COUNT_MAX) { + dev_warn(dev, + "limiting to %u event rings; hardware supports %u\n", + GSI_EVT_RING_COUNT_MAX, count); + count = GSI_EVT_RING_COUNT_MAX; + } + gsi->evt_ring_count = count; + + return 0; +} + /* Setup function for GSI. 
GSI firmware must be loaded and initialized */ int gsi_setup(struct gsi *gsi) { @@ -1889,25 +1906,34 @@ int gsi_setup(struct gsi *gsi) return -EIO; } - gsi_irq_setup(gsi); /* No matching teardown required */ + ret = gsi_irq_setup(gsi); + if (ret) + return ret; ret = gsi_ring_setup(gsi); /* No matching teardown required */ if (ret) - return ret; + goto err_irq_teardown; /* Initialize the error log */ iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET); - /* Writing 1 indicates IRQ interrupts; 0 would be MSI */ - iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET); + ret = gsi_channel_setup(gsi); + if (ret) + goto err_irq_teardown; - return gsi_channel_setup(gsi); + return 0; + +err_irq_teardown: + gsi_irq_teardown(gsi); + + return ret; } /* Inverse of gsi_setup() */ void gsi_teardown(struct gsi *gsi) { gsi_channel_teardown(gsi); + gsi_irq_teardown(gsi); } /* Initialize a channel's event ring */ @@ -1964,7 +1990,6 @@ static void gsi_evt_ring_init(struct gsi *gsi) static bool gsi_channel_data_valid(struct gsi *gsi, const struct ipa_gsi_endpoint_data *data) { -#ifdef IPA_VALIDATION u32 channel_id = data->channel_id; struct device *dev = gsi->dev; @@ -2010,7 +2035,6 @@ static bool gsi_channel_data_valid(struct gsi *gsi, channel_id, data->channel.event_count); return false; } -#endif /* IPA_VALIDATION */ return true; } @@ -2206,20 +2230,18 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, init_completion(&gsi->completion); - ret = gsi_irq_init(gsi, pdev); + ret = gsi_irq_init(gsi, pdev); /* No matching exit required */ if (ret) goto err_iounmap; ret = gsi_channel_init(gsi, count, data); if (ret) - goto err_irq_exit; + goto err_iounmap; mutex_init(&gsi->mutex); return 0; -err_irq_exit: - gsi_irq_exit(gsi); err_iounmap: iounmap(gsi->virt_raw); @@ -2231,7 +2253,6 @@ void gsi_exit(struct gsi *gsi) { mutex_destroy(&gsi->mutex); gsi_channel_exit(gsi); - gsi_irq_exit(gsi); iounmap(gsi->virt_raw); } diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h index 81cd7b07f6e1..88b80dc3db79 100644 --- a/drivers/net/ipa/gsi.h +++ b/drivers/net/ipa/gsi.h @@ -232,8 +232,35 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id); */ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell); -int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop); -int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start); +/** + * gsi_suspend() - Prepare the GSI subsystem for suspend + * @gsi: GSI pointer + */ +void gsi_suspend(struct gsi *gsi); + +/** + * gsi_resume() - Resume the GSI subsystem following suspend + * @gsi: GSI pointer + */ +void gsi_resume(struct gsi *gsi); + +/** + * gsi_channel_suspend() - Suspend a GSI channel + * @gsi: GSI pointer + * @channel_id: Channel to suspend + * + * For IPA v4.0+, suspend is implemented by stopping the channel. + */ +int gsi_channel_suspend(struct gsi *gsi, u32 channel_id); + +/** + * gsi_channel_resume() - Resume a suspended GSI channel + * @gsi: GSI pointer + * @channel_id: Channel to resume + * + * For IPA v4.0+, the stopped channel is started again. 
+ */ +int gsi_channel_resume(struct gsi *gsi, u32 channel_id); /** * gsi_init() - Initialize the GSI subsystem diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c index 8c795a6a8598..1544564bc283 100644 --- a/drivers/net/ipa/gsi_trans.c +++ b/drivers/net/ipa/gsi_trans.c @@ -90,14 +90,12 @@ int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count, { void *virt; -#ifdef IPA_VALIDATE if (!size) return -EINVAL; if (count < max_alloc) return -EINVAL; if (!max_alloc) return -EINVAL; -#endif /* IPA_VALIDATE */ /* By allocating a few extra entries in our pool (one less * than the maximum number that will be requested in a @@ -140,14 +138,12 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool, dma_addr_t addr; void *virt; -#ifdef IPA_VALIDATE if (!size) return -EINVAL; if (count < max_alloc) return -EINVAL; if (!max_alloc) return -EINVAL; -#endif /* IPA_VALIDATE */ /* Don't let allocations cross a power-of-two boundary */ size = __roundup_pow_of_two(size); @@ -188,8 +184,8 @@ static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count) { u32 offset; - /* assert(count > 0); */ - /* assert(count <= pool->max_alloc); */ + WARN_ON(!count); + WARN_ON(count > pool->max_alloc); /* Allocate from beginning if wrap would occur */ if (count > pool->count - pool->free) @@ -225,9 +221,10 @@ void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element) { void *end = pool->base + pool->count * pool->size; - /* assert(element >= pool->base); */ - /* assert(element < end); */ - /* assert(pool->max_alloc == 1); */ + WARN_ON(element < pool->base); + WARN_ON(element >= end); + WARN_ON(pool->max_alloc != 1); + element += pool->size; return element < end ? element : pool->base; @@ -332,7 +329,8 @@ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id, struct gsi_trans_info *trans_info; struct gsi_trans *trans; - /* assert(tre_count <= gsi_channel_trans_tre_max(gsi, channel_id)); */ + if (WARN_ON(tre_count > gsi_channel_trans_tre_max(gsi, channel_id))) + return NULL; trans_info = &channel->trans_info; @@ -408,7 +406,7 @@ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size, u32 which = trans->used++; struct scatterlist *sg; - /* assert(which < trans->tre_count); */ + WARN_ON(which >= trans->tre_count); /* Commands are quite different from data transfer requests. * Their payloads come from a pool whose memory is allocated @@ -441,8 +439,10 @@ int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size, struct scatterlist *sg = &trans->sgl[0]; int ret; - /* assert(trans->tre_count == 1); */ - /* assert(!trans->used); */ + if (WARN_ON(trans->tre_count != 1)) + return -EINVAL; + if (WARN_ON(trans->used)) + return -EINVAL; sg_set_page(sg, page, size, offset); ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction); @@ -461,8 +461,10 @@ int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb) u32 used; int ret; - /* assert(trans->tre_count == 1); */ - /* assert(!trans->used); */ + if (WARN_ON(trans->tre_count != 1)) + return -EINVAL; + if (WARN_ON(trans->used)) + return -EINVAL; /* skb->len will not be 0 (checked early) */ ret = skb_to_sgvec(skb, sg, 0, skb->len); @@ -550,7 +552,7 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db) u32 avail; u32 i; - /* assert(trans->used > 0); */ + WARN_ON(!trans->used); /* Consume the entries. If we cross the end of the ring while * filling them we'll switch to the beginning to finish. 
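A recurring change in gsi_trans.c above (and repeated across the series) is converting validation that was previously compiled in only with -DIPA_VALIDATE -- or survived only as /* assert(...) */ comments -- into unconditional WARN_ON() checks, paired with an error return where the caller can recover, as in gsi_channel_trans_alloc(). A condensed, self-contained sketch of the idiom (check_args() is a hypothetical stand-in):

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/types.h>

/* WARN_ON() evaluates to its condition, so it can gate an error
 * return while also logging a kernel warning with a backtrace
 * whenever the "impossible" case actually occurs.
 */
static int check_args(u32 count, u32 max_alloc)
{
	if (WARN_ON(!count || count > max_alloc))
		return -EINVAL;

	return 0;
}

The cases that only report a problem but cannot reasonably fail -- such as the size checks in ipa_cmd_dma_shared_mem_add() -- use a bare WARN_ON() and carry on.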
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h index 744406832a77..9fc880eb7e3a 100644 --- a/drivers/net/ipa/ipa.h +++ b/drivers/net/ipa/ipa.h @@ -23,34 +23,24 @@ struct icc_path; struct net_device; struct platform_device; -struct ipa_clock; +struct ipa_power; struct ipa_smp2p; struct ipa_interrupt; /** - * enum ipa_flag - IPA state flags - * @IPA_FLAG_RESUMED: Whether resume from suspend has been signaled - * @IPA_FLAG_COUNT: Number of defined IPA flags - */ -enum ipa_flag { - IPA_FLAG_RESUMED, - IPA_FLAG_COUNT, /* Last; not a flag */ -}; - -/** * struct ipa - IPA information * @gsi: Embedded GSI structure - * @flags: Boolean state flags * @version: IPA hardware version * @pdev: Platform device * @completion: Used to signal pipeline clear transfer complete * @nb: Notifier block used for remoteproc SSR * @notifier: Remoteproc SSR notifier * @smp2p: SMP2P information - * @clock: IPA clocking information + * @power: IPA power information * @table_addr: DMA address of filter/route table content * @table_virt: Virtual address of filter/route table content * @interrupt: IPA Interrupt information + * @uc_powered: true if power is active by proxy for microcontroller * @uc_loaded: true after microcontroller has reported it's ready * @reg_addr: DMA address used for IPA register access * @reg_virt: Virtual address used for IPA register access @@ -82,19 +72,19 @@ enum ipa_flag { */ struct ipa { struct gsi gsi; - DECLARE_BITMAP(flags, IPA_FLAG_COUNT); enum ipa_version version; struct platform_device *pdev; struct completion completion; struct notifier_block nb; void *notifier; struct ipa_smp2p *smp2p; - struct ipa_clock *clock; + struct ipa_power *power; dma_addr_t table_addr; __le64 *table_virt; struct ipa_interrupt *interrupt; + bool uc_powered; bool uc_loaded; dma_addr_t reg_addr; @@ -144,11 +134,11 @@ struct ipa { * * Activities performed at the init stage can be done without requiring * any access to IPA hardware. Activities performed at the config stage - * require the IPA clock to be running, because they involve access - * to IPA registers. The setup stage is performed only after the GSI - * hardware is ready (more on this below). The setup stage allows - * the AP to perform more complex initialization by issuing "immediate - * commands" using a special interface to the IPA. + * require IPA power, because they involve access to IPA registers. + * The setup stage is performed only after the GSI hardware is ready + * (more on this below). The setup stage allows the AP to perform + * more complex initialization by issuing "immediate commands" using + * a special interface to the IPA. * * This function, @ipa_setup(), starts the setup stage. * diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c deleted file mode 100644 index 69ef6ea41e61..000000000000 --- a/drivers/net/ipa/ipa_clock.c +++ /dev/null @@ -1,331 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2021 Linaro Ltd. - */ - -#include <linux/refcount.h> -#include <linux/mutex.h> -#include <linux/clk.h> -#include <linux/device.h> -#include <linux/interconnect.h> - -#include "ipa.h" -#include "ipa_clock.h" -#include "ipa_modem.h" -#include "ipa_data.h" - -/** - * DOC: IPA Clocking - * - * The "IPA Clock" manages both the IPA core clock and the interconnects - * (buses) the IPA depends on as a single logical entity. A reference count - * is incremented by "get" operations and decremented by "put" operations. 
- * Transitions of that count from 0 to 1 result in the clock and interconnects - * being enabled, and transitions of the count from 1 to 0 cause them to be - * disabled. We currently operate the core clock at a fixed clock rate, and - * all buses at a fixed average and peak bandwidth. As more advanced IPA - * features are enabled, we can make better use of clock and bus scaling. - * - * An IPA clock reference must be held for any access to IPA hardware. - */ - -/** - * struct ipa_interconnect - IPA interconnect information - * @path: Interconnect path - * @average_bandwidth: Average interconnect bandwidth (KB/second) - * @peak_bandwidth: Peak interconnect bandwidth (KB/second) - */ -struct ipa_interconnect { - struct icc_path *path; - u32 average_bandwidth; - u32 peak_bandwidth; -}; - -/** - * struct ipa_clock - IPA clocking information - * @count: Clocking reference count - * @mutex: Protects clock enable/disable - * @core: IPA core clock - * @interconnect_count: Number of elements in interconnect[] - * @interconnect: Interconnect array - */ -struct ipa_clock { - refcount_t count; - struct mutex mutex; /* protects clock enable/disable */ - struct clk *core; - u32 interconnect_count; - struct ipa_interconnect *interconnect; -}; - -static int ipa_interconnect_init_one(struct device *dev, - struct ipa_interconnect *interconnect, - const struct ipa_interconnect_data *data) -{ - struct icc_path *path; - - path = of_icc_get(dev, data->name); - if (IS_ERR(path)) { - int ret = PTR_ERR(path); - - dev_err_probe(dev, ret, "error getting %s interconnect\n", - data->name); - - return ret; - } - - interconnect->path = path; - interconnect->average_bandwidth = data->average_bandwidth; - interconnect->peak_bandwidth = data->peak_bandwidth; - - return 0; -} - -static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect) -{ - icc_put(interconnect->path); - memset(interconnect, 0, sizeof(*interconnect)); -} - -/* Initialize interconnects required for IPA operation */ -static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev, - const struct ipa_interconnect_data *data) -{ - struct ipa_interconnect *interconnect; - u32 count; - int ret; - - count = clock->interconnect_count; - interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL); - if (!interconnect) - return -ENOMEM; - clock->interconnect = interconnect; - - while (count--) { - ret = ipa_interconnect_init_one(dev, interconnect, data++); - if (ret) - goto out_unwind; - interconnect++; - } - - return 0; - -out_unwind: - while (interconnect-- > clock->interconnect) - ipa_interconnect_exit_one(interconnect); - kfree(clock->interconnect); - clock->interconnect = NULL; - - return ret; -} - -/* Inverse of ipa_interconnect_init() */ -static void ipa_interconnect_exit(struct ipa_clock *clock) -{ - struct ipa_interconnect *interconnect; - - interconnect = clock->interconnect + clock->interconnect_count; - while (interconnect-- > clock->interconnect) - ipa_interconnect_exit_one(interconnect); - kfree(clock->interconnect); - clock->interconnect = NULL; -} - -/* Currently we only use one bandwidth level, so just "enable" interconnects */ -static int ipa_interconnect_enable(struct ipa *ipa) -{ - struct ipa_interconnect *interconnect; - struct ipa_clock *clock = ipa->clock; - int ret; - u32 i; - - interconnect = clock->interconnect; - for (i = 0; i < clock->interconnect_count; i++) { - ret = icc_set_bw(interconnect->path, - interconnect->average_bandwidth, - interconnect->peak_bandwidth); - if (ret) - goto out_unwind; - 
interconnect++; - } - - return 0; - -out_unwind: - while (interconnect-- > clock->interconnect) - (void)icc_set_bw(interconnect->path, 0, 0); - - return ret; -} - -/* To disable an interconnect, we just its bandwidth to 0 */ -static void ipa_interconnect_disable(struct ipa *ipa) -{ - struct ipa_interconnect *interconnect; - struct ipa_clock *clock = ipa->clock; - int result = 0; - u32 count; - int ret; - - count = clock->interconnect_count; - interconnect = clock->interconnect + count; - while (count--) { - interconnect--; - ret = icc_set_bw(interconnect->path, 0, 0); - if (ret && !result) - result = ret; - } - - if (result) - dev_err(&ipa->pdev->dev, - "error %d disabling IPA interconnects\n", ret); -} - -/* Turn on IPA clocks, including interconnects */ -static int ipa_clock_enable(struct ipa *ipa) -{ - int ret; - - ret = ipa_interconnect_enable(ipa); - if (ret) - return ret; - - ret = clk_prepare_enable(ipa->clock->core); - if (ret) - ipa_interconnect_disable(ipa); - - return ret; -} - -/* Inverse of ipa_clock_enable() */ -static void ipa_clock_disable(struct ipa *ipa) -{ - clk_disable_unprepare(ipa->clock->core); - ipa_interconnect_disable(ipa); -} - -/* Get an IPA clock reference, but only if the reference count is - * already non-zero. Returns true if the additional reference was - * added successfully, or false otherwise. - */ -bool ipa_clock_get_additional(struct ipa *ipa) -{ - return refcount_inc_not_zero(&ipa->clock->count); -} - -/* Get an IPA clock reference. If the reference count is non-zero, it is - * incremented and return is immediate. Otherwise it is checked again - * under protection of the mutex, and if appropriate the IPA clock - * is enabled. - * - * Incrementing the reference count is intentionally deferred until - * after the clock is running and endpoints are resumed. - */ -void ipa_clock_get(struct ipa *ipa) -{ - struct ipa_clock *clock = ipa->clock; - int ret; - - /* If the clock is running, just bump the reference count */ - if (ipa_clock_get_additional(ipa)) - return; - - /* Otherwise get the mutex and check again */ - mutex_lock(&clock->mutex); - - /* A reference might have been added before we got the mutex. */ - if (ipa_clock_get_additional(ipa)) - goto out_mutex_unlock; - - ret = ipa_clock_enable(ipa); - if (ret) { - dev_err(&ipa->pdev->dev, "error %d enabling IPA clock\n", ret); - goto out_mutex_unlock; - } - - refcount_set(&clock->count, 1); - -out_mutex_unlock: - mutex_unlock(&clock->mutex); -} - -/* Attempt to remove an IPA clock reference. If this represents the - * last reference, disable the IPA clock under protection of the mutex. - */ -void ipa_clock_put(struct ipa *ipa) -{ - struct ipa_clock *clock = ipa->clock; - - /* If this is not the last reference there's nothing more to do */ - if (!refcount_dec_and_mutex_lock(&clock->count, &clock->mutex)) - return; - - ipa_clock_disable(ipa); - - mutex_unlock(&clock->mutex); -} - -/* Return the current IPA core clock rate */ -u32 ipa_clock_rate(struct ipa *ipa) -{ - return ipa->clock ? 
(u32)clk_get_rate(ipa->clock->core) : 0; -} - -/* Initialize IPA clocking */ -struct ipa_clock * -ipa_clock_init(struct device *dev, const struct ipa_clock_data *data) -{ - struct ipa_clock *clock; - struct clk *clk; - int ret; - - clk = clk_get(dev, "core"); - if (IS_ERR(clk)) { - dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n"); - - return ERR_CAST(clk); - } - - ret = clk_set_rate(clk, data->core_clock_rate); - if (ret) { - dev_err(dev, "error %d setting core clock rate to %u\n", - ret, data->core_clock_rate); - goto err_clk_put; - } - - clock = kzalloc(sizeof(*clock), GFP_KERNEL); - if (!clock) { - ret = -ENOMEM; - goto err_clk_put; - } - clock->core = clk; - clock->interconnect_count = data->interconnect_count; - - ret = ipa_interconnect_init(clock, dev, data->interconnect_data); - if (ret) - goto err_kfree; - - mutex_init(&clock->mutex); - refcount_set(&clock->count, 0); - - return clock; - -err_kfree: - kfree(clock); -err_clk_put: - clk_put(clk); - - return ERR_PTR(ret); -} - -/* Inverse of ipa_clock_init() */ -void ipa_clock_exit(struct ipa_clock *clock) -{ - struct clk *clk = clock->core; - - WARN_ON(refcount_read(&clock->count) != 0); - mutex_destroy(&clock->mutex); - ipa_interconnect_exit(clock); - kfree(clock); - clk_put(clk); -} diff --git a/drivers/net/ipa/ipa_clock.h b/drivers/net/ipa/ipa_clock.h deleted file mode 100644 index 1fe634760e59..000000000000 --- a/drivers/net/ipa/ipa_clock.h +++ /dev/null @@ -1,64 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2018-2020 Linaro Ltd. - */ -#ifndef _IPA_CLOCK_H_ -#define _IPA_CLOCK_H_ - -struct device; - -struct ipa; -struct ipa_clock_data; - -/** - * ipa_clock_rate() - Return the current IPA core clock rate - * @ipa: IPA structure - * - * Return: The current clock rate (in Hz), or 0. - */ -u32 ipa_clock_rate(struct ipa *ipa); - -/** - * ipa_clock_init() - Initialize IPA clocking - * @dev: IPA device - * @data: Clock configuration data - * - * Return: A pointer to an ipa_clock structure, or a pointer-coded error - */ -struct ipa_clock *ipa_clock_init(struct device *dev, - const struct ipa_clock_data *data); - -/** - * ipa_clock_exit() - Inverse of ipa_clock_init() - * @clock: IPA clock pointer - */ -void ipa_clock_exit(struct ipa_clock *clock); - -/** - * ipa_clock_get() - Get an IPA clock reference - * @ipa: IPA pointer - * - * This call blocks if this is the first reference. - */ -void ipa_clock_get(struct ipa *ipa); - -/** - * ipa_clock_get_additional() - Get an IPA clock reference if not first - * @ipa: IPA pointer - * - * This returns immediately, and only takes a reference if not the first - */ -bool ipa_clock_get_additional(struct ipa *ipa); - -/** - * ipa_clock_put() - Drop an IPA clock reference - * @ipa: IPA pointer - * - * This drops a clock reference. If the last reference is being dropped, - * the clock is stopped and RX endpoints are suspended. This call will - * not block unless the last reference is dropped. 
- */ -void ipa_clock_put(struct ipa *ipa); - -#endif /* _IPA_CLOCK_H_ */ diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c index af44ca41189e..cff51731195a 100644 --- a/drivers/net/ipa/ipa_cmd.c +++ b/drivers/net/ipa/ipa_cmd.c @@ -159,35 +159,49 @@ static void ipa_cmd_validate_build(void) BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK)); #undef TABLE_COUNT_MAX #undef TABLE_SIZE -} -#ifdef IPA_VALIDATE + /* Hashed and non-hashed fields are assumed to be the same size */ + BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK) != + field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK)); + BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) != + field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK)); + + /* Valid endpoint numbers must fit in the IP packet init command */ + BUILD_BUG_ON(field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK) < + IPA_ENDPOINT_MAX - 1); +} /* Validate a memory region holding a table */ -bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, - bool route, bool ipv6, bool hashed) +bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, bool route) { + u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK); + u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK); + const char *table = route ? "route" : "filter"; struct device *dev = &ipa->pdev->dev; - u32 offset_max; - offset_max = hashed ? field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) - : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK); + /* Size must fit in the immediate command field that holds it */ + if (mem->size > size_max) { + dev_err(dev, "%s table region size too large\n", table); + dev_err(dev, " (0x%04x > 0x%04x)\n", + mem->size, size_max); + + return false; + } + + /* Offset must fit in the immediate command field that holds it */ if (mem->offset > offset_max || ipa->mem_offset > offset_max - mem->offset) { - dev_err(dev, "IPv%c %s%s table region offset too large\n", - ipv6 ? '6' : '4', hashed ? "hashed " : "", - route ? "route" : "filter"); + dev_err(dev, "%s table region offset too large\n", table); dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n", ipa->mem_offset, mem->offset, offset_max); return false; } + /* Entire memory range must fit within IPA-local memory */ if (mem->offset > ipa->mem_size || mem->size > ipa->mem_size - mem->offset) { - dev_err(dev, "IPv%c %s%s table region out of range\n", - ipv6 ? '6' : '4', hashed ? "hashed " : "", - route ? 
"route" : "filter"); + dev_err(dev, "%s table region out of range\n", table); dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n", mem->offset, mem->size, ipa->mem_size); @@ -331,7 +345,6 @@ bool ipa_cmd_data_valid(struct ipa *ipa) return true; } -#endif /* IPA_VALIDATE */ int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max) { @@ -522,9 +535,6 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id) union ipa_cmd_payload *cmd_payload; dma_addr_t payload_addr; - /* assert(endpoint_id < - field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK)); */ - cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr); payload = &cmd_payload->ip_packet_init; @@ -548,8 +558,9 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size, u16 flags; /* size and offset must fit in 16 bit fields */ - /* assert(size > 0 && size <= U16_MAX); */ - /* assert(offset <= U16_MAX && ipa->mem_offset <= U16_MAX - offset); */ + WARN_ON(!size); + WARN_ON(size > U16_MAX); + WARN_ON(offset > U16_MAX || ipa->mem_offset > U16_MAX - offset); offset += ipa->mem_offset; @@ -588,8 +599,6 @@ static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans) union ipa_cmd_payload *cmd_payload; dma_addr_t payload_addr; - /* assert(tag <= field_max(IP_PACKET_TAG_STATUS_TAG_FMASK)); */ - cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr); payload = &cmd_payload->ip_packet_tag_status; diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h index b99262281f41..69cd085d427d 100644 --- a/drivers/net/ipa/ipa_cmd.h +++ b/drivers/net/ipa/ipa_cmd.h @@ -57,20 +57,16 @@ struct ipa_cmd_info { enum dma_data_direction direction; }; -#ifdef IPA_VALIDATE - /** * ipa_cmd_table_valid() - Validate a memory region holding a table * @ipa: - IPA pointer * @mem: - IPA memory region descriptor * @route: - Whether the region holds a route or filter table - * @ipv6: - Whether the table is for IPv6 or IPv4 - * @hashed: - Whether the table is hashed or non-hashed * * Return: true if region is valid, false otherwise */ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, - bool route, bool ipv6, bool hashed); + bool route); /** * ipa_cmd_data_valid() - Validate command-realted configuration is valid @@ -80,22 +76,6 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, */ bool ipa_cmd_data_valid(struct ipa *ipa); -#else /* !IPA_VALIDATE */ - -static inline bool ipa_cmd_table_valid(struct ipa *ipa, - const struct ipa_mem *mem, bool route, - bool ipv6, bool hashed) -{ - return true; -} - -static inline bool ipa_cmd_data_valid(struct ipa *ipa) -{ - return true; -} - -#endif /* !IPA_VALIDATE */ - /** * ipa_cmd_pool_init() - initialize command channel pools * @channel: AP->IPA command TX GSI channel pointer diff --git a/drivers/net/ipa/ipa_data-v3.1.c b/drivers/net/ipa/ipa_data-v3.1.c index 4c28189462a7..06ddb85f39b2 100644 --- a/drivers/net/ipa/ipa_data-v3.1.c +++ b/drivers/net/ipa/ipa_data-v3.1.c @@ -513,7 +513,7 @@ static const struct ipa_interconnect_data ipa_interconnect_data[] = { }; /* Clock and interconnect configuration data for an SoC having IPA v3.1 */ -static const struct ipa_clock_data ipa_clock_data = { +static const struct ipa_power_data ipa_power_data = { .core_clock_rate = 16 * 1000 * 1000, /* Hz */ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), .interconnect_data = ipa_interconnect_data, @@ -529,5 +529,5 @@ const struct ipa_data ipa_data_v3_1 = { .endpoint_data = ipa_gsi_endpoint_data, .resource_data = &ipa_resource_data, .mem_data = &ipa_mem_data, - 
.clock_data = &ipa_clock_data, + .power_data = &ipa_power_data, }; diff --git a/drivers/net/ipa/ipa_data-v3.5.1.c b/drivers/net/ipa/ipa_data-v3.5.1.c index af536ef8c120..760c22bbdf70 100644 --- a/drivers/net/ipa/ipa_data-v3.5.1.c +++ b/drivers/net/ipa/ipa_data-v3.5.1.c @@ -394,7 +394,7 @@ static const struct ipa_interconnect_data ipa_interconnect_data[] = { }; /* Clock and interconnect configuration data for an SoC having IPA v3.5.1 */ -static const struct ipa_clock_data ipa_clock_data = { +static const struct ipa_power_data ipa_power_data = { .core_clock_rate = 75 * 1000 * 1000, /* Hz */ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), .interconnect_data = ipa_interconnect_data, @@ -414,5 +414,5 @@ const struct ipa_data ipa_data_v3_5_1 = { .endpoint_data = ipa_gsi_endpoint_data, .resource_data = &ipa_resource_data, .mem_data = &ipa_mem_data, - .clock_data = &ipa_clock_data, + .power_data = &ipa_power_data, }; diff --git a/drivers/net/ipa/ipa_data-v4.11.c b/drivers/net/ipa/ipa_data-v4.11.c index 9353efbd504f..fea91451a0c3 100644 --- a/drivers/net/ipa/ipa_data-v4.11.c +++ b/drivers/net/ipa/ipa_data-v4.11.c @@ -105,6 +105,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .filter_support = true, .config = { .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .checksum = true, .qmap = true, .status_enable = true, .tx = { @@ -128,6 +129,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .endpoint = { .config = { .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .checksum = true, .qmap = true, .aggregation = true, .rx = { @@ -368,24 +370,19 @@ static const struct ipa_mem_data ipa_mem_data = { static const struct ipa_interconnect_data ipa_interconnect_data[] = { { .name = "memory", - .peak_bandwidth = 465000, /* 465 MBps */ - .average_bandwidth = 80000, /* 80 MBps */ - }, - /* Average rate is unused for the next two interconnects */ - { - .name = "imem", - .peak_bandwidth = 68570, /* 68.57 MBps */ - .average_bandwidth = 80000, /* 80 MBps (unused?) 
*/ + .peak_bandwidth = 600000, /* 600 MBps */ + .average_bandwidth = 150000, /* 150 MBps */ }, + /* Average rate is unused for the next interconnect */ { .name = "config", - .peak_bandwidth = 30000, /* 30 MBps */ + .peak_bandwidth = 74000, /* 74 MBps */ .average_bandwidth = 0, /* unused */ }, }; /* Clock and interconnect configuration data for an SoC having IPA v4.11 */ -static const struct ipa_clock_data ipa_clock_data = { +static const struct ipa_power_data ipa_power_data = { .core_clock_rate = 60 * 1000 * 1000, /* Hz */ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), .interconnect_data = ipa_interconnect_data, @@ -400,5 +397,5 @@ const struct ipa_data ipa_data_v4_11 = { .endpoint_data = ipa_gsi_endpoint_data, .resource_data = &ipa_resource_data, .mem_data = &ipa_mem_data, - .clock_data = &ipa_clock_data, + .power_data = &ipa_power_data, }; diff --git a/drivers/net/ipa/ipa_data-v4.2.c b/drivers/net/ipa/ipa_data-v4.2.c index 3b09b7baa95f..2a231e79d5e1 100644 --- a/drivers/net/ipa/ipa_data-v4.2.c +++ b/drivers/net/ipa/ipa_data-v4.2.c @@ -360,7 +360,7 @@ static const struct ipa_interconnect_data ipa_interconnect_data[] = { }; /* Clock and interconnect configuration data for an SoC having IPA v4.2 */ -static const struct ipa_clock_data ipa_clock_data = { +static const struct ipa_power_data ipa_power_data = { .core_clock_rate = 100 * 1000 * 1000, /* Hz */ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), .interconnect_data = ipa_interconnect_data, @@ -376,5 +376,5 @@ const struct ipa_data ipa_data_v4_2 = { .endpoint_data = ipa_gsi_endpoint_data, .resource_data = &ipa_resource_data, .mem_data = &ipa_mem_data, - .clock_data = &ipa_clock_data, + .power_data = &ipa_power_data, }; diff --git a/drivers/net/ipa/ipa_data-v4.5.c b/drivers/net/ipa/ipa_data-v4.5.c index a99b6478fa3a..e62ab9c3ac67 100644 --- a/drivers/net/ipa/ipa_data-v4.5.c +++ b/drivers/net/ipa/ipa_data-v4.5.c @@ -114,6 +114,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .filter_support = true, .config = { .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .checksum = true, .qmap = true, .status_enable = true, .tx = { @@ -137,6 +138,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .endpoint = { .config = { .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .checksum = true, .qmap = true, .aggregation = true, .rx = { @@ -441,7 +443,7 @@ static const struct ipa_interconnect_data ipa_interconnect_data[] = { }; /* Clock and interconnect configuration data for an SoC having IPA v4.5 */ -static const struct ipa_clock_data ipa_clock_data = { +static const struct ipa_power_data ipa_power_data = { .core_clock_rate = 150 * 1000 * 1000, /* Hz (150? 60?) 
*/ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), .interconnect_data = ipa_interconnect_data, @@ -456,5 +458,5 @@ const struct ipa_data ipa_data_v4_5 = { .endpoint_data = ipa_gsi_endpoint_data, .resource_data = &ipa_resource_data, .mem_data = &ipa_mem_data, - .clock_data = &ipa_clock_data, + .power_data = &ipa_power_data, }; diff --git a/drivers/net/ipa/ipa_data-v4.9.c b/drivers/net/ipa/ipa_data-v4.9.c index 798d43e1eb13..2421b5abb5d4 100644 --- a/drivers/net/ipa/ipa_data-v4.9.c +++ b/drivers/net/ipa/ipa_data-v4.9.c @@ -106,6 +106,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .filter_support = true, .config = { .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .checksum = true, .qmap = true, .status_enable = true, .tx = { @@ -129,6 +130,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .endpoint = { .config = { .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .checksum = true, .qmap = true, .aggregation = true, .rx = { @@ -416,18 +418,13 @@ static const struct ipa_mem_data ipa_mem_data = { /* Interconnect rates are in 1000 byte/second units */ static const struct ipa_interconnect_data ipa_interconnect_data[] = { { - .name = "ipa_to_llcc", + .name = "memory", .peak_bandwidth = 600000, /* 600 MBps */ .average_bandwidth = 150000, /* 150 MBps */ }, - { - .name = "llcc_to_ebi1", - .peak_bandwidth = 1804000, /* 1.804 GBps */ - .average_bandwidth = 150000, /* 150 MBps */ - }, /* Average rate is unused for the next interconnect */ { - .name = "appss_to_ipa", + .name = "config", .peak_bandwidth = 74000, /* 74 MBps */ .average_bandwidth = 0, /* unused */ }, @@ -435,7 +432,7 @@ static const struct ipa_interconnect_data ipa_interconnect_data[] = { }; /* Clock and interconnect configuration data for an SoC having IPA v4.9 */ -static const struct ipa_clock_data ipa_clock_data = { +static const struct ipa_power_data ipa_power_data = { .core_clock_rate = 60 * 1000 * 1000, /* Hz */ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data), .interconnect_data = ipa_interconnect_data, @@ -450,5 +447,5 @@ const struct ipa_data ipa_data_v4_9 = { .endpoint_data = ipa_gsi_endpoint_data, .resource_data = &ipa_resource_data, .mem_data = &ipa_mem_data, - .clock_data = &ipa_clock_data, + .power_data = &ipa_power_data, }; diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h index 5bc244c8f94e..6d329e9ce5d2 100644 --- a/drivers/net/ipa/ipa_data.h +++ b/drivers/net/ipa/ipa_data.h @@ -19,7 +19,7 @@ * IPA and GSI resources to use for a given platform. This data is supplied * via the Device Tree match table, associated with a particular compatible * string. The data defines information about how resources, endpoints and - * channels, memory, clocking and so on are allocated and used for the + * channels, memory, power and so on are allocated and used for the * platform. * * Resources are data structures used internally by the IPA hardware. 
The @@ -265,12 +265,12 @@ struct ipa_interconnect_data { }; /** - * struct ipa_clock_data - description of IPA clock and interconnect rates + * struct ipa_power_data - description of IPA power configuration data * @core_clock_rate: Core clock rate (Hz) * @interconnect_count: Number of entries in the interconnect_data array * @interconnect_data: IPA interconnect configuration data */ -struct ipa_clock_data { +struct ipa_power_data { u32 core_clock_rate; u32 interconnect_count; /* # entries in interconnect_data[] */ const struct ipa_interconnect_data *interconnect_data; @@ -286,7 +286,7 @@ struct ipa_clock_data { * @endpoint_data: IPA endpoint/GSI channel data * @resource_data: IPA resource configuration data * @mem_data: IPA memory region data - * @clock_data: IPA clock and interconnect data + * @power_data: IPA power data */ struct ipa_data { enum ipa_version version; @@ -297,7 +297,7 @@ struct ipa_data { const struct ipa_gsi_endpoint_data *endpoint_data; const struct ipa_resource_data *resource_data; const struct ipa_mem_data *mem_data; - const struct ipa_clock_data *clock_data; + const struct ipa_power_data *power_data; }; extern const struct ipa_data ipa_data_v3_1; diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index ab02669bae4e..5528d97110d5 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -21,7 +21,7 @@ #include "ipa_modem.h" #include "ipa_table.h" #include "ipa_gsi.h" -#include "ipa_clock.h" +#include "ipa_power.h" #define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0) @@ -250,17 +250,18 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay) /* Suspend is not supported for IPA v4.0+. Delay doesn't work * correctly on IPA v4.2. - * - * if (endpoint->toward_ipa) - * assert(ipa->version != IPA_VERSION_4.2); - * else - * assert(ipa->version < IPA_VERSION_4_0); */ + if (endpoint->toward_ipa) + WARN_ON(ipa->version == IPA_VERSION_4_2); + else + WARN_ON(ipa->version >= IPA_VERSION_4_0); + mask = endpoint->toward_ipa ? 
ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK; val = ioread32(ipa->reg_virt + offset); - /* Don't bother if it's already in the requested state */ state = !!(val & mask); + + /* Don't bother if it's already in the requested state */ if (suspend_delay != state) { val ^= mask; iowrite32(val, ipa->reg_virt + offset); @@ -273,7 +274,7 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay) static void ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable) { - /* assert(endpoint->toward_ipa); */ + WARN_ON(!endpoint->toward_ipa); /* Delay mode doesn't work properly for IPA v4.2 */ if (endpoint->ipa->version != IPA_VERSION_4_2) @@ -287,7 +288,8 @@ static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint) u32 offset; u32 val; - /* assert(mask & ipa->available); */ + WARN_ON(!(mask & ipa->available)); + offset = ipa_reg_state_aggr_active_offset(ipa->version); val = ioread32(ipa->reg_virt + offset); @@ -299,7 +301,8 @@ static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint) u32 mask = BIT(endpoint->endpoint_id); struct ipa *ipa = endpoint->ipa; - /* assert(mask & ipa->available); */ + WARN_ON(!(mask & ipa->available)); + iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET); } @@ -338,7 +341,7 @@ ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable) if (endpoint->ipa->version >= IPA_VERSION_4_0) return enable; /* For IPA v4.0+, no change made */ - /* assert(!endpoint->toward_ipa); */ + WARN_ON(endpoint->toward_ipa); suspended = ipa_endpoint_init_ctrl(endpoint, enable); @@ -807,7 +810,7 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds) return hol_block_timer_qtime_val(ipa, microseconds); /* Use 64 bit arithmetic to avoid overflow... */ - rate = ipa_clock_rate(ipa); + rate = ipa_core_clock_rate(ipa); ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC); /* ...but we still need to fit into a 32-bit register */ WARN_ON(ticks > U32_MAX); @@ -1156,7 +1159,8 @@ static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint, if (!endpoint->netdev) return false; - /* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */ + WARN_ON(len > SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE - NET_SKB_PAD)); + skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE); if (skb) { /* Reserve the headroom and account for the data */ @@ -1583,7 +1587,6 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint) { struct device *dev = &endpoint->ipa->pdev->dev; struct gsi *gsi = &endpoint->ipa->gsi; - bool stop_channel; int ret; if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) @@ -1594,11 +1597,7 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint) (void)ipa_endpoint_program_suspend(endpoint, true); } - /* Starting with IPA v4.0, endpoints are suspended by stopping the - * underlying GSI channel rather than using endpoint suspend mode. 
- */ - stop_channel = endpoint->ipa->version >= IPA_VERSION_4_0; - ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel); + ret = gsi_channel_suspend(gsi, endpoint->channel_id); if (ret) dev_err(dev, "error %d suspending channel %u\n", ret, endpoint->channel_id); @@ -1608,7 +1607,6 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint) { struct device *dev = &endpoint->ipa->pdev->dev; struct gsi *gsi = &endpoint->ipa->gsi; - bool start_channel; int ret; if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) @@ -1617,11 +1615,7 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint) if (!endpoint->toward_ipa) (void)ipa_endpoint_program_suspend(endpoint, false); - /* Starting with IPA v4.0, the underlying GSI channel must be - * restarted for resume. - */ - start_channel = endpoint->ipa->version >= IPA_VERSION_4_0; - ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel); + ret = gsi_channel_resume(gsi, endpoint->channel_id); if (ret) dev_err(dev, "error %d resuming channel %u\n", ret, endpoint->channel_id); diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c index c46df0b7c4e5..b35170a93b0f 100644 --- a/drivers/net/ipa/ipa_interrupt.c +++ b/drivers/net/ipa/ipa_interrupt.c @@ -21,9 +21,9 @@ #include <linux/types.h> #include <linux/interrupt.h> +#include <linux/pm_runtime.h> #include "ipa.h" -#include "ipa_clock.h" #include "ipa_reg.h" #include "ipa_endpoint.h" #include "ipa_interrupt.h" @@ -74,21 +74,30 @@ static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id) iowrite32(mask, ipa->reg_virt + offset); } -/* Process all IPA interrupt types that have been signaled */ -static void ipa_interrupt_process_all(struct ipa_interrupt *interrupt) +/* IPA IRQ handler is threaded */ +static irqreturn_t ipa_isr_thread(int irq, void *dev_id) { + struct ipa_interrupt *interrupt = dev_id; struct ipa *ipa = interrupt->ipa; u32 enabled = interrupt->enabled; + struct device *dev; + u32 pending; u32 offset; u32 mask; + int ret; + + dev = &ipa->pdev->dev; + ret = pm_runtime_get_sync(dev); + if (WARN_ON(ret < 0)) + goto out_power_put; /* The status register indicates which conditions are present, * including conditions whose interrupt is not enabled. Handle * only the enabled ones. 
*/ offset = ipa_reg_irq_stts_offset(ipa->version); - mask = ioread32(ipa->reg_virt + offset); - while ((mask &= enabled)) { + pending = ioread32(ipa->reg_virt + offset); + while ((mask = pending & enabled)) { do { u32 irq_id = __ffs(mask); @@ -96,43 +105,19 @@ static void ipa_interrupt_process_all(struct ipa_interrupt *interrupt) ipa_interrupt_process(interrupt, irq_id); } while (mask); - mask = ioread32(ipa->reg_virt + offset); + pending = ioread32(ipa->reg_virt + offset); } -} - -/* Threaded part of the IPA IRQ handler */ -static irqreturn_t ipa_isr_thread(int irq, void *dev_id) -{ - struct ipa_interrupt *interrupt = dev_id; - - ipa_clock_get(interrupt->ipa); - - ipa_interrupt_process_all(interrupt); - - ipa_clock_put(interrupt->ipa); - - return IRQ_HANDLED; -} - -/* Hard part (i.e., "real" IRQ handler) of the IRQ handler */ -static irqreturn_t ipa_isr(int irq, void *dev_id) -{ - struct ipa_interrupt *interrupt = dev_id; - struct ipa *ipa = interrupt->ipa; - u32 offset; - u32 mask; - offset = ipa_reg_irq_stts_offset(ipa->version); - mask = ioread32(ipa->reg_virt + offset); - if (mask & interrupt->enabled) - return IRQ_WAKE_THREAD; - - /* Nothing in the mask was supposed to cause an interrupt */ - offset = ipa_reg_irq_clr_offset(ipa->version); - iowrite32(mask, ipa->reg_virt + offset); - - dev_err(&ipa->pdev->dev, "%s: unexpected interrupt, mask 0x%08x\n", - __func__, mask); + /* If any disabled interrupts are pending, clear them */ + if (pending) { + dev_dbg(dev, "clearing disabled IPA interrupts 0x%08x\n", + pending); + offset = ipa_reg_irq_clr_offset(ipa->version); + iowrite32(pending, ipa->reg_virt + offset); + } +out_power_put: + pm_runtime_mark_last_busy(dev); + (void)pm_runtime_put_autosuspend(dev); return IRQ_HANDLED; } @@ -146,7 +131,7 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt, u32 offset; u32 val; - /* assert(mask & ipa->available); */ + WARN_ON(!(mask & ipa->available)); /* IPA version 3.0 does not support TX_SUSPEND interrupt control */ if (ipa->version == IPA_VERSION_3_0) @@ -206,7 +191,8 @@ void ipa_interrupt_add(struct ipa_interrupt *interrupt, struct ipa *ipa = interrupt->ipa; u32 offset; - /* assert(ipa_irq < IPA_IRQ_COUNT); */ + WARN_ON(ipa_irq >= IPA_IRQ_COUNT); + interrupt->handler[ipa_irq] = handler; /* Update the IPA interrupt mask to enable it */ @@ -222,7 +208,8 @@ ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq) struct ipa *ipa = interrupt->ipa; u32 offset; - /* assert(ipa_irq < IPA_IRQ_COUNT); */ + WARN_ON(ipa_irq >= IPA_IRQ_COUNT); + /* Update the IPA interrupt mask to disable it */ interrupt->enabled &= ~BIT(ipa_irq); offset = ipa_reg_irq_en_offset(ipa->version); @@ -231,8 +218,8 @@ ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq) interrupt->handler[ipa_irq] = NULL; } -/* Set up the IPA interrupt framework */ -struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa) +/* Configure the IPA interrupt framework */ +struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa) { struct device *dev = &ipa->pdev->dev; struct ipa_interrupt *interrupt; @@ -258,7 +245,7 @@ struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa) offset = ipa_reg_irq_en_offset(ipa->version); iowrite32(0, ipa->reg_virt + offset); - ret = request_threaded_irq(irq, ipa_isr, ipa_isr_thread, IRQF_ONESHOT, + ret = request_threaded_irq(irq, NULL, ipa_isr_thread, IRQF_ONESHOT, "ipa", interrupt); if (ret) { dev_err(dev, "error %d requesting \"ipa\" IRQ\n", ret); @@ -281,8 +268,8 @@ err_kfree: return 
ERR_PTR(ret); } -/* Tear down the IPA interrupt framework */ -void ipa_interrupt_teardown(struct ipa_interrupt *interrupt) +/* Inverse of ipa_interrupt_config() */ +void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt) { struct device *dev = &interrupt->ipa->pdev->dev; int ret; diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h index d5c486a6800d..231390cea52a 100644 --- a/drivers/net/ipa/ipa_interrupt.h +++ b/drivers/net/ipa/ipa_interrupt.h @@ -86,17 +86,17 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt); void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt); /** - * ipa_interrupt_setup() - Set up the IPA interrupt framework + * ipa_interrupt_config() - Configure the IPA interrupt framework * @ipa: IPA pointer * * Return: Pointer to IPA SMP2P info, or a pointer-coded error */ -struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa); +struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa); /** - * ipa_interrupt_teardown() - Tear down the IPA interrupt framework + * ipa_interrupt_deconfig() - Inverse of ipa_interrupt_config() * @interrupt: IPA interrupt structure */ -void ipa_interrupt_teardown(struct ipa_interrupt *interrupt); +void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt); #endif /* _IPA_INTERRUPT_H_ */ diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c index 9810c61a0320..cdfa98a76e1f 100644 --- a/drivers/net/ipa/ipa_main.c +++ b/drivers/net/ipa/ipa_main.c @@ -15,11 +15,12 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_address.h> +#include <linux/pm_runtime.h> #include <linux/qcom_scm.h> #include <linux/soc/qcom/mdt_loader.h> #include "ipa.h" -#include "ipa_clock.h" +#include "ipa_power.h" #include "ipa_data.h" #include "ipa_endpoint.h" #include "ipa_resource.h" @@ -80,29 +81,6 @@ #define IPA_XO_CLOCK_DIVIDER 192 /* 1 is subtracted where used */ /** - * ipa_suspend_handler() - Handle the suspend IPA interrupt - * @ipa: IPA pointer - * @irq_id: IPA interrupt type (unused) - * - * If an RX endpoint is in suspend state, and the IPA has a packet - * destined for that endpoint, the IPA generates a SUSPEND interrupt - * to inform the AP that it should resume the endpoint. If we get - * one of these interrupts we just resume everything. - */ -static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id) -{ - /* Just report the event, and let system resume handle the rest. - * More than one endpoint could signal this; if so, ignore - * all but the first. - */ - if (!test_and_set_bit(IPA_FLAG_RESUMED, ipa->flags)) - pm_wakeup_dev_event(&ipa->pdev->dev, 0, true); - - /* Acknowledge/clear the suspend interrupt on all endpoints */ - ipa_interrupt_suspend_clear_all(ipa->interrupt); -} - -/** * ipa_setup() - Set up IPA hardware * @ipa: IPA pointer * @@ -124,19 +102,9 @@ int ipa_setup(struct ipa *ipa) if (ret) return ret; - ipa->interrupt = ipa_interrupt_setup(ipa); - if (IS_ERR(ipa->interrupt)) { - ret = PTR_ERR(ipa->interrupt); - goto err_gsi_teardown; - } - ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND, - ipa_suspend_handler); - - ipa_uc_setup(ipa); - - ret = device_init_wakeup(dev, true); + ret = ipa_power_setup(ipa); if (ret) - goto err_uc_teardown; + goto err_gsi_teardown; ipa_endpoint_setup(ipa); @@ -167,7 +135,7 @@ int ipa_setup(struct ipa *ipa) ipa_endpoint_default_route_set(ipa, exception_endpoint->endpoint_id); /* We're all set. 
Now prepare for communication with the modem */ - ret = ipa_modem_setup(ipa); + ret = ipa_qmi_setup(ipa); if (ret) goto err_default_route_clear; @@ -184,11 +152,7 @@ err_command_disable: ipa_endpoint_disable_one(command_endpoint); err_endpoint_teardown: ipa_endpoint_teardown(ipa); - (void)device_init_wakeup(dev, false); -err_uc_teardown: - ipa_uc_teardown(ipa); - ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND); - ipa_interrupt_teardown(ipa->interrupt); + ipa_power_teardown(ipa); err_gsi_teardown: gsi_teardown(&ipa->gsi); @@ -204,17 +168,17 @@ static void ipa_teardown(struct ipa *ipa) struct ipa_endpoint *exception_endpoint; struct ipa_endpoint *command_endpoint; - ipa_modem_teardown(ipa); + /* We're going to tear everything down, as if setup never completed */ + ipa->setup_complete = false; + + ipa_qmi_teardown(ipa); ipa_endpoint_default_route_clear(ipa); exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]; ipa_endpoint_disable_one(exception_endpoint); command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; ipa_endpoint_disable_one(command_endpoint); ipa_endpoint_teardown(ipa); - (void)device_init_wakeup(&ipa->pdev->dev, false); - ipa_uc_teardown(ipa); - ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND); - ipa_interrupt_teardown(ipa->interrupt); + ipa_power_teardown(ipa); gsi_teardown(&ipa->gsi); } @@ -253,9 +217,6 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data) const struct ipa_qsb_data *data1; u32 val; - /* assert(data->qsb_count > 0); */ - /* assert(data->qsb_count < 3); */ - /* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */ data0 = &data->qsb_data[IPA_QSB_MASTER_DDR]; if (data->qsb_count > 1) @@ -289,12 +250,11 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data) /* Compute the value to use in the COUNTER_CFG register AGGR_GRANULARITY * field to represent the given number of microseconds. The value is one * less than the number of timer ticks in the requested period. 0 is not - * a valid granularity value. + * a valid granularity value (so for example @usec must be at least 16 for + * a TIMER_FREQUENCY of 32000). */ -static u32 ipa_aggr_granularity_val(u32 usec) +static __always_inline u32 ipa_aggr_granularity_val(u32 usec) { - /* assert(usec != 0); */ - return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1; } @@ -366,8 +326,8 @@ static void ipa_idle_indication_cfg(struct ipa *ipa, * @ipa: IPA pointer * * Configures when the IPA signals it is idle to the global clock - * controller, which can respond by scalling down the clock to - * save power. + * controller, which can respond by scaling down the clock to save + * power. */ static void ipa_hardware_dcd_config(struct ipa *ipa) { @@ -457,48 +417,54 @@ static void ipa_hardware_deconfig(struct ipa *ipa) * @ipa: IPA pointer * @data: IPA configuration data * - * Perform initialization requiring IPA clock to be enabled. + * Perform initialization requiring IPA power to be enabled. */ static int ipa_config(struct ipa *ipa, const struct ipa_data *data) { int ret; - /* Get a clock reference to allow initialization. This reference - * is held after initialization completes, and won't get dropped - * unless/until a system suspend request arrives. 
- */ - ipa_clock_get(ipa); - ipa_hardware_config(ipa, data); - ret = ipa_endpoint_config(ipa); + ret = ipa_mem_config(ipa); if (ret) goto err_hardware_deconfig; - ret = ipa_mem_config(ipa); + ipa->interrupt = ipa_interrupt_config(ipa); + if (IS_ERR(ipa->interrupt)) { + ret = PTR_ERR(ipa->interrupt); + ipa->interrupt = NULL; + goto err_mem_deconfig; + } + + ipa_uc_config(ipa); + + ret = ipa_endpoint_config(ipa); if (ret) - goto err_endpoint_deconfig; + goto err_uc_deconfig; ipa_table_config(ipa); /* No deconfig required */ /* Assign resource limitation to each group; no deconfig required */ ret = ipa_resource_config(ipa, data->resource_data); if (ret) - goto err_mem_deconfig; + goto err_endpoint_deconfig; ret = ipa_modem_config(ipa); if (ret) - goto err_mem_deconfig; + goto err_endpoint_deconfig; return 0; -err_mem_deconfig: - ipa_mem_deconfig(ipa); err_endpoint_deconfig: ipa_endpoint_deconfig(ipa); +err_uc_deconfig: + ipa_uc_deconfig(ipa); + ipa_interrupt_deconfig(ipa->interrupt); + ipa->interrupt = NULL; +err_mem_deconfig: + ipa_mem_deconfig(ipa); err_hardware_deconfig: ipa_hardware_deconfig(ipa); - ipa_clock_put(ipa); return ret; } @@ -510,10 +476,12 @@ err_hardware_deconfig: static void ipa_deconfig(struct ipa *ipa) { ipa_modem_deconfig(ipa); - ipa_mem_deconfig(ipa); ipa_endpoint_deconfig(ipa); + ipa_uc_deconfig(ipa); + ipa_interrupt_deconfig(ipa->interrupt); + ipa->interrupt = NULL; + ipa_mem_deconfig(ipa); ipa_hardware_deconfig(ipa); - ipa_clock_put(ipa); } static int ipa_firmware_load(struct device *dev) @@ -612,7 +580,6 @@ MODULE_DEVICE_TABLE(of, ipa_match); * */ static void ipa_validate_build(void) { -#ifdef IPA_VALIDATE /* At one time we assumed a 64-bit build, allowing some do_div() * calls to be replaced by simple division or modulo operations. * We currently only perform divide and modulo operations on u32, @@ -646,7 +613,6 @@ static void ipa_validate_build(void) BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY)); BUILD_BUG_ON(ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY) > field_max(AGGR_GRANULARITY_FMASK)); -#endif /* IPA_VALIDATE */ } static bool ipa_version_valid(enum ipa_version version) @@ -681,7 +647,7 @@ static bool ipa_version_valid(enum ipa_version version) * in several stages: * - The "init" stage involves activities that can be initialized without * access to the IPA hardware. - * - The "config" stage requires the IPA clock to be active so IPA registers + * - The "config" stage requires IPA power to be active so IPA registers * can be accessed, but does not require the use of IPA immediate commands. * - The "setup" stage uses IPA immediate commands, and so requires the GSI * layer to be initialized. @@ -697,14 +663,14 @@ static int ipa_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct ipa_data *data; - struct ipa_clock *clock; + struct ipa_power *power; bool modem_init; struct ipa *ipa; int ret; ipa_validate_build(); - /* Get configuration data early; needed for clock initialization */ + /* Get configuration data early; needed for power initialization */ data = of_device_get_match_data(dev); if (!data) { dev_err(dev, "matched hardware not supported\n"); @@ -725,20 +691,20 @@ static int ipa_probe(struct platform_device *pdev) /* The clock and interconnects might not be ready when we're * probed, so might return -EPROBE_DEFER. 
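A minimal sketch of the deferral pattern at work here (hypothetical example_probe(), not code from this patch): dev_err_probe() returns the error it is given but only logs when that error is not -EPROBE_DEFER, so a not-yet-ready clock or interconnect quietly causes the probe to be retried later.

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *core;

		core = clk_get(&pdev->dev, "core");
		if (IS_ERR(core))	/* May be -EPROBE_DEFER */
			return dev_err_probe(&pdev->dev, PTR_ERR(core),
					     "error getting core clock\n");

		/* ... remaining probe steps run only once resources exist ... */
		return 0;
	}
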
*/ - clock = ipa_clock_init(dev, data->clock_data); - if (IS_ERR(clock)) - return PTR_ERR(clock); + power = ipa_power_init(dev, data->power_data); + if (IS_ERR(power)) + return PTR_ERR(power); /* No more EPROBE_DEFER. Allocate and initialize the IPA structure */ ipa = kzalloc(sizeof(*ipa), GFP_KERNEL); if (!ipa) { ret = -ENOMEM; - goto err_clock_exit; + goto err_power_exit; } ipa->pdev = pdev; dev_set_drvdata(dev, ipa); - ipa->clock = clock; + ipa->power = power; ipa->version = data->version; init_completion(&ipa->completion); @@ -771,18 +737,23 @@ static int ipa_probe(struct platform_device *pdev) if (ret) goto err_table_exit; + /* Power needs to be active for config and setup */ + ret = pm_runtime_get_sync(dev); + if (WARN_ON(ret < 0)) + goto err_power_put; + ret = ipa_config(ipa, data); if (ret) - goto err_modem_exit; + goto err_power_put; dev_info(dev, "IPA driver initialized"); /* If the modem is doing early initialization, it will trigger a - * call to ipa_setup() call when it has finished. In that case - * we're done here. + * call to ipa_setup() when it has finished. In that case we're + * done here. */ if (modem_init) - return 0; + goto done; /* Otherwise we need to load the firmware and have Trust Zone validate * and install it. If that succeeds we can proceed with setup. @@ -794,12 +765,16 @@ static int ipa_probe(struct platform_device *pdev) ret = ipa_setup(ipa); if (ret) goto err_deconfig; +done: + pm_runtime_mark_last_busy(dev); + (void)pm_runtime_put_autosuspend(dev); return 0; err_deconfig: ipa_deconfig(ipa); -err_modem_exit: +err_power_put: + pm_runtime_put_noidle(dev); ipa_modem_exit(ipa); err_table_exit: ipa_table_exit(ipa); @@ -813,8 +788,8 @@ err_reg_exit: ipa_reg_exit(ipa); err_kfree_ipa: kfree(ipa); -err_clock_exit: - ipa_clock_exit(clock); +err_power_exit: + ipa_power_exit(power); return ret; } @@ -822,9 +797,14 @@ err_clock_exit: static int ipa_remove(struct platform_device *pdev) { struct ipa *ipa = dev_get_drvdata(&pdev->dev); - struct ipa_clock *clock = ipa->clock; + struct ipa_power *power = ipa->power; + struct device *dev = &pdev->dev; int ret; + ret = pm_runtime_get_sync(dev); + if (WARN_ON(ret < 0)) + goto out_power_put; + if (ipa->setup_complete) { ret = ipa_modem_stop(ipa); /* If starting or stopping is in progress, try once more */ @@ -839,6 +819,8 @@ static int ipa_remove(struct platform_device *pdev) } ipa_deconfig(ipa); +out_power_put: + pm_runtime_put_noidle(dev); ipa_modem_exit(ipa); ipa_table_exit(ipa); ipa_endpoint_exit(ipa); @@ -846,7 +828,7 @@ static int ipa_remove(struct platform_device *pdev) ipa_mem_exit(ipa); ipa_reg_exit(ipa); kfree(ipa); - ipa_clock_exit(clock); + ipa_power_exit(power); return 0; } @@ -860,62 +842,6 @@ static void ipa_shutdown(struct platform_device *pdev) dev_err(&pdev->dev, "shutdown: remove returned %d\n", ret); } -/** - * ipa_suspend() - Power management system suspend callback - * @dev: IPA device structure - * - * Return: Always returns zero - * - * Called by the PM framework when a system suspend operation is invoked. - * Suspends endpoints and releases the clock reference held to keep - * the IPA clock running until this point. - */ -static int ipa_suspend(struct device *dev) -{ - struct ipa *ipa = dev_get_drvdata(dev); - - /* When a suspended RX endpoint has a packet ready to receive, we - * get an IPA SUSPEND interrupt. We trigger a system resume in - * that case, but only on the first such interrupt since suspend. 
- */ - __clear_bit(IPA_FLAG_RESUMED, ipa->flags); - - ipa_endpoint_suspend(ipa); - - ipa_clock_put(ipa); - - return 0; -} - -/** - * ipa_resume() - Power management system resume callback - * @dev: IPA device structure - * - * Return: Always returns 0 - * - * Called by the PM framework when a system resume operation is invoked. - * Takes an IPA clock reference to keep the clock running until suspend, - * and resumes endpoints. - */ -static int ipa_resume(struct device *dev) -{ - struct ipa *ipa = dev_get_drvdata(dev); - - /* This clock reference will keep the IPA out of suspend - * until we get a power management suspend request. - */ - ipa_clock_get(ipa); - - ipa_endpoint_resume(ipa); - - return 0; -} - -static const struct dev_pm_ops ipa_pm_ops = { - .suspend = ipa_suspend, - .resume = ipa_resume, -}; - static const struct attribute_group *ipa_attribute_groups[] = { &ipa_attribute_group, &ipa_feature_attribute_group, diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c index af9aedbde717..ad116bcc0580 100644 --- a/drivers/net/ipa/ipa_modem.c +++ b/drivers/net/ipa/ipa_modem.c @@ -9,6 +9,7 @@ #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/if_rmnet.h> +#include <linux/pm_runtime.h> #include <linux/remoteproc/qcom_rproc.h> #include "ipa.h" @@ -19,6 +20,8 @@ #include "ipa_modem.h" #include "ipa_smp2p.h" #include "ipa_qmi.h" +#include "ipa_uc.h" +#include "ipa_power.h" #define IPA_NETDEV_NAME "rmnet_ipa%d" #define IPA_NETDEV_TAILROOM 0 /* for padding by mux layer */ @@ -31,9 +34,14 @@ enum ipa_modem_state { IPA_MODEM_STATE_STOPPING, }; -/** struct ipa_priv - IPA network device private data */ +/** + * struct ipa_priv - IPA network device private data + * @ipa: IPA pointer + * @work: Work structure used to wake the modem netdev TX queue + */ struct ipa_priv { struct ipa *ipa; + struct work_struct work; }; /** ipa_open() - Opens the modem network interface */ @@ -41,21 +49,33 @@ static int ipa_open(struct net_device *netdev) { struct ipa_priv *priv = netdev_priv(netdev); struct ipa *ipa = priv->ipa; + struct device *dev; int ret; + dev = &ipa->pdev->dev; + ret = pm_runtime_get_sync(dev); + if (ret < 0) + goto err_power_put; + ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]); if (ret) - return ret; + goto err_power_put; + ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]); if (ret) goto err_disable_tx; netif_start_queue(netdev); + pm_runtime_mark_last_busy(dev); + (void)pm_runtime_put_autosuspend(dev); + return 0; err_disable_tx: ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]); +err_power_put: + pm_runtime_put_noidle(dev); return ret; } @@ -65,11 +85,21 @@ static int ipa_stop(struct net_device *netdev) { struct ipa_priv *priv = netdev_priv(netdev); struct ipa *ipa = priv->ipa; + struct device *dev; + int ret; + + dev = &ipa->pdev->dev; + ret = pm_runtime_get_sync(dev); + if (ret < 0) + goto out_power_put; netif_stop_queue(netdev); ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]); ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]); +out_power_put: + pm_runtime_mark_last_busy(dev); + (void)pm_runtime_put_autosuspend(dev); return 0; } @@ -82,13 +112,15 @@ static int ipa_stop(struct net_device *netdev) * NETDEV_TX_OK: Success * NETDEV_TX_BUSY: Error while transmitting the skb. 
Try again later */ -static int ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t +ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct net_device_stats *stats = &netdev->stats; struct ipa_priv *priv = netdev_priv(netdev); struct ipa_endpoint *endpoint; struct ipa *ipa = priv->ipa; u32 skb_len = skb->len; + struct device *dev; int ret; if (!skb_len) @@ -98,7 +130,35 @@ static int ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev) if (endpoint->data->qmap && skb->protocol != htons(ETH_P_MAP)) goto err_drop_skb; + /* The hardware must be powered for us to transmit */ + dev = &ipa->pdev->dev; + ret = pm_runtime_get(dev); + if (ret < 1) { + /* If a resume won't happen, just drop the packet */ + if (ret < 0 && ret != -EINPROGRESS) { + ipa_power_modem_queue_active(ipa); + pm_runtime_put_noidle(dev); + goto err_drop_skb; + } + + /* No power (yet). Stop the network stack from transmitting + * until we're resumed; ipa_modem_resume() arranges for the + * TX queue to be started again. + */ + ipa_power_modem_queue_stop(ipa); + + pm_runtime_put_noidle(dev); + + return NETDEV_TX_BUSY; + } + + ipa_power_modem_queue_active(ipa); + ret = ipa_endpoint_skb_tx(endpoint, skb); + + pm_runtime_mark_last_busy(dev); + (void)pm_runtime_put_autosuspend(dev); + if (ret) { if (ret != -E2BIG) return NETDEV_TX_BUSY; @@ -169,12 +229,31 @@ void ipa_modem_suspend(struct net_device *netdev) struct ipa_priv *priv = netdev_priv(netdev); struct ipa *ipa = priv->ipa; - netif_stop_queue(netdev); + if (!(netdev->flags & IFF_UP)) + return; ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]); ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]); } +/** + * ipa_modem_wake_queue_work() - enable modem netdev queue + * @work: Work structure + * + * Re-enable transmit on the modem network device. This is called + * in (power management) work queue context, scheduled when resuming + * the modem. We can't enable the queue directly in ipa_modem_resume() + * because transmits restart the instant the queue is awakened; but the + * device power state won't be ACTIVE until *after* ipa_modem_resume() + * returns. 
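A condensed sketch of the transmit-side pattern this supports (hypothetical example_xmit(), not the exact IPA code): pm_runtime_get() requests resume asynchronously, returning 1 if the device is already active, and 0 or -EINPROGRESS if a resume is underway, in which case the queue is parked and the packet retried once the resume path rewakes the queue.

	static netdev_tx_t example_xmit(struct sk_buff *skb,
					struct net_device *netdev)
	{
		struct device *dev = netdev->dev.parent;
		int ret;

		ret = pm_runtime_get(dev);	/* Asynchronous resume request */
		if (ret < 1) {
			pm_runtime_put_noidle(dev);
			if (ret < 0 && ret != -EINPROGRESS) {
				dev_kfree_skb_any(skb);	/* No resume coming */
				return NETDEV_TX_OK;
			}
			netif_stop_queue(netdev); /* Resume path restarts it */
			return NETDEV_TX_BUSY;
		}

		/* ... hand the skb to the hardware here ... */

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
		return NETDEV_TX_OK;
	}
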
+ */ +static void ipa_modem_wake_queue_work(struct work_struct *work) +{ + struct ipa_priv *priv = container_of(work, struct ipa_priv, work); + + ipa_power_modem_queue_wake(priv->ipa); +} + /** ipa_modem_resume() - resume callback for runtime_pm * @dev: pointer to device * @@ -185,10 +264,14 @@ void ipa_modem_resume(struct net_device *netdev) struct ipa_priv *priv = netdev_priv(netdev); struct ipa *ipa = priv->ipa; + if (!(netdev->flags & IFF_UP)) + return; + ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]); ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]); - netif_wake_queue(netdev); + /* Arrange for the TX queue to be restarted */ + (void)queue_pm_work(&priv->work); } int ipa_modem_start(struct ipa *ipa) @@ -216,13 +299,16 @@ int ipa_modem_start(struct ipa *ipa) SET_NETDEV_DEV(netdev, &ipa->pdev->dev); priv = netdev_priv(netdev); priv->ipa = ipa; + INIT_WORK(&priv->work, ipa_modem_wake_queue_work); + ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev; + ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev; + ipa->modem_netdev = netdev; ret = register_netdev(netdev); - if (!ret) { - ipa->modem_netdev = netdev; - ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev; - ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev; - } else { + if (ret) { + ipa->modem_netdev = NULL; + ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL; + ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL; free_netdev(netdev); } @@ -256,13 +342,18 @@ int ipa_modem_stop(struct ipa *ipa) /* Prevent the modem from triggering a call to ipa_setup() */ ipa_smp2p_disable(ipa); - /* Stop the queue and disable the endpoints if it's open */ + /* Clean up the netdev and endpoints if it was started */ if (netdev) { - (void)ipa_stop(netdev); + struct ipa_priv *priv = netdev_priv(netdev); + + cancel_work_sync(&priv->work); + /* If it was opened, stop it first */ + if (netdev->flags & IFF_UP) + (void)ipa_stop(netdev); + unregister_netdev(netdev); + ipa->modem_netdev = NULL; ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL; ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL; - ipa->modem_netdev = NULL; - unregister_netdev(netdev); free_netdev(netdev); } @@ -278,6 +369,12 @@ static void ipa_modem_crashed(struct ipa *ipa) struct device *dev = &ipa->pdev->dev; int ret; + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "error %d getting power to handle crash\n", ret); + goto out_power_put; + } + ipa_endpoint_modem_pause_all(ipa, true); ipa_endpoint_modem_hol_block_clear_all(ipa); @@ -302,6 +399,10 @@ static void ipa_modem_crashed(struct ipa *ipa) ret = ipa_mem_zero_modem(ipa); if (ret) dev_err(dev, "error %d zeroing modem memory regions\n", ret); + +out_power_put: + pm_runtime_mark_last_busy(dev); + (void)pm_runtime_put_autosuspend(dev); } static int ipa_modem_notify(struct notifier_block *nb, unsigned long action, @@ -314,6 +415,7 @@ static int ipa_modem_notify(struct notifier_block *nb, unsigned long action, switch (action) { case QCOM_SSR_BEFORE_POWERUP: dev_info(dev, "received modem starting event\n"); + ipa_uc_power(ipa); ipa_smp2p_notify_reset(ipa); break; @@ -377,13 +479,3 @@ void ipa_modem_deconfig(struct ipa *ipa) ipa->notifier = NULL; memset(&ipa->nb, 0, sizeof(ipa->nb)); } - -int ipa_modem_setup(struct ipa *ipa) -{ - return ipa_qmi_setup(ipa); -} - -void ipa_modem_teardown(struct ipa *ipa) -{ - ipa_qmi_teardown(ipa); -} diff --git a/drivers/net/ipa/ipa_modem.h b/drivers/net/ipa/ipa_modem.h index 2de3e216d1d4..5e6e3d234454 100644 --- 
a/drivers/net/ipa/ipa_modem.h +++ b/drivers/net/ipa/ipa_modem.h @@ -7,7 +7,6 @@ #define _IPA_MODEM_H_ struct ipa; -struct ipa_endpoint; struct net_device; struct sk_buff; @@ -25,7 +24,4 @@ void ipa_modem_exit(struct ipa *ipa); int ipa_modem_config(struct ipa *ipa); void ipa_modem_deconfig(struct ipa *ipa); -int ipa_modem_setup(struct ipa *ipa); -void ipa_modem_teardown(struct ipa *ipa); - #endif /* _IPA_MODEM_H_ */ diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c new file mode 100644 index 000000000000..b1c6c0fcb654 --- /dev/null +++ b/drivers/net/ipa/ipa_power.c @@ -0,0 +1,473 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + * Copyright (C) 2018-2021 Linaro Ltd. + */ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/interconnect.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> +#include <linux/bitops.h> + +#include "ipa.h" +#include "ipa_power.h" +#include "ipa_endpoint.h" +#include "ipa_modem.h" +#include "ipa_data.h" + +/** + * DOC: IPA Power Management + * + * The IPA hardware is enabled when the IPA core clock and all the + * interconnects (buses) it depends on are enabled. Runtime power + * management determines whether the core clock and interconnects are + * enabled, and allows them to be suspended automatically when the + * hardware is not in use. + * + * The core clock currently runs at a fixed clock rate when enabled, + * and all interconnects use a fixed average and peak bandwidth. + */ + +#define IPA_AUTOSUSPEND_DELAY 500 /* milliseconds */ + +/** + * struct ipa_interconnect - IPA interconnect information + * @path: Interconnect path + * @average_bandwidth: Average interconnect bandwidth (KB/second) + * @peak_bandwidth: Peak interconnect bandwidth (KB/second) + */ +struct ipa_interconnect { + struct icc_path *path; + u32 average_bandwidth; + u32 peak_bandwidth; +}; + +/** + * enum ipa_power_flag - IPA power flags + * @IPA_POWER_FLAG_RESUMED: Whether resume from suspend has been signaled + * @IPA_POWER_FLAG_SYSTEM: Hardware is system (not runtime) suspended + * @IPA_POWER_FLAG_STOPPED: Modem TX is disabled by ipa_start_xmit() + * @IPA_POWER_FLAG_STARTED: Modem TX was enabled by ipa_runtime_resume() + * @IPA_POWER_FLAG_COUNT: Number of defined power flags + */ +enum ipa_power_flag { + IPA_POWER_FLAG_RESUMED, + IPA_POWER_FLAG_SYSTEM, + IPA_POWER_FLAG_STOPPED, + IPA_POWER_FLAG_STARTED, + IPA_POWER_FLAG_COUNT, /* Last; not a flag */ +}; + +/** + * struct ipa_power - IPA power management information + * @dev: IPA device pointer + * @core: IPA core clock + * @spinlock: Protects modem TX queue enable/disable + * @flags: Boolean state flags + * @interconnect_count: Number of elements in interconnect[] + * @interconnect: Interconnect array + */ +struct ipa_power { + struct device *dev; + struct clk *core; + spinlock_t spinlock; /* used with STOPPED/STARTED power flags */ + DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT); + u32 interconnect_count; + struct ipa_interconnect *interconnect; +}; + +static int ipa_interconnect_init_one(struct device *dev, + struct ipa_interconnect *interconnect, + const struct ipa_interconnect_data *data) +{ + struct icc_path *path; + + path = of_icc_get(dev, data->name); + if (IS_ERR(path)) { + int ret = PTR_ERR(path); + + dev_err_probe(dev, ret, "error getting %s interconnect\n", + data->name); + + return ret; + } + + interconnect->path = path; + interconnect->average_bandwidth = data->average_bandwidth; + interconnect->peak_bandwidth = data->peak_bandwidth; + 
return 0; +} + +static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect) +{ + icc_put(interconnect->path); + memset(interconnect, 0, sizeof(*interconnect)); +} + +/* Initialize interconnects required for IPA operation */ +static int ipa_interconnect_init(struct ipa_power *power, struct device *dev, + const struct ipa_interconnect_data *data) +{ + struct ipa_interconnect *interconnect; + u32 count; + int ret; + + count = power->interconnect_count; + interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL); + if (!interconnect) + return -ENOMEM; + power->interconnect = interconnect; + + while (count--) { + ret = ipa_interconnect_init_one(dev, interconnect, data++); + if (ret) + goto out_unwind; + interconnect++; + } + + return 0; + +out_unwind: + while (interconnect-- > power->interconnect) + ipa_interconnect_exit_one(interconnect); + kfree(power->interconnect); + power->interconnect = NULL; + + return ret; +} + +/* Inverse of ipa_interconnect_init() */ +static void ipa_interconnect_exit(struct ipa_power *power) +{ + struct ipa_interconnect *interconnect; + + interconnect = power->interconnect + power->interconnect_count; + while (interconnect-- > power->interconnect) + ipa_interconnect_exit_one(interconnect); + kfree(power->interconnect); + power->interconnect = NULL; +} + +/* Currently we only use one bandwidth level, so just "enable" interconnects */ +static int ipa_interconnect_enable(struct ipa *ipa) +{ + struct ipa_interconnect *interconnect; + struct ipa_power *power = ipa->power; + int ret; + u32 i; + + interconnect = power->interconnect; + for (i = 0; i < power->interconnect_count; i++) { + ret = icc_set_bw(interconnect->path, + interconnect->average_bandwidth, + interconnect->peak_bandwidth); + if (ret) { + dev_err(&ipa->pdev->dev, + "error %d enabling %s interconnect\n", + ret, icc_get_name(interconnect->path)); + goto out_unwind; + } + interconnect++; + } + + return 0; + +out_unwind: + while (interconnect-- > power->interconnect) + (void)icc_set_bw(interconnect->path, 0, 0); + + return ret; +} + +/* To disable an interconnect, we just set its bandwidth to 0 */ +static int ipa_interconnect_disable(struct ipa *ipa) +{ + struct ipa_interconnect *interconnect; + struct ipa_power *power = ipa->power; + struct device *dev = &ipa->pdev->dev; + int result = 0; + u32 count; + int ret; + + count = power->interconnect_count; + interconnect = power->interconnect + count; + while (count--) { + interconnect--; + ret = icc_set_bw(interconnect->path, 0, 0); + if (ret) { + dev_err(dev, "error %d disabling %s interconnect\n", + ret, icc_get_name(interconnect->path)); + /* Try to disable all; record only the first error */ + if (!result) + result = ret; + } + } + + return result; +} + +/* Enable IPA power, enabling interconnects and the core clock */ +static int ipa_power_enable(struct ipa *ipa) +{ + int ret; + + ret = ipa_interconnect_enable(ipa); + if (ret) + return ret; + + ret = clk_prepare_enable(ipa->power->core); + if (ret) { + dev_err(&ipa->pdev->dev, "error %d enabling core clock\n", ret); + (void)ipa_interconnect_disable(ipa); + } + + return ret; +} + +/* Inverse of ipa_power_enable() */ +static int ipa_power_disable(struct ipa *ipa) +{ + clk_disable_unprepare(ipa->power->core); + + return ipa_interconnect_disable(ipa); +} + +static int ipa_runtime_suspend(struct device *dev) +{ + struct ipa *ipa = dev_get_drvdata(dev); + + /* Endpoints aren't usable until setup is complete */ + if (ipa->setup_complete) { + __clear_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags); + 
ipa_endpoint_suspend(ipa); + gsi_suspend(&ipa->gsi); + } + + return ipa_power_disable(ipa); +} + +static int ipa_runtime_resume(struct device *dev) +{ + struct ipa *ipa = dev_get_drvdata(dev); + int ret; + + ret = ipa_power_enable(ipa); + if (WARN_ON(ret < 0)) + return ret; + + /* Endpoints aren't usable until setup is complete */ + if (ipa->setup_complete) { + gsi_resume(&ipa->gsi); + ipa_endpoint_resume(ipa); + } + + return 0; +} + +static int ipa_suspend(struct device *dev) +{ + struct ipa *ipa = dev_get_drvdata(dev); + + __set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags); + + return pm_runtime_force_suspend(dev); +} + +static int ipa_resume(struct device *dev) +{ + struct ipa *ipa = dev_get_drvdata(dev); + int ret; + + ret = pm_runtime_force_resume(dev); + + __clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags); + + return ret; +} + +/* Return the current IPA core clock rate */ +u32 ipa_core_clock_rate(struct ipa *ipa) +{ + return ipa->power ? (u32)clk_get_rate(ipa->power->core) : 0; +} + +/** + * ipa_suspend_handler() - Handle the suspend IPA interrupt + * @ipa: IPA pointer + * @irq_id: IPA interrupt type (unused) + * + * If an RX endpoint is suspended, and the IPA has a packet destined for + * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP + * that it should resume the endpoint. If we get one of these interrupts + * we just wake up the system. + */ +static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id) +{ + /* The hardware will have been resumed just so this interrupt + * could be handled, so as far as runtime PM is concerned we're + * done. If we are in a system suspend, trigger a system resume. + */ + if (!__test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags)) + if (test_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags)) + pm_wakeup_dev_event(&ipa->pdev->dev, 0, true); + + /* Acknowledge/clear the suspend interrupt on all endpoints */ + ipa_interrupt_suspend_clear_all(ipa->interrupt); +} + +/* The next few functions coordinate stopping and starting the modem + * network device transmit queue. + * + * Transmit can be running concurrently with power resume, and there's a + * chance the resume completes before the transmit path stops the queue, + * leaving the queue in a stopped state. The next two functions are used + * to avoid this: ipa_power_modem_queue_stop() is used by ipa_start_xmit() + * to conditionally stop the TX queue; and ipa_power_modem_queue_wake() + * is used by the resume path to conditionally restart it. + * + * Two flags and a spinlock are used. If the queue is stopped, the STOPPED + * power flag is set. And if the queue is started, the STARTED flag is set. + * The queue is only started on resume if the STOPPED flag is set. And the + * queue is only stopped in ipa_start_xmit() if the STARTED flag is *not* + * set. As a result, the queue remains operational if the two activities + * happen concurrently regardless of the order they complete. For example, + * if the resume path wakes the queue first, ipa_start_xmit() finds STARTED + * set, clears it, and leaves the queue running; in the opposite order, + * ipa_start_xmit() stops the queue and sets STOPPED, and the resume path + * then wakes the queue again. The spinlock ensures the flag and TX queue + * operations are done atomically. + * + * The first function stops the modem netdev transmit queue, but only if + * the STARTED flag is *not* set. That flag is cleared if it was set. + * If the queue is stopped, the STOPPED flag is set. This is called only + * from the TX path, in ipa_start_xmit(). 
+ */ +void ipa_power_modem_queue_stop(struct ipa *ipa) +{ + struct ipa_power *power = ipa->power; + unsigned long flags; + + spin_lock_irqsave(&power->spinlock, flags); + + if (!__test_and_clear_bit(IPA_POWER_FLAG_STARTED, power->flags)) { + netif_stop_queue(ipa->modem_netdev); + __set_bit(IPA_POWER_FLAG_STOPPED, power->flags); + } + + spin_unlock_irqrestore(&power->spinlock, flags); +} + +/* This function starts the modem netdev transmit queue, but only if the + * STOPPED flag is set. That flag is cleared if it was set. If the queue + * was restarted, the STARTED flag is set; this allows ipa_start_xmit() + * to skip stopping the queue in the event of a race. + */ +void ipa_power_modem_queue_wake(struct ipa *ipa) +{ + struct ipa_power *power = ipa->power; + unsigned long flags; + + spin_lock_irqsave(&power->spinlock, flags); + + if (__test_and_clear_bit(IPA_POWER_FLAG_STOPPED, power->flags)) { + __set_bit(IPA_POWER_FLAG_STARTED, power->flags); + netif_wake_queue(ipa->modem_netdev); + } + + spin_unlock_irqrestore(&power->spinlock, flags); +} + +/* This function clears the STARTED flag once the TX queue is operating */ +void ipa_power_modem_queue_active(struct ipa *ipa) +{ + clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags); +} + +int ipa_power_setup(struct ipa *ipa) +{ + int ret; + + ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND, + ipa_suspend_handler); + + ret = device_init_wakeup(&ipa->pdev->dev, true); + if (ret) + ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND); + + return ret; +} + +void ipa_power_teardown(struct ipa *ipa) +{ + (void)device_init_wakeup(&ipa->pdev->dev, false); + ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND); +} + +/* Initialize IPA power management */ +struct ipa_power * +ipa_power_init(struct device *dev, const struct ipa_power_data *data) +{ + struct ipa_power *power; + struct clk *clk; + int ret; + + clk = clk_get(dev, "core"); + if (IS_ERR(clk)) { + dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n"); + + return ERR_CAST(clk); + } + + ret = clk_set_rate(clk, data->core_clock_rate); + if (ret) { + dev_err(dev, "error %d setting core clock rate to %u\n", + ret, data->core_clock_rate); + goto err_clk_put; + } + + power = kzalloc(sizeof(*power), GFP_KERNEL); + if (!power) { + ret = -ENOMEM; + goto err_clk_put; + } + power->dev = dev; + power->core = clk; + spin_lock_init(&power->spinlock); + power->interconnect_count = data->interconnect_count; + + ret = ipa_interconnect_init(power, dev, data->interconnect_data); + if (ret) + goto err_kfree; + + pm_runtime_set_autosuspend_delay(dev, IPA_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(dev); + pm_runtime_enable(dev); + + return power; + +err_kfree: + kfree(power); +err_clk_put: + clk_put(clk); + + return ERR_PTR(ret); +} + +/* Inverse of ipa_power_init() */ +void ipa_power_exit(struct ipa_power *power) +{ + struct device *dev = power->dev; + struct clk *clk = power->core; + + pm_runtime_disable(dev); + pm_runtime_dont_use_autosuspend(dev); + ipa_interconnect_exit(power); + kfree(power); + clk_put(clk); +} + +const struct dev_pm_ops ipa_pm_ops = { + .suspend = ipa_suspend, + .resume = ipa_resume, + .runtime_suspend = ipa_runtime_suspend, + .runtime_resume = ipa_runtime_resume, +}; diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h new file mode 100644 index 000000000000..2151805d7fbb --- /dev/null +++ b/drivers/net/ipa/ipa_power.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. 
+ * Copyright (C) 2018-2020 Linaro Ltd. + */ +#ifndef _IPA_POWER_H_ +#define _IPA_POWER_H_ + +struct device; + +struct ipa; +struct ipa_power_data; + +/* IPA device power management function block */ +extern const struct dev_pm_ops ipa_pm_ops; + +/** + * ipa_core_clock_rate() - Return the current IPA core clock rate + * @ipa: IPA structure + * + * Return: The current clock rate (in Hz), or 0. + */ +u32 ipa_core_clock_rate(struct ipa *ipa); + +/** + * ipa_power_modem_queue_stop() - Possibly stop the modem netdev TX queue + * @ipa: IPA pointer + */ +void ipa_power_modem_queue_stop(struct ipa *ipa); + +/** + * ipa_power_modem_queue_wake() - Possibly wake the modem netdev TX queue + * @ipa: IPA pointer + */ +void ipa_power_modem_queue_wake(struct ipa *ipa); + +/** + * ipa_power_modem_queue_active() - Report modem netdev TX queue active + * @ipa: IPA pointer + */ +void ipa_power_modem_queue_active(struct ipa *ipa); + +/** + * ipa_power_setup() - Set up IPA power management + * @ipa: IPA pointer + * + * Return: 0 if successful, or a negative error code + */ +int ipa_power_setup(struct ipa *ipa); + +/** + * ipa_power_teardown() - Inverse of ipa_power_setup() + * @ipa: IPA pointer + */ +void ipa_power_teardown(struct ipa *ipa); + +/** + * ipa_power_init() - Initialize IPA power management + * @dev: IPA device + * @data: Power configuration data + * + * Return: A pointer to an ipa_power structure, or a pointer-coded error + */ +struct ipa_power *ipa_power_init(struct device *dev, + const struct ipa_power_data *data); + +/** + * ipa_power_exit() - Inverse of ipa_power_init() + * @power: IPA power pointer + */ +void ipa_power_exit(struct ipa_power *power); + +#endif /* _IPA_POWER_H_ */ diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c index 4661105ce7ab..90f3aec55b36 100644 --- a/drivers/net/ipa/ipa_qmi.c +++ b/drivers/net/ipa/ipa_qmi.c @@ -467,10 +467,7 @@ static const struct qmi_ops ipa_client_ops = { .new_server = ipa_client_new_server, }; -/* This is called by ipa_setup(). We can be informed via remoteproc that - * the modem has shut down, in which case this function will be called - * again to prepare for it coming back up again. - */ +/* Set up for QMI message exchange */ int ipa_qmi_setup(struct ipa *ipa) { struct ipa_qmi *ipa_qmi = &ipa->qmi; @@ -526,6 +523,7 @@ err_server_handle_release: return ret; } +/* Tear down IPA QMI handles */ void ipa_qmi_teardown(struct ipa *ipa) { cancel_work_sync(&ipa->qmi.init_driver_work); diff --git a/drivers/net/ipa/ipa_qmi.h b/drivers/net/ipa/ipa_qmi.h index b6f2055d35a6..856ef629ccc8 100644 --- a/drivers/net/ipa/ipa_qmi.h +++ b/drivers/net/ipa/ipa_qmi.h @@ -39,7 +39,26 @@ struct ipa_qmi { bool indication_sent; }; +/** + * ipa_qmi_setup() - Set up for QMI message exchange + * @ipa: IPA pointer + * + * This is called at the end of ipa_setup(), to prepare for the exchange + * of QMI messages that perform a "handshake" between the AP and modem. + * When the modem QMI server announces its presence, an AP request message + * supplies operating parameters to be used by the modem, and the modem + * acknowledges receipt of those parameters. The modem will not touch the + * IPA hardware until this handshake is complete. + * + * If the modem crashes (or shuts down), a new handshake begins when the + * modem's QMI server is started again. 
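A skeletal illustration of the client side of such a handshake, using the kernel's QMI helpers (all names here are hypothetical, not the IPA ones): the new_server callback fires each time the modem's QMI server (re)appears, which is what restarts the handshake after a modem crash.

	static int example_new_server(struct qmi_handle *qmi,
				      struct qmi_service *svc)
	{
		/* Server announced itself: send the parameter request here */
		return 0;
	}

	static const struct qmi_ops example_client_ops = {
		.new_server	= example_new_server,
	};

	static int example_qmi_setup(struct qmi_handle *qmi)
	{
		int ret;

		ret = qmi_handle_init(qmi, 1024, &example_client_ops, NULL);
		if (ret)
			return ret;

		/* Hypothetical service/version/instance IDs */
		return qmi_add_lookup(qmi, 0x31, 1, 0);
	}
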
+ */ int ipa_qmi_setup(struct ipa *ipa); + +/** + * ipa_qmi_teardown() - Tear down IPA QMI handles + * @ipa: IPA pointer + */ void ipa_qmi_teardown(struct ipa *ipa); #endif /* !_IPA_QMI_H_ */ diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h index b89dec5865a5..a5b355384d4a 100644 --- a/drivers/net/ipa/ipa_reg.h +++ b/drivers/net/ipa/ipa_reg.h @@ -99,7 +99,7 @@ struct ipa; static inline u32 arbitration_lock_disable_encoded(enum ipa_version version, u32 mask) { - /* assert(version >= IPA_VERSION_4_0); */ + WARN_ON(version < IPA_VERSION_4_0); if (version < IPA_VERSION_4_9) return u32_encode_bits(mask, GENMASK(20, 17)); @@ -116,7 +116,7 @@ static inline u32 full_flush_rsc_closure_en_encoded(enum ipa_version version, { u32 val = enable ? 1 : 0; - /* assert(version >= IPA_VERSION_4_5); */ + WARN_ON(version < IPA_VERSION_4_5); if (version == IPA_VERSION_4_5 || version == IPA_VERSION_4_7) return u32_encode_bits(val, GENMASK(21, 21)); @@ -409,7 +409,7 @@ static inline u32 ipa_header_size_encoded(enum ipa_version version, val = u32_encode_bits(size, HDR_LEN_FMASK); if (version < IPA_VERSION_4_5) { - /* ipa_assert(header_size == size); */ + WARN_ON(header_size != size); return val; } @@ -429,7 +429,7 @@ static inline u32 ipa_metadata_offset_encoded(enum ipa_version version, val = u32_encode_bits(off, HDR_OFST_METADATA_FMASK); if (version < IPA_VERSION_4_5) { - /* ipa_assert(offset == off); */ + WARN_ON(offset != off); return val; } @@ -812,7 +812,7 @@ ipa_reg_irq_suspend_info_offset(enum ipa_version version) static inline u32 ipa_reg_irq_suspend_en_ee_n_offset(enum ipa_version version, u32 ee) { - /* assert(version != IPA_VERSION_3_0); */ + WARN_ON(version == IPA_VERSION_3_0); if (version < IPA_VERSION_4_9) return 0x00003034 + 0x1000 * ee; @@ -830,7 +830,7 @@ ipa_reg_irq_suspend_en_offset(enum ipa_version version) static inline u32 ipa_reg_irq_suspend_clr_ee_n_offset(enum ipa_version version, u32 ee) { - /* assert(version != IPA_VERSION_3_0); */ + WARN_ON(version == IPA_VERSION_3_0); if (version < IPA_VERSION_4_9) return 0x00003038 + 0x1000 * ee; diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c index 3b2dc216d3a6..e3da95d69409 100644 --- a/drivers/net/ipa/ipa_resource.c +++ b/drivers/net/ipa/ipa_resource.c @@ -29,7 +29,6 @@ static bool ipa_resource_limits_valid(struct ipa *ipa, const struct ipa_resource_data *data) { -#ifdef IPA_VALIDATION u32 group_count; u32 i; u32 j; @@ -65,7 +64,7 @@ static bool ipa_resource_limits_valid(struct ipa *ipa, if (resource->limits[j].min || resource->limits[j].max) return false; } -#endif /* !IPA_VALIDATION */ + return true; } diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c index 93270e50b6b3..df7639c39d71 100644 --- a/drivers/net/ipa/ipa_smp2p.c +++ b/drivers/net/ipa/ipa_smp2p.c @@ -9,13 +9,13 @@ #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/panic_notifier.h> +#include <linux/pm_runtime.h> #include <linux/soc/qcom/smem.h> #include <linux/soc/qcom/smem_state.h> #include "ipa_smp2p.h" #include "ipa.h" #include "ipa_uc.h" -#include "ipa_clock.h" /** * DOC: IPA SMP2P communication with the modem @@ -23,19 +23,19 @@ * SMP2P is a primitive communication mechanism available between the AP and * the modem. The IPA driver uses this for two purposes: to enable the modem * to state that the GSI hardware is ready to use; and to communicate the - * state of the IPA clock in the event of a crash. + * state of IPA power in the event of a crash. 
* * GSI needs to have early initialization completed before it can be used. * This initialization is done either by Trust Zone or by the modem. In the * latter case, the modem uses an SMP2P interrupt to tell the AP IPA driver * when the GSI is ready to use. * - * The modem is also able to inquire about the current state of the IPA - * clock by trigging another SMP2P interrupt to the AP. We communicate - * whether the clock is enabled using two SMP2P state bits--one to - * indicate the clock state (on or off), and a second to indicate the - * clock state bit is valid. The modem will poll the valid bit until it - * is set, and at that time records whether the AP has the IPA clock enabled. + * The modem is also able to inquire about the current state of IPA + * power by triggering another SMP2P interrupt to the AP. We communicate + * whether power is enabled using two SMP2P state bits--one to indicate + * the power state (on or off), and a second to indicate the power state + * bit is valid. The modem will poll the valid bit until it is set, and + * at that time records whether the AP has IPA power enabled. * * Finally, if the AP kernel panics, we update the SMP2P state bits even if * we never receive an interrupt from the modem requesting this. @@ -45,14 +45,14 @@ * struct ipa_smp2p - IPA SMP2P information * @ipa: IPA pointer * @valid_state: SMEM state indicating enabled state is valid - * @enabled_state: SMEM state to indicate clock is enabled + * @enabled_state: SMEM state to indicate power is enabled * @valid_bit: Valid bit in 32-bit SMEM state mask * @enabled_bit: Enabled bit in 32-bit SMEM state mask * @enabled_bit: Enabled bit in 32-bit SMEM state mask - * @clock_query_irq: IPA interrupt triggered by modem for clock query + * @clock_query_irq: IPA interrupt triggered by modem for power query * @setup_ready_irq: IPA interrupt triggered by modem to signal GSI ready - * @clock_on: Whether IPA clock is on - * @notified: Whether modem has been notified of clock state + * @power_on: Whether IPA power is on + * @notified: Whether modem has been notified of power state * @disabled: Whether setup ready interrupt handling is disabled * @mutex: Mutex protecting ready-interrupt/shutdown interlock * @panic_notifier: Panic notifier structure @@ -65,7 +65,7 @@ u32 enabled_bit; u32 clock_query_irq; u32 setup_ready_irq; - bool clock_on; + bool power_on; bool notified; bool disabled; struct mutex mutex; @@ -73,28 +73,30 @@ }; /** - * ipa_smp2p_notify() - use SMP2P to tell modem about IPA clock state + * ipa_smp2p_notify() - use SMP2P to tell modem about IPA power state * @smp2p: SMP2P information * * This is called either when the modem has requested it (by triggering - * the modem clock query IPA interrupt) or whenever the AP is shutting down + * the modem power query IPA interrupt) or whenever the AP is shutting down * (via a panic notifier). It sets the two SMP2P state bits--one saying - * whether the IPA clock is running, and the other indicating the first bit + * whether the IPA power is on, and the other indicating the first bit * is valid. 
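The essence of the two-bit protocol, as a sketch with hypothetical names: the state bit is written first and the valid bit second, so a poller that observes "valid" is guaranteed to read a settled state value.

	static void example_publish_state(struct qcom_smem_state *enabled_state,
					  struct qcom_smem_state *valid_state,
					  u32 enabled_bit, u32 valid_bit,
					  bool power_on)
	{
		u32 mask;

		/* Publish the state itself... */
		mask = BIT(enabled_bit);
		qcom_smem_state_update_bits(enabled_state, mask,
					    power_on ? mask : 0);

		/* ...then mark it valid so the modem may consume it */
		mask = BIT(valid_bit);
		qcom_smem_state_update_bits(valid_state, mask, mask);
	}
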
*/ static void ipa_smp2p_notify(struct ipa_smp2p *smp2p) { + struct device *dev; u32 value; u32 mask; if (smp2p->notified) return; - smp2p->clock_on = ipa_clock_get_additional(smp2p->ipa); + dev = &smp2p->ipa->pdev->dev; + smp2p->power_on = pm_runtime_get_if_active(dev, true) > 0; - /* Signal whether the clock is enabled */ + /* Signal whether the IPA power is enabled */ mask = BIT(smp2p->enabled_bit); - value = smp2p->clock_on ? mask : 0; + value = smp2p->power_on ? mask : 0; qcom_smem_state_update_bits(smp2p->enabled_state, mask, value); /* Now indicate that the enabled flag is valid */ @@ -124,7 +126,7 @@ static int ipa_smp2p_panic_notifier(struct notifier_block *nb, ipa_smp2p_notify(smp2p); - if (smp2p->clock_on) + if (smp2p->power_on) ipa_uc_panic_notifier(smp2p->ipa); return NOTIFY_DONE; @@ -150,19 +152,31 @@ static void ipa_smp2p_panic_notifier_unregister(struct ipa_smp2p *smp2p) static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id) { struct ipa_smp2p *smp2p = dev_id; + struct device *dev; + int ret; mutex_lock(&smp2p->mutex); - if (!smp2p->disabled) { - int ret; + if (smp2p->disabled) + goto out_mutex_unlock; + smp2p->disabled = true; /* If any others arrive, ignore them */ - ret = ipa_setup(smp2p->ipa); - if (ret) - dev_err(&smp2p->ipa->pdev->dev, - "error %d from ipa_setup()\n", ret); - smp2p->disabled = true; + /* Power needs to be active for setup */ + dev = &smp2p->ipa->pdev->dev; + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "error %d getting power for setup\n", ret); + goto out_power_put; } + /* An error here won't cause driver shutdown, so warn if one occurs */ + ret = ipa_setup(smp2p->ipa); + WARN(ret != 0, "error %d from ipa_setup()\n", ret); + +out_power_put: + pm_runtime_mark_last_busy(dev); + (void)pm_runtime_put_autosuspend(dev); +out_mutex_unlock: mutex_unlock(&smp2p->mutex); return IRQ_HANDLED; @@ -195,14 +209,17 @@ static void ipa_smp2p_irq_exit(struct ipa_smp2p *smp2p, u32 irq) free_irq(irq, smp2p); } -/* Drop the clock reference if it was taken in ipa_smp2p_notify() */ -static void ipa_smp2p_clock_release(struct ipa *ipa) +/* Drop the power reference if it was taken in ipa_smp2p_notify() */ +static void ipa_smp2p_power_release(struct ipa *ipa) { - if (!ipa->smp2p->clock_on) + struct device *dev = &ipa->pdev->dev; + + if (!ipa->smp2p->power_on) return; - ipa_clock_put(ipa); - ipa->smp2p->clock_on = false; + pm_runtime_mark_last_busy(dev); + (void)pm_runtime_put_autosuspend(dev); + ipa->smp2p->power_on = false; } /* Initialize the IPA SMP2P subsystem */ @@ -236,7 +253,7 @@ int ipa_smp2p_init(struct ipa *ipa, bool modem_init) smp2p->ipa = ipa; - /* These fields are needed by the clock query interrupt + /* These fields are needed by the power query interrupt * handler, so initialize them now. 
*/ mutex_init(&smp2p->mutex); @@ -289,8 +306,8 @@ void ipa_smp2p_exit(struct ipa *ipa) ipa_smp2p_irq_exit(smp2p, smp2p->setup_ready_irq); ipa_smp2p_panic_notifier_unregister(smp2p); ipa_smp2p_irq_exit(smp2p, smp2p->clock_query_irq); - /* We won't get notified any more; drop clock reference (if any) */ - ipa_smp2p_clock_release(ipa); + /* We won't get notified any more; drop power reference (if any) */ + ipa_smp2p_power_release(ipa); ipa->smp2p = NULL; mutex_destroy(&smp2p->mutex); kfree(smp2p); @@ -319,13 +336,13 @@ void ipa_smp2p_notify_reset(struct ipa *ipa) if (!smp2p->notified) return; - ipa_smp2p_clock_release(ipa); + ipa_smp2p_power_release(ipa); - /* Reset the clock enabled valid flag */ + /* Reset the power enabled valid flag */ mask = BIT(smp2p->valid_bit); qcom_smem_state_update_bits(smp2p->valid_state, mask, 0); - /* Mark the clock disabled for good measure... */ + /* Mark the power disabled for good measure... */ mask = BIT(smp2p->enabled_bit); qcom_smem_state_update_bits(smp2p->enabled_state, mask, 0); diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h index 20319438a841..99a956789638 100644 --- a/drivers/net/ipa/ipa_smp2p.h +++ b/drivers/net/ipa/ipa_smp2p.h @@ -39,7 +39,7 @@ void ipa_smp2p_disable(struct ipa *ipa); * ipa_smp2p_notify_reset() - Reset modem notification state * @ipa: IPA pointer * - * If the modem crashes it queries the IPA clock state. In cleaning + * If the modem crashes it queries the IPA power state. In cleaning * up after such a crash this is used to reset some state maintained * for managing this notification. */ diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c index c617a9156f26..2324e1b93e37 100644 --- a/drivers/net/ipa/ipa_table.c +++ b/drivers/net/ipa/ipa_table.c @@ -120,8 +120,6 @@ */ #define IPA_ZERO_RULE_SIZE (2 * sizeof(__le32)) -#ifdef IPA_VALIDATE - /* Check things that can be validated at build time. 
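With the IPA_VALIDATE conditional gone, these checks now run on every build. As a reminder (illustrative only, not the IPA checks themselves), BUILD_BUG_ON() fails the compile when its condition is true, and costs nothing at run time:

	static void example_validate_build(void)
	{
		/* Compiles quietly: the condition is false */
		BUILD_BUG_ON(sizeof(u64) != 8);

		/* A true condition would fail the build, e.g.:
		 * BUILD_BUG_ON(sizeof(u32) == 4);
		 */
	}
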
*/ static void ipa_table_validate_build(void) { @@ -161,7 +159,7 @@ ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route) else size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64); - if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed)) + if (!ipa_cmd_table_valid(ipa, mem, route)) return false; /* mem->size >= size is sufficient, but we'll demand more */ @@ -169,7 +167,7 @@ ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route) return true; /* Hashed table regions can be zero size if hashing is not supported */ - if (hashed && !mem->size) + if (ipa_table_hash_support(ipa) && !mem->size) return true; dev_err(dev, "%s table region %u size 0x%02x, expected 0x%02x\n", @@ -183,14 +181,22 @@ bool ipa_table_valid(struct ipa *ipa) { bool valid; - valid = ipa_table_valid_one(IPA_MEM_V4_FILTER, false); - valid = valid && ipa_table_valid_one(IPA_MEM_V4_FILTER_HASHED, false); - valid = valid && ipa_table_valid_one(IPA_MEM_V6_FILTER, false); - valid = valid && ipa_table_valid_one(IPA_MEM_V6_FILTER_HASHED, false); - valid = valid && ipa_table_valid_one(IPA_MEM_V4_ROUTE, true); - valid = valid && ipa_table_valid_one(IPA_MEM_V4_ROUTE_HASHED, true); - valid = valid && ipa_table_valid_one(IPA_MEM_V6_ROUTE, true); - valid = valid && ipa_table_valid_one(IPA_MEM_V6_ROUTE_HASHED, true); + valid = ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER, false); + valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER, false); + valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE, true); + valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE, true); + + if (!ipa_table_hash_support(ipa)) + return valid; + + valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER_HASHED, + false); + valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER_HASHED, + false); + valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE_HASHED, + true); + valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE_HASHED, + true); return valid; } @@ -217,14 +223,6 @@ bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map) return true; } -#else /* !IPA_VALIDATE */ -static void ipa_table_validate_build(void) - -{ -} - -#endif /* !IPA_VALIDATE */ - /* Zero entry count means no table, so just return a 0 address */ static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count) { @@ -233,7 +231,7 @@ static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count) if (!count) return 0; -/* assert(count <= max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX)); */ + WARN_ON(count > max_t(u32, IPA_FILTER_COUNT_MAX, IPA_ROUTE_COUNT_MAX)); /* Skip over the zero rule and possibly the filter mask */ skip = filter_mask ? 
1 : 2; diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h index 1e2be9fce2f8..b6a9a0d79d68 100644 --- a/drivers/net/ipa/ipa_table.h +++ b/drivers/net/ipa/ipa_table.h @@ -16,8 +16,6 @@ struct ipa; /* The maximum number of route table entries (IPv4, IPv6; hashed or not) */ #define IPA_ROUTE_COUNT_MAX 15 -#ifdef IPA_VALIDATE - /** * ipa_table_valid() - Validate route and filter table memory regions * @ipa: IPA pointer @@ -35,20 +33,6 @@ bool ipa_table_valid(struct ipa *ipa); */ bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask); -#else /* !IPA_VALIDATE */ - -static inline bool ipa_table_valid(struct ipa *ipa) -{ - return true; -} - -static inline bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask) -{ - return true; -} - -#endif /* !IPA_VALIDATE */ - /** * ipa_table_hash_support() - Return true if hashed tables are supported * @ipa: IPA pointer diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c index fd9219863234..856e55a080a7 100644 --- a/drivers/net/ipa/ipa_uc.c +++ b/drivers/net/ipa/ipa_uc.c @@ -7,9 +7,9 @@ #include <linux/types.h> #include <linux/io.h> #include <linux/delay.h> +#include <linux/pm_runtime.h> #include "ipa.h" -#include "ipa_clock.h" #include "ipa_uc.h" /** @@ -131,7 +131,7 @@ static void ipa_uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id) if (shared->event == IPA_UC_EVENT_ERROR) dev_err(dev, "microcontroller error event\n"); else if (shared->event != IPA_UC_EVENT_LOG_INFO) - dev_err(dev, "unsupported microcontroller event %hhu\n", + dev_err(dev, "unsupported microcontroller event %u\n", shared->event); /* The LOG_INFO event can be safely ignored */ } @@ -140,53 +140,77 @@ static void ipa_uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id) static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id) { struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa); + struct device *dev = &ipa->pdev->dev; /* An INIT_COMPLETED response message is sent to the AP by the * microcontroller when it is operational. Other than this, the AP * should only receive responses from the microcontroller when it has * sent it a request message. * - * We can drop the clock reference taken in ipa_uc_setup() once we + * We can drop the power reference taken in ipa_uc_power() once we * know the microcontroller has finished its initialization. */ switch (shared->response) { case IPA_UC_RESPONSE_INIT_COMPLETED: - ipa->uc_loaded = true; - ipa_clock_put(ipa); + if (ipa->uc_powered) { + ipa->uc_loaded = true; + pm_runtime_mark_last_busy(dev); + (void)pm_runtime_put_autosuspend(dev); + ipa->uc_powered = false; + } else { + dev_warn(dev, "unexpected init_completed response\n"); + } break; default: - dev_warn(&ipa->pdev->dev, - "unsupported microcontroller response %hhu\n", + dev_warn(dev, "unsupported microcontroller response %u\n", shared->response); break; } } -/* ipa_uc_setup() - Set up the microcontroller */ -void ipa_uc_setup(struct ipa *ipa) +/* Configure the IPA microcontroller subsystem */ +void ipa_uc_config(struct ipa *ipa) { - /* The microcontroller needs the IPA clock running until it has - * completed its initialization. It signals this by sending an - * INIT_COMPLETED response message to the AP. This could occur after - * we have finished doing the rest of the IPA initialization, so we - * need to take an extra "proxy" reference, and hold it until we've - * received that signal. (This reference is dropped in - * ipa_uc_response_hdlr(), above.) 
- */ - ipa_clock_get(ipa); - + ipa->uc_powered = false; ipa->uc_loaded = false; ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_0, ipa_uc_event_handler); ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_1, ipa_uc_response_hdlr); } -/* Inverse of ipa_uc_setup() */ -void ipa_uc_teardown(struct ipa *ipa) +/* Inverse of ipa_uc_config() */ +void ipa_uc_deconfig(struct ipa *ipa) { + struct device *dev = &ipa->pdev->dev; + ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_1); ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0); - if (!ipa->uc_loaded) - ipa_clock_put(ipa); + if (!ipa->uc_powered) + return; + + pm_runtime_mark_last_busy(dev); + (void)pm_runtime_put_autosuspend(dev); +} + +/* Take a proxy power reference for the microcontroller */ +void ipa_uc_power(struct ipa *ipa) +{ + static bool already; + struct device *dev; + int ret; + + if (already) + return; + already = true; /* Only do this on first boot */ + + /* This power reference is dropped in ipa_uc_response_hdlr() above */ + dev = &ipa->pdev->dev; + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + dev_err(dev, "error %d getting proxy power\n", ret); + } else { + ipa->uc_powered = true; + } } /* Send a command to the microcontroller */ diff --git a/drivers/net/ipa/ipa_uc.h b/drivers/net/ipa/ipa_uc.h index e8510899a3f0..23847f934d64 100644 --- a/drivers/net/ipa/ipa_uc.h +++ b/drivers/net/ipa/ipa_uc.h @@ -9,16 +9,30 @@ struct ipa; /** - * ipa_uc_setup() - set up the IPA microcontroller subsystem + * ipa_uc_config() - Configure the IPA microcontroller subsystem * @ipa: IPA pointer */ -void ipa_uc_setup(struct ipa *ipa); +void ipa_uc_config(struct ipa *ipa); /** - * ipa_uc_teardown() - inverse of ipa_uc_setup() + * ipa_uc_deconfig() - Inverse of ipa_uc_config() * @ipa: IPA pointer */ -void ipa_uc_teardown(struct ipa *ipa); +void ipa_uc_deconfig(struct ipa *ipa); + +/** + * ipa_uc_power() - Take a proxy power reference for the microcontroller + * @ipa: IPA pointer + * + * The first time the modem boots, it loads firmware for and starts the + * IPA-resident microcontroller. The microcontroller signals that it + * has completed its initialization by sending an INIT_COMPLETED response + * message to the AP. The AP must ensure the IPA is powered until + * it receives this message, and to do so we take a "proxy" power + * reference on its behalf here. Once we receive the INIT_COMPLETED + * message (in ipa_uc_response_hdlr()) we drop this power reference. + */ +void ipa_uc_power(struct ipa *ipa); /** * ipa_uc_panic_notifier() |