Diffstat (limited to 'drivers/net/ipa')
-rw-r--r--	drivers/net/ipa/gsi_trans.c		|  11
-rw-r--r--	drivers/net/ipa/gsi_trans.h		|  10
-rw-r--r--	drivers/net/ipa/ipa_data-v3.1.c		|   2
-rw-r--r--	drivers/net/ipa/ipa_data-v3.5.1.c	|   2
-rw-r--r--	drivers/net/ipa/ipa_data-v4.11.c	|   2
-rw-r--r--	drivers/net/ipa/ipa_data-v4.2.c		|   2
-rw-r--r--	drivers/net/ipa/ipa_data-v4.5.c		|   2
-rw-r--r--	drivers/net/ipa/ipa_data-v4.9.c		|   2
-rw-r--r--	drivers/net/ipa/ipa_data.h		|   2
-rw-r--r--	drivers/net/ipa/ipa_endpoint.c		| 217
-rw-r--r--	drivers/net/ipa/ipa_endpoint.h		|   8
-rw-r--r--	drivers/net/ipa/ipa_power.c		| 178
12 files changed, 180 insertions, 258 deletions
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index 1544564bc283..87e1d43c118c 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -320,6 +320,17 @@ gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
 	atomic_add(tre_count, &trans_info->tre_avail);
 }
 
+/* Return true if no transactions are allocated, false otherwise */
+bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id)
+{
+	u32 tre_max = gsi_channel_tre_max(gsi, channel_id);
+	struct gsi_trans_info *trans_info;
+
+	trans_info = &gsi->channel[channel_id].trans_info;
+
+	return atomic_read(&trans_info->tre_avail) == tre_max;
+}
+
 /* Allocate a GSI transaction on a channel */
 struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
 					  u32 tre_count,
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
index 17fd1822d8a9..af379b49299e 100644
--- a/drivers/net/ipa/gsi_trans.h
+++ b/drivers/net/ipa/gsi_trans.h
@@ -130,6 +130,16 @@ void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr);
 void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool);
 
 /**
+ * gsi_channel_trans_idle() - Return whether no transactions are allocated
+ * @gsi:	GSI pointer
+ * @channel_id:	Channel the transaction is associated with
+ *
+ * Return:	True if no transactions are allocated, false otherwise
+ *
+ */
+bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id);
+
+/**
  * gsi_channel_trans_alloc() - Allocate a GSI transaction on a channel
  * @gsi:	GSI pointer
  * @channel_id:	Channel the transaction is associated with
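The new helper leans on the channel's existing TRE accounting: tre_avail starts out equal to gsi_channel_tre_max() and is drawn down while transactions hold TREs, so "all TREs available" is exactly "no transactions allocated". A minimal userspace model of that invariant (editorial sketch; the model_* names are stand-ins, not part of the commit):

/* Editorial sketch: models the tre_avail accounting behind
 * gsi_channel_trans_idle().  The channel is "idle" exactly when every
 * TRE reserved by an allocated transaction has been released again.
 */
#include <assert.h>
#include <stdbool.h>

struct model_channel {
	int tre_max;	/* total TREs usable on the ring */
	int tre_avail;	/* TREs not reserved by any transaction */
};

static bool model_trans_idle(const struct model_channel *ch)
{
	return ch->tre_avail == ch->tre_max;	/* mirrors the new helper */
}

int main(void)
{
	struct model_channel ch = { .tre_max = 64, .tre_avail = 64 };

	assert(model_trans_idle(&ch));	/* nothing allocated yet */
	ch.tre_avail--;			/* a transaction reserves one TRE */
	assert(!model_trans_idle(&ch));
	ch.tre_avail++;			/* gsi_trans_tre_release() */
	assert(model_trans_idle(&ch));

	return 0;
}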
diff --git a/drivers/net/ipa/ipa_data-v3.1.c b/drivers/net/ipa/ipa_data-v3.1.c
index 06ddb85f39b2..8ff351aefd23 100644
--- a/drivers/net/ipa/ipa_data-v3.1.c
+++ b/drivers/net/ipa/ipa_data-v3.1.c
@@ -101,6 +101,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.aggregation	= true,
 			.status_enable	= true,
 			.rx = {
+				.buffer_size	= 8192,
 				.pad_align	= ilog2(sizeof(u32)),
 			},
 		},
@@ -148,6 +149,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.qmap		= true,
 			.aggregation	= true,
 			.rx = {
+				.buffer_size	= 8192,
 				.aggr_close_eof	= true,
 			},
 		},
diff --git a/drivers/net/ipa/ipa_data-v3.5.1.c b/drivers/net/ipa/ipa_data-v3.5.1.c
index 760c22bbdf70..d1c466abddb2 100644
--- a/drivers/net/ipa/ipa_data-v3.5.1.c
+++ b/drivers/net/ipa/ipa_data-v3.5.1.c
@@ -92,6 +92,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.aggregation	= true,
 			.status_enable	= true,
 			.rx = {
+				.buffer_size	= 8192,
 				.pad_align	= ilog2(sizeof(u32)),
 			},
 		},
@@ -140,6 +141,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.qmap		= true,
 			.aggregation	= true,
 			.rx = {
+				.buffer_size	= 8192,
 				.aggr_close_eof	= true,
 			},
 		},
diff --git a/drivers/net/ipa/ipa_data-v4.11.c b/drivers/net/ipa/ipa_data-v4.11.c
index fea91451a0c3..b1991cc6f0ca 100644
--- a/drivers/net/ipa/ipa_data-v4.11.c
+++ b/drivers/net/ipa/ipa_data-v4.11.c
@@ -86,6 +86,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.aggregation	= true,
 			.status_enable	= true,
 			.rx = {
+				.buffer_size	= 8192,
 				.pad_align	= ilog2(sizeof(u32)),
 			},
 		},
@@ -133,6 +134,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.qmap		= true,
 			.aggregation	= true,
 			.rx = {
+				.buffer_size	= 32768,
 				.aggr_close_eof	= true,
 			},
 		},
diff --git a/drivers/net/ipa/ipa_data-v4.2.c b/drivers/net/ipa/ipa_data-v4.2.c
index 2a231e79d5e1..1190a43e8743 100644
--- a/drivers/net/ipa/ipa_data-v4.2.c
+++ b/drivers/net/ipa/ipa_data-v4.2.c
@@ -82,6 +82,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.aggregation	= true,
 			.status_enable	= true,
 			.rx = {
+				.buffer_size	= 8192,
 				.pad_align	= ilog2(sizeof(u32)),
 			},
 		},
@@ -130,6 +131,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.qmap		= true,
 			.aggregation	= true,
 			.rx = {
+				.buffer_size	= 8192,
 				.aggr_close_eof	= true,
 			},
 		},
diff --git a/drivers/net/ipa/ipa_data-v4.5.c b/drivers/net/ipa/ipa_data-v4.5.c
index 2da2c4194f2e..944f72b0f285 100644
--- a/drivers/net/ipa/ipa_data-v4.5.c
+++ b/drivers/net/ipa/ipa_data-v4.5.c
@@ -95,6 +95,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.aggregation	= true,
 			.status_enable	= true,
 			.rx = {
+				.buffer_size	= 8192,
 				.pad_align	= ilog2(sizeof(u32)),
 			},
 		},
@@ -142,6 +143,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.qmap		= true,
 			.aggregation	= true,
 			.rx = {
+				.buffer_size	= 8192,
 				.aggr_close_eof	= true,
 			},
 		},
diff --git a/drivers/net/ipa/ipa_data-v4.9.c b/drivers/net/ipa/ipa_data-v4.9.c
index 2421b5abb5d4..16786bff7ef8 100644
--- a/drivers/net/ipa/ipa_data-v4.9.c
+++ b/drivers/net/ipa/ipa_data-v4.9.c
@@ -87,6 +87,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.aggregation	= true,
 			.status_enable	= true,
 			.rx = {
+				.buffer_size	= 8192,
 				.pad_align	= ilog2(sizeof(u32)),
 			},
 		},
@@ -134,6 +135,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
 			.qmap		= true,
 			.aggregation	= true,
 			.rx = {
+				.buffer_size	= 8192,
 				.aggr_close_eof	= true,
 			},
 		},
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index 6d329e9ce5d2..dbbeecf6df29 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -112,6 +112,7 @@ struct ipa_endpoint_tx_data {
 
 /**
  * struct ipa_endpoint_rx_data - configuration data for RX endpoints
+ * @buffer_size: requested receive buffer size (bytes)
  * @pad_align:	power-of-2 boundary to which packet payload is aligned
  * @aggr_close_eof: whether aggregation closes on end-of-frame
  *
@@ -125,6 +126,7 @@ struct ipa_endpoint_tx_data {
  * a "frame" consisting of several transfers has ended.
  */
struct ipa_endpoint_rx_data {
+	u32 buffer_size;
 	u32 pad_align;
 	bool aggr_close_eof;
 };
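Each SoC's configuration now states its receive buffer size explicitly: 8192 preserves the previous fixed size everywhere, while one aggregating RX endpoint in the v4.11 data grows to 32768. Buffers are allocated as power-of-2 runs of pages, so the configured size maps to an allocation order; a small sketch of that arithmetic (editorial; order_for() is a stand-in for the kernel's get_order(), and a 4 KiB page size is assumed):

/* Editorial sketch: how a buffer size becomes a page allocation order. */
#include <stdio.h>

static int order_for(unsigned int size, unsigned int page_size)
{
	int order = 0;

	while ((page_size << order) < size)
		order++;	/* same result as the kernel's get_order() */

	return order;
}

int main(void)
{
	printf("8192  -> order %d\n", order_for(8192, 4096));	/* 1: two pages */
	printf("32768 -> order %d\n", order_for(32768, 4096));	/* 3: eight pages */

	return 0;
}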
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 68291a3efd04..888e94278a84 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -25,10 +25,8 @@
 
 #define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)
 
-#define IPA_REPLENISH_BATCH	16
-
-/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
-#define IPA_RX_BUFFER_SIZE	8192	/* PAGE_SIZE > 4096 wastes a LOT */
+/* Hardware is told about receive buffers once a "batch" has been queued */
+#define IPA_REPLENISH_BATCH	16	/* Must be non-zero */
 
 /* The amount of RX buffer space consumed by standard skb overhead */
 #define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
@@ -75,6 +73,14 @@ struct ipa_status {
 #define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)
 #define IPA_STATUS_FLAGS2_TAG_FMASK		GENMASK_ULL(63, 16)
 
+static u32 aggr_byte_limit_max(enum ipa_version version)
+{
+	if (version < IPA_VERSION_4_5)
+		return field_max(aggr_byte_limit_fmask(true));
+
+	return field_max(aggr_byte_limit_fmask(false));
+}
+
 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
 			    const struct ipa_gsi_endpoint_data *all_data,
 			    const struct ipa_gsi_endpoint_data *data)
@@ -87,6 +93,9 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
 		return true;
 
 	if (!data->toward_ipa) {
+		u32 buffer_size;
+		u32 limit;
+
 		if (data->endpoint.filter_support) {
 			dev_err(dev, "filtering not supported for "
 					"RX endpoint %u\n",
@@ -94,6 +103,41 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
 			return false;
 		}
 
+		/* Nothing more to check for non-AP RX */
+		if (data->ee_id != GSI_EE_AP)
+			return true;
+
+		buffer_size = data->endpoint.config.rx.buffer_size;
+
+		/* The buffer size must hold an MTU plus overhead */
+		limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
+		if (buffer_size < limit) {
+			dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
+				data->endpoint_id, buffer_size, limit);
+			return false;
+		}
+
+		/* For an endpoint supporting receive aggregation, the
+		 * aggregation byte limit defines the point at which an
+		 * aggregation window will close.  It is programmed into the
+		 * IPA hardware as a number of KB.  We don't use "hard byte
+		 * limit" aggregation, so we need to supply enough space in
+		 * a receive buffer to hold a complete MTU plus normal skb
+		 * overhead *after* that aggregation byte limit has been
+		 * crossed.
+		 *
+		 * This check just ensures the receive buffer size doesn't
+		 * exceed what's representable in the aggregation limit field.
+		 */
+		if (data->endpoint.config.aggregation) {
+			limit += SZ_1K * aggr_byte_limit_max(ipa->version);
+			if (buffer_size > limit) {
+				dev_err(dev, "RX buffer size too large for aggregated RX endpoint %u (%u > %u)\n",
+					data->endpoint_id, buffer_size, limit);
+
+				return false;
+			}
+		}
+
 		return true;	/* Nothing more to check for RX */
 	}
 
@@ -156,21 +200,12 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
 	return true;
 }
 
-static u32 aggr_byte_limit_max(enum ipa_version version)
-{
-	if (version < IPA_VERSION_4_5)
-		return field_max(aggr_byte_limit_fmask(true));
-
-	return field_max(aggr_byte_limit_fmask(false));
-}
-
 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
 				    const struct ipa_gsi_endpoint_data *data)
 {
 	const struct ipa_gsi_endpoint_data *dp = data;
 	struct device *dev = &ipa->pdev->dev;
 	enum ipa_endpoint_name name;
-	u32 limit;
 
 	if (count > IPA_ENDPOINT_COUNT) {
 		dev_err(dev, "too many endpoints specified (%u > %u)\n",
@@ -178,26 +213,6 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
 		return false;
 	}
 
-	/* The aggregation byte limit defines the point at which an
-	 * aggregation window will close.  It is programmed into the
-	 * IPA hardware as a number of KB.  We don't use "hard byte
-	 * limit" aggregation, which means that we need to supply
-	 * enough space in a receive buffer to hold a complete MTU
-	 * plus normal skb overhead *after* that aggregation byte
-	 * limit has been crossed.
-	 *
-	 * This check ensures we don't define a receive buffer size
-	 * that would exceed what we can represent in the field that
-	 * is used to program its size.
-	 */
-	limit = aggr_byte_limit_max(ipa->version) * SZ_1K;
-	limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
-	if (limit < IPA_RX_BUFFER_SIZE) {
-		dev_err(dev, "buffer size too big for aggregation (%u > %u)\n",
-			IPA_RX_BUFFER_SIZE, limit);
-		return false;
-	}
-
 	/* Make sure needed endpoints have defined data */
 	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
 		dev_err(dev, "command TX endpoint not defined\n");
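The validation above bounds the configured size from below (it must hold an MTU plus skb overhead) and, for aggregating endpoints, from above (it must stay representable in the KB-granular aggregation byte limit field). A compact model of that check (editorial sketch; the EX_* constants are assumed placeholders, since the real values depend on page size and IPA version):

/* Editorial sketch: the shape of the per-endpoint RX buffer validation. */
#include <stdbool.h>
#include <stdio.h>

#define EX_MTU			1500U	/* assumed MTU */
#define EX_RX_OVERHEAD		640U	/* assumed skb overhead */
#define EX_AGGR_LIMIT_MAX_KB	31U	/* assumed field_max() result */

static bool rx_buffer_size_valid(unsigned int buffer_size, bool aggregation)
{
	unsigned int limit = EX_MTU + EX_RX_OVERHEAD;

	if (buffer_size < limit)
		return false;		/* can't hold an MTU plus overhead */

	if (aggregation) {
		limit += 1024U * EX_AGGR_LIMIT_MAX_KB;
		if (buffer_size > limit)
			return false;	/* not representable in the field */
	}

	return true;
}

int main(void)
{
	printf("%d\n", rx_buffer_size_valid(8192, true));	/* 1: OK */
	printf("%d\n", rx_buffer_size_valid(1024, false));	/* 0: too small */

	return 0;
}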
@@ -723,13 +738,15 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
 
 	if (endpoint->data->aggregation) {
 		if (!endpoint->toward_ipa) {
+			const struct ipa_endpoint_rx_data *rx_data;
 			bool close_eof;
 			u32 limit;
 
+			rx_data = &endpoint->data->rx;
 			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
 			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
 
-			limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
+			limit = ipa_aggr_size_kb(rx_data->buffer_size);
 			val |= aggr_byte_limit_encoded(version, limit);
 
 			limit = IPA_AGGR_TIME_LIMIT;
@@ -737,7 +754,7 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
 
 			/* AGGR_PKT_LIMIT is 0 (unlimited) */
 
-			close_eof = endpoint->data->rx.aggr_close_eof;
+			close_eof = rx_data->aggr_close_eof;
 			val |= aggr_sw_eof_active_encoded(version, close_eof);
 
 			/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
@@ -1020,134 +1037,98 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
 	iowrite32(val, ipa->reg_virt + offset);
 }
 
-static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
+static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
+				      struct gsi_trans *trans)
 {
-	struct gsi_trans *trans;
-	bool doorbell = false;
 	struct page *page;
+	u32 buffer_size;
 	u32 offset;
 	u32 len;
 	int ret;
 
-	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
+	buffer_size = endpoint->data->rx.buffer_size;
+	page = dev_alloc_pages(get_order(buffer_size));
 	if (!page)
 		return -ENOMEM;
 
-	trans = ipa_endpoint_trans_alloc(endpoint, 1);
-	if (!trans)
-		goto err_free_pages;
-
 	/* Offset the buffer to make space for skb headroom */
 	offset = NET_SKB_PAD;
-	len = IPA_RX_BUFFER_SIZE - offset;
+	len = buffer_size - offset;
 
 	ret = gsi_trans_page_add(trans, page, len, offset);
 	if (ret)
-		goto err_trans_free;
-	trans->data = page;	/* transaction owns page now */
-
-	if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
-		doorbell = true;
-		endpoint->replenish_ready = 0;
-	}
-
-	gsi_trans_commit(trans, doorbell);
-
-	return 0;
-
-err_trans_free:
-	gsi_trans_free(trans);
-err_free_pages:
-	__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+		__free_pages(page, get_order(buffer_size));
+	else
+		trans->data = page;	/* transaction owns page now */
 
-	return -ENOMEM;
+	return ret;
 }
 
 /**
  * ipa_endpoint_replenish() - Replenish endpoint receive buffers
  * @endpoint:	Endpoint to be replenished
- * @add_one:	Whether this is replacing a just-consumed buffer
  *
  * The IPA hardware can hold a fixed number of receive buffers for an RX
  * endpoint, based on the number of entries in the underlying channel ring
  * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
  * more receive buffers can be supplied to the hardware.  Replenishing for
- * an endpoint can be disabled, in which case requests to replenish a
- * buffer are "saved", and transferred to the backlog once it is re-enabled
- * again.
+ * an endpoint can be disabled, in which case buffers are not queued to
+ * the hardware.
  */
-static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
+static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
 {
-	struct gsi *gsi;
-	u32 backlog;
-	int delta;
+	struct gsi_trans *trans;
 
-	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) {
-		if (add_one)
-			atomic_inc(&endpoint->replenish_saved);
+	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
 		return;
-	}
 
-	/* If already active, just update the backlog */
-	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) {
-		if (add_one)
-			atomic_inc(&endpoint->replenish_backlog);
+	/* Skip it if it's already active */
+	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
 		return;
-	}
 
-	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
-		if (ipa_endpoint_replenish_one(endpoint))
+	while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
+		bool doorbell;
+
+		if (ipa_endpoint_replenish_one(endpoint, trans))
 			goto try_again_later;
 
-	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
-	if (add_one)
-		atomic_inc(&endpoint->replenish_backlog);
+		/* Ring the doorbell if we've got a full batch */
+		doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
+		gsi_trans_commit(trans, doorbell);
+	}
+
+	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
 
 	return;
 
 try_again_later:
+	gsi_trans_free(trans);
 	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
 
-	/* The last one didn't succeed, so fix the backlog */
-	delta = add_one ? 2 : 1;
-	backlog = atomic_add_return(delta, &endpoint->replenish_backlog);
-
 	/* Whenever a receive buffer transaction completes we'll try to
 	 * replenish again.  It's unlikely, but if we fail to supply even
 	 * one buffer, nothing will trigger another replenish attempt.
-	 * Receive buffer transactions use one TRE, so schedule work to
-	 * try replenishing again if our backlog is *all* available TREs.
+	 * If the hardware has no receive buffers queued, schedule work to
+	 * try replenishing again.
 	 */
-	gsi = &endpoint->ipa->gsi;
-	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
+	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
 		schedule_delayed_work(&endpoint->replenish_work,
 				      msecs_to_jiffies(1));
 }
 
 static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
 {
-	struct gsi *gsi = &endpoint->ipa->gsi;
-	u32 max_backlog;
-	u32 saved;
-
 	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
-	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
-		atomic_add(saved, &endpoint->replenish_backlog);
 
 	/* Start replenishing if hardware currently has no buffers */
-	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
-	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
-		ipa_endpoint_replenish(endpoint, false);
+	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
+		ipa_endpoint_replenish(endpoint);
 }
 
 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
 {
-	u32 backlog;
-
 	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
-	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
-		atomic_add(backlog, &endpoint->replenish_saved);
 }
 
 static void ipa_endpoint_replenish_work(struct work_struct *work)
@@ -1157,7 +1138,7 @@ static void ipa_endpoint_replenish_work(struct work_struct *work)
 
 	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
 
-	ipa_endpoint_replenish(endpoint, false);
+	ipa_endpoint_replenish(endpoint);
 }
 
 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
@@ -1183,15 +1164,16 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
 static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
 				   struct page *page, u32 len)
 {
+	u32 buffer_size = endpoint->data->rx.buffer_size;
 	struct sk_buff *skb;
 
 	/* Nothing to do if there's no netdev */
 	if (!endpoint->netdev)
 		return false;
 
-	WARN_ON(len > SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE - NET_SKB_PAD));
+	WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));
 
-	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
+	skb = build_skb(page_address(page), buffer_size);
 	if (skb) {
 		/* Reserve the headroom and account for the data */
 		skb_reserve(skb, NET_SKB_PAD);
@@ -1289,8 +1271,9 @@ static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
 static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
 				      struct page *page, u32 total_len)
 {
+	u32 buffer_size = endpoint->data->rx.buffer_size;
 	void *data = page_address(page) + NET_SKB_PAD;
-	u32 unused = IPA_RX_BUFFER_SIZE - total_len;
+	u32 unused = buffer_size - total_len;
 	u32 resid = total_len;
 
 	while (resid) {
@@ -1360,10 +1343,8 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
 {
 	struct page *page;
 
-	ipa_endpoint_replenish(endpoint, true);
-
 	if (trans->cancelled)
-		return;
+		goto done;
 
 	/* Parse or build a socket buffer using the actual received length */
 	page = trans->data;
@@ -1371,6 +1352,8 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
 		ipa_endpoint_status_parse(endpoint, page, trans->len);
 	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
 		trans->data = NULL;	/* Pages have been consumed */
+done:
+	ipa_endpoint_replenish(endpoint);
 }
 
 void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
@@ -1398,8 +1381,11 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
 	} else {
 		struct page *page = trans->data;
 
-		if (page)
-			__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+		if (page) {
+			u32 buffer_size = endpoint->data->rx.buffer_size;
+
+			__free_pages(page, get_order(buffer_size));
+		}
 	}
 }
 
@@ -1704,9 +1690,6 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
 	 */
 	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
 	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
-	atomic_set(&endpoint->replenish_saved,
-		   gsi_channel_tre_max(gsi, endpoint->channel_id));
-	atomic_set(&endpoint->replenish_backlog, 0);
 	INIT_DELAYED_WORK(&endpoint->replenish_work,
 			  ipa_endpoint_replenish_work);
 }
@@ -1882,6 +1865,8 @@ u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
 	enum ipa_endpoint_name name;
 	u32 filter_map;
 
+	BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
+
 	if (!ipa_endpoint_data_valid(ipa, count, data))
 		return 0;	/* Error */
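With the backlog and saved counters gone, the replenish loop simply allocates transactions until the pool runs dry, and the doorbell cadence falls out of a free-running counter: it rings on every IPA_REPLENISH_BATCH-th commit, which is why the new BUILD_BUG_ON() rejects a zero batch size (the modulo would be undefined). An editorial model of that cadence (EX_REPLENISH_BATCH is a stand-in constant):

/* Editorial sketch: the batched-doorbell pattern used by the new
 * replenish loop.  A free-running counter rings the doorbell once per
 * EX_REPLENISH_BATCH commits; the divisor must be non-zero.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_REPLENISH_BATCH	16	/* must be non-zero */

int main(void)
{
	uint64_t replenish_count = 0;
	int commit;

	for (commit = 1; commit <= 48; commit++) {
		bool doorbell = !(++replenish_count % EX_REPLENISH_BATCH);

		if (doorbell)
			printf("doorbell after commit %d\n", commit);
	}

	return 0;	/* prints 16, 32, 48 */
}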
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 0313cdc607de..12fd5b16c18e 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -65,9 +65,7 @@ enum ipa_replenish_flag {
  * @evt_ring_id: GSI event ring used by the endpoint
  * @netdev:	Network device pointer, if endpoint uses one
  * @replenish_flags: Replenishing state flags
- * @replenish_ready: Number of replenish transactions without doorbell
- * @replenish_saved: Replenish requests held while disabled
- * @replenish_backlog: Number of buffers needed to fill hardware queue
+ * @replenish_count: Total number of replenish transactions committed
  * @replenish_work: Work item used for repeated replenish failures
  */
 struct ipa_endpoint {
@@ -86,9 +84,7 @@ struct ipa_endpoint {
 
 	/* Receive buffer replenishing for RX endpoints */
 	DECLARE_BITMAP(replenish_flags, IPA_REPLENISH_COUNT);
-	u32 replenish_ready;
-	atomic_t replenish_saved;
-	atomic_t replenish_backlog;
+	u64 replenish_count;
 	struct delayed_work replenish_work;	/* global wq */
 };
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index f2989aac47a6..db5ac7552286 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -35,18 +35,6 @@
 #define IPA_AUTOSUSPEND_DELAY	500	/* milliseconds */
 
 /**
- * struct ipa_interconnect - IPA interconnect information
- * @path:		Interconnect path
- * @average_bandwidth:	Average interconnect bandwidth (KB/second)
- * @peak_bandwidth:	Peak interconnect bandwidth (KB/second)
- */
-struct ipa_interconnect {
-	struct icc_path *path;
-	u32 average_bandwidth;
-	u32 peak_bandwidth;
-};
-
-/**
  * enum ipa_power_flag - IPA power flags
  * @IPA_POWER_FLAG_RESUMED:	Whether resume from suspend has been signaled
  * @IPA_POWER_FLAG_SYSTEM:	Hardware is system (not runtime) suspended
@@ -79,164 +67,78 @@ struct ipa_power {
 	spinlock_t spinlock;	/* used with STOPPED/STARTED power flags */
 	DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
 	u32 interconnect_count;
-	struct ipa_interconnect *interconnect;
+	struct icc_bulk_data interconnect[];
 };
 
-static int ipa_interconnect_init_one(struct device *dev,
-				     struct ipa_interconnect *interconnect,
-				     const struct ipa_interconnect_data *data)
-{
-	struct icc_path *path;
-
-	path = of_icc_get(dev, data->name);
-	if (IS_ERR(path)) {
-		int ret = PTR_ERR(path);
-
-		dev_err_probe(dev, ret, "error getting %s interconnect\n",
-			      data->name);
-
-		return ret;
-	}
-
-	interconnect->path = path;
-	interconnect->average_bandwidth = data->average_bandwidth;
-	interconnect->peak_bandwidth = data->peak_bandwidth;
-
-	return 0;
-}
-
-static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect)
-{
-	icc_put(interconnect->path);
-	memset(interconnect, 0, sizeof(*interconnect));
-}
-
 /* Initialize interconnects required for IPA operation */
-static int ipa_interconnect_init(struct ipa_power *power, struct device *dev,
+static int ipa_interconnect_init(struct ipa_power *power,
 				 const struct ipa_interconnect_data *data)
 {
-	struct ipa_interconnect *interconnect;
-	u32 count;
-	int ret;
-
-	count = power->interconnect_count;
-	interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL);
-	if (!interconnect)
-		return -ENOMEM;
-	power->interconnect = interconnect;
-
-	while (count--) {
-		ret = ipa_interconnect_init_one(dev, interconnect, data++);
-		if (ret)
-			goto out_unwind;
-		interconnect++;
-	}
-
-	return 0;
-
-out_unwind:
-	while (interconnect-- > power->interconnect)
-		ipa_interconnect_exit_one(interconnect);
-	kfree(power->interconnect);
-	power->interconnect = NULL;
-
-	return ret;
-}
-
-/* Inverse of ipa_interconnect_init() */
-static void ipa_interconnect_exit(struct ipa_power *power)
-{
-	struct ipa_interconnect *interconnect;
-
-	interconnect = power->interconnect + power->interconnect_count;
-	while (interconnect-- > power->interconnect)
-		ipa_interconnect_exit_one(interconnect);
-	kfree(power->interconnect);
-	power->interconnect = NULL;
-}
-
-/* Currently we only use one bandwidth level, so just "enable" interconnects */
-static int ipa_interconnect_enable(struct ipa *ipa)
-{
-	struct ipa_interconnect *interconnect;
-	struct ipa_power *power = ipa->power;
+	struct icc_bulk_data *interconnect;
 	int ret;
 	u32 i;
 
-	interconnect = power->interconnect;
+	/* Initialize our interconnect data array for bulk operations */
+	interconnect = &power->interconnect[0];
 	for (i = 0; i < power->interconnect_count; i++) {
-		ret = icc_set_bw(interconnect->path,
-				 interconnect->average_bandwidth,
-				 interconnect->peak_bandwidth);
-		if (ret) {
-			dev_err(&ipa->pdev->dev,
-				"error %d enabling %s interconnect\n",
-				ret, icc_get_name(interconnect->path));
-			goto out_unwind;
-		}
+		/* interconnect->path is filled in by of_icc_bulk_get() */
+		interconnect->name = data->name;
+		interconnect->avg_bw = data->average_bandwidth;
+		interconnect->peak_bw = data->peak_bandwidth;
+		data++;
 		interconnect++;
 	}
 
-	return 0;
+	ret = of_icc_bulk_get(power->dev, power->interconnect_count,
+			      power->interconnect);
+	if (ret)
+		return ret;
 
-out_unwind:
-	while (interconnect-- > power->interconnect)
-		(void)icc_set_bw(interconnect->path, 0, 0);
+	/* All interconnects are initially disabled */
+	icc_bulk_disable(power->interconnect_count, power->interconnect);
+
+	/* Set the bandwidth values to be used when enabled */
+	ret = icc_bulk_set_bw(power->interconnect_count, power->interconnect);
+	if (ret)
+		icc_bulk_put(power->interconnect_count, power->interconnect);
 
 	return ret;
 }
 
-/* To disable an interconnect, we just its bandwidth to 0 */
-static int ipa_interconnect_disable(struct ipa *ipa)
+/* Inverse of ipa_interconnect_init() */
+static void ipa_interconnect_exit(struct ipa_power *power)
 {
-	struct ipa_interconnect *interconnect;
-	struct ipa_power *power = ipa->power;
-	struct device *dev = &ipa->pdev->dev;
-	int result = 0;
-	u32 count;
-	int ret;
-
-	count = power->interconnect_count;
-	interconnect = power->interconnect + count;
-	while (count--) {
-		interconnect--;
-		ret = icc_set_bw(interconnect->path, 0, 0);
-		if (ret) {
-			dev_err(dev, "error %d disabling %s interconnect\n",
-				ret, icc_get_name(interconnect->path));
-			/* Try to disable all; record only the first error */
-			if (!result)
-				result = ret;
-		}
-	}
-
-	return result;
+	icc_bulk_put(power->interconnect_count, power->interconnect);
 }
 
 /* Enable IPA power, enabling interconnects and the core clock */
 static int ipa_power_enable(struct ipa *ipa)
 {
+	struct ipa_power *power = ipa->power;
 	int ret;
 
-	ret = ipa_interconnect_enable(ipa);
+	ret = icc_bulk_enable(power->interconnect_count, power->interconnect);
 	if (ret)
 		return ret;
 
-	ret = clk_prepare_enable(ipa->power->core);
+	ret = clk_prepare_enable(power->core);
 	if (ret) {
-		dev_err(&ipa->pdev->dev, "error %d enabling core clock\n", ret);
-		(void)ipa_interconnect_disable(ipa);
+		dev_err(power->dev, "error %d enabling core clock\n", ret);
+		icc_bulk_disable(power->interconnect_count,
+				 power->interconnect);
 	}
 
 	return ret;
 }
 
 /* Inverse of ipa_power_enable() */
-static int ipa_power_disable(struct ipa *ipa)
+static void ipa_power_disable(struct ipa *ipa)
 {
-	clk_disable_unprepare(ipa->power->core);
+	struct ipa_power *power = ipa->power;
 
-	return ipa_interconnect_disable(ipa);
+	clk_disable_unprepare(power->core);
+
+	icc_bulk_disable(power->interconnect_count, power->interconnect);
 }
 
 static int ipa_runtime_suspend(struct device *dev)
@@ -250,7 +152,9 @@ static int ipa_runtime_suspend(struct device *dev)
 		gsi_suspend(&ipa->gsi);
 	}
 
-	return ipa_power_disable(ipa);
+	ipa_power_disable(ipa);
+
+	return 0;
 }
 
 static int ipa_runtime_resume(struct device *dev)
@@ -453,6 +357,7 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
 {
 	struct ipa_power *power;
 	struct clk *clk;
+	size_t size;
 	int ret;
 
 	clk = clk_get(dev, "core");
@@ -469,7 +374,8 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
 		goto err_clk_put;
 	}
 
-	power = kzalloc(sizeof(*power), GFP_KERNEL);
+	size = struct_size(power, interconnect, data->interconnect_count);
+	power = kzalloc(size, GFP_KERNEL);
 	if (!power) {
 		ret = -ENOMEM;
 		goto err_clk_put;
@@ -479,7 +385,7 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
 	spin_lock_init(&power->spinlock);
 	power->interconnect_count = data->interconnect_count;
 
-	ret = ipa_interconnect_init(power, dev, data->interconnect_data);
+	ret = ipa_interconnect_init(power, data->interconnect_data);
 	if (ret)
 		goto err_kfree;
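The interconnect conversion replaces the hand-rolled array walk with the kernel's bulk interconnect API, and the icc_bulk_data array now lives in a flexible array member sized with struct_size(). The lifecycle it adopts, sketched with the real <linux/interconnect.h> calls (editorial sketch; example_icc_setup() is a stand-in name and error unwinding is abbreviated):

/* Editorial sketch: the icc_bulk_* lifecycle this conversion adopts.
 * Callers fill in each element's name/avg_bw/peak_bw beforehand;
 * of_icc_bulk_get() fills in each element's path.
 */
#include <linux/interconnect.h>

static int example_icc_setup(struct device *dev, int count,
			     struct icc_bulk_data *icc)
{
	int ret;

	ret = of_icc_bulk_get(dev, count, icc);	/* look up all paths */
	if (ret)
		return ret;

	icc_bulk_disable(count, icc);		/* start out disabled */

	ret = icc_bulk_set_bw(count, icc);	/* program avg_bw/peak_bw */
	if (ret)
		icc_bulk_put(count, icc);	/* undo the lookup on error */

	return ret;
}

/* At runtime, icc_bulk_enable()/icc_bulk_disable() toggle all paths at
 * once; teardown is a single icc_bulk_put(), mirroring the new
 * ipa_interconnect_exit() above.
 */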